Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions NIGHTLY_CODEX_FINAL_ATTEMPT_1.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
Implemented a small contribution for [openai/openai-python#2677](https://github.com/openai/openai-python/issues/2677).

Changed:
- Added a focused regression test in [tests/lib/responses/test_responses.py](tests/lib/responses/test_responses.py) that verifies `responses.create()` sends Responses-native `function_call` and `function_call_output` input items.
- Added a minimal migration example in [examples/responses/function_call_inputs.py](examples/responses/function_call_inputs.py).
- Wrote [NIGHTLY_REPORT.md](NIGHTLY_REPORT.md).

Checks run:
- `uv run --with pytest --with pytest-xdist --with respx --with inline-snapshot --with pytest-asyncio pytest tests/lib/responses/test_responses.py -q`
- `uv run --with ruff ruff check tests/lib/responses/test_responses.py examples/responses/function_call_inputs.py`
- `python -m py_compile examples/responses/function_call_inputs.py tests/lib/responses/test_responses.py`

All passed. I left the working tree uncommitted as requested.
30 changes: 30 additions & 0 deletions examples/responses/function_call_inputs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
from openai import OpenAI

# Minimal migration example: replay a prior tool invocation and its result
# through the Responses API using native `function_call` /
# `function_call_output` input items (instead of chat-style tool messages).
client = OpenAI()

# Full conversation history, including the tool call the model made earlier
# and the output our application produced for it.
conversation = [
    {
        "role": "developer",
        "content": "Use the supplied function results to answer.",
    },
    {
        "role": "user",
        "content": "What is the current order status?",
    },
    {
        "type": "function_call",
        "call_id": "call_123",
        "name": "get_order_status",
        "arguments": '{"order_id":"order_123"}',
    },
    {
        "type": "function_call_output",
        "call_id": "call_123",
        "output": '{"status":"shipped"}',
    },
]

response = client.responses.create(model="gpt-4o-mini", input=conversation)

print(response.output_text)
75 changes: 75 additions & 0 deletions tests/lib/responses/test_responses.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from __future__ import annotations

import json
from typing_extensions import TypeVar

import httpx
import pytest
from respx import MockRouter
from inline_snapshot import snapshot
Expand Down Expand Up @@ -41,6 +43,79 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
)


@pytest.mark.respx(base_url=base_url)
def test_create_with_function_call_input_items(client: OpenAI, respx_mock: MockRouter) -> None:
    """Responses-native `function_call` / `function_call_output` input items are serialized verbatim."""
    # Single source of truth for the conversation: the mocked endpoint asserts
    # that exactly these items arrive on the wire.
    input_items = [
        {
            "role": "developer",
            "content": "Use the supplied function results to answer.",
        },
        {
            "role": "user",
            "content": "What is the current order status?",
        },
        {
            "type": "function_call",
            "call_id": "call_123",
            "name": "get_order_status",
            "arguments": '{"order_id":"order_123"}',
        },
        {
            "type": "function_call_output",
            "call_id": "call_123",
            "output": '{"status":"shipped"}',
        },
    ]

    def handler(request: httpx.Request) -> httpx.Response:
        # The request body must match the caller's payload exactly —
        # no dropped, renamed, or reshaped input items.
        assert json.loads(request.content) == {
            "model": "gpt-4o-mini",
            "input": input_items,
        }

        # Minimal well-formed Responses payload so the client can
        # deserialize the reply.
        return httpx.Response(
            200,
            json={
                "id": "resp_123",
                "object": "response",
                "created_at": 1754925861,
                "status": "completed",
                "model": "gpt-4o-mini-2024-07-18",
                "output": [],
                "parallel_tool_calls": True,
                "tool_choice": "auto",
                "tools": [],
            },
        )

    respx_mock.post("/responses").mock(side_effect=handler)

    response = client.responses.create(model="gpt-4o-mini", input=input_items)

    assert response.id == "resp_123"


@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
Expand Down