14 changes: 14 additions & 0 deletions NIGHTLY_CODEX_FINAL_ATTEMPT_1.md
@@ -0,0 +1,14 @@
Implemented a small, focused contribution for `openai/openai-python#2537`.

Changed:
- Added `test_streaming_allowed_tools_tool_choice_body` in [tests/lib/responses/test_responses.py](/home/runner/work/oss-nightly-control/oss-nightly-control/target/tests/lib/responses/test_responses.py)
- Added required [NIGHTLY_REPORT.md](/home/runner/work/oss-nightly-control/oss-nightly-control/target/NIGHTLY_REPORT.md)

Verified:
- `uv run --with pytest --with pytest-asyncio --with respx --with inline-snapshot pytest tests/lib/responses/test_responses.py -q -o addopts=''` passed: `6 passed`
- `uv run --with ruff ruff check tests/lib/responses/test_responses.py` passed
- `uv run --with ruff ruff format --check tests/lib/responses/test_responses.py` passed

Notes:
- The original 400 error appears to be server/model-policy driven, so I did not add SDK-side validation that could go stale. Instead, the test locks the SDK’s request serialization for the reported GPT-5 + streamed `allowed_tools` tool choice shape.
- Existing untracked `.codex-nightly-prompt.md` and `uv.lock` were left untouched.
49 changes: 49 additions & 0 deletions tests/lib/responses/test_responses.py
@@ -1,7 +1,9 @@
from __future__ import annotations

import json
from typing_extensions import TypeVar

import httpx
import pytest
from respx import MockRouter
from inline_snapshot import snapshot
@@ -41,6 +43,53 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
    )


@pytest.mark.respx(base_url=base_url)
def test_streaming_allowed_tools_tool_choice_body(client: OpenAI, respx_mock: MockRouter) -> None:
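    # Stub the /responses endpoint so the request is captured by respx instead of reaching the API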
    respx_mock.post("/responses").mock(return_value=httpx.Response(200, json={}))

    response = client.responses.with_raw_response.create(
        model="gpt-5",
        input="Use your python tool to do this math: 8*9183*7663",
        tools=[
            {
                "type": "code_interpreter",
                "container": {"type": "auto"},
            }
        ],
        tool_choice={
            "type": "allowed_tools",
            "mode": "auto",
            "tools": [
                {
                    "type": "code_interpreter",
                }
            ],
        },
        stream=True,
    )

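    # The serialized request body should carry the allowed_tools tool_choice and stream flag unchanged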
    assert json.loads(response.http_request.content) == {
        "model": "gpt-5",
        "input": "Use your python tool to do this math: 8*9183*7663",
        "tools": [
            {
                "type": "code_interpreter",
                "container": {"type": "auto"},
            }
        ],
        "tool_choice": {
            "type": "allowed_tools",
            "mode": "auto",
            "tools": [
                {
                    "type": "code_interpreter",
                }
            ],
        },
        "stream": True,
    }


@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client