Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions py/noxfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,18 @@ def test_agentscope(session, version):
_run_tests(session, f"{INTEGRATION_DIR}/agentscope/test_agentscope.py", version=version)


AUTOGEN_VERSIONS = _get_matrix_versions("autogen-agentchat")


@nox.session()
@nox.parametrize("version", AUTOGEN_VERSIONS, ids=AUTOGEN_VERSIONS)
def test_autogen(session, version):
    """Run the AutoGen integration tests against a pinned dependency version."""
    # Shared test dependencies go in first, then the version-matrix pins.
    _install_test_deps(session)
    # autogen-agentchat and autogen-ext are versioned in lockstep, so the
    # same matrix version is pinned for both packages.
    for package in ("autogen-agentchat", "autogen-ext"):
        _install_matrix_dep(session, package, version)
    _run_tests(session, f"{INTEGRATION_DIR}/autogen/test_autogen.py", version=version)


# Two test suites with different version requirements:
# 1. wrap_openai approach: works with older versions (0.1.9+)
# 2. Direct wrapper (setup_pydantic_ai): requires 1.10.0+ for all features
Expand Down
13 changes: 13 additions & 0 deletions py/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,8 @@ lint = [
"agentscope",
"agno",
"anthropic",
"autogen-agentchat",
"autogen-ext[openai]",
"cohere",
"autoevals",
"braintrust-core",
Expand Down Expand Up @@ -278,6 +280,14 @@ latest = "agno==2.6.0"
latest = "agentscope==1.0.19"
"1.0.0" = "agentscope==1.0.0"

[tool.braintrust.matrix.autogen-agentchat]
latest = "autogen-agentchat==0.7.5"
"0.7.0" = "autogen-agentchat==0.7.0"

[tool.braintrust.matrix.autogen-ext]
latest = "autogen-ext[openai]==0.7.5"
"0.7.0" = "autogen-ext[openai]==0.7.0"

[tool.braintrust.matrix.pydantic-ai-integration]
latest = "pydantic-ai==1.86.1"
"1.10.0" = "pydantic-ai==1.10.0"
Expand Down Expand Up @@ -348,6 +358,7 @@ latest = "braintrust-core==0.0.59"
adk = ["google-adk"]
agentscope = ["agentscope"]
agno = ["agno"]
autogen = ["autogen-agentchat"]
anthropic = ["anthropic"]
cohere = ["cohere"]
claude_agent_sdk = ["claude-agent-sdk"]
Expand All @@ -364,6 +375,8 @@ pydantic_ai = ["pydantic-ai-integration", "pydantic-ai-wrap-openai"]
[tool.braintrust.vendor-packages]
agno = "agno"
agentscope = "agentscope"
autogen-agentchat = "autogen_agentchat"
autogen-ext = "autogen_ext"
anthropic = "anthropic"
cohere = "cohere"
autoevals = "autoevals"
Expand Down
5 changes: 5 additions & 0 deletions py/src/braintrust/auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
AgentScopeIntegration,
AgnoIntegration,
AnthropicIntegration,
AutoGenIntegration,
ClaudeAgentSDKIntegration,
CohereIntegration,
DSPyIntegration,
Expand Down Expand Up @@ -60,6 +61,7 @@ def auto_instrument(
langchain: bool = True,
openai_agents: bool = True,
cohere: bool = True,
autogen: bool = True,
) -> dict[str, bool]:
"""
Auto-instrument supported AI/ML libraries for Braintrust tracing.
Expand All @@ -86,6 +88,7 @@ def auto_instrument(
langchain: Enable LangChain instrumentation (default: True)
openai_agents: Enable OpenAI Agents SDK instrumentation (default: True)
cohere: Enable Cohere instrumentation (default: True)
autogen: Enable AutoGen instrumentation (default: True)
Returns:
Dict mapping integration name to whether it was successfully instrumented.
Expand Down Expand Up @@ -163,6 +166,8 @@ def auto_instrument(
results["openai_agents"] = _instrument_integration(OpenAIAgentsIntegration)
if cohere:
results["cohere"] = _instrument_integration(CohereIntegration)
if autogen:
results["autogen"] = _instrument_integration(AutoGenIntegration)

return results

Expand Down
2 changes: 2 additions & 0 deletions py/src/braintrust/integrations/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from .agentscope import AgentScopeIntegration
from .agno import AgnoIntegration
from .anthropic import AnthropicIntegration
from .autogen import AutoGenIntegration
from .claude_agent_sdk import ClaudeAgentSDKIntegration
from .cohere import CohereIntegration
from .dspy import DSPyIntegration
Expand All @@ -20,6 +21,7 @@
"AgentScopeIntegration",
"AgnoIntegration",
"AnthropicIntegration",
"AutoGenIntegration",
"ClaudeAgentSDKIntegration",
"CohereIntegration",
"DSPyIntegration",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from autogen_agentchat.agents import BaseChatAgent
from autogen_agentchat.teams import BaseGroupChat
from autogen_core.tools import FunctionTool
from braintrust.auto import auto_instrument


# Sanity check: nothing is patched before auto_instrument() runs.
assert not getattr(BaseChatAgent.run, "__braintrust_patched_autogen_chat_agent_run__", False)
assert not getattr(BaseGroupChat.run, "__braintrust_patched_autogen_team_run__", False)
assert not getattr(FunctionTool.run, "__braintrust_patched_autogen_function_tool_run__", False)

results = auto_instrument()
# `is True` (not `== True`): pin the exact bool, per PEP 8 / ruff E712.
assert results.get("autogen") is True
# A second call must be idempotent and still report success.
assert auto_instrument().get("autogen") is True

# After instrumentation, every AutoGen entry point carries its patch marker.
assert getattr(BaseChatAgent.run, "__braintrust_patched_autogen_chat_agent_run__", False)
assert getattr(BaseChatAgent.run_stream, "__braintrust_patched_autogen_chat_agent_run_stream__", False)
assert getattr(BaseGroupChat.run, "__braintrust_patched_autogen_team_run__", False)
assert getattr(BaseGroupChat.run_stream, "__braintrust_patched_autogen_team_run_stream__", False)
assert getattr(FunctionTool.run, "__braintrust_patched_autogen_function_tool_run__", False)

print("SUCCESS")
20 changes: 20 additions & 0 deletions py/src/braintrust/integrations/autogen/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
"""Braintrust AutoGen integration."""

from braintrust.logger import NOOP_SPAN, current_span, init_logger

from .integration import AutoGenIntegration


def setup_autogen(
api_key: str | None = None,
project_id: str | None = None,
project_name: str | None = None,
) -> bool:
"""Setup Braintrust integration with AutoGen."""
if current_span() == NOOP_SPAN:
init_logger(project=project_name, api_key=api_key, project_id=project_id)

return AutoGenIntegration.setup()


__all__ = ["AutoGenIntegration", "setup_autogen"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,216 @@
interactions:
- request:
body: '{"messages":[{"content":"You are concise. Answer directly.","role":"system"},{"role":"user","name":"user","content":"Say
hello in exactly two words."}],"model":"gpt-4o-mini","stream":false,"temperature":0}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '205'
Content-Type:
- application/json
Host:
- api.openai.com
User-Agent:
- AsyncOpenAI/Python 2.32.0
X-Stainless-Arch:
- arm64
X-Stainless-Async:
- async:asyncio
X-Stainless-Lang:
- python
X-Stainless-OS:
- MacOS
X-Stainless-Package-Version:
- 2.32.0
X-Stainless-Runtime:
- CPython
X-Stainless-Runtime-Version:
- 3.12.12
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-DYGF4xklfNjHQ3dNZDbuSrGAFsYDu\",\n \"object\":
\"chat.completion\",\n \"created\": 1777057958,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello there.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
26,\n \"completion_tokens\": 3,\n \"total_tokens\": 29,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_a7190374f3\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-Ray:
- 9f176baf4e10ac31-YYZ
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 24 Apr 2026 19:12:39 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
content-length:
- '818'
openai-organization:
- braintrust-data
openai-processing-ms:
- '242'
openai-project:
- proj_vsCSXafhhByzWOThMrJcZiw9
openai-version:
- '2020-10-01'
set-cookie:
- __cf_bm=Nc7FmCaFJfxm0G8e_ebd8Eba0ln5XCCF5rMUIYKGhm8-1777057958.2822428-1.0.1.1-dWCiuBiWLzZ_uGu88vNOw4PYjIEEuhEMzc3AoxClyqguU9ItaGD3mo.kpjSf9TKQwRzNOtwJJ9jsn6lNJiHYUMOn.P9rBhdiokBoqTvIe6iVhLEAv9XyQnBiEjn73kyE;
HttpOnly; Secure; Path=/; Domain=api.openai.com; Expires=Fri, 24 Apr 2026
19:42:39 GMT
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999980'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_16ebc0bb94f54d5d92879f3f653c4473
status:
code: 200
message: OK
- request:
body: '{"messages":[{"content":"You are concise. Answer directly.","role":"system"},{"role":"user","name":"user","content":"Say
hello in exactly two words."}],"model":"gpt-4o-mini","stream":false,"temperature":0}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '205'
Content-Type:
- application/json
Host:
- api.openai.com
User-Agent:
- AsyncOpenAI/Python 2.32.0
X-Stainless-Arch:
- arm64
X-Stainless-Async:
- async:asyncio
X-Stainless-Lang:
- python
X-Stainless-OS:
- MacOS
X-Stainless-Package-Version:
- 2.32.0
X-Stainless-Runtime:
- CPython
X-Stainless-Runtime-Version:
- 3.12.12
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-DYGHRGb8gFqrGHLu01pO9PifTTNAT\",\n \"object\":
\"chat.completion\",\n \"created\": 1777058105,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello there.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
26,\n \"completion_tokens\": 3,\n \"total_tokens\": 29,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_fa616991a3\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-Ray:
- 9f176f43992c5d15-YYZ
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 24 Apr 2026 19:15:05 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
content-length:
- '818'
openai-organization:
- braintrust-data
openai-processing-ms:
- '298'
openai-project:
- proj_vsCSXafhhByzWOThMrJcZiw9
openai-version:
- '2020-10-01'
set-cookie:
- __cf_bm=8tkx7xc7WnAjyJWBvTnV.2hAVD3Xd6z1iKcrqd22KKk-1777058104.8957334-1.0.1.1-htZq5VoU00hy9oOwPpLlgjXRvNiv06KVGIalt1HU8QrRn0TOHRU5NJvDT0KbgX8BVE_ZzME0Cx_jEhn4r1li3rtjYfRO2tzr0CsEuzBtFV9cdvvpeyGISz5l2Ht6uHIm;
HttpOnly; Secure; Path=/; Domain=api.openai.com; Expires=Fri, 24 Apr 2026
19:45:05 GMT
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999980'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c6c492f11afb43fb97f328b69ad54803
status:
code: 200
message: OK
version: 1
Loading