Skip to content

Commit d2b7d5e

Browse files
feat(integration): add gen_ai.conversation.id if available (#5307)
### Description Closes https://linear.app/getsentry/issue/TET-1721/openai-agents-add-gen-aiconversationid --------- Co-authored-by: Alexander Alderman Webb <alexander.webb@sentry.io>
1 parent 11d68ee commit d2b7d5e

File tree

8 files changed

+243
-3
lines changed

8 files changed

+243
-3
lines changed

sentry_sdk/consts.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -465,6 +465,12 @@ class SPANDATA:
465465
Example: "ResearchAssistant"
466466
"""
467467

468+
GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id"
469+
"""
470+
The unique identifier for the conversation/thread with the AI model.
471+
Example: "conv_abc123"
472+
"""
473+
468474
GEN_AI_CHOICE = "gen_ai.choice"
469475
"""
470476
The model's response message.

sentry_sdk/integrations/openai_agents/patches/models.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ async def wrapped_get_response(*args: "Any", **kwargs: "Any") -> "Any":
125125
delattr(agent, "_sentry_response_model")
126126

127127
_set_response_model_on_agent_span(agent, response_model)
128-
update_ai_client_span(span, result, response_model)
128+
update_ai_client_span(span, result, response_model, agent)
129129

130130
return result
131131

@@ -190,7 +190,9 @@ async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any":
190190
else None
191191
)
192192
_set_response_model_on_agent_span(agent, response_model)
193-
update_ai_client_span(span, streaming_response)
193+
update_ai_client_span(
194+
span, streaming_response, response_model, agent
195+
)
194196

195197
model.stream_response = wrapped_stream_response
196198

sentry_sdk/integrations/openai_agents/patches/runner.py

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from functools import wraps
33

44
import sentry_sdk
5+
from sentry_sdk.consts import SPANDATA
56
from sentry_sdk.integrations import DidNotEnable
67
from sentry_sdk.utils import capture_internal_exceptions, reraise
78

@@ -34,7 +35,16 @@ async def wrapper(*args: "Any", **kwargs: "Any") -> "Any":
3435
with sentry_sdk.isolation_scope():
3536
# Clone agent because agent invocation spans are attached per run.
3637
agent = args[0].clone()
37-
with agent_workflow_span(agent):
38+
39+
with agent_workflow_span(agent) as workflow_span:
40+
# Set conversation ID on workflow span early so it's captured even on errors
41+
conversation_id = kwargs.get("conversation_id")
42+
if conversation_id:
43+
agent._sentry_conversation_id = conversation_id
44+
workflow_span.set_data(
45+
SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id
46+
)
47+
3848
args = (agent, *args[1:])
3949
try:
4050
run_result = await original_func(*args, **kwargs)
@@ -91,10 +101,19 @@ def wrapper(*args: "Any", **kwargs: "Any") -> "Any":
91101
# Clone agent because agent invocation spans are attached per run.
92102
agent = args[0].clone()
93103

104+
# Capture conversation_id from kwargs if provided
105+
conversation_id = kwargs.get("conversation_id")
106+
if conversation_id:
107+
agent._sentry_conversation_id = conversation_id
108+
94109
# Start workflow span immediately (before run_streamed returns)
95110
workflow_span = agent_workflow_span(agent)
96111
workflow_span.__enter__()
97112

113+
# Set conversation ID on workflow span early so it's captured even on errors
114+
if conversation_id:
115+
workflow_span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id)
116+
98117
# Store span on agent for cleanup
99118
agent._sentry_workflow_span = workflow_span
100119

sentry_sdk/integrations/openai_agents/spans/ai_client.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ def update_ai_client_span(
4646
span: "sentry_sdk.tracing.Span",
4747
response: "Any",
4848
response_model: "Optional[str]" = None,
49+
agent: "Optional[Agent]" = None,
4950
) -> None:
5051
"""Update AI client span with response data (works for streaming and non-streaming)."""
5152
if hasattr(response, "usage") and response.usage:
@@ -59,3 +60,9 @@ def update_ai_client_span(
5960
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
6061
elif hasattr(response, "model") and response.model:
6162
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, str(response.model))
63+
64+
# Set conversation ID from agent if available
65+
if agent:
66+
conv_id = getattr(agent, "_sentry_conversation_id", None)
67+
if conv_id:
68+
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

sentry_sdk/integrations/openai_agents/spans/execute_tool.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,3 +51,8 @@ def update_execute_tool_span(
5151

5252
if should_send_default_pii():
5353
span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)
54+
55+
# Add conversation ID from agent
56+
conv_id = getattr(agent, "_sentry_conversation_id", None)
57+
if conv_id:
58+
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

sentry_sdk/integrations/openai_agents/spans/handoff.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,3 +18,8 @@ def handoff_span(
1818
origin=SPAN_ORIGIN,
1919
) as span:
2020
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "handoff")
21+
22+
# Add conversation ID from agent
23+
conv_id = getattr(from_agent, "_sentry_conversation_id", None)
24+
if conv_id:
25+
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

sentry_sdk/integrations/openai_agents/spans/invoke_agent.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,11 @@ def update_invoke_agent_span(
9595
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False
9696
)
9797

98+
# Add conversation ID from agent
99+
conv_id = getattr(agent, "_sentry_conversation_id", None)
100+
if conv_id:
101+
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)
102+
98103
span.__exit__(None, None, None)
99104
delattr(context, "_sentry_agent_span")
100105

tests/integrations/openai_agents/test_openai_agents.py

Lines changed: 191 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2857,3 +2857,194 @@ def mock_get_model(agent, run_config):
28572857

28582858
# Verify streaming flag is set
28592859
assert chat_span._data.get(SPANDATA.GEN_AI_RESPONSE_STREAMING) is True
2860+
2861+
2862+
@pytest.mark.skipif(
2863+
parse_version(OPENAI_AGENTS_VERSION) < (0, 4, 0),
2864+
reason="conversation_id support requires openai-agents >= 0.4.0",
2865+
)
2866+
@pytest.mark.asyncio
2867+
async def test_conversation_id_on_all_spans(
2868+
sentry_init, capture_events, test_agent, mock_model_response
2869+
):
2870+
"""
2871+
Test that gen_ai.conversation.id is set on all AI-related spans when passed to Runner.run().
2872+
"""
2873+
2874+
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
2875+
with patch(
2876+
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
2877+
) as mock_get_response:
2878+
mock_get_response.return_value = mock_model_response
2879+
2880+
sentry_init(
2881+
integrations=[OpenAIAgentsIntegration()],
2882+
traces_sample_rate=1.0,
2883+
)
2884+
2885+
events = capture_events()
2886+
2887+
result = await agents.Runner.run(
2888+
test_agent,
2889+
"Test input",
2890+
run_config=test_run_config,
2891+
conversation_id="conv_test_123",
2892+
)
2893+
2894+
assert result is not None
2895+
2896+
(transaction,) = events
2897+
spans = transaction["spans"]
2898+
invoke_agent_span, ai_client_span = spans
2899+
2900+
# Verify workflow span (transaction) has conversation_id
2901+
assert (
2902+
transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"]
2903+
== "conv_test_123"
2904+
)
2905+
2906+
# Verify invoke_agent span has conversation_id
2907+
assert invoke_agent_span["data"]["gen_ai.conversation.id"] == "conv_test_123"
2908+
2909+
# Verify ai_client span has conversation_id
2910+
assert ai_client_span["data"]["gen_ai.conversation.id"] == "conv_test_123"
2911+
2912+
2913+
@pytest.mark.skipif(
2914+
parse_version(OPENAI_AGENTS_VERSION) < (0, 4, 0),
2915+
reason="conversation_id support requires openai-agents >= 0.4.0",
2916+
)
2917+
@pytest.mark.asyncio
2918+
async def test_conversation_id_on_tool_span(sentry_init, capture_events, test_agent):
2919+
"""
2920+
Test that gen_ai.conversation.id is set on tool execution spans when passed to Runner.run().
2921+
"""
2922+
2923+
@agents.function_tool
2924+
def simple_tool(message: str) -> str:
2925+
"""A simple tool"""
2926+
return f"Result: {message}"
2927+
2928+
agent_with_tool = test_agent.clone(tools=[simple_tool])
2929+
2930+
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
2931+
with patch(
2932+
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
2933+
) as mock_get_response:
2934+
tool_call = ResponseFunctionToolCall(
2935+
id="call_123",
2936+
call_id="call_123",
2937+
name="simple_tool",
2938+
type="function_call",
2939+
arguments='{"message": "hello"}',
2940+
)
2941+
2942+
tool_response = ModelResponse(
2943+
output=[tool_call],
2944+
usage=Usage(
2945+
requests=1, input_tokens=10, output_tokens=5, total_tokens=15
2946+
),
2947+
response_id="resp_tool_456",
2948+
)
2949+
2950+
final_response = ModelResponse(
2951+
output=[
2952+
ResponseOutputMessage(
2953+
id="msg_final",
2954+
type="message",
2955+
status="completed",
2956+
content=[
2957+
ResponseOutputText(
2958+
text="Done",
2959+
type="output_text",
2960+
annotations=[],
2961+
)
2962+
],
2963+
role="assistant",
2964+
)
2965+
],
2966+
usage=Usage(
2967+
requests=1, input_tokens=15, output_tokens=10, total_tokens=25
2968+
),
2969+
response_id="resp_final_789",
2970+
)
2971+
2972+
mock_get_response.side_effect = [tool_response, final_response]
2973+
2974+
sentry_init(
2975+
integrations=[OpenAIAgentsIntegration()],
2976+
traces_sample_rate=1.0,
2977+
)
2978+
2979+
events = capture_events()
2980+
2981+
await agents.Runner.run(
2982+
agent_with_tool,
2983+
"Use the tool",
2984+
run_config=test_run_config,
2985+
conversation_id="conv_tool_test_456",
2986+
)
2987+
2988+
(transaction,) = events
2989+
spans = transaction["spans"]
2990+
2991+
# Find the tool span
2992+
tool_span = None
2993+
for span in spans:
2994+
if span.get("description", "").startswith("execute_tool"):
2995+
tool_span = span
2996+
break
2997+
2998+
assert tool_span is not None
2999+
# Tool span should have the conversation_id passed to Runner.run()
3000+
assert tool_span["data"]["gen_ai.conversation.id"] == "conv_tool_test_456"
3001+
3002+
# Workflow span (transaction) should have the same conversation_id
3003+
assert (
3004+
transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"]
3005+
== "conv_tool_test_456"
3006+
)
3007+
3008+
3009+
@pytest.mark.skipif(
3010+
parse_version(OPENAI_AGENTS_VERSION) < (0, 4, 0),
3011+
reason="conversation_id support requires openai-agents >= 0.4.0",
3012+
)
3013+
@pytest.mark.asyncio
3014+
async def test_no_conversation_id_when_not_provided(
3015+
sentry_init, capture_events, test_agent, mock_model_response
3016+
):
3017+
"""
3018+
Test that gen_ai.conversation.id is not set when not passed to Runner.run().
3019+
"""
3020+
3021+
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
3022+
with patch(
3023+
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
3024+
) as mock_get_response:
3025+
mock_get_response.return_value = mock_model_response
3026+
3027+
sentry_init(
3028+
integrations=[OpenAIAgentsIntegration()],
3029+
traces_sample_rate=1.0,
3030+
)
3031+
3032+
events = capture_events()
3033+
3034+
# Don't pass conversation_id
3035+
result = await agents.Runner.run(
3036+
test_agent, "Test input", run_config=test_run_config
3037+
)
3038+
3039+
assert result is not None
3040+
3041+
(transaction,) = events
3042+
spans = transaction["spans"]
3043+
invoke_agent_span, ai_client_span = spans
3044+
3045+
# Verify conversation_id is NOT set on any spans
3046+
assert "gen_ai.conversation.id" not in transaction["contexts"]["trace"].get(
3047+
"data", {}
3048+
)
3049+
assert "gen_ai.conversation.id" not in invoke_agent_span.get("data", {})
3050+
assert "gen_ai.conversation.id" not in ai_client_span.get("data", {})

0 commit comments

Comments (0)