Skip to content

Commit 41ef9af

Browse files
authored
Merge branch 'main' into taegyunkim/prof-12434-uwsgi
2 parents 42e5f51 + 8e61161 commit 41ef9af

17 files changed

+316
-160
lines changed

ddtrace/_trace/_span_pointer.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,8 @@ def __post_init__(self):
6464
def __repr__(self):
6565
return (
6666
f"SpanPointer(trace_id={self.trace_id}, span_id={self.span_id}, kind={self.kind}, "
67-
f"direction={self.direction}, hash={self.hash}, attributes={self.attributes})"
67+
f"direction={self.attributes.get('ptr.dir')}, hash={self.attributes.get('ptr.hash')}, "
68+
f"attributes={self.attributes})"
6869
)
6970

7071

ddtrace/appsec/_ai_guard/_langchain.py

Lines changed: 45 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from ddtrace.contrib.internal.trace_utils import unwrap
1414
from ddtrace.contrib.internal.trace_utils import wrap
1515
import ddtrace.internal.logger as ddlogger
16+
from ddtrace.internal.utils import get_argument_value
1617

1718

1819
logger = ddlogger.get_logger(__name__)
@@ -164,28 +165,57 @@ def _handle_agent_action_result(client: AIGuardClient, result, kwargs):
164165

165166

166167
def _langchain_chatmodel_generate_before(client: AIGuardClient, message_lists):
167-
from langchain_core.messages import HumanMessage
168-
169168
for messages in message_lists:
170-
# only call evaluator when the last message is an actual user prompt
171-
if len(messages) > 0 and isinstance(messages[-1], HumanMessage):
172-
history = _convert_messages(messages)
173-
prompt = history.pop(-1)
174-
try:
175-
if not client.evaluate_prompt(prompt["role"], prompt["content"], history=history): # type: ignore[typeddict-item]
176-
return AIGuardAbortError()
177-
except AIGuardAbortError as e:
178-
return e
179-
except Exception:
180-
logger.debug("Failed to evaluate chat model prompt", exc_info=True)
169+
result = _evaluate_langchain_messages(client, messages)
170+
if result:
171+
return result
172+
return None
181173

182174

183175
def _langchain_llm_generate_before(client: AIGuardClient, prompts):
184176
for prompt in prompts:
177+
result = _evaluate_langchain_prompt(client, prompt)
178+
if result:
179+
return result
180+
return None
181+
182+
183+
def _langchain_chatmodel_stream_before(client: AIGuardClient, instance, args, kwargs):
184+
input_arg = get_argument_value(args, kwargs, 0, "input")
185+
messages = instance._convert_input(input_arg).to_messages()
186+
return _evaluate_langchain_messages(client, messages)
187+
188+
189+
def _langchain_llm_stream_before(client: AIGuardClient, instance, args, kwargs):
190+
input_arg = get_argument_value(args, kwargs, 0, "input")
191+
prompt = instance._convert_input(input_arg).to_string()
192+
return _evaluate_langchain_prompt(client, prompt)
193+
194+
195+
def _evaluate_langchain_messages(client: AIGuardClient, messages):
196+
from langchain_core.messages import HumanMessage
197+
198+
# only call evaluator when the last message is an actual user prompt
199+
if len(messages) > 0 and isinstance(messages[-1], HumanMessage):
200+
history = _convert_messages(messages)
201+
prompt = history.pop(-1)
185202
try:
186-
if not client.evaluate_prompt("user", prompt):
203+
role, content = (prompt["role"], prompt["content"]) # type: ignore[typeddict-item]
204+
if not client.evaluate_prompt(role, content, history=history):
187205
return AIGuardAbortError()
188206
except AIGuardAbortError as e:
189207
return e
190208
except Exception:
191-
logger.debug("Failed to evaluate llm prompt", exc_info=True)
209+
logger.debug("Failed to evaluate chat model prompt", exc_info=True)
210+
return None
211+
212+
213+
def _evaluate_langchain_prompt(client: AIGuardClient, prompt):
214+
try:
215+
if not client.evaluate_prompt("user", prompt):
216+
return AIGuardAbortError()
217+
except AIGuardAbortError as e:
218+
return e
219+
except Exception:
220+
logger.debug("Failed to evaluate llm prompt", exc_info=True)
221+
return None

ddtrace/appsec/_ai_guard/_listener.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
from functools import partial
22

33
from ddtrace.appsec._ai_guard._langchain import _langchain_chatmodel_generate_before
4+
from ddtrace.appsec._ai_guard._langchain import _langchain_chatmodel_stream_before
45
from ddtrace.appsec._ai_guard._langchain import _langchain_llm_generate_before
6+
from ddtrace.appsec._ai_guard._langchain import _langchain_llm_stream_before
57
from ddtrace.appsec._ai_guard._langchain import _langchain_patch
68
from ddtrace.appsec._ai_guard._langchain import _langchain_unpatch
79
from ddtrace.appsec.ai_guard import AIGuardClient
@@ -20,6 +22,8 @@ def _langchain_listen(client: AIGuardClient):
2022

2123
core.on("langchain.chatmodel.generate.before", partial(_langchain_chatmodel_generate_before, client))
2224
core.on("langchain.chatmodel.agenerate.before", partial(_langchain_chatmodel_generate_before, client))
25+
core.on("langchain.chatmodel.stream.before", partial(_langchain_chatmodel_stream_before, client))
2326

2427
core.on("langchain.llm.generate.before", partial(_langchain_llm_generate_before, client))
2528
core.on("langchain.llm.agenerate.before", partial(_langchain_llm_generate_before, client))
29+
core.on("langchain.llm.stream.before", partial(_langchain_llm_stream_before, client))

ddtrace/appsec/_common_module_patches.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -222,7 +222,7 @@ def wrapped_open_ED4CF71136E15EBF(original_open_callable, instance, args, kwargs
222222
# api10 response handler for regular responses
223223
if response.__class__.__name__ == "HTTPResponse":
224224
addresses = {
225-
"DOWN_RES_STATUS": response.status,
225+
"DOWN_RES_STATUS": str(response.status),
226226
"DOWN_RES_HEADERS": _build_headers(response.getheaders()),
227227
}
228228
if use_body:
@@ -242,7 +242,7 @@ def wrapped_open_ED4CF71136E15EBF(original_open_callable, instance, args, kwargs
242242
response_headers = None
243243
if status_code is not None or response_headers is not None:
244244
call_waf_callback(
245-
{"DOWN_RES_STATUS": status_code, "DOWN_RES_HEADERS": response_headers},
245+
{"DOWN_RES_STATUS": str(status_code), "DOWN_RES_HEADERS": response_headers},
246246
rule_type=EXPLOIT_PREVENTION.TYPE.SSRF,
247247
)
248248
raise

ddtrace/contrib/internal/langchain/patch.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -412,6 +412,8 @@ def traced_chat_stream(langchain, pin, func, instance, args, kwargs):
412412
llm_provider = instance._llm_type
413413
model = _extract_model_name(instance)
414414

415+
_raising_dispatch("langchain.chatmodel.stream.before", (instance, args, kwargs))
416+
415417
def _on_span_started(span: Span):
416418
integration.record_instance(instance, span)
417419

@@ -443,6 +445,15 @@ def traced_llm_stream(langchain, pin, func, instance, args, kwargs):
443445
llm_provider = instance._llm_type
444446
model = _extract_model_name(instance)
445447

448+
_raising_dispatch(
449+
"langchain.llm.stream.before",
450+
(
451+
instance,
452+
args,
453+
kwargs,
454+
),
455+
)
456+
446457
def _on_span_start(span: Span):
447458
integration.record_instance(instance, span)
448459

ddtrace/llmobs/_integrations/google_genai.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,6 @@
33
from typing import List
44
from typing import Optional
55

6-
7-
try:
8-
from google.genai.types import FunctionDeclaration
9-
except ImportError:
10-
FunctionDeclaration = None
11-
126
from ddtrace._trace.span import Span
137
from ddtrace.llmobs._constants import INPUT_DOCUMENTS
148
from ddtrace.llmobs._constants import INPUT_MESSAGES
@@ -187,6 +181,10 @@ def _function_declaration_to_tool_definition(self, function_declaration) -> Tool
187181
)
188182

189183
def _extract_tools(self, config) -> List[ToolDefinition]:
184+
try:
185+
from google.genai.types import FunctionDeclaration
186+
except ImportError:
187+
FunctionDeclaration = None
190188
tool_definitions = []
191189
tools = _get_attr(config, "tools", []) or []
192190
for tool in tools:

ddtrace/llmobs/_integrations/openai_agents.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
from ddtrace.llmobs._utils import _get_nearest_llmobs_ancestor
3838
from ddtrace.llmobs._utils import _get_span_name
3939
from ddtrace.llmobs._utils import load_data_value
40+
from ddtrace.llmobs._utils import safe_json
4041
from ddtrace.trace import Span
4142

4243

@@ -232,13 +233,13 @@ def _llmobs_set_response_attributes(self, span: Span, oai_span: OaiSpanAdapter)
232233
if oai_span.response and oai_span.response.output:
233234
messages, tool_call_outputs = oai_span.llmobs_output_messages()
234235

235-
for tool_id, tool_name, tool_args in tool_call_outputs:
236+
for tool_call_output in tool_call_outputs:
236237
core.dispatch(
237238
DISPATCH_ON_LLM_TOOL_CHOICE,
238239
(
239-
tool_id,
240-
tool_name,
241-
tool_args,
240+
tool_call_output["tool_id"],
241+
tool_call_output["name"],
242+
safe_json(tool_call_output["arguments"]),
242243
{
243244
"trace_id": format_trace_id(span.trace_id),
244245
"span_id": str(span.span_id),

0 commit comments

Comments
 (0)