|
5 | 5 |
|
6 | 6 | from langchain_core.callbacks import ( |
7 | 7 | BaseCallbackHandler, |
| 8 | + CallbackManager, |
| 9 | + AsyncCallbackManager, |
8 | 10 | ) |
9 | 11 | from langchain_core.messages import ( |
10 | 12 | AIMessage, |
@@ -85,19 +87,6 @@ def _extract_class_name_from_serialized(serialized: Optional[dict[str, Any]]) -> |
85 | 87 | return "" |
86 | 88 |
|
87 | 89 |
|
88 | | -def _message_type_to_role(message_type: str) -> str: |
89 | | - if message_type == "human": |
90 | | - return "user" |
91 | | - elif message_type == "system": |
92 | | - return "system" |
93 | | - elif message_type == "ai": |
94 | | - return "assistant" |
95 | | - elif message_type == "tool": |
96 | | - return "tool" |
97 | | - else: |
98 | | - return "unknown" |
99 | | - |
100 | | - |
101 | 90 | def _sanitize_metadata_value(value: Any) -> Any: |
102 | 91 | """Convert metadata values to OpenTelemetry-compatible types.""" |
103 | 92 | if value is None: |
@@ -163,6 +152,7 @@ def __init__( |
163 | 152 | self.token_histogram = token_histogram |
164 | 153 | self.spans: dict[UUID, SpanHolder] = {} |
165 | 154 | self.run_inline = True |
 | 155 | + self._callback_manager: CallbackManager | AsyncCallbackManager | None = None
166 | 156 |
|
167 | 157 | @staticmethod |
168 | 158 | def _get_name_from_callback( |
@@ -192,6 +182,9 @@ def _end_span(self, span: Span, run_id: UUID) -> None: |
192 | 182 | if child_span.end_time is None: # avoid warning on ended spans |
193 | 183 | child_span.end() |
194 | 184 | span.end() |
| 185 | + token = self.spans[run_id].token |
| 186 | + if token: |
| 187 | + context_api.detach(token) |
195 | 188 |
|
196 | 189 | def _create_span( |
197 | 190 | self, |
@@ -230,13 +223,17 @@ def _create_span( |
230 | 223 | else: |
231 | 224 | span = self.tracer.start_span(span_name, kind=kind) |
232 | 225 |
|
| 226 | + token = None |
| 227 | + # TODO: make this unconditional once attach/detach works properly with async callbacks. |
| 228 | + # Currently, it doesn't work due to this - https://github.com/langchain-ai/langchain/issues/31398 |
| 229 | + # As a sidenote, OTel Python users also report similar issues - |
| 230 | + # https://github.com/open-telemetry/opentelemetry-python/issues/2606 |
| 231 | + if self._callback_manager and not self._callback_manager.is_async: |
| 232 | + token = context_api.attach(set_span_in_context(span)) |
| 233 | + |
233 | 234 | _set_span_attribute(span, SpanAttributes.TRACELOOP_WORKFLOW_NAME, workflow_name) |
234 | 235 | _set_span_attribute(span, SpanAttributes.TRACELOOP_ENTITY_PATH, entity_path) |
235 | 236 |
|
236 | | - token = context_api.attach( |
237 | | - context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True) |
238 | | - ) |
239 | | - |
240 | 237 | self.spans[run_id] = SpanHolder( |
241 | 238 | span, token, None, [], workflow_name, entity_name, entity_path |
242 | 239 | ) |
@@ -300,6 +297,16 @@ def _create_llm_span( |
300 | 297 | _set_span_attribute(span, SpanAttributes.LLM_SYSTEM, vendor) |
301 | 298 | _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, request_type.value) |
302 | 299 |
|
| 300 | + # we already have an LLM span by this point, |
| 301 | + # so skip any downstream instrumentation from here |
| 302 | + token = context_api.attach( |
| 303 | + context_api.set_value(SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY, True) |
| 304 | + ) |
| 305 | + |
| 306 | + self.spans[run_id] = SpanHolder( |
| 307 | + span, token, None, [], workflow_name, None, entity_path |
| 308 | + ) |
| 309 | + |
303 | 310 | return span |
304 | 311 |
|
305 | 312 | @dont_throw |
@@ -464,7 +471,7 @@ def on_llm_end( |
464 | 471 | "model_name" |
465 | 472 | ) or response.llm_output.get("model_id") |
466 | 473 | if model_name is not None: |
467 | | - _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, model_name) |
| 474 | + _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, model_name or "unknown") |
468 | 475 |
|
469 | 476 | if self.spans[run_id].request_model is None: |
470 | 477 | _set_span_attribute( |
|
0 commit comments