From 92725704cf1321cabab6397cf8ea06a27dd79c3a Mon Sep 17 00:00:00 2001
From: karthik
Date: Tue, 20 Jan 2026 12:19:44 -0500
Subject: [PATCH 1/3] fix(vertexai): add error logging to instrumented functions

Adds error handling to the sync and async wrappers in the vertexai
instrumentation so that errors from failed API calls are properly
logged on their OTel spans.

Changes:
- Set the ERROR_TYPE attribute to the exception class name
- Record the exception on the span
- Set the span status to ERROR with the exception message

Part of #412

Co-Authored-By: Claude Opus 4.5
---
 .../instrumentation/vertexai/__init__.py      | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
index 5d96fad8b3..dc51cf12d4 100644
--- a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
+++ b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
@@ -32,6 +32,7 @@
 )
 from opentelemetry.trace import SpanKind, get_tracer
 from opentelemetry.trace.status import Status, StatusCode
+from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
 from wrapt import wrap_function_wrapper
 
 logger = logging.getLogger(__name__)
@@ -245,7 +246,14 @@ async def _awrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)
 
     await _handle_request(span, event_logger, args, kwargs, llm_model)
 
-    response = await wrapped(*args, **kwargs)
+    try:
+        response = await wrapped(*args, **kwargs)
+    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
+        span.set_status(Status(StatusCode.ERROR, str(e)))
+        span.end()
+        raise
 
     if response:
         if is_streaming_response(response):
@@ -299,7 +307,14 @@ def _wrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
     else:
         set_input_attributes_sync(span, args)
 
-    response = wrapped(*args, **kwargs)
+    try:
+        response = wrapped(*args, **kwargs)
+    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
+        span.set_status(Status(StatusCode.ERROR, str(e)))
+        span.end()
+        raise
 
     if response:
         if is_streaming_response(response):

From c8de468d2c6df521abc6d50fb67cb922acb4eb73 Mon Sep 17 00:00:00 2001
From: karthik
Date: Tue, 20 Jan 2026 13:14:10 -0500
Subject: [PATCH 2/3] fix(vertexai): add error handling to streaming response handlers

Addresses CodeRabbit review feedback: the streaming iteration loops also
need a try/except, because errors that occur during streaming are raised
while the stream is consumed, after the wrapper's own try/except has
already returned.
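
A minimal standalone sketch of the gap (plain Python, not the
instrumentation code; `stream` and `chunks` are illustrative names): a
try/except around the call that creates a generator never observes
errors raised mid-stream, because the generator body only runs once the
stream is consumed.

    def stream():
        yield "chunk-1"
        raise RuntimeError("mid-stream failure")

    try:
        chunks = stream()  # returns a generator; nothing raises yet
    except RuntimeError:
        print("never reached")

    try:
        for chunk in chunks:  # the generator body runs here
            print(chunk)
    except RuntimeError:
        print("error only surfaces during iteration")

This is why _build_from_streaming_response and
_abuild_from_streaming_response need their own try/except around the
iteration loops.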

Co-Authored-By: Claude Opus 4.5
---
 .../instrumentation/vertexai/__init__.py      | 38 +++++++++++++------
 1 file changed, 26 insertions(+), 12 deletions(-)

diff --git a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
index dc51cf12d4..98ccffd3c1 100644
--- a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
+++ b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
@@ -149,13 +149,20 @@ def handle_streaming_response(span, event_logger, llm_model, response, token_usa
 def _build_from_streaming_response(span, event_logger, response, llm_model):
     complete_response = ""
     token_usage = None
-    for item in response:
-        item_to_yield = item
-        complete_response += str(item.text)
-        if item.usage_metadata:
-            token_usage = item.usage_metadata
+    try:
+        for item in response:
+            item_to_yield = item
+            complete_response += str(item.text)
+            if item.usage_metadata:
+                token_usage = item.usage_metadata
 
-        yield item_to_yield
+            yield item_to_yield
+    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
+        span.set_status(Status(StatusCode.ERROR, str(e)))
+        span.end()
+        raise
     handle_streaming_response(
         span, event_logger, llm_model, complete_response, token_usage
     )
@@ -168,13 +175,20 @@
 async def _abuild_from_streaming_response(span, event_logger, response, llm_model):
     complete_response = ""
     token_usage = None
-    async for item in response:
-        item_to_yield = item
-        complete_response += str(item.text)
-        if item.usage_metadata:
-            token_usage = item.usage_metadata
+    try:
+        async for item in response:
+            item_to_yield = item
+            complete_response += str(item.text)
+            if item.usage_metadata:
+                token_usage = item.usage_metadata
 
-        yield item_to_yield
+            yield item_to_yield
+    except Exception as e:
+        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+        span.record_exception(e)
+        span.set_status(Status(StatusCode.ERROR, str(e)))
+        span.end()
+        raise
 
     handle_streaming_response(span, event_logger, llm_model, response, token_usage)
 

From 79c01bcd9693b672fb5105f5dd45c5b4168cc5b5 Mon Sep 17 00:00:00 2001
From: karthik
Date: Tue, 20 Jan 2026 13:33:54 -0500
Subject: [PATCH 3/3] fix(vertexai): pass complete_response instead of generator to handler

Fixes a bug in the async streaming handler where `response` (the
generator) was passed to handle_streaming_response instead of
`complete_response` (the accumulated string).
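
The handler presumably needs the accumulated text, and str() of a
generator object is its repr rather than its contents, so attributes
derived from the old argument would have captured something like
"<async_generator object ...>" instead of the response text. A
standalone illustration (`stream` is an illustrative name):

    def stream():
        yield "hello"

    # str() of a generator is its repr, not its contents:
    print(str(stream()))  # e.g. "<generator object stream at 0x...>"
    print(str("hello"))   # "hello", the accumulated text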

Co-Authored-By: Claude Opus 4.5
---
 .../opentelemetry/instrumentation/vertexai/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
index 98ccffd3c1..1aec6fa85c 100644
--- a/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
+++ b/packages/opentelemetry-instrumentation-vertexai/opentelemetry/instrumentation/vertexai/__init__.py
@@ -190,7 +190,7 @@ async def _abuild_from_streaming_response(span, event_logger, response, llm_mode
         span.end()
         raise
 
-    handle_streaming_response(span, event_logger, llm_model, response, token_usage)
+    handle_streaming_response(span, event_logger, llm_model, complete_response, token_usage)
 
     span.set_status(Status(StatusCode.OK))
     span.end()
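
Not part of the patches: a minimal sketch of how the error path added in
this series could be exercised end to end with the OTel SDK's in-memory
exporter. It reuses the same three-step pattern as the wrappers; the
span name and the ValueError are illustrative, and this is not taken
from the package's test suite.

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
        InMemorySpanExporter,
    )
    from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
    from opentelemetry.trace.status import Status, StatusCode

    # Collect finished spans in memory so the result can be asserted on.
    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))
    tracer = provider.get_tracer(__name__)

    span = tracer.start_span("vertexai.generate_content")
    try:
        raise ValueError("boom")  # stand-in for a failing API call
    except Exception as e:
        # The same three steps the instrumented wrappers now perform:
        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
        span.record_exception(e)
        span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()

    (finished,) = exporter.get_finished_spans()
    assert finished.status.status_code == StatusCode.ERROR
    assert finished.attributes[ERROR_TYPE] == "ValueError"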