@@ -503,24 +503,59 @@ def _downgrade_tool_calls_to_text(self, canonical_request: Any) -> Any:
 
         downgraded: list[ChatMessage] = []
 
+        # Track which tool calls are safe to keep for signature-required backends.
+        # Any tool call without a thought_signature must be removed, and its tool
+        # result messages must be downgraded to plain text (otherwise Gemini may
+        # see orphaned functionResponse parts).
+        kept_tool_call_ids: set[str] = set()
+
+        for raw in messages:
+            role = (
+                raw.get("role") if isinstance(raw, dict) else getattr(raw, "role", None)
+            )
+            tool_calls = (
+                raw.get("tool_calls")
+                if isinstance(raw, dict)
+                else getattr(raw, "tool_calls", None)
+            )
+            if role != "assistant" or not isinstance(tool_calls, list):
+                continue
+            for tc in tool_calls:
+                if not self._extract_thought_signature(tc):
+                    continue
+                tc_id = (
+                    tc.get("id") if isinstance(tc, dict) else getattr(tc, "id", None)
+                )
+                if isinstance(tc_id, str) and tc_id:
+                    kept_tool_call_ids.add(tc_id)
+
         # Avoid exploding prompt size when tool signature recovery is impossible.
         # This path is a best-effort salvage mode, typically triggered after a proxy
         # restart or when a client does not preserve thought signatures.
         max_tool_result_chars = 2000
         max_converted_tool_messages = 50
 
-        # Keep only the most recent tool result messages.
-        tool_message_count = 0
+        # Keep only the most recent tool result messages that we need to downgrade.
+        convertible_tool_message_count = 0
         for raw in messages:
             role = (
                 raw.get("role") if isinstance(raw, dict) else getattr(raw, "role", None)
             )
-            if role == "tool":
-                tool_message_count += 1
-        tool_message_skip_before = max(
-            0, tool_message_count - max_converted_tool_messages
+            if role != "tool":
+                continue
+            tool_call_id = (
+                raw.get("tool_call_id")
+                if isinstance(raw, dict)
+                else getattr(raw, "tool_call_id", None)
+            )
+            if isinstance(tool_call_id, str) and tool_call_id in kept_tool_call_ids:
+                continue
+            convertible_tool_message_count += 1
+
+        convertible_tool_message_skip_before = max(
+            0, convertible_tool_message_count - max_converted_tool_messages
         )
-        tool_message_seen = 0
+        convertible_tool_message_seen = 0
 
         for msg in messages:
             if isinstance(msg, dict):
@@ -535,23 +570,71 @@ def _downgrade_tool_calls_to_text(self, canonical_request: Any) -> Any:
                 continue
 
             if msg.role == "assistant" and msg.tool_calls:
+                kept_tool_calls: list[Any] = []
+                for tc in msg.tool_calls:
+                    sig = self._extract_thought_signature(tc)
+                    if sig:
+                        kept_tool_calls.append(tc)
+                        tc_id = (
+                            tc.get("id")
+                            if isinstance(tc, dict)
+                            else getattr(tc, "id", None)
+                        )
+                        if isinstance(tc_id, str) and tc_id:
+                            kept_tool_call_ids.add(tc_id)
+
                 # IMPORTANT: Do not append any "downgrade" transcript text.
                 # That text becomes part of the prompt and can easily cause the model
                 # to repeat it, creating visible loops for clients.
-                downgraded.append(
-                    ChatMessage(
-                        role="assistant",
-                        content=msg.content,
-                        reasoning_content=msg.reasoning_content,
-                        name=msg.name,
+                if kept_tool_calls:
+                    # Preserve any descriptive content in a separate message.
+                    if msg.content:
+                        downgraded.append(
+                            ChatMessage(
+                                role="assistant",
+                                content=msg.content,
+                                name=msg.name,
+                            )
+                        )
+
+                    downgraded.append(
+                        ChatMessage(
+                            role="assistant",
+                            content=None,
+                            tool_calls=kept_tool_calls,
+                            reasoning_content=msg.reasoning_content,
+                            name=msg.name,
+                        )
+                    )
+                else:
+                    # No tool calls can be kept; keep the text content.
+                    downgraded.append(
+                        ChatMessage(
+                            role="assistant",
+                            content=msg.content,
+                            reasoning_content=msg.reasoning_content,
+                            name=msg.name,
+                        )
                     )
-                )
                 continue
 
             if msg.role == "tool":
-                tool_message_seen += 1
-                if tool_message_seen <= tool_message_skip_before:
+                tool_call_id = msg.tool_call_id
+                if (
+                    isinstance(tool_call_id, str)
+                    and tool_call_id
+                    and tool_call_id in kept_tool_call_ids
+                ):
+                    downgraded.append(msg)
                     continue
+
+                convertible_tool_message_seen += 1
+                if (
+                    convertible_tool_message_seen
+                    <= convertible_tool_message_skip_before
+                ):
+                    continue
+
                 tool_text = extract_prompt_text([msg])
                 if tool_text.startswith("tool:"):
                     tool_text = tool_text[len("tool:") :].lstrip()