Add dynamic router for manual model selection #504
The diff adds one new example file (158 lines):

```python
import os
import uuid

from pydantic import SecretStr

from openhands.sdk import (
    LLM,
    Agent,
    Conversation,
    Event,
    LLMConvertibleEvent,
    Message,
    TextContent,
    get_logger,
)
from openhands.sdk.llm.router.impl.dynamic import DynamicRouter
from openhands.tools.preset.default import get_default_tools


logger = get_logger(__name__)

# Configure initial LLM
api_key = os.getenv("LLM_API_KEY")
assert api_key is not None, "LLM_API_KEY environment variable is not set."

# Create DynamicRouter with 2 initial LLMs
claude_llm = LLM(
    service_id="agent-initial",
    model="litellm_proxy/anthropic/claude-sonnet-4-5-20250929",
    base_url="https://llm-proxy.eval.all-hands.dev",
    api_key=SecretStr(api_key),
)

gpt_5_llm = LLM(
    service_id="gpt-5",
    model="litellm_proxy/openai/gpt-5-2025-08-07",
    base_url="https://llm-proxy.eval.all-hands.dev",
    api_key=SecretStr(api_key),
)

dynamic_router = DynamicRouter(
    service_id="dynamic-router",
    llms_for_routing={
        "primary": claude_llm,
        "gpt-5": gpt_5_llm,
    },  # primary is the default
)
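
# Note: the keys of llms_for_routing ("primary" and "gpt-5") double as the
# identifiers passed to switch_to_llm() later in this example.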

# Tools
cwd = os.getcwd()
tools = get_default_tools()

# Agent with dynamic router
agent = Agent(llm=dynamic_router, tools=tools)

llm_messages = []  # collect raw LLM messages


def conversation_callback(event: Event):
    if isinstance(event, LLMConvertibleEvent):
        llm_messages.append(event.to_llm_message())


# Set up conversation with persistence for serialization demo
conversation_id = uuid.uuid4()

conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    conversation_id=conversation_id,
    workspace=cwd,
    persistence_dir="./.conversations",
)
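
# persistence_dir lets the conversation be recreated from disk below,
# using the same conversation_id.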

print(f"Starting with LLM: {dynamic_router.active_llm_identifier}")
print(f"Available LLMs: {list(dynamic_router.llms_for_routing.keys())}")

# First interaction with Claude - primary LLM
conversation.send_message(
    message=Message(
        role="user",
        content=[TextContent(text="Hi there!")],
    )
)
conversation.run()

print("=" * 50)
print("Switching to GPT-5...")

# Manually switch to GPT-5
success = dynamic_router.switch_to_llm("gpt-5")
print(f"GPT-5 switched successfully: {success}")
print(f"Current LLM: {dynamic_router.active_llm_identifier}")

# Interaction with GPT-5
conversation.send_message(
    message=Message(
        role="user",
        content=[TextContent(text="Who trained you as an LLM?")],
    )
)
conversation.run()

# Show current state before serialization
print(f"Before serialization - Current LLM: {dynamic_router.active_llm_identifier}")
print(f"Available LLMs: {list(dynamic_router.llms_for_routing.keys())}")

# Delete conversation to simulate restart
del conversation

# Recreate conversation from persistence
print("Recreating conversation from persistence...")
conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    conversation_id=conversation_id,
    persistence_dir="./.conversations",
)

print(f"After deserialization - Current LLM: {dynamic_router.active_llm_identifier}")
assert dynamic_router.active_llm_identifier == "gpt-5"
print(f"Available LLMs: {list(dynamic_router.llms_for_routing.keys())}")

# Continue conversation after persistence
conversation.send_message(
    message=Message(
        role="user",
        content=[TextContent(text="What did we talk about earlier?")],
    )
)
conversation.run()

# Switch back to primary model for complex task
print("Switching back to Claude for complex reasoning...")
```
Collaborator:
I wonder how the user would know in advance that they'll need to switch?

Collaborator:
I think we were designing the […]. And further down the road, we should by default initialize LLM as […].
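One possible answer to the first question, sketched here as a hypothetical and not part of this PR: rather than knowing in advance, the caller can let the end user drive the switch at runtime, e.g. via a slash-command in a chat front end. `maybe_switch` is an invented helper; the only API assumed is `switch_to_llm` returning a success flag, as it does in the example above.

```python
# Hypothetical sketch (not part of this PR): let the end user request the
# switch mid-session instead of deciding in advance.
def maybe_switch(router: DynamicRouter, user_input: str) -> str:
    """Consume a leading '/model <name>' command; return the text to send."""
    if user_input.startswith("/model "):
        name = user_input.removeprefix("/model ").strip()
        # switch_to_llm returns a success flag, as used in the example above
        if router.switch_to_llm(name):
            print(f"Now using: {router.active_llm_identifier}")
        else:
            print(f"Unknown LLM {name!r}; available: {list(router.llms_for_routing.keys())}")
        return ""  # the command itself is not forwarded to the agent
    return user_input
```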
```python
dynamic_router.switch_to_llm("primary")
print(f"Switched to LLM: {dynamic_router.active_llm_identifier}")

conversation.send_message(
    message=Message(
        role="user",
        content=[
            TextContent(
                text="Explain the concept of dynamic programming in one sentence."
            )
        ],
    )
)
conversation.run()

print("Demonstrating persistence with LLM switching...")

print("=" * 100)
print("Conversation finished. Got the following LLM messages:")
for i, message in enumerate(llm_messages):
    print(f"Message {i}: {str(message)[:200]}")
```