Skip to content

Commit 69fe4a1

Browse files
committed
feat: add supervisor flow to veadk agent
1 parent 5bc7fca commit 69fe4a1

File tree

5 files changed

+227
-1
lines changed

5 files changed

+227
-1
lines changed

veadk/agent.py

Lines changed: 30 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
import os
1818
from typing import Dict, Literal, Optional, Union
1919

20+
from google.adk.flows.llm_flows.base_llm_flow import BaseLlmFlow
21+
2022
# If user didn't set LITELLM_LOCAL_MODEL_COST_MAP, set it to True
2123
# to enable local model cost map.
2224
# This value is `false` by default, which brings heavy performance burden,
@@ -150,6 +152,8 @@ class Agent(LlmAgent):
150152

151153
example_store: Optional[BaseExampleProvider] = None
152154

155+
enable_supervisor: bool = False
156+
153157
def model_post_init(self, __context: Any) -> None:
154158
super().model_post_init(None) # for sub_agents init
155159

@@ -242,7 +246,7 @@ def model_post_init(self, __context: Any) -> None:
242246
self.tools.append(load_kb_queries)
243247

244248
if self.long_term_memory is not None:
245-
from google.adk.tools import load_memory
249+
from google.adk.tools.load_memory_tool import load_memory
246250

247251
if hasattr(load_memory, "custom_metadata"):
248252
if not load_memory.custom_metadata:
@@ -402,6 +406,31 @@ def _prepare_tracers(self):
402406
f"Opentelemetry Tracer init {len(self.tracers[0].exporters)} exporters" # type: ignore
403407
)
404408

409+
@property
410+
def _llm_flow(self) -> BaseLlmFlow:
411+
from google.adk.flows.llm_flows.auto_flow import AutoFlow
412+
from google.adk.flows.llm_flows.single_flow import SingleFlow
413+
414+
if (
415+
self.disallow_transfer_to_parent
416+
and self.disallow_transfer_to_peers
417+
and not self.sub_agents
418+
):
419+
from veadk.flows.supervise_single_flow import SupervisorSingleFlow
420+
421+
if self.enable_supervisor:
422+
logger.debug(f"Enable supervisor flow for agent: {self.name}")
423+
return SupervisorSingleFlow(supervised_agent=self)
424+
else:
425+
return SingleFlow()
426+
else:
427+
from veadk.flows.supervise_auto_flow import SupervisorAutoFlow
428+
429+
if self.enable_supervisor:
430+
logger.debug(f"Enable supervisor flow for agent: {self.name}")
431+
return SupervisorAutoFlow(supervised_agent=self)
432+
return AutoFlow()
433+
405434
async def run(self, **kwargs):
406435
raise NotImplementedError(
407436
"Run method in VeADK agent is deprecated since version 0.5.6. Please use runner.run_async instead. Ref: https://agentkit.gitbook.io/docs/runner/overview"

veadk/agents/supervise_agent.py

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from google.adk.models.llm_request import LlmRequest
16+
from jinja2 import Template
17+
18+
from veadk import Agent, Runner
19+
from veadk.utils.logger import get_logger
20+
21+
logger = get_logger(__name__)
22+
23+
# Prompt template for the supervisor agent. The worker agent's own system
# prompt is interpolated so the supervisor can judge the execution history
# against the worker's actual instructions. The supervisor MUST answer with
# a JSON object (consumed by json.loads downstream), never a bare string.
instruction = Template("""You are a supervisor of an agent system. The system prompt of worker agent is:

```system prompt
{{ system_prompt }}
```

You should guide the agent to finish task and must output a JSON-format string with specific advice and reason:

- If you think the history execution is not correct, you should give your advice to the worker agent: {"advice": "Your advice here", "reason": "Your reason here"}.
- If you think the history execution is correct, you should set the advice field to an empty string: {"advice": "", "reason": "Your reason here"}.
""")
36+
37+
38+
def build_supervisor(supervised_agent: Agent) -> Agent:
    """Create a supervisor agent tailored to *supervised_agent*.

    The supervisor's instruction embeds the worker agent's system prompt so
    the supervisor can evaluate the worker's trajectory in context.

    Args:
        supervised_agent: The worker agent to be supervised.

    Returns:
        Agent: a fresh agent named ``supervisor``.
    """
    rendered_instruction = instruction.render(
        system_prompt=supervised_agent.instruction
    )

    return Agent(
        name="supervisor",
        description="A supervisor for agent execution",
        instruction=rendered_instruction,
    )
47+
48+
49+
async def generate_advice(agent: Agent, llm_request: LlmRequest) -> str:
    """Run the supervisor *agent* over the trajectory in *llm_request*.

    Flattens the request contents (text parts, tool calls and tool
    responses) into a readable transcript, then asks the supervisor for a
    JSON-formatted advice/reason string.

    Args:
        agent: The supervisor agent (see ``build_supervisor``).
        llm_request: The pending request of the supervised agent, whose
            ``contents`` hold the conversation history so far.

    Returns:
        The supervisor model's raw response text (expected to be JSON).
    """
    runner = Runner(agent=agent)

    # Render each part on its own line; without separators the role labels
    # and messages would run together and confuse the supervisor model.
    entries: list[str] = []
    for content in llm_request.contents:
        if content and content.parts:
            for part in content.parts:
                if part.text:
                    entries.append(f"{content.role}: {part.text}")
                if part.function_call:
                    entries.append(f"{content.role}: {part.function_call}")
                if part.function_response:
                    entries.append(f"{content.role}: {part.function_response}")
    messages = "\n".join(entries)

    prompt = (
        f"Agent has the following tools: {llm_request.tools_dict}. History trajectory is: "
        + messages
    )

    logger.debug(f"Prompt for supervisor: {prompt}")

    return await runner.run(messages=prompt)

veadk/flows/__init__.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.

veadk/flows/supervise_auto_flow.py

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import json
16+
from typing import AsyncGenerator
17+
18+
from google.adk.agents.invocation_context import InvocationContext
19+
from google.adk.events import Event
20+
from google.adk.models.llm_request import LlmRequest
21+
from google.adk.models.llm_response import LlmResponse
22+
from google.genai.types import Content, Part
23+
24+
from veadk import Agent
25+
from veadk.agents.supervise_agent import generate_advice
26+
from veadk.flows.supervise_single_flow import SupervisorSingleFlow
27+
from veadk.utils.logger import get_logger
28+
29+
logger = get_logger(__name__)
30+
31+
32+
class SupervisorAutoFlow(SupervisorSingleFlow):
    """AutoFlow-style supervisor flow.

    Before each LLM call, asks the supervisor agent for advice on the
    history trajectory and, when advice is given, appends it to the request
    as a user-role message so the worker agent can course-correct.
    """

    def __init__(self, supervised_agent: Agent):
        super().__init__(supervised_agent)

    async def _call_llm_async(
        self,
        invocation_context: InvocationContext,
        llm_request: LlmRequest,
        model_response_event: Event,
    ) -> AsyncGenerator[LlmResponse, None]:
        """Inject supervisor advice into *llm_request*, then delegate upstream."""
        supervisor_response = await generate_advice(self._supervisor, llm_request)
        logger.debug(f"Advice from supervisor: {supervisor_response}")

        # The supervisor is itself an LLM: its output may not be valid JSON.
        # A parse failure must not abort the worker agent's turn — fall back
        # to "no advice" instead of raising.
        try:
            advice_and_reason = json.loads(supervisor_response)
        except (json.JSONDecodeError, TypeError):
            logger.warning(
                f"Supervisor response is not valid JSON, skip advice: {supervisor_response}"
            )
            advice_and_reason = {}

        # .get() guards against a well-formed JSON object missing the keys
        # the prompt asked for.
        advice = advice_and_reason.get("advice", "")
        reason = advice_and_reason.get("reason", "")

        if advice:
            logger.debug("Add supervisor advice to llm request.")
            llm_request.contents.append(
                Content(
                    parts=[
                        Part(
                            text=f"""Message from your supervisor (not user): {advice}, the corresponding reason is {reason}

Please follow the advice and reason above to optimize your actions.
"""
                        )
                    ],
                    role="user",
                )
            )
        else:
            logger.info(
                f"Supervisor advice is empty, reason: {reason}. Skip adding to llm request."
            )

        async for llm_response in super()._call_llm_async(
            invocation_context, llm_request, model_response_event
        ):
            yield llm_response
veadk/flows/supervise_single_flow.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
from typing import AsyncGenerator
16+
17+
from google.adk.agents.invocation_context import InvocationContext
18+
from google.adk.events import Event
19+
from google.adk.flows.llm_flows.single_flow import SingleFlow
20+
from google.adk.models.llm_request import LlmRequest
21+
from google.adk.models.llm_response import LlmResponse
22+
from typing_extensions import override
23+
24+
from veadk import Agent
25+
from veadk.agents.supervise_agent import build_supervisor
26+
27+
28+
class SupervisorSingleFlow(SingleFlow):
    """SingleFlow that carries a supervisor agent for the supervised agent.

    The supervisor is built from the supervised agent's own instruction via
    ``build_supervisor``; ``_call_llm_async`` itself is a pure pass-through
    here, left as an override point for subclasses (e.g. SupervisorAutoFlow)
    to inject supervisor advice before the LLM call.
    """

    def __init__(self, supervised_agent: Agent):
        # Build the supervisor before base-class init so the attribute exists
        # regardless of what SingleFlow.__init__ does.
        # NOTE(review): attribute-before-super order looks deliberate — confirm
        # before reordering.
        self._supervisor = build_supervisor(supervised_agent)

        super().__init__()

    @override
    async def _call_llm_async(
        self,
        invocation_context: InvocationContext,
        llm_request: LlmRequest,
        model_response_event: Event,
    ) -> AsyncGenerator[LlmResponse, None]:
        # Intentional pass-through: delegates to SingleFlow unchanged.
        # Presumably kept so subclasses share one override site — verify
        # before removing.
        async for llm_response in super()._call_llm_async(
            invocation_context, llm_request, model_response_event
        ):
            yield llm_response

0 commit comments

Comments
 (0)