43 changes: 43 additions & 0 deletions app/alembic/versions/20251209_add_inferring_status.py
@@ -0,0 +1,43 @@
"""Add inferring status to projects

Revision ID: 20251209_add_inferring_status
Revises: 20251204181617_19b6f2ee95e6
Create Date: 2025-12-09 16:45:00.000000

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "20251209_add_inferring_status"
down_revision: Union[str, None] = "20251204181617_19b6f2ee95e6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
# Drop the existing check constraint
op.drop_constraint("check_status", "projects", type_="check")

# Create the new check constraint with 'inferring' status
# Note: 'processing' is included to match ProjectStatusEnum.PROCESSING
op.create_check_constraint(
"check_status",
"projects",
"status IN ('submitted', 'cloned', 'parsed', 'processing', 'inferring', 'ready', 'error')",
)


def downgrade() -> None:
# Drop the constraint with 'inferring'
op.drop_constraint("check_status", "projects", type_="check")

# Restore the original constraint without 'inferring'
# Note: 'processing' is included to match ProjectStatusEnum.PROCESSING
op.create_check_constraint(
"check_status",
"projects",
"status IN ('submitted', 'cloned', 'parsed', 'processing', 'ready', 'error')",
)
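
For context, the constraint values are meant to stay in lockstep with the project status enum in the application code. That enum is not part of this diff; a minimal sketch of what it presumably looks like after this change (only PROCESSING and the 'inferring' string are confirmed by the comments and constraint above — the other member names are inferred):

# Hypothetical sketch -- the real ProjectStatusEnum is defined elsewhere
# in the codebase and is not shown in this diff.
from enum import Enum


class ProjectStatusEnum(str, Enum):
    SUBMITTED = "submitted"
    CLONED = "cloned"
    PARSED = "parsed"
    PROCESSING = "processing"
    INFERRING = "inferring"  # new: AI enrichment in progress
    READY = "ready"
    ERROR = "error"

If the enum and the check constraint ever drift apart, inserts with the new status fail at the database level, which is exactly the failure mode this migration prevents.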
@@ -670,6 +670,12 @@ async def _generate_and_stream_ai_response(
project_ids=[project_id]
)

# Get project status to conditionally enable/disable tools
project_info = await self.project_service.get_project_from_db_by_id(
project_id
)
project_status = project_info.get("status") if project_info else None

# Prepare multimodal context - use current message attachments if available
image_attachments = None
if attachment_ids:
@@ -702,6 +708,7 @@ async def _generate_and_stream_ai_response(
history=validated_history[-12:],
node_ids=[node.node_id for node in node_ids],
query=query,
project_status=project_status,
),
)
)
@@ -733,6 +740,7 @@ async def _generate_and_stream_ai_response(
history=validated_history[-8:],
node_ids=nodes,
query=query,
project_status=project_status,
image_attachments=image_attachments,
context_images=context_images,
)
6 changes: 6 additions & 0 deletions app/modules/intelligence/agents/chat_agent.py
@@ -51,13 +51,19 @@ class ChatContext(BaseModel):
node_ids: Optional[List[str]] = None
additional_context: str = ""
query: str
# Project parsing status - used to conditionally enable/disable tools
project_status: Optional[str] = None
# Multimodal support - images attached to the current message
image_attachments: Optional[Dict[str, Dict[str, Union[str, int]]]] = (
None # attachment_id -> {base64, mime_type, file_size, etc}
)
# Context images from recent conversation history
context_images: Optional[Dict[str, Dict[str, Union[str, int]]]] = None

def is_inferring(self) -> bool:
"""Check if the project is still in INFERRING state (AI enrichment in progress)"""
return self.project_status == "inferring"

def has_images(self) -> bool:
"""Check if this context contains any images"""
return bool(self.image_attachments) or bool(self.context_images)
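
A quick illustration of how the new field and helper behave (hypothetical values; the model may declare additional required fields above line 51 that are not visible in this hunk):

# Illustrative only -- gate embedding-backed behavior on parsing status.
ctx = ChatContext(query="Where is auth handled?", project_status="inferring")
if ctx.is_inferring():
    # Knowledge-graph embeddings are still being generated; agents should
    # fall back to tools that read raw code instead.
    ...

Any status other than "inferring" — including None, for callers that never fetch it — leaves the full toolset enabled.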
@@ -22,7 +22,7 @@ def __init__(
self.llm_provider = llm_provider
self.prompt_provider = prompt_provider

-    def _build_agent(self):
+    def _build_agent(self, ctx: ChatContext = None):
agent_config = AgentConfig(
role="Blast Radius Analyzer",
goal="Analyze the impact of code changes",
@@ -34,6 +34,10 @@ def _build_agent(self):
)
],
)

# Exclude embedding-dependent tools during INFERRING status
exclude_embedding_tools = ctx.is_inferring() if ctx else False

tools = self.tools_provider.get_tools(
[
"get_nodes_from_tags",
@@ -46,7 +50,8 @@ def _build_agent(self):
"fetch_file",
"analyze_code_structure",
"bash_command",
-            ]
+            ],
+            exclude_embedding_tools=exclude_embedding_tools,
)
if not self.llm_provider.supports_pydantic("chat"):
raise UnsupportedProviderError(
@@ -55,12 +60,12 @@
return PydanticRagAgent(self.llm_provider, agent_config, tools)

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
-        return await self._build_agent().run(ctx)
+        return await self._build_agent(ctx).run(ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
-        async for chunk in self._build_agent().run_stream(ctx):
+        async for chunk in self._build_agent(ctx).run_stream(ctx):
yield chunk
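
Every agent now threads ctx into _build_agent so the tool list can be filtered. The ToolsProvider side of that filter is not included in this diff; a plausible sketch, assuming the embedding-dependent tools are the two named in the QNA agent's log message and that the provider keeps an internal name-to-tool registry (both are assumptions):

# Hypothetical sketch of ToolsProvider.get_tools -- not part of this diff.
EMBEDDING_DEPENDENT_TOOLS = {"ask_knowledge_graph_queries", "get_nodes_from_tags"}


def get_tools(self, tool_names, exclude_embedding_tools=False):
    if exclude_embedding_tools:
        # While a project is INFERRING, its embeddings are incomplete, so
        # tools that query them would return empty or misleading results.
        tool_names = [t for t in tool_names if t not in EMBEDDING_DEPENDENT_TOOLS]
    return [self._registry[name] for name in tool_names if name in self._registry]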


@@ -31,7 +31,7 @@ def __init__(
self.tools_provider = tools_provider
self.prompt_provider = prompt_provider

-    def _build_agent(self) -> ChatAgent:
+    def _build_agent(self, ctx: ChatContext = None) -> ChatAgent:
agent_config = AgentConfig(
role="Code Generation Agent",
goal="Generate precise, copy-paste ready code modifications that maintain project consistency and handle all dependencies",
@@ -58,6 +58,14 @@ def _build_agent(self) -> ChatAgent:
)
],
)

# Exclude embedding-dependent tools during INFERRING status
exclude_embedding_tools = ctx.is_inferring() if ctx else False
if exclude_embedding_tools:
logger.info(
"Project is in INFERRING status - excluding embedding-dependent tools"
)

tools = self.tools_provider.get_tools(
[
"get_code_from_multiple_node_ids",
@@ -91,7 +99,8 @@ def _build_agent(self) -> ChatAgent:
"fetch_file",
"analyze_code_structure",
"bash_command",
-            ]
+            ],
+            exclude_embedding_tools=exclude_embedding_tools,
)
supports_pydantic = self.llm_provider.supports_pydantic("chat")
should_use_multi = MultiAgentConfig.should_use_multi_agent(
@@ -145,13 +154,14 @@ async def _enriched_context(self, ctx: ChatContext) -> ChatContext:
return ctx

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
-        return await self._build_agent().run(await self._enriched_context(ctx))
+        enriched_ctx = await self._enriched_context(ctx)
+        return await self._build_agent(enriched_ctx).run(enriched_ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
ctx = await self._enriched_context(ctx)
-        async for chunk in self._build_agent().run_stream(ctx):
+        async for chunk in self._build_agent(ctx).run_stream(ctx):
yield chunk


@@ -29,7 +29,7 @@ def __init__(
self.llm_provider = llm_provider
self.prompt_provider = prompt_provider

-    def _build_agent(self) -> ChatAgent:
+    def _build_agent(self, ctx: ChatContext = None) -> ChatAgent:
agent_config = AgentConfig(
role="Context curation agent",
goal="Handle querying the knowledge graph and refining the results to provide accurate and contextually rich responses.",
@@ -49,6 +49,14 @@ def _build_agent(self) -> ChatAgent:
)
],
)

# Exclude embedding-dependent tools during INFERRING status
exclude_embedding_tools = ctx.is_inferring() if ctx else False
if exclude_embedding_tools:
logger.info(
"Project is in INFERRING status - excluding embedding-dependent tools"
)

tools = self.tools_provider.get_tools(
[
"get_code_from_multiple_node_ids",
@@ -82,7 +90,8 @@ def _build_agent(self) -> ChatAgent:
"fetch_file",
"analyze_code_structure",
"bash_command",
-            ]
+            ],
+            exclude_embedding_tools=exclude_embedding_tools,
)

supports_pydantic = self.llm_provider.supports_pydantic("chat")
@@ -133,13 +142,13 @@ async def _enriched_context(self, ctx: ChatContext) -> ChatContext:

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
ctx = await self._enriched_context(ctx)
-        return await self._build_agent().run(ctx)
+        return await self._build_agent(ctx).run(ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
ctx = await self._enriched_context(ctx)
-        async for chunk in self._build_agent().run_stream(ctx):
+        async for chunk in self._build_agent(ctx).run_stream(ctx):
yield chunk


@@ -29,7 +29,7 @@ def __init__(
self.tools_provider = tools_provider
self.prompt_provider = prompt_provider

-    def _build_agent(self) -> ChatAgent:
+    def _build_agent(self, ctx: ChatContext = None) -> ChatAgent:
agent_config = AgentConfig(
role="Design Planner",
goal="Create a detailed low-level design plan for implementing new features",
@@ -45,6 +45,14 @@ def _build_agent(self) -> ChatAgent:
)
],
)

# Exclude embedding-dependent tools during INFERRING status
exclude_embedding_tools = ctx.is_inferring() if ctx else False
if exclude_embedding_tools:
logger.info(
"Project is in INFERRING status - excluding embedding-dependent tools"
)

tools = self.tools_provider.get_tools(
[
"get_code_from_multiple_node_ids",
@@ -77,7 +85,8 @@ def _build_agent(self) -> ChatAgent:
"fetch_file",
"analyze_code_structure",
"bash_command",
-            ]
+            ],
+            exclude_embedding_tools=exclude_embedding_tools,
)

supports_pydantic = self.llm_provider.supports_pydantic("chat")
@@ -138,13 +147,14 @@ async def _enriched_context(self, ctx: ChatContext) -> ChatContext:
return ctx

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
-        return await self._build_agent().run(await self._enriched_context(ctx))
+        enriched_ctx = await self._enriched_context(ctx)
+        return await self._build_agent(enriched_ctx).run(enriched_ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
ctx = await self._enriched_context(ctx)
-        async for chunk in self._build_agent().run_stream(ctx):
+        async for chunk in self._build_agent(ctx).run_stream(ctx):
yield chunk


@@ -29,7 +29,7 @@ def __init__(
self.tools_provider = tools_provider
self.prompt_provider = prompt_provider

-    def _build_agent(self) -> ChatAgent:
+    def _build_agent(self, ctx: ChatContext = None) -> ChatAgent:
agent_config = AgentConfig(
role="QNA Agent",
goal="Answer queries of the repo in a detailed fashion",
@@ -49,6 +49,14 @@ def _build_agent(self) -> ChatAgent:
)
],
)

# Exclude embedding-dependent tools during INFERRING status
exclude_embedding_tools = ctx.is_inferring() if ctx else False
if exclude_embedding_tools:
logger.info(
"Project is in INFERRING status - excluding embedding-dependent tools (ask_knowledge_graph_queries, get_nodes_from_tags)"
)

tools = self.tools_provider.get_tools(
[
"get_code_from_multiple_node_ids",
@@ -82,7 +90,8 @@ def _build_agent(self) -> ChatAgent:
"fetch_file",
"analyze_code_structure",
"bash_command",
-            ]
+            ],
+            exclude_embedding_tools=exclude_embedding_tools,
)

supports_pydantic = self.llm_provider.supports_pydantic("chat")
@@ -143,13 +152,14 @@ async def _enriched_context(self, ctx: ChatContext) -> ChatContext:
return ctx

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
-        return await self._build_agent().run(await self._enriched_context(ctx))
+        enriched_ctx = await self._enriched_context(ctx)
+        return await self._build_agent(enriched_ctx).run(enriched_ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
ctx = await self._enriched_context(ctx)
-        async for chunk in self._build_agent().run_stream(ctx):
+        async for chunk in self._build_agent(ctx).run_stream(ctx):
yield chunk


21 changes: 16 additions & 5 deletions app/modules/intelligence/agents/custom_agents/runtime_agent.py
@@ -52,7 +52,7 @@ def __init__(
self.tools_provider = tools_provider
self.agent_config = CustomAgentConfig(**agent_config)

-    def _build_agent(self) -> ChatAgent:
+    def _build_agent(self, ctx: ChatContext = None) -> ChatAgent:
agent_config = AgentConfig(
role=self.agent_config.role,
goal=self.agent_config.goal,
@@ -65,7 +65,17 @@ def _build_agent(self) -> ChatAgent:
],
)

-        tools = self.tools_provider.get_tools(self.agent_config.tasks[0].tools)
+        # Exclude embedding-dependent tools during INFERRING status
+        exclude_embedding_tools = ctx.is_inferring() if ctx else False
+        if exclude_embedding_tools:
+            logger.info(
+                "Project is in INFERRING status - excluding embedding-dependent tools for custom agent"
+            )
+
+        tools = self.tools_provider.get_tools(
+            self.agent_config.tasks[0].tools,
+            exclude_embedding_tools=exclude_embedding_tools,
+        )

# Extract MCP servers from the first task with graceful error handling
mcp_servers = []
@@ -133,13 +143,14 @@ async def _enriched_context(self, ctx: ChatContext) -> ChatContext:
return ctx

async def run(self, ctx: ChatContext) -> ChatAgentResponse:
-        return await self._build_agent().run(await self._enriched_context(ctx))
+        enriched_ctx = await self._enriched_context(ctx)
+        return await self._build_agent(enriched_ctx).run(enriched_ctx)

async def run_stream(
self, ctx: ChatContext
) -> AsyncGenerator[ChatAgentResponse, None]:
-        ctx = await self._enriched_context(ctx)
-        async for chunk in self._build_agent().run_stream(ctx):
+        enriched_ctx = await self._enriched_context(ctx)
+        async for chunk in self._build_agent(enriched_ctx).run_stream(enriched_ctx):
yield chunk
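
Taken together, the effect for a custom agent whose task config requests an embedding-backed tool might look like this (a sketch under the same assumptions as the get_tools sketch above; identifiers are illustrative, not from this diff):

# Hypothetical end-to-end behavior during INFERRING.
requested = ["ask_knowledge_graph_queries", "fetch_file", "bash_command"]

# Status comes from the projects table, as fetched in
# _generate_and_stream_ai_response, and rides along on the context.
ctx = ChatContext(query="...", project_status="inferring")

tools = tools_provider.get_tools(
    requested, exclude_embedding_tools=ctx.is_inferring()
)
# -> fetch_file and bash_command only; ask_knowledge_graph_queries is
#    restored automatically once the project reaches 'ready'.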

