Skip to content
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
de0a29a
add user_specified_langchain_llm_factory
Noravee Jun 24, 2025
ecdee78
fix indent and pylint
Noravee Jun 24, 2025
d23186e
Add more comments
Noravee Jun 24, 2025
928ea36
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jun 26, 2025
58b3b8b
remove ollama response error
Noravee Jun 26, 2025
5906c6f
Merge branch 'UN-3276_Support_for_user-defined_llm_in_hocon_file_with…
Noravee Jun 26, 2025
8a8128f
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jun 26, 2025
23891db
minor changes
Noravee Jun 30, 2025
2a8131f
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 2, 2025
8e4d881
- Remove user_specified_langchain_llm_factory
Noravee Jul 2, 2025
962abf7
Merge branch 'UN-3276_Support_for_user-defined_llm_in_hocon_file_with…
Noravee Jul 2, 2025
17974b5
use alias for api error in langchain run context
Noravee Jul 2, 2025
9d90aaa
change comments
Noravee Jul 2, 2025
7647dc9
remove space
Noravee Jul 2, 2025
741e50e
Add comments
Noravee Jul 2, 2025
f8ce89d
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 2, 2025
69889ad
- Refactor logic on creating llm based on "class" into another method
Noravee Jul 3, 2025
9363936
Merge branch 'UN-3276_Support_for_user-defined_llm_in_hocon_file_with…
Noravee Jul 3, 2025
a796cf8
Add type hints
Noravee Jul 3, 2025
bbc3fbc
Add callbacks
Noravee Jul 3, 2025
50bd7f0
combine user config with the one in class in llm_info
Noravee Jul 4, 2025
1aac685
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 7, 2025
69f38f2
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 7, 2025
e6dc921
remove optional
Noravee Jul 7, 2025
0b98a39
Merge branch 'UN-3276_Support_for_user-defined_llm_in_hocon_file_with…
Noravee Jul 7, 2025
cca1d03
refactor default llm factory with resolver util
Noravee Jul 8, 2025
b021671
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 8, 2025
35c0362
Merge branch 'main' into UN-3276_Support_for_user-defined_llm_in_hoco…
Noravee Jul 9, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 58 additions & 0 deletions llm_extension/groq_langchain_llm_factory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@

# Copyright (C) 2023-2025 Cognizant Digital Business, Evolutionary AI.
# All Rights Reserved.
# Issued under the Academic Public License.
#
# You can be released from the terms, and requirements of the Academic Public
# License by purchasing a commercial license.
# Purchase of a commercial license is mandatory for any use of the
# neuro-san SDK Software in commercial settings.
#
# END COPYRIGHT

from typing import Any
from typing import Dict
from typing import List

from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.language_models.base import BaseLanguageModel
from langchain_groq import ChatGroq

from neuro_san.internals.run_context.langchain.llms.langchain_llm_factory import LangChainLlmFactory


class GroqLangChainLlmFactory(LangChainLlmFactory):
"""
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is for testing only. It will not be in the final PR.

Copy link
Collaborator

@d1donlydfink d1donlydfink Jul 7, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the spirit of robust tests + limited library dependencies, you could put this class and extra hocon file under the test/ directory and the extra langchain_groq dependency in requirements-build.txt as long as your intent is to add some kind of regularly run test that uses it (unit test if < 15 secs, integration test if longer)

Factory class for LLM operations
"""

def create_base_chat_model(self, config: Dict[str, Any],
                           callbacks: List[BaseCallbackHandler] = None) -> BaseLanguageModel:
    """
    Create a BaseLanguageModel from the fully-specified llm config.
    :param config: The fully specified llm config which is a product of
                _create_full_llm_config() above.
    :param callbacks: A list of BaseCallbackHandlers to add to the chat model.
    :return: A BaseLanguageModel (can be Chat or LLM)
            Can raise a ValueError if the config's class or model_name value is
            unknown to this method.
    """
    # Normalize the class name so the comparison below is case-insensitive.
    chat_class: str = config.get("class")
    if chat_class is not None:
        chat_class = chat_class.lower()

    model_name: str = config.get("model_name")

    # Fail fast before attempting any construction when no class was given.
    if chat_class is None:
        raise ValueError(f"Class name {chat_class} for model_name {model_name} is unspecified.")

    # Construct the LLM
    llm: BaseLanguageModel = None
    if chat_class == "groq":
        llm = ChatGroq(
            model=model_name,
            temperature=config.get("temperature"),
            # Bug fix: callbacks were accepted by this method but never
            # forwarded to the chat model, so they were silently dropped.
            callbacks=callbacks
        )
    else:
        raise ValueError(f"Class {chat_class} for model_name {model_name} is unrecognized.")

    return llm
26 changes: 26 additions & 0 deletions llm_extension/llm_info.hocon
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Copyright (C) 2023-2025 Cognizant Digital Business, Evolutionary AI.
# All Rights Reserved.
# Issued under the Academic Public License.
#
# You can be released from the terms, and requirements of the Academic Public
# License by purchasing a commercial license.
# Purchase of a commercial license is mandatory for any use of the
# neuro-san SDK Software in commercial settings.
#
# END COPYRIGHT

# The schema specifications for this file are documented here:
# https://github.com/cognizant-ai-lab/neuro-san/blob/main/docs/llm_info_hocon_reference.md

{


"classes": {
"factories": [ "llm_extension.groq_langchain_llm_factory.GroqLangChainLlmFactory" ]
"groq": {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is for testing only. It will not be in the final PR.

# Add arguments like temperature that you want to pass to the llm here.
"temperature": 0.7
}
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,8 @@
from logging import Logger
from logging import getLogger

from openai import APIError
from anthropic import BadRequestError
from anthropic import AuthenticationError
from openai import APIError as OpenAI_APIError
from anthropic import APIError as Anthropic_APIError
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Only import APIError.


from pydantic_core import ValidationError

Expand Down Expand Up @@ -495,7 +494,7 @@ async def ainvoke(self, agent_executor: AgentExecutor, inputs: Dict[str, Any], i
while return_dict is None and retries > 0:
try:
return_dict: Dict[str, Any] = await agent_executor.ainvoke(inputs, invoke_config)
except (APIError, BadRequestError, AuthenticationError, ChatGoogleGenerativeAIError) as api_error:
except (OpenAI_APIError, Anthropic_APIError, ChatGoogleGenerativeAIError) as api_error:
message: str = ApiKeyErrorCheck.check_for_api_key_exception(api_error)
if message is not None:
raise ValueError(message) from api_error
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
from neuro_san.internals.run_context.langchain.llms.langchain_llm_factory import LangChainLlmFactory
from neuro_san.internals.run_context.langchain.llms.llm_info_restorer import LlmInfoRestorer
from neuro_san.internals.run_context.langchain.llms.standard_langchain_llm_factory import StandardLangChainLlmFactory
from neuro_san.internals.run_context.langchain.toolbox.toolbox_factory import ToolboxFactory
from neuro_san.internals.run_context.langchain.util.api_key_error_check import ApiKeyErrorCheck


Expand Down Expand Up @@ -123,24 +124,13 @@ def resolve_one_llm_factory(self, llm_factory_class_name: str, llm_info_file: st
raise ValueError(f"The value for the classes.factories key in {llm_info_file} "
"must be a list of strings")

class_split: List[str] = llm_factory_class_name.split(".")
if len(class_split) <= 2:
raise ValueError(f"Value in the classes.factories in {llm_info_file} must be of the form "
"<package_name>.<module_name>.<ClassName>")

# Create a list of a single package given the name in the value
packages: List[str] = [".".join(class_split[:-2])]
class_name: str = class_split[-1]
resolver = Resolver(packages)

# Resolve the class name
llm_factory_class: Type[LangChainLlmFactory] = None
try:
llm_factory_class: Type[LangChainLlmFactory] = \
resolver.resolve_class_in_module(class_name, module_name=class_split[-2])
except AttributeError as exception:
raise ValueError(f"Class {llm_factory_class_name} in {llm_info_file} "
"not found in PYTHONPATH") from exception
# Resolve the factory class
llm_factory_class = self._resolve_class_from_path(
class_path=llm_factory_class_name,
expected_base=LangChainLlmFactory,
source_file=llm_info_file,
description="classes.factories"
)

# Instantiate it
try:
Expand All @@ -155,6 +145,38 @@ def resolve_one_llm_factory(self, llm_factory_class_name: str, llm_info_file: st
"must be of type LangChainLlmFactory")
return llm_factory

def _resolve_class_from_path(
self,
class_path: str,
expected_base: Type,
source_file: str,
description: str
) -> Type:
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since there are two methods that need to resolve a class — resolve_one_llm_factory() and create_base_chat_model() — refactor by extracting a new shared method, _resolve_class_from_path().


parts = class_path.split(".")
if len(parts) <= 2:
raise ValueError(
f"Value for '{description}' in {source_file} must be of the form "
"<package>.<module>.<ClassName>"
)

module_name = parts[-2]
class_name = parts[-1]
packages = [".".join(parts[:-2])]
resolver = Resolver(packages)

try:
cls = resolver.resolve_class_in_module(class_name, module_name=module_name)
except AttributeError as e:
raise ValueError(f"Class {class_path} in {source_file} not found in PYTHONPATH") from e

if not issubclass(cls, expected_base):
raise ValueError(
f"Class {class_path} in {source_file} must be a subclass of {expected_base.__name__}"
)

return cls

def create_llm(self, config: Dict[str, Any], callbacks: List[BaseCallbackHandler] = None) -> BaseLanguageModel:
"""
Creates a langchain LLM based on the 'model_name' value of
Expand All @@ -169,13 +191,19 @@ def create_llm(self, config: Dict[str, Any], callbacks: List[BaseCallbackHandler
"""
full_config: Dict[str, Any] = self.create_full_llm_config(config)
llm: BaseLanguageModel = self.create_base_chat_model(full_config, callbacks)
print(f"\n\n{llm=}\n\n")
return llm

def create_full_llm_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
"""
:param config: The llm_config from the user
:return: The fully specified config with defaults filled in.
"""

if config.get("class"):
# If config has "class", it is a user-specified llm so return config as is,
return config
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If the config has a "class" key, it is a user-specified LLM, so return the config as is.


default_config: Dict[str, Any] = self.llm_infos.get("default_config")
use_config = self.overlayer.overlay(default_config, config)

Expand Down Expand Up @@ -279,6 +307,27 @@ def create_base_chat_model(self, config: Dict[str, Any],
# Let the next model have a crack
found_exception = exception

# Try resolving via 'class' in config if factories failed
class_path = config.get("class")
if found_exception is not None and class_path:
if not isinstance(class_path, str):
raise ValueError("'class' in llm_config must be a string")

# Resolve the 'class'
llm_class = self._resolve_class_from_path(
class_path=class_path,
expected_base=BaseLanguageModel,
source_file="agent network hocon file",
description="llm_config"
)

# copy the config, take 'class' out, and unpack into llm constructor
user_config = config.copy()
user_config.pop("class")
ToolboxFactory.check_invalid_args(llm_class, user_config)
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Using check_invalid_args() from ToolboxFactory to check for invalid arguments.

llm = llm_class(**user_config)
found_exception = None

if found_exception is not None:
raise found_exception

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def create_base_chat_model(self, config: Dict[str, Any],
if chat_class is not None:
chat_class = chat_class.lower()

model_name: str = config.get("model_name")
model_name: str = config.get("model_name") or config.get("model") or config.get("model_id")

if chat_class == "openai":
llm = ChatOpenAI(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#
# END COPYRIGHT

from inspect import isclass
from inspect import signature
from types import MethodType
from typing import Any
Expand Down Expand Up @@ -152,7 +153,7 @@ def create_tool_from_toolbox(
self._get_from_api_wrapper_method(tool_class) or tool_class

# Validate and instantiate
self._check_invalid_args(callable_obj, final_args)
ToolboxFactory.check_invalid_args(callable_obj, final_args)
# Instance can be a BaseTool or a BaseToolkit
instance: Union[BaseTool, BaseToolkit] = callable_obj(**final_args)

Expand All @@ -178,7 +179,7 @@ def _resolve_args(self, args: Dict[str, Any]) -> Dict[str, Any]:
# If the argument is a class definition, resolve and instantiate it
nested_class: BaseModel = self._resolve_class(value.get("class"))
nested_args: Dict[str, Any] = self._resolve_args(value.get("args", empty))
self._check_invalid_args(nested_class, nested_args)
ToolboxFactory.check_invalid_args(nested_class, nested_args)
resolved_args[key] = nested_class(**nested_args)
else:
# Otherwise, keep primitive values as they are
Expand Down Expand Up @@ -210,16 +211,24 @@ def _resolve_class(self, class_path: str) -> Type[BaseTool]:
except AttributeError as exception:
raise ValueError(f"Class {class_path} not found in PYTHONPATH") from exception

def _check_invalid_args(self, method_class: Union[Type, MethodType], args: Dict[str, Any]):
@staticmethod
def check_invalid_args(method_class: Union[Type, MethodType], args: Dict[str, Any]):
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Change check_invalid_args from an internal method to a static method.

"""
Check for invalid arguments in class or method
:param method_class: Class or method to check for the invalid arguments
:param args: Arguments to check
"""
class_args_set: Set[str] = set(signature(method_class).parameters.keys())
pydantic_args: Set[str] = set()
# Check for if it is a class that extends pydantic BaseModel
if isclass(method_class) and issubclass(method_class, BaseModel):
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Check whether the class extends BaseModel. This is necessary because pydantic allows arguments to have aliases.

# Include field names as args
pydantic_args = set(method_class.model_fields.keys())
# Combine the arguments
class_args_set: Set[str] = set(signature(method_class).parameters.keys()).union(pydantic_args)
args_set: Set[str] = set(args.keys())
invalid_args: Set[str] = args_set - class_args_set

# If there are args that are not from class args or alias, raise error
invalid_args: Set[str] = args_set - class_args_set
if invalid_args:
raise ValueError(
f"Arguments {invalid_args} for '{method_class.__name__}' do not match any attributes "
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,10 @@

# Azure OpenAI requires several parameters; all can be set via environment variables
# except "deployment_name", which must be provided explicitly.
"AZURE_OPENAI_API_KEY": ["Error code: 401", "invalid subscription key", "wrong API endpoint", "Connection error"],
"AZURE_OPENAI_ENDPOINT": ["validation error", "base_url", "azure_endpoint", "AZURE_OPENAI_ENDPOINT",
"Connection error"],
"OPENAI_API_VERSION": ["validation error", "api_version", "OPENAI_API_VERSION", "Error code: 404",
"Resource not found"],
"deployment_name": ["Error code: 404", "Resource not found", "API deployment for this resource does not exist"],
"AZURE_OPENAI_API_KEY": ["invalid subscription key", "wrong API endpoint"],
"AZURE_OPENAI_ENDPOINT": ["base_url", "azure_endpoint", "AZURE_OPENAI_ENDPOINT"],
"OPENAI_API_VERSION": ["api_version", "OPENAI_API_VERSION"],
"deployment_name": ["API deployment for this resource does not exist"],
}


Expand Down
4 changes: 2 additions & 2 deletions neuro_san/registries/google_serper.hocon
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,10 @@
{
"name": "searcher",
"instructions": "Use your tool to respond to the inquiry.",
"function": {
"function": {
# The description acts as an initial prompt.
"description": "Assist user with answer from internet."
}
}
"tools": ["search_tool"]
},
{
Expand Down
4 changes: 3 additions & 1 deletion neuro_san/registries/manifest.hocon
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,9 @@
# Agents having to do with test infrastructure
"gist.hocon": true,
"assess_failure.hocon": true,

"test_new_model_default_class.hocon": true,
"test_new_class.hocon": true,
"test_new_model_extended_class.hocon": true
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is for testing only. It will not be in the final PR.

# STOP AND READ: YOU PROBABLY DON'T WANT TO ADD YOUR .hocon FILE HERE.
#
# The agent network .hocon files above are examples specific to the neuro-san library.
Expand Down
Loading
Loading