-
Notifications
You must be signed in to change notification settings - Fork 28
UN-3276 support for user defined llm in hocon file with class key #270
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
de0a29a
ecdee78
d23186e
928ea36
58b3b8b
5906c6f
8a8128f
23891db
2a8131f
8e4d881
962abf7
17974b5
9d90aaa
7647dc9
741e50e
f8ce89d
69889ad
9363936
a796cf8
bbc3fbc
50bd7f0
1aac685
69f38f2
e6dc921
0b98a39
cca1d03
b021671
35c0362
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -23,9 +23,8 @@ | |
| from logging import Logger | ||
| from logging import getLogger | ||
|
|
||
| from openai import APIError | ||
| from anthropic import BadRequestError | ||
| from anthropic import AuthenticationError | ||
| import openai | ||
| import anthropic | ||
|
|
||
| from pydantic_core import ValidationError | ||
|
|
||
|
|
@@ -495,7 +494,7 @@ async def ainvoke(self, agent_executor: AgentExecutor, inputs: Dict[str, Any], i | |
| while return_dict is None and retries > 0: | ||
| try: | ||
| return_dict: Dict[str, Any] = await agent_executor.ainvoke(inputs, invoke_config) | ||
| except (APIError, BadRequestError, AuthenticationError, ChatGoogleGenerativeAIError) as api_error: | ||
| except (openai.APIError, anthropic.APIError, ChatGoogleGenerativeAIError) as api_error: | ||
|
||
| message: str = ApiKeyErrorCheck.check_for_api_key_exception(api_error) | ||
| if message is not None: | ||
| raise ValueError(message) from api_error | ||
|
|
@@ -509,6 +508,11 @@ async def ainvoke(self, agent_executor: AgentExecutor, inputs: Dict[str, Any], i | |
| retries = retries - 1 | ||
| exception = key_error | ||
| backtrace = traceback.format_exc() | ||
| except TypeError as type_error: | ||
|
||
| self.logger.warning("retrying from TypeError") | ||
| retries = retries - 1 | ||
| exception = type_error | ||
| backtrace = traceback.format_exc() | ||
Noravee marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| except ValueError as value_error: | ||
| response = str(value_error) | ||
| find_string = "An output parsing error occurred. " + \ | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -33,6 +33,8 @@ | |
| from neuro_san.internals.run_context.langchain.llms.langchain_llm_factory import LangChainLlmFactory | ||
| from neuro_san.internals.run_context.langchain.llms.llm_info_restorer import LlmInfoRestorer | ||
| from neuro_san.internals.run_context.langchain.llms.standard_langchain_llm_factory import StandardLangChainLlmFactory | ||
| from neuro_san.internals.run_context.langchain.llms.user_specified_langchain_llm_factory import \ | ||
| UserSpecifiedLangChainLlmFactory | ||
| from neuro_san.internals.run_context.langchain.util.api_key_error_check import ApiKeyErrorCheck | ||
|
|
||
|
|
||
|
|
@@ -75,6 +77,8 @@ def __init__(self, config: Optional[Dict[str, Any]] = None): | |
| self.llm_factories: List[LangChainLlmFactory] = [ | ||
| StandardLangChainLlmFactory() | ||
| ] | ||
| self.llm_class: str = None | ||
Noravee marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
|
|
||
| if config: | ||
| self.llm_info_file: str = config.get("agent_llm_info_file") | ||
| else: | ||
|
|
@@ -176,6 +180,14 @@ def create_full_llm_config(self, config: Dict[str, Any]) -> Dict[str, Any]: | |
| :param config: The llm_config from the user | ||
| :return: The fully specified config with defaults filled in. | ||
| """ | ||
|
|
||
| self.llm_class = config.get("class") | ||
| if self.llm_class: | ||
| # If config has "class", it is a user-specified llm so return config as is, | ||
| # and replace "StandardLangChainLlmFactory" with "UserSpecifiedLangChainLlmFactory". | ||
| self.llm_factories[0] = UserSpecifiedLangChainLlmFactory() | ||
|
||
| return config | ||
|
||
|
|
||
| default_config: Dict[str, Any] = self.llm_infos.get("default_config") | ||
| use_config = self.overlayer.overlay(default_config, config) | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,86 @@ | ||
|
|
||
| # Copyright (C) 2023-2025 Cognizant Digital Business, Evolutionary AI. | ||
| # All Rights Reserved. | ||
| # Issued under the Academic Public License. | ||
| # | ||
| # You can be released from the terms, and requirements of the Academic Public | ||
| # License by purchasing a commercial license. | ||
| # Purchase of a commercial license is mandatory for any use of the | ||
| # neuro-san SDK Software in commercial settings. | ||
| # | ||
| # END COPYRIGHT | ||
|
|
||
| from typing import Any | ||
| from typing import Dict | ||
| from typing import List | ||
|
|
||
| from langchain_anthropic.chat_models import ChatAnthropic | ||
| from langchain_google_genai.chat_models import ChatGoogleGenerativeAI | ||
| from langchain_ollama import ChatOllama | ||
| from langchain_nvidia_ai_endpoints import ChatNVIDIA | ||
| from langchain_core.callbacks.base import BaseCallbackHandler | ||
| from langchain_core.language_models.base import BaseLanguageModel | ||
| from langchain_openai.chat_models.azure import AzureChatOpenAI | ||
| from langchain_openai.chat_models.base import ChatOpenAI | ||
|
|
||
| from neuro_san.internals.run_context.langchain.llms.langchain_llm_factory import LangChainLlmFactory | ||
|
|
||
|
|
||
| class UserSpecifiedLangChainLlmFactory(LangChainLlmFactory): | ||
| """ | ||
| A factory for constructing LLMs based on user-specified configurations provided under the "llm_config" | ||
|
||
| section of the agent network HOCON file. | ||
|
|
||
| The specific LLM class to instantiate is determined by the "class" field in "llm_config", and all | ||
| other keys in the config are passed as arguments to that class's constructor. | ||
| """ | ||
|
|
||
| def create_base_chat_model(self, config: Dict[str, Any], | ||
| callbacks: List[BaseCallbackHandler] = None) -> BaseLanguageModel: | ||
| """ | ||
| Create a BaseLanguageModel from the user-specified llm config. | ||
| :param config: The user-specified llm config | ||
| :param callbacks: A list of BaseCallbackHandlers to add to the chat model. | ||
| :return: A BaseLanguageModel (can be Chat or LLM) | ||
| Can raise a ValueError if the config's class or model_name value is | ||
| unknown to this method. | ||
| """ | ||
| # Construct the LLM | ||
| llm: BaseLanguageModel = None | ||
| chat_class: str = config.get("class") | ||
| if chat_class is not None: | ||
| chat_class = chat_class.lower() | ||
|
|
||
| # Take "class" out of config and add "callbacks". | ||
| config.pop("class") | ||
| config["callbacks"] = callbacks | ||
Noravee marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
|
|
||
| # Unpack config in the user-specified class | ||
| if chat_class == "openai": | ||
| llm = ChatOpenAI(**config) | ||
| elif chat_class == "azure-openai": | ||
| llm = AzureChatOpenAI(**config) | ||
| elif chat_class == "anthropic": | ||
| llm = ChatAnthropic(**config) | ||
| elif chat_class == "ollama": | ||
| llm = ChatOllama(**config) | ||
| elif chat_class == "nvidia": | ||
| llm = ChatNVIDIA(**config) | ||
| elif chat_class == "gemini": | ||
| llm = ChatGoogleGenerativeAI(**config) | ||
| else: | ||
Noravee marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| valid_class_map = { | ||
| "openai": "ChatOpenAI", | ||
| "azure-openai": "AzureChatOpenAI", | ||
| "anthropic": "ChatAnthropic", | ||
| "ollama": "ChatOllama", | ||
| "nvidia": "ChatNVIDIA", | ||
| "gemini": "ChatGoogleGenerativeAI", | ||
| } | ||
| available = "\n".join(f" - '{key}': {val}" for key, val in valid_class_map.items()) | ||
| raise ValueError( | ||
| f"Unrecognized model class '{chat_class}'.\n" | ||
| f"Valid class values and their corresponding implementations are:\n{available}" | ||
| ) | ||
|
|
||
| return llm | ||
Uh oh!
There was an error while loading. Please reload this page.