From de7e783e958fa591b5df6794504564d6d589a326 Mon Sep 17 00:00:00 2001 From: Noravee Kanchanavatee Date: Fri, 25 Jul 2025 00:53:42 -0700 Subject: [PATCH 1/3] Modify DefaultLlmFactory.create_base_chat_model() to check if class is in default llm class before trying to resolve it --- .../langchain/llms/default_llm_factory.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py index 8362af0d5..9179680d2 100644 --- a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py +++ b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py @@ -36,6 +36,7 @@ from neuro_san.internals.run_context.langchain.util.argument_validator import ArgumentValidator from neuro_san.internals.utils.resolver_util import ResolverUtil +DEFAULT_LLM_CLASS: Set[str] = {"anthropic", "azure-openai", "bedrock", "gemini", "ollama", "openai", "nvidia"} KEYS_TO_REMOVE_FOR_USER_CLASS: Set[str] = {"class", "verbose"} @@ -304,9 +305,21 @@ def create_base_chat_model(self, config: Dict[str, Any], # Let the next model have a crack found_exception = exception - # Try resolving via 'class' in config if factories failed + # Try resolving via "class" in config if llm factories failed + # + # Note: config["class"] is always set — if the user intended to use a default LLM, + # it will point to a known default like "openai" or "bedrock". In those cases, + # we avoid re-resolving it here to prevent masking the original error with + # a new one from create_base_chat_model_from_user_class. + # + # This fallback only applies when the user provides a non-default class path + # and factory resolution failed. 
class_path: str = config.get("class") - if llm is None and found_exception is not None and class_path: + if ( + llm is None + and found_exception is not None + and class_path not in DEFAULT_LLM_CLASS + ): llm = self.create_base_chat_model_from_user_class(class_path, config) found_exception = None From 68a2807b504a8dfc611a77ed5603db873a1cdf08 Mon Sep 17 00:00:00 2001 From: Noravee Kanchanavatee Date: Fri, 25 Jul 2025 00:59:22 -0700 Subject: [PATCH 2/3] fix typo --- .../run_context/langchain/llms/default_llm_factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py index 9179680d2..1e989f722 100644 --- a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py +++ b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py @@ -36,7 +36,7 @@ from neuro_san.internals.run_context.langchain.util.argument_validator import ArgumentValidator from neuro_san.internals.utils.resolver_util import ResolverUtil -DEFAULT_LLM_CLASS: Set[str] = {"anthropic", "azure-openai", "bedrock", "gemini", "ollama", "openai", "nvidia"} +DEFAULT_LLM_CLASSES: Set[str] = {"anthropic", "azure-openai", "bedrock", "gemini", "ollama", "openai", "nvidia"} KEYS_TO_REMOVE_FOR_USER_CLASS: Set[str] = {"class", "verbose"} @@ -318,7 +318,7 @@ def create_base_chat_model(self, config: Dict[str, Any], if ( llm is None and found_exception is not None - and class_path not in DEFAULT_LLM_CLASS + and class_path not in DEFAULT_LLM_CLASSES ): llm = self.create_base_chat_model_from_user_class(class_path, config) found_exception = None From 429df63065079ee0927af48bfe7d814fbb6093a7 Mon Sep 17 00:00:00 2001 From: Noravee Kanchanavatee Date: Fri, 25 Jul 2025 09:36:44 -0700 Subject: [PATCH 3/3] get the default llm class from the default llm info file instead --- .../run_context/langchain/llms/default_llm_factory.py | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py index 1e989f722..e81d48e52 100644 --- a/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py +++ b/neuro_san/internals/run_context/langchain/llms/default_llm_factory.py @@ -36,7 +36,6 @@ from neuro_san.internals.run_context.langchain.util.argument_validator import ArgumentValidator from neuro_san.internals.utils.resolver_util import ResolverUtil -DEFAULT_LLM_CLASSES: Set[str] = {"anthropic", "azure-openai", "bedrock", "gemini", "ollama", "openai", "nvidia"} KEYS_TO_REMOVE_FOR_USER_CLASS: Set[str] = {"class", "verbose"} @@ -315,10 +314,11 @@ def create_base_chat_model(self, config: Dict[str, Any], # This fallback only applies when the user provides a non-default class path # and factory resolution failed. class_path: str = config.get("class") + default_llm_classes: Set[str] = set(self.llm_infos.get("classes")) if ( llm is None and found_exception is not None - and class_path not in DEFAULT_LLM_CLASSES + and class_path not in default_llm_classes ): llm = self.create_base_chat_model_from_user_class(class_path, config) found_exception = None