kiln_ai.adapters.model_adapters.langchain_adapters
import os
from typing import Any, Dict, NoReturn

from langchain_aws import ChatBedrockConverse
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.messages.base import BaseMessage
from langchain_core.runnables import Runnable
from langchain_fireworks import ChatFireworks
from langchain_groq import ChatGroq
from langchain_ollama import ChatOllama
from pydantic import BaseModel

import kiln_ai.datamodel as datamodel
from kiln_ai.adapters.ml_model_list import (
    KilnModelProvider,
    ModelProviderName,
    StructuredOutputMode,
)
from kiln_ai.adapters.model_adapters.base_adapter import (
    AdapterInfo,
    BaseAdapter,
    BasePromptBuilder,
    RunOutput,
)
from kiln_ai.adapters.ollama_tools import (
    get_ollama_connection,
    ollama_base_url,
    ollama_model_installed,
)
from kiln_ai.utils.config import Config

LangChainModelType = BaseChatModel | Runnable[LanguageModelInput, Dict | BaseModel]


class LangchainAdapter(BaseAdapter):
    _model: LangChainModelType | None = None

    def __init__(
        self,
        kiln_task: datamodel.Task,
        custom_model: BaseChatModel | None = None,
        model_name: str | None = None,
        provider: str | None = None,
        prompt_builder: BasePromptBuilder | None = None,
        tags: list[str] | None = None,
    ):
        if custom_model is not None:
            self._model = custom_model

            # Attempt to infer model provider and name from custom model
            if provider is None:
                provider = "custom.langchain:" + custom_model.__class__.__name__

            if model_name is None:
                model_name = "custom.langchain:unknown_model"
                if hasattr(custom_model, "model_name") and isinstance(
                    getattr(custom_model, "model_name"), str
                ):
                    model_name = "custom.langchain:" + getattr(
                        custom_model, "model_name"
                    )
                if hasattr(custom_model, "model") and isinstance(
                    getattr(custom_model, "model"), str
                ):
                    model_name = "custom.langchain:" + getattr(custom_model, "model")
        elif model_name is not None:
            # default provider name if not provided
            provider = provider or "custom.langchain.default_provider"
        else:
            raise ValueError(
                "model_name and provider must be provided if custom_model is not provided"
            )

        if model_name is None:
            raise ValueError("model_name must be provided")

        super().__init__(
            kiln_task,
            model_name=model_name,
            model_provider_name=provider,
            prompt_builder=prompt_builder,
            tags=tags,
        )

    async def model(self) -> LangChainModelType:
        # cached model
        if self._model:
            return self._model

        self._model = await self.langchain_model_from()

        # Decide if we want to use Langchain's structured output:
        # 1. Only for structured tasks
        # 2. Only if the provider's mode isn't json_instructions (only mode that doesn't use an API option for structured output capabilities)
        provider = await self.model_provider()
        use_lc_structured_output = (
            self.has_structured_output()
            and provider.structured_output_mode
            != StructuredOutputMode.json_instructions
        )

        if use_lc_structured_output:
            if not hasattr(self._model, "with_structured_output") or not callable(
                getattr(self._model, "with_structured_output")
            ):
                raise ValueError(
                    f"model {self._model} does not support structured output, cannot use output_json_schema"
                )
            # Langchain expects title/description to be at top level, on top of json schema
            output_schema = self.kiln_task.output_schema()
            if output_schema is None:
                raise ValueError(
                    f"output_json_schema is not valid json: {self.kiln_task.output_json_schema}"
                )
            output_schema["title"] = "task_response"
            output_schema["description"] = "A response from the task"
            with_structured_output_options = await self.get_structured_output_options(
                self.model_name, self.model_provider_name
            )
            self._model = self._model.with_structured_output(
                output_schema,
                include_raw=True,
                **with_structured_output_options,
            )
        return self._model

    async def _run(self, input: Dict | str) -> RunOutput:
        provider = await self.model_provider()
        model = await self.model()
        chain = model
        intermediate_outputs = {}

        prompt = await self.build_prompt()
        user_msg = self.prompt_builder.build_user_message(input)
        messages = [
            SystemMessage(content=prompt),
            HumanMessage(content=user_msg),
        ]

        # Handle chain of thought if enabled. 3 Modes:
        # 1. Unstructured output: just call the LLM, with prompting for thinking
        # 2. "Thinking" LLM designed to output thinking in a structured format: we make 1 call to the LLM, which outputs thinking in a structured format.
        # 3. Normal LLM with structured output: we make 2 calls to the LLM - one for thinking and one for the final response. This helps us use the LLM's structured output modes (json_schema, tools, etc), which can't be used in a single call.
        cot_prompt = self.prompt_builder.chain_of_thought_prompt()
        thinking_llm = provider.reasoning_capable

        if cot_prompt and (not self.has_structured_output() or thinking_llm):
            # Case 1 or 2: Unstructured output, or "Thinking" LLM designed to output thinking in a structured format
            messages.append({"role": "system", "content": cot_prompt})
        elif not thinking_llm and cot_prompt and self.has_structured_output():
            # Case 3: Normal LLM with structured output
            # Base model (without structured output) used for COT message
            base_model = await self.langchain_model_from()
            messages.append(
                SystemMessage(content=cot_prompt),
            )

            cot_messages = [*messages]
            cot_response = await base_model.ainvoke(cot_messages)
            intermediate_outputs["chain_of_thought"] = cot_response.content
            messages.append(AIMessage(content=cot_response.content))
            messages.append(
                SystemMessage(content="Considering the above, return a final result.")
            )

        response = await chain.ainvoke(messages)

        # Langchain may have already parsed the response into structured output, so use that if available.
        # However, a plain string may still be fixed at the parsing layer, so not being structured isn't a critical failure (yet)
        if (
            self.has_structured_output()
            and isinstance(response, dict)
            and "parsed" in response
            and isinstance(response["parsed"], dict)
        ):
            structured_response = response["parsed"]
            return RunOutput(
                output=self._munge_response(structured_response),
                intermediate_outputs=intermediate_outputs,
            )

        if not isinstance(response, BaseMessage):
            raise RuntimeError(f"response is not a BaseMessage: {response}")

        text_content = response.content
        if not isinstance(text_content, str):
            raise RuntimeError(f"response is not a string: {text_content}")

        return RunOutput(
            output=text_content,
            intermediate_outputs=intermediate_outputs,
        )

    def adapter_info(self) -> AdapterInfo:
        return AdapterInfo(
            model_name=self.model_name,
            model_provider=self.model_provider_name,
            adapter_name="kiln_langchain_adapter",
            prompt_builder_name=self.prompt_builder.__class__.prompt_builder_name(),
            prompt_id=self.prompt_builder.prompt_id(),
        )

    def _munge_response(self, response: Dict) -> Dict:
        # Mistral Large tool calling format is a bit different. Convert to standard format.
        if (
            "name" in response
            and response["name"] == "task_response"
            and "arguments" in response
        ):
            return response["arguments"]
        return response

    async def get_structured_output_options(
        self, model_name: str, model_provider_name: str
    ) -> Dict[str, Any]:
        provider = await self.model_provider()
        if not provider:
            return {}

        options = {}
        # We may need to add some provider specific logic here if providers use different names for the same mode, but everyone is copying openai for now
        match provider.structured_output_mode:
            case StructuredOutputMode.function_calling:
                options["method"] = "function_calling"
            case StructuredOutputMode.json_mode:
                options["method"] = "json_mode"
            case StructuredOutputMode.json_schema:
                options["method"] = "json_schema"
            case StructuredOutputMode.json_instructions:
                # JSON done via instructions in prompt, not via API
                pass
            case StructuredOutputMode.default:
                # Let langchain decide the default
                pass
            case _:
                raise ValueError(
                    f"Unhandled enum value: {provider.structured_output_mode}"
                )
                # triggers pyright warning if I miss a case
                return NoReturn

        return options

    async def langchain_model_from(self) -> BaseChatModel:
        provider = await self.model_provider()
        return await langchain_model_from_provider(provider, self.model_name)


async def langchain_model_from_provider(
    provider: KilnModelProvider, model_name: str
) -> BaseChatModel:
    if provider.name == ModelProviderName.openai:
        # We use the OpenAICompatibleAdapter for OpenAI
        raise ValueError("OpenAI is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.openai_compatible:
        # We use the OpenAICompatibleAdapter for OpenAI compatible
        raise ValueError("OpenAI compatible is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.groq:
        api_key = Config.shared().groq_api_key
        if api_key is None:
            raise ValueError(
                "Attempted to use Groq without an API key set. "
                "Get your API key from https://console.groq.com/keys"
            )
        return ChatGroq(**provider.provider_options, groq_api_key=api_key)  # type: ignore[arg-type]
    elif provider.name == ModelProviderName.amazon_bedrock:
        api_key = Config.shared().bedrock_access_key
        secret_key = Config.shared().bedrock_secret_key
        # langchain doesn't allow passing these, so ugly hack to set env vars
        os.environ["AWS_ACCESS_KEY_ID"] = api_key
        os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key
        return ChatBedrockConverse(
            **provider.provider_options,
        )
    elif provider.name == ModelProviderName.fireworks_ai:
        api_key = Config.shared().fireworks_api_key
        return ChatFireworks(**provider.provider_options, api_key=api_key)
    elif provider.name == ModelProviderName.ollama:
        # Ollama model naming is pretty flexible. We try a few versions of the model name
        potential_model_names = []
        if "model" in provider.provider_options:
            potential_model_names.append(provider.provider_options["model"])
        if "model_aliases" in provider.provider_options:
            potential_model_names.extend(provider.provider_options["model_aliases"])

        # Get the list of models Ollama supports
        ollama_connection = await get_ollama_connection()
        if ollama_connection is None:
            raise ValueError("Failed to connect to Ollama. Ensure Ollama is running.")

        for model_name in potential_model_names:
            if ollama_model_installed(ollama_connection, model_name):
                return ChatOllama(model=model_name, base_url=ollama_base_url())

        raise ValueError(f"Model {model_name} not installed on Ollama")
    elif provider.name == ModelProviderName.openrouter:
        raise ValueError("OpenRouter is not supported in Langchain adapter")
    else:
        raise ValueError(f"Invalid model or provider: {model_name} - {provider.name}")
LangChainModelType = BaseChatModel | Runnable[LanguageModelInput, Dict | BaseModel]

(LanguageModelInput resolves to PromptValue | str | Sequence[BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]].)
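To make the alias concrete, here is a small sketch of the two shapes it covers: a plain chat model, and the structured-output runnable that model() swaps in for structured tasks. The model name and schema are illustrative.

from langchain_ollama import ChatOllama

from kiln_ai.adapters.model_adapters.langchain_adapters import LangChainModelType

plain: LangChainModelType = ChatOllama(model="llama3.1")  # a BaseChatModel

# with_structured_output(..., include_raw=True) yields a Runnable producing a Dict,
# the second arm of the union. This mirrors what model() does for structured tasks.
structured: LangChainModelType = plain.with_structured_output(
    {
        "title": "task_response",
        "description": "A response from the task",
        "type": "object",
        "properties": {"answer": {"type": "string"}},
    },
    include_raw=True,
)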
class LangchainAdapter(BaseAdapter):
    _model: LangChainModelType | None = None

    def __init__(
        self,
        kiln_task: datamodel.Task,
        custom_model: BaseChatModel | None = None,
        model_name: str | None = None,
        provider: str | None = None,
        prompt_builder: BasePromptBuilder | None = None,
        tags: list[str] | None = None,
    ):
        if custom_model is not None:
            self._model = custom_model

            # Attempt to infer model provider and name from custom model
            if provider is None:
                provider = "custom.langchain:" + custom_model.__class__.__name__

            if model_name is None:
                model_name = "custom.langchain:unknown_model"
                if hasattr(custom_model, "model_name") and isinstance(
                    getattr(custom_model, "model_name"), str
                ):
                    model_name = "custom.langchain:" + getattr(
                        custom_model, "model_name"
                    )
                if hasattr(custom_model, "model") and isinstance(
                    getattr(custom_model, "model"), str
                ):
                    model_name = "custom.langchain:" + getattr(custom_model, "model")
        elif model_name is not None:
            # default provider name if not provided
            provider = provider or "custom.langchain.default_provider"
        else:
            raise ValueError(
                "model_name and provider must be provided if custom_model is not provided"
            )

        if model_name is None:
            raise ValueError("model_name must be provided")

        super().__init__(
            kiln_task,
            model_name=model_name,
            model_provider_name=provider,
            prompt_builder=prompt_builder,
            tags=tags,
        )

    async def model(self) -> LangChainModelType:
        # cached model
        if self._model:
            return self._model

        self._model = await self.langchain_model_from()

        # Decide if we want to use Langchain's structured output:
        # 1. Only for structured tasks
        # 2. Only if the provider's mode isn't json_instructions (only mode that doesn't use an API option for structured output capabilities)
        provider = await self.model_provider()
        use_lc_structured_output = (
            self.has_structured_output()
            and provider.structured_output_mode
            != StructuredOutputMode.json_instructions
        )

        if use_lc_structured_output:
            if not hasattr(self._model, "with_structured_output") or not callable(
                getattr(self._model, "with_structured_output")
            ):
                raise ValueError(
                    f"model {self._model} does not support structured output, cannot use output_json_schema"
                )
            # Langchain expects title/description to be at top level, on top of json schema
            output_schema = self.kiln_task.output_schema()
            if output_schema is None:
                raise ValueError(
                    f"output_json_schema is not valid json: {self.kiln_task.output_json_schema}"
                )
            output_schema["title"] = "task_response"
            output_schema["description"] = "A response from the task"
            with_structured_output_options = await self.get_structured_output_options(
                self.model_name, self.model_provider_name
            )
            self._model = self._model.with_structured_output(
                output_schema,
                include_raw=True,
                **with_structured_output_options,
            )
        return self._model

    async def _run(self, input: Dict | str) -> RunOutput:
        provider = await self.model_provider()
        model = await self.model()
        chain = model
        intermediate_outputs = {}

        prompt = await self.build_prompt()
        user_msg = self.prompt_builder.build_user_message(input)
        messages = [
            SystemMessage(content=prompt),
            HumanMessage(content=user_msg),
        ]

        # Handle chain of thought if enabled. 3 Modes:
        # 1. Unstructured output: just call the LLM, with prompting for thinking
        # 2. "Thinking" LLM designed to output thinking in a structured format: we make 1 call to the LLM, which outputs thinking in a structured format.
        # 3. Normal LLM with structured output: we make 2 calls to the LLM - one for thinking and one for the final response. This helps us use the LLM's structured output modes (json_schema, tools, etc), which can't be used in a single call.
        cot_prompt = self.prompt_builder.chain_of_thought_prompt()
        thinking_llm = provider.reasoning_capable

        if cot_prompt and (not self.has_structured_output() or thinking_llm):
            # Case 1 or 2: Unstructured output, or "Thinking" LLM designed to output thinking in a structured format
            messages.append({"role": "system", "content": cot_prompt})
        elif not thinking_llm and cot_prompt and self.has_structured_output():
            # Case 3: Normal LLM with structured output
            # Base model (without structured output) used for COT message
            base_model = await self.langchain_model_from()
            messages.append(
                SystemMessage(content=cot_prompt),
            )

            cot_messages = [*messages]
            cot_response = await base_model.ainvoke(cot_messages)
            intermediate_outputs["chain_of_thought"] = cot_response.content
            messages.append(AIMessage(content=cot_response.content))
            messages.append(
                SystemMessage(content="Considering the above, return a final result.")
            )

        response = await chain.ainvoke(messages)

        # Langchain may have already parsed the response into structured output, so use that if available.
        # However, a plain string may still be fixed at the parsing layer, so not being structured isn't a critical failure (yet)
        if (
            self.has_structured_output()
            and isinstance(response, dict)
            and "parsed" in response
            and isinstance(response["parsed"], dict)
        ):
            structured_response = response["parsed"]
            return RunOutput(
                output=self._munge_response(structured_response),
                intermediate_outputs=intermediate_outputs,
            )

        if not isinstance(response, BaseMessage):
            raise RuntimeError(f"response is not a BaseMessage: {response}")

        text_content = response.content
        if not isinstance(text_content, str):
            raise RuntimeError(f"response is not a string: {text_content}")

        return RunOutput(
            output=text_content,
            intermediate_outputs=intermediate_outputs,
        )

    def adapter_info(self) -> AdapterInfo:
        return AdapterInfo(
            model_name=self.model_name,
            model_provider=self.model_provider_name,
            adapter_name="kiln_langchain_adapter",
            prompt_builder_name=self.prompt_builder.__class__.prompt_builder_name(),
            prompt_id=self.prompt_builder.prompt_id(),
        )

    def _munge_response(self, response: Dict) -> Dict:
        # Mistral Large tool calling format is a bit different. Convert to standard format.
        if (
            "name" in response
            and response["name"] == "task_response"
            and "arguments" in response
        ):
            return response["arguments"]
        return response

    async def get_structured_output_options(
        self, model_name: str, model_provider_name: str
    ) -> Dict[str, Any]:
        provider = await self.model_provider()
        if not provider:
            return {}

        options = {}
        # We may need to add some provider specific logic here if providers use different names for the same mode, but everyone is copying openai for now
        match provider.structured_output_mode:
            case StructuredOutputMode.function_calling:
                options["method"] = "function_calling"
            case StructuredOutputMode.json_mode:
                options["method"] = "json_mode"
            case StructuredOutputMode.json_schema:
                options["method"] = "json_schema"
            case StructuredOutputMode.json_instructions:
                # JSON done via instructions in prompt, not via API
                pass
            case StructuredOutputMode.default:
                # Let langchain decide the default
                pass
            case _:
                raise ValueError(
                    f"Unhandled enum value: {provider.structured_output_mode}"
                )
                # triggers pyright warning if I miss a case
                return NoReturn

        return options

    async def langchain_model_from(self) -> BaseChatModel:
        provider = await self.model_provider()
        return await langchain_model_from_provider(provider, self.model_name)
Base class for AI model adapters that handle task execution.
This abstract class provides the foundation for implementing model-specific adapters that can process tasks with structured or unstructured inputs/outputs. It handles input/output validation, prompt building, and run tracking.
Attributes:
    prompt_builder (BasePromptBuilder): Builder for constructing prompts for the model
    kiln_task (Task): The task configuration and metadata
    output_schema (dict | None): JSON schema for validating structured outputs
    input_schema (dict | None): JSON schema for validating structured inputs
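For structured tasks the adapter keys off the task's output schema. A sketch of a task carrying an output_json_schema, the one Task field this module reads directly; the other Task fields shown are assumptions for illustration.

import json

import kiln_ai.datamodel as datamodel

structured_task = datamodel.Task(
    name="extract_people",
    instruction="Extract the people mentioned in the text.",
    output_json_schema=json.dumps(
        {
            "type": "object",
            "properties": {"people": {"type": "array", "items": {"type": "string"}}},
            "required": ["people"],
        }
    ),
)
# With a schema set, has_structured_output() is true and model() wraps the langchain
# model with with_structured_output(), unless the provider uses json_instructions.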
LangchainAdapter(
    kiln_task: kiln_ai.datamodel.Task,
    custom_model: langchain_core.language_models.chat_models.BaseChatModel | None = None,
    model_name: str | None = None,
    provider: str | None = None,
    prompt_builder: kiln_ai.adapters.prompt_builders.BasePromptBuilder | None = None,
    tags: list[str] | None = None,
)
    def __init__(
        self,
        kiln_task: datamodel.Task,
        custom_model: BaseChatModel | None = None,
        model_name: str | None = None,
        provider: str | None = None,
        prompt_builder: BasePromptBuilder | None = None,
        tags: list[str] | None = None,
    ):
        if custom_model is not None:
            self._model = custom_model

            # Attempt to infer model provider and name from custom model
            if provider is None:
                provider = "custom.langchain:" + custom_model.__class__.__name__

            if model_name is None:
                model_name = "custom.langchain:unknown_model"
                if hasattr(custom_model, "model_name") and isinstance(
                    getattr(custom_model, "model_name"), str
                ):
                    model_name = "custom.langchain:" + getattr(
                        custom_model, "model_name"
                    )
                if hasattr(custom_model, "model") and isinstance(
                    getattr(custom_model, "model"), str
                ):
                    model_name = "custom.langchain:" + getattr(custom_model, "model")
        elif model_name is not None:
            # default provider name if not provided
            provider = provider or "custom.langchain.default_provider"
        else:
            raise ValueError(
                "model_name and provider must be provided if custom_model is not provided"
            )

        if model_name is None:
            raise ValueError("model_name must be provided")

        super().__init__(
            kiln_task,
            model_name=model_name,
            model_provider_name=provider,
            prompt_builder=prompt_builder,
            tags=tags,
        )
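A sketch of the name inference above for a custom model given no explicit provider or model_name. The resulting labels depend on which attribute the wrapped class exposes (model_name or model); the values shown are illustrative, and structured_task is the task from the sketch above.

from langchain_ollama import ChatOllama

from kiln_ai.adapters.model_adapters.langchain_adapters import LangchainAdapter

custom = ChatOllama(model="llama3.1")
adapter = LangchainAdapter(kiln_task=structured_task, custom_model=custom)
# adapter.model_provider_name -> "custom.langchain:ChatOllama"  (wrapped class name)
# adapter.model_name          -> "custom.langchain:llama3.1"    (ChatOllama exposes .model)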
async def model(self) -> LangChainModelType:
    async def model(self) -> LangChainModelType:
        # cached model
        if self._model:
            return self._model

        self._model = await self.langchain_model_from()

        # Decide if we want to use Langchain's structured output:
        # 1. Only for structured tasks
        # 2. Only if the provider's mode isn't json_instructions (only mode that doesn't use an API option for structured output capabilities)
        provider = await self.model_provider()
        use_lc_structured_output = (
            self.has_structured_output()
            and provider.structured_output_mode
            != StructuredOutputMode.json_instructions
        )

        if use_lc_structured_output:
            if not hasattr(self._model, "with_structured_output") or not callable(
                getattr(self._model, "with_structured_output")
            ):
                raise ValueError(
                    f"model {self._model} does not support structured output, cannot use output_json_schema"
                )
            # Langchain expects title/description to be at top level, on top of json schema
            output_schema = self.kiln_task.output_schema()
            if output_schema is None:
                raise ValueError(
                    f"output_json_schema is not valid json: {self.kiln_task.output_json_schema}"
                )
            output_schema["title"] = "task_response"
            output_schema["description"] = "A response from the task"
            with_structured_output_options = await self.get_structured_output_options(
                self.model_name, self.model_provider_name
            )
            self._model = self._model.with_structured_output(
                output_schema,
                include_raw=True,
                **with_structured_output_options,
            )
        return self._model
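A sketch of the wrapping performed above. base_chat_model stands in for the provider model returned by langchain_model_from(), and the schema contents are illustrative. With include_raw=True langchain returns a dict rather than a bare message, which is why _run() later checks response["parsed"].

from langchain_ollama import ChatOllama

base_chat_model = ChatOllama(model="llama3.1")  # stand-in for langchain_model_from()

output_schema = {
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
}
# Langchain wants title/description at the top level of the JSON schema.
output_schema["title"] = "task_response"
output_schema["description"] = "A response from the task"

structured_model = base_chat_model.with_structured_output(
    output_schema,
    include_raw=True,
    # plus **options from get_structured_output_options(), e.g. {"method": "json_schema"}
)
# Invoking structured_model returns roughly:
# {"raw": <AIMessage>, "parsed": {"answer": ...}, "parsing_error": None}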
    def adapter_info(self) -> AdapterInfo:
        return AdapterInfo(
            model_name=self.model_name,
            model_provider=self.model_provider_name,
            adapter_name="kiln_langchain_adapter",
            prompt_builder_name=self.prompt_builder.__class__.prompt_builder_name(),
            prompt_id=self.prompt_builder.prompt_id(),
        )
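adapter_info() returns plain metadata about how a run was produced. An illustrative result for the custom-Ollama adapter sketched earlier; the prompt builder values depend on the configured BasePromptBuilder.

info = adapter.adapter_info()  # adapter from the sketch above
# AdapterInfo(
#     model_name="custom.langchain:llama3.1",
#     model_provider="custom.langchain:ChatOllama",
#     adapter_name="kiln_langchain_adapter",
#     prompt_builder_name=...,  # from the prompt builder class
#     prompt_id=...,
# )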
async def get_structured_output_options(self, model_name: str, model_provider_name: str) -> Dict[str, Any]:
    async def get_structured_output_options(
        self, model_name: str, model_provider_name: str
    ) -> Dict[str, Any]:
        provider = await self.model_provider()
        if not provider:
            return {}

        options = {}
        # We may need to add some provider specific logic here if providers use different names for the same mode, but everyone is copying openai for now
        match provider.structured_output_mode:
            case StructuredOutputMode.function_calling:
                options["method"] = "function_calling"
            case StructuredOutputMode.json_mode:
                options["method"] = "json_mode"
            case StructuredOutputMode.json_schema:
                options["method"] = "json_schema"
            case StructuredOutputMode.json_instructions:
                # JSON done via instructions in prompt, not via API
                pass
            case StructuredOutputMode.default:
                # Let langchain decide the default
                pass
            case _:
                raise ValueError(
                    f"Unhandled enum value: {provider.structured_output_mode}"
                )
                # triggers pyright warning if I miss a case
                return NoReturn

        return options
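A sketch of how the returned options are consumed: model() unpacks them into with_structured_output(). The helper below is illustrative and must be awaited from an async context.

from kiln_ai.adapters.model_adapters.langchain_adapters import LangchainAdapter


async def structured_options_for(adapter: LangchainAdapter) -> dict:
    # json_schema mode -> {"method": "json_schema"}; function_calling and json_mode
    # map the same way; json_instructions and default return an empty dict.
    options = await adapter.get_structured_output_options(
        adapter.model_name, adapter.model_provider_name
    )
    # model() then calls:
    #   self._model.with_structured_output(schema, include_raw=True, **options)
    return options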
async def langchain_model_from_provider(
    provider: kiln_ai.adapters.ml_model_list.KilnModelProvider,
    model_name: str,
) -> langchain_core.language_models.chat_models.BaseChatModel:
async def langchain_model_from_provider(
    provider: KilnModelProvider, model_name: str
) -> BaseChatModel:
    if provider.name == ModelProviderName.openai:
        # We use the OpenAICompatibleAdapter for OpenAI
        raise ValueError("OpenAI is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.openai_compatible:
        # We use the OpenAICompatibleAdapter for OpenAI compatible
        raise ValueError("OpenAI compatible is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.groq:
        api_key = Config.shared().groq_api_key
        if api_key is None:
            raise ValueError(
                "Attempted to use Groq without an API key set. "
                "Get your API key from https://console.groq.com/keys"
            )
        return ChatGroq(**provider.provider_options, groq_api_key=api_key)  # type: ignore[arg-type]
    elif provider.name == ModelProviderName.amazon_bedrock:
        api_key = Config.shared().bedrock_access_key
        secret_key = Config.shared().bedrock_secret_key
        # langchain doesn't allow passing these, so ugly hack to set env vars
        os.environ["AWS_ACCESS_KEY_ID"] = api_key
        os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key
        return ChatBedrockConverse(
            **provider.provider_options,
        )
    elif provider.name == ModelProviderName.fireworks_ai:
        api_key = Config.shared().fireworks_api_key
        return ChatFireworks(**provider.provider_options, api_key=api_key)
    elif provider.name == ModelProviderName.ollama:
        # Ollama model naming is pretty flexible. We try a few versions of the model name
        potential_model_names = []
        if "model" in provider.provider_options:
            potential_model_names.append(provider.provider_options["model"])
        if "model_aliases" in provider.provider_options:
            potential_model_names.extend(provider.provider_options["model_aliases"])

        # Get the list of models Ollama supports
        ollama_connection = await get_ollama_connection()
        if ollama_connection is None:
            raise ValueError("Failed to connect to Ollama. Ensure Ollama is running.")

        for model_name in potential_model_names:
            if ollama_model_installed(ollama_connection, model_name):
                return ChatOllama(model=model_name, base_url=ollama_base_url())

        raise ValueError(f"Model {model_name} not installed on Ollama")
    elif provider.name == ModelProviderName.openrouter:
        raise ValueError("OpenRouter is not supported in Langchain adapter")
    else:
        raise ValueError(f"Invalid model or provider: {model_name} - {provider.name}")
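A sketch of resolving a provider into a langchain chat model for a locally installed Ollama model. The KilnModelProvider field values are illustrative (real instances come from the built-in model list in kiln_ai.adapters.ml_model_list), and Ollama must be running for the connection check to pass.

import asyncio

from kiln_ai.adapters.ml_model_list import KilnModelProvider, ModelProviderName
from kiln_ai.adapters.model_adapters.langchain_adapters import (
    langchain_model_from_provider,
)


async def main() -> None:
    provider = KilnModelProvider(
        name=ModelProviderName.ollama,
        provider_options={"model": "llama3.1", "model_aliases": ["llama3.1:latest"]},
    )
    chat_model = await langchain_model_from_provider(provider, "llama_3_1_8b")
    print(type(chat_model).__name__)  # ChatOllama, if the model is installed


asyncio.run(main())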