kiln_ai.adapters.ml_model_list

"""
Provides model configuration and management for various LLM providers and models.
This module handles the integration with different AI model providers and their respective models,
including configuration, validation, and instantiation of language models.
"""

from enum import Enum
from typing import Dict, List

from pydantic import BaseModel

from kiln_ai.datamodel import StructuredOutputMode


class ModelProviderName(str, Enum):
    """
    Enumeration of supported AI model providers.
    """

    openai = "openai"
    groq = "groq"
    amazon_bedrock = "amazon_bedrock"
    ollama = "ollama"
    openrouter = "openrouter"
    fireworks_ai = "fireworks_ai"
    kiln_fine_tune = "kiln_fine_tune"
    kiln_custom_registry = "kiln_custom_registry"
    openai_compatible = "openai_compatible"

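# Example (illustrative, not part of the module): because these enums subclass
# str, raw provider strings from config files compare and convert cleanly:
#
#   ModelProviderName("openrouter") is ModelProviderName.openrouter  # True
#   ModelProviderName.openrouter == "openrouter"                     # True
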
class ModelFamily(str, Enum):
    """
    Enumeration of supported model families/architectures.
    """

    gpt = "gpt"
    llama = "llama"
    phi = "phi"
    mistral = "mistral"
    gemma = "gemma"
    gemini = "gemini"
    claude = "claude"
    mixtral = "mixtral"
    qwen = "qwen"
    deepseek = "deepseek"


class ModelName(str, Enum):
    """
    Enumeration of specific model versions supported by the system.
    Where models have instruct and raw versions, instruct is the default and raw is specified.
    """

    llama_3_1_8b = "llama_3_1_8b"
    llama_3_1_70b = "llama_3_1_70b"
    llama_3_1_405b = "llama_3_1_405b"
    llama_3_2_1b = "llama_3_2_1b"
    llama_3_2_3b = "llama_3_2_3b"
    llama_3_2_11b = "llama_3_2_11b"
    llama_3_2_90b = "llama_3_2_90b"
    llama_3_3_70b = "llama_3_3_70b"
    gpt_4o_mini = "gpt_4o_mini"
    gpt_4o = "gpt_4o"
    phi_3_5 = "phi_3_5"
    phi_4 = "phi_4"
    mistral_large = "mistral_large"
    mistral_nemo = "mistral_nemo"
    gemma_2_2b = "gemma_2_2b"
    gemma_2_9b = "gemma_2_9b"
    gemma_2_27b = "gemma_2_27b"
    claude_3_5_haiku = "claude_3_5_haiku"
    claude_3_5_sonnet = "claude_3_5_sonnet"
    gemini_1_5_flash = "gemini_1_5_flash"
    gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
    gemini_1_5_pro = "gemini_1_5_pro"
    nemotron_70b = "nemotron_70b"
    mixtral_8x7b = "mixtral_8x7b"
    qwen_2p5_7b = "qwen_2p5_7b"
    qwen_2p5_72b = "qwen_2p5_72b"
    deepseek_3 = "deepseek_3"
    deepseek_r1 = "deepseek_r1"


class ModelParserID(str, Enum):
    """
    Enumeration of supported model parsers.
    """

    r1_thinking = "r1_thinking"

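# Example (illustrative, an assumption based on the reasoning_capable docs
# below): the r1_thinking parser targets R1-style output of the form
#
#   <think> ...chain of thought... </think> final answer
#
# separating the thinking block from the final response.
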
class KilnModelProvider(BaseModel):
    """
    Configuration for a specific model provider.

    Attributes:
        name: The provider's identifier
        supports_structured_output: Whether the provider supports structured output formats
        supports_data_gen: Whether the provider supports data generation
        untested_model: Whether the model is untested (typically user-added). If True, the supports_* fields are not applicable.
        provider_finetune_id: The finetune ID for the provider, if applicable
        provider_options: Additional provider-specific configuration options
        structured_output_mode: The mode to use when calling the model for structured output, if it was trained with structured output.
        parser: A parser to use for the model, if applicable
        reasoning_capable: Whether the model is designed to output its thinking in a structured format (e.g. <think></think>). If so, we don't use chain-of-thought across two calls; we ask for the thinking and the final response in the same call.
    """

    name: ModelProviderName
    supports_structured_output: bool = True
    supports_data_gen: bool = True
    untested_model: bool = False
    provider_finetune_id: str | None = None
    provider_options: Dict = {}
    structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
    parser: ModelParserID | None = None
    reasoning_capable: bool = False

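# Example (illustrative, not part of the module): a minimal provider entry.
# The supports_* flags default to True, so only overrides need to be stated:
#
#   provider = KilnModelProvider(
#       name=ModelProviderName.ollama,
#       provider_options={"model": "llama3.1:8b"},
#       structured_output_mode=StructuredOutputMode.json_schema,
#   )
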
class KilnModel(BaseModel):
    """
    Configuration for a specific AI model.

    Attributes:
        family: The model's architecture family
        name: The model's identifier
        friendly_name: Human-readable name for the model
        providers: List of providers that offer this model
    """

    family: str
    name: str
    friendly_name: str
    providers: List[KilnModelProvider]


built_in_models: List[KilnModel] = [
    # GPT 4o Mini
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o_mini,
        friendly_name="GPT 4o Mini",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o-mini"},
                provider_finetune_id="gpt-4o-mini-2024-07-18",
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o-mini"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # GPT 4o
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o,
        friendly_name="GPT 4o",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o"},
                provider_finetune_id="gpt-4o-2024-08-06",
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Claude 3.5 Haiku
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_haiku,
        friendly_name="Claude 3.5 Haiku",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "anthropic/claude-3-5-haiku"},
            ),
        ],
    ),
    # Claude 3.5 Sonnet
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_sonnet,
        friendly_name="Claude 3.5 Sonnet",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "anthropic/claude-3.5-sonnet"},
            ),
        ],
    ),
    # DeepSeek v3
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_3,
        friendly_name="DeepSeek v3",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "deepseek/deepseek-chat"},
                structured_output_mode=StructuredOutputMode.function_calling,
            ),
        ],
    ),
    # DeepSeek R1
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1,
        friendly_name="DeepSeek R1",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "deepseek/deepseek-r1"},
                # No custom parser -- OpenRouter implements the parsing itself
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={"model": "accounts/fireworks/models/deepseek-r1"},
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                # I want your RAM
                name=ModelProviderName.ollama,
                provider_options={"model": "deepseek-r1:671b"},
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
        ],
    ),
    # Gemini 1.5 Pro
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_pro,
        friendly_name="Gemini 1.5 Pro",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-pro-1.5"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Gemini 1.5 Flash
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash,
        friendly_name="Gemini 1.5 Flash",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-flash-1.5"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Gemini 1.5 Flash 8B
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash_8b,
        friendly_name="Gemini 1.5 Flash 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-flash-1.5-8b"},
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Nemotron 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.nemotron_70b,
        friendly_name="Nemotron 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "nvidia/llama-3.1-nemotron-70b-instruct"},
            ),
        ],
    ),
    # Llama 3.1 8B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_8b,
        friendly_name="Llama 3.1 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.1-8b-instant"},
            ),
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "meta.llama3-1-8b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "llama3.1:8b",
                    "model_aliases": ["llama3.1"],  # 8b is the default
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-8b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
                structured_output_mode=StructuredOutputMode.json_mode,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-8b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_70b,
        friendly_name="Llama 3.1 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.1-70b-versatile"},
            ),
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "meta.llama3-1-70b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-70b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.1:70b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-70b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 405B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_405b,
        friendly_name="Llama 3.1 405B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-405b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.1:405b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-405b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-405b-instruct"
                },
            ),
        ],
    ),
    # Mistral Nemo
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_nemo,
        friendly_name="Mistral Nemo",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mistral-nemo"},
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
        ],
    ),
    # Mistral Large
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_large,
        friendly_name="Mistral Large",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "mistral.mistral-large-2407-v1:0",
                    "region_name": "us-west-2",  # only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "mistralai/mistral-large"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "mistral-large"},
            ),
        ],
    ),
    # Llama 3.2 1B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_1b,
        friendly_name="Llama 3.2 1B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                provider_options={"model": "meta-llama/llama-3.2-1b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2:1b"},
            ),
        ],
    ),
    # Llama 3.2 3B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_3b,
        friendly_name="Llama 3.2 3B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-3b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
                structured_output_mode=StructuredOutputMode.json_mode,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-3b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.2 11B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_11b,
        friendly_name="Llama 3.2 11B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-11b-vision-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.2-vision"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct"
                },
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Llama 3.2 90B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_90b,
        friendly_name="Llama 3.2 90B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-90b-vision-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.2-vision:90b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct"
                },
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Llama 3.3 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_3_70b,
        friendly_name="Llama 3.3 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.3-70b-instruct"},
                structured_output_mode=StructuredOutputMode.json_schema,
                # OpenRouter is not reliable with json_schema or tools. json_schema
                # sometimes works, so we force it, but it's inconsistent, so this
                # provider is still not recommended.
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.groq,
                supports_structured_output=True,
                supports_data_gen=True,
                provider_options={"model": "llama-3.3-70b-versatile"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.3"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Finetuning not live yet:
                # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p3-70b-instruct"
                },
            ),
        ],
    ),
    # Phi 3.5
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_3_5,
        friendly_name="Phi 3.5",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "phi3.5"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "microsoft/phi-3.5-mini-128k-instruct"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                structured_output_mode=StructuredOutputMode.json_mode,
                provider_options={
                    "model": "accounts/fireworks/models/phi-3-vision-128k-instruct"
                },
            ),
        ],
    ),
    # Phi 4
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_4,
        friendly_name="Phi 4",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "phi4"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                # JSON mode not consistent enough to enable in UI
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "microsoft/phi-4"},
            ),
        ],
    ),
    # Gemma 2 2.6B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_2b,
        friendly_name="Gemma 2 2B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:2b",
                },
            ),
        ],
    ),
    # Gemma 2 9B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_9b,
        friendly_name="Gemma 2 9B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:9b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-9b-it"},
            ),
            # Fireworks AI errors out (it doesn't allow the system role). Excluded until resolved.
        ],
    ),
    # Gemma 2 27B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_27b,
        friendly_name="Gemma 2 27B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:27b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-27b-it"},
            ),
        ],
    ),
    # Mixtral 8x7B
    KilnModel(
        family=ModelFamily.mixtral,
        name=ModelName.mixtral_8x7b,
        friendly_name="Mixtral 8x7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mixtral-8x7b-instruct"},
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "mixtral"},
            ),
        ],
    ),
    # Qwen 2.5 7B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_7b,
        friendly_name="Qwen 2.5 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-7b-instruct"},
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Qwen 2.5 72B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_72b,
        friendly_name="Qwen 2.5 72B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-72b-instruct"},
                # Not consistent with structured data. Works sometimes, but not often.
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5:72b"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={
                    "model": "accounts/fireworks/models/qwen2p5-72b-instruct"
                },
                # Fireworks will start tuning, but it never finishes:
                # provider_finetune_id="accounts/fireworks/models/qwen2p5-72b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
            ),
        ],
    ),
]
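
A minimal lookup sketch (illustrative; the helper name and error handling are
assumptions, not part of this module):

    def get_model(name: ModelName) -> KilnModel:
        # Scan the registry for the built-in model with the given name.
        for model in built_in_models:
            if model.name == name:
                return model
        raise ValueError(f"Unknown built-in model: {name}")

    # Fetch Llama 3.1 8B, then pick its Ollama provider entry, if any.
    model = get_model(ModelName.llama_3_1_8b)
    ollama = next(
        (p for p in model.providers if p.name == ModelProviderName.ollama), None
    )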
reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/qwen2p5-72b-instruct'}, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False)])]
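The registry above is a flat list of KilnModel entries, each carrying one KilnModelProvider per backend that serves the model, so a lookup is a simple nested scan. As a minimal sketch of querying it — assuming the list is exported from this module under a name such as built_in_models (the variable name is not visible in this excerpt) — the hypothetical helper below returns the Ollama tag registered for a given ModelName:

    from typing import Optional

    from kiln_ai.adapters.ml_model_list import (
        ModelName,
        ModelProviderName,
        built_in_models,  # assumed export name for the registry shown above
    )


    def ollama_tag(model_name: ModelName) -> Optional[str]:
        # Find the matching model (ModelName is a str enum, so it compares
        # directly against the model's string `name` field), then return the
        # Ollama provider's model tag from provider_options, if registered.
        for model in built_in_models:
            if model.name != model_name:
                continue
            for provider in model.providers:
                if provider.name == ModelProviderName.ollama:
                    return provider.provider_options.get("model")
        return None


    # Per the entries above, ollama_tag(ModelName.llama_3_3_70b) would
    # return 'llama3.3'; a model with no Ollama provider yields None.

The same scan pattern applies to any other per-provider field, such as picking a provider whose supports_structured_output flag is True or reading its structured_output_mode.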