kiln_ai.adapters.ml_model_list

   1from enum import Enum
   2from typing import Dict, List, Literal
   3
   4from pydantic import BaseModel
   5
   6from kiln_ai.datamodel import StructuredOutputMode
   7
   8"""
   9Provides model configuration and management for various LLM providers and models.
  10This module handles the integration with different AI model providers and their respective models,
  11including configuration, validation, and instantiation of language models.
  12"""
  13
  14
class ModelProviderName(str, Enum):
    """
    Enumeration of supported AI model providers.

    Values are stable string identifiers persisted in configs/datamodels;
    do not rename existing members.
    """

    openai = "openai"
    groq = "groq"
    amazon_bedrock = "amazon_bedrock"
    ollama = "ollama"
    openrouter = "openrouter"
    fireworks_ai = "fireworks_ai"
    # Pseudo-providers: fine-tunes and user-registered custom models routed through Kiln itself.
    kiln_fine_tune = "kiln_fine_tune"
    kiln_custom_registry = "kiln_custom_registry"
    # Generic escape hatch for any endpoint speaking the OpenAI API shape.
    openai_compatible = "openai_compatible"
    anthropic = "anthropic"
    gemini_api = "gemini_api"
    azure_openai = "azure_openai"
    huggingface = "huggingface"
    vertex = "vertex"
    together_ai = "together_ai"
  36
class ModelFamily(str, Enum):
    """
    Enumeration of supported model families/architectures.

    Used as the `family` value on `KilnModel` entries to group related
    model versions (e.g. all Llama releases share `llama`).
    """

    gpt = "gpt"
    llama = "llama"
    phi = "phi"
    mistral = "mistral"
    gemma = "gemma"
    gemini = "gemini"
    claude = "claude"
    mixtral = "mixtral"
    qwen = "qwen"
    deepseek = "deepseek"
    dolphin = "dolphin"
    grok = "grok"
  55
  56# Where models have instruct and raw versions, instruct is default and raw is specified
class ModelName(str, Enum):
    """
    Enumeration of specific model versions supported by the system.
    Where models have instruct and raw versions, instruct is default and raw is specified.

    Naming convention: `p` stands in for a decimal point in size suffixes
    (e.g. `qwen_2p5_7b` is Qwen 2.5 7B). The `_low`/`_medium`/`_high`
    suffixes on OpenAI o-series entries select a reasoning-effort level.
    """

    # Llama family
    llama_3_1_8b = "llama_3_1_8b"
    llama_3_1_70b = "llama_3_1_70b"
    llama_3_1_405b = "llama_3_1_405b"
    llama_3_2_1b = "llama_3_2_1b"
    llama_3_2_3b = "llama_3_2_3b"
    llama_3_2_11b = "llama_3_2_11b"
    llama_3_2_90b = "llama_3_2_90b"
    llama_3_3_70b = "llama_3_3_70b"
    # OpenAI GPT / o-series
    gpt_4o_mini = "gpt_4o_mini"
    gpt_4o = "gpt_4o"
    gpt_o1_low = "gpt_o1_low"
    gpt_o1_medium = "gpt_o1_medium"
    gpt_o1_high = "gpt_o1_high"
    gpt_o3_mini_low = "gpt_o3_mini_low"
    gpt_o3_mini_medium = "gpt_o3_mini_medium"
    gpt_o3_mini_high = "gpt_o3_mini_high"
    # Microsoft Phi
    phi_3_5 = "phi_3_5"
    phi_4 = "phi_4"
    phi_4_5p6b = "phi_4_5p6b"
    phi_4_mini = "phi_4_mini"
    # Mistral
    mistral_large = "mistral_large"
    mistral_nemo = "mistral_nemo"
    # Google Gemma / Gemini
    gemma_2_2b = "gemma_2_2b"
    gemma_2_9b = "gemma_2_9b"
    gemma_2_27b = "gemma_2_27b"
    gemma_3_1b = "gemma_3_1b"
    gemma_3_4b = "gemma_3_4b"
    gemma_3_12b = "gemma_3_12b"
    gemma_3_27b = "gemma_3_27b"
    # Anthropic Claude
    claude_3_5_haiku = "claude_3_5_haiku"
    claude_3_5_sonnet = "claude_3_5_sonnet"
    claude_3_7_sonnet = "claude_3_7_sonnet"
    claude_3_7_sonnet_thinking = "claude_3_7_sonnet_thinking"
    gemini_1_5_flash = "gemini_1_5_flash"
    gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
    gemini_1_5_pro = "gemini_1_5_pro"
    gemini_2_0_flash = "gemini_2_0_flash"
    # Others
    nemotron_70b = "nemotron_70b"
    mixtral_8x7b = "mixtral_8x7b"
    qwen_2p5_7b = "qwen_2p5_7b"
    qwen_2p5_14b = "qwen_2p5_14b"
    qwen_2p5_72b = "qwen_2p5_72b"
    qwq_32b = "qwq_32b"
    # DeepSeek base and R1 distillations
    deepseek_3 = "deepseek_3"
    deepseek_r1 = "deepseek_r1"
    mistral_small_3 = "mistral_small_3"
    deepseek_r1_distill_qwen_32b = "deepseek_r1_distill_qwen_32b"
    deepseek_r1_distill_llama_70b = "deepseek_r1_distill_llama_70b"
    deepseek_r1_distill_qwen_14b = "deepseek_r1_distill_qwen_14b"
    deepseek_r1_distill_qwen_1p5b = "deepseek_r1_distill_qwen_1p5b"
    deepseek_r1_distill_qwen_7b = "deepseek_r1_distill_qwen_7b"
    deepseek_r1_distill_llama_8b = "deepseek_r1_distill_llama_8b"
    dolphin_2_9_8x22b = "dolphin_2_9_8x22b"
    grok_2 = "grok_2"
 118
class ModelParserID(str, Enum):
    """
    Enumeration of supported model output parsers.

    A parser post-processes raw model output (see `KilnModelProvider.parser`).
    """

    # Splits R1-style output into thinking and final-answer segments.
    r1_thinking = "r1_thinking"
 126
class KilnModelProvider(BaseModel):
    """
    Configuration for a specific model provider.

    Attributes:
        name: The provider's identifier
        model_id: The provider-specific model ID used when calling its API, if applicable
        supports_structured_output: Whether the provider supports structured output formats
        supports_data_gen: Whether the provider supports data generation
        untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable.
        provider_finetune_id: The finetune ID for the provider, if applicable
        structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output.
        parser: A parser to use for the model, if applicable
        reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.
        supports_logprobs: Whether the provider can return token log probabilities for this model
    """

    name: ModelProviderName
    model_id: str | None = None
    supports_structured_output: bool = True
    supports_data_gen: bool = True
    untested_model: bool = False
    provider_finetune_id: str | None = None
    structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
    parser: ModelParserID | None = None
    reasoning_capable: bool = False
    supports_logprobs: bool = False

    # TODO P1: Need a more generalized way to handle custom provider parameters.
    # Making them quite declarative here for now, isolating provider specific logic
    # to this file. Later I should be able to override anything in this file via config.
    # Apply OpenRouter-specific request options for R1-style models.
    r1_openrouter_options: bool = False
    # Ask OpenRouter to include the model's reasoning in its response.
    require_openrouter_reasoning: bool = False
    # Apply OpenRouter-specific request options when requesting logprobs.
    logprobs_openrouter_options: bool = False
    # Skip OpenRouter's "required parameters" on requests for this model.
    openrouter_skip_required_parameters: bool = False
    # Reasoning-effort level; used by the OpenAI o-series entries in this file.
    thinking_level: Literal["low", "medium", "high"] | None = None
    # Alternate Ollama model tags that resolve to this model (e.g. "llama3.1" for "llama3.1:8b").
    ollama_model_aliases: List[str] | None = None
    # Enable Anthropic's extended thinking feature for this model.
    anthropic_extended_thinking: bool = False
 164
class KilnModel(BaseModel):
    """
    Configuration for a specific AI model.

    Attributes:
        family: The model's architecture family (see ModelFamily)
        name: The model's identifier (see ModelName)
        friendly_name: Human-readable name for the model
        providers: List of provider-specific configurations that offer this model
    """

    # NOTE(review): family/name are typed `str` but populated with
    # ModelFamily/ModelName enum values in built_in_models — presumably kept
    # as plain strings to allow custom/user-added models; confirm before tightening.
    family: str
    name: str
    friendly_name: str
    providers: List[KilnModelProvider]
 182
 183built_in_models: List[KilnModel] = [
 184    # GPT 4o Mini
 185    KilnModel(
 186        family=ModelFamily.gpt,
 187        name=ModelName.gpt_4o_mini,
 188        friendly_name="GPT 4o Mini",
 189        providers=[
 190            KilnModelProvider(
 191                name=ModelProviderName.openai,
 192                model_id="gpt-4o-mini",
 193                provider_finetune_id="gpt-4o-mini-2024-07-18",
 194                structured_output_mode=StructuredOutputMode.json_schema,
 195                supports_logprobs=True,
 196            ),
 197            KilnModelProvider(
 198                name=ModelProviderName.openrouter,
 199                model_id="openai/gpt-4o-mini",
 200                structured_output_mode=StructuredOutputMode.json_schema,
 201                supports_logprobs=True,
 202                logprobs_openrouter_options=True,
 203            ),
 204            KilnModelProvider(
 205                name=ModelProviderName.azure_openai,
 206                model_id="gpt-4o-mini",
 207            ),
 208        ],
 209    ),
 210    # GPT 4o
 211    KilnModel(
 212        family=ModelFamily.gpt,
 213        name=ModelName.gpt_4o,
 214        friendly_name="GPT 4o",
 215        providers=[
 216            KilnModelProvider(
 217                name=ModelProviderName.openai,
 218                model_id="gpt-4o",
 219                provider_finetune_id="gpt-4o-2024-08-06",
 220                structured_output_mode=StructuredOutputMode.json_schema,
 221                supports_logprobs=True,
 222            ),
 223            KilnModelProvider(
 224                name=ModelProviderName.openrouter,
 225                model_id="openai/gpt-4o",
 226                structured_output_mode=StructuredOutputMode.json_schema,
 227                supports_logprobs=True,
 228                logprobs_openrouter_options=True,
 229            ),
 230            KilnModelProvider(
 231                name=ModelProviderName.azure_openai,
 232                model_id="gpt-4o",
 233            ),
 234        ],
 235    ),
 236    # GPT o3 Mini Low
 237    KilnModel(
 238        family=ModelFamily.gpt,
 239        name=ModelName.gpt_o3_mini_low,
 240        friendly_name="GPT o3 Mini - Low",
 241        providers=[
 242            KilnModelProvider(
 243                name=ModelProviderName.openai,
 244                model_id="o3-mini",
 245                thinking_level="low",
 246                structured_output_mode=StructuredOutputMode.json_schema,
 247            ),
 248            KilnModelProvider(
 249                name=ModelProviderName.azure_openai,
 250                model_id="o3-mini",
 251                structured_output_mode=StructuredOutputMode.json_schema,
 252                thinking_level="low",
 253            ),
 254        ],
 255    ),
 256    # GPT o3 Mini Medium
 257    KilnModel(
 258        family=ModelFamily.gpt,
 259        name=ModelName.gpt_o3_mini_medium,
 260        friendly_name="GPT o3 Mini - Medium",
 261        providers=[
 262            KilnModelProvider(
 263                name=ModelProviderName.openai,
 264                model_id="o3-mini",
 265                thinking_level="medium",
 266                structured_output_mode=StructuredOutputMode.json_schema,
 267            ),
 268            KilnModelProvider(
 269                name=ModelProviderName.azure_openai,
 270                model_id="o3-mini",
 271                structured_output_mode=StructuredOutputMode.json_schema,
 272                thinking_level="medium",
 273            ),
 274        ],
 275    ),
 276    # GPT o3 Mini High
 277    KilnModel(
 278        family=ModelFamily.gpt,
 279        name=ModelName.gpt_o3_mini_high,
 280        friendly_name="GPT o3 Mini - High",
 281        providers=[
 282            KilnModelProvider(
 283                name=ModelProviderName.openai,
 284                model_id="o3-mini",
 285                thinking_level="high",
 286                structured_output_mode=StructuredOutputMode.json_schema,
 287            ),
 288            KilnModelProvider(
 289                name=ModelProviderName.azure_openai,
 290                model_id="o3-mini",
 291                structured_output_mode=StructuredOutputMode.json_schema,
 292                thinking_level="high",
 293            ),
 294        ],
 295    ),
 296    # GPT o1 Low
 297    KilnModel(
 298        family=ModelFamily.gpt,
 299        name=ModelName.gpt_o1_low,
 300        friendly_name="GPT o1 - Low",
 301        providers=[
 302            KilnModelProvider(
 303                name=ModelProviderName.openai,
 304                model_id="o1",
 305                thinking_level="low",
 306                structured_output_mode=StructuredOutputMode.json_schema,
 307            ),
 308            KilnModelProvider(
 309                name=ModelProviderName.azure_openai,
 310                model_id="o1",
 311                structured_output_mode=StructuredOutputMode.json_schema,
 312                thinking_level="low",
 313            ),
 314        ],
 315    ),
 316    # GPT o1 Medium
 317    KilnModel(
 318        family=ModelFamily.gpt,
 319        name=ModelName.gpt_o1_medium,
 320        friendly_name="GPT o1 - Medium",
 321        providers=[
 322            KilnModelProvider(
 323                name=ModelProviderName.openai,
 324                model_id="o1",
 325                thinking_level="medium",
 326                structured_output_mode=StructuredOutputMode.json_schema,
 327            ),
 328            KilnModelProvider(
 329                name=ModelProviderName.azure_openai,
 330                model_id="o1",
 331                structured_output_mode=StructuredOutputMode.json_schema,
 332                thinking_level="medium",
 333            ),
 334        ],
 335    ),
 336    # GPT o1 High
 337    KilnModel(
 338        family=ModelFamily.gpt,
 339        name=ModelName.gpt_o1_high,
 340        friendly_name="GPT o1 - High",
 341        providers=[
 342            KilnModelProvider(
 343                name=ModelProviderName.openai,
 344                model_id="o1",
 345                thinking_level="high",
 346                structured_output_mode=StructuredOutputMode.json_schema,
 347            ),
 348            KilnModelProvider(
 349                name=ModelProviderName.azure_openai,
 350                model_id="o1",
 351                structured_output_mode=StructuredOutputMode.json_schema,
 352                thinking_level="high",
 353            ),
 354        ],
 355    ),
 356    # Claude 3.5 Haiku
 357    KilnModel(
 358        family=ModelFamily.claude,
 359        name=ModelName.claude_3_5_haiku,
 360        friendly_name="Claude 3.5 Haiku",
 361        providers=[
 362            KilnModelProvider(
 363                name=ModelProviderName.openrouter,
 364                structured_output_mode=StructuredOutputMode.function_calling,
 365                model_id="anthropic/claude-3-5-haiku",
 366            ),
 367            KilnModelProvider(
 368                name=ModelProviderName.anthropic,
 369                model_id="claude-3-5-haiku-20241022",
 370                structured_output_mode=StructuredOutputMode.function_calling,
 371            ),
 372            KilnModelProvider(
 373                name=ModelProviderName.vertex,
 374                model_id="claude-3-5-haiku",
 375                structured_output_mode=StructuredOutputMode.function_calling_weak,
 376            ),
 377        ],
 378    ),
 379    # Claude 3.5 Sonnet
 380    KilnModel(
 381        family=ModelFamily.claude,
 382        name=ModelName.claude_3_5_sonnet,
 383        friendly_name="Claude 3.5 Sonnet",
 384        providers=[
 385            KilnModelProvider(
 386                name=ModelProviderName.openrouter,
 387                structured_output_mode=StructuredOutputMode.function_calling,
 388                model_id="anthropic/claude-3.5-sonnet",
 389            ),
 390            KilnModelProvider(
 391                name=ModelProviderName.anthropic,
 392                model_id="claude-3-5-sonnet-20241022",
 393                structured_output_mode=StructuredOutputMode.function_calling,
 394            ),
 395            KilnModelProvider(
 396                name=ModelProviderName.vertex,
 397                model_id="claude-3-5-sonnet",
 398                structured_output_mode=StructuredOutputMode.function_calling_weak,
 399            ),
 400        ],
 401    ),
 402    # Claude 3.7 Sonnet
 403    KilnModel(
 404        family=ModelFamily.claude,
 405        name=ModelName.claude_3_7_sonnet,
 406        friendly_name="Claude 3.7 Sonnet",
 407        providers=[
 408            KilnModelProvider(
 409                name=ModelProviderName.openrouter,
 410                structured_output_mode=StructuredOutputMode.function_calling,
 411                model_id="anthropic/claude-3.7-sonnet",
 412            ),
 413            KilnModelProvider(
 414                name=ModelProviderName.anthropic,
 415                model_id="claude-3-7-sonnet-20250219",
 416                structured_output_mode=StructuredOutputMode.function_calling,
 417            ),
 418        ],
 419    ),
 420    # Claude 3.7 Sonnet Thinking
 421    KilnModel(
 422        family=ModelFamily.claude,
 423        name=ModelName.claude_3_7_sonnet_thinking,
 424        friendly_name="Claude 3.7 Sonnet Thinking",
 425        providers=[
 426            KilnModelProvider(
 427                name=ModelProviderName.openrouter,
 428                model_id="anthropic/claude-3.7-sonnet:thinking",
 429                reasoning_capable=True,
 430                # For reasoning models, we need to use json_instructions with OpenRouter
 431                structured_output_mode=StructuredOutputMode.json_instructions,
 432                require_openrouter_reasoning=True,
 433            ),
 434            KilnModelProvider(
 435                name=ModelProviderName.anthropic,
 436                reasoning_capable=True,
 437                model_id="claude-3-7-sonnet-20250219",
 438                anthropic_extended_thinking=True,
 439                structured_output_mode=StructuredOutputMode.json_instructions,
 440            ),
 441        ],
 442    ),
 443    # Gemini 1.5 Pro
 444    KilnModel(
 445        family=ModelFamily.gemini,
 446        name=ModelName.gemini_1_5_pro,
 447        friendly_name="Gemini 1.5 Pro",
 448        providers=[
 449            KilnModelProvider(
 450                name=ModelProviderName.openrouter,
 451                model_id="google/gemini-pro-1.5",
 452                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 453            ),
 454            KilnModelProvider(
 455                name=ModelProviderName.gemini_api,
 456                model_id="gemini-1.5-pro",
 457                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 458            ),
 459            KilnModelProvider(
 460                name=ModelProviderName.vertex,
 461                model_id="gemini-1.5-pro",
 462                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 463            ),
 464        ],
 465    ),
 466    # Gemini 1.5 Flash
 467    KilnModel(
 468        family=ModelFamily.gemini,
 469        name=ModelName.gemini_1_5_flash,
 470        friendly_name="Gemini 1.5 Flash",
 471        providers=[
 472            KilnModelProvider(
 473                name=ModelProviderName.openrouter,
 474                model_id="google/gemini-flash-1.5",
 475                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 476            ),
 477            KilnModelProvider(
 478                name=ModelProviderName.gemini_api,
 479                model_id="gemini-1.5-flash",
 480                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 481            ),
 482            KilnModelProvider(
 483                name=ModelProviderName.vertex,
 484                model_id="gemini-1.5-flash",
 485                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 486            ),
 487        ],
 488    ),
 489    # Gemini 1.5 Flash 8B
 490    KilnModel(
 491        family=ModelFamily.gemini,
 492        name=ModelName.gemini_1_5_flash_8b,
 493        friendly_name="Gemini 1.5 Flash 8B",
 494        providers=[
 495            KilnModelProvider(
 496                name=ModelProviderName.openrouter,
 497                model_id="google/gemini-flash-1.5-8b",
 498                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 499                supports_data_gen=False,
 500            ),
 501            KilnModelProvider(
 502                name=ModelProviderName.gemini_api,
 503                model_id="gemini-1.5-flash-8b",
 504                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 505                supports_data_gen=False,
 506            ),
 507        ],
 508    ),
 509    # Gemini 2.0 Flash
 510    KilnModel(
 511        family=ModelFamily.gemini,
 512        name=ModelName.gemini_2_0_flash,
 513        friendly_name="Gemini 2.0 Flash",
 514        providers=[
 515            KilnModelProvider(
 516                name=ModelProviderName.openrouter,
 517                model_id="google/gemini-2.0-flash-001",
 518                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 519            ),
 520            KilnModelProvider(
 521                name=ModelProviderName.gemini_api,
 522                model_id="gemini-2.0-flash",
 523                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 524            ),
 525            KilnModelProvider(
 526                name=ModelProviderName.vertex,
 527                model_id="gemini-2.0-flash",
 528                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 529            ),
 530        ],
 531    ),
 532    # Nemotron 70B
 533    KilnModel(
 534        family=ModelFamily.llama,
 535        name=ModelName.nemotron_70b,
 536        friendly_name="Nemotron 70B",
 537        providers=[
 538            KilnModelProvider(
 539                name=ModelProviderName.openrouter,
 540                supports_structured_output=False,
 541                supports_data_gen=False,
 542                model_id="nvidia/llama-3.1-nemotron-70b-instruct",
 543            ),
 544        ],
 545    ),
 546    # Llama 3.1-8b
 547    KilnModel(
 548        family=ModelFamily.llama,
 549        name=ModelName.llama_3_1_8b,
 550        friendly_name="Llama 3.1 8B",
 551        providers=[
 552            KilnModelProvider(
 553                name=ModelProviderName.groq,
 554                model_id="llama-3.1-8b-instant",
 555            ),
 556            KilnModelProvider(
 557                name=ModelProviderName.amazon_bedrock,
 558                structured_output_mode=StructuredOutputMode.json_schema,
 559                supports_structured_output=False,
 560                model_id="meta.llama3-1-8b-instruct-v1:0",
 561            ),
 562            KilnModelProvider(
 563                name=ModelProviderName.ollama,
 564                structured_output_mode=StructuredOutputMode.json_schema,
 565                model_id="llama3.1:8b",
 566                ollama_model_aliases=["llama3.1"],  # 8b is default
 567            ),
 568            KilnModelProvider(
 569                name=ModelProviderName.openrouter,
 570                supports_data_gen=False,
 571                structured_output_mode=StructuredOutputMode.function_calling,
 572                model_id="meta-llama/llama-3.1-8b-instruct",
 573            ),
 574            KilnModelProvider(
 575                name=ModelProviderName.fireworks_ai,
 576                # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
 577                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 578                supports_data_gen=False,
 579                provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
 580                model_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
 581            ),
 582            KilnModelProvider(
 583                name=ModelProviderName.together_ai,
 584                model_id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
 585                supports_data_gen=False,
 586                structured_output_mode=StructuredOutputMode.function_calling_weak,
 587                provider_finetune_id="meta-llama/Meta-Llama-3.1-8B-Instruct",
 588            ),
 589        ],
 590    ),
 591    # Llama 3.1 70b
 592    KilnModel(
 593        family=ModelFamily.llama,
 594        name=ModelName.llama_3_1_70b,
 595        friendly_name="Llama 3.1 70B",
 596        providers=[
 597            KilnModelProvider(
 598                name=ModelProviderName.amazon_bedrock,
 599                structured_output_mode=StructuredOutputMode.json_schema,
 600                supports_data_gen=False,
 601                model_id="meta.llama3-1-70b-instruct-v1:0",
 602            ),
 603            KilnModelProvider(
 604                name=ModelProviderName.openrouter,
 605                supports_data_gen=False,
 606                # Need to not pass "strict=True" to the function call to get this to work with logprobs for some reason. Openrouter issue.
 607                structured_output_mode=StructuredOutputMode.function_calling_weak,
 608                model_id="meta-llama/llama-3.1-70b-instruct",
 609                supports_logprobs=True,
 610                logprobs_openrouter_options=True,
 611            ),
 612            KilnModelProvider(
 613                name=ModelProviderName.ollama,
 614                structured_output_mode=StructuredOutputMode.json_schema,
 615                model_id="llama3.1:70b",
 616            ),
 617            KilnModelProvider(
 618                name=ModelProviderName.fireworks_ai,
 619                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
 620                structured_output_mode=StructuredOutputMode.function_calling_weak,
 621                provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
 622                model_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
 623            ),
 624            KilnModelProvider(
 625                name=ModelProviderName.together_ai,
 626                model_id="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
 627                supports_data_gen=False,
 628                structured_output_mode=StructuredOutputMode.function_calling_weak,
 629                provider_finetune_id="meta-llama/Meta-Llama-3.1-70B-Instruct",
 630            ),
 631        ],
 632    ),
 633    # Llama 3.1 405b
 634    KilnModel(
 635        family=ModelFamily.llama,
 636        name=ModelName.llama_3_1_405b,
 637        friendly_name="Llama 3.1 405B",
 638        providers=[
 639            KilnModelProvider(
 640                name=ModelProviderName.amazon_bedrock,
 641                structured_output_mode=StructuredOutputMode.json_schema,
 642                supports_data_gen=False,
 643                model_id="meta.llama3-1-405b-instruct-v1:0",
 644            ),
 645            KilnModelProvider(
 646                name=ModelProviderName.ollama,
 647                structured_output_mode=StructuredOutputMode.json_schema,
 648                model_id="llama3.1:405b",
 649            ),
 650            KilnModelProvider(
 651                name=ModelProviderName.openrouter,
 652                structured_output_mode=StructuredOutputMode.function_calling,
 653                model_id="meta-llama/llama-3.1-405b-instruct",
 654            ),
 655            KilnModelProvider(
 656                name=ModelProviderName.fireworks_ai,
 657                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
 658                structured_output_mode=StructuredOutputMode.function_calling_weak,
 659                model_id="accounts/fireworks/models/llama-v3p1-405b-instruct",
 660            ),
 661            KilnModelProvider(
 662                name=ModelProviderName.together_ai,
 663                model_id="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
 664                supports_data_gen=False,
 665                structured_output_mode=StructuredOutputMode.function_calling_weak,
 666            ),
 667        ],
 668    ),
 669    # Mistral Nemo
 670    KilnModel(
 671        family=ModelFamily.mistral,
 672        name=ModelName.mistral_nemo,
 673        friendly_name="Mistral Nemo",
 674        providers=[
 675            KilnModelProvider(
 676                name=ModelProviderName.openrouter,
 677                model_id="mistralai/mistral-nemo",
 678                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 679            ),
 680        ],
 681    ),
 682    # Mistral Large
 683    KilnModel(
 684        family=ModelFamily.mistral,
 685        name=ModelName.mistral_large,
 686        friendly_name="Mistral Large",
 687        providers=[
 688            KilnModelProvider(
 689                name=ModelProviderName.amazon_bedrock,
 690                structured_output_mode=StructuredOutputMode.json_instructions,
 691                model_id="mistral.mistral-large-2407-v1:0",
 692            ),
 693            KilnModelProvider(
 694                name=ModelProviderName.openrouter,
 695                structured_output_mode=StructuredOutputMode.json_schema,
 696                model_id="mistralai/mistral-large",
 697            ),
 698            KilnModelProvider(
 699                name=ModelProviderName.ollama,
 700                structured_output_mode=StructuredOutputMode.json_schema,
 701                model_id="mistral-large",
 702            ),
 703        ],
 704    ),
 705    # Llama 3.2 1B
 706    KilnModel(
 707        family=ModelFamily.llama,
 708        name=ModelName.llama_3_2_1b,
 709        friendly_name="Llama 3.2 1B",
 710        providers=[
 711            KilnModelProvider(
 712                name=ModelProviderName.groq,
 713                model_id="llama-3.2-1b-preview",
 714                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 715                supports_data_gen=False,
 716            ),
 717            KilnModelProvider(
 718                name=ModelProviderName.openrouter,
 719                supports_structured_output=False,
 720                supports_data_gen=False,
 721                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
 722                model_id="meta-llama/llama-3.2-1b-instruct",
 723            ),
 724            KilnModelProvider(
 725                name=ModelProviderName.ollama,
 726                supports_structured_output=False,
 727                supports_data_gen=False,
 728                model_id="llama3.2:1b",
 729            ),
 730            KilnModelProvider(
 731                name=ModelProviderName.huggingface,
 732                model_id="meta-llama/Llama-3.2-1B-Instruct",
 733                supports_structured_output=False,
 734                supports_data_gen=False,
 735            ),
 736            KilnModelProvider(
 737                name=ModelProviderName.together_ai,
 738                provider_finetune_id="meta-llama/Llama-3.2-1B-Instruct",
 739            ),
 740        ],
 741    ),
    # Llama 3.2 3B
    # Small model: data gen is disabled on every provider, and structured
    # output is only enabled via Groq/Ollama defaults.
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_3b,
        friendly_name="Llama 3.2 3B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                # Preview deployment (per the "-preview" model id suffix).
                model_id="llama-3.2-3b-preview",
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                # Mode still set, presumably for callers that force structured
                # output despite the support flag — TODO confirm.
                structured_output_mode=StructuredOutputMode.json_schema,
                model_id="meta-llama/llama-3.2-3b-instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                model_id="llama3.2",
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Fireworks supports fine-tuning this model; serving id matches.
                provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
                supports_structured_output=False,
                supports_data_gen=False,
                model_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.huggingface,
                model_id="meta-llama/Llama-3.2-1B-Instruct",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                # Serving uses the Turbo variant; fine-tuning uses the base instruct id.
                model_id="meta-llama/Llama-3.2-3B-Instruct-Turbo",
                supports_structured_output=False,
                supports_data_gen=False,
                provider_finetune_id="meta-llama/Llama-3.2-3B-Instruct",
            ),
        ],
    ),
    # Llama 3.2 11B
    # Vision-capable variant ("vision" appears in most provider ids).
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_11b,
        friendly_name="Llama 3.2 11B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                model_id="llama-3.2-11b-vision-preview",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                model_id="meta-llama/llama-3.2-11b-vision-instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                model_id="llama3.2-vision",
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                model_id="accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.huggingface,
                model_id="meta-llama/Llama-3.2-11B-Vision-Instruct",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
        ],
    ),
    # Llama 3.2 90B
    # Largest 3.2 vision variant; mirrors the 11B provider set minus HuggingFace.
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_90b,
        friendly_name="Llama 3.2 90B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                model_id="llama-3.2-90b-vision-preview",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                model_id="meta-llama/llama-3.2-90b-vision-instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                model_id="llama3.2-vision:90b",
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                model_id="accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
        ],
    ),
    # Llama 3.3 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_3_70b,
        friendly_name="Llama 3.3 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="meta-llama/llama-3.3-70b-instruct",
                structured_output_mode=StructuredOutputMode.json_schema,
                # Openrouter not working with json_schema or tools. JSON_schema sometimes works so force that, but not consistently so still not recommended.
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.groq,
                # Explicit True values (likely the defaults) — kept as written.
                supports_structured_output=True,
                supports_data_gen=True,
                model_id="llama-3.3-70b-versatile",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                model_id="llama3.3",
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Finetuning not live yet
                # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling_weak,
                model_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.vertex,
                model_id="meta/llama-3.3-70b-instruct-maas",
                # Doesn't work, TODO to debug
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="meta-llama/Llama-3.3-70B-Instruct-Turbo",
                structured_output_mode=StructuredOutputMode.function_calling_weak,
            ),
        ],
    ),
    # Phi 3.5
    # Structured output and data gen are disabled on all providers.
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_3_5,
        friendly_name="Phi 3.5",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                # NOTE(review): a structured-output mode is set even though
                # supports_structured_output=False — presumably for callers that
                # force structured output anyway; confirm intent.
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_structured_output=False,
                supports_data_gen=False,
                model_id="phi3.5",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                model_id="microsoft/phi-3.5-mini-128k-instruct",
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                supports_structured_output=False,
                supports_data_gen=False,
                # Fireworks id points at the phi-3 vision model, not a 3.5 checkpoint.
                model_id="accounts/fireworks/models/phi-3-vision-128k-instruct",
            ),
        ],
    ),
    # Phi 4
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_4,
        friendly_name="Phi 4 - 14B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                model_id="phi4",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                # JSON mode not consistent enough to enable in UI
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                model_id="microsoft/phi-4",
            ),
        ],
    ),
    # Phi 4 5.6B
    # Multimodal variant; OpenRouter only, no structured output or data gen.
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_4_5p6b,
        friendly_name="Phi 4 - 5.6B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="microsoft/phi-4-multimodal-instruct",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
        ],
    ),
    # Phi 4 Mini
    # Ollama only; all capability flags left at their defaults.
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_4_mini,
        friendly_name="Phi 4 Mini - 3.8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="phi4-mini",
            ),
        ],
    ),
    # Gemma 2 2.6b
    # Ollama only; too small for reliable data gen.
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_2b,
        friendly_name="Gemma 2 2B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                model_id="gemma2:2b",
            ),
        ],
    ),
    # Gemma 2 9b
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_9b,
        friendly_name="Gemma 2 9B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                model_id="gemma2:9b",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                model_id="google/gemma-2-9b-it",
            ),
            # fireworks AI errors - not allowing system role. Exclude until resolved.
        ],
    ),
    # Gemma 2 27b
    # Same provider set and caveats as Gemma 2 9B.
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_27b,
        friendly_name="Gemma 2 27B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                model_id="gemma2:27b",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                model_id="google/gemma-2-27b-it",
            ),
        ],
    ),
    # Gemma 3 1B
    # Smallest Gemma 3: no structured output or data gen anywhere.
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_3_1b,
        friendly_name="Gemma 3 1B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="gemma3:1b",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                # TODO: swap to non-free model when available (more reliable)
                model_id="google/gemma-3-1b-it:free",
                supports_structured_output=False,
                supports_data_gen=False,
            ),
        ],
    ),
    # Gemma 3 4B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_3_4b,
        friendly_name="Gemma 3 4B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="gemma3:4b",
                # "gemma3" (no tag) resolves to this same model in Ollama.
                ollama_model_aliases=["gemma3"],
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                # TODO: swap to non-free model when available (more reliable)
                model_id="google/gemma-3-4b-it:free",
            ),
        ],
    ),
    # Gemma 3 12B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_3_12b,
        friendly_name="Gemma 3 12B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="gemma3:12b",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                # TODO: swap to non-free model when available (more reliable)
                model_id="google/gemma-3-12b-it:free",
            ),
        ],
    ),
    # Gemma 3 27B
    # Largest Gemma 3; the only one with a paid (non-":free") OpenRouter id
    # and a HuggingFace provider.
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_3_27b,
        friendly_name="Gemma 3 27B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="gemma3:27b",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                model_id="google/gemma-3-27b-it",
            ),
            KilnModelProvider(
                name=ModelProviderName.huggingface,
                model_id="google/gemma-3-27b-it",
                structured_output_mode=StructuredOutputMode.json_instructions,
            ),
        ],
    ),
    # Mixtral 8x7B
    KilnModel(
        family=ModelFamily.mixtral,
        name=ModelName.mixtral_8x7b,
        friendly_name="Mixtral 8x7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="mistralai/mixtral-8x7b-instruct",
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="mixtral",
            ),
        ],
    ),
    # QwQ 32B
    # Reasoning model: every provider sets reasoning_capable, the r1_thinking
    # parser (to strip/extract <think> content), and json_instructions output.
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwq_32b,
        friendly_name="QwQ 32B (Qwen Reasoning)",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="qwen/qwq-32b",
                reasoning_capable=True,
                # OpenRouter-specific flags to request reasoning output.
                require_openrouter_reasoning=True,
                r1_openrouter_options=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                model_id="accounts/fireworks/models/qwq-32b",
                reasoning_capable=True,
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="qwq",
                reasoning_capable=True,
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
            ),
            KilnModelProvider(
                name=ModelProviderName.groq,
                model_id="qwen-qwq-32b",
                reasoning_capable=True,
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="Qwen/QwQ-32B",
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
            ),
        ],
    ),
    # Qwen 2.5 7B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_7b,
        friendly_name="Qwen 2.5 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="qwen/qwen-2.5-7b-instruct",
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="qwen2.5",
            ),
        ],
    ),
    # Qwen 2.5 14B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_14b,
        friendly_name="Qwen 2.5 14B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                # Fine-tuning only: no serving model_id is set for Together here.
                provider_finetune_id="Qwen/Qwen2.5-14B-Instruct",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="qwen2.5:14b",
                supports_data_gen=False,
            ),
        ],
    ),
    # Qwen 2.5 72B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_72b,
        friendly_name="Qwen 2.5 72B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="qwen/qwen-2.5-72b-instruct",
                # Not consistent with structure data. Works sometimes but not often
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="qwen2.5:72b",
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                model_id="accounts/fireworks/models/qwen2p5-72b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling_weak,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                # Fine-tuning only: no serving model_id is set for Together here.
                provider_finetune_id="Qwen/Qwen2.5-72B-Instruct",
            ),
        ],
    ),
    # Mistral Small 3
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_small_3,
        friendly_name="Mistral Small 3",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                model_id="mistralai/mistral-small-24b-instruct-2501",
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                model_id="mistral-small:24b",
            ),
        ],
    ),
    # DeepSeek 3
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_3,
        friendly_name="DeepSeek V3",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="deepseek/deepseek-chat",
                structured_output_mode=StructuredOutputMode.function_calling,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                model_id="accounts/fireworks/models/deepseek-v3",
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                # Explicit True (likely the default) — kept as written.
                supports_structured_output=True,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="deepseek-ai/DeepSeek-V3",
                structured_output_mode=StructuredOutputMode.json_instructions,
            ),
        ],
    ),
    # DeepSeek R1
    # Reasoning model: all providers use the r1_thinking parser and
    # json_instructions structured output.
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1,
        friendly_name="DeepSeek R1",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="deepseek/deepseek-r1",
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
                # OpenRouter-specific flags to request reasoning output.
                r1_openrouter_options=True,
                require_openrouter_reasoning=True,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                model_id="accounts/fireworks/models/deepseek-r1",
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                # I want your RAM
                name=ModelProviderName.ollama,
                model_id="deepseek-r1:671b",
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="deepseek-ai/DeepSeek-R1",
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
            ),
        ],
    ),
1326    # DeepSeek R1 Distill Qwen 32B
1327    KilnModel(
1328        family=ModelFamily.deepseek,
1329        name=ModelName.deepseek_r1_distill_qwen_32b,
1330        friendly_name="DeepSeek R1 Distill Qwen 32B",
1331        providers=[
1332            KilnModelProvider(
1333                name=ModelProviderName.openrouter,
1334                reasoning_capable=True,
1335                structured_output_mode=StructuredOutputMode.json_instructions,
1336                model_id="deepseek/deepseek-r1-distill-qwen-32b",
1337                r1_openrouter_options=True,
1338                parser=ModelParserID.r1_thinking,
1339                require_openrouter_reasoning=True,
1340            ),
1341            KilnModelProvider(
1342                name=ModelProviderName.ollama,
1343                parser=ModelParserID.r1_thinking,
1344                reasoning_capable=True,
1345                structured_output_mode=StructuredOutputMode.json_instructions,
1346                model_id="deepseek-r1:32b",
1347            ),
1348            KilnModelProvider(
1349                name=ModelProviderName.together_ai,
1350                model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
1351                structured_output_mode=StructuredOutputMode.json_instructions,
1352                parser=ModelParserID.r1_thinking,
1353                reasoning_capable=True,
1354            ),
1355        ],
1356    ),
    # DeepSeek R1 Distill Llama 70B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_llama_70b,
        friendly_name="DeepSeek R1 Distill Llama 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek/deepseek-r1-distill-llama-70b",
                r1_openrouter_options=True,
                require_openrouter_reasoning=True,
                parser=ModelParserID.r1_thinking,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek-r1:70b",
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
                # NOTE(review): reasoning_capable is not set here, unlike the
                # other providers of this model — confirm whether intentional.
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 14B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_14b,
        friendly_name="DeepSeek R1 Distill Qwen 14B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek/deepseek-r1-distill-qwen-14b",
                r1_openrouter_options=True,
                require_openrouter_reasoning=True,
                openrouter_skip_required_parameters=True,
                parser=ModelParserID.r1_thinking,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek-r1:14b",
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
            ),
        ],
    ),
    # DeepSeek R1 Distill Llama 8B
    # Small distill: data gen disabled on both providers.
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_llama_8b,
        friendly_name="DeepSeek R1 Distill Llama 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek/deepseek-r1-distill-llama-8b",
                r1_openrouter_options=True,
                require_openrouter_reasoning=True,
                openrouter_skip_required_parameters=True,
                parser=ModelParserID.r1_thinking,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek-r1:8b",
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 7B
    # Ollama only.
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_7b,
        friendly_name="DeepSeek R1 Distill Qwen 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek-r1:7b",
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 1.5B
    # Tiny distill: structured output/data gen disabled where flagged.
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_1p5b,
        friendly_name="DeepSeek R1 Distill Qwen 1.5B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek/deepseek-r1-distill-qwen-1.5b",
                r1_openrouter_options=True,
                require_openrouter_reasoning=True,
                openrouter_skip_required_parameters=True,
                parser=ModelParserID.r1_thinking,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                model_id="deepseek-r1:1.5b",
            ),
            KilnModelProvider(
                name=ModelProviderName.together_ai,
                model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
                structured_output_mode=StructuredOutputMode.json_instructions,
                parser=ModelParserID.r1_thinking,
                supports_structured_output=False,
                supports_data_gen=False,
            ),
        ],
    ),
    # Dolphin 2.9 Mixtral 8x22B
    KilnModel(
        family=ModelFamily.dolphin,
        name=ModelName.dolphin_2_9_8x22b,
        friendly_name="Dolphin 2.9 8x22B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                # Explicit True (likely the default) — kept as written.
                supports_data_gen=True,
                model_id="dolphin-mixtral:8x22b",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=True,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                model_id="cognitivecomputations/dolphin-mixtral-8x22b",
            ),
        ],
    ),
    # Grok 2
    # OpenRouter only; full structured output via json_schema.
    KilnModel(
        family=ModelFamily.grok,
        name=ModelName.grok_2,
        friendly_name="Grok 2",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                model_id="x-ai/grok-2-1212",
                supports_structured_output=True,
                supports_data_gen=True,
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
1535]
class ModelProviderName(builtins.str, enum.Enum):
16class ModelProviderName(str, Enum):
17    """
18    Enumeration of supported AI model providers.
19    """
20
21    openai = "openai"
22    groq = "groq"
23    amazon_bedrock = "amazon_bedrock"
24    ollama = "ollama"
25    openrouter = "openrouter"
26    fireworks_ai = "fireworks_ai"
27    kiln_fine_tune = "kiln_fine_tune"
28    kiln_custom_registry = "kiln_custom_registry"
29    openai_compatible = "openai_compatible"
30    anthropic = "anthropic"
31    gemini_api = "gemini_api"
32    azure_openai = "azure_openai"
33    huggingface = "huggingface"
34    vertex = "vertex"
35    together_ai = "together_ai"

Enumeration of supported AI model providers.

openai = <ModelProviderName.openai: 'openai'>
groq = <ModelProviderName.groq: 'groq'>
amazon_bedrock = <ModelProviderName.amazon_bedrock: 'amazon_bedrock'>
ollama = <ModelProviderName.ollama: 'ollama'>
openrouter = <ModelProviderName.openrouter: 'openrouter'>
fireworks_ai = <ModelProviderName.fireworks_ai: 'fireworks_ai'>
kiln_fine_tune = <ModelProviderName.kiln_fine_tune: 'kiln_fine_tune'>
kiln_custom_registry = <ModelProviderName.kiln_custom_registry: 'kiln_custom_registry'>
openai_compatible = <ModelProviderName.openai_compatible: 'openai_compatible'>
anthropic = <ModelProviderName.anthropic: 'anthropic'>
gemini_api = <ModelProviderName.gemini_api: 'gemini_api'>
azure_openai = <ModelProviderName.azure_openai: 'azure_openai'>
huggingface = <ModelProviderName.huggingface: 'huggingface'>
vertex = <ModelProviderName.vertex: 'vertex'>
together_ai = <ModelProviderName.together_ai: 'together_ai'>
class ModelFamily(builtins.str, enum.Enum):
38class ModelFamily(str, Enum):
39    """
40    Enumeration of supported model families/architectures.
41    """
42
43    gpt = "gpt"
44    llama = "llama"
45    phi = "phi"
46    mistral = "mistral"
47    gemma = "gemma"
48    gemini = "gemini"
49    claude = "claude"
50    mixtral = "mixtral"
51    qwen = "qwen"
52    deepseek = "deepseek"
53    dolphin = "dolphin"
54    grok = "grok"

Enumeration of supported model families/architectures.

gpt = <ModelFamily.gpt: 'gpt'>
llama = <ModelFamily.llama: 'llama'>
phi = <ModelFamily.phi: 'phi'>
mistral = <ModelFamily.mistral: 'mistral'>
gemma = <ModelFamily.gemma: 'gemma'>
gemini = <ModelFamily.gemini: 'gemini'>
claude = <ModelFamily.claude: 'claude'>
mixtral = <ModelFamily.mixtral: 'mixtral'>
qwen = <ModelFamily.qwen: 'qwen'>
deepseek = <ModelFamily.deepseek: 'deepseek'>
dolphin = <ModelFamily.dolphin: 'dolphin'>
grok = <ModelFamily.grok: 'grok'>
class ModelName(builtins.str, enum.Enum):
 58class ModelName(str, Enum):
 59    """
 60    Enumeration of specific model versions supported by the system.
 61    Where models have instruct and raw versions, instruct is default and raw is specified.
 62    """
 63
 64    llama_3_1_8b = "llama_3_1_8b"
 65    llama_3_1_70b = "llama_3_1_70b"
 66    llama_3_1_405b = "llama_3_1_405b"
 67    llama_3_2_1b = "llama_3_2_1b"
 68    llama_3_2_3b = "llama_3_2_3b"
 69    llama_3_2_11b = "llama_3_2_11b"
 70    llama_3_2_90b = "llama_3_2_90b"
 71    llama_3_3_70b = "llama_3_3_70b"
 72    gpt_4o_mini = "gpt_4o_mini"
 73    gpt_4o = "gpt_4o"
 74    gpt_o1_low = "gpt_o1_low"
 75    gpt_o1_medium = "gpt_o1_medium"
 76    gpt_o1_high = "gpt_o1_high"
 77    gpt_o3_mini_low = "gpt_o3_mini_low"
 78    gpt_o3_mini_medium = "gpt_o3_mini_medium"
 79    gpt_o3_mini_high = "gpt_o3_mini_high"
 80    phi_3_5 = "phi_3_5"
 81    phi_4 = "phi_4"
 82    phi_4_5p6b = "phi_4_5p6b"
 83    phi_4_mini = "phi_4_mini"
 84    mistral_large = "mistral_large"
 85    mistral_nemo = "mistral_nemo"
 86    gemma_2_2b = "gemma_2_2b"
 87    gemma_2_9b = "gemma_2_9b"
 88    gemma_2_27b = "gemma_2_27b"
 89    gemma_3_1b = "gemma_3_1b"
 90    gemma_3_4b = "gemma_3_4b"
 91    gemma_3_12b = "gemma_3_12b"
 92    gemma_3_27b = "gemma_3_27b"
 93    claude_3_5_haiku = "claude_3_5_haiku"
 94    claude_3_5_sonnet = "claude_3_5_sonnet"
 95    claude_3_7_sonnet = "claude_3_7_sonnet"
 96    claude_3_7_sonnet_thinking = "claude_3_7_sonnet_thinking"
 97    gemini_1_5_flash = "gemini_1_5_flash"
 98    gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
 99    gemini_1_5_pro = "gemini_1_5_pro"
100    gemini_2_0_flash = "gemini_2_0_flash"
101    nemotron_70b = "nemotron_70b"
102    mixtral_8x7b = "mixtral_8x7b"
103    qwen_2p5_7b = "qwen_2p5_7b"
104    qwen_2p5_14b = "qwen_2p5_14b"
105    qwen_2p5_72b = "qwen_2p5_72b"
106    qwq_32b = "qwq_32b"
107    deepseek_3 = "deepseek_3"
108    deepseek_r1 = "deepseek_r1"
109    mistral_small_3 = "mistral_small_3"
110    deepseek_r1_distill_qwen_32b = "deepseek_r1_distill_qwen_32b"
111    deepseek_r1_distill_llama_70b = "deepseek_r1_distill_llama_70b"
112    deepseek_r1_distill_qwen_14b = "deepseek_r1_distill_qwen_14b"
113    deepseek_r1_distill_qwen_1p5b = "deepseek_r1_distill_qwen_1p5b"
114    deepseek_r1_distill_qwen_7b = "deepseek_r1_distill_qwen_7b"
115    deepseek_r1_distill_llama_8b = "deepseek_r1_distill_llama_8b"
116    dolphin_2_9_8x22b = "dolphin_2_9_8x22b"
117    grok_2 = "grok_2"

Enumeration of specific model versions supported by the system. Where models have instruct and raw versions, instruct is default and raw is specified.

llama_3_1_8b = <ModelName.llama_3_1_8b: 'llama_3_1_8b'>
llama_3_1_70b = <ModelName.llama_3_1_70b: 'llama_3_1_70b'>
llama_3_1_405b = <ModelName.llama_3_1_405b: 'llama_3_1_405b'>
llama_3_2_1b = <ModelName.llama_3_2_1b: 'llama_3_2_1b'>
llama_3_2_3b = <ModelName.llama_3_2_3b: 'llama_3_2_3b'>
llama_3_2_11b = <ModelName.llama_3_2_11b: 'llama_3_2_11b'>
llama_3_2_90b = <ModelName.llama_3_2_90b: 'llama_3_2_90b'>
llama_3_3_70b = <ModelName.llama_3_3_70b: 'llama_3_3_70b'>
gpt_4o_mini = <ModelName.gpt_4o_mini: 'gpt_4o_mini'>
gpt_4o = <ModelName.gpt_4o: 'gpt_4o'>
gpt_o1_low = <ModelName.gpt_o1_low: 'gpt_o1_low'>
gpt_o1_medium = <ModelName.gpt_o1_medium: 'gpt_o1_medium'>
gpt_o1_high = <ModelName.gpt_o1_high: 'gpt_o1_high'>
gpt_o3_mini_low = <ModelName.gpt_o3_mini_low: 'gpt_o3_mini_low'>
gpt_o3_mini_medium = <ModelName.gpt_o3_mini_medium: 'gpt_o3_mini_medium'>
gpt_o3_mini_high = <ModelName.gpt_o3_mini_high: 'gpt_o3_mini_high'>
phi_3_5 = <ModelName.phi_3_5: 'phi_3_5'>
phi_4 = <ModelName.phi_4: 'phi_4'>
phi_4_5p6b = <ModelName.phi_4_5p6b: 'phi_4_5p6b'>
phi_4_mini = <ModelName.phi_4_mini: 'phi_4_mini'>
mistral_large = <ModelName.mistral_large: 'mistral_large'>
mistral_nemo = <ModelName.mistral_nemo: 'mistral_nemo'>
gemma_2_2b = <ModelName.gemma_2_2b: 'gemma_2_2b'>
gemma_2_9b = <ModelName.gemma_2_9b: 'gemma_2_9b'>
gemma_2_27b = <ModelName.gemma_2_27b: 'gemma_2_27b'>
gemma_3_1b = <ModelName.gemma_3_1b: 'gemma_3_1b'>
gemma_3_4b = <ModelName.gemma_3_4b: 'gemma_3_4b'>
gemma_3_12b = <ModelName.gemma_3_12b: 'gemma_3_12b'>
gemma_3_27b = <ModelName.gemma_3_27b: 'gemma_3_27b'>
claude_3_5_haiku = <ModelName.claude_3_5_haiku: 'claude_3_5_haiku'>
claude_3_5_sonnet = <ModelName.claude_3_5_sonnet: 'claude_3_5_sonnet'>
claude_3_7_sonnet = <ModelName.claude_3_7_sonnet: 'claude_3_7_sonnet'>
claude_3_7_sonnet_thinking = <ModelName.claude_3_7_sonnet_thinking: 'claude_3_7_sonnet_thinking'>
gemini_1_5_flash = <ModelName.gemini_1_5_flash: 'gemini_1_5_flash'>
gemini_1_5_flash_8b = <ModelName.gemini_1_5_flash_8b: 'gemini_1_5_flash_8b'>
gemini_1_5_pro = <ModelName.gemini_1_5_pro: 'gemini_1_5_pro'>
gemini_2_0_flash = <ModelName.gemini_2_0_flash: 'gemini_2_0_flash'>
nemotron_70b = <ModelName.nemotron_70b: 'nemotron_70b'>
mixtral_8x7b = <ModelName.mixtral_8x7b: 'mixtral_8x7b'>
qwen_2p5_7b = <ModelName.qwen_2p5_7b: 'qwen_2p5_7b'>
qwen_2p5_14b = <ModelName.qwen_2p5_14b: 'qwen_2p5_14b'>
qwen_2p5_72b = <ModelName.qwen_2p5_72b: 'qwen_2p5_72b'>
qwq_32b = <ModelName.qwq_32b: 'qwq_32b'>
deepseek_3 = <ModelName.deepseek_3: 'deepseek_3'>
deepseek_r1 = <ModelName.deepseek_r1: 'deepseek_r1'>
mistral_small_3 = <ModelName.mistral_small_3: 'mistral_small_3'>
deepseek_r1_distill_qwen_32b = <ModelName.deepseek_r1_distill_qwen_32b: 'deepseek_r1_distill_qwen_32b'>
deepseek_r1_distill_llama_70b = <ModelName.deepseek_r1_distill_llama_70b: 'deepseek_r1_distill_llama_70b'>
deepseek_r1_distill_qwen_14b = <ModelName.deepseek_r1_distill_qwen_14b: 'deepseek_r1_distill_qwen_14b'>
deepseek_r1_distill_qwen_1p5b = <ModelName.deepseek_r1_distill_qwen_1p5b: 'deepseek_r1_distill_qwen_1p5b'>
deepseek_r1_distill_qwen_7b = <ModelName.deepseek_r1_distill_qwen_7b: 'deepseek_r1_distill_qwen_7b'>
deepseek_r1_distill_llama_8b = <ModelName.deepseek_r1_distill_llama_8b: 'deepseek_r1_distill_llama_8b'>
dolphin_2_9_8x22b = <ModelName.dolphin_2_9_8x22b: 'dolphin_2_9_8x22b'>
grok_2 = <ModelName.grok_2: 'grok_2'>
class ModelParserID(builtins.str, enum.Enum):
120class ModelParserID(str, Enum):
121    """
122    Enumeration of supported model parsers.
123    """
124
125    r1_thinking = "r1_thinking"

Enumeration of supported model parsers.

r1_thinking = <ModelParserID.r1_thinking: 'r1_thinking'>
class KilnModelProvider(pydantic.main.BaseModel):
128class KilnModelProvider(BaseModel):
129    """
130    Configuration for a specific model provider.
131
132    Attributes:
133        name: The provider's identifier
134        supports_structured_output: Whether the provider supports structured output formats
135        supports_data_gen: Whether the provider supports data generation
136        untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable.
137        provider_finetune_id: The finetune ID for the provider, if applicable
138        structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output.
139        parser: A parser to use for the model, if applicable
140        reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.
141    """
142
143    name: ModelProviderName
144    model_id: str | None = None
145    supports_structured_output: bool = True
146    supports_data_gen: bool = True
147    untested_model: bool = False
148    provider_finetune_id: str | None = None
149    structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
150    parser: ModelParserID | None = None
151    reasoning_capable: bool = False
152    supports_logprobs: bool = False
153
154    # TODO P1: Need a more generalized way to handle custom provider parameters.
155    # Making them quite declarative here for now, isolating provider specific logic
156    # to this file. Later I should be able to override anything in this file via config.
157    r1_openrouter_options: bool = False
158    require_openrouter_reasoning: bool = False
159    logprobs_openrouter_options: bool = False
160    openrouter_skip_required_parameters: bool = False
161    thinking_level: Literal["low", "medium", "high"] | None = None
162    ollama_model_aliases: List[str] | None = None
163    anthropic_extended_thinking: bool = False

Configuration for a specific model provider.

Attributes: name: The provider's identifier supports_structured_output: Whether the provider supports structured output formats supports_data_gen: Whether the provider supports data generation untested_model: Whether the model is untested (typically user added). The supports_ fields are not applicable. provider_finetune_id: The finetune ID for the provider, if applicable structured_output_mode: The mode we should use to call the model for structured output, if it was trained with structured output. parser: A parser to use for the model, if applicable reasoning_capable: Whether the model is designed to output thinking in a structured format (eg <think></think>). If so we don't use COT across 2 calls, and ask for thinking and final response in the same call.

model_id: str | None
supports_structured_output: bool
supports_data_gen: bool
untested_model: bool
provider_finetune_id: str | None
structured_output_mode: kiln_ai.datamodel.StructuredOutputMode
parser: ModelParserID | None
reasoning_capable: bool
supports_logprobs: bool
r1_openrouter_options: bool
require_openrouter_reasoning: bool
logprobs_openrouter_options: bool
openrouter_skip_required_parameters: bool
thinking_level: Optional[Literal['low', 'medium', 'high']]
ollama_model_aliases: Optional[List[str]]
anthropic_extended_thinking: bool
model_config: ClassVar[pydantic.config.ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

class KilnModel(BaseModel):
    """
    Configuration for a specific AI model.

    Groups a model's identity with the list of providers that can serve it.
    Per-provider capabilities (structured output, data generation, etc.) are
    declared on each KilnModelProvider entry, not on this class.

    Attributes:
        family: The model's architecture family (e.g. "gpt", "llama", "claude")
        name: The model's identifier (a ModelName enum value, stored as str)
        friendly_name: Human-readable name for the model, shown in UIs
        providers: List of provider configurations that offer this model
    """

    family: str
    name: str
    friendly_name: str
    providers: List[KilnModelProvider]

Configuration for a specific AI model.

Attributes:
    family: The model's architecture family.
    name: The model's identifier.
    friendly_name: Human-readable name for the model.
    providers: List of providers that offer this model.
    supports_structured_output: Whether the model supports structured output formats (note: this flag is declared per provider on KilnModelProvider, not on KilnModel itself).

family: str
name: str
friendly_name: str
providers: List[KilnModelProvider]
model_config: ClassVar[pydantic.config.ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

built_in_models: List[KilnModel] = [KilnModel(family='gpt', name='gpt_4o_mini', friendly_name='GPT 4o Mini', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='gpt-4o-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='gpt-4o-mini-2024-07-18', structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=True, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='openai/gpt-4o-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=True, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=True, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='gpt-4o-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_4o', friendly_name='GPT 4o', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='gpt-4o', 
supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='gpt-4o-2024-08-06', structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=True, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='openai/gpt-4o', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=True, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=True, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='gpt-4o', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o3_mini_low', friendly_name='GPT o3 Mini - Low', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, 
supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='low', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='low', ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o3_mini_medium', friendly_name='GPT o3 Mini - Medium', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='medium', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='medium', 
ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o3_mini_high', friendly_name='GPT o3 Mini - High', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='high', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o3-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='high', ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o1_low', friendly_name='GPT o1 - Low', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='low', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o1', 
supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='low', ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o1_medium', friendly_name='GPT o1 - Medium', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='medium', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='medium', ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gpt', name='gpt_o1_high', friendly_name='GPT o1 - High', providers=[KilnModelProvider(name=<ModelProviderName.openai: 'openai'>, model_id='o1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, 
structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='high', ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.azure_openai: 'azure_openai'>, model_id='o1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level='high', ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='claude', name='claude_3_5_haiku', friendly_name='Claude 3.5 Haiku', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='anthropic/claude-3-5-haiku', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.anthropic: 'anthropic'>, model_id='claude-3-5-haiku-20241022', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, 
require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='claude-3-5-haiku', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='claude', name='claude_3_5_sonnet', friendly_name='Claude 3.5 Sonnet', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='anthropic/claude-3.5-sonnet', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.anthropic: 'anthropic'>, model_id='claude-3-5-sonnet-20241022', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, 
ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='claude-3-5-sonnet', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='claude', name='claude_3_7_sonnet', friendly_name='Claude 3.7 Sonnet', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='anthropic/claude-3.7-sonnet', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.anthropic: 'anthropic'>, model_id='claude-3-7-sonnet-20250219', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='claude', name='claude_3_7_sonnet_thinking', 
friendly_name='Claude 3.7 Sonnet Thinking', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='anthropic/claude-3.7-sonnet:thinking', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.anthropic: 'anthropic'>, model_id='claude-3-7-sonnet-20250219', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=True)]), KilnModel(family='gemini', name='gemini_1_5_pro', friendly_name='Gemini 1.5 Pro', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemini-pro-1.5', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.gemini_api: 
'gemini_api'>, model_id='gemini-1.5-pro', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='gemini-1.5-pro', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemini', name='gemini_1_5_flash', friendly_name='Gemini 1.5 Flash', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemini-flash-1.5', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.gemini_api: 'gemini_api'>, model_id='gemini-1.5-flash', supports_structured_output=True, supports_data_gen=True, 
untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='gemini-1.5-flash', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemini', name='gemini_1_5_flash_8b', friendly_name='Gemini 1.5 Flash 8B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemini-flash-1.5-8b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.gemini_api: 'gemini_api'>, model_id='gemini-1.5-flash-8b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, 
structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemini', name='gemini_2_0_flash', friendly_name='Gemini 2.0 Flash', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemini-2.0-flash-001', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.gemini_api: 'gemini_api'>, model_id='gemini-2.0-flash', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='gemini-2.0-flash', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 
'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='nemotron_70b', friendly_name='Nemotron 70B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='nvidia/llama-3.1-nemotron-70b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_1_8b', friendly_name='Llama 3.1 8B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.1-8b-instant', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.amazon_bedrock: 'amazon_bedrock'>, model_id='meta.llama3-1-8b-instruct-v1:0', supports_structured_output=False, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, 
supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.1:8b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=['llama3.1'], anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.1-8b-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p1-8b-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id='accounts/fireworks/models/llama-v3p1-8b-instruct', structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, 
openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id='meta-llama/Meta-Llama-3.1-8B-Instruct', structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_1_70b', friendly_name='Llama 3.1 70B', providers=[KilnModelProvider(name=<ModelProviderName.amazon_bedrock: 'amazon_bedrock'>, model_id='meta.llama3-1-70b-instruct-v1:0', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.1-70b-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=True, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=True, openrouter_skip_required_parameters=False, 
thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.1:70b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p1-70b-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='accounts/fireworks/models/llama-v3p1-70b-instruct', structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id='meta-llama/Meta-Llama-3.1-70B-Instruct', structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', 
name='llama_3_1_405b', friendly_name='Llama 3.1 405B', providers=[KilnModelProvider(name=<ModelProviderName.amazon_bedrock: 'amazon_bedrock'>, model_id='meta.llama3-1-405b-instruct-v1:0', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.1:405b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.1-405b-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p1-405b-instruct', supports_structured_output=True, 
supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='mistral', name='mistral_nemo', friendly_name='Mistral Nemo', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='mistralai/mistral-nemo', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='mistral', name='mistral_large', friendly_name='Mistral Large', providers=[KilnModelProvider(name=<ModelProviderName.amazon_bedrock: 'amazon_bedrock'>, model_id='mistral.mistral-large-2407-v1:0', 
supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='mistralai/mistral-large', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='mistral-large', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_2_1b', friendly_name='Llama 3.2 1B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.2-1b-preview', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 
'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.2-1b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.2:1b', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.huggingface: 'huggingface'>, model_id='meta-llama/Llama-3.2-1B-Instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, 
thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id=None, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='meta-llama/Llama-3.2-1B-Instruct', structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_2_3b', friendly_name='Llama 3.2 3B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.2-3b-preview', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.2-3b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.2', 
supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p2-3b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id='accounts/fireworks/models/llama-v3p2-3b-instruct', structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.huggingface: 'huggingface'>, model_id='meta-llama/Llama-3.2-3B-Instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Llama-3.2-3B-Instruct-Turbo', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id='meta-llama/Llama-3.2-3B-Instruct', 
structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_2_11b', friendly_name='Llama 3.2 11B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.2-11b-vision-preview', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.2-11b-vision-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.2-vision', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, 
require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p2-11b-vision-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.huggingface: 'huggingface'>, model_id='meta-llama/Llama-3.2-11B-Vision-Instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, 
anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_2_90b', friendly_name='Llama 3.2 90B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.2-90b-vision-preview', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.2-90b-vision-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.2-vision:90b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, 
model_id='accounts/fireworks/models/llama-v3p2-90b-vision-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='llama', name='llama_3_3_70b', friendly_name='Llama 3.3 70B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='meta-llama/llama-3.3-70b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='llama-3.3-70b-versatile', supports_structured_output=True, supports_data_gen=True, 
untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='llama3.3', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/llama-v3p3-70b-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.vertex: 'vertex'>, model_id='meta/llama-3.3-70b-instruct-maas', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, 
logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='meta-llama/Llama-3.3-70B-Instruct-Turbo', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='phi', name='phi_3_5', friendly_name='Phi 3.5', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='phi3.5', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='microsoft/phi-3.5-mini-128k-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), 
KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/phi-3-vision-128k-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='phi', name='phi_4', friendly_name='Phi 4 - 14B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='phi4', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='microsoft/phi-4', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='phi', name='phi_4_5p6b', friendly_name='Phi 4 - 5.6B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, 
model_id='microsoft/phi-4-multimodal-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='phi', name='phi_4_mini', friendly_name='Phi 4 Mini - 3.8B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='phi4-mini', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_2_2b', friendly_name='Gemma 2 2B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma2:2b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_2_9b', friendly_name='Gemma 2 9B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma2:9b', supports_structured_output=True, 
supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-2-9b-it', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_2_27b', friendly_name='Gemma 2 27B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma2:27b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-2-27b-it', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, 
reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_3_1b', friendly_name='Gemma 3 1B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma3:1b', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-3-1b-it:free', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_3_4b', friendly_name='Gemma 3 4B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma3:4b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, 
openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=['gemma3'], anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-3-4b-it:free', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='gemma', name='gemma_3_12b', friendly_name='Gemma 3 12B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma3:12b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-3-12b-it:free', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), 
KilnModel(family='gemma', name='gemma_3_27b', friendly_name='Gemma 3 27B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='gemma3:27b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='google/gemma-3-27b-it', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.huggingface: 'huggingface'>, model_id='google/gemma-3-27b-it', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='mixtral', name='mixtral_8x7b', friendly_name='Mixtral 8x7B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, 
model_id='mistralai/mixtral-8x7b-instruct', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='mixtral', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='qwen', name='qwq_32b', friendly_name='QwQ 32B (Qwen Reasoning)', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='qwen/qwq-32b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/qwq-32b', supports_structured_output=True, supports_data_gen=True, untested_model=False, 
provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='qwq', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, model_id='qwen-qwq-32b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='Qwen/QwQ-32B', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, 
supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='qwen', name='qwen_2p5_7b', friendly_name='Qwen 2.5 7B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='qwen/qwen-2.5-7b-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='qwen2.5', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='qwen', name='qwen_2p5_14b', friendly_name='Qwen 2.5 14B', providers=[KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id=None, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='Qwen/Qwen2.5-14B-Instruct', structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, 
logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='qwen2.5:14b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='qwen', name='qwen_2p5_72b', friendly_name='Qwen 2.5 72B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='qwen/qwen-2.5-72b-instruct', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='qwen2.5:72b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), 
KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/qwen2p5-72b-instruct', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling_weak: 'function_calling_weak'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id=None, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='Qwen/Qwen2.5-72B-Instruct', structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='mistral', name='mistral_small_3', friendly_name='Mistral Small 3', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='mistralai/mistral-small-24b-instruct-2501', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, 
model_id='mistral-small:24b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_3', friendly_name='DeepSeek V3', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-chat', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/deepseek-v3', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-V3', supports_structured_output=True, supports_data_gen=True, untested_model=False, 
provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1', friendly_name='DeepSeek R1', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, model_id='accounts/fireworks/models/deepseek-r1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:671b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, 
structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-R1', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_32b', friendly_name='DeepSeek R1 Distill Qwen 32B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1-distill-qwen-32b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:32b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, 
structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-14B', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_llama_70b', friendly_name='DeepSeek R1 Distill Llama 70B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1-distill-llama-70b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:70b', supports_structured_output=True, supports_data_gen=False, untested_model=False, 
provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-R1-Distill-Llama-70B', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_14b', friendly_name='DeepSeek R1 Distill Qwen 14B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1-distill-qwen-14b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=True, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:14b', supports_structured_output=True, supports_data_gen=False, 
untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-14B', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_llama_8b', friendly_name='DeepSeek R1 Distill Llama 8B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1-distill-llama-8b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=True, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:8b', supports_structured_output=True, 
supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_7b', friendly_name='DeepSeek R1 Distill Qwen 7B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:7b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_1p5b', friendly_name='DeepSeek R1 Distill Qwen 1.5B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='deepseek/deepseek-r1-distill-qwen-1.5b', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=True, require_openrouter_reasoning=True, logprobs_openrouter_options=False, openrouter_skip_required_parameters=True, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), 
KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='deepseek-r1:1.5b', supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.together_ai: 'together_ai'>, model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B', supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='dolphin', name='dolphin_2_9_8x22b', friendly_name='Dolphin 2.9 8x22B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, model_id='dolphin-mixtral:8x22b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, 
model_id='cognitivecomputations/dolphin-mixtral-8x22b', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)]), KilnModel(family='grok', name='grok_2', friendly_name='Grok 2', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, model_id='x-ai/grok-2-1212', supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False, supports_logprobs=False, r1_openrouter_options=False, require_openrouter_reasoning=False, logprobs_openrouter_options=False, openrouter_skip_required_parameters=False, thinking_level=None, ollama_model_aliases=None, anthropic_extended_thinking=False)])]