LLM Configuration File

To edit your LLM configuration, open this file with your preferred text editor:

{{ llm_file_path }}

Default Configuration Example

"""
LLM configuration for Local Deep Research.

This file is loaded as a Python module, allowing for complex
customization of LLM behavior.
"""

# Default model settings
DEFAULT_MODEL = "mistral"
DEFAULT_MODEL_TYPE = "ollama"  # Options: ollama, openai, anthropic
DEFAULT_TEMPERATURE = 0.7
MAX_TOKENS = 30000

# API keys and endpoints (consider using environment variables instead)
USE_OPENAI_ENDPOINT = False
OPENAI_ENDPOINT_URL = "https://openrouter.ai/api/v1"
OPENAI_ENDPOINT_REQUIRES_MODEL = True
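
# Illustrative sketch (hypothetical variable name, not referenced elsewhere in
# this file): pulling the endpoint API key from the environment instead of
# storing it in this configuration file, as the note above suggests.
import os
OPENAI_ENDPOINT_API_KEY = os.environ.get("OPENAI_API_KEY", "")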

# Custom model loading function
def get_llm(model_name=None, model_type=None, temperature=None, **kwargs):
    """
    Get a language model instance.
    
    Args:
        model_name: Name of the model to use
        model_type: Type of model provider ("ollama", "openai", or "anthropic")
        temperature: Model temperature
        **kwargs: Additional parameters
        
    Returns:
        A LangChain language model instance
    """
    # Use defaults for any argument that was not provided
    model_name = model_name or DEFAULT_MODEL
    model_type = model_type or DEFAULT_MODEL_TYPE
    # Compare against None so an explicit temperature of 0 is not overridden
    temperature = DEFAULT_TEMPERATURE if temperature is None else temperature
    
    # If using Ollama
    if model_type == "ollama":
        from langchain_ollama import ChatOllama
        return ChatOllama(
            model=model_name,
            temperature=temperature,
            **kwargs
        )
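
    # Illustrative sketch (an assumption, not part of the stock example): if the
    # OpenAI-compatible endpoint settings above are enabled, the model could be
    # loaded through langchain_openai. ChatOpenAI reads its API key from the
    # OPENAI_API_KEY environment variable by default; parameter names may differ
    # slightly between langchain_openai versions.
    if model_type == "openai" and USE_OPENAI_ENDPOINT:
        from langchain_openai import ChatOpenAI
        return ChatOpenAI(
            model=model_name,
            temperature=temperature,
            base_url=OPENAI_ENDPOINT_URL,
            **kwargs
        )
    # An "anthropic" branch could follow the same pattern using langchain_anthropic.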
    
    # Default fallback for unrecognized model types
    from langchain_ollama import ChatOllama
    return ChatOllama(
        model=DEFAULT_MODEL,
        temperature=DEFAULT_TEMPERATURE,
        **kwargs
    )
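
Once the configuration file is saved, application code can call get_llm to obtain a model instance. The snippet below is a minimal usage sketch: the import path is a placeholder for wherever your configuration module lives, and it assumes a local Ollama server is running with the requested model already pulled.

from llm_config import get_llm  # placeholder import path; adjust to your setup

# Request an Ollama-backed chat model with an explicit low temperature
llm = get_llm(model_name="mistral", model_type="ollama", temperature=0.2)

# LangChain chat models expose invoke() for a single request/response exchange
response = llm.invoke("Briefly describe what this research assistant does.")
print(response.content)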