kiln_ai.adapters.model_adapters.base_adapter

import json
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Dict, Literal, Tuple

from kiln_ai.adapters.ml_model_list import KilnModelProvider, StructuredOutputMode
from kiln_ai.adapters.parsers.json_parser import parse_json_string
from kiln_ai.adapters.parsers.parser_registry import model_parser_from_id
from kiln_ai.adapters.prompt_builders import prompt_builder_from_id
from kiln_ai.adapters.provider_tools import kiln_model_provider_from
from kiln_ai.adapters.run_output import RunOutput
from kiln_ai.datamodel import (
    DataSource,
    DataSourceType,
    Task,
    TaskOutput,
    TaskRun,
)
from kiln_ai.datamodel.json_schema import validate_schema
from kiln_ai.datamodel.task import RunConfig
from kiln_ai.utils.config import Config


@dataclass
class AdapterConfig:
    """
    An adapter config holds options that do NOT impact the output of the model.

    For example: whether the run is saved, or whether we request additional data like logprobs.
    """

    allow_saving: bool = True
    top_logprobs: int | None = None
    default_tags: list[str] | None = None


COT_FINAL_ANSWER_PROMPT = "Considering the above, return a final result."


class BaseAdapter(metaclass=ABCMeta):
    """Base class for AI model adapters that handle task execution.

    This abstract class provides the foundation for implementing model-specific adapters
    that can process tasks with structured or unstructured inputs/outputs. It handles
    input/output validation, prompt building, and run tracking.

    Attributes:
        prompt_builder (BasePromptBuilder): Builder for constructing prompts for the model
        run_config (RunConfig): The run configuration, including the task, model and prompt
        output_schema (dict | None): JSON schema for validating structured outputs
        input_schema (dict | None): JSON schema for validating structured inputs
    """

    def __init__(
        self,
        run_config: RunConfig,
        config: AdapterConfig | None = None,
    ):
        self.run_config = run_config
        self.prompt_builder = prompt_builder_from_id(
            run_config.prompt_id, run_config.task
        )
        self._model_provider: KilnModelProvider | None = None

        self.output_schema = self.task().output_json_schema
        self.input_schema = self.task().input_json_schema
        self.base_adapter_config = config or AdapterConfig()

    def task(self) -> Task:
        return self.run_config.task

    def model_provider(self) -> KilnModelProvider:
        """
        Lazy load the model provider for this adapter.
        """
        if self._model_provider is not None:
            return self._model_provider
        if not self.run_config.model_name or not self.run_config.model_provider_name:
            raise ValueError("model_name and model_provider_name must be provided")
        self._model_provider = kiln_model_provider_from(
            self.run_config.model_name, self.run_config.model_provider_name
        )
        if not self._model_provider:
            raise ValueError(
                f"model_provider_name {self.run_config.model_provider_name} not found for model {self.run_config.model_name}"
            )
        return self._model_provider

    async def invoke(
        self,
        input: Dict | str,
        input_source: DataSource | None = None,
    ) -> TaskRun:
        run, _ = await self.invoke_returning_run_output(input, input_source)
        return run

    async def invoke_returning_run_output(
        self,
        input: Dict | str,
        input_source: DataSource | None = None,
    ) -> Tuple[TaskRun, RunOutput]:
        # Validate input
        if self.input_schema is not None:
            if not isinstance(input, dict):
                raise ValueError(f"structured input is not a dict: {input}")
            validate_schema(input, self.input_schema)

        # Run
        run_output = await self._run(input)

        # Parse
        provider = self.model_provider()
        parser = model_parser_from_id(provider.parser)(
            structured_output=self.has_structured_output()
        )
        parsed_output = parser.parse_output(original_output=run_output)

        # Validate output
        if self.output_schema is not None:
            # Parse JSON to dict if we have structured output
            if isinstance(parsed_output.output, str):
                parsed_output.output = parse_json_string(parsed_output.output)

            if not isinstance(parsed_output.output, dict):
                raise RuntimeError(
                    f"structured response is not a dict: {parsed_output.output}"
                )
            validate_schema(parsed_output.output, self.output_schema)
        else:
            if not isinstance(parsed_output.output, str):
                raise RuntimeError(
                    f"response is not a string for non-structured task: {parsed_output.output}"
                )

        # Validate reasoning content is present (if reasoning)
        if provider.reasoning_capable and (
            not parsed_output.intermediate_outputs
            or "reasoning" not in parsed_output.intermediate_outputs
        ):
            raise RuntimeError(
                "Reasoning is required for this model, but no reasoning was returned."
            )

        # Generate the run and output
        run = self.generate_run(input, input_source, parsed_output)

        # Save the run if configured to do so, and we have a path to save to
        if (
            self.base_adapter_config.allow_saving
            and Config.shared().autosave_runs
            and self.task().path is not None
        ):
            run.save_to_file()
        else:
            # Clear the ID to indicate it's not persisted
            run.id = None

        return run, run_output

    def has_structured_output(self) -> bool:
        return self.output_schema is not None

    @abstractmethod
    def adapter_name(self) -> str:
        pass

    @abstractmethod
    async def _run(self, input: Dict | str) -> RunOutput:
        pass

    def build_prompt(self) -> str:
        # The prompt builder needs to know if we want to inject formatting instructions
        provider = self.model_provider()
        add_json_instructions = self.has_structured_output() and (
            provider.structured_output_mode == StructuredOutputMode.json_instructions
            or provider.structured_output_mode
            == StructuredOutputMode.json_instruction_and_object
        )

        return self.prompt_builder.build_prompt(
            include_json_instructions=add_json_instructions
        )

    def run_strategy(
        self,
    ) -> Tuple[Literal["cot_as_message", "cot_two_call", "basic"], str | None]:
        # Determine the run strategy for COT prompting. 3 options:
        # 1. "Thinking" LLM designed to output thinking in a structured format, plus a COT prompt: we make 1 call to the LLM, which outputs thinking in a structured format. We include the thinking instructions as a message.
        # 2. Normal LLM with a COT prompt: we make 2 calls to the LLM - one for thinking and one for the final response. This lets us use the LLM's structured output modes (json_schema, tools, etc.), which can't be used in a single call. It also separates the thinking from the final response.
        # 3. No chain of thought: we make 1 call to the LLM, with no COT prompt.
        cot_prompt = self.prompt_builder.chain_of_thought_prompt()
        reasoning_capable = self.model_provider().reasoning_capable

        if cot_prompt and reasoning_capable:
            # 1: "Thinking" LLM designed to output thinking in a structured format
            # A simple message with the COT prompt appended to the message list is sufficient
            return "cot_as_message", cot_prompt
        elif cot_prompt:
            # 2: Unstructured output with COT
            # Two calls to separate the thinking from the final response
            return "cot_two_call", cot_prompt
        else:
            return "basic", None

    # Create a run and task output
    def generate_run(
        self, input: Dict | str, input_source: DataSource | None, run_output: RunOutput
    ) -> TaskRun:
        # Convert input and output to JSON strings if they are dictionaries
        input_str = (
            json.dumps(input, ensure_ascii=False) if isinstance(input, dict) else input
        )
        output_str = (
            json.dumps(run_output.output, ensure_ascii=False)
            if isinstance(run_output.output, dict)
            else run_output.output
        )

        # If no input source is provided, use the human data source
        if input_source is None:
            input_source = DataSource(
                type=DataSourceType.human,
                properties={"created_by": Config.shared().user_id},
            )

        new_task_run = TaskRun(
            parent=self.task(),
            input=input_str,
            input_source=input_source,
            output=TaskOutput(
                output=output_str,
                # Synthetic since an adapter, not a human, is creating this
                source=DataSource(
                    type=DataSourceType.synthetic,
                    properties=self._properties_for_task_output(),
                ),
            ),
            intermediate_outputs=run_output.intermediate_outputs,
            tags=self.base_adapter_config.default_tags or [],
        )

        return new_task_run

    def _properties_for_task_output(self) -> Dict[str, str | int | float]:
        props = {}

        # adapter info
        props["adapter_name"] = self.adapter_name()
        props["model_name"] = self.run_config.model_name
        props["model_provider"] = self.run_config.model_provider_name
        props["prompt_id"] = self.run_config.prompt_id

        return props

@dataclass
class AdapterConfig:

An adapter config holds options that do NOT impact the output of the model.

For example: whether the run is saved, or whether we request additional data like logprobs.

AdapterConfig( allow_saving: bool = True, top_logprobs: int | None = None, default_tags: list[str] | None = None)
allow_saving: bool = True
top_logprobs: int | None = None
default_tags: list[str] | None = None
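
A minimal construction sketch based on the fields above (the values are illustrative):

config = AdapterConfig(
    allow_saving=False,           # never persist TaskRuns, even if autosave is enabled
    top_logprobs=5,               # request the top 5 token logprobs from the provider
    default_tags=["experiment"],  # tags applied to any generated TaskRun
)

None of these fields change what the model produces; they only control persistence and extra metadata on the run.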
COT_FINAL_ANSWER_PROMPT = 'Considering the above, return a final result.'
class BaseAdapter:

Base class for AI model adapters that handle task execution.

This abstract class provides the foundation for implementing model-specific adapters that can process tasks with structured or unstructured inputs/outputs. It handles input/output validation, prompt building, and run tracking.

Attributes:
    prompt_builder (BasePromptBuilder): Builder for constructing prompts for the model
    run_config (RunConfig): The run configuration, including the task, model and prompt
    output_schema (dict | None): JSON schema for validating structured outputs
    input_schema (dict | None): JSON schema for validating structured inputs

run_config
prompt_builder
output_schema
input_schema
base_adapter_config
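
Only adapter_name and _run are abstract, so a concrete adapter can be small. The sketch below is illustrative, not a real Kiln adapter: EchoAdapter and its echo behavior are hypothetical, and it assumes RunOutput accepts the output and intermediate_outputs fields shown in the source above.

from typing import Dict

from kiln_ai.adapters.model_adapters.base_adapter import BaseAdapter
from kiln_ai.adapters.run_output import RunOutput


class EchoAdapter(BaseAdapter):
    # Hypothetical adapter: echoes the input back instead of calling a model.
    def adapter_name(self) -> str:
        return "echo_adapter"

    async def _run(self, input: Dict | str) -> RunOutput:
        prompt = self.build_prompt()  # system prompt from the configured prompt builder
        # A real adapter would send `prompt` and `input` to a model here.
        return RunOutput(output=str(input), intermediate_outputs=None)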
def task(self) -> kiln_ai.datamodel.Task:
def model_provider(self) -> kiln_ai.adapters.ml_model_list.KilnModelProvider:

Lazy load the model provider for this adapter.

async def invoke( self, input: Union[Dict, str], input_source: kiln_ai.datamodel.DataSource | None = None) -> kiln_ai.datamodel.TaskRun:
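A hypothetical call from an async context, assuming adapter is a concrete BaseAdapter subclass whose task declares an input schema with a topic field:

task_run = await adapter.invoke({"topic": "tides"})
print(task_run.output.output)  # the validated output; a JSON string for structured tasks

The input is validated against the task's input schema (if any), and the returned TaskRun is saved automatically when allow_saving and autosave_runs are enabled and the task has a path.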
async def invoke_returning_run_output( self, input: Union[Dict, str], input_source: kiln_ai.datamodel.DataSource | None = None) -> Tuple[kiln_ai.datamodel.TaskRun, kiln_ai.adapters.run_output.RunOutput]:
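The lower-level variant also returns the raw RunOutput from _run alongside the TaskRun. Note that the reasoning check runs on the parsed output, so what the raw RunOutput contains depends on the provider's parser; the sketch below is illustrative:

run, raw = await adapter.invoke_returning_run_output({"topic": "tides"})
if raw.intermediate_outputs:
    print(raw.intermediate_outputs.get("reasoning"))  # required (post-parse) for reasoning-capable providers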
def has_structured_output(self) -> bool:
@abstractmethod
def adapter_name(self) -> str:
def build_prompt(self) -> str:
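Whether JSON formatting instructions are appended depends on the task having an output schema and on the provider's structured_output_mode, as the source shows. An illustrative check of the same condition:

from kiln_ai.adapters.ml_model_list import StructuredOutputMode

provider = adapter.model_provider()
wants_json_instructions = adapter.has_structured_output() and provider.structured_output_mode in (
    StructuredOutputMode.json_instructions,
    StructuredOutputMode.json_instruction_and_object,
)
prompt = adapter.build_prompt()  # includes JSON instructions only when the flag above is true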
def run_strategy( self) -> Tuple[Literal['cot_as_message', 'cot_two_call', 'basic'], str | None]:
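A sketch of how a caller might dispatch on the returned strategy (the branch bodies are placeholders):

strategy, cot_prompt = adapter.run_strategy()
if strategy == "cot_as_message":
    # Reasoning-capable model: append cot_prompt as a message; one call returns
    # structured thinking plus the answer.
    ...
elif strategy == "cot_two_call":
    # First call elicits thinking with cot_prompt; a second call (e.g. using
    # COT_FINAL_ANSWER_PROMPT) returns the final answer via structured output modes.
    ...
else:  # "basic"
    # Single call, no chain-of-thought prompt (cot_prompt is None here).
    ...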
def generate_run( self, input: Union[Dict, str], input_source: kiln_ai.datamodel.DataSource | None, run_output: kiln_ai.adapters.run_output.RunOutput) -> kiln_ai.datamodel.TaskRun:
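An illustrative use, assuming the RunOutput constructor shown in the source above. When input_source is None, a human DataSource with the configured user_id is filled in, and the output's source is always marked synthetic:

run = adapter.generate_run(
    input={"topic": "tides"},
    input_source=None,  # defaults to a human DataSource with Config.shared().user_id
    run_output=RunOutput(output={"summary": "..."}, intermediate_outputs=None),
)
assert run.output.source.type == DataSourceType.synthetic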