wallaroo.engine_config

 1import json
 2from typing import Dict, Optional
 3
 4import gql  # type: ignore
 5import yaml
 6
 7from .object import *
 8
 9
class EngineConfig:
    """Wraps an engine config.

    Captures the resource settings and (for standalone mode) the filesystem
    layout used when deploying an inference engine. Instances can be
    serialized to JSON (:meth:`to_json`) or YAML (:meth:`to_yaml`).
    """

    # NOTE: refer to /conductor/helm/payloads/deployment-manager/helm/default-values and
    # /conductor/helm/payloads/deployment-manager/helm/orchestra-deployment.yaml
    # for reasonable defaults
    def __init__(
        self,
        cpus: int,
        inference_channel_size: Optional[int] = None,
        model_concurrency: Optional[int] = None,
        pipeline_config_directory: Optional[str] = None,
        model_config_directory: Optional[str] = None,
        model_directory: Optional[str] = None,
        audit_logging: bool = False,
        standalone: bool = False,
    ) -> None:
        """Create an engine config.

        :param cpus: Number of CPUs allotted to the engine.
        :param inference_channel_size: Inference request channel depth;
            falls back to 10000 when unset (NOTE: a falsy value such as 0
            also falls back — preserved for backward compatibility).
        :param model_concurrency: Concurrent model executions; falls back
            to 2 when unset (same falsy-fallback caveat as above).
        :param pipeline_config_directory: Standalone-mode pipeline config
            directory; defaults to "/pipelineconfig" at serialization time.
        :param model_config_directory: Standalone-mode model config
            directory; defaults to "/modelconfig" at serialization time.
        :param model_directory: Standalone-mode model directory; defaults
            to "/model" at serialization time.
        :param audit_logging: Whether audit logging is enabled.
        :param standalone: Whether this config targets standalone mode,
            which adds extra keys in :meth:`_to_dict`.
        """
        self._cpus = cpus
        # `or` keeps the original truthiness-based fallback semantics.
        self._inference_channel_size = inference_channel_size or 10000
        self._model_concurrency = model_concurrency or 2
        self._audit_logging = audit_logging
        self._pipeline_config_directory = pipeline_config_directory
        self._model_config_directory = model_config_directory
        self._model_directory = model_directory
        self._standalone = standalone

    @staticmethod
    def as_standalone(
        cpus: int,
        inference_channel_size: Optional[int] = None,
        model_concurrency: Optional[int] = None,
        pipeline_config_directory: Optional[str] = None,
        model_config_directory: Optional[str] = None,
        model_directory: Optional[str] = None,
    ) -> "EngineConfig":
        """Creates an EngineConfig for use with standalone mode.

        Equivalent to the constructor with ``standalone=True`` and
        ``audit_logging=False``.
        """
        return EngineConfig(
            cpus,
            inference_channel_size=inference_channel_size,
            model_concurrency=model_concurrency,
            pipeline_config_directory=pipeline_config_directory,
            model_config_directory=model_config_directory,
            model_directory=model_directory,
            audit_logging=False,
            standalone=True,
        )

    # TODO: Is there a better way to keep this in sync with our helm chart?
    def _to_dict(self) -> Dict[str, Any]:
        """Generate a dictionary representation for use in conversion to json or yaml."""
        config: Dict[str, Any] = {
            "cpus": self._cpus,
        }
        # Both values are always truthy after __init__'s fallbacks, so these
        # keys are effectively always present; guards kept for safety.
        if self._inference_channel_size:
            config["inference_channel_size"] = self._inference_channel_size
        if self._model_concurrency:
            config["model_server"] = {"model_concurrency": self._model_concurrency}
        config["audit_logging"] = {"enabled": self._audit_logging}
        if self._standalone:
            # Standalone engines read their inputs over HTTP and resolve
            # models/configs from fixed directories inside the container.
            if "model_server" in config:
                config["model_server"]["model_dir"] = "/models"
            config["input_type"] = "json"
            config["input_protocol"] = "http"
            config["onnx"] = {"intra_op_parallelism_threads": self._cpus}
            config["k8s"] = {"namespace": "wallaroo-standalone"}
            config["sink"] = {"type": "http_response"}
            config["directories"] = {
                "model_config": self._model_config_directory or "/modelconfig",
                "pipeline_config": self._pipeline_config_directory
                or "/pipelineconfig",
                "model": self._model_directory or "/model",
            }
        return config

    def to_json(self) -> str:
        """Returns a json representation of this object"""
        return json.dumps(self._to_dict())

    def to_yaml(self) -> str:
        """Returns a yaml representation of this object for use with standalone mode"""
        return yaml.dump(self._to_dict())
class EngineConfig:
 11class EngineConfig:
 12    """Wraps an engine config."""
 13
 14    # NOTE: refer to /conductor/helm/payloads/deployment-manager/helm/default-values and
 15    # /conductor/helm/payloads/deployment-manager/helm/orchestra-deployment.yaml
 16    # for reasonable defaults
 17    def __init__(
 18        self,
 19        cpus: int,
 20        inference_channel_size: Optional[int] = None,
 21        model_concurrency: Optional[int] = None,
 22        pipeline_config_directory: Optional[str] = None,
 23        model_config_directory: Optional[str] = None,
 24        model_directory: Optional[str] = None,
 25        audit_logging: bool = False,
 26        standalone: bool = False,
 27    ) -> None:
 28        self._cpus = cpus
 29        self._inference_channel_size = (
 30            inference_channel_size if inference_channel_size else 10000
 31        )
 32        self._model_concurrency = model_concurrency if model_concurrency else 2
 33        self._audit_logging = audit_logging
 34        self._pipeline_config_directory = pipeline_config_directory
 35        self._model_config_directory = model_config_directory
 36        self._model_directory = model_directory
 37        self._standalone = standalone
 38
 39    @staticmethod
 40    def as_standalone(
 41        cpus: int,
 42        inference_channel_size: Optional[int] = None,
 43        model_concurrency: Optional[int] = None,
 44        pipeline_config_directory: Optional[str] = None,
 45        model_config_directory: Optional[str] = None,
 46        model_directory: Optional[str] = None,
 47    ) -> "EngineConfig":
 48        """Creates an EngineConfig for use with standalone mode"""
 49        return EngineConfig(
 50            cpus,
 51            inference_channel_size=inference_channel_size,
 52            model_concurrency=model_concurrency,
 53            pipeline_config_directory=pipeline_config_directory,
 54            model_config_directory=model_config_directory,
 55            model_directory=model_directory,
 56            audit_logging=False,
 57            standalone=True,
 58        )
 59
 60    # TODO: Is there a better way to keep this in sync with our helm chart?
 61    def _to_dict(self) -> Dict[str, Any]:
 62        """Generate a dictionary representation for use in conversion to json or yaml"""
 63        config: Dict[str, Any] = {
 64            "cpus": self._cpus,
 65        }
 66        if self._inference_channel_size:
 67            config["inference_channel_size"] = self._inference_channel_size
 68        if self._model_concurrency:
 69            config["model_server"] = {"model_concurrency": self._model_concurrency}
 70        config["audit_logging"] = {"enabled": self._audit_logging}
 71        if self._standalone:
 72            if "model_server" in config:
 73                config["model_server"]["model_dir"] = "/models"
 74            config["input_type"] = "json"
 75            config["input_protocol"] = "http"
 76            config["onnx"] = {"intra_op_parallelism_threads": self._cpus}
 77            config["k8s"] = {"namespace": "wallaroo-standalone"}
 78            config["sink"] = {"type": "http_response"}
 79            config["directories"] = {
 80                "model_config": (
 81                    self._model_config_directory
 82                    if self._model_config_directory
 83                    else "/modelconfig"
 84                ),
 85                "pipeline_config": (
 86                    self._pipeline_config_directory
 87                    if self._pipeline_config_directory
 88                    else "/pipelineconfig"
 89                ),
 90                "model": (self._model_directory if self._model_directory else "/model"),
 91            }
 92        return config
 93
 94    def to_json(self) -> str:
 95        """Returns a json representation of this object"""
 96        return json.dumps(self._to_dict())
 97
 98    def to_yaml(self) -> str:
 99        """Returns a yaml representation of this object for use with standalone mode"""
100        return yaml.dump(self._to_dict())

Wraps an engine config.

EngineConfig( cpus: int, inference_channel_size: Optional[int] = None, model_concurrency: Optional[int] = None, pipeline_config_directory: Optional[str] = None, model_config_directory: Optional[str] = None, model_directory: Optional[str] = None, audit_logging: bool = False, standalone: bool = False)
17    def __init__(
18        self,
19        cpus: int,
20        inference_channel_size: Optional[int] = None,
21        model_concurrency: Optional[int] = None,
22        pipeline_config_directory: Optional[str] = None,
23        model_config_directory: Optional[str] = None,
24        model_directory: Optional[str] = None,
25        audit_logging: bool = False,
26        standalone: bool = False,
27    ) -> None:
28        self._cpus = cpus
29        self._inference_channel_size = (
30            inference_channel_size if inference_channel_size else 10000
31        )
32        self._model_concurrency = model_concurrency if model_concurrency else 2
33        self._audit_logging = audit_logging
34        self._pipeline_config_directory = pipeline_config_directory
35        self._model_config_directory = model_config_directory
36        self._model_directory = model_directory
37        self._standalone = standalone
@staticmethod
def as_standalone( cpus: int, inference_channel_size: Optional[int] = None, model_concurrency: Optional[int] = None, pipeline_config_directory: Optional[str] = None, model_config_directory: Optional[str] = None, model_directory: Optional[str] = None) -> wallaroo.engine_config.EngineConfig:
39    @staticmethod
40    def as_standalone(
41        cpus: int,
42        inference_channel_size: Optional[int] = None,
43        model_concurrency: Optional[int] = None,
44        pipeline_config_directory: Optional[str] = None,
45        model_config_directory: Optional[str] = None,
46        model_directory: Optional[str] = None,
47    ) -> "EngineConfig":
48        """Creates an EngineConfig for use with standalone mode"""
49        return EngineConfig(
50            cpus,
51            inference_channel_size=inference_channel_size,
52            model_concurrency=model_concurrency,
53            pipeline_config_directory=pipeline_config_directory,
54            model_config_directory=model_config_directory,
55            model_directory=model_directory,
56            audit_logging=False,
57            standalone=True,
58        )

Creates an EngineConfig for use with standalone mode

def to_json(self) -> str:
94    def to_json(self) -> str:
95        """Returns a json representation of this object"""
96        return json.dumps(self._to_dict())

Returns a json representation of this object

def to_yaml(self) -> str:
 98    def to_yaml(self) -> str:
 99        """Returns a yaml representation of this object for use with standalone mode"""
100        return yaml.dump(self._to_dict())

Returns a yaml representation of this object for use with standalone mode