def get_runtime_handler(kind: str) -> BaseRuntimeHandler:
    """Return the (cached, singleton) runtime handler for the given kind.

    Handler instances live in the module-level
    ``runtime_handler_instances_cache``. The mpijob kind is special: its
    handler class depends on the MPIJob CRD version installed in the cluster,
    so its cache entry is a nested dict keyed by CRD version.
    """
    global runtime_handler_instances_cache
    if kind == RuntimeKinds.mpijob:
        crd_version = resolve_mpijob_crd_version()
        handler_class_by_crd_version = {
            MPIJobCRDVersions.v1alpha1: MpiV1Alpha1RuntimeHandler,
            MPIJobCRDVersions.v1: MpiV1RuntimeHandler,
        }
        # Nested cache: {RuntimeKinds.mpijob: {crd_version: handler_instance}}
        mpijob_cache = runtime_handler_instances_cache.setdefault(
            RuntimeKinds.mpijob, {}
        )
        if not mpijob_cache.get(crd_version):
            mpijob_cache[crd_version] = handler_class_by_crd_version[crd_version]()
        return mpijob_cache[crd_version]
    handler_class_by_kind = {
        RuntimeKinds.dask: DaskRuntimeHandler,
        RuntimeKinds.spark: SparkRuntimeHandler,
        RuntimeKinds.remotespark: RemoteSparkRuntimeHandler,
        RuntimeKinds.job: KubeRuntimeHandler,
    }
    # Raises KeyError for an unknown kind, matching dict-lookup semantics.
    handler_class = handler_class_by_kind[kind]
    if not runtime_handler_instances_cache.get(kind):
        runtime_handler_instances_cache[kind] = handler_class()
    return runtime_handler_instances_cache[kind]
def get_runtime_class(kind: str):
    """Resolve the runtime class for the given runtime kind.

    mpijob is resolved dynamically by the installed MPIJob CRD version, and
    spark by the installed spark operator version; all other kinds come from
    a static map.

    :param kind: one of the RuntimeKinds values
    :raises ValueError: if the resolved spark operator version is unsupported
    :raises KeyError: if the kind is unknown
    """
    if kind == RuntimeKinds.mpijob:
        mpijob_crd_version = resolve_mpijob_crd_version()
        crd_version_to_runtime = {
            MPIJobCRDVersions.v1alpha1: MpiRuntimeV1Alpha1,
            MPIJobCRDVersions.v1: MpiRuntimeV1,
        }
        return crd_version_to_runtime[mpijob_crd_version]
    if kind == RuntimeKinds.spark:
        spark_operator_version = resolve_spark_operator_version()
        if spark_operator_version == 2:
            return Spark2Runtime
        elif spark_operator_version == 3:
            return Spark3Runtime
        # Previously this fell through to the generic map below, which has no
        # "spark" entry, surfacing a confusing KeyError('spark'). Fail with an
        # explicit, actionable error instead.
        raise ValueError(
            f"Unsupported spark operator version: {spark_operator_version}"
        )
    kind_runtime_map = {
        RuntimeKinds.remote: RemoteRuntime,
        RuntimeKinds.nuclio: RemoteRuntime,
        RuntimeKinds.serving: ServingRuntime,
        RuntimeKinds.dask: DaskCluster,
        RuntimeKinds.job: KubejobRuntime,
        RuntimeKinds.local: LocalRuntime,
        RuntimeKinds.remotespark: RemoteSparkRuntime,
    }
    return kind_runtime_map[kind]
def health():
    """Return a dict of basic server configuration for the health endpoint."""
    mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
    health_info = {
        "version": config.version,
        "namespace": config.namespace,
        # Docker registry comes from the environment here, defaulting to empty.
        "docker_registry": environ.get('DEFAULT_DOCKER_REGISTRY', ''),
        "remote_host": config.remote_host,
        "mpijob_crd_version": mpijob_crd_version,
        "ui_url": config.ui_url,
        "artifact_path": config.artifact_path,
    }
    return health_info
def health():
    """Return a dict of basic server configuration for the health endpoint."""
    mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
    health_info = {
        "version": config.version,
        "namespace": config.namespace,
        "docker_registry": config.httpdb.builder.docker_registry,
        "remote_host": config.remote_host,
        "mpijob_crd_version": mpijob_crd_version,
        "ui_url": config.ui_url,
        "artifact_path": config.artifact_path,
        "spark_app_image": config.spark_app_image,
        "spark_app_image_tag": config.spark_app_image_tag,
    }
    return health_info
def get_client_spec(self):
    """Assemble the ClientSpec describing this server's configuration.

    Plain fields are always sent. Fields passed through
    ``_get_config_value_if_not_default`` are sent only when explicitly set /
    changed from the default, so the client can tell when to prefer the
    server's value over its own.
    """
    mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
    # Short alias for the "only send when not default" accessor.
    non_default = self._get_config_value_if_not_default
    return mlrun.api.schemas.ClientSpec(
        version=config.version,
        namespace=config.namespace,
        docker_registry=config.httpdb.builder.docker_registry,
        remote_host=config.remote_host,
        mpijob_crd_version=mpijob_crd_version,
        ui_url=config.resolve_ui_url(),
        artifact_path=config.artifact_path,
        spark_app_image=config.spark_app_image,
        spark_app_image_tag=config.spark_app_image_tag,
        spark_history_server_path=config.spark_history_server_path,
        kfp_image=config.kfp_image,
        dask_kfp_image=config.dask_kfp_image,
        api_url=config.httpdb.api_url,
        nuclio_version=self._resolve_nuclio_version(),
        # These have no default value: omitted when unset. Since their default
        # and "not set" are both empty, the same accessor works for them too.
        default_function_priority_class_name=non_default(
            "default_function_priority_class_name"
        ),
        valid_function_priority_class_names=non_default(
            "valid_function_priority_class_names"
        ),
        # These have defaults: sent only when changed from the default,
        # otherwise the client can't distinguish server value from its own.
        ui_projects_prefix=non_default("ui.projects_prefix"),
        scrape_metrics=non_default("scrape_metrics"),
        hub_url=non_default("hub_url"),
        default_function_node_selector=non_default("default_function_node_selector"),
        igz_version=non_default("igz_version"),
        auto_mount_type=non_default("storage.auto_mount_type"),
        auto_mount_params=non_default("storage.auto_mount_params"),
        spark_operator_version=non_default("spark_operator_version"),
        default_tensorboard_logs_path=non_default("default_tensorboard_logs_path"),
    )
def health():
    """Return a dict of basic server configuration for the health endpoint."""
    mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
    health_info = {
        "version": config.version,
        "namespace": config.namespace,
        "docker_registry": config.httpdb.builder.docker_registry,
        "remote_host": config.remote_host,
        "mpijob_crd_version": mpijob_crd_version,
        "ui_url": config.resolve_ui_url(),
        "ui_projects_prefix": config.ui.projects_prefix,
        "artifact_path": config.artifact_path,
        "spark_app_image": config.spark_app_image,
        "spark_app_image_tag": config.spark_app_image_tag,
        "kfp_image": config.kfp_image,
        "dask_kfp_image": config.dask_kfp_image,
        "api_url": config.httpdb.api_url,
        "scrape_metrics": config.scrape_metrics,
    }
    return health_info
def get_runtime_class(kind: str):
    """Resolve the runtime class for the given runtime kind.

    mpijob is resolved dynamically by the installed MPIJob CRD version; all
    other kinds come from a static map. Raises KeyError for an unknown kind.
    """
    if kind == RuntimeKinds.mpijob:
        crd_version = resolve_mpijob_crd_version()
        runtime_by_crd_version = {
            MPIJobCRDVersions.v1alpha1: MpiRuntimeV1Alpha1,
            MPIJobCRDVersions.v1: MpiRuntimeV1,
        }
        return runtime_by_crd_version[crd_version]
    runtime_by_kind = {
        RuntimeKinds.remote: RemoteRuntime,
        RuntimeKinds.nuclio: RemoteRuntime,
        RuntimeKinds.dask: DaskCluster,
        RuntimeKinds.job: KubejobRuntime,
        RuntimeKinds.spark: SparkRuntime,
    }
    return runtime_by_kind[kind]
def health():
    """Return a dict of basic server configuration for the health endpoint."""
    mpijob_crd_version = resolve_mpijob_crd_version(api_context=True)
    health_info = {
        "version": config.version,
        "namespace": config.namespace,
        "docker_registry": config.httpdb.builder.docker_registry,
        "remote_host": config.remote_host,
        "mpijob_crd_version": mpijob_crd_version,
        "ui_url": config.resolve_ui_url(),
        "artifact_path": config.artifact_path,
        "spark_app_image": config.spark_app_image,
        "spark_app_image_tag": config.spark_app_image_tag,
        "kfp_image": config.kfp_image,
        "dask_kfp_image": config.dask_kfp_image,
        "api_url": config.httpdb.api_url,
    }
    # These have defaults, so they are sent only when changed from the default
    # (otherwise clients can't tell server value from client value).
    health_info["ui_projects_prefix"] = _get_config_value_if_not_default(
        "ui.projects_prefix"
    )
    health_info["scrape_metrics"] = _get_config_value_if_not_default("scrape_metrics")
    health_info["hub_url"] = _get_config_value_if_not_default("hub_url")
    health_info["default_function_node_selector"] = _get_config_value_if_not_default(
        "default_function_node_selector"
    )
    return health_info