def generate_exp_name_and_labels(script_name: str, namespace: str, name: str = None,
                                 run_kind: RunKinds = RunKinds.TRAINING) -> Tuple[str, Dict[str, str]]:
    """
    Determine the experiment name and the labels that should be attached to it.

    :param script_name: path of the submitted script; only its basename is used
    :param namespace: Kubernetes namespace the experiment will live in
    :param name: optional user-provided experiment name
    :param run_kind: kind of run the experiment represents (training by default)
    :return: tuple of (experiment name, labels dict)
    :raises SubmitExperimentError: if a user-provided name collides with an existing
        experiment, an experiment in an invalid state, or still-terminating pods
    """
    if script_name:
        script_name = Path(script_name).name

    if name:
        # CASE 1: User passed a name explicitly. Reject it if an experiment with
        # this name already exists (with or without runs), or if pods from a
        # previous experiment of the same name are still terminating.
        experiment = Experiment.get(namespace=namespace, name=name)
        experiment_runs: List[Run] = experiment.get_runs() if experiment else []
        if experiment and experiment_runs:
            raise SubmitExperimentError(Texts.EXPERIMENT_ALREADY_EXISTS_ERROR_MSG.format(name=name))
        # Subcase: the experiment object exists but has no associated runs —
        # it is in an invalid/partial state and must not be reused.
        if experiment and not experiment_runs:
            raise SubmitExperimentError(Texts.EXPERIMENT_INVALID_STATE_MSG.format(name=name))
        # Artifacts from a previous experiment with the same name may linger.
        if list_pods(namespace=namespace, label_selector=f'runName={name}'):
            raise SubmitExperimentError(Texts.EXPERIMENT_PREV_EXP_STILL_TERMINATING)
        return name, prepare_label(script_name, name, name, run_kind=run_kind)

    # CASE 2: No name given, but an experiment with the same script name exists:
    # reuse the existing experiment name with a next-index postfix.
    generated_name, labels = generate_name_for_existing_exps(script_name, namespace, run_kind=run_kind)
    if generated_name:
        return generated_name, labels

    # CASE 3: No name given and no existing experiment matches the script name:
    # generate a fresh name, disambiguating with a counter if needed.
    result = generate_name(script_name)
    experiments = Experiment.list(namespace=namespace, name_filter=result)
    if experiments:
        result = f'{result}-{len(experiments)}'
    # Single exit point — the original duplicated this identical return inside
    # the branch above.
    return result, prepare_label(script_name, result, run_kind=run_kind)
def replace_initializing_runs(run_list: List[Run]):
    """
    Creates a list of runs with initializing runs replaced by fake runs created
    based on experiment data. Once an initializing run is seen for an experiment,
    subsequent runs of that experiment are suppressed from the result.

    :param run_list: list of runs to be checked
    :return: list without runs that are initialized at the moment
    """
    initializing_experiments: set = set()
    # Memoize Experiment.get results: the original issued one API call per run,
    # even for runs that were then discarded entirely.
    experiment_cache: dict = {}
    ret_list = []
    for run in run_list:
        exp_name = run.experiment_name
        if exp_name in initializing_experiments:
            # This experiment is already represented by a fake run — skip.
            continue
        cache_key = (exp_name, run.namespace)
        if cache_key not in experiment_cache:
            experiment_cache[cache_key] = Experiment.get(name=exp_name, namespace=run.namespace)
        experiment = experiment_cache[cache_key]
        if run.state is None or run.state == '':
            # Run is still initializing — stand in a fake run built from the
            # experiment, once per experiment.
            ret_list.append(create_fake_run(experiment))
            initializing_experiments.add(exp_name)
        else:
            run.template_version = experiment.template_version if experiment else None
            ret_list.append(run)
    return ret_list
def test_list_experiments_from_namespace(mock_platform_resources_api_client: CustomObjectsApi):
    # Build a response containing only the first experiment, as if the API
    # had been queried for a single namespace.
    single_namespace_response = dict(LIST_EXPERIMENTS_RESPONSE_RAW)
    single_namespace_response['items'] = single_namespace_response['items'][:1]
    mock_platform_resources_api_client.list_namespaced_custom_object.return_value = single_namespace_response

    assert Experiment.list(namespace='namespace-1') == [TEST_EXPERIMENTS[0]]
def list_k8s_experiments_by_label(namespace: str = None, label_selector: str = "") -> List[KubernetesObject]:
    """
    Return list of Kubernetes Experiments filtered [optionally] by labels.

    :param namespace: If provided, only experiments from this namespace will be returned
    :param str label_selector: A selector to restrict the list of returned objects
           by their labels. Defaults to everything.
    :return: List of Experiment objects
    :raises RuntimeError: if the Kubernetes response cannot be deserialized
    """
    raw = Experiment.list_raw_experiments(namespace, label_selector)
    # marshmallow 2.x: load() returns a (data, errors) tuple.
    loaded, errors = ExperimentKubernetesSchema().load(raw['items'], many=True)
    if errors:
        raise RuntimeError(Texts.K8S_RESPONSE_LOAD_ERROR_MSG.format(err=errors))
    return loaded
def test_list_experiments_name_filter(mock_platform_resources_api_client: CustomObjectsApi):
    # Cluster-wide listing returns the full raw payload; the name filter must
    # narrow the result down to the matching experiment only.
    api = mock_platform_resources_api_client
    api.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    filtered = Experiment.list(name_filter='test-experiment-new')

    assert filtered == [TEST_EXPERIMENTS[1]]
def test_list_experiments_filter_status(mock_platform_resources_api_client: CustomObjectsApi):
    # Listing with a state filter should keep only experiments in that state.
    api = mock_platform_resources_api_client
    api.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    creating_experiments = Experiment.list(state=ExperimentStatus.CREATING)

    assert creating_experiments == [TEST_EXPERIMENTS[0]]
TEMPLATE_NAME = 'template' TEMPLATE_NAMESPACE = 'template-namespace-test' TEST_EXPERIMENTS = [Experiment(name='test-experiment-old', parameters_spec=['a 1', 'b 2'], creation_timestamp='2018-04-26T13:43:01Z', namespace='namespace-1', state=ExperimentStatus.CREATING, template_name='test-ex-template', template_namespace='test-ex-namespace', metadata={'annotations': {'kubectl.kubernetes.io/last-applied-configuration': '{"apiVersion":"aipg.intel.com/v1",' '"kind":"Experiment",' '"metadata":{"annotations":{},' '"name":"test-experiment-old",' '"namespace":"namespace-1"},' '"spec":{"name":"test-experiment-old",' '"parameters-spec":["a 1", "b 2"],' '"state":"CREATING",' '"template-name":"test-ex-template",' '"template-namespace":"test-ex-namespace"}}\n'}, 'clusterName': '', 'creationTimestamp': '2018-04-26T13:43:01Z', 'labels': {'name_origin': 'test-experiment-new', 'script_name': 'mnist_single_node.py'}, 'generation': 1, 'name': 'test-experiment-old', 'namespace': 'namespace-1', 'resourceVersion': '1350906', 'selfLink': '/apis/aipg.intel.com/v1/namespaces/mciesiel-ef-stack/experiments/test-experiment', 'uid': 'bd298c60-4957-11e8-96f7-527100002000'}), Experiment(name='test-experiment-new', parameters_spec=['a 1', 'b 2'], creation_timestamp='2018-05-08T13:05:04Z', namespace='namespace-2', state=ExperimentStatus.SUBMITTED, template_name='test-ex-template',