def generate_exp_name_and_labels(script_name: str, namespace: str, name: str = None,
                                 run_kind: RunKinds = RunKinds.TRAINING) -> Tuple[str, Dict[str, str]]:
    """
    Resolve the name and the set of labels to use for a submitted experiment.

    :param script_name: path/name of the submitted script; only its basename is used
    :param namespace: k8s namespace the experiment will be created in
    :param run_kind: kind of the run, passed through to the generated labels
    :param name: optional explicit experiment name requested by the user
    :return: tuple of (experiment name, labels dict)
    :raises SubmitExperimentError: when the requested name collides with an existing
        experiment, an experiment in an invalid state, or leftover pods from a
        previous experiment with the same name
    """
    if script_name:
        script_name = Path(script_name).name

    if name:
        # CASE 1: user passed a name explicitly - use it, but fail if an experiment
        # with this name already exists.
        experiment = Experiment.get(namespace=namespace, name=name)
        experiment_runs: List[Run] = experiment.get_runs() if experiment else []
        if experiment and experiment_runs:
            raise SubmitExperimentError(Texts.EXPERIMENT_ALREADY_EXISTS_ERROR_MSG.format(name=name))
        # subcase when experiment exists but has no associated runs - invalid state
        if experiment and not experiment_runs:
            raise SubmitExperimentError(Texts.EXPERIMENT_INVALID_STATE_MSG.format(name=name))
        # if there are still artifacts (pods) from a previous experiment with the same name
        if list_pods(namespace=namespace, label_selector=f'runName={name}'):
            raise SubmitExperimentError(Texts.EXPERIMENT_PREV_EXP_STILL_TERMINATING)
        return name, prepare_label(script_name, name, name, run_kind=run_kind)

    # CASE 2: user submitted the experiment without a name, but an experiment with the
    # same script name already exists --> reuse its name with the next index as postfix.
    generated_name, labels = generate_name_for_existing_exps(script_name, namespace, run_kind=run_kind)
    if generated_name:
        return generated_name, labels

    # CASE 3: no name given and no existing experiments match the script name
    # --> generate a brand new name, de-duplicated against current experiments.
    result = generate_name(script_name)
    experiments = Experiment.list(namespace=namespace, name_filter=result)
    # NOTE: a non-empty list is truthy; the original `and len(experiments) > 0` check
    # was redundant, and an identical return statement was duplicated after this branch.
    if experiments:
        result = f'{result}-{len(experiments)}'
    return result, prepare_label(script_name, result, run_kind=run_kind)
def test_list_experiments_from_namespace(mock_platform_resources_api_client: CustomObjectsApi):
    """Namespace-scoped listing uses list_namespaced_custom_object and yields only that namespace's experiments."""
    single_namespace_response = dict(LIST_EXPERIMENTS_RESPONSE_RAW)
    single_namespace_response['items'] = [single_namespace_response['items'][0]]
    mock_platform_resources_api_client.list_namespaced_custom_object.return_value = single_namespace_response

    listed = Experiment.list(namespace='namespace-1')

    assert listed == [TEST_EXPERIMENTS[0]]
def list_unitialized_experiments_in_cli(verbosity_lvl: int, all_users: bool, name: str,
                                        headers: List[str], listed_runs_kinds: List[RunKinds] = None,
                                        count: int = None, brief: bool = False):
    """
    Display in the cli a list of experiments in CREATING state that have no associated runs yet.

    :param verbosity_lvl: level at which error messages should be logged or displayed
    :param all_users: whether to display experiments regardless of their owner or not
    :param name: regular expression to which names of the shown experiments have to match
    :param headers: headers which will be displayed on top of a table shown in the cli
    :param listed_runs_kinds: run kinds used when filtering experiments and runs;
        defaults to [RunKinds.TRAINING, RunKinds.JUPYTER]
    :param count: number of rows displayed on a list. If not given - content of a list is not limited
    :param brief: accepted for interface compatibility with sibling listing helpers;
        not used by this function
    """
    if not listed_runs_kinds:
        listed_runs_kinds = [RunKinds.TRAINING, RunKinds.JUPYTER]
    try:
        namespace = None if all_users else get_kubectl_current_context_namespace()
        creating_experiments = Experiment.list(namespace=namespace, state=ExperimentStatus.CREATING,
                                               run_kinds_filter=listed_runs_kinds, name_filter=name)
        runs = Run.list(namespace=namespace, name_filter=name, run_kinds_filter=listed_runs_kinds)

        # An experiment is "uninitialized" when no run references it yet.
        names_of_experiment_with_runs = {run.experiment_name for run in runs}
        uninitialized_experiments = [experiment for experiment in creating_experiments
                                     if experiment.name not in names_of_experiment_with_runs]

        # With no explicit count, show everything (slicing by the full length is a no-op).
        displayed_items_count = count if count else len(uninitialized_experiments)
        click.echo(tabulate([uninitialized_experiment_cli_representation(experiment)
                             for experiment in uninitialized_experiments][-displayed_items_count:],
                            headers=headers, tablefmt="orgtbl"))
    except InvalidRegularExpressionError:
        handle_error(logger, Texts.INVALID_REGEX_ERROR_MSG, Texts.INVALID_REGEX_ERROR_MSG,
                     add_verbosity_msg=verbosity_lvl == 0)
        exit(1)
    except Exception:
        handle_error(logger, Texts.OTHER_ERROR_MSG, Texts.OTHER_ERROR_MSG,
                     add_verbosity_msg=verbosity_lvl == 0)
        exit(1)
def test_list_experiments(mock_platform_resources_api_client: CustomObjectsApi):
    """Listing without any filters returns every experiment parsed from the raw API response."""
    mock_platform_resources_api_client.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    listed = Experiment.list()

    assert listed == TEST_EXPERIMENTS
def test_list_experiments_invalid_name_filter(mock_platform_resources_api_client: CustomObjectsApi):
    """A name_filter that is not a valid regular expression raises InvalidRegularExpressionError."""
    api_mock = mock_platform_resources_api_client
    api_mock.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    with pytest.raises(InvalidRegularExpressionError):
        Experiment.list(name_filter='*')
def test_list_experiments_name_filter(mock_platform_resources_api_client: CustomObjectsApi):
    """Filtering by an exact name returns only the matching experiment."""
    api_mock = mock_platform_resources_api_client
    api_mock.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    matching = Experiment.list(name_filter='test-experiment-new')

    assert matching == [TEST_EXPERIMENTS[1]]
def test_list_experiments_filter_status(mock_platform_resources_api_client: CustomObjectsApi):
    """Filtering by CREATING state returns only the experiment in that state."""
    api_mock = mock_platform_resources_api_client
    api_mock.list_cluster_custom_object.return_value = LIST_EXPERIMENTS_RESPONSE_RAW

    creating = Experiment.list(state=ExperimentStatus.CREATING)

    assert creating == [TEST_EXPERIMENTS[0]]