Example #1
File: test_logs.py Project: pnijhara/nauta
def test_show_logs_match(mocker):
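    # Patch the Elasticsearch client and the kubectl helpers so the CLI
    # command can run entirely offline against mocked runs.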
    es_client_mock = mocker.patch(
        'commands.common.logs_utils.K8sElasticSearchClient')

    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.return_value = TEST_LOG_ENTRIES

    get_kubectl_host_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_host')
    get_api_key_mock = mocker.patch('commands.common.logs_utils.get_api_key')

    get_current_namespace_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_current_context_namespace')
    fake_experiment_1_name = 'fake-experiment-1'
    fake_experiment_2_name = 'fake-experiment-2'
    list_runs_mock = mocker.patch('commands.common.logs_utils.Run.list')
    list_runs_mock.return_value = [
        Run(name=fake_experiment_1_name,
            experiment_name=fake_experiment_1_name),
        Run(name=fake_experiment_2_name,
            experiment_name=fake_experiment_2_name)
    ]

    runner = CliRunner()
    result = runner.invoke(logs.logs, ['-m', 'fake-experiment'])

    assert get_kubectl_host_mock.call_count == 1, 'kubectl host was not retrieved'
    assert get_api_key_mock.call_count == 1, 'k8s api key was not retrieved'
    assert get_current_namespace_mock.call_count == 1, 'namespace was not retrieved'
    assert list_runs_mock.call_count == 1, 'run was not retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 2, 'Experiment logs were not retrieved'

    assert fake_experiment_1_name in result.output
    assert fake_experiment_2_name in result.output
Example #2
def prepare_list_of_runs(parameter_range: List[Tuple[str, str]],
                         experiment_name: str, parameter_set: Tuple[str, ...],
                         template_name: str) -> List[Run]:

    run_list: List[Run] = []

    if not parameter_range and not parameter_set:
        run_list = [
            Run(name=experiment_name,
                experiment_name=experiment_name,
                pod_selector={
                    'matchLabels': {
                        'app': template_name,
                        'release': experiment_name
                    }
                })
        ]
    else:
        list_of_range_parameters: List[Tuple[str, ...]] = [("", )]
        list_of_set_parameters = [("", )]

        if parameter_range:
            list_of_range_parameters = analyze_pr_parameters_list(
                parameter_range)

        if parameter_set:
            list_of_set_parameters = analyze_ps_parameters_list(parameter_set)

        run_index = 1
        for set_param in list_of_set_parameters:
            for range_param in list_of_range_parameters:
                current_run_name = experiment_name + "-" + str(run_index)
                current_params: Tuple[str, ...] = ()

                if len(set_param) >= 1 and set_param[0]:
                    current_params = set_param

                if len(range_param) >= 1 and range_param[0]:
                    current_params = current_params + range_param

                run_list.append(
                    Run(name=current_run_name,
                        experiment_name=experiment_name,
                        parameters=current_params,
                        pod_selector={
                            'matchLabels': {
                                'app': template_name,
                                'release': current_run_name
                            }
                        }))
                run_index = run_index + 1
    return run_list
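
For context, a minimal usage sketch with hypothetical values (assuming analyze_pr_parameters_list expands ('param2', '{0...2:1}') into ('param2=0',), ('param2=1',), ('param2=2',), which is what the range tests below expect):

runs = prepare_list_of_runs(parameter_range=[('param2', '{0...2:1}')],
                            experiment_name='my-exp',
                            parameter_set=(),
                            template_name='tf-training')
# Expected: three Runs named 'my-exp-1' .. 'my-exp-3', carrying
# parameters ('param2=0',), ('param2=1',) and ('param2=2',) respectively.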
Example #3
def create_fake_run(experiment: Experiment) -> Run:
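    # Mirror the experiment's metadata onto a placeholder Run that starts
    # in the CREATING state with no pods, metrics, or pod selector yet.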
    return Run(name=experiment.name, experiment_name=experiment.name, metrics={},
               parameters=experiment.parameters_spec, pod_count=0,
               pod_selector={}, state=RunStatus.CREATING, namespace=experiment.namespace,
               creation_timestamp=experiment.creation_timestamp,
               template_name=experiment.template_name,
               template_version=experiment.template_version)
Example #4
def test_show_logs_failure_proxy_problem(mocker, exception):
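    # K8sProxy is made to raise, so the command should fail before any
    # namespace lookup, run listing, or log retrieval takes place.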
    es_client_mock = mocker.patch(
        'commands.experiment.logs.K8sElasticSearchClient')
    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.side_effect = RuntimeError

    proxy_mock = mocker.patch.object(logs, 'K8sProxy')
    proxy_mock.side_effect = exception
    get_current_namespace_mock = mocker.patch(
        'commands.experiment.logs.get_kubectl_current_context_namespace')
    fake_experiment_name = 'fake-experiment'
    list_runs_mock = mocker.patch('commands.experiment.logs.Run.list')
    list_runs_mock.return_value = [
        Run(name=fake_experiment_name, experiment_name=fake_experiment_name)
    ]

    runner = CliRunner()

    result = runner.invoke(logs.logs, [fake_experiment_name])

    assert proxy_mock.call_count == 1, 'port forwarding was not initiated'
    assert get_current_namespace_mock.call_count == 0, 'namespace was retrieved'
    assert list_runs_mock.call_count == 0, 'run was retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 0, 'Experiment logs retrieval was called'
    assert result.exit_code == 1
Example #5
File: test_logs.py Project: pnijhara/nauta
def test_show_logs_from_two_experiments(mocker):
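    # Note: despite its name, this test exercises the log-to-file error
    # path: builtins.open raises, and the command should print
    # LOGS_STORING_ERROR with the exception message.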
    es_client_mock = mocker.patch(
        'commands.common.logs_utils.K8sElasticSearchClient')
    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.return_value = TEST_LOG_ENTRIES

    get_kubectl_host_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_host')
    get_api_key_mock = mocker.patch('commands.common.logs_utils.get_api_key')

    get_current_namespace_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_current_context_namespace')

    fake_experiment_name = 'fake-experiment'
    list_runs_mock = mocker.patch('commands.common.logs_utils.Run.list')
    list_runs_mock.return_value = [
        Run(name=fake_experiment_name, experiment_name=fake_experiment_name)
    ]

    runner = CliRunner()
    m = mock_open()
    with patch("builtins.open", m) as open_mock:
        exception = RuntimeError()
        exception.message = "Cause of an error"
        open_mock.return_value.__enter__.side_effect = exception
        result = runner.invoke(logs.logs, ['fake-experiment', '-o'], input='y')

    assert CmdsCommonTexts.LOGS_STORING_ERROR.format(
        exception_message=exception.message) in result.output
    assert get_kubectl_host_mock.call_count == 1, 'kubectl host was not retrieved'
    assert get_api_key_mock.call_count == 1, 'k8s api key was not retrieved'
    assert get_current_namespace_mock.call_count == 1, 'namespace was not retrieved'
    assert list_runs_mock.call_count == 1, 'run was not retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 1, 'Experiment logs were not retrieved'
Example #6
File: test_logs.py Project: pnijhara/nauta
def test_show_logs_to_file_success(mocker):
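    # builtins.open is replaced with mock_open, so saving logs with '-o'
    # succeeds without touching the real filesystem.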
    es_client_mock = mocker.patch(
        'commands.common.logs_utils.K8sElasticSearchClient')
    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.return_value = TEST_LOG_ENTRIES

    get_kubectl_host_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_host')
    get_api_key_mock = mocker.patch('commands.common.logs_utils.get_api_key')

    get_current_namespace_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_current_context_namespace')
    fake_experiment_name = 'fake-experiment'
    list_runs_mock = mocker.patch('commands.common.logs_utils.Run.list')
    list_runs_mock.return_value = [
        Run(name=fake_experiment_name, experiment_name=fake_experiment_name)
    ]

    runner = CliRunner()
    m = mock_open()
    with patch("builtins.open", m) as open_mock:
        runner.invoke(logs.logs, ['fake-experiment', '-o'], input='y')

    assert get_kubectl_host_mock.call_count == 1, 'kubectl host was not retrieved'
    assert get_api_key_mock.call_count == 1, 'k8s api key was not retrieved'
    assert get_current_namespace_mock.call_count == 1, 'namespace was not retrieved'
    assert list_runs_mock.call_count == 1, 'run was not retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 1, 'Experiment logs were not retrieved'
    assert open_mock.call_count == 1, "File wasn't saved."
Example #7
File: test_logs.py Project: pnijhara/nauta
def test_show_logs_failure(mocker):
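    # The log generator raises RuntimeError, so the command is expected
    # to exit with code 1 after all lookups have happened.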
    es_client_mock = mocker.patch(
        'commands.common.logs_utils.K8sElasticSearchClient')
    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.side_effect = RuntimeError

    get_kubectl_host_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_host')
    get_api_key_mock = mocker.patch('commands.common.logs_utils.get_api_key')

    get_current_namespace_mock = mocker.patch(
        'commands.common.logs_utils.get_kubectl_current_context_namespace')
    fake_experiment_name = 'fake-experiment'
    list_runs_mock = mocker.patch('commands.common.logs_utils.Run.list')
    list_runs_mock.return_value = [
        Run(name=fake_experiment_name, experiment_name=fake_experiment_name)
    ]

    runner = CliRunner()

    result = runner.invoke(logs.logs, [fake_experiment_name])

    assert get_kubectl_host_mock.call_count == 1, 'kubectl host was not retrieved'
    assert get_api_key_mock.call_count == 1, 'k8s api key was not retrieved'
    assert get_current_namespace_mock.call_count == 1, 'namespace was not retrieved'
    assert list_runs_mock.call_count == 1, 'run was not retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 1, 'Experiment logs retrieval was not called'
    assert result.exit_code == 1
Example #8
def test_create_list_of_runs_ps_only(mocker):
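    # Each '{param1:0, param2:1}' string is a single parameter set, so
    # two sets should expand into exactly two Runs.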
    experiment_name = "experiment_name"
    template_name = "template_name"
    mocker.patch("platform_resources.experiment_utils.generate_exp_name_and_labels",
                 side_effect=[(experiment_name, {})])

    multiple_two_params = ("{param1:0, param2:1}", "{param1:2,param3:3}")
    multiple_two_params_list_result = \
        [Run(name=experiment_name + "-1", experiment_name=experiment_name,
             parameters=("param1=0", "param2=1")),
         Run(name=experiment_name + "-2", experiment_name=experiment_name,
             parameters=("param1=2", "param3=3"))]
    output = prepare_list_of_runs(parameter_range=[], experiment_name=experiment_name,
                                  parameter_set=multiple_two_params, template_name=template_name)
    assert len(output) == 2
    for expected_run, result_run in zip(multiple_two_params_list_result, output):
        assert expected_run.parameters == result_run.parameters
Example #9
def test_create_list_of_runs_pr_only(mocker):
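    # '{0, 1}' enumerates explicit values and '{0...2:1}' is a range from
    # 0 to 2 with step 1, so the cartesian product yields 2 * 3 = 6 Runs.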
    experiment_name = "experiment_name"
    template_name = "template_name"
    mocker.patch(
        "platform_resources.experiment_utils.generate_exp_name_and_labels",
        side_effect=[(experiment_name, {})])

    two_params_list = [("param1", "{0, 1}"), ("param2", "{0...2:1}")]
    two_params_list_result = \
        [Run(name=experiment_name + "-1", experiment_name=experiment_name,
             parameters=("param1=0", "param2=0")),
         Run(name=experiment_name + "-2", experiment_name=experiment_name,
             parameters=("param1=0", "param2=1")),
         Run(name=experiment_name + "-3", experiment_name=experiment_name,
             parameters=("param1=0", "param2=2")),
         Run(name=experiment_name + "-4", experiment_name=experiment_name,
             parameters=("param1=1", "param2=0")),
         Run(name=experiment_name + "-5", experiment_name=experiment_name,
             parameters=("param1=1", "param2=1")),
         Run(name=experiment_name + "-6", experiment_name=experiment_name,
             parameters=("param1=1", "param2=2"))]

    output = prepare_list_of_runs(parameter_range=two_params_list,
                                  experiment_name=experiment_name,
                                  parameter_set=(),
                                  template_name=template_name)
    assert len(output) == 6
    for expected_run, result_run in zip(two_params_list_result, output):
        assert expected_run.parameters == result_run.parameters
Example #10
def test_show_logs_success(mocker):
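    # Happy path: the proxy is set up once and the logs of a single run
    # are fetched through the mocked Elasticsearch client.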
    es_client_mock = mocker.patch('commands.common.K8sElasticSearchClient')
    es_client_instance = es_client_mock.return_value
    es_client_instance.get_experiment_logs_generator.return_value = TEST_LOG_ENTRIES

    proxy_mock = mocker.patch.object(common, 'K8sProxy')

    get_current_namespace_mock = mocker.patch('commands.common.get_kubectl_current_context_namespace')
    fake_experiment_name = 'fake-experiment'
    list_runs_mock = mocker.patch('commands.common.Run.list')
    list_runs_mock.return_value = [Run(name=fake_experiment_name, experiment_name=fake_experiment_name)]

    runner = CliRunner()
    runner.invoke(logs.logs, [fake_experiment_name])

    assert proxy_mock.call_count == 1, 'port forwarding was not initiated'
    assert get_current_namespace_mock.call_count == 1, 'namespace was not retrieved'
    assert list_runs_mock.call_count == 1, 'run was not retrieved'
    assert es_client_instance.get_experiment_logs_generator.call_count == 1, 'Experiment logs were not retrieved'
Example #11
def test_add_run_failure(mock_k8s_run_api_client: CustomObjectsApi):
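    # A 500 response from the Kubernetes API should propagate to the
    # caller as an ApiException.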
    mock_k8s_run_api_client.create_namespaced_custom_object.side_effect = ApiException(status=500)
    run = Run(name=RUN_NAME, experiment_name='fake')
    with pytest.raises(ApiException):
        run.create(namespace=NAMESPACE)
Example #12
def test_add_run(mock_k8s_run_api_client: CustomObjectsApi):
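    # On success, Run.create() is expected to wrap the raw API response
    # in a KubernetesObject.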
    mock_k8s_run_api_client.create_namespaced_custom_object.return_value = GET_RUN_RESPONSE_RAW
    run = Run(name=RUN_NAME, experiment_name='fake')
    added_run = run.create(namespace=NAMESPACE)
    assert added_run is not None and type(added_run) is KubernetesObject
Example #13
    state=ExperimentStatus.CREATING,
    template_name='test-ex-template',
    template_namespace='test-ex-namespace',
    metadata={'labels': {
        'runKind': 'training'
    }})

RUN_QUEUED = Run(
    name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
    parameters=['mnist_single_node.py', '--data_dir', '/app'],
    state=RunStatus.QUEUED,
    metrics={'accuracy': 52.322},
    experiment_name="experiment-1",
    pod_count=1,
    pod_selector={
        'matchLabels': {
            'app': 'tf-training',
            'draft': 'exp-mnist-single-node.py-18.05.17-16.05.45-1',
            'release': 'exp-mnist-single-node.py-18.05.17-16.05.45-1'
        }
    },
    namespace="mciesiel-dev",
    creation_timestamp="2018-05-17T14:05:52Z",
    template_name="tf-training")
RUN_CANCELLED = Run(
    name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
    parameters=['mnist_single_node.py', '--data_dir', '/app'],
    state=RunStatus.CANCELLED,
    metrics={'accuracy': 52.322},
    experiment_name="experiment-name-will-be-added-soon",
    pod_count=1,
Example #14
from unittest.mock import MagicMock
from kubernetes.client import V1Pod, V1PodStatus, V1Event, V1ObjectReference, V1ObjectMeta

from commands.experiment import view
from platform_resources.run import Run, RunStatus
from platform_resources.experiment import Experiment
from cli_text_consts import ExperimentViewCmdTexts as Texts
from util.k8s.k8s_statistics import ResourceUsage
from util.k8s.k8s_info import PodStatus

TEST_RUNS = [
    Run(name='test-experiment',
        parameters=['a 1', 'b 2'],
        creation_timestamp='2018-04-26T13:43:01Z',
        namespace='namespace-1',
        state=RunStatus.RUNNING,
        template_name='test-ex-template',
        metrics={'any metrics': 'a'},
        experiment_name='experiment_name',
        pod_count=1,
        pod_selector={}),
    Run(name='test-experiment-2',
        parameters=['a 1', 'b 2'],
        creation_timestamp='2018-05-08T13:05:04Z',
        namespace='namespace-2',
        state=RunStatus.COMPLETE,
        template_name='test-ex-template',
        metrics={'any metrics': 'a'},
        experiment_name='experiment_name',
        pod_count=1,
        pod_selector={})
]
Example #15
                                creation_timestamp='2018-04-26T13:43:01Z',
                                namespace='namespace-1',
                                state=ExperimentStatus.CREATING,
                                template_name='jupyter',
                                template_namespace='test-ex-namespace')

NON_JUPYTER_EXPERIMENT = Experiment(name='test-experiment-2',
                                    parameters_spec=['a 1', 'b 2'],
                                    creation_timestamp='2018-05-08T13:05:04Z',
                                    namespace='namespace-2',
                                    state=ExperimentStatus.SUBMITTED,
                                    template_name='test-ex-template',
                                    template_namespace='test-ex-namespace')
SUBMITTED_RUNS = [
    Run(name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
        experiment_name=CORRECT_INTERACT_NAME,
        state=RunStatus.QUEUED)
]

KO_EXPERIMENT = KubernetesObject(spec=JUPYTER_EXPERIMENT,
                                 metadata=client.V1ObjectMeta())


class InteractMocks:
    def __init__(self, mocker):
        self.mocker = mocker
        self.get_namespace = mocker.patch(
            "commands.experiment.interact.get_kubectl_current_context_namespace",
            side_effect=[EXPERIMENT_NAMESPACE, EXPERIMENT_NAMESPACE])
        self.get_experiment = mocker.patch(
            "commands.experiment.interact.Experiment.get", return_value=None)
Example #16
def test_create_list_of_runs_pr_and_ps(mocker):
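    # Sets and ranges combine as a cartesian product: 2 parameter sets
    # times (2 * 3) range combinations = 12 Runs, with the set parameters
    # listed before the range parameters.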
    experiment_name = "experiment_name"
    template_name = "template_name"
    mocker.patch(
        "platform_resources.experiment_utils.generate_exp_name_and_labels",
        side_effect=[(experiment_name, {})])

    two_params_list = [("param1", "{0, 1}"), ("param2", "{0...2:1}")]
    multiple_two_params = ("{param3:0, param4:1}", "{param3:2,param4:3}")

    expected_result = [
        Run(name=experiment_name + "-1",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=0", "param2=0")),
        Run(name=experiment_name + "-2",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=0", "param2=1")),
        Run(name=experiment_name + "-3",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=0", "param2=2")),
        Run(name=experiment_name + "-4",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=1", "param2=0")),
        Run(name=experiment_name + "-5",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=1", "param2=1")),
        Run(name=experiment_name + "-6",
            experiment_name=experiment_name,
            parameters=("param3=0", "param4=1", "param1=1", "param2=2")),
        Run(name=experiment_name + "-7",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=0", "param2=0")),
        Run(name=experiment_name + "-8",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=0", "param2=1")),
        Run(name=experiment_name + "-9",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=0", "param2=2")),
        Run(name=experiment_name + "-10",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=1", "param2=0")),
        Run(name=experiment_name + "-11",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=1", "param2=1")),
        Run(name=experiment_name + "-12",
            experiment_name=experiment_name,
            parameters=("param3=2", "param4=3", "param1=1", "param2=2"))
    ]

    output = prepare_list_of_runs(two_params_list,
                                  experiment_name,
                                  multiple_two_params,
                                  template_name=template_name)
    assert len(output) == 12

    for expected_run, result_run in zip(expected_result, output):
        assert expected_run.parameters == result_run.parameters
Example #17
from kubernetes.client.rest import ApiException

from platform_resources.platform_resource import KubernetesObject
from platform_resources.run import Run, RunStatus
from util.exceptions import InvalidRegularExpressionError

TEST_RUNS = [Run(name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
                 parameters=['mnist_single_node.py', '--data_dir', '/app'],
                 state=RunStatus.QUEUED,
                 metrics={'accuracy': 52.322},
                 experiment_name="experiment-name-will-be-added-soon",
                 pod_count=1,
                 pod_selector={'matchLabels': {'app': 'tf-training',
                                               'draft': 'exp-mnist-single-node.py-18.05.17-16.05.45-1',
                                               'release': 'exp-mnist-single-node.py-18.05.17-16.05.45-1'}},
                 namespace="mciesiel-dev", creation_timestamp="2018-05-17T14:05:52Z",
                 template_name="tf-training",
                 metadata={'clusterName': '', 'creationTimestamp': '2018-05-17T14:05:52Z', 'generation': 1,
                           'name': 'exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training',
                           'namespace': 'mciesiel-dev',
                           'resourceVersion': '435977',
                           'selfLink': '/apis/aipg.intel.com/v1/namespaces/mciesiel-dev/runs/'
                                       'exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training',
                           'uid': '68af2c7a-59db-11e8-b5db-527100001250'},
                 start_timestamp=None,
                 end_timestamp=None),
             Run(name="exp-mnist-single-node.py-18.05.17-16.05.56-2-tf-training",
                 parameters=['mnist_single_node.py', '--data_dir', '/app'], state=RunStatus.COMPLETE,
                 metrics={'accuracy': 52.322}, experiment_name="experiment-name-will-be-added-soon", pod_count=1,
                 pod_selector={
                     'matchLabels': {'app': 'tf-training', 'draft': 'exp-mnist-single-node.py-18.05.17-16.05.56-2',
                                     'release': 'exp-mnist-single-node.py-18.05.17-16.05.56-2'}},
Example #18
import dateutil

from commands.common import list_utils
from platform_resources.experiment import Experiment
from platform_resources.run import Run, RunStatus

TEST_RUNS = [
    Run(name='test-experiment',
        parameters=('a 1', 'b 2'),
        metrics={
            'acc': 52.2,
            'loss': 1.62345
        },
        creation_timestamp='2018-04-26T13:43:01Z',
        namespace='namespace-1',
        state=RunStatus.QUEUED,
        experiment_name='test-experiment',
        pod_count=0,
        pod_selector={}),
    Run(name='test-experiment-2',
        parameters=('a 1', 'b 2'),
        metrics={
            'acc': 52.2,
            'loss': 1.62345
        },
        creation_timestamp='2018-05-08T13:05:04Z',
        namespace='namespace-2',
        state=RunStatus.COMPLETE,
        experiment_name='test-experiment',
Example #19
File: test_submit.py Project: zhcf/nauta
from commands.experiment.submit import submit, DEFAULT_SCRIPT_NAME, validate_script_location, \
    validate_script_folder_location, get_default_script_location, clean_script_parameters, validate_pack_params, \
    check_duplicated_params
from commands.experiment.common import RunStatus
from platform_resources.run import Run
from util.exceptions import SubmitExperimentError
from cli_text_consts import ExperimentSubmitCmdTexts as Texts
from cli_text_consts import ExperimentCommonTexts as CommonTexts

SCRIPT_LOCATION = "training_script.py"
SCRIPT_FOLDER = "/a/b/c"

SUBMITTED_RUNS = [
    Run(name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
        experiment_name='test-experiment',
        state=RunStatus.QUEUED)
]

FAILED_RUNS = [
    Run(name="exp-mnist-single-node.py-18.05.17-16.05.45-1-tf-training",
        experiment_name='test-experiment',
        state=RunStatus.QUEUED),
    Run(name="exp-mnist-single-node.py-18.05.18-16.05.45-1-tf-training",
        experiment_name='test-experiment',
        state=RunStatus.FAILED)
]


class SubmitMocks:
    def __init__(self, mocker):