Example #1
File: base.py  Project: AlonMaor14/mlrun
    def _assert_secret_mount(self, volume_name, secret_name, default_mode,
                             mount_path):
        args = self._get_pod_creation_args()
        pod_spec = args.spec

        expected_volume = {
            "name": volume_name,
            "secret": {
                "defaultMode": default_mode,
                "secretName": secret_name
            },
        }
        assert (deepdiff.DeepDiff(pod_spec.volumes[0],
                                  expected_volume,
                                  ignore_order=True) == {})

        expected_volume_mounts = [
            {
                "mountPath": mount_path,
                "name": volume_name
            },
        ]

        container_spec = pod_spec.containers[0]
        assert (deepdiff.DeepDiff(container_spec.volume_mounts,
                                  expected_volume_mounts,
                                  ignore_order=True) == {})
Example #2
def find_most_similar_config(percent_prefix: str, path_to_dataset: str,
                             current_config: Union[LMTrainingConfig, ClassifierTrainingConfig]):
    config_diff_dict = defaultdict(list)
    logger.debug(f"Finding the most similar config in {path_to_dataset}")
    dirpath, dirnames, _ = next(os.walk(path_to_dataset))
    for dirname in dirnames:
        if not dirname.startswith(percent_prefix):
            continue
        file_path = os.path.join(dirpath, dirname, PARAM_FILE_NAME)
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                json_str = f.read()
                logger.debug(f'Loading config from {file_path}')
                config = jsons.loads(json_str, type(current_config))
            config_diff = deepdiff.DeepDiff(config, current_config)
            if config_diff == {}:
                return dirname, {}
            else:
                n_changed_params = (len(config_diff.get(DEEPDIFF_ADDED, []))
                                    + len(config_diff.get(DEEPDIFF_CHANGED, []))
                                    + len(config_diff.get(DEEPDIFF_REMOVED, [])))
                config_diff_dict[n_changed_params].append((dirname, config_diff))
    if not config_diff_dict:
        return None, deepdiff.DeepDiff({}, current_config)
    else:
        return config_diff_dict[min(config_diff_dict)][-1]
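A quick standalone illustration of the DeepDiff result keys this helper counts (a minimal sketch; the DEEPDIFF_ADDED/DEEPDIFF_CHANGED/DEEPDIFF_REMOVED constants are defined elsewhere in the project and are assumed to name keys like the ones shown below):

import deepdiff

old_config = {"lr": 0.001, "epochs": 10, "dropout": 0.1}
new_config = {"lr": 0.01, "epochs": 10, "optimizer": "adam"}

diff = deepdiff.DeepDiff(old_config, new_config)
# Typical top-level keys for a dict comparison:
#   "values_changed"          -> lr: 0.001 -> 0.01
#   "dictionary_item_added"   -> root['optimizer']
#   "dictionary_item_removed" -> root['dropout']
print(diff)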
Example #3
def assert_mces_equal(output: object,
                      golden: object,
                      ignore_paths: Optional[List[str]] = None) -> None:
    # This method assumes we're given a list of MCE json objects.
    diff = deepdiff.DeepDiff(golden,
                             output,
                             exclude_regex_paths=ignore_paths,
                             ignore_order=True)
    if diff:
        # Attempt a clean diff (removing None-s)
        assert isinstance(output, list)
        assert isinstance(golden, list)
        clean_output = [clean_nones(o) for o in output]
        clean_golden = [clean_nones(g) for g in golden]
        clean_diff = deepdiff.DeepDiff(
            clean_golden,
            clean_output,
            exclude_regex_paths=ignore_paths,
            ignore_order=True,
        )
        if clean_diff != diff:
            logger.warning(
                f"MCE-s differ, clean MCE-s are fine\n{pprint.pformat(diff)}")
        diff = clean_diff

    assert not diff, f"MCEs differ\n{pprint.pformat(diff)}"
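The clean_nones helper referenced above is not included in this snippet; a minimal sketch of what such a helper could look like (an assumption, the project's actual implementation may differ):

def clean_nones(value):
    # Recursively drop None entries from dicts and lists so that unset optional
    # fields do not produce spurious differences.
    if isinstance(value, dict):
        return {k: clean_nones(v) for k, v in value.items() if v is not None}
    if isinstance(value, list):
        return [clean_nones(v) for v in value if v is not None]
    return value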
Example #4
    def _assert_node_selections(
        self,
        kube_resource_spec: KubeResourceSpec,
        expected_node_name=None,
        expected_node_selector=None,
        expected_affinity=None,
    ):
        args, _ = nuclio.deploy.deploy_config.call_args
        deploy_spec = args[0]["spec"]

        if expected_node_name:
            assert deploy_spec["nodeName"] == expected_node_name

        if expected_node_selector:
            assert (
                deepdiff.DeepDiff(
                    deploy_spec["nodeSelector"],
                    expected_node_selector,
                    ignore_order=True,
                )
                == {}
            )
        if expected_affinity:
            # deploy_spec returns affinity in camelCase, while V1Affinity uses snake_case
            assert (
                deepdiff.DeepDiff(
                    kube_resource_spec._transform_affinity_to_k8s_class_instance(
                        deploy_spec["affinity"]
                    ),
                    expected_affinity,
                    ignore_order=True,
                )
                == {}
            )
Example #5
File: base.py  Project: AlonMaor14/mlrun
    def _assert_pvc_mount_configured(self, pvc_name, pvc_mount_path,
                                     volume_name):
        args = self._get_pod_creation_args()
        pod_spec = args.spec

        expected_volume = {
            "name": volume_name,
            "persistentVolumeClaim": {
                "claimName": pvc_name
            },
        }
        assert (deepdiff.DeepDiff(pod_spec.volumes[0],
                                  expected_volume,
                                  ignore_order=True) == {})

        expected_volume_mounts = [
            {
                "mountPath": pvc_mount_path,
                "name": volume_name
            },
        ]

        container_spec = pod_spec.containers[0]
        assert (deepdiff.DeepDiff(container_spec.volume_mounts,
                                  expected_volume_mounts,
                                  ignore_order=True) == {})
Example #6
def test_mount_configmap():
    expected_volume = {
        "configMap": {
            "name": "my-config-map"
        },
        "name": "my-volume"
    }
    expected_volume_mount = {
        "mountPath": "/myConfMapPath",
        "name": "my-volume"
    }

    function = mlrun.new_function("function-name",
                                  "function-project",
                                  kind=mlrun.runtimes.RuntimeKinds.job)
    function.apply(
        mlrun.platforms.mount_configmap(
            configmap_name="my-config-map",
            mount_path="/myConfMapPath",
            volume_name="my-volume",
        ))

    assert (deepdiff.DeepDiff(
        [expected_volume],
        function.spec.volumes,
        ignore_order=True,
    ) == {})
    assert (deepdiff.DeepDiff(
        [expected_volume_mount],
        function.spec.volume_mounts,
        ignore_order=True,
    ) == {})
Example #7
def _assert_resource_in_response_resources(
    expected_resource_type: str,
    expected_resource: dict,
    resources: mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
    resources_field_name: str,
    group_by_field_extractor,
):
    (
        first_group_by_field_value,
        second_group_by_field_value,
    ) = group_by_field_extractor(expected_resource["metadata"]["labels"])
    found = False
    for resource in getattr(
        resources[first_group_by_field_value][second_group_by_field_value],
        resources_field_name,
    ):
        if resource.name == expected_resource["metadata"]["name"]:
            found = True
            assert (deepdiff.DeepDiff(
                resource.labels,
                expected_resource["metadata"]["labels"],
                ignore_order=True,
            ) == {})
            assert (deepdiff.DeepDiff(
                resource.status,
                expected_resource["status"],
                ignore_order=True,
            ) == {})
    if not found:
        pytest.fail(
            f"Expected {expected_resource_type} was not found in response resources"
        )
Example #8
def test_mount_hostpath():
    expected_volume = {
        "hostPath": {
            "path": "/tmp",
            "type": ""
        },
        "name": "my-volume"
    }
    expected_volume_mount = {"mountPath": "/myHostPath", "name": "my-volume"}

    function = mlrun.new_function("function-name",
                                  "function-project",
                                  kind=mlrun.runtimes.RuntimeKinds.job)
    function.apply(
        mlrun.platforms.mount_hostpath(host_path="/tmp",
                                       mount_path="/myHostPath",
                                       volume_name="my-volume"))

    assert (deepdiff.DeepDiff(
        [expected_volume],
        function.spec.volumes,
        ignore_order=True,
    ) == {})
    assert (deepdiff.DeepDiff(
        [expected_volume_mount],
        function.spec.volume_mounts,
        ignore_order=True,
    ) == {})
Example #9
def compare(d1, d2, level='root'):
    if isinstance(d1, dict) and isinstance(d2, dict):
        if d1.keys() != d2.keys():
            s1 = set(d1.keys())
            s2 = set(d2.keys())
            print('{:<20} + {} - {}'.format(level, s1 - s2, s2 - s1))
            common_keys = s1 & s2
        else:
            common_keys = set(d1.keys())

        for k in common_keys:
            compare(d1[k], d2[k], level='{}.{}'.format(level, k))

    elif isinstance(d1, list) and isinstance(d2, list):
        if len(d1) != len(d2):
            #print('{:<20} len1={}; len2={}'.format(level, len(d1), len(d2)))
            ddiff = deepdiff.DeepDiff(d1,
                                      d2,
                                      ignore_order=True,
                                      verbose_level=0)
            print(ddiff)
        common_len = min(len(d1), len(d2))

        for i in range(common_len):
            compare(d1[i], d2[i], level='{}[{}]'.format(level, i))

    else:
        if d1 != d2:
            #print('{:<20} {} != {}'.format(level, d1, d2))
            ddiff = deepdiff.DeepDiff(d1,
                                      d2,
                                      ignore_order=True,
                                      verbose_level=0)
            print(ddiff)
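A minimal usage sketch for the compare helper above (assuming import deepdiff is in scope along with the function definition; the sample dicts are made up for illustration):

d1 = {"name": "svc", "ports": [80, 443], "labels": {"env": "dev"}}
d2 = {"name": "svc", "ports": [80, 8443], "labels": {"env": "prod", "team": "infra"}}

# Prints key-set differences per nesting level and delegates value and
# length mismatches to deepdiff.DeepDiff.
compare(d1, d2)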
Example #10
def test_mount_v3io_legacy():
    username = "******"
    access_key = "access-key"
    os.environ["V3IO_USERNAME"] = username
    os.environ["V3IO_ACCESS_KEY"] = access_key
    function = mlrun.new_function("function-name",
                                  "function-project",
                                  kind=mlrun.runtimes.RuntimeKinds.job)
    function.apply(mlrun.mount_v3io_legacy())
    expected_volume = {
        "flexVolume": {
            "driver": "v3io/fuse",
            "options": {
                "accessKey": access_key,
                "container": "users",
                "subPath": f"/{username}",
            },
        },
        "name": "v3io",
    }
    expected_volume_mount = {
        "mountPath": "/User",
        "name": "v3io",
        "subPath": ""
    }
    assert (deepdiff.DeepDiff(
        [expected_volume],
        function.spec.volumes,
        ignore_order=True,
    ) == {})
    assert (deepdiff.DeepDiff(
        [expected_volume_mount],
        function.spec.volume_mounts,
        ignore_order=True,
    ) == {})
Example #11
def test_model_nwm_public(tmp_data_dir_public, capsys):
    with capsys.disabled():
        print("Question: WrfHydroModel object is able to compile NWM public?")

    # Setup directory paths
    expected_dir = tmp_data_dir_public / 'data' / 'expected'
    source_dir = tmp_data_dir_public / 'data' / 'wrf_hydro_nwm_public' / 'source'
    compile_dir = tmp_data_dir_public / 'data' / 'wrf_hydro_nwm_public' / 'compiled'

    # Load expected data objects
    model_objects_expected = pickle.load(open(expected_dir / 'test_model_nwm_public.pkl',"rb"))

    # Make precompile object
    model_object_precompile = wrfhydropy.WrfHydroModel(source_dir=str(source_dir))

    # Make post compile object
    model_object_postcompile = copy.deepcopy(model_object_precompile)
    model_object_postcompile.compile('gfort',compile_dir=str(compile_dir),overwrite=True)

    # Compare to expected pre-compile object
    diffs_precompile = deepdiff.DeepDiff(model_objects_expected['model_object_precompile'],
                                         model_object_precompile)
    assert diffs_precompile == {}

    # Compare to expected post-compile object; file paths will be different, so only check existence
    postcompile_expected = model_objects_expected['model_object_postcompile']

    # check that the model compiled successfully
    diffs_compile_options = deepdiff.DeepDiff(model_object_postcompile.compile_options,
                                              postcompile_expected.compile_options,
                                              ignore_order=True)
    assert diffs_compile_options == {}
    assert model_object_postcompile.compile_log.returncode == 0
    assert model_object_postcompile.wrf_hydro_exe.name == 'wrf_hydro.exe'
Example #12
def test_simulation_nwm_public(tmp_data_dir_public, capsys):
    with capsys.disabled():
        print("Question: WrfHydroSim object is constructed properly for NWM public?")

    # Set directory paths
    expected_dir = tmp_data_dir_public / 'data' / 'expected'
    compile_dir = tmp_data_dir_public / 'data' / 'wrf_hydro_nwm_public' / 'compiled'
    domain_top_dir = tmp_data_dir_public / 'data' / 'domain'

    # Load expected objects
    model_objects_expected = pickle.load(open(expected_dir / 'test_model_nwm_public.pkl', "rb"))
    domain_object_expected = pickle.load(open(expected_dir / 'test_domain_nwm_public.pkl', "rb"))
    simulation_object_expected = pickle.load(open(expected_dir / 'test_simulation_nwm_public.pkl',"rb"))

    # Load previous test artifacts
    model_object_postcompile = pickle.load(open(compile_dir / 'WrfHydroModel.pkl','rb'))

    # Setup a simulation
    domain_object = wrfhydropy.WrfHydroDomain(domain_top_dir=domain_top_dir,
                                              domain_config='NWM',
                                              model_version='v1.2.1')
    model_object_postcompile_expected = model_objects_expected['model_object_postcompile']
    simulation_object = wrfhydropy.WrfHydroSim(model_object_postcompile, domain_object)

    # Compare expected to new
    hydro_diffs = deepdiff.DeepDiff(simulation_object_expected.hydro_namelist,
                                    simulation_object.hydro_namelist)
    assert hydro_diffs == {}
    hrldas_diffs = deepdiff.DeepDiff(simulation_object_expected.namelist_hrldas,
                                     simulation_object.namelist_hrldas)
    assert hrldas_diffs == {}
Example #13
def test_store_artifact_restoring_multiple_tags(db: DBInterface, db_session: Session):
    artifact_key = "artifact_key_1"
    artifact_1_uid = "artifact_uid_1"
    artifact_2_uid = "artifact_uid_2"
    artifact_1_body = _generate_artifact(artifact_key, uid=artifact_1_uid)
    artifact_2_body = _generate_artifact(artifact_key, uid=artifact_2_uid)
    artifact_1_tag = "artifact_tag_1"
    artifact_2_tag = "artifact_tag_2"

    db.store_artifact(
        db_session, artifact_key, artifact_1_body, artifact_1_uid, tag=artifact_1_tag,
    )
    db.store_artifact(
        db_session, artifact_key, artifact_2_body, artifact_2_uid, tag=artifact_2_tag,
    )
    artifacts = db.list_artifacts(db_session, artifact_key, tag="*")
    assert len(artifacts) == 2
    expected_uids = [artifact_1_uid, artifact_2_uid]
    uids = [artifact["metadata"]["uid"] for artifact in artifacts]
    assert deepdiff.DeepDiff(expected_uids, uids, ignore_order=True,) == {}
    expected_tags = [artifact_1_tag, artifact_2_tag]
    tags = [artifact["tag"] for artifact in artifacts]
    assert deepdiff.DeepDiff(expected_tags, tags, ignore_order=True,) == {}
    artifact = db.read_artifact(db_session, artifact_key, tag=artifact_1_tag)
    assert artifact["metadata"]["uid"] == artifact_1_uid
    assert artifact["tag"] == artifact_1_tag
    artifact = db.read_artifact(db_session, artifact_key, tag=artifact_2_tag)
    assert artifact["metadata"]["uid"] == artifact_2_uid
    assert artifact["tag"] == artifact_2_tag
Example #14
    def _assert_triggers(self, http_trigger=None, v3io_trigger=None):
        args, _ = nuclio.deploy.deploy_config.call_args
        triggers_config = args[0]["spec"]["triggers"]

        if http_trigger:
            expected_struct = self._get_expected_struct_for_http_trigger(http_trigger)
            assert (
                deepdiff.DeepDiff(
                    triggers_config["http"],
                    expected_struct,
                    ignore_order=True,
                    # TODO - (in Nuclio) There is a bug with canary configuration:
                    #        the nginx.ingress.kubernetes.io/canary-weight annotation gets assigned the host name
                    #        rather than the actual weight. Remove this once the bug is fixed.
                    exclude_paths=[
                        "root['annotations']['nginx.ingress.kubernetes.io/canary-weight']"
                    ],
                )
                == {}
            )

        if v3io_trigger:
            expected_struct = self._get_expected_struct_for_v3io_trigger(v3io_trigger)
            diff_result = deepdiff.DeepDiff(
                triggers_config[v3io_trigger["name"]],
                expected_struct,
                ignore_order=True,
            )
            # It's ok if the Nuclio trigger has additional parameters; these are constants
            # we don't care about. We just care that the values we look for are fully present.
            diff_result.pop("dictionary_item_removed", None)
            assert diff_result == {}
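The pop("dictionary_item_removed", None) step above is a general way to assert that the expected structure is a subset of the actual one; a standalone sketch of the same pattern with plain dicts (sample values are made up for illustration):

import deepdiff

actual = {"kind": "http", "maxWorkers": 1, "someNuclioDefault": "x"}
expected = {"kind": "http", "maxWorkers": 1}

diff = deepdiff.DeepDiff(actual, expected, ignore_order=True)
# Keys present only in the first argument are reported under "dictionary_item_removed";
# dropping that entry tolerates extra fields on the actual object.
diff.pop("dictionary_item_removed", None)
assert diff == {}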
Example #15
def test_list_project(
    api_url: str,
    iguazio_client: mlrun.api.utils.clients.iguazio.Client,
    requests_mock: requests_mock_package.Mocker,
):
    mock_projects = [
        {"name": "project-name-1"},
        {"name": "project-name-2", "description": "project-description-2"},
        {"name": "project-name-3", "labels": {"key": "value"}},
        {
            "name": "project-name-4",
            "annotations": {"annotation-key": "annotation-value"},
        },
        {
            "name": "project-name-5",
            "description": "project-description-4",
            "labels": {"key2": "value2"},
            "annotations": {"annotation-key2": "annotation-value2"},
        },
    ]
    response_body = {
        "data": [
            _build_project_response(
                iguazio_client,
                _generate_project(
                    mock_project["name"],
                    mock_project.get("description", ""),
                    mock_project.get("labels", {}),
                    mock_project.get("annotations", {}),
                ),
            )
            for mock_project in mock_projects
        ]
    }
    requests_mock.get(f"{api_url}/api/projects", json=response_body)
    projects, latest_updated_at = iguazio_client.list_projects(None)
    for index, project in enumerate(projects):
        assert project.metadata.name == mock_projects[index]["name"]
        assert project.spec.description == mock_projects[index].get("description")
        assert (
            deepdiff.DeepDiff(
                mock_projects[index].get("labels"),
                project.metadata.labels,
                ignore_order=True,
            )
            == {}
        )
        assert (
            deepdiff.DeepDiff(
                mock_projects[index].get("annotations"),
                project.metadata.annotations,
                ignore_order=True,
            )
            == {}
        )
    assert (
        latest_updated_at.isoformat()
        == response_body["data"][-1]["attributes"]["updated_at"]
    )
Example #16
def test_create_project_from_file_with_legacy_structure():
    project_name = "project-name"
    description = "project description"
    params = {"param_key": "param value"}
    artifact_path = "/tmp"
    legacy_project = mlrun.projects.project.MlrunProjectLegacy(
        project_name, description, params, artifact_path=artifact_path
    )
    function_name = "trainer-function"
    function = mlrun.new_function(function_name, project_name)
    legacy_project.set_function(function, function_name)
    legacy_project.set_function("hub://describe", "describe")
    workflow_name = "workflow-name"
    workflow_file_path = (
        pathlib.Path(tests.conftest.tests_root_directory) / "projects" / "workflow.py"
    )
    legacy_project.set_workflow(workflow_name, str(workflow_file_path))
    artifact_dict = {
        "key": "raw-data",
        "kind": "",
        "iter": 0,
        "tree": "latest",
        "target_path": "https://raw.githubusercontent.com/mlrun/demos/master/customer-churn-prediction/WA_Fn-UseC_-Telc"
        "o-Customer-Churn.csv",
        "db_key": "raw-data",
    }
    legacy_project.artifacts = [artifact_dict]
    legacy_project_file_path = pathlib.Path(tests.conftest.results) / "project.yaml"
    legacy_project.save(str(legacy_project_file_path))
    project = mlrun.load_project("./", str(legacy_project_file_path))
    assert project.kind == "project"
    assert project.metadata.name == project_name
    assert project.spec.description == description
    # assert accessible from the project as well
    assert project.description == description
    assert project.spec.artifact_path == artifact_path
    # assert accessible from the project as well
    assert project.artifact_path == artifact_path
    assert deepdiff.DeepDiff(params, project.spec.params, ignore_order=True,) == {}
    # assert accessible from the project as well
    assert deepdiff.DeepDiff(params, project.params, ignore_order=True,) == {}
    assert (
        deepdiff.DeepDiff(
            legacy_project.functions, project.functions, ignore_order=True,
        )
        == {}
    )
    assert (
        deepdiff.DeepDiff(
            legacy_project.workflows, project.workflows, ignore_order=True,
        )
        == {}
    )
    assert (
        deepdiff.DeepDiff(
            legacy_project.artifacts, project.artifacts, ignore_order=True,
        )
        == {}
    )
Example #17
    def _assert_deploy_spec_has_secrets_config(self, expected_secret_sources):
        call_args_list = nuclio.deploy.deploy_config.call_args_list
        for single_call_args in call_args_list:
            args, _ = single_call_args
            deploy_spec = args[0]["spec"]

            token_path = mlconf.secret_stores.vault.token_path.replace(
                "~", "/root")
            azure_secret_path = mlconf.secret_stores.azure_vault.secret_path.replace(
                "~", "/root")
            expected_volumes = [
                {
                    "volume": {
                        "name": "vault-secret",
                        "secret": {
                            "defaultMode": 420,
                            "secretName": self.vault_secret_name,
                        },
                    },
                    "volumeMount": {
                        "name": "vault-secret",
                        "mountPath": token_path
                    },
                },
                {
                    "volume": {
                        "name": "azure-vault-secret",
                        "secret": {
                            "defaultMode": 420,
                            "secretName": self.azure_vault_secret_name,
                        },
                    },
                    "volumeMount": {
                        "name": "azure-vault-secret",
                        "mountPath": azure_secret_path,
                    },
                },
            ]
            assert (deepdiff.DeepDiff(deploy_spec["volumes"],
                                      expected_volumes,
                                      ignore_order=True) == {})

            expected_env = {
                "MLRUN_SECRET_STORES__VAULT__ROLE": f"project:{self.project}",
                "MLRUN_SECRET_STORES__VAULT__URL":
                mlconf.secret_stores.vault.url,
                # For now, just check that the variable exists; its specific contents are verified below
                "SERVING_SPEC_ENV": None,
            }
            self._assert_pod_env(deploy_spec["env"], expected_env)

            for env_variable in deploy_spec["env"]:
                if env_variable["name"] == "SERVING_SPEC_ENV":
                    serving_spec = json.loads(env_variable["value"])
                    assert (deepdiff.DeepDiff(
                        serving_spec["secret_sources"],
                        expected_secret_sources,
                        ignore_order=True,
                    ) == {})
Example #18
def test_cycle_addjob(job_restart, init_times, restart_dirs):
    cy1 = CycleSimulation(init_times=init_times, restart_dirs=restart_dirs)
    cy1.add(job_restart)
    assert deepdiff.DeepDiff(cy1._job, job_restart) == {}

    job_restart.job_id = 'a_different_id'
    cy1.add(job_restart)
    assert deepdiff.DeepDiff(cy1._job, job_restart) == {}
Example #19
def test_ensemble_addjob(simulation, job):
    ens1 = EnsembleSimulation()
    ens1.add(job)
    assert deepdiff.DeepDiff(ens1.jobs[0], job) == {}

    job.job_id = 'a_different_id'
    ens1.add(job)
    assert deepdiff.DeepDiff(ens1.jobs[1], job) == {}
Example #20
def test_cycle_addscheduler(scheduler, init_times, restart_dirs):
    cy1 = CycleSimulation(init_times=init_times, restart_dirs=restart_dirs)
    cy1.add(scheduler)
    assert deepdiff.DeepDiff(cy1._scheduler, scheduler) == {}

    sched2 = copy.deepcopy(scheduler)
    sched2.nnodes = 99
    cy1.add(sched2)
    assert deepdiff.DeepDiff(cy1._scheduler, sched2) == {}
Example #21
def test_ensemble_addscheduler(simulation, scheduler):
    ens1 = EnsembleSimulation()
    ens1.add(scheduler)
    assert deepdiff.DeepDiff(ens1.scheduler, scheduler) == {}

    sched2 = copy.deepcopy(scheduler)
    sched2.nnodes = 99
    ens1.add(sched2)
    assert deepdiff.DeepDiff(ens1.scheduler, sched2) == {}
Example #22
def test_list_runtime_resources_filter_by_kind(
        db: sqlalchemy.orm.Session,
        client: fastapi.testclient.TestClient) -> None:
    (
        project_1,
        project_2,
        project_3,
        project_1_job_name,
        project_2_job_name,
        project_2_dask_name,
        project_3_mpijob_name,
        grouped_by_project_runtime_resources_output,
    ) = _generate_grouped_by_project_runtime_resources_output()
    filtered_kind = mlrun.runtimes.RuntimeKinds.job

    runtime_handler = mlrun.runtimes.get_runtime_handler(filtered_kind)
    runtime_handler.list_resources = unittest.mock.Mock(
        return_value=_filter_kind_from_grouped_by_project_runtime_resources_output(
            mlrun.runtimes.RuntimeKinds.job,
            grouped_by_project_runtime_resources_output,
        ))
    mlrun.api.utils.clients.opa.Client(
    ).filter_project_resources_by_permissions = unittest.mock.Mock(
        side_effect=lambda _, resources, *args, **kwargs: resources)
    response = client.get(
        "/api/projects/*/runtime-resources",
        params={"kind": mlrun.runtimes.RuntimeKinds.job},
    )
    body = response.json()
    expected_runtime_resources = mlrun.api.schemas.KindRuntimeResources(
        kind=mlrun.runtimes.RuntimeKinds.job,
        resources=mlrun.api.schemas.RuntimeResources(
            crd_resources=[],
            pod_resources=grouped_by_project_runtime_resources_output[
                project_1][mlrun.runtimes.RuntimeKinds.job].pod_resources +
            grouped_by_project_runtime_resources_output[project_2][
                mlrun.runtimes.RuntimeKinds.job].pod_resources,
        ),
    ).dict()
    expected_body = [expected_runtime_resources]
    assert deepdiff.DeepDiff(
        body,
        expected_body,
        ignore_order=True,
    ) == {}

    # test legacy endpoint
    response = client.get(f"/api/runtimes/{mlrun.runtimes.RuntimeKinds.job}")
    body = response.json()
    expected_body = expected_runtime_resources
    assert deepdiff.DeepDiff(
        body,
        expected_body,
        ignore_order=True,
    ) == {}
Example #23
File: base.py  Project: nschenone/mlrun
    def _assert_function_config(
        self,
        config,
        expected_params,
        expected_inputs,
        expected_hyper_params,
        expected_secrets,
        expected_labels,
    ):
        function_metadata = config["metadata"]
        assert function_metadata["name"] == self.name
        assert function_metadata["project"] == self.project

        function_spec = config["spec"]
        assert function_spec["output_path"] == self.artifact_path
        if expected_params:
            assert (
                deepdiff.DeepDiff(
                    function_spec["parameters"], expected_params, ignore_order=True
                )
                == {}
            )
        if expected_inputs:
            assert (
                deepdiff.DeepDiff(
                    function_spec["inputs"], expected_inputs, ignore_order=True
                )
                == {}
            )
        if expected_hyper_params:
            assert (
                deepdiff.DeepDiff(
                    function_spec["hyperparams"],
                    expected_hyper_params,
                    ignore_order=True,
                )
                == {}
            )
        if expected_secrets:
            assert (
                deepdiff.DeepDiff(
                    function_spec["secret_sources"],
                    [expected_secrets],
                    ignore_order=True,
                )
                == {}
            )
        if expected_labels:
            diff_result = deepdiff.DeepDiff(
                function_metadata["labels"], expected_labels, ignore_order=True,
            )
            # We just care that the values we look for are fully there.
            diff_result.pop("dictionary_item_removed", None)
            assert diff_result == {}
Example #24
def assert_ds_equal(a, b):
    def _all_attrs(x):
        return {name: var.attrs for name, var in x.variables.items()}

    assert_equal(a, b)
    assert not deepdiff.DeepDiff(a.attrs, b.attrs)
    assert not deepdiff.DeepDiff(
        _all_attrs(a),
        _all_attrs(b),
    )
    assert a.variables.keys() == b.variables.keys()
Example #25
def test_list_project(
    api_url: str,
    nuclio_client: mlrun.api.utils.clients.nuclio.Client,
    requests_mock: requests_mock_package.Mocker,
):
    mock_projects = [
        {"name": "project-name-1"},
        {"name": "project-name-2", "description": "project-description-2"},
        {"name": "project-name-3", "labels": {"key": "value"}},
        {
            "name": "project-name-4",
            "annotations": {"annotation-key": "annotation-value"},
        },
        {
            "name": "project-name-5",
            "description": "project-description-4",
            "labels": {"key2": "value2"},
            "annotations": {"annotation-key2": "annotation-value2"},
        },
    ]
    response_body = {
        mock_project["name"]: _generate_project_body(
            mock_project["name"],
            mock_project.get("description"),
            mock_project.get("labels"),
            mock_project.get("annotations"),
            with_spec=True,
        )
        for mock_project in mock_projects
    }
    requests_mock.get(f"{api_url}/api/projects", json=response_body)
    projects = nuclio_client.list_projects(None)
    for index, project in enumerate(projects.projects):
        assert project.metadata.name == mock_projects[index]["name"]
        assert project.spec.description == mock_projects[index].get("description")
        assert (
            deepdiff.DeepDiff(
                mock_projects[index].get("labels"),
                project.metadata.labels,
                ignore_order=True,
            )
            == {}
        )
        assert (
            deepdiff.DeepDiff(
                mock_projects[index].get("annotations"),
                project.metadata.annotations,
                ignore_order=True,
            )
            == {}
        )
Example #26
File: base.py  Project: AlonMaor14/mlrun
    def _assert_v3io_mount_or_creds_configured(self,
                                               v3io_user,
                                               v3io_access_key,
                                               cred_only=False):
        args = self._get_pod_creation_args()
        pod_spec = args.spec
        container_spec = pod_spec.containers[0]

        pod_env = container_spec.env
        self._assert_pod_env(
            pod_env,
            {
                "V3IO_API": None,
                "V3IO_USERNAME": v3io_user,
                "V3IO_ACCESS_KEY": v3io_access_key,
            },
        )

        if cred_only:
            assert len(pod_spec.volumes) == 0
            assert len(container_spec.volume_mounts) == 0
            return

        expected_volume = {
            "flexVolume": {
                "driver": "v3io/fuse",
                "options": {
                    "accessKey": v3io_access_key
                },
            },
            "name": "v3io",
        }
        assert (deepdiff.DeepDiff(pod_spec.volumes[0],
                                  expected_volume,
                                  ignore_order=True) == {})

        expected_volume_mounts = [
            {
                "mountPath": "/v3io",
                "name": "v3io",
                "subPath": ""
            },
            {
                "mountPath": "/User",
                "name": "v3io",
                "subPath": f"users/{v3io_user}"
            },
        ]
        assert (deepdiff.DeepDiff(container_spec.volume_mounts,
                                  expected_volume_mounts,
                                  ignore_order=True) == {})
Example #27
File: base.py  Project: AlonMaor14/mlrun
    def _assert_container_resources(self, container_spec, expected_limits,
                                    expected_requests):
        if expected_limits:
            assert (deepdiff.DeepDiff(
                container_spec.resources["limits"],
                expected_limits,
                ignore_order=True,
            ) == {})
        if expected_requests:
            assert (deepdiff.DeepDiff(
                container_spec.resources["requests"],
                expected_requests,
                ignore_order=True,
            ) == {})
Example #28
    def test_k8s_project_secrets_using_httpdb(self):
        secrets = {"secret1": "value1", "secret2": "value2"}
        expected_results = mlrun.api.schemas.SecretKeysData(
            provider="kubernetes", secret_keys=list(secrets.keys()))

        self._run_db.delete_project_secrets(self.project_name,
                                            provider="kubernetes")

        response = self._run_db.list_project_secret_keys(self.project_name,
                                                         provider="kubernetes")
        assert response.secret_keys == []

        self._run_db.create_project_secrets(self.project_name, "kubernetes",
                                            secrets)

        response = self._run_db.list_project_secret_keys(self.project_name,
                                                         provider="kubernetes")
        assert deepdiff.DeepDiff(response.dict(),
                                 expected_results.dict()) == {}

        # Add a secret key
        added_secret = {"secret3": "mySecret!!!"}
        self._run_db.create_project_secrets(self.project_name, "kubernetes",
                                            added_secret)

        expected_results.secret_keys.append("secret3")
        response = self._run_db.list_project_secret_keys(self.project_name,
                                                         provider="kubernetes")
        assert deepdiff.DeepDiff(response.dict(),
                                 expected_results.dict()) == {}

        # Delete secrets
        self._run_db.delete_project_secrets(self.project_name,
                                            provider="kubernetes",
                                            secrets=["secret1", "secret2"])
        expected_results.secret_keys.remove("secret1")
        expected_results.secret_keys.remove("secret2")
        response = self._run_db.list_project_secret_keys(self.project_name,
                                                         provider="kubernetes")
        assert deepdiff.DeepDiff(response.dict(),
                                 expected_results.dict()) == {}

        # Cleanup
        self._run_db.delete_project_secrets(self.project_name,
                                            provider="kubernetes")

        # Negative test - try to list_secrets for k8s secrets (not implemented)
        with pytest.raises(mlrun.errors.MLRunBadRequestError):
            self._run_db.list_project_secrets(self.project_name,
                                              provider="kubernetes")
Example #29
def test_simulation_sub_obj_pickle(model, domain, job, tmpdir):
    sim = Simulation()
    sim.add(model)
    sim.add(domain)
    sim.add(job)

    os.chdir(tmpdir)
    domain_path = pathlib.Path(tmpdir).joinpath('WrfHydroDomain.pkl')
    model_path = pathlib.Path(tmpdir).joinpath('WrfHydroModel.pkl')
    sim.pickle_sub_objs()
    assert sim.domain.resolve() == domain_path
    assert sim.model.resolve() == model_path

    sim.restore_sub_objs()
    assert deepdiff.DeepDiff(sim.domain, domain) == {}
    assert deepdiff.DeepDiff(sim.model, model) == {}
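A standalone sketch of the same idea, using an empty DeepDiff result to verify a pickle round trip on a plain object (sample data made up for illustration):

import pickle
import deepdiff

obj = {"config": {"nnodes": 2, "queue": "normal"}, "ids": [1, 2, 3]}
restored = pickle.loads(pickle.dumps(obj))

# An empty DeepDiff result means the restored object is equivalent to the original.
assert deepdiff.DeepDiff(obj, restored) == {}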
Example #30
    def parser_helper(self, name, code):
        s = preprocess(code)
        logger.info(f"\n*********\nCONSTRUCTING TREE OF {name}:\n*********\n")
        t = None
        try:
            t = construct_node(s, LABEL_DOCUMENT)
        except (UnmatchedTactic, UnmatchedToken) as e:
            logger.info(f"EXCEPTION! {e.__class__.__name__}: {str(e)[:20]}")
            raise e

        try:
            with open(f'pickled/{name}', 'rb') as f:
                pickled = pickle.load(f)
        except FileNotFoundError:
            with open(f'pickled/{name}', 'wb') as f:
                pickle.dump(t, f)
            with open(f'pickled/{name}', 'rb') as f:
                pickled = pickle.load(f)

        diff = deepdiff.DeepDiff(t, pickled)
        try:
            self.assertEqual(diff, {})
            logger.info(f"\n*********\nTREE OF {name}:\n*********\n")
            logger.info("\n"+utils.pretty2str(t))
        except AssertionError as e:
            logger.info(
                f"\n*********\nPARSER OUTPUT FOR TESTCASE: {name}\n*********\n")
            logger.info("\n"+utils.pretty2str(t))
            logger.info(
                f"\n*********\nEXPECTED FOR TESTCASE: {name}\n*********\n")
            logger.info("\n"+utils.pretty2str(pickled))
            logger.info("\nDIFF:")
            logger.info(diff)
            raise e