def test_run(tmpdir, tracking_uri_mock, use_start_run):  # pylint: disable=unused-argument
    """Run the test_tracking entry point and validate the run recorded in the FileStore."""
    submitted_run = mlflow.projects.run(
        TEST_PROJECT_DIR, entry_point="test_tracking",
        parameters={"use_start_run": use_start_run},
        use_conda=False, experiment_id=0)
    assert submitted_run.run_id is not None
    # A blocking run is already finished by the time run() returns.
    validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
    # wait() on an already-finished run must be a no-op that leaves status FINISHED.
    submitted_run.wait()
    validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
    # Cross-check the FileStore contents against what run() reported.
    store = FileStore(tmpdir.strpath)
    active_runs = store.list_run_infos(experiment_id=0, run_view_type=ViewType.ACTIVE_ONLY)
    assert len(active_runs) == 1
    assert active_runs[0].run_uuid == submitted_run.run_id
    run = store.get_run(submitted_run.run_id)
    assert run.info.status == RunStatus.FINISHED
    expected_params = {"use_start_run": use_start_run}
    assert len(run.data.params) == len(expected_params)
    for param in run.data.params:
        assert param.value == expected_params[param.key]
    expected_metrics = {"some_key": 3}
    assert len(run.data.metrics) == len(expected_metrics)
    for metric in run.data.metrics:
        assert metric.value == expected_metrics[metric.key]
def test_list_run_infos(self):
    """list_run_infos should return one RunInfo per run, matching the recorded run data."""
    fs = FileStore(self.test_root)
    for exp_id in self.experiments:
        for run_info in fs.list_run_infos(exp_id):
            expected = self.run_data[run_info.run_uuid]
            # run_data also carries metrics/params, which a RunInfo does not include.
            expected.pop("metrics")
            expected.pop("params")
            self.assertEqual(expected, dict(run_info))
def test_list_run_infos(self):
    """list_run_infos (ALL view) should return a RunInfo per run matching recorded data."""
    fs = FileStore(self.test_root)
    for exp_id in self.experiments:
        for run_info in fs.list_run_infos(exp_id, run_view_type=ViewType.ALL):
            expected = self.run_data[run_info.run_uuid]
            # RunInfo carries no metrics/params/tags; every test run here is active.
            for field in ("metrics", "params", "tags"):
                expected.pop(field)
            expected['lifecycle_stage'] = LifecycleStage.ACTIVE
            self.assertEqual(expected, dict(run_info))
def test_run_local_git_repo(
        tmpdir, local_git_repo, local_git_repo_uri,
        tracking_uri_mock,  # pylint: disable=unused-argument
        use_start_run, version):
    """Run a project out of a local git repo (by URI fragment or path) and validate the run."""
    if version is not None:
        uri = local_git_repo_uri + "#" + TEST_PROJECT_NAME
    else:
        uri = os.path.join("%s/" % local_git_repo, TEST_PROJECT_NAME)
    if version == "git-commit":
        version = _get_version_local_git_repo(local_git_repo)
    submitted_run = mlflow.projects.run(
        uri, entry_point="test_tracking", version=version,
        parameters={"use_start_run": use_start_run},
        use_conda=False, experiment_id=0)
    # A blocking run is already finished by the time run() returns.
    validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
    # wait() on an already-finished run must be a no-op that leaves status FINISHED.
    submitted_run.wait()
    validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
    # Cross-check the FileStore contents against what run() reported.
    store = FileStore(tmpdir.strpath)
    active_runs = store.list_run_infos(experiment_id=0, run_view_type=ViewType.ACTIVE_ONLY)
    # Local (non-remote) execution should record a file:// source.
    assert "file:" in active_runs[0].source_name
    assert len(active_runs) == 1
    assert active_runs[0].run_uuid == submitted_run.run_id
    run = store.get_run(submitted_run.run_id)
    assert run.info.status == RunStatus.FINISHED
    expected_params = {"use_start_run": use_start_run}
    assert len(run.data.params) == len(expected_params)
    for param in run.data.params:
        assert param.value == expected_params[param.key]
    expected_metrics = {"some_key": 3}
    assert len(run.data.metrics) == len(expected_metrics)
    for metric in run.data.metrics:
        assert metric.value == expected_metrics[metric.key]
    # Validate the branch name tag is logged when running from a branch.
    # NOTE(review): this loop checks every tag against expected_tags, which would
    # KeyError if any other tag were present — presumably none are; verify.
    if version == "master":
        expected_tags = {"mlflow.gitBranchName": "master"}
        for tag in run.data.tags:
            assert tag.value == expected_tags[tag.key]
def test_log_parameters():
    """Test that we log provided parameters when running a project."""
    with TempDir() as tmp, \
            mock.patch("mlflow.tracking.get_tracking_uri") as get_tracking_uri_mock:
        tmp_dir = tmp.path()
        # Point the tracking client at a throwaway FileStore directory.
        get_tracking_uri_mock.return_value = tmp_dir
        mlflow.projects.run(
            TEST_PROJECT_DIR, entry_point="greeter",
            parameters={"name": "friend"}, use_conda=False, experiment_id=0)
        store = FileStore(tmp_dir)
        run_uuid = store.list_run_infos(experiment_id=0)[0].run_uuid
        run = store.get_run(run_uuid)
        expected = {"name": "friend"}
        assert len(run.data.params) == len(expected)
        for param in run.data.params:
            assert param.value == expected[param.key]
def test_run():
    """Run the test_tracking entry point for both use_start_run values and
    validate the run recorded in a temporary FileStore."""
    for use_start_run in map(str, [0, 1]):
        with TempDir() as tmp, mock.patch("mlflow.tracking.get_tracking_uri")\
                as get_tracking_uri_mock:
            tmp_dir = tmp.path()
            # Point the tracking client at a throwaway FileStore directory.
            get_tracking_uri_mock.return_value = tmp_dir
            submitted_run = mlflow.projects.run(
                TEST_PROJECT_DIR, entry_point="test_tracking",
                parameters={"use_start_run": use_start_run},
                use_conda=False, experiment_id=0)
            # Blocking runs should be finished when they return
            validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
            # Test that we can call wait() on a synchronous run & that the run has the correct
            # status after calling wait().
            submitted_run.wait()
            validate_exit_status(submitted_run.get_status(), RunStatus.FINISHED)
            # Validate run contents in the FileStore
            run_uuid = submitted_run.run_id
            store = FileStore(tmp_dir)
            run_infos = store.list_run_infos(experiment_id=0)
            assert len(run_infos) == 1
            store_run_uuid = run_infos[0].run_uuid
            assert run_uuid == store_run_uuid
            run = store.get_run(run_uuid)
            expected_params = {"use_start_run": use_start_run}
            assert run.info.status == RunStatus.FINISHED
            assert len(run.data.params) == len(expected_params)
            for param in run.data.params:
                assert param.value == expected_params[param.key]
            expected_metrics = {"some_key": 3}
            # BUGFIX: assert the metric count as well — without it the loop below
            # passes vacuously when no metrics were logged at all.
            assert len(run.data.metrics) == len(expected_metrics)
            for metric in run.data.metrics:
                assert metric.value == expected_metrics[metric.key]