Example #1
0
    def test_bad_experiment_id_recorded_for_run(self):
        fs = FileStore(self.test_root)
        exp_0 = fs.get_experiment(Experiment.DEFAULT_EXPERIMENT_ID)
        all_runs = fs.search_runs([exp_0.experiment_id], [], run_view_type=ViewType.ALL)

        all_run_ids = self.exp_data[exp_0.experiment_id]["runs"]
        assert len(all_runs) == len(all_run_ids)

        # change experiment pointer in run
        bad_run_id = str(self.exp_data[exp_0.experiment_id]['runs'][0])
        path = os.path.join(self.test_root, str(exp_0.experiment_id), bad_run_id)
        experiment_data = read_yaml(path, "meta.yaml")
        experiment_data["experiment_id"] = 1
        write_yaml(path, "meta.yaml", experiment_data, True)

        with pytest.raises(MlflowException) as e:
            fs.get_run(bad_run_id)
        assert "not found" in str(e.value)

        valid_runs = fs.search_runs([exp_0.experiment_id], [], run_view_type=ViewType.ALL)
        assert len(valid_runs) == len(all_runs) - 1

        for rid in all_run_ids:
            if rid != bad_run_id:
                fs.get_run(rid)
Example #2
0
    def test_search_runs(self):
        # replace with a real test once search code is implemented
        fs = FileStore(self.test_root)
        # Expect 2 runs for each experiment
        assert len(fs.search_runs([self.experiments[0]], None, ViewType.ACTIVE_ONLY)) == 2
        assert len(fs.search_runs([self.experiments[0]], None, ViewType.ALL)) == 2
        assert len(fs.search_runs([self.experiments[0]], None, ViewType.DELETED_ONLY)) == 0
Example #3
0
    def test_malformed_run(self):
        fs = FileStore(self.test_root)
        exp_0 = fs.get_experiment(Experiment.DEFAULT_EXPERIMENT_ID)
        all_runs = fs.search_runs([exp_0.experiment_id], [],
                                  run_view_type=ViewType.ALL)

        all_run_ids = self.exp_data[exp_0.experiment_id]["runs"]
        assert len(all_runs) == len(all_run_ids)

        # delete metadata file.
        bad_run_id = self.exp_data[exp_0.experiment_id]['runs'][0]
        path = os.path.join(self.test_root, str(exp_0.experiment_id),
                            str(bad_run_id), "meta.yaml")
        os.remove(path)
        with pytest.raises(MissingConfigException) as e:
            fs.get_run(bad_run_id)
        assert "does not exist" in str(e.value)

        valid_runs = fs.search_runs([exp_0.experiment_id], [],
                                    run_view_type=ViewType.ALL)
        assert len(valid_runs) == len(all_runs) - 1

        for rid in all_run_ids:
            if rid != bad_run_id:
                fs.get_run(rid)
Example #4
0
    def test_search_runs_pagination(self):
        fs = FileStore(self.test_root)
        exp = fs.create_experiment("test_search_runs_pagination")
        # Test the returned page-token behavior
        runs = sorted([fs.create_run(exp, 'user', 1000, []).info.run_id
                       for _ in range(10)])
        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4)
        assert [r.info.run_id for r in result] == runs[0:4]
        assert result.token is not None
        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
                                page_token=result.token)
        assert [r.info.run_id for r in result] == runs[4:8]
        assert result.token is not None
        result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
                                page_token=result.token)
        assert [r.info.run_id for r in result] == runs[8:]
        assert result.token is None
Example #5
0
    def test_search_runs(self):
        # replace with a real test once search code is implemented
        fs = FileStore(self.test_root)
        # Expect 2 runs for each experiment
        assert len(fs.search_runs([self.experiments[0]], [])) == 2
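
Note: every example above references fixture attributes (self.test_root, self.experiments, self.exp_data) from a shared test class that is not shown. The sketch below is a minimal, assumed version of such a fixture, built only from the FileStore calls visible in the examples; the import paths and the setup body are assumptions, not the actual MLflow test harness, and have moved between MLflow versions.

import shutil
import tempfile
import unittest

from mlflow.entities import Experiment
# Assumed import path; older MLflow versions expose FileStore elsewhere.
from mlflow.store.tracking.file_store import FileStore


class FileStoreTestBase(unittest.TestCase):
    def setUp(self):
        # Throwaway root directory backing the FileStore for each test.
        self.test_root = tempfile.mkdtemp()
        fs = FileStore(self.test_root)
        # A couple of experiments besides the default one.
        self.experiments = [fs.create_experiment(f"exp-{i}") for i in range(2)]
        # Map experiment_id -> {"runs": [run_id, ...]} with two runs each,
        # matching the "Expect 2 runs for each experiment" comments above.
        self.exp_data = {}
        for exp_id in [Experiment.DEFAULT_EXPERIMENT_ID] + self.experiments:
            runs = [fs.create_run(exp_id, "user", 1000, []).info.run_id
                    for _ in range(2)]
            self.exp_data[exp_id] = {"runs": runs}

    def tearDown(self):
        shutil.rmtree(self.test_root, ignore_errors=True)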