Example #1
 def test_create_run_in_deleted_experiment(self):
     fs = FileStore(self.test_root)
     exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
     # delete the experiment; creating a run in it should then fail
     fs.delete_experiment(exp_id)
     with pytest.raises(Exception):
         fs.create_run(exp_id, 'user', 0, [])
Example #2
 def test_create_run_in_deleted_experiment(self):
     fs = FileStore(self.test_root)
     exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
     # delete the experiment; creating a run in it should then fail
     fs.delete_experiment(exp_id)
     with pytest.raises(Exception):
         fs.create_run(exp_id, 'user', 'name', 'source_type', 'source_name', 'entry_point_name',
                       0, None, [], None)
Example #3
 def test_create_run_with_parent_id(self):
     fs = FileStore(self.test_root)
     exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
     run = fs.create_run(exp_id, 'user', 'name', 'source_type', 'source_name',
                         'entry_point_name', 0, None, [], 'test_parent_run_id')
     assert any([t.key == MLFLOW_PARENT_RUN_ID and t.value == 'test_parent_run_id'
                 for t in fs.get_all_tags(run.info.run_uuid)])
Example #4
 def test_log_batch(self):
     fs = FileStore(self.test_root)
     run = fs.create_run(experiment_id=Experiment.DEFAULT_EXPERIMENT_ID,
                         user_id='user',
                         run_name=None,
                         source_type='source_type',
                         source_name='source_name',
                         entry_point_name='entry_point_name',
                         start_time=0,
                         source_version=None,
                         tags=[],
                         parent_run_id=None)
     run_uuid = run.info.run_uuid
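     # Prepare metric, param and tag entities, then log them all in a single batch call.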
     metric_entities = [
         Metric("m1", 0.87, 12345),
         Metric("m2", 0.49, 12345)
     ]
     param_entities = [Param("p1", "p1val"), Param("p2", "p2val")]
     tag_entities = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
     fs.log_batch(run_id=run_uuid,
                  metrics=metric_entities,
                  params=param_entities,
                  tags=tag_entities)
     self._verify_logged(fs, run_uuid, metric_entities, param_entities,
                         tag_entities)
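Example #4 delegates its final checks to a _verify_logged helper that this page does not show, while Example #5 below performs the same verification inline. Here is a minimal sketch of such a helper, modeled on the inline assertions of Example #5 and on the older list-of-entities RunData API those examples use; the helper name and argument order come from the call above, but the body is an assumption rather than the actual MLflow test code:

    def _verify_logged(self, fs, run_uuid, metrics, params, tags):
        # Re-read the run and confirm everything passed to log_batch is present.
        run = fs.get_run(run_uuid)
        logged_tags = set((t.key, t.value) for t in run.data.tags)
        logged_metrics = set((m.key, m.value, m.timestamp) for m in run.data.metrics)
        logged_params = set((p.key, p.value) for p in run.data.params)
        # Containment rather than strict equality, so tags the store adds on its
        # own (e.g. a parent-run tag) do not break the comparison.
        assert set((t.key, t.value) for t in tags) <= logged_tags
        assert set((m.key, m.value, m.timestamp) for m in metrics) <= logged_metrics
        assert set((p.key, p.value) for p in params) <= logged_params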
Example #5
 def test_log_batch(self):
     fs = FileStore(self.test_root)
     run = fs.create_run(experiment_id=Experiment.DEFAULT_EXPERIMENT_ID,
                         user_id='user',
                         run_name=None,
                         source_type='source_type',
                         source_name='source_name',
                         entry_point_name='entry_point_name',
                         start_time=0,
                         source_version=None,
                         tags=[],
                         parent_run_id=None)
     run_uuid = run.info.run_uuid
     metric_entities = [
         Metric("m1", 0.87, 12345),
         Metric("m2", 0.49, 12345)
     ]
     param_entities = [Param("p1", "p1val"), Param("p2", "p2val")]
     tag_entities = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
     fs.log_batch(run_id=run_uuid,
                  metrics=metric_entities,
                  params=param_entities,
                  tags=tag_entities)
     run = fs.get_run(run_uuid)
     tags = [(t.key, t.value) for t in run.data.tags]
     metrics = [(m.key, m.value, m.timestamp) for m in run.data.metrics]
     params = [(p.key, p.value) for p in run.data.params]
     assert set(tags) == set([("t1", "t1val"), ("t2", "t2val")])
     assert set(metrics) == set([("m1", 0.87, 12345), ("m2", 0.49, 12345)])
     assert set(params) == set([("p1", "p1val"), ("p2", "p2val")])
Example #6
    def test_search_tags(self):
        fs = FileStore(self.test_root)
        experiment_id = self.experiments[0]
        r1 = fs.create_run(experiment_id, 'user', 0, []).info.run_id
        r2 = fs.create_run(experiment_id, 'user', 0, []).info.run_id

        fs.set_tag(r1, RunTag('generic_tag', 'p_val'))
        fs.set_tag(r2, RunTag('generic_tag', 'p_val'))

        fs.set_tag(r1, RunTag('generic_2', 'some value'))
        fs.set_tag(r2, RunTag('generic_2', 'another value'))

        fs.set_tag(r1, RunTag('p_a', 'abc'))
        fs.set_tag(r2, RunTag('p_b', 'ABC'))

        # test search returns both runs
        six.assertCountEqual(
            self, [r1, r2],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_tag = 'p_val'"))
        # test search returns appropriate run (same key different values per run)
        six.assertCountEqual(
            self, [r1],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_2 = 'some value'"))
        six.assertCountEqual(
            self, [r2],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_2='another value'"))
        six.assertCountEqual(
            self, [],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_tag = 'wrong_val'"))
        six.assertCountEqual(
            self, [],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_tag != 'p_val'"))
        six.assertCountEqual(
            self, [r1, r2],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_tag != 'wrong_val'"))
        six.assertCountEqual(
            self, [r1, r2],
            self._search(fs,
                         experiment_id,
                         filter_str="tags.generic_2 != 'wrong_val'"))
        six.assertCountEqual(
            self, [r1],
            self._search(fs, experiment_id, filter_str="tags.p_a = 'abc'"))
        six.assertCountEqual(
            self, [r2],
            self._search(fs, experiment_id, filter_str="tags.p_b = 'ABC'"))
Example #7
 def test_create_run_with_parent_id(self):
     fs = FileStore(self.test_root)
     exp_id = self.experiments[random_int(0, len(self.experiments) - 1)]
     run = fs.create_run(exp_id, 'user', 'name', 'source_type',
                         'source_name', 'entry_point_name', 0, None, [],
                         'test_parent_run_id')
     assert fs.get_run(
         run.info.run_uuid
     ).data.tags[MLFLOW_PARENT_RUN_ID] == 'test_parent_run_id'
Example #8
    def test_search_with_deterministic_max_results(self):
        fs = FileStore(self.test_root)
        exp = fs.create_experiment("test_search_with_deterministic_max_results")

        # Create 10 runs with the same start_time and sort them by run_id,
        # so the expected result ordering is deterministic.
        runs = sorted([fs.create_run(exp, 'user', 1000, []).info.run_id
                       for r in range(10)])
        for n in [0, 1, 2, 4, 8, 10, 20]:
            assert(runs[:min(10, n)] == self._search(fs, exp, max_results=n))
Example #9
 def test_log_batch(self):
     fs = FileStore(self.test_root)
     run = fs.create_run(
         experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, user_id='user', start_time=0, tags=[])
     run_id = run.info.run_id
     metric_entities = [Metric("m1", 0.87, 12345, 0), Metric("m2", 0.49, 12345, 0)]
     param_entities = [Param("p1", "p1val"), Param("p2", "p2val")]
     tag_entities = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
     fs.log_batch(
         run_id=run_id, metrics=metric_entities, params=param_entities, tags=tag_entities)
     self._verify_logged(fs, run_id, metric_entities, param_entities, tag_entities)
Example #10
    def test_create_run_returns_expected_run_data(self):
        fs = FileStore(self.test_root)
        no_tags_run = fs.create_run(
            experiment_id=FileStore.DEFAULT_EXPERIMENT_ID, user_id='user', start_time=0, tags=[])
        assert isinstance(no_tags_run.data, RunData)
        assert len(no_tags_run.data.tags) == 0

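        # A run created with explicit tags should expose them as a dict on run.data.tags.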
        tags_dict = {
            "my_first_tag": "first",
            "my-second-tag": "2nd",
        }
        tags_entities = [
            RunTag(key, value) for key, value in tags_dict.items()
        ]
        tags_run = fs.create_run(
            experiment_id=FileStore.DEFAULT_EXPERIMENT_ID,
            user_id='user',
            start_time=0,
            tags=tags_entities)
        assert isinstance(tags_run.data, RunData)
        assert tags_run.data.tags == tags_dict
Example #11
 def test_search_runs_pagination(self):
     fs = FileStore(self.test_root)
     exp = fs.create_experiment("test_search_runs_pagination")
     # test returned token behavior
     runs = sorted([fs.create_run(exp, 'user', 1000, []).info.run_id
                    for r in range(10)])
     result = fs.search_runs([exp], None, ViewType.ALL, max_results=4)
     assert [r.info.run_id for r in result] == runs[0:4]
     assert result.token is not None
     result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
                             page_token=result.token)
     assert [r.info.run_id for r in result] == runs[4:8]
     assert result.token is not None
     result = fs.search_runs([exp], None, ViewType.ALL, max_results=4,
                             page_token=result.token)
     assert [r.info.run_id for r in result] == runs[8:]
     assert result.token is None
Example #12
    def test_search_with_max_results(self):
        fs = FileStore(self.test_root)
        exp = fs.create_experiment("search_with_max_results")

        runs = [fs.create_run(exp, 'user', r, []).info.run_id
                for r in range(10)]
        runs.reverse()
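        # Search results come back with the most recent start_time first,
        # so reverse the creation order to match.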

        print(runs)
        print(self._search(fs, exp))
        assert(runs[:10] == self._search(fs, exp))
        for n in [0, 1, 2, 4, 8, 10, 20, 50, 100, 500, 1000, 1200, 2000]:
            assert(runs[:min(1200, n)] == self._search(fs, exp, max_results=n))

        with self.assertRaises(MlflowException) as e:
            self._search(fs, exp, None, max_results=int(1e10))
        self.assertIn("Invalid value for request parameter max_results. It ", e.exception.message)
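All of the examples above are methods of a FileStore test class and rely on fixtures that this page does not show: self.test_root, self.experiments and the _search helper. The sketch below is one plausible scaffold; the FileStore and search_runs calls mirror the examples themselves, while the import paths (which moved between MLflow releases), the setUp/tearDown bodies and the default max_results value are assumptions, not the actual MLflow test code:

import shutil
import tempfile
import unittest

from mlflow.entities import ViewType
from mlflow.store.tracking.file_store import FileStore  # older releases: mlflow.store.file_store


class TestFileStore(unittest.TestCase):
    def setUp(self):
        # Every test gets a FileStore rooted in a fresh temporary directory.
        self.test_root = tempfile.mkdtemp()
        fs = FileStore(self.test_root)
        # Pre-create a few experiments for tests that pick one at random.
        self.experiments = [fs.create_experiment("exp-%d" % i) for i in range(3)]

    def tearDown(self):
        shutil.rmtree(self.test_root, ignore_errors=True)

    def _search(self, fs, experiment_id, filter_str=None, max_results=1000):
        # Thin wrapper used by the search tests: return only the run ids.
        runs = fs.search_runs([experiment_id], filter_str, ViewType.ALL,
                              max_results=max_results)
        return [r.info.run_id for r in runs]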