Example #1
  def test_overview(self, mlmd_store_path, metric_aggregators, want_columns):
    # Connect to the SQLite-backed MLMD store produced by the benchmark run.
    config = metadata_store_pb2.ConnectionConfig()
    config.sqlite.filename_uri = mlmd_store_path

    store = metadata_store.MetadataStore(config)
    # Build the results overview and check that the expected columns appear.
    df = results.overview(store, metric_aggregators=metric_aggregators)
    self.assertEqual(want_columns, df.columns.tolist())
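A standalone sketch of the same call outside the test harness. The store path, the aggregator choices, and the import path for the results module are assumptions made for illustration, and the MLMD store is assumed to already contain benchmark runs.

import numpy as np
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
from nitroml import results  # assumed import path for the module used above

# Point MLMD at an existing SQLite-backed store (placeholder path).
config = metadata_store_pb2.ConnectionConfig()
config.sqlite.filename_uri = '/tmp/nitroml_mlmd.sqlite'

store = metadata_store.MetadataStore(config)
# Aggregate each metric across runs; np.mean and np.min are illustrative choices,
# and the exact output columns depend on how results.overview names them.
df = results.overview(store, metric_aggregators=[np.mean, np.min])
print(df.columns.tolist())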
Example #2
    def test(self, use_keras, enable_tuning):
        # Serve mocked OpenML responses so the benchmark runs hermetically.
        with requests_mock.Mocker() as mocker:
            dataset_id_list = [1, 2]
            testing_utils.register_mock_urls(mocker, dataset_id_list)
            self.run_benchmarks(
                [openml_cc18_benchmark.OpenMLCC18Benchmark()],
                data_dir=os.path.join(self.pipeline_root, 'openML_mock_data'),
                mock_data=True,
                use_keras=use_keras,
                enable_tuning=enable_tuning,
            )

        # Each dataset gets its own benchmark instance; verify its components.
        instance_names = []
        for did in dataset_id_list:
            instance_name = '.'.join(
                ['OpenMLCC18Benchmark', 'benchmark', f'OpenML.mockdata_{did}'])
            instance_names.append(instance_name)

            if enable_tuning:
                self.assertComponentExecutionCount(8 * len(dataset_id_list))
                self.assertComponentSucceeded('.'.join(
                    ['Tuner', instance_name]))
            else:
                self.assertComponentExecutionCount(7 * len(dataset_id_list))
            self.assertComponentSucceeded('.'.join(
                [f'CsvExampleGen.OpenML.mockdata_{did}', instance_name]))
            self.assertComponentSucceeded('.'.join(
                ['SchemaGen.AutoData', instance_name]))
            self.assertComponentSucceeded('.'.join(
                ['StatisticsGen.AutoData', instance_name]))
            self.assertComponentSucceeded('.'.join(
                ['Transform.AutoData', instance_name]))
            self.assertComponentSucceeded('.'.join(['Trainer', instance_name]))
            self.assertComponentSucceeded('.'.join(
                ['Evaluator', instance_name]))
            self.assertComponentSucceeded('.'.join(
                ['BenchmarkResultPublisher', instance_name]))

        # Load benchmark results.
        store = metadata_store.MetadataStore(self.metadata_config)
        df = results.overview(store)

        # Check benchmark results overview values.
        self.assertEqual(len(df.index), len(dataset_id_list))
        self.assertContainsSubset([
            'benchmark',
            'run',
            'num_runs',
            'accuracy',
            'average_loss',
            'post_export_metrics/example_count',
        ], df.columns.values.tolist())
        self.assertSameElements([1], df['run'].tolist())
        self.assertSameElements([1], df['num_runs'].tolist())
        self.assertSameElements(instance_names, df.benchmark.unique())
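A hypothetical sketch of the kind of registration a helper such as testing_utils.register_mock_urls could perform with requests_mock; the URL and payload below are placeholders, not the real OpenML endpoints or response format.

import requests_mock

with requests_mock.Mocker() as mocker:
    for did in [1, 2]:
        # Placeholder endpoint and body; the real helper knows the actual
        # OpenML routes and fixture payloads.
        mocker.get(f'https://example.org/openml/data/{did}',
                   json={'id': did, 'name': f'mockdata_{did}'})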
Example #3
    def test(self, use_keras, enable_tuning):
        self.run_benchmarks([titanic_benchmark.TitanicBenchmark()],
                            use_keras=use_keras,
                            enable_tuning=enable_tuning)
        # With tuning enabled an extra Tuner component runs (8 executions instead of 7).
        if enable_tuning:
            self.assertComponentExecutionCount(8)
            self.assertComponentSucceeded("Tuner.TitanicBenchmark.benchmark")
        else:
            self.assertComponentExecutionCount(7)
        self.assertComponentSucceeded(
            "ImportExampleGen.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded(
            "SchemaGen.AutoData.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded(
            "StatisticsGen.AutoData.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded(
            "Transform.AutoData.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded("Trainer.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded("Evaluator.TitanicBenchmark.benchmark")
        self.assertComponentSucceeded(
            "BenchmarkResultPublisher.TitanicBenchmark.benchmark")

        # Load benchmark results.
        store = metadata_store.MetadataStore(self.metadata_config)
        df = results.overview(store)

        # Check benchmark results overview values.
        self.assertEqual(len(df.index), 1)
        self.assertContainsSubset([
            "benchmark",
            "run",
            "num_runs",
            "accuracy",
            "average_loss",
            "post_export_metrics/example_count",
        ], df.columns.values.tolist())
        self.assertSameElements([1], df["run"].tolist())
        self.assertSameElements([1], df["num_runs"].tolist())
        self.assertSameElements(["TitanicBenchmark.benchmark"],
                                df.benchmark.unique())
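As a follow-on to the overview check above, the returned object is an ordinary pandas DataFrame, so individual metrics can be read directly. This snippet is illustrative, continues from the df computed above, and assumes the column names asserted there.

# Select the single Titanic row and read its metrics.
row = df[df['benchmark'] == 'TitanicBenchmark.benchmark'].iloc[0]
print(row['accuracy'], row['average_loss'],
      row['post_export_metrics/example_count'])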
Example #4
    def test(self, algorithm):
        with requests_mock.Mocker() as mocker:
            testing_utils.register_mock_urls(mocker)
            self.run_benchmarks(
                [metalearning_benchmark.MetaLearningBenchmark()],
                data_dir=os.path.join(self.pipeline_root,
                                      'mock_metalearning_openml'),
                mock_data=True,
                algorithm=algorithm)

        # Meta-train datasets: per-dataset AutoData and tuner components should succeed.
        train_dataset_ids = [1]
        for ix in train_dataset_ids:
            instance_name = 'MetaLearningBenchmark.benchmark'
            self.assertComponentSucceeded(
                f'CsvExampleGen.OpenML.mockdata_{ix}.{instance_name}')
            self.assertComponentSucceeded(
                f'AugmentedTuner.train.OpenML.mockdata_{ix}.{instance_name}')
            self.assertComponentSucceeded(
                f'SchemaGen.AutoData.train.OpenML.mockdata_{ix}.{instance_name}'
            )
            self.assertComponentSucceeded(
                f'StatisticsGen.AutoData.train.OpenML.mockdata_{ix}.{instance_name}'
            )
            self.assertComponentSucceeded(
                f'Transform.AutoData.train.OpenML.mockdata_{ix}.{instance_name}'
            )

        # Meta-test dataset: the full train/eval pipeline runs against it.
        test_dataset_ids = [2]
        for ix in test_dataset_ids:
            instance_name = 'MetaLearningBenchmark.benchmark.OpenML.mockdata_2'
            self.assertComponentSucceeded(
                f'CsvExampleGen.OpenML.mockdata_{ix}.{instance_name}')
            self.assertComponentSucceeded(
                f'SchemaGen.AutoData.test.OpenML.mockdata_{ix}.{instance_name}'
            )
            self.assertComponentSucceeded(
                f'StatisticsGen.AutoData.test.OpenML.mockdata_{ix}.{instance_name}'
            )
            self.assertComponentSucceeded(
                f'Transform.AutoData.test.OpenML.mockdata_{ix}.{instance_name}'
            )
            self.assertComponentSucceeded(
                f'AugmentedTuner.test.OpenML.mockdata_{ix}.{instance_name}')
            self.assertComponentSucceeded(
                f'Trainer.test.OpenML.mockdata_{ix}.{instance_name}')

        instance_name = 'MetaLearningBenchmark.benchmark.OpenML.mockdata_2'
        self.assertComponentSucceeded(f'Evaluator.{instance_name}')
        self.assertComponentSucceeded(
            f'BenchmarkResultPublisher.{instance_name}')

        # Load benchmark results.
        store = metadata_store.MetadataStore(self.metadata_config)
        df = results.overview(store)
        # Check benchmark results overview values.
        self.assertEqual(len(df.index), 1)
        self.assertContainsSubset([
            'benchmark',
            'run',
            'num_runs',
            'accuracy',
            'average_loss',
            'post_export_metrics/example_count',
        ], df.columns.values.tolist())
        self.assertSameElements([1], df['run'].tolist())
        self.assertSameElements([1], df['num_runs'].tolist())
        self.assertSameElements([instance_name], df.benchmark.unique())
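A minimal sketch, assuming absl's parameterized test runner, of how the algorithm argument above might be supplied; the algorithm names are illustrative placeholders, not necessarily the ones the benchmark actually supports.

from absl.testing import parameterized


class MetaLearningBenchmarkTest(parameterized.TestCase):

    # Each parameter becomes one test case; the names below are placeholders.
    @parameterized.parameters('majority_voting', 'nearest_neighbor')
    def test(self, algorithm):
        ...  # body as in the example above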