Example #1
class TestObjectiveFunctionFactory:
    @classmethod
    def setup_class(cls):
        mlos.global_values.declare_singletons()
        warnings.simplefilter("error", category=FutureWarning)

    @pytest.mark.parametrize("config_name", [
        config.name
        for config in objective_function_config_store.list_named_configs()
    ])
    def test_named_configs(self, config_name):
        objective_function_config = objective_function_config_store.get_config_by_name(
            config_name)
        print(objective_function_config.to_json(indent=2))
        objective_function = ObjectiveFunctionFactory.create_objective_function(
            objective_function_config=objective_function_config)

        for _ in range(100):
            random_point = objective_function.parameter_space.random()
            value = objective_function.evaluate_point(random_point)
            assert value in objective_function.output_space

        for i in range(1, 100):
            random_dataframe = objective_function.parameter_space.random_dataframe(
                num_samples=i)
            values_df = objective_function.evaluate_dataframe(random_dataframe)
            assert values_df.index.equals(random_dataframe.index)
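Example #1 enumerates the config store's named configs at pytest collection time, so each named configuration becomes its own test case and a failure points straight at the offending name. Below is a minimal, self-contained sketch of the same parametrization pattern; the dictionary is a hypothetical stand-in for objective_function_config_store, not the MLOS API.

import pytest

# Hypothetical stand-in for a named-config store; the real store exposes
# list_named_configs() and get_config_by_name() on richer config objects.
NAMED_CONFIGS = {
    "three_level_quadratic": {"degree": 2},
    "noisy_polynomial": {"degree": 3, "noise": 0.1},
}

@pytest.mark.parametrize("config_name", sorted(NAMED_CONFIGS))
def test_named_configs(config_name):
    # Each named config is collected as a separate test case.
    config = NAMED_CONFIGS[config_name]
    assert isinstance(config, dict)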
Example #2
    def test_named_configs(self, test_num):
        """Tests named optimizer configurations against named objective functions.

        It is prohibitively expensive to test the entire cross product, so we test only a subset, chosen so that
        each configuration is exercised at least once.
        """
        optimizer_named_configs = bayesian_optimizer_config_store.list_named_configs()
        num_optimizer_configs = len(optimizer_named_configs)
        objective_function_named_configs = objective_function_config_store.list_named_configs()
        num_objective_function_configs = len(objective_function_named_configs)

        named_optimizer_config = optimizer_named_configs[test_num % num_optimizer_configs]
        named_objective_function_config = objective_function_named_configs[test_num % num_objective_function_configs]

        print("#####################################################################################################")
        print(named_optimizer_config)
        print(named_objective_function_config)

        optimizer_evaluator_config = optimizer_evaluator_config_store.get_config_by_name(
            name="parallel_unit_tests_config")
        optimizer_config = named_optimizer_config.config_point
        objective_function_config = named_objective_function_config.config_point

        optimizer_evaluator = OptimizerEvaluator(
            optimizer_evaluator_config=optimizer_evaluator_config,
            objective_function_config=objective_function_config,
            optimizer_config=optimizer_config)

        optimizer_evaluation_report = optimizer_evaluator.evaluate_optimizer()

        mlos.global_values.tracer.trace_events.extend(
            optimizer_evaluation_report.execution_trace)
        if not optimizer_evaluation_report.success:
            raise optimizer_evaluation_report.exception

        for objective_name, single_objective_fit_state in optimizer_evaluation_report.regression_model_fit_state:
            with pd.option_context('display.max_columns', 100):
                print(single_objective_fit_state.get_goodness_of_fit_dataframe(DataSetType.TRAIN).tail())

        for optimum_name, optimum_over_time in optimizer_evaluation_report.optima_over_time.items():
            print("#####################################################################################################")
            print(optimum_name)
            print(optimum_over_time.get_dataframe().tail(10))
            print("#####################################################################################################")
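The coverage guarantee stated in the docstring comes from the index arithmetic: pairing entry test_num % num_optimizer_configs with entry test_num % num_objective_function_configs, for test_num running over max(num_optimizer_configs, num_objective_function_configs) values, touches every element of both lists at least once. Here is a small standalone sketch of that pairing, using hypothetical placeholder lists rather than the MLOS config stores.

# Minimal sketch of the subset-of-the-cross-product pairing used above.
# optimizers / objectives are hypothetical placeholder lists.
optimizers = ["default", "long_horizon", "cheap_surrogate"]
objectives = ["flower", "quadratic", "polynomial", "hypersphere", "nested"]

num_tests = max(len(optimizers), len(objectives))
pairs = [(optimizers[i % len(optimizers)], objectives[i % len(objectives)])
         for i in range(num_tests)]

# Every optimizer and every objective function appears in at least one pair.
assert {opt for opt, _ in pairs} == set(optimizers)
assert {obj for _, obj in pairs} == set(objectives)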
Example #3
    def test_named_configs(self):
        """Tests named optimizer configurations against named objective functions.

        It is prohibitively expensive to test the entire cross product, so we test only a subset, chosen so that
        each configuration is exercised at least once.
        """
        optimizer_named_configs = bayesian_optimizer_config_store.list_named_configs()
        num_optimizer_configs = len(optimizer_named_configs)
        objective_function_named_configs = objective_function_config_store.list_named_configs()
        num_objective_function_configs = len(objective_function_named_configs)

        num_tests = max(num_optimizer_configs, num_objective_function_configs)

        with traced(scope_name="parallel_tests"), concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
            outstanding_futures = set()

            for i in range(num_tests):
                named_optimizer_config = optimizer_named_configs[i % num_optimizer_configs]
                named_objective_function_config = objective_function_named_configs[i % num_objective_function_configs]

                print("#####################################################################################################")
                print(named_optimizer_config)
                print(named_objective_function_config)

                optimizer_evaluator_config = optimizer_evaluator_config_store.get_config_by_name(name="parallel_unit_tests_config")
                optimizer_config = named_optimizer_config.config_point
                objective_function_config = named_objective_function_config.config_point

                optimizer_evaluator = OptimizerEvaluator(
                    optimizer_evaluator_config=optimizer_evaluator_config,
                    objective_function_config=objective_function_config,
                    optimizer_config=optimizer_config
                )

                future = executor.submit(optimizer_evaluator.evaluate_optimizer)
                outstanding_futures.add(future)

            done_futures, outstanding_futures = concurrent.futures.wait(outstanding_futures, return_when=concurrent.futures.ALL_COMPLETED)

            for future in done_futures:
                optimizer_evaluation_report = future.result()
                assert optimizer_evaluation_report.success
                mlos.global_values.tracer.trace_events.extend(optimizer_evaluation_report.execution_trace)

                with pd.option_context('display.max_columns', 100):
                    print(optimizer_evaluation_report.regression_model_goodness_of_fit_state.get_goodness_of_fit_dataframe(DataSetType.TRAIN).tail())
                    for optimum_name, optimum_over_time in optimizer_evaluation_report.optima_over_time.items():
                        print("#####################################################################################################")
                        print(optimum_name)
                        print(optimum_over_time.get_dataframe().tail(10))
                        print("#####################################################################################################")
Example #4
    def test_named_configs(self):

        named_configs = objective_function_config_store.list_named_configs()

        objective_function_configs_to_test = [
            named_config.config_point for named_config in named_configs
        ]

        for objective_function_config in objective_function_configs_to_test:
            print(objective_function_config.to_json(indent=2))
            objective_function = ObjectiveFunctionFactory.create_objective_function(
                objective_function_config=objective_function_config)
            default_polynomials_domain = objective_function.parameter_space
            for _ in range(100):
                random_point = default_polynomials_domain.random()
                value = objective_function.evaluate_point(random_point)
                self.assertTrue(value in objective_function.output_space)

            for i in range(1, 100):
                random_dataframe = default_polynomials_domain.random_dataframe(
                    num_samples=i)
                values_df = objective_function.evaluate_dataframe(
                    random_dataframe)
                self.assertTrue(values_df.index.equals(random_dataframe.index))