示例#1
0
    def __init__(self,
                 model_config: Point,
                 input_space: Hypergrid,
                 output_space: Hypergrid,
                 logger=None):
        """Build one HomogeneousRandomForestRegressionModel per output dimension.

        The forests are stored in a KeyOrderedDict keyed by objective name, in
        the order of self.output_dimension_names.
        """
        MultiObjectiveRegressionModel.__init__(self,
                                               model_type=type(self),
                                               model_config=model_config,
                                               input_space=input_space,
                                               output_space=output_space)
        self.logger = logger if logger is not None else create_logger("MultiObjectiveHomogeneousRandomForest")

        # Asserting membership in homogeneous_random_forest_config_store.parameter_space
        # is a simple validity check; a more elaborate validation may be needed later.
        #
        assert model_config in homogeneous_random_forest_config_store.parameter_space

        self._regressors_by_objective_name = KeyOrderedDict(
            ordered_keys=self.output_dimension_names,
            value_type=HomogeneousRandomForestRegressionModel)

        # One single-objective forest per output dimension, each trained against a
        # single-dimension output hypergrid wrapping that objective.
        for objective_dimension in output_space.dimensions:
            single_objective_grid = SimpleHypergrid(
                name=f"{objective_dimension.name}_objective",
                dimensions=[objective_dimension])
            forest = HomogeneousRandomForestRegressionModel(
                model_config=model_config,
                input_space=input_space,
                output_space=single_objective_grid,
                logger=self.logger)
            self._regressors_by_objective_name[objective_dimension.name] = forest
示例#2
0
    def test_sanity(self):
        """Exercise the basic KeyOrderedDict contract.

        Covers: insertion, (key, value) iteration order, index- and key-based
        access, None values, value-type enforcement, KeyError / IndexError on
        bad lookups, and len().
        """
        keys = list("abcdefghijklmnopqrstuvwxyz")
        values = [k.upper() for k in keys]

        kod = KeyOrderedDict(ordered_keys=keys, value_type=str)
        for k, v in zip(keys, values):
            kod[k] = v

        # Iterating yields (key, value) pairs in key order.
        for position, (k, v) in enumerate(kod):
            assert k == keys[position]
            assert v == values[position]

        # None is accepted as a value; integer index and key access agree.
        kod['a'] = None
        assert kod[0] is None
        assert kod['a'] is None

        # A value of the wrong type is rejected and the old value survives.
        with pytest.raises(TypeError):
            kod['b'] = 1
        assert kod[1] == "B"
        assert kod['b'] == "B"

        # A correctly-typed replacement is accepted.
        kod['b'] = "1"
        assert kod[1] == "1"
        assert kod['b'] == "1"

        # Unknown string keys raise KeyError; out-of-range indices raise IndexError.
        with pytest.raises(KeyError):
            _ = kod['A']
        with pytest.raises(IndexError):
            _ = kod[100]

        assert len(keys) == len(kod)
    def __init__(self,
                 model_type: type,
                 model_config: Point,
                 input_space: Hypergrid,
                 output_space: Hypergrid,
                 logger=None):
        """Set up one regressor slot of the given model_type per objective.

        model_type must be a RegressionModel subclass; the actual regressors are
        expected to be created/assigned later into _regressors_by_objective_name.
        """
        assert issubclass(model_type, RegressionModel)
        MultiObjectiveRegressionModel.__init__(self,
                                               model_type=model_type,
                                               model_config=model_config,
                                               input_space=input_space,
                                               output_space=output_space)
        self.logger = logger if logger is not None else create_logger("MultiObjectiveHomogeneousRandomForest")

        # One (initially empty) slot per objective, keyed by objective name in
        # output-dimension order.
        self._regressors_by_objective_name = KeyOrderedDict(
            ordered_keys=self.output_dimension_names, value_type=model_type)
    def __init__(self, objective_function_config: Point):
        """Create num_objectives independent NestedPolynomialObjective functions.

        Each objective function y{i} is built from its own copy of the nested
        polynomial config with seed = base_seed + i, so the objectives differ
        from one another while staying reproducible.

        :param objective_function_config: must belong to
            multi_objective_nested_polynomial_config_space.
        """
        assert objective_function_config in multi_objective_nested_polynomial_config_space
        ObjectiveFunctionBase.__init__(self, objective_function_config)

        nested_polynomial_objective_config = objective_function_config.nested_polynomial_objective_config
        self._nested_polynomial_objective_config = nested_polynomial_objective_config
        self._ordered_output_dimension_names = [
            f'y{i}' for i in range(objective_function_config.num_objectives)
        ]
        self._individual_objective_functions = KeyOrderedDict(
            ordered_keys=self._ordered_output_dimension_names,
            value_type=NestedPolynomialObjective)

        # Create the required number of objective functions.
        #
        # Bug fix: the original mutated the shared config in place
        # (`...polynomial_objective_config.seed += i`), which (a) leaked the
        # mutation back into the caller's config object and (b) produced
        # cumulative (triangular: base, base+1, base+3, base+6, ...) seed
        # offsets instead of base_seed + i. Work on a per-objective copy
        # instead, mirroring how the enveloped-waves multi-objective function
        # copies its per-objective config.
        base_seed = nested_polynomial_objective_config.polynomial_objective_config.seed
        for i in range(objective_function_config.num_objectives):
            config_for_objective = nested_polynomial_objective_config.copy()
            config_for_objective.polynomial_objective_config.seed = base_seed + i
            self._individual_objective_functions[i] = NestedPolynomialObjective(
                objective_function_config=config_for_objective)

        # All individual objectives share the same parameter space.
        self._parameter_space = self._individual_objective_functions[
            0].parameter_space

        # Each objective is unbounded on output.
        self._output_space = SimpleHypergrid(
            name='output_space',
            dimensions=[
                ContinuousDimension(name=output_dim_name,
                                    min=-math.inf,
                                    max=math.inf)
                for output_dim_name in self._ordered_output_dimension_names
            ])

        self.default_optimization_problem = OptimizationProblem(
            parameter_space=self._parameter_space,
            objective_space=self._output_space,
            objectives=[
                Objective(name=name, minimize=True)
                for name in self._ordered_output_dimension_names
            ])
示例#5
0
    def approximate_pareto_volume(self,
                                  num_samples=1000000) -> ParetoVolumeEsimator:
        """Approximates the volume of the pareto frontier by Monte Carlo sampling.

        We sample uniformly from the objective space and count what fraction of
        the samples is dominated by the frontier. That fraction gives a
        confidence interval on the ratio between the frontier's volume and the
        volume of the sampling box. Drawing more samples makes the estimate
        arbitrarily precise.
        """
        # Find the extremum of each objective: min for minimized objectives,
        # max for maximized ones. These define the sampling box.
        #
        objectives_extremes = KeyOrderedDict(
            ordered_keys=self._pareto_df.columns, value_type=float)
        for objective in self.optimization_problem.objectives:
            objective_column = self._pareto_df[objective.name]
            extremum = objective_column.min() if objective.minimize else objective_column.max()
            objectives_extremes[objective.name] = extremum

        # One row of unit-uniform samples per objective; scaling each row by its
        # objective's extremum stretches the unit cube over the sampling box.
        unit_samples = np.random.uniform(low=0.0,
                                         high=1.0,
                                         size=(len(objectives_extremes),
                                               num_samples))
        random_objectives_df = pd.DataFrame({
            objective_name: unit_samples[row] * extremum
            for row, (objective_name, extremum) in enumerate(objectives_extremes)
        })

        num_dominated_points = self.is_dominated(objectives_df=random_objectives_df).sum()
        return ParetoVolumeEsimator(num_random_points=num_samples,
                                    num_dominated_points=num_dominated_points,
                                    objectives_maxima=objectives_extremes)
示例#6
0
    def __init__(self, objective_function_config: Point = None):
        """Create num_objectives EnvelopedWaves objectives from one base config.

        Objective i gets a copy of the base enveloped-waves config with its
        phase shifted by i * phase_difference and its period scaled by
        period_change ** i, both folded back into the config store's legal
        range.
        """
        assert objective_function_config in multi_objective_enveloped_waves_config_space
        ObjectiveFunctionBase.__init__(self, objective_function_config)
        base_waves_config = objective_function_config.enveloped_waves_config
        num_objectives = objective_function_config.num_objectives
        objective_names = [f"y{objective_id}" for objective_id in range(num_objectives)]
        self._individual_objectives = KeyOrderedDict(ordered_keys=objective_names,
                                                     value_type=EnvelopedWaves)

        # Upper bounds allowed by the single-objective config store; used to
        # wrap period and phase_shift back into range.
        period_max = enveloped_waves_config_store.parameter_space["period"].max
        phase_shift_max = enveloped_waves_config_store.parameter_space["phase_shift"].max

        for objective_id in range(num_objectives):
            config = base_waves_config.copy()
            config.phase_shift += objective_function_config.phase_difference * objective_id
            config.period *= objective_function_config.period_change ** objective_id

            while config.period > period_max:
                config.period -= period_max
            while config.phase_shift > phase_shift_max:
                config.phase_shift -= phase_shift_max

            self._individual_objectives[objective_id] = EnvelopedWaves(
                objective_function_config=config)

        # All individual objectives share one parameter space; outputs are unbounded.
        self._parameter_space = self._individual_objectives[0].parameter_space
        self._output_space = SimpleHypergrid(
            name="range",
            dimensions=[
                ContinuousDimension(name=f"y{objective_id}",
                                    min=-math.inf,
                                    max=math.inf)
                for objective_id in range(num_objectives)
            ])
示例#7
0
 def __init__(self, objective_names: List[str]):
     """A KeyOrderedDict mapping each objective name to a Prediction.

     :param objective_names: objective names, in the order keys should iterate.
     """
     KeyOrderedDict.__init__(self,
                             ordered_keys=objective_names,
                             value_type=Prediction)
 def __init__(self, objective_names: List[str]):
     """A KeyOrderedDict mapping each objective name to a RegressionModelFitState.

     :param objective_names: objective names, in the order keys should iterate.
     """
     KeyOrderedDict.__init__(self,
                             ordered_keys=objective_names,
                             value_type=RegressionModelFitState)
 def __init__(self, objective_names: List[str]):
     """A KeyOrderedDict mapping each objective name to a GoodnessOfFitMetrics.

     :param objective_names: objective names, in the order keys should iterate.
     """
     KeyOrderedDict.__init__(self,
                             ordered_keys=objective_names,
                             value_type=GoodnessOfFitMetrics)