Example No. 1
    def testGenArms(self):
        p1 = {"x": 0, "y": 1}
        p2 = {"x": 4, "y": 8}
        observation_features = [
            ObservationFeatures(parameters=p1),
            ObservationFeatures(parameters=p2),
        ]
        arms, candidate_metadata = gen_arms(
            observation_features=observation_features)
        self.assertEqual(arms[0].parameters, p1)
        self.assertIsNone(candidate_metadata)

        arm = Arm(name="1_1", parameters=p1)
        arms_by_signature = {arm.signature: arm}
        observation_features[0].metadata = {"some_key": "some_val_0"}
        observation_features[1].metadata = {"some_key": "some_val_1"}
        arms, candidate_metadata = gen_arms(
            observation_features=observation_features,
            arms_by_signature=arms_by_signature,
        )
        self.assertEqual(arms[0].name, "1_1")
        self.assertEqual(
            candidate_metadata,
            {
                arms[0].signature: {
                    "some_key": "some_val_0"
                },
                arms[1].signature: {
                    "some_key": "some_val_1"
                },
            },
        )
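For reference, the same call pattern outside a test class. This is a minimal sketch, assuming gen_arms is importable from ax.modelbridge.base (the import path is an assumption and has moved between Ax releases); the two-element return value matches the assertions above.

    # Minimal sketch; the gen_arms import path is an assumption.
    from ax.core.observation import ObservationFeatures
    from ax.modelbridge.base import gen_arms

    obsf = ObservationFeatures(parameters={"x": 0, "y": 1})
    obsf.metadata = {"some_key": "some_val_0"}
    arms, candidate_metadata = gen_arms(observation_features=[obsf])
    # Arms carry the raw parameters; candidate metadata is keyed by arm signature.
    assert arms[0].parameters == {"x": 0, "y": 1}
    assert candidate_metadata[arms[0].signature] == {"some_key": "some_val_0"}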
Example No. 2
    def testGenArms(self):
        p1 = {"x": 0, "y": 1}
        p2 = {"x": 4, "y": 8}
        observation_features = [
            ObservationFeatures(parameters=p1),
            ObservationFeatures(parameters=p2),
        ]
        arms = gen_arms(observation_features=observation_features)
        self.assertEqual(arms[0].parameters, p1)

        arm = Arm(name="1_1", parameters=p1)
        arms_by_signature = {arm.signature: arm}
        arms = gen_arms(
            observation_features=observation_features,
            arms_by_signature=arms_by_signature,
        )
        self.assertEqual(arms[0].name, "1_1")
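This older variant returns only the list of arms, not the (arms, candidate_metadata) tuple seen in Example No. 1. A sketch of the name-reuse behavior it checks, under the same import assumption as above:

    # When a generated arm's signature matches one in arms_by_signature,
    # gen_arms returns the existing named Arm rather than an anonymous one.
    from ax.core.arm import Arm

    existing = Arm(name="1_1", parameters={"x": 0, "y": 1})
    arms = gen_arms(
        observation_features=[ObservationFeatures(parameters={"x": 0, "y": 1})],
        arms_by_signature={existing.signature: existing},
    )
    assert arms[0].name == "1_1"  # name carried over from the existing arm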
Example No. 3
    def evaluation_function(x: List[float]) -> float:
        # Check if we have exhausted the evaluation budget
        if len(experiment.trials) >= max_trials:
            raise ValueError(
                f"Evaluation budget ({max_trials} trials) exhausted.")

        # Create an ObservationFeatures object from the parameter vector
        param_dict = {
            pname: x[i]
            for i, pname in enumerate(problem.search_space.parameters.keys())
        }
        obsf = ObservationFeatures(parameters=param_dict)  # pyre-ignore
        # Get the time since last call
        num_trials = len(experiment.trials)
        if num_trials == 0:
            gen_time = None
        else:
            previous_ts = experiment.trials[
                num_trials - 1].time_created.timestamp()
            gen_time = time.time() - previous_ts
        # Create a GeneratorRun
        arms, candidate_metadata_by_arm_signature = gen_arms(
            observation_features=[obsf],
            arms_by_signature=experiment.arms_by_signature)
        gr = GeneratorRun(
            arms=arms,
            gen_time=gen_time,
            candidate_metadata_by_arm_signature=candidate_metadata_by_arm_signature,
        )
        # Add it as a trial
        trial = experiment.new_trial().add_generator_run(gr).run()
        # Evaluate function
        df = trial.fetch_data().df
        if len(df) > 1:
            raise Exception(
                "Does not support multiple outcomes")  # pragma: no cover
        obj = float(df["mean"].values[0])
        if not problem.optimization_config.objective.minimize:
            obj = -obj
        return obj
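Because it takes a plain vector and returns a float, a closure like evaluation_function can be handed to any black-box optimizer. A hypothetical driver, assuming scipy is installed and that experiment, problem, and max_trials are bound in the enclosing scope:

    from scipy.optimize import minimize  # assumption: scipy is available

    x0 = [0.5] * len(problem.search_space.parameters)
    try:
        minimize(evaluation_function, x0, method="Nelder-Mead")
    except ValueError:
        pass  # raised once the max_trials evaluation budget is exhausted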
Example No. 4
    def evaluation_function(self, x: List[float]) -> float:
        """
        An interface for directly calling the benchmark problem evaluation
        function. Tracks each call as a Trial. Only works for unconstrained
        problems with a batch size of 1.

        Args:
            x: The point to evaluate, given as a vector of parameter values

        Returns: Value of the objective at x
        """
        if len(self.trials) >= self.total_iterations:
            raise Exception("Evaluation budget exhausted.")
        assert self.batch_size == 1
        # Create an ObservationFeatures object from the parameter vector
        param_dict = {
            pname: x[i]
            for i, pname in enumerate(self.search_space.parameters.keys())
        }
        obsf = ObservationFeatures(parameters=param_dict)  # pyre-ignore
        # Get the time since last call
        if len(self.trials) == 0:
            gen_time = None
        else:
            last_trial = self.trials[len(self.trials) - 1]
            gen_time = time.time() - last_trial.time_created.timestamp()
        # Create a GeneratorRun
        gr = GeneratorRun(
            arms=gen_arms(observation_features=[obsf],
                          arms_by_signature=self.arms_by_signature),
            gen_time=gen_time,
        )
        # Add it as a trial
        trial = self.new_trial().add_generator_run(gr).run()
        # Evaluate function
        df = trial.fetch_data().df
        if len(df) > 1:
            raise Exception(
                "Does not support multiple outcomes")  # pragma: no cover
        return float(df["mean"].values[0])
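Unlike Example No. 3, this variant returns the raw objective mean and never flips the sign, so a caller minimizing its output implicitly assumes a minimization problem. Hypothetical direct use, where bench stands in for an instance of the enclosing benchmark class:

    # bench is a hypothetical instance of the class this method belongs to.
    y = bench.evaluation_function([0.1, 0.7])  # records a Trial, returns the mean
    print(len(bench.trials))  # grows by one per call until the budget is exhausted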
Example No. 5
    def model_best_point(
        self,
        search_space: Optional[SearchSpace] = None,
        optimization_config: Optional[OptimizationConfig] = None,
        pending_observations: Optional[Dict[str, List[ObservationFeatures]]] = None,
        fixed_features: Optional[ObservationFeatures] = None,
        model_gen_options: Optional[TConfig] = None,
    ) -> Optional[Tuple[Arm, Optional[TModelPredictArm]]]:
        # Get a modifiable copy of the search space
        if search_space is None:
            search_space = self._model_space
        search_space = search_space.clone()

        base_gen_args = self._get_transformed_gen_args(
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations=pending_observations,
            fixed_features=fixed_features,
        )

        array_model_gen_args = self._get_transformed_model_gen_args(
            search_space=base_gen_args.search_space,
            pending_observations=base_gen_args.pending_observations,
            fixed_features=base_gen_args.fixed_features,
            model_gen_options=None,
            optimization_config=base_gen_args.optimization_config,
        )

        search_space_digest = array_model_gen_args.search_space_digest

        xbest = self._model_best_point(
            bounds=search_space_digest.bounds,
            objective_weights=array_model_gen_args.objective_weights,
            outcome_constraints=array_model_gen_args.outcome_constraints,
            linear_constraints=array_model_gen_args.linear_constraints,
            fixed_features=array_model_gen_args.fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=search_space_digest.target_fidelities,
        )

        if xbest is None:
            return None

        best_obsf = ObservationFeatures(
            parameters={p: float(xbest[i]) for i, p in enumerate(self.parameters)}
        )

        # Map the best point back to the original search space by applying the
        # transforms in reverse order.
        for t in reversed(self.transforms.values()):  # noqa T484
            best_obsf = t.untransform_observation_features([best_obsf])[0]

        best_point_predictions = extract_arm_predictions(
            model_predictions=self.predict([best_obsf]), arm_idx=0
        )

        best_arms, _ = gen_arms(
            observation_features=[best_obsf],
            arms_by_signature=self._arms_by_signature,
        )
        best_arm = best_arms[0]

        return best_arm, best_point_predictions
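A hypothetical call site, where model_bridge stands in for an instance of the model-bridge class this method is defined on; per the signature above, the result is either None or an (Arm, predictions) pair:

    result = model_bridge.model_best_point()
    if result is not None:
        best_arm, best_point_predictions = result
        # best_arm.parameters is already mapped back to the original
        # (untransformed) search space; the predictions may still be None.
        print(best_arm.parameters)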