Example #1
    def test_pending_observations_as_array(self):
        # Mark a trial dispatched so that there are pending observations.
        self.trial.mark_running(no_runner_required=True)
        # If outcome names are respected, unlisted metrics should be filtered out.
        self.assertEqual(
            [
                x.tolist()
                for x in pending_observations_as_array(
                    pending_observations=get_pending_observation_features(
                        self.experiment
                    ),
                    outcome_names=["m2", "m1"],
                    param_names=["x", "y", "z", "w"],
                )
            ],
            [[["1", "foo", "True", "4"]], [["1", "foo", "True", "4"]]],
        )
        self.experiment.attach_data(
            Data.from_evaluations(
                {self.trial.arm.name: {"m2": (1, 0)}},
                trial_index=self.trial.index,
            )
        )
        # There should be no pending observations for metric "m2" now, since
        # the only trial has been updated with data for it.
        self.assertEqual(
            [
                x.tolist()
                for x in pending_observations_as_array(
                    pending_observations=get_pending_observation_features(
                        self.experiment
                    ),
                    outcome_names=["m2", "m1"],
                    param_names=["x", "y", "z", "w"],
                )
            ],
            [[], [["1", "foo", "True", "4"]]],
        )
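For context, here is a minimal sketch of the behavior this test exercises, assuming each pending ObservationFeatures carries a plain `parameters` dict. This is an illustration of the expected contract, not Ax's implementation: one array per entry of outcome_names, each row holding one pending point's parameter values in param_names order, and an outcome with fresh data mapping to an empty array, as in the second assertion.

import numpy as np
from typing import Dict, List, Optional


def pending_observations_as_array_sketch(
    pending_observations: Dict[str, List["ObservationFeatures"]],
    outcome_names: List[str],
    param_names: List[str],
) -> Optional[List[np.ndarray]]:
    # No pending points at all -> nothing to pass to the model.
    if not pending_observations:
        return None
    # One array per outcome, rows ordered like `param_names`. Mixed
    # parameter types (int, str, bool) are coerced to strings here,
    # which is why the assertions above compare against string values.
    return [
        np.array(
            [
                [str(obsf.parameters[p]) for p in param_names]
                for obsf in pending_observations.get(metric, [])
            ]
        )
        for metric in outcome_names
    ]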
Example #2
    def _get_transformed_model_gen_args(
        self,
        search_space: SearchSpace,
        pending_observations: Dict[str, List[ObservationFeatures]],
        fixed_features: ObservationFeatures,
        model_gen_options: Optional[TConfig] = None,
        optimization_config: Optional[OptimizationConfig] = None,
    ) -> ArrayModelGenArgs:
        # Validation
        if not self.parameters:  # pragma: no cover
            raise ValueError(FIT_MODEL_ERROR.format(action="_gen"))
        # Extract search space info
        search_space_digest = extract_search_space_digest(
            search_space=search_space, param_names=self.parameters
        )
        if optimization_config is None:
            raise ValueError(
                "ArrayModelBridge requires an OptimizationConfig to be specified"
            )
        if self.outcomes is None or len(self.outcomes) == 0:  # pragma: no cover
            raise ValueError("No outcomes found during model fit--data are missing.")

        validate_optimization_config(optimization_config, self.outcomes)
        objective_weights = extract_objective_weights(
            objective=optimization_config.objective, outcomes=self.outcomes
        )
        outcome_constraints = extract_outcome_constraints(
            outcome_constraints=optimization_config.outcome_constraints,
            outcomes=self.outcomes,
        )
        extra_model_gen_kwargs = self._get_extra_model_gen_kwargs(
            optimization_config=optimization_config
        )
        linear_constraints = extract_parameter_constraints(
            search_space.parameter_constraints, self.parameters
        )
        fixed_features_dict = get_fixed_features(fixed_features, self.parameters)
        pending_array = pending_observations_as_array(
            pending_observations, self.outcomes, self.parameters
        )
        return ArrayModelGenArgs(
            search_space_digest=search_space_digest,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features_dict,
            pending_observations=pending_array,
            rounding_func=transform_callback(self.parameters, self.transforms),
            extra_model_gen_kwargs=extra_model_gen_kwargs,
        )
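All of the extracted pieces travel together in the returned container. Below is a rough sketch of such a container, with fields mirroring what is populated above; ArrayModelGenArgs itself is defined in Ax, and the field types here are assumptions inferred from how each field is built in the method.

from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Tuple

import numpy as np


@dataclass
class ArrayModelGenArgsSketch:
    """Hypothetical mirror of the fields assembled above."""

    search_space_digest: Any  # bounds, task/fidelity features, etc.
    objective_weights: np.ndarray  # one weight per outcome in self.outcomes
    outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]]  # (A, b): A f(x) <= b
    linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]]  # (G, h): G x <= h
    fixed_features: Optional[Dict[int, float]]  # param index -> pinned value
    pending_observations: Optional[List[np.ndarray]]  # per-outcome arrays
    rounding_func: Callable[[np.ndarray], np.ndarray]
    extra_model_gen_kwargs: Dict[str, Any] = field(default_factory=dict)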
Example #3
File: array.py Project: dme65/Ax
    def _gen(
        self,
        n: int,
        search_space: SearchSpace,
        pending_observations: Dict[str, List[ObservationFeatures]],
        fixed_features: ObservationFeatures,
        model_gen_options: Optional[TConfig] = None,
        optimization_config: Optional[OptimizationConfig] = None,
    ) -> Tuple[
        List[ObservationFeatures],
        List[float],
        Optional[ObservationFeatures],
        TGenMetadata,
    ]:
        """Generate new candidates according to search_space and
        optimization_config.

        The outcome constraints should be transformed to no longer be relative.
        """
        # Validation
        if not self.parameters:  # pragma: no cover
            raise ValueError(FIT_MODEL_ERROR.format(action="_gen"))
        # Extract bounds
        bounds, _, target_fidelities = get_bounds_and_task(
            search_space=search_space, param_names=self.parameters)
        target_fidelities = {
            i: float(v)
            for i, v in target_fidelities.items()  # pyre-ignore [6]
        }

        if optimization_config is None:
            raise ValueError(
                "ArrayModelBridge requires an OptimizationConfig to be specified"
            )
        if self.outcomes is None or len(self.outcomes) == 0:  # pragma: no cover
            raise ValueError(
                "No outcomes found during model fit--data are missing.")

        validate_optimization_config(optimization_config, self.outcomes)
        objective_weights = extract_objective_weights(
            objective=optimization_config.objective, outcomes=self.outcomes)
        outcome_constraints = extract_outcome_constraints(
            outcome_constraints=optimization_config.outcome_constraints,
            outcomes=self.outcomes,
        )
        extra_model_gen_kwargs = self._get_extra_model_gen_kwargs(
            optimization_config=optimization_config)
        linear_constraints = extract_parameter_constraints(
            search_space.parameter_constraints, self.parameters)
        fixed_features_dict = get_fixed_features(fixed_features,
                                                 self.parameters)
        pending_array = pending_observations_as_array(pending_observations,
                                                      self.outcomes,
                                                      self.parameters)
        # Generate the candidates
        X, w, gen_metadata, candidate_metadata = self._model_gen(
            n=n,
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features_dict,
            pending_observations=pending_array,
            model_gen_options=model_gen_options,
            rounding_func=transform_callback(self.parameters, self.transforms),
            target_fidelities=target_fidelities,
            **extra_model_gen_kwargs,
        )
        # Transform array to observations
        observation_features = parse_observation_features(
            X=X,
            param_names=self.parameters,
            candidate_metadata=candidate_metadata)
        xbest = self._model_best_point(
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features_dict,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
        )
        best_obsf = (
            None if xbest is None else ObservationFeatures(
                parameters={
                    p: float(xbest[i]) for i, p in enumerate(self.parameters)
                }
            )
        )
        return observation_features, w.tolist(), best_obsf, gen_metadata
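The final steps map arrays back to Ax objects by position: column i of X corresponds to self.parameters[i], exactly as in the best_obsf construction. A minimal sketch of that parsing step, assuming purely numeric parameters; Ax's parse_observation_features additionally threads through per-candidate metadata, which is approximated here with an optional list.

import numpy as np
from typing import Any, Dict, List, Optional

from ax.core.observation import ObservationFeatures


def parse_observation_features_sketch(
    X: np.ndarray,
    param_names: List[str],
    candidate_metadata: Optional[List[Optional[Dict[str, Any]]]] = None,
) -> List[ObservationFeatures]:
    # Each row of X is one generated candidate; pair column i with
    # param_names[i], mirroring the best-point construction above.
    features = []
    for row_index, row in enumerate(X):
        features.append(
            ObservationFeatures(
                parameters={p: float(row[i]) for i, p in enumerate(param_names)},
                metadata=None
                if candidate_metadata is None
                else candidate_metadata[row_index],
            )
        )
    return features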