Example #1
    def test_best_raw_objective_point_unsatisfiable_relative(self):
        exp = get_branin_experiment()

        # Optimization config with unsatisfiable constraint
        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(),
                op=ComparisonOp.GEQ,
                bound=9999,
                relative=True,
            )
        )

        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        with self.assertLogs(logger="ax.service.utils.best_point", level="WARN") as lg:
            get_best_raw_objective_point(exp, opt_conf)
            self.assertTrue(
                any("No status quo provided" in warning for warning in lg.output),
                msg=lg.output,
            )

        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
        sq_trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[exp.status_quo])
        ).run()
        sq_trial.mark_completed()

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
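
Across all of the snippets on this page the call pattern is the same: get_best_raw_objective_point takes an experiment (and optionally an optimization config) and returns a tuple of the best parameterization and a dict mapping each metric name to a (mean, SEM) pair. A minimal sketch of that basic usage, reusing the trial setup from Example #1; the import paths for the test stubs follow the usual Ax layout and are an assumption here:

from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun
from ax.service.utils.best_point import get_best_raw_objective_point
from ax.utils.testing.core_stubs import get_branin_experiment

exp = get_branin_experiment()
trial = exp.new_trial(
    generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
).run()
trial.mark_completed()

# Returns (best parameterization, {metric name: (mean, SEM)}).
parameterization, values = get_best_raw_objective_point(exp)
for metric_name, (mean, sem) in values.items():
    print(metric_name, mean, sem)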
Example #2
    def test_best_raw_objective_point(self):
        exp = get_branin_experiment()
        with self.assertRaisesRegex(ValueError, "Cannot identify best "):
            get_best_raw_objective_point(exp)
        self.assertEqual(get_best_parameters(exp), None)
        exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        opt_conf = exp.optimization_config.clone()
        opt_conf.objective.metric._name = "not_branin"
        with self.assertRaisesRegex(ValueError, "No data has been logged"):
            get_best_raw_objective_point(exp, opt_conf)
Example #3
    def test_best_raw_objective_point_scalarized(self):
        exp = get_branin_experiment()
        exp.optimization_config = OptimizationConfig(
            ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
        )
        with self.assertRaisesRegex(ValueError, "Cannot identify best "):
            get_best_raw_objective_point(exp)
        self.assertEqual(get_best_parameters(exp, Models), None)
        exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
Example #4
    def get_best_point(
        self,
    ) -> Tuple[TParameterization, Optional[TModelPredictArm]]:
        """Obtains the best point encountered in the course
        of this optimization."""
        # Find latest trial which has a generator_run attached and get its predictions
        model_predictions = get_best_from_model_predictions(experiment=self.experiment)
        if model_predictions is not None:
            return model_predictions

        # Could not find through model, default to using raw objective.
        parameterization, values = get_best_raw_objective_point(
            experiment=self.experiment
        )
        # For values, grab just the means to conform to TModelPredictArm format.
        return (
            parameterization,
            (
                {k: v[0] for k, v in values.items()},  # v[0] is mean
                {k: {k: v[1] * v[1]} for k, v in values.items()},  # v[1] is sem
            ),
        )
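
The return statement above repackages the raw (mean, SEM) pairs into the TModelPredictArm layout: a dict of means, plus a nested dict of covariances whose only populated entry per metric is its variance (the SEM squared). A small worked illustration with made-up numbers:

# Illustrative values only, not from a real run.
values = {"branin": (0.52, 0.1)}  # metric name -> (mean, SEM)

means = {k: v[0] for k, v in values.items()}
# -> {"branin": 0.52}
covariances = {k: {k: v[1] * v[1]} for k, v in values.items()}
# -> {"branin": {"branin": 0.010000000000000002}}; only the diagonal
#    entry (each metric's covariance with itself) is filled.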
Example #5
    def save_best_parameters(self, record_keeper, ax_client):
        q = record_keeper.query(
            "SELECT * FROM {0} WHERE {1}=(SELECT max({1}) FROM {0})".format(
                self.bayes_opt_table_name,
                self.YR.args.eval_primary_metric))[0]
        best_trial_index = int(q['trial_index'])
        best_sub_experiment_name = self.get_sub_experiment_name(
            best_trial_index)
        logging.info("BEST SUB EXPERIMENT NAME: %s" % best_sub_experiment_name)

        best_parameters, best_values = get_best_raw_objective_point(
            ax_client.experiment)
        assert np.isclose(best_values[self.YR.args.eval_primary_metric][0],
                          q[self.YR.args.eval_primary_metric])
        best_parameters_dict = {
            "best_sub_experiment_name": best_sub_experiment_name,
            "best_parameters": best_parameters,
            "best_values": {
                k: {
                    "mean": float(v[0]),
                    "SEM": float(v[1])
                }
                for k, v in best_values.items()
            }
        }
        c_f.write_yaml(self.best_parameters_filename,
                       best_parameters_dict,
                       open_as='w')
        return best_sub_experiment_name
Example #6
    def run(self):
        ax_client = self.get_ax_client()
        num_explored_points = len(ax_client.experiment.trials) if ax_client.experiment.trials else 0
        finished_sub_experiment_names = self.get_finished_sub_experiment_names()

        for i in range(num_explored_points, self.bayes_opt_iters):
            logging.info("Optimization iteration %d"%i)
            sub_experiment_name = self.get_latest_sub_experiment_name()
            parameters, trial_index = ax_client.get_next_trial()
            ax_client.complete_trial(
                trial_index=trial_index,
                raw_data=self.run_new_experiment(parameters, sub_experiment_name),
            )
            self.save_new_log(ax_client)
            finished_sub_experiment_names.append([sub_experiment_name])
            self.write_finished_sub_experiment_names(finished_sub_experiment_names)
            self.plot_progress(ax_client)

        logging.info("DONE BAYESIAN OPTIMIZATION")
        df = ax_client.get_trials_data_frame()
        metric_column = pd.to_numeric(df[self.YR.args.eval_primary_metric])
        best_trial_index = df['trial_index'].iloc[metric_column.idxmax()]
        best_sub_experiment_name = finished_sub_experiment_names[best_trial_index][0]
        best_sub_experiment_path = self.get_sub_experiment_path(best_sub_experiment_name) 
        logging.info("BEST SUB EXPERIMENT NAME: %s"%best_sub_experiment_name)

        self.plot_progress(ax_client)
        best_parameters, best_values = get_best_raw_objective_point(
            ax_client.experiment
        )
        best_parameters_dict = {
            "best_sub_experiment_name": best_sub_experiment_name,
            "best_sub_experiment_path": best_sub_experiment_path,
            "best_parameters": best_parameters,
            "best_values": {
                k: {"mean": float(v[0]), "SEM": float(v[1])}
                for k, v in best_values.items()
            },
        }
        c_f.write_yaml(
            os.path.join(self.bayes_opt_root_experiment_folder, "best_parameters.yaml"),
            best_parameters_dict,
            open_as='w',
        )

        self.test_best_model(best_sub_experiment_name)
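
Both save_best_parameters and run above serialize the same best_parameters_dict structure to best_parameters.yaml through the project-specific c_f.write_yaml helper. A rough sketch of the equivalent write with the standard yaml package, using placeholder names and values (all of them illustrative assumptions, not taken from the project):

import yaml

best_parameters_dict = {
    "best_sub_experiment_name": "sub_experiment_12",               # placeholder
    "best_sub_experiment_path": "/experiments/sub_experiment_12",  # placeholder
    "best_parameters": {"lr": 0.001, "margin": 0.2},                # placeholder parameterization
    "best_values": {"primary_metric": {"mean": 0.71, "SEM": 0.004}},  # placeholder metric
}
with open("best_parameters.yaml", "w") as f:
    yaml.safe_dump(best_parameters_dict, f)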
Example #7
    def test_best_raw_objective_point_unsatisfiable(self):
        exp = get_branin_experiment()
        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(), op=ComparisonOp.LEQ, bound=0, relative=False
            )
        )

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
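
Outside of a unit test, the same "No points satisfied" ValueError can simply be caught; one possible fallback (an assumption here, not taken from the source project) is to retry against the experiment's own optimization config, reusing exp and opt_conf from the test above:

try:
    parameterization, values = get_best_raw_objective_point(exp, opt_conf)
except ValueError:
    # No arm satisfied the extra outcome constraint; fall back to the
    # experiment's original, satisfiable optimization config.
    parameterization, values = get_best_raw_objective_point(exp)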
Example #8
    def get_best_parameters(
        self
    ) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:
        """
        Return the best set of parameters the experiment has knowledge of.

        If the experiment is in the optimization phase, return the best point
        determined by the model used in the latest optimization round; otherwise
        return None.

        The custom type `TModelPredictArm` is defined as
        `Tuple[Dict[str, float], Optional[Dict[str, Dict[str, float]]]]` and
        stands for a tuple of two mappings: metric name to its mean value, and
        metric name to a mapping of other metric name to the covariance of the
        two metrics.

        Returns:
            Tuple of (best parameters, model predictions for best parameters).
            None if no data.
        """
        # Find latest trial which has a generator_run attached and get its predictions
        model_predictions = get_best_from_model_predictions(experiment=self.experiment)
        if model_predictions is not None:  # pragma: no cover
            return model_predictions

        # Could not find through model, default to using raw objective.
        parameterization, values = get_best_raw_objective_point(
            experiment=self.experiment
        )
        return (
            parameterization,
            (
                {k: v[0] for k, v in values.items()},  # v[0] is mean
                {k: {k: v[1] * v[1]} for k, v in values.items()},  # v[1] is sem
            ),
        )
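
Per the docstring, callers of get_best_parameters have to handle both the Optional return value and the nested TModelPredictArm tuple. A short usage sketch; `loop` stands for whatever object defines the method above and is an assumed name:

result = loop.get_best_parameters()
if result is not None:
    parameterization, predictions = result
    if predictions is not None:
        means, covariances = predictions  # TModelPredictArm: (means dict, covariances dict)
        print(parameterization, means)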
Example #9
                                     config_foldernames, parameters))
        save_new_log(ax_client, root_experiment_folder)
        finished_experiment_names.append([experiment_path])
        write_finished_experiment_names(root_experiment_folder,
                                        finished_experiment_names)
        plot_progress(ax_client, root_experiment_folder, experiment_name)

    logging.info("DONE BAYESIAN OPTIMIZATION")
    df = ax_client.get_trials_data_frame()
    metric_column = pd.to_numeric(df[YR.args.eval_metric_for_best_epoch])
    best_trial_index = df['trial_index'].iloc[metric_column.idxmax()]
    best_experiment_path = finished_experiment_names[best_trial_index][0]
    logging.info("BEST EXPERIMENT PATH: %s" % best_experiment_path)

    plot_progress(ax_client, YR.args.root_experiment_folder, experiment_name)
    best_parameters, best_values = get_best_raw_objective_point(
        ax_client.experiment)
    best_parameters_dict = {
        "best_experiment_path": best_experiment_path,
        "best_parameters": best_parameters,
        "best_values": {
            k: {
                "mean": float(v[0]),
                "SEM": float(v[1])
            }
            for k, v in best_values.items()
        }
    }
    c_f.write_yaml(os.path.join(YR.args.root_experiment_folder,
                                "best_parameters.yaml"),
                   best_parameters_dict,
                   open_as='w')