Example No. 1
    def test_status_quo_for_non_monolithic_data(self, mock_gen):
        mock_gen.return_value = (
            [
                ObservationFeatures(parameters={
                    "x1": float(i),
                    "x2": float(i)
                },
                                    trial_index=np.int64(1)) for i in range(5)
            ],
            [1] * 5,
            None,
            {},
        )
        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol = Models.SOBOL(search_space=exp.search_space)
        exp.new_batch_trial(sobol.gen(5)).set_status_quo_and_optimize_power(
            status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()
        with warnings.catch_warnings(record=True) as ws:
            bridge = ModelBridge(
                experiment=exp,
                data=data,
                model=Model(),
                search_space=exp.search_space,
            )
        # just testing it doesn't error
        bridge.gen(5)
        self.assertTrue(any("start_time" in str(w.message) for w in ws))
        self.assertTrue(any("end_time" in str(w.message) for w in ws))
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
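A minimal sketch of what "non-monolithic" data means here, assuming Ax's standard Data schema (the column values and the stand-in for get_non_monolithic_branin_moo_data are illustrative, not taken from the test): each metric carries its own start/end timestamps, which is what triggers the start_time/end_time warnings asserted above.

import pandas as pd
from ax.core.data import Data

# Hypothetical stand-in for get_non_monolithic_branin_moo_data(): the two
# metrics are observed on the same arm but over different time windows.
df = pd.DataFrame({
    "arm_name": ["0_0", "0_0"],
    "metric_name": ["branin_a", "branin_b"],
    "mean": [1.0, 2.0],
    "sem": [0.1, 0.1],
    "trial_index": [0, 0],
    "start_time": [pd.Timestamp("2023-01-01"), pd.Timestamp("2023-01-02")],
    "end_time": [pd.Timestamp("2023-01-02"), pd.Timestamp("2023-01-03")],
})
non_monolithic_data = Data(df=df)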
Example No. 2
 def testOODStatusQuo(self):
     # An OOD status quo arm without a trial index will raise an error
     experiment = get_branin_experiment()
     experiment.add_tracking_metric(
         BraninMetric(name="m2", param_names=["x1", "x2"]))
     metrics = list(experiment.metrics.values())
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
     experiment.new_batch_trial(generator_run=a).run()
     # Experiments with batch trials must specify a trial index
     with self.assertRaises(UnsupportedError):
         compute_pareto_frontier(
             experiment,
             metrics[0],
             metrics[1],
             absolute_metrics=[m.name for m in metrics],
         )
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         trial_index=0,
         absolute_metrics=[m.name for m in metrics],
     )
     compute_pareto_frontier(
         experiment,
         metrics[0],
         metrics[1],
         data=experiment.fetch_data(),
         absolute_metrics=[m.name for m in metrics],
     )
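As a follow-on, the frontier returned by compute_pareto_frontier can be handed to Ax's plotting helper; a hedged sketch (the plot_pareto_frontier import path is assumed from current Ax releases and is not part of the test above):

from ax.plot.pareto_frontier import plot_pareto_frontier

frontier = compute_pareto_frontier(
    experiment,
    metrics[0],
    metrics[1],
    trial_index=0,
    absolute_metrics=[m.name for m in metrics],
)
# Returns a plot config that can then be rendered, e.g. in a notebook.
pareto_plot = plot_pareto_frontier(frontier)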
Example No. 3
    def setUp(self):
        self.branin_experiment = get_branin_experiment_with_multi_objective()
        sobol = Models.SOBOL(search_space=self.branin_experiment.search_space)
        sobol_run = sobol.gen(n=20)
        self.branin_experiment.new_batch_trial().add_generator_run(
            sobol_run
        ).run().mark_completed()
        data = self.branin_experiment.fetch_data()

        ms_gpei = ModelSpec(model_enum=Models.GPEI)
        ms_gpei.fit(experiment=self.branin_experiment, data=data)

        ms_gpkg = ModelSpec(model_enum=Models.GPKG)
        ms_gpkg.fit(experiment=self.branin_experiment, data=data)

        self.fitted_model_specs = [ms_gpei, ms_gpkg]

        self.model_selection_node = GenerationNode(
            model_specs=self.fitted_model_specs,
            best_model_selector=SingleDiagnosticBestModelSelector(
                diagnostic="Fisher exact test p",
                criterion=DiagnosticCriterion.MIN,
                metric_aggregation=MetricAggregation.MEAN,
            ),
        )
Example No. 4
    def test_get_model_from_generator_run(self):
        """Tests that it is possible to restore a model from a generator run it
        produced, if `Models` registry was used.
        """
        exp = get_branin_experiment()
        initial_sobol = Models.SOBOL(experiment=exp, seed=239)
        gr = initial_sobol.gen(n=1)
        # Restore the model as it was before generation.
        sobol = get_model_from_generator_run(generator_run=gr,
                                             experiment=exp,
                                             data=exp.fetch_data())
        self.assertEqual(sobol.model.init_position, 0)
        self.assertEqual(sobol.model.seed, 239)
        # Restore the model as it was after generation (to resume generation).
        sobol_after_gen = get_model_from_generator_run(generator_run=gr,
                                                       experiment=exp,
                                                       data=exp.fetch_data(),
                                                       after_gen=True)
        self.assertEqual(sobol_after_gen.model.init_position, 1)
        self.assertEqual(sobol_after_gen.model.seed, 239)
        self.assertEqual(
            initial_sobol.gen(n=1).arms,
            sobol_after_gen.gen(n=1).arms)
        exp.new_trial(generator_run=gr)
        # Check restoration of GPEI, to ensure proper restoration of callable kwargs
        gpei = Models.GPEI(experiment=exp, data=get_branin_data())
        # Punch GPEI model + bridge kwargs into the Sobol generator run, to avoid
        # a slow call to `gpei.gen`.
        gr._model_key = "GPEI"
        gr._model_kwargs = gpei._model_kwargs
        gr._bridge_kwargs = gpei._bridge_kwargs
        gpei_restored = get_model_from_generator_run(gr,
                                                     experiment=exp,
                                                     data=get_branin_data())
        for key in gpei.__dict__:
            self.assertIn(key, gpei_restored.__dict__)
            original, restored = gpei.__dict__[key], gpei_restored.__dict__[
                key]
            # Fit times are set at instantiation, so they won't match; the
            # underlying model object is compared separately in the loop below.
            if key in ["fit_time", "fit_time_since_gen", "model"]:
                continue
            if isinstance(original, OrderedDict) and isinstance(
                    restored, OrderedDict):
                original, restored = list(original.keys()), list(
                    restored.keys())
            if isinstance(original, Model) and isinstance(restored, Model):
                continue  # Model equality is tough to compare.
            self.assertEqual(original, restored)

        for key in gpei.model.__dict__:
            self.assertIn(key, gpei_restored.model.__dict__)
            original, restored = (
                gpei.model.__dict__[key],
                gpei_restored.model.__dict__[key],
            )
            # Botorch model equality is tough to compare and training data
            # is unnecessary to compare, because data passed to model was the same
            if key in ["model", "warm_start_refitting", "Xs", "Ys"]:
                continue
            self.assertEqual(original, restored)
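The same round trip outside a test, as a minimal sketch using the helpers imported above: generate with Sobol, keep only the GeneratorRun, and later rebuild the model in its post-generation state to continue the sequence.

exp = get_branin_experiment()
gr = Models.SOBOL(experiment=exp, seed=239).gen(n=1)

# Later (e.g. after the GeneratorRun has been stored and reloaded), restore the
# model as it was right after generation and keep drawing from the same sequence.
resumed_sobol = get_model_from_generator_run(
    generator_run=gr,
    experiment=exp,
    data=exp.fetch_data(),
    after_gen=True,
)
next_gr = resumed_sobol.gen(n=1)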
Example No. 5
 def setUp(self) -> None:
     self.experiment = get_branin_experiment()
     sobol = Models.SOBOL(search_space=self.experiment.search_space)
     sobol_run = sobol.gen(n=20)
     self.experiment.new_batch_trial().add_generator_run(
         sobol_run).run().mark_completed()
     self.data = self.experiment.fetch_data()
Example No. 6
def get_sobol(
    search_space: SearchSpace,
    seed: Optional[int] = None,
    deduplicate: bool = False,
    init_position: int = 0,
    scramble: bool = True,
) -> RandomModelBridge:
    """Instantiates a Sobol sequence quasi-random generator.

    Args:
        search_space: Sobol generator search space.
        seed: Optional seed for the Sobol sequence.
        deduplicate: Whether to deduplicate suggested arms.
        init_position: Starting position in the Sobol sequence.
        scramble: Whether to scramble the Sobol sequence.

    Returns:
        RandomModelBridge, with SobolGenerator as model.
    """
    logger.info(
        "Factory functions (like `get_sobol`) will soon be deprecated. Use "
        "the model registry instead (`Models.SOBOL(...)`).")
    return checked_cast(
        RandomModelBridge,
        Models.SOBOL(
            search_space=search_space,
            seed=seed,
            deduplicate=deduplicate,
            init_position=init_position,
            scramble=scramble,
        ),
    )
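A usage sketch for this factory, assuming a small hand-built search space (the parameter names are illustrative):

from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace

space = SearchSpace(parameters=[
    RangeParameter(name="x1", parameter_type=ParameterType.FLOAT, lower=-5.0, upper=10.0),
    RangeParameter(name="x2", parameter_type=ParameterType.FLOAT, lower=0.0, upper=15.0),
])
sobol = get_sobol(search_space=space, seed=0)
gr = sobol.gen(n=3)  # GeneratorRun holding 3 quasi-random candidate arms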
Example No. 7
def get_sobol(
    search_space: SearchSpace,
    seed: Optional[int] = None,
    deduplicate: bool = False,
    init_position: int = 0,
    scramble: bool = True,
) -> RandomModelBridge:
    """Instantiates a Sobol sequence quasi-random generator.

    Args:
        search_space: Sobol generator search space.
        seed: Optional seed for the Sobol sequence.
        deduplicate: Whether to deduplicate suggested arms.
        init_position: Starting position in the Sobol sequence.
        scramble: Whether to scramble the Sobol sequence.

    Returns:
        RandomModelBridge, with SobolGenerator as model.
    """
    return checked_cast(
        RandomModelBridge,
        Models.SOBOL(
            search_space=search_space,
            seed=seed,
            deduplicate=deduplicate,
            init_position=init_position,
            scramble=scramble,
        ),
    )
Example No. 8
 def test_enum_sobol_GPEI(self):
     """Tests Soboland GPEI instantiation through the Models enum."""
     exp = get_branin_experiment()
     # Check that factory generates a valid sobol modelbridge.
     sobol = Models.SOBOL(search_space=exp.search_space)
     self.assertIsInstance(sobol, RandomModelBridge)
     for _ in range(5):
         sobol_run = sobol.gen(n=1)
         self.assertEqual(sobol_run._model_key, "Sobol")
         exp.new_batch_trial().add_generator_run(sobol_run).run()
     # Check that factory generates a valid GP+EI modelbridge.
     exp.optimization_config = get_branin_optimization_config()
     gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
     self.assertIsInstance(gpei, TorchModelBridge)
     self.assertEqual(gpei._model_key, "GPEI")
     botorch_defaults = "ax.models.torch.botorch_defaults"
     # Check that the callable kwargs and the torch kwargs were recorded.
     self.assertEqual(
         gpei._model_kwargs,
         {
             "acqf_constructor": {
                 "is_callable_as_path": True,
                 "value": f"{botorch_defaults}.get_NEI",
             },
             "acqf_optimizer": {
                 "is_callable_as_path": True,
                 "value": f"{botorch_defaults}.scipy_optimizer",
             },
             "model_constructor": {
                 "is_callable_as_path": True,
                 "value": f"{botorch_defaults}.get_and_fit_model",
             },
             "model_predictor": {
                 "is_callable_as_path": True,
                 "value": f"{botorch_defaults}.predict_from_model",
             },
             "refit_on_cv": False,
             "refit_on_update": True,
             "warm_start_refitting": True,
         },
     )
     self.assertEqual(
         gpei._bridge_kwargs,
         {
             "transform_configs": None,
             "torch_dtype": torch_float64,
             "torch_device": torch_device(type="cpu"),
             "status_quo_name": None,
             "status_quo_features": None,
             "optimization_config": None,
             "transforms": Cont_X_trans + Y_trans,
         },
     )
     gpei = Models.GPEI(experiment=exp,
                        data=exp.fetch_data(),
                        search_space=exp.search_space)
     self.assertIsInstance(gpei, TorchModelBridge)
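Outside the test, the same two models are typically chained into a small generate-evaluate loop; a hedged sketch reusing the helpers from the test above (trial counts are arbitrary):

exp = get_branin_experiment()

# Quasi-random initialization.
sobol = Models.SOBOL(search_space=exp.search_space)
for _ in range(5):
    exp.new_trial(generator_run=sobol.gen(n=1)).run().mark_completed()

# Model-based candidates: refit GP+EI on all data gathered so far each round.
exp.optimization_config = get_branin_optimization_config()
for _ in range(3):
    gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
    exp.new_trial(generator_run=gpei.gen(n=1)).run().mark_completed()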
Example No. 9
 def setUp(self):
     experiment = get_branin_experiment()
     experiment.add_tracking_metric(
         BraninMetric(name="m2", param_names=["x1", "x2"]))
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
     experiment.new_batch_trial(generator_run=a).run()
     self.experiment = experiment
     self.metrics = list(experiment.metrics.values())
Example No. 10
 def setUp(self):
     super().setUp()
     search_space = get_search_space()
     gr = Models.SOBOL(search_space=search_space).gen(n=1)
     self.model = Mock(
         search_space=search_space,
         status_quo=Mock(
             features=ObservationFeatures(parameters=gr.arms[0].parameters)
         ),
     )
Example No. 11
 def testPlotMultipleParetoFrontiers(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_objective_thresholds=True, )
     sobol = Models.SOBOL(experiment.search_space)
     a = sobol.gen(5)
     experiment.new_batch_trial(generator_run=a).run()
     pfrs = get_observed_pareto_frontiers(experiment=experiment)
     pfrs2 = copy.deepcopy(pfrs)
     pfr_lists = {"pfrs 1": pfrs, "pfrs 2": pfrs2}
     self.assertIsNotNone(interact_multiple_pareto_frontier(pfr_lists))
Example No. 12
 def test_cross_validate_raises_not_implemented_error_for_non_cv_model_with_data(
     self,
 ):
     exp = get_branin_experiment(with_batch=True)
     exp.trials[0].run().complete()
     sobol = Models.SOBOL(
         experiment=exp, search_space=exp.search_space, data=exp.fetch_data()
     )
     with self.assertRaises(NotImplementedError):
         cross_validate(model=sobol)
Example No. 13
 def test_enum_model_kwargs(self):
     """Tests that kwargs are passed correctly when instantiating through the
     Models enum."""
     exp = get_branin_experiment()
     sobol = Models.SOBOL(
         search_space=exp.search_space, init_position=2, scramble=False, seed=239
     )
     self.assertIsInstance(sobol, RandomModelBridge)
     for _ in range(5):
         sobol_run = sobol.gen(1)
         exp.new_batch_trial().add_generator_run(sobol_run).run()
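A quick way to confirm those kwargs landed on the underlying generator, sketched with attribute names assumed from SobolGenerator:

# The RandomModelBridge wraps a SobolGenerator; kwargs passed to Models.SOBOL
# are forwarded to it.
assert sobol.model.seed == 239
assert sobol.model.scramble is False
# init_position started at 2 and advances with each gen() call in the loop above.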
Example No. 14
    def test_ST_MTGP(self):
        """Tests single type MTGP instantiation."""
        # Test Single-type MTGP
        exp = get_branin_experiment()
        sobol = Models.SOBOL(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(5):
            sobol_run = sobol.gen(n=1)
            t = exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        status_quo_features = ObservationFeatures(
            parameters=exp.trials[0].status_quo.parameters,
            trial_index=0,
        )
        mtgp = Models.ST_MTGP(
            experiment=exp,
            data=exp.fetch_data(),
            status_quo_features=status_quo_features,
        )
        self.assertIsInstance(mtgp, TorchModelBridge)

        exp = get_branin_experiment()
        sobol = Models.SOBOL(search_space=exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        sobol_run = sobol.gen(n=1)
        t = exp.new_batch_trial().add_generator_run(sobol_run)
        t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
        t.run().mark_completed()

        # Constructing the multi-task model on an experiment with only a single
        # completed trial is expected to fail.
        with self.assertRaises(ValueError):
            status_quo_features = ObservationFeatures(
                parameters=exp.trials[0].status_quo.parameters,
                trial_index=0,
            )
            Models.ST_MTGP(
                experiment=exp,
                data=exp.fetch_data(),
                status_quo_features=status_quo_features,
            )
Example No. 15
    def test_ST_MTGP_NEHVI(self):
        """Tests single type MTGP NEHVI instantiation."""
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True,
            with_status_quo=True,
        )
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        sobol = Models.SOBOL(search_space=multi_obj_exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(2):
            sobol_run = sobol.gen(n=1)
            t = multi_obj_exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        status_quo_features = ObservationFeatures(
            parameters=multi_obj_exp.trials[0].status_quo.parameters,
            trial_index=0,
        )
        mtgp = Models.ST_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            status_quo_features=status_quo_features,
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(mtgp, TorchModelBridge)
        self.assertIsInstance(mtgp.model, MultiObjectiveBotorchModel)

        # test it can generate
        mtgp_run = mtgp.gen(n=1,
                            fixed_features=ObservationFeatures(parameters={},
                                                               trial_index=1))
        self.assertEqual(len(mtgp_run.arms), 1)

        # test a generated trial can be completed
        t = multi_obj_exp.new_batch_trial().add_generator_run(mtgp_run)
        t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
        t.run().mark_completed()
Example No. 16
 def test_enum_sobol_GPEI(self):
     """Tests Sobol and GPEI instantiation through the Models enum."""
     exp = get_branin_experiment()
     # Check that factory generates a valid sobol modelbridge.
     sobol = Models.SOBOL(search_space=exp.search_space)
     self.assertIsInstance(sobol, RandomModelBridge)
     for _ in range(5):
         sobol_run = sobol.gen(n=1)
         self.assertEqual(sobol_run._model_key, "Sobol")
         exp.new_batch_trial().add_generator_run(sobol_run).run()
     # Check that factory generates a valid GP+EI modelbridge.
     exp.optimization_config = get_branin_optimization_config()
     gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
     self.assertIsInstance(gpei, TorchModelBridge)
     gpei = Models.GPEI(experiment=exp,
                        data=exp.fetch_data(),
                        search_space=exp.search_space)
     self.assertIsInstance(gpei, TorchModelBridge)
Example No. 17
 def test_relativize_transform_requires_a_modelbridge_to_have_status_quo_data(self):
     sobol = Models.SOBOL(search_space=get_search_space())
     self.assertIsNone(sobol.status_quo)
     with self.assertRaisesRegex(ValueError, "status quo data"):
         Relativize(
             search_space=None,
             observation_features=[],
             observation_data=[],
             modelbridge=sobol,
         ).transform_observation_data(
             observation_data=[
                 ObservationData(
                     metric_names=["foo"],
                     means=np.array([2]),
                     covariance=np.array([[0.1]]),
                 )
             ],
             observation_features=[ObservationFeatures(parameters={"x": 1})],
         )
Example No. 18
 def test_get_model_from_generator_run(self):
     """Tests that it is possible to restore a model from a generator run it
     produced, if `Models` registry was used.
     """
     exp = get_branin_experiment()
     initial_sobol = Models.SOBOL(experiment=exp, seed=239)
     gr = initial_sobol.gen(n=1)
     # Restore the model as it was before generation.
     sobol = get_model_from_generator_run(generator_run=gr,
                                          experiment=exp,
                                          data=exp.fetch_data())
     self.assertEqual(sobol.model.init_position, 0)
     self.assertEqual(sobol.model.seed, 239)
     # Restore the model as it was after generation (to resume generation).
     sobol_after_gen = get_model_from_generator_run(generator_run=gr,
                                                    experiment=exp,
                                                    data=exp.fetch_data(),
                                                    after_gen=True)
     self.assertEqual(sobol_after_gen.model.init_position, 1)
     self.assertEqual(sobol_after_gen.model.seed, 239)
     self.assertEqual(
         initial_sobol.gen(n=1).arms,
         sobol_after_gen.gen(n=1).arms)
Example No. 19
 def setUp(self) -> None:
     self.experiment = get_experiment()
     self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
     self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
     self.experiment_2 = get_experiment()
     self.batch_trial = self.experiment_2.new_batch_trial(
         GeneratorRun([self.arm]))
     self.batch_trial.set_status_quo_with_weight(
         self.experiment_2.status_quo, 1)
     self.obs_feat = ObservationFeatures.from_arm(arm=self.trial.arm,
                                                  trial_index=np.int64(
                                                      self.trial.index))
     self.hss_exp = get_hierarchical_search_space_experiment()
     self.hss_sobol = Models.SOBOL(search_space=self.hss_exp.search_space)
     self.hss_gr = self.hss_sobol.gen(n=1)
     self.hss_trial = self.hss_exp.new_trial(self.hss_gr)
     self.hss_arm = not_none(self.hss_trial.arm)
     self.hss_cand_metadata = self.hss_trial._get_candidate_metadata(
         arm_name=self.hss_arm.name)
     self.hss_full_parameterization = self.hss_cand_metadata.get(
         Keys.FULL_PARAMETERIZATION).copy()
     self.assertTrue(
         all(p_name in self.hss_full_parameterization
             for p_name in self.hss_exp.search_space.parameters))
     self.hss_obs_feat = ObservationFeatures.from_arm(
         arm=self.hss_arm,
         trial_index=np.int64(self.hss_trial.index),
         metadata=self.hss_cand_metadata,
     )
     self.hss_obs_feat_all_params = ObservationFeatures.from_arm(
         arm=Arm(self.hss_full_parameterization),
         trial_index=np.int64(self.hss_trial.index),
         metadata={
             Keys.FULL_PARAMETERIZATION: self.hss_full_parameterization
         },
     )
Example No. 20
 def test_cross_validate_gives_a_useful_error_for_model_with_no_data(self):
     exp = get_branin_experiment()
     sobol = Models.SOBOL(experiment=exp, search_space=exp.search_space)
     with self.assertRaisesRegex(ValueError, "no training data"):
         cross_validate(model=sobol)
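For contrast, a hedged sketch of the case that does work: once a model is fit to data, cross_validate performs leave-one-out CV and the results can be summarized (the compute_diagnostics import path is assumed from ax.modelbridge.cross_validation):

from ax.modelbridge.cross_validation import compute_diagnostics

exp = get_branin_experiment(with_batch=True)
exp.trials[0].run().complete()
gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
cv_results = cross_validate(model=gpei)        # leave-one-out by default
diagnostics = compute_diagnostics(cv_results)  # includes "Fisher exact test p"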
Example No. 21
        RangeParameter(name="lr", lower=1.0e-5, upper=1.0e-1,     
                               parameter_type=ParameterType.FLOAT),
        RangeParameter(name="lr_change", lower=0.5, upper=1.0,    
                               parameter_type=ParameterType.FLOAT),    
        RangeParameter(name="leafes", lower=2, upper=1000,    
                               parameter_type=ParameterType.INT)]
    )


experiment = SimpleExperiment(
    name=f"weather_lbgm_{dt.datetime.today().strftime('%d-%m-%Y')}",
    search_space=search_space,
    evaluation_function=mdl.obj_fun,
)

sobol = Models.SOBOL(experiment.search_space)
for i in range(n_random_trials):
    experiment.new_trial(generator_run=sobol.gen(1))

best_arm = None
for i in range(n_searches):
    gpei = Models.GPEI(experiment=experiment, data=experiment.eval())
    generator_run = gpei.gen(1)
    best_arm, _ = generator_run.best_arm_predictions
    experiment.new_trial(generator_run=generator_run)

best_para_ax = best_arm.parameters


n_oos = 0
params['num_boost_round'] = 200
Example No. 22
def main():

    args = parse_args()

    function_list = [get_function_by_name[name]
            for name in args.function_name_list.split(',')]
    weight_list = list(map(float, args.weight_list.split(',')))
    covariance_matrix = json.loads(args.covariance_matrix)
    evaluate_covariance = args.evaluate_covariance

    init_iter = args.init_iter
    # if init_iter > 1:
    #     raise ValueError("init_iter should be 1.")
    init_batch_size = args.init_batch_size
    update_iter = args.update_iter
    batch_size = args.batch_size
    var_coef = args.var_coef

    var_compute_type = args.var_compute_type
    num_random = args.num_random
    num_bucket = args.num_bucket

    save_path = args.save_path

    # num_control = args.num_control

    minimize = True

    groundtruth_function = get_groundtruth_function(function_list, weight_list)
    #evaluation_function = get_evaluation_function(
    #        function_list, weight_list, covariance_matrix,
    #        evaluate_covariance, var_coef)
    evaluation_function = get_evaluation_function(
            function_list, weight_list, covariance_matrix,
            var_compute_type, num_random, num_bucket)

    exp = SimpleExperiment(
        name=args.function_name_list + args.weight_list,
        search_space=get_search_space(function_list),
        evaluation_function=evaluation_function,
        objective_name="objective_name",
        minimize=minimize,
    )
    t_start = time.time()
    print(f"Start time: {t_start}")
    print(f"Sobol iteration begin...{time.time() - t_start}")
    sobol = Models.SOBOL(exp.search_space)
    for i in range(init_iter):
        if init_batch_size == 1:
            exp.new_trial(generator_run=sobol.gen(init_batch_size))
        else:
            exp.new_batch_trial(generator_run=sobol.gen(init_batch_size))
        print(f"Running sobol optimization trial {i+1}/{init_iter}..."
              f"{time.time() - t_start}")
    print(f"GPEI iteration begin...{time.time() - t_start}")
    for i in range(update_iter):
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())
        if batch_size == 1:
            exp.new_trial(generator_run=gpei.gen(batch_size))
        else:
            exp.new_batch_trial(generator_run=gpei.gen(batch_size))
        print(f"Running GPEI optimization trial {i+1}/{update_iter}..."
              f"{time.time() - t_start}")

    # Construct Result.
    ## origin data.
    data_df = copy.deepcopy(exp.eval().df)
    compare_func = min if minimize else max
    
    arm_name2mean = {}
    for _, row in data_df.iterrows():
        arm_name2mean[row["arm_name"]] = row["mean"]
    ## parameters true_mean.
    other_columns = {
        "arm_name": [], "parameters": [], "true_mean": [],
        "cur_trial_best_mean": [], "accum_trials_best_mean": []}
    atbm = None # accum_trial_best_mean
    for trial in exp.trials.values():
        ctbm = None # cur_trial_best_mean
        for arm in trial.arms:
            other_columns['arm_name'].append(arm.name)
            other_columns['parameters'].append(json.dumps(arm.parameters))
            other_columns['true_mean'].append(
                    groundtruth_function(arm.parameters))
            if ctbm is None:
                ctbm = arm_name2mean[arm.name]
            ctbm = compare_func(ctbm, arm_name2mean[arm.name])
        if atbm is None:
            atbm = ctbm
        atbm = compare_func(atbm, ctbm)
        other_columns['cur_trial_best_mean'].extend([ctbm] * len(trial.arms))
        other_columns['accum_trials_best_mean'].extend([atbm] * len(trial.arms))
    other_df = DataFrame(other_columns)

    result_df = data_df.set_index('arm_name').join(
            other_df.set_index('arm_name')).reset_index()
    
    # Save to file.
    print("Save to file.")
    sub_dir_name = "_".join([
        "ax", args.function_name_list.replace(",", "_"),
        args.weight_list.replace(",", "_"), args.covariance_matrix.replace(
            "[", "_").replace("]", "_").replace(",", "_").replace(" ", ""),
        str(args.evaluate_covariance), str(args.init_iter), str(init_batch_size),
        str(args.update_iter), str(args.batch_size), str(args.var_coef),
        str(minimize), str(var_compute_type), str(num_random), str(num_bucket)
        ])
    abs_dir_path = os.path.join(save_path, sub_dir_name)
    Path(abs_dir_path).mkdir(parents=True, exist_ok=True)
    task_id = os.environ.get('TASK_INDEX')
    cur_time = pd.Timestamp.now().strftime('%Y%m%d%H%M%S')
    filename = cur_time + "_" + str(task_id) + ".csv"
    print(os.path.join(abs_dir_path, filename))
    result_df.to_csv(os.path.join(abs_dir_path, filename))
    print("2021-01-19 19:48:00")
    print("Done...")
Example No. 23
def sobol(experiment, n_initial_evaluations, seed=None):
    print(f'Running Sobol initialisation...')
    sobol = Models.SOBOL(experiment.search_space, seed=int(seed))
    for i in range(n_initial_evaluations):
        print(f'Sobol: {i + 1}/{n_initial_evaluations}')
        experiment.new_trial(generator_run=sobol.gen(1))
Example No. 24
    def test_get_standard_plots(self):
        exp = get_branin_experiment()
        self.assertEqual(
            len(
                get_standard_plots(experiment=exp,
                                   model=get_generation_strategy().model)),
            0,
        )
        exp = get_branin_experiment(with_batch=True, minimize=True)
        exp.trials[0].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
        )
        self.assertEqual(len(plots), 6)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        exp = get_branin_experiment_with_multi_objective(with_batch=True)
        exp.optimization_config.objective.objectives[0].minimize = False
        exp.optimization_config.objective.objectives[1].minimize = True
        exp.optimization_config._objective_thresholds = [
            ObjectiveThreshold(metric=exp.metrics["branin_a"],
                               op=ComparisonOp.GEQ,
                               bound=-100.0),
            ObjectiveThreshold(metric=exp.metrics["branin_b"],
                               op=ComparisonOp.LEQ,
                               bound=100.0),
        ]
        exp.trials[0].run()
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        # All plots are successfully created when objective thresholds are absent
        exp.optimization_config._objective_thresholds = []
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        exp = get_branin_experiment_with_timestamp_map_metric(
            with_status_quo=True)
        exp.new_trial().add_arm(exp.status_quo)
        exp.trials[0].run()
        exp.new_trial(generator_run=Models.SOBOL(
            search_space=exp.search_space).gen(n=1))
        exp.trials[1].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
            true_objective_metric_name="branin",
        )

        self.assertEqual(len(plots), 9)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        self.assertIn(
            "Objective branin_map vs. True Objective Metric branin",
            [p.layout.title.text for p in plots],
        )

        with self.assertRaisesRegex(
                ValueError, "Please add a valid true_objective_metric_name"):
            plots = get_standard_plots(
                experiment=exp,
                model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
                true_objective_metric_name="not_present",
            )
Example No. 25
def matbench_fold(fold):
    t0 = time()
    train_inputs, train_outputs = task.get_train_and_val_data(fold)
    train_val_df = pd.DataFrame({
        "formula": train_inputs.values,
        "target": train_outputs.values
    })
    if dummy:
        train_val_df = train_val_df[:25]

    optimization_config = OptimizationConfig(objective=Objective(
        metric=CrabNetMetric(name=metric,
                             train_val_df=train_val_df,
                             n_splits=n_splits),
        minimize=True,
    ), )
    # TODO: use status_quo (Arm) as default CrabNet parameters
    exp = Experiment(
        name="nested_crabnet_mae_saas",
        search_space=search_space,
        optimization_config=optimization_config,
        runner=SyntheticRunner(),
    )

    sobol = Models.SOBOL(exp.search_space)
    print("evaluating SOBOL points")
    for _ in range(n_sobol):
        print(_)
        trial = exp.new_trial(generator_run=sobol.gen(1))
        trial.run()
        trial.mark_completed()

    data = exp.fetch_data()
    j = -1
    new_value = np.nan
    best_so_far = np.nan
    for j in range(n_saas):
        saas = Models.FULLYBAYESIAN(
            experiment=exp,
            data=exp.fetch_data(),
            # Increasing num_samples and warmup_steps may result in better model fits.
            num_samples=num_samples,
            warmup_steps=warmup_steps,
            gp_kernel="rbf",  # "rbf" is the default in the paper; "matern" is also supported
            torch_device=tkwargs["device"],
            torch_dtype=tkwargs["dtype"],
            verbose=False,  # Set to True to print stats from MCMC
            disable_progbar=True,  # Set to False to print a progress bar from MCMC
        )
        generator_run = saas.gen(1)
        best_arm, _ = generator_run.best_arm_predictions
        trial = exp.new_trial(generator_run=generator_run)
        trial.run()
        trial.mark_completed()
        data = Data.from_multiple_data([data, trial.fetch_data()])
        new_value = trial.fetch_data().df["mean"].min()
        best_so_far = data.df["mean"].min()
        tf = time()
        print(
            f"iter{j}, BestInIter:{new_value:.3f}, BestSoFar:{best_so_far:.3f} elapsed time: {tf - t0}",
        )

    exp.fetch_data()
    best_parameters = best_arm.parameters

    experiment_fpath = join(experiment_dir, "experiment" + str(fold) + ".json")
    save_experiment(exp, experiment_fpath)

    test_pred, default_mae, test_mae, best_parameterization = get_test_results(
        task, fold, best_parameters, train_val_df)
    print(f"default_mae: {default_mae}")
    print(f"test_mae: {test_mae}")
    # maes.append(test_mae)  # [0.32241879861870626, ...]

    # task.record(fold, test_pred, params=best_parameterization)

    return test_pred, best_parameterization
Example No. 26
def pid(cfg):
    env_name = cfg.env.params.name
    env = gym.make(env_name)
    env.reset()
    full_rewards = []
    exp_cfg = cfg.experiment

    from learn.utils.plotly import hv_characterization
    hv_characterization()

    def compare_control(env, cfg, save=True):
        import torch
        from learn.control.pid import PidPolicy

        controllers = []
        labels = []
        metrics = []

        # PID  baselines
        # /Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/sweeps/2020-04-14/11-12-02

        # from learn.simulate_sac import *
        # Rotation policy
        sac_policy1 = torch.load(
            '/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/outputs/2020-03-24/18-32-26/trial_70000.dat'
        )
        controllers.append(sac_policy1['policy'])
        labels.append("SAC - Rotation")
        metrics.append(0)

        # Living reward policy
        sac_policy2 = torch.load(
            '/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/outputs/2020-03-24/18-31-45/trial_35000.dat'
        )
        controllers.append(sac_policy2['policy'])
        labels.append("SAC - Living")
        metrics.append(1)

        # Square cost policy
        # sac_policy2 = torch.load(
        #     '/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/sweeps/2020-03-25/20-30-47/metric.name=Square,robot=iono_sim/26/trial_40000.dat')
        controllers.append(sac_policy2['policy'])
        labels.append("SAC - Square")
        metrics.append(2)

        # un-Optimized PID parameters
        pid_params = [[2531.917, 61.358, 33.762], [2531.917, 61.358, 33.762]]
        pid = PidPolicy(cfg)
        pid.set_params(pid_params)
        controllers.append(pid)
        labels.append("PID - temp")
        metrics.append(0)

        controllers.append(pid)
        labels.append("PID - temp")
        metrics.append(1)

        # Optimized PID parameters
        pid_params = [[2531.917, 61.358, 3333.762],
                      [2531.917, 61.358, 3333.762]]
        pid = PidPolicy(cfg)
        pid.set_params(pid_params)
        controllers.append(pid)
        labels.append("PID - improved")
        metrics.append(2)

        from learn.control.mpc import MPController
        cfg.policy.mode = 'mpc'
        # dynam_model = torch.load(
        #     '/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/outputs/2020-03-25/10-45-17/trial_1.dat')
        dynam_model = torch.load(
            '/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/sweeps/2020-03-25/20-30-57/metric.name=Rotation,robot=iono_sim/14/trial_9.dat'
        )
        mpc = MPController(env, dynam_model['model'], cfg)

        controllers.append(mpc)
        labels.append("MPC - 1")
        metrics.append(0)
        controllers.append(mpc)
        labels.append("MPC - 2")
        metrics.append(1)
        controllers.append(mpc)
        labels.append("MPC - 3")
        metrics.append(2)

        import plotly.graph_objects as go
        import plotly

        colors = [
            '#1f77b4',  # muted blue
            '#ff7f0e',  # safety orange
            '#2ca02c',  # cooked asparagus green
            '#d62728',  # brick red
            '#9467bd',  # muted purple
            '#8c564b',  # chestnut brown
            '#e377c2',  # raspberry yogurt pink
            '#7f7f7f',  # middle gray
            '#bcbd22',  # curry yellow-green
            '#17becf'  # blue-teal
        ]

        markers = [
            "cross",
            "circle-open-dot",
            "x-open-dot",
            "triangle-up-open-dot",
            "y-down-open",
            "diamond-open-dot",
            "hourglass",
            "hash",
            "star",
            "square",
        ]

        m1 = living_reward
        m2 = rotation_mat
        m3 = squ_cost
        eval_metrics = [m1, m2, m3]
        metric_names = ["Living", "Rotation", "Square"]

        fig = plotly.subplots.make_subplots(
            rows=3,
            cols=2,
            # subplot_titles=["Living", "Rotation", "Square"],
            subplot_titles=[
                "Pitch",
                "Roll",
                " ",
                " ",
                " ",
                " ",
            ],
            vertical_spacing=0.03,
            horizontal_spacing=0.03,
            shared_xaxes=True,
        )  # go.Figure()

        fig_mpc = go.Figure()
        fig_sac = go.Figure()

        pry = [1, 0, 2]
        # state0 = 2*env.reset()
        # state0 = env.reset()
        state0 = np.array([0, np.deg2rad(15), 0, 0, 0, 0])
        for i, (con, lab, m) in enumerate(zip(controllers, labels, metrics)):
            print(f"Evaluating controller type {lab}")
            _ = env.reset()
            env.set_state(np.concatenate((np.zeros(6), state0)))
            state = state0
            states = []
            actions = []
            rews = []
            done = False
            # for t in range(cfg.experiment.r_len + 1):
            for t in range(500):
                if done:
                    break
                if "SAC" in lab:
                    with torch.no_grad():
                        with eval_mode(con):
                            action = con.select_action(state)
                            if i < 2:
                                action = np.array(
                                    [65535, 65535, 65535, 65535]) * (action + 1) / 2
                            else:
                                action = np.array(
                                    [3000, 3000, 3000, 3000]) * (action + 1) / 2

                else:
                    action = con.get_action(state, metric=eval_metrics[m])
                states.append(state)
                actions.append(action)

                state, rew, done, _ = env.step(action)
                done = done

            states = np.stack(states)
            actions = np.stack(actions)

            pitch = np.degrees(states[:, pry[0]])
            roll = np.degrees(states[:, pry[1]])

            # deal with markers
            num_mark = np.zeros(len(pitch))
            mark_every = 50
            m_size = 32
            start = np.random.randint(0, int(len(pitch) / 10))
            num_mark[start::mark_every] = m_size
            if "SAC" in lab:
                fig_sac.add_trace(
                    go.Scatter(
                        y=pitch,
                        name=metric_names[m],  # legendgroup=lab[:3],
                        # showlegend=(True if (i % 3 == 0) else False),
                        line=dict(color=colors[m], width=4),
                        cliponaxis=False,
                        mode='lines+markers',
                        marker=dict(color=colors[m],
                                    symbol=markers[-m],
                                    size=num_mark.tolist())))

            elif "MPC" in lab:
                fig_mpc.add_trace(
                    go.Scatter(
                        y=pitch,
                        name=metric_names[m],  # legendgroup=lab[:3],
                        # showlegend=(True if (i % 3 == 0) else False),
                        line=dict(color=colors[m], width=4),
                        cliponaxis=False,
                        mode='lines+markers',
                        marker=dict(color=colors[m],
                                    symbol=markers[-m],
                                    size=num_mark.tolist())))

            fig.add_trace(
                go.Scatter(
                    y=pitch,
                    name=lab[:3] + str(m),
                    legendgroup=lab[:3],
                    showlegend=(True if (i % 3 == 0) else False),
                    line=dict(color=colors[int(i / 3)],
                              width=2),  # mode='lines+markers',
                    # marker=dict(color=colors[i], symbol=markers[i], size=16)
                ),
                row=m + 1,
                col=1)

            fig.add_trace(
                go.Scatter(
                    y=roll,
                    name=lab[:3] + str(m),
                    legendgroup=lab[:3],
                    showlegend=(False),
                    line=dict(color=colors[int(i / 3)],
                              width=2),  # mode='lines+markers',
                    # marker=dict(color=colors[i], symbol=markers[i], size=16)
                ),
                row=m + 1,
                col=2)

        fig.update_layout(
            title='Comparison of Controllers and Reward Functions',
            font=dict(family="Times New Roman, Times, serif",
                      size=24,
                      color="black"),
            legend_orientation="h",
            legend=dict(
                x=.6,
                y=0.07,
                bgcolor='rgba(205, 223, 212, .4)',
                bordercolor="Black",
            ),
            # xaxis_title='Timestep',
            # yaxis_title='Angle (Degrees)',
            plot_bgcolor='white',
            width=1600,
            height=1000,
            # xaxis=dict(
            #     showline=True,
            #     showgrid=False,
            #     showticklabels=True, ),
            # yaxis=dict(
            #     showline=True,
            #     showgrid=False,
            #     showticklabels=True, ),
        )

        fig_sac.update_layout(  # title='Comparison of SAC Policies',
            font=dict(family="Times New Roman, Times, serif",
                      size=32,
                      color="black"),
            legend_orientation="h",
            legend=dict(
                x=.35,
                y=0.1,
                bgcolor='rgba(205, 223, 212, .4)',
                bordercolor="Black",
            ),
            # xaxis_title='Timestep',
            # yaxis_title='Angle (Degrees)',
            showlegend=False,
            plot_bgcolor='white',
            width=1600,
            height=800,
            margin=dict(t=5, r=5),
        )

        fig_mpc.update_layout(  # title='Comparison of MPC Policies',
            font=dict(family="Times New Roman, Times, serif",
                      size=32,
                      color="black"),
            legend_orientation="h",
            showlegend=False,
            legend=dict(
                x=.35,
                y=0.1,
                bgcolor='rgba(205, 223, 212, .4)',
                bordercolor="Black",
                # ncol= 2,
            ),
            # xaxis_title='Timestep',
            # yaxis_title='Angle (Degrees)',
            plot_bgcolor='white',
            width=1600,
            height=800,
            margin=dict(t=5, r=5),
        )

        reg_color = 'rgba(255,60,60,.15)'
        fig_sac.add_trace(
            go.Scatter(x=[0, 500],
                       y=[5, 5],
                       name='Living Region',
                       legendgroup='Living Region',
                       fill='tozeroy',
                       mode='lines',
                       fillcolor=reg_color,
                       line=dict(width=0.0,
                                 color=reg_color)))  # fill down to xaxis
        fig_sac.add_trace(
            go.Scatter(x=[0, 500],
                       y=[-5, -5],
                       showlegend=False,
                       legendgroup='Living Region',
                       fill='tozeroy',
                       mode='lines',
                       fillcolor=reg_color,
                       line=dict(width=0.0,
                                 color=reg_color)))  # fill down to xaxis

        fig_mpc.add_trace(
            go.Scatter(x=[0, 500],
                       y=[5, 5],
                       name='Living Region',
                       legendgroup='Living Region',
                       fill='tozeroy',
                       mode='lines',
                       fillcolor=reg_color,
                       line=dict(width=0.0,
                                 color=reg_color)))  # fill down to xaxis
        fig_mpc.add_trace(
            go.Scatter(x=[0, 500],
                       y=[-5, -5],
                       showlegend=False,
                       legendgroup='Living Region',
                       fill='tozeroy',
                       mode='lines',
                       fillcolor=reg_color,
                       line=dict(width=0.0,
                                 color=reg_color)))  # fill down to xaxis

        # SOLO
        rang_ind = [-20, 20]
        fig_sac.update_xaxes(
            title_text="Timestep",
            range=[0, 500],
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig_sac.update_yaxes(
            title_text="Pitch (degrees)",
            range=rang_ind,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig_sac.show()
        fig_sac.write_image(os.getcwd() + "/compare_sac.pdf")

        fig_mpc.update_xaxes(
            title_text="Timestep",
            range=[0, 500],
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig_mpc.update_yaxes(
            title_text="Pitch (degrees)",
            range=rang_ind,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig_mpc.show()
        fig_mpc.write_image(os.getcwd() + "/compare_mpc.pdf")

        # COMPARISON

        fig.update_xaxes(
            title_text="Timestep",
            row=3,
            col=1,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_xaxes(
            row=2,
            col=1,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_xaxes(
            row=1,
            col=1,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_xaxes(
            title_text="Timestep",
            row=3,
            col=2,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_xaxes(
            row=2,
            col=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_xaxes(
            row=1,
            col=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        # fig.update_xaxes(title_text="xaxis 1 title", row=1, col=1)
        # fig.update_yaxes(title_text="Roll (Degrees)", row=1, col=1)

        rang = [-30, 30]
        nticks = 6
        fig.update_yaxes(
            title_text="Living Rew.",
            range=rang,
            row=1,
            col=1,
            nticks=nticks,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_yaxes(
            title_text="Rotation Rew.",
            range=rang,
            row=2,
            col=1,
            nticks=nticks,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_yaxes(
            title_text="Square Cost",
            range=rang,
            row=3,
            col=1,
            nticks=nticks,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_yaxes(
            range=rang,
            row=1,
            col=2,
            nticks=nticks,
            showticklabels=False,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_yaxes(
            range=rang,
            row=2,
            col=2,
            nticks=nticks,
            showticklabels=False,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )
        fig.update_yaxes(
            range=rang,
            row=3,
            col=2,
            nticks=nticks,
            showticklabels=False,
            ticks="inside",
            tickwidth=2,
            zeroline=True,
            zerolinecolor='rgba(0,0,0,.5)',
            zerolinewidth=1,
        )

        print(f"Plotting {len(labels)} control responses")
        # save = False
        # if save:
        #     fig.write_image(os.getcwd() + "compare.png")
        # else:
        #     fig.show()
        #
        # return fig

    # compare_control(env, cfg, save=True)
    # quit()
    plot_results(logx=False, save=True, mpc=False)
    quit()

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # # # # # # # # # # # Evalutation Function  # # # # # # # # # # # # # # # # # # # #
    def bo_rollout_wrapper(params, weights=None):  # env, controller, exp_cfg):
        pid_1 = [params["pitch-p"], params["pitch-i"], params["pitch-d"]]
        # pid_1 = [params["roll-p"], params["roll-i"],
        #          params["roll-d"]]  # [params["pitch-p"], params["pitch-i"], params["pitch-d"]]
        pid_2 = [params["roll-p"], params["roll-i"], params["roll-d"]]
        print(
            f"Optimizing Parameters {np.round(pid_1, 3)},{np.round(pid_2, 3)}")
        pid_params = [[pid_1[0], pid_1[1], pid_1[2]],
                      [pid_2[0], pid_2[1], pid_2[2]]]
        # pid_params = [[1000, 0, 0], [1000, 0, 0]]
        pid = PidPolicy(cfg)

        pid.set_params(pid_params)

        cum_cost = []
        r = 0
        fncs = [squ_cost, living_reward, rotation_mat]
        mult_rewards = [[] for _ in range(len(fncs))]
        while r < cfg.experiment.repeat:
            pid.reset()
            states, actions, rews, sim_error = rollout(env, pid, exp_cfg)
            # plot_rollout(states, actions, pry=[1, 0, 2])
            rewards_full = get_rewards(states, actions, fncs=fncs)
            for i, vec in enumerate(rewards_full):
                mult_rewards[i].append(vec)

            # if sim_error:
            #     print("Repeating strange simulation")
            #     continue
            # if len(rews) < 400:
            #     cum_cost.append(-(cfg.experiment.r_len - len(rews)) / cfg.experiment.r_len)
            # else:
            rollout_cost = np.sum(rews) / cfg.experiment.r_len  # / len(rews)
            # if rollout_cost > max_cost:
            #      max_cost = rollout_cost
            # rollout_cost += get_reward_euler(states[-1], actions[-1])
            cum_cost.append(rollout_cost)
            r += 1

        std = np.std(cum_cost)
        cum_cost = np.mean(cum_cost)
        # print(f"Cum. Cost {cum_cost / max_cost}")
        # print(f"- Mean Cum. Cost / Rew: {cum_cost}, std dev: {std}")
        eval = {
            "Square": (np.mean(rewards_full[0]), np.std(rewards_full[0])),
            "Living": (np.mean(rewards_full[1]), np.std(rewards_full[1])),
            "Rotation": (np.mean(rewards_full[2]), np.std(rewards_full[2]))
        }

        for n, (key, value) in enumerate(eval.items()):
            if n == 0:
                print(f"- Square {np.round(value, 4)}")
            elif n == 1:
                print(f"- Living {np.round(value, 4)}")
            else:
                print(f"- Rotn {np.round(value, 4)}")
        return eval
        # return cum_cost.reshape(1, 1), std

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

    from ax import (
        ComparisonOp,
        ParameterType,
        RangeParameter,
        SearchSpace,
        SimpleExperiment,
        OutcomeConstraint,
    )

    exp = SimpleExperiment(
        name="PID Control Robot",
        search_space=SearchSpace([
            RangeParameter(
                name=f"roll-p",
                parameter_type=ParameterType.FLOAT,
                lower=1.0,
                upper=10000.0,
                log_scale=True,
            ),
            # FixedParameter(name="roll-i", value=0.0, parameter_type=ParameterType.FLOAT),
            RangeParameter(
                name=f"roll-i",
                parameter_type=ParameterType.FLOAT,
                lower=0,
                upper=1000.0,
                log_scale=False,
            ),
            RangeParameter(
                name=f"roll-d",
                parameter_type=ParameterType.FLOAT,
                lower=.1,
                upper=5000.0,
                log_scale=True,
            ),
            RangeParameter(
                name=f"pitch-p",
                parameter_type=ParameterType.FLOAT,
                lower=1.0,
                upper=10000.0,
                log_scale=True,
            ),
            RangeParameter(
                name=f"pitch-d",
                parameter_type=ParameterType.FLOAT,
                lower=0,
                upper=1000.0,
                log_scale=False,
            ),
            RangeParameter(
                name=f"pitch-i",
                parameter_type=ParameterType.FLOAT,
                lower=.1,
                upper=5000.0,
                log_scale=True,
            ),
            # FixedParameter(name="pitch-i", value=0.0, parameter_type=ParameterType.FLOAT),
        ]),
        evaluation_function=bo_rollout_wrapper,
        objective_name=cfg.metric.name,
        minimize=cfg.metric.minimize,
        outcome_constraints=[],
    )

    from ax.storage.metric_registry import register_metric
    from ax.storage.runner_registry import register_runner

    class GenericMetric(Metric):
        def fetch_trial_data(self, trial):
            records = []
            for arm_name, arm in trial.arms_by_name.items():
                params = arm.parameters
                # bo_rollout_wrapper returns a dict keyed by metric name
                # ("Square", "Living", "Rotation"); pull out the (mean, std)
                # pair for this metric.
                mean, sem = bo_rollout_wrapper(params)[self.name]
                records.append({
                    "arm_name": arm_name,
                    "metric_name": self.name,
                    "mean": mean,
                    "sem": sem,
                    "trial_index": trial.index,
                })
            return Data(df=pd.DataFrame.from_records(records))

    class MyRunner(Runner):
        def run(self, trial):
            return {"name": str(trial.index)}

    optimization_config = OptimizationConfig(objective=Objective(
        metric=GenericMetric(name=cfg.metric.name),
        minimize=cfg.metric.minimize,
    ), )
    register_metric(GenericMetric)
    register_runner(MyRunner)

    exp.runner = MyRunner()
    exp.optimization_config = optimization_config

    log.info(f"Running experiment, metric name {cfg.metric.name}")
    log.info(f"Running Sobol initialization trials...")
    sobol = Models.SOBOL(exp.search_space)
    num_search = cfg.bo.random
    for i in range(num_search):
        exp.new_trial(generator_run=sobol.gen(1))
        exp.trials[len(exp.trials) - 1].run()

    import plotly.graph_objects as go

    gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

    objectives = ["Living", "Square", "Rotation"]

    def plot_all(model, objectives, name="", rend=False):
        for o in objectives:
            plot = plot_contour(
                model=model,
                param_x="roll-p",
                param_y="roll-d",
                metric_name=o,
            )
            plot[0]['layout']['title'] = o
            data = plot[0]['data']
            lay = plot[0]['layout']

            for i, d in enumerate(data):
                if i > 1:
                    d['cliponaxis'] = False

            fig = {
                "data": data,
                "layout": lay,
            }
            go.Figure(fig).write_image(name + o + ".png")
            if rend: render(plot)

    plot_all(gpei, objectives, name="Random fit-")

    num_opt = cfg.bo.optimized
    for i in range(num_opt):
        log.info(f"Running GP+EI optimization trial {i + 1}/{num_opt}...")
        # Reinitialize GP+EI model at each step with updated data.
        batch = exp.new_trial(generator_run=gpei.gen(1))
        gpei = Models.BOTORCH(experiment=exp, data=exp.eval())

        if ((i + 1) % 10) == 0:
            plot_all(gpei,
                     objectives,
                     name=f"optimizing {str(i + 1)}-",
                     rend=False)

    from ax.plot.exp_utils import exp_to_df

    best_arm, _ = gpei.gen(1).best_arm_predictions
    best_parameters = best_arm.parameters
    log.info(f"Best parameters {best_parameters}")

    experiment_log = {
        "Exp": exp_to_df(exp=exp),
        "Cfg": cfg,
        "Best_param": best_parameters,
    }

    log.info("Printing Parameters")
    log.info(exp_to_df(exp=exp))
    save_log(cfg, exp, experiment_log)

    fig_learn = plot_learning(exp, cfg)
    fig_learn.write_image("learning" + ".png")
    fig_learn.show()
    plot_all(gpei, objectives, name=f"FINAL -", rend=True)
Example No. 27
    def test_best_from_model_prediction(self):
        exp = get_branin_experiment()

        for _ in range(3):
            sobol = Models.SOBOL(search_space=exp.search_space)
            generator_run = sobol.gen(n=1)
            trial = exp.new_trial(generator_run=generator_run)
            trial.run()
            trial.mark_completed()
            exp.attach_data(exp.fetch_data())

        gpei = Models.BOTORCH(experiment=exp, data=exp.lookup_data())
        generator_run = gpei.gen(n=1)
        trial = exp.new_trial(generator_run=generator_run)
        trial.run()
        trial.mark_completed()

        with patch.object(
            ArrayModelBridge,
            "model_best_point",
            return_value=(
                (
                    Arm(
                        name="0_0",
                        parameters={"x1": -4.842811906710267, "x2": 11.887089014053345},
                    ),
                    (
                        {"branin": 34.76260622783635},
                        {"branin": {"branin": 0.00028306433439807734}},
                    ),
                )
            ),
        ) as mock_model_best_point, self.assertLogs(
            logger="ax.service.utils.best_point", level="WARN"
        ) as lg:
            # Test bad model fit causes function to resort back to raw data
            with patch(
                "ax.service.utils.best_point.assess_model_fit",
                return_value=AssessModelFitResult(
                    good_fit_metrics_to_fisher_score={},
                    bad_fit_metrics_to_fisher_score={
                        "branin": 0,
                    },
                ),
            ):
                self.assertIsNotNone(get_best_parameters(exp, Models))
                self.assertTrue(
                    any("Model fit is poor" in warning for warning in lg.output),
                    msg=lg.output,
                )
                mock_model_best_point.assert_not_called()

            # Test model best point is used when fit is good
            with patch(
                "ax.service.utils.best_point.assess_model_fit",
                return_value=AssessModelFitResult(
                    good_fit_metrics_to_fisher_score={
                        "branin": 0,
                    },
                    bad_fit_metrics_to_fisher_score={},
                ),
            ):
                self.assertIsNotNone(get_best_parameters(exp, Models))
                mock_model_best_point.assert_called()

        # Assert the non-mocked method works correctly as well
        self.assertIsNotNone(get_best_parameters(exp, Models))