Example #1
 def test_default_generation_strategy_discrete(self) -> None:
     """Test that Sobol is used if no GenerationStrategy is provided and
     the search space is discrete.
     """
     # Test that Sobol is chosen when all parameters are choice.
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[  # pyre-fixme[6]: expected union that should include
             {
                 "name": "x",
                 "type": "choice",
                 "values": [1, 2, 3]
             },
             {
                 "name": "y",
                 "type": "choice",
                 "values": [1, 2, 3]
             },
         ])
     self.assertEqual(
         [s.model for s in not_none(ax_client.generation_strategy)._steps],
         [Models.SOBOL],
     )
     self.assertEqual(ax_client.get_recommended_max_parallelism(),
                      [(-1, -1)])
     self.assertTrue(ax_client.get_trials_data_frame().empty)
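
For context, a minimal sketch of the loop such a Sobol-only strategy drives in normal use; `evaluate` is a hypothetical caller-supplied function returning `{metric_name: (mean, SEM)}`:

from ax.service.ax_client import AxClient

def evaluate(parameters):
    # Hypothetical objective: smaller x + y is better.
    return {"objective": (parameters["x"] + parameters["y"], 0.0)}

ax_client = AxClient()
ax_client.create_experiment(
    parameters=[
        {"name": "x", "type": "choice", "values": [1, 2, 3]},
        {"name": "y", "type": "choice", "values": [1, 2, 3]},
    ],
    minimize=True,
)
for _ in range(5):
    parameters, trial_index = ax_client.get_next_trial()
    ax_client.complete_trial(trial_index=trial_index,
                             raw_data=evaluate(parameters))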
Example #2
 def test_raw_data_format_with_fidelities(self):
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 1.0]
             },
         ],
         minimize=True,
     )
     for _ in range(6):
         parameterization, trial_index = ax_client.get_next_trial()
         x, y = parameterization.get("x"), parameterization.get("y")
         ax_client.complete_trial(
             trial_index,
             raw_data=[
                 ({"y": y / 2.0}, {"objective": (branin(x, y / 2.0), 0.0)}),
                 ({"y": y}, {"objective": (branin(x, y), 0.0)}),
             ],
         )
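
Note: the `raw_data` above uses the fidelity-annotated format that `complete_trial` accepts, a list of (fidelity parameterization, metric outcomes) pairs; a minimal sketch with illustrative values:

raw_data = [
    ({"y": 0.5}, {"objective": (1.4, 0.0)}),  # low-fidelity evaluation
    ({"y": 1.0}, {"objective": (0.9, 0.0)}),  # full-fidelity evaluation
]
ax_client.complete_trial(trial_index, raw_data=raw_data)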
Example #3
    def setup_ax_client(self, arguments: List[str]) -> AxClient:
        """Method to setup the Ax Client"""
        parameters: List[Dict[Any, Any]] = []
        for key, value in self.ax_params.items():
            param = OmegaConf.to_container(value, resolve=True)
            assert isinstance(param, Dict)
            if param["type"] == "range":
                bounds = param["bounds"]
                if not (all(isinstance(x, int) for x in bounds)):
                    # Type mismatch. Promote all to float
                    param["bounds"] = [float(x) for x in bounds]

            parameters.append(param)
            parameters[-1]["name"] = key
        commandline_params = self.parse_commandline_args(arguments)
        for cmd_param in commandline_params:
            for param in parameters:
                if param["name"] == cmd_param["name"]:
                    for key, value in cmd_param.items():
                        param[key] = value
                    break
            else:
                parameters.append(cmd_param)

        log.info(
            f"AxSweeper is optimizing the following parameters: {encoder_parameters_into_string(parameters)}"
        )
        ax_client = AxClient(
            verbose_logging=self.ax_client_config.verbose_logging,
            random_seed=self.ax_client_config.random_seed,
        )
        ax_client.create_experiment(parameters=parameters, **self.experiment)

        return ax_client
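
The command-line merge above hinges on Python's for/else (the `else` runs only if the inner loop never hit `break`); a standalone sketch of the same logic with hypothetical parameters:

parameters = [{"name": "lr", "type": "range", "bounds": [1e-5, 1e-1]}]
commandline_params = [
    {"name": "lr", "type": "range", "bounds": [1e-4, 1e-2]},      # updates "lr"
    {"name": "dropout", "type": "choice", "values": [0.1, 0.5]},  # appended
]
for cmd_param in commandline_params:
    for param in parameters:
        if param["name"] == cmd_param["name"]:
            param.update(cmd_param)  # override the existing definition
            break
    else:
        parameters.append(cmd_param)  # unseen name: add as a new parameter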
Example #4
 def test_recommended_parallelism(self):
     ax_client = AxClient()
     with self.assertRaisesRegex(ValueError, "No generation strategy"):
         ax_client.get_max_parallelism()
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     self.assertEqual(ax_client.get_max_parallelism(), [(5, 5), (-1, 3)])
     self.assertEqual(
         run_trials_using_recommended_parallelism(
             ax_client, ax_client.get_max_parallelism(), 20),
         0,
     )
     # With incorrect parallelism setting, the 'need more data' error should
     # still be raised.
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     with self.assertRaisesRegex(DataRequiredError,
                                 "All trials for current model "):
         run_trials_using_recommended_parallelism(ax_client, [(6, 6),
                                                              (-1, 3)], 20)
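
For reference, each tuple from `get_max_parallelism()` is (number of trials in a generation step, maximum that may run in parallel), with -1 meaning "unlimited"; so [(5, 5), (-1, 3)] reads: run the first 5 (Sobol) trials up to 5 at a time, then at most 3 in parallel thereafter. A small sketch:

for num_trials, parallelism in ax_client.get_max_parallelism():
    print(f"{num_trials} trials, at most {parallelism} in parallel")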
Example #5
 def test_keep_generating_without_data(self):
     # Check that the number of arms to generate is normally enforced.
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     for _ in range(5):
         parameterization, trial_index = ax_client.get_next_trial()
     with self.assertRaisesRegex(DataRequiredError,
                                 "All trials for current model"):
         ax_client.get_next_trial()
     # Check that with enforce_sequential_optimization off, we can keep
     # generating.
     ax_client = AxClient(enforce_sequential_optimization=False)
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     self.assertFalse(
         ax_client.generation_strategy._steps[0].enforce_num_trials)
     self.assertIsNone(
         ax_client.generation_strategy._steps[1].max_parallelism)
     for _ in range(10):
         parameterization, trial_index = ax_client.get_next_trial()
Example #6
 def test_plotting_validation(self):
     ax_client = AxClient()
     ax_client.create_experiment(parameters=[{
         "name": "x3",
         "type": "fixed",
         "value": 2,
         "value_type": "int"
     }])
     with self.assertRaisesRegex(ValueError, ".* there are no trials"):
         ax_client.get_contour_plot()
     with self.assertRaisesRegex(ValueError, ".* there are no trials"):
         ax_client.get_feature_importances()
     ax_client.get_next_trial()
     with self.assertRaisesRegex(ValueError, ".* less than 2 parameters"):
         ax_client.get_contour_plot()
     ax_client = AxClient()
     ax_client.create_experiment(parameters=[
         {
             "name": "x",
             "type": "range",
             "bounds": [-5.0, 10.0]
         },
         {
             "name": "y",
             "type": "range",
             "bounds": [0.0, 15.0]
         },
     ])
     ax_client.get_next_trial()
     with self.assertRaisesRegex(ValueError, "If `param_x` is provided"):
         ax_client.get_contour_plot(param_x="y")
     with self.assertRaisesRegex(ValueError, "If `param_x` is provided"):
         ax_client.get_contour_plot(param_y="y")
     with self.assertRaisesRegex(ValueError, 'Parameter "x3"'):
         ax_client.get_contour_plot(param_x="x3", param_y="x3")
     with self.assertRaisesRegex(ValueError, 'Parameter "x4"'):
         ax_client.get_contour_plot(param_x="x", param_y="x4")
     with self.assertRaisesRegex(ValueError, 'Metric "nonexistent"'):
         ax_client.get_contour_plot(param_x="x",
                                    param_y="y",
                                    metric_name="nonexistent")
     with self.assertRaisesRegex(ValueError, "Could not obtain contour"):
         ax_client.get_contour_plot(param_x="x",
                                    param_y="y",
                                    metric_name="objective")
     with self.assertRaisesRegex(ValueError, "Could not obtain feature"):
         ax_client.get_feature_importances()
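
Both plotting helpers need a fitted model, not just trials; with only Sobol data they still fail, as asserted above. A sketch of the eventual success path, assuming enough completed trials for a model-based (e.g. GPEI) step to be fit:

plot_config = ax_client.get_contour_plot(param_x="x", param_y="y",
                                         metric_name="objective")
importances = ax_client.get_feature_importances()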
Example #7
def main(data_path, experiment_path, model_path, params_path):
    ray.init(address='auto')

    data_path = os.path.abspath(data_path)
    params_path = os.path.abspath(params_path)
    model_path = os.path.abspath(model_path)    
    n_splits = 4

    cfg = pickle.load(open(params_path, "rb"))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    exp_config = {
        **locals().copy(),
        **cfg,
        'objective': 'soft-boundary',
        'net_name': 'cicflow_mlp_2',
    }

    if exp_config['seed'] != -1:
        random.seed(exp_config['seed'])
        np.random.seed(exp_config['seed'])
        torch.manual_seed(exp_config['seed'])
        torch.cuda.manual_seed(exp_config['seed'])
        torch.backends.cudnn.deterministic = True

    dates = ['2019-11-08', '2019-11-09', '2019-11-11', '2019-11-12',
             '2019-11-13', '2019-11-14', '2019-11-15', '2019-11-16',
             '2019-11-17', '2019-11-18', '2019-11-19']

    ax = AxClient(enforce_sequential_optimization=False)
    ax.create_experiment(
        name="SVDDCICFlowExp",
        parameters=[
            {
                "name": "dates",
                "type": "choice",
                "values": dates
            },
        ],
        objective_name="val_auc_pr",
    )

    search_alg = AxSearch(ax)

    analysis = tune.run(OneDaySVDDCICFlowExp,
                        name="DriftSVDDCICFlowExp",
                        checkpoint_at_end=True,
                        checkpoint_freq=1,
                        stop={
                            "training_iteration": 1,
                        },
                        resources_per_trial={"gpu": 1},
                        num_samples=len(dates),
                        local_dir=experiment_path,
                        search_alg=search_alg,
                        config=exp_config)

    print("Best config is:", analysis.get_best_config(metric="val_auc_pr"))
Example #8
def _benchmark_replication_Service_API(
    problem: SimpleBenchmarkProblem,
    method: GenerationStrategy,
    num_trials: int,
    experiment_name: str,
    batch_size: int = 1,
    raise_all_exceptions: bool = False,
    benchmark_trial: FunctionType = benchmark_trial,
    verbose_logging: bool = True,
    # Number of trials that need to fail for a replication to be considered failed.
    failed_trials_tolerated: int = 5,
) -> Tuple[Experiment, List[Exception]]:
    """Run a benchmark replication via the Service API because the problem was
    set up in a simplified way, without the use of Ax classes like `OptimizationConfig`
    or `SearchSpace`.
    """
    exceptions = []
    if batch_size == 1:
        ax_client = AxClient(generation_strategy=method,
                             verbose_logging=verbose_logging)
    else:  # pragma: no cover, TODO[T53975770]
        assert batch_size > 1, "Batch size of 1 or greater is expected."
        raise NotImplementedError(
            "Batched benchmarking on `SimpleBenchmarkProblem`-s not yet implemented."
        )
    ax_client.create_experiment(
        name=experiment_name,
        parameters=problem.domain_as_ax_client_parameters(),
        minimize=problem.minimize,
        objective_name=problem.name,
    )
    parameter_names = list(ax_client.experiment.search_space.parameters.keys())
    assert num_trials > 0
    for _ in range(num_trials):
        parameterization, idx = ax_client.get_next_trial()
        param_values = np.array(
            [parameterization.get(x) for x in parameter_names])
        try:
            mean, sem = benchmark_trial(parameterization=param_values,
                                        evaluation_function=problem.f)
            # If problem indicates a noise level and is using a synthetic callable,
            # add normal noise to the measurement of the mean.
            if problem.uses_synthetic_function and problem.noise_sd != 0.0:
                noise = np.random.randn() * problem.noise_sd
                sem = (sem or 0.0) + problem.noise_sd
                logger.info(
                    f"Adding noise of {noise} to the measurement mean ({mean})."
                    f"Problem noise SD setting: {problem.noise_sd}.")
                mean = mean + noise
            ax_client.complete_trial(trial_index=idx, raw_data=(mean, sem))
        except Exception as err:  # TODO[T53975770]: test
            if raise_all_exceptions:
                raise
            exceptions.append(err)
        if len(exceptions) > failed_trials_tolerated:
            raise RuntimeError(  # TODO[T53975770]: test
                f"More than {failed_trials_tolerated} failed for {experiment_name}."
            )
    return ax_client.experiment, exceptions
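
A hypothetical invocation of this helper (problem and strategy construction are outside this snippet; the names below are illustrative):

experiment, exceptions = _benchmark_replication_Service_API(
    problem=my_problem,              # a SimpleBenchmarkProblem
    method=my_generation_strategy,   # a GenerationStrategy
    num_trials=20,
    experiment_name="branin_benchmark",
)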
Example #9
 def initialize_ax_client(self):
     self.ax_client = AxClient()
     self.ax_client.create_experiment(
         parameters=[
             {"name": "amount", "type": "range", "bounds": [0.05, 0.15], "value_type": "float"}
         ],
         objective_name="test_accuracy",
     )
Example #10
 def test_deprecated_save_load_method_errors(self):
     ax_client = AxClient()
     with self.assertRaises(NotImplementedError):
         ax_client.save()
     with self.assertRaises(NotImplementedError):
         ax_client.load()
     with self.assertRaises(NotImplementedError):
         ax_client.load_experiment("test_experiment")
Example #11
    def _setup_experiment(self):
        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC

        if not self._ax:
            self._ax = AxClient(**self._ax_kwargs)

        try:
            exp = self._ax.experiment
            has_experiment = True
        except ValueError:
            has_experiment = False

        if not has_experiment:
            if not self._space:
                raise ValueError(
                    "You have to create an Ax experiment by calling "
                    "`AxClient.create_experiment()`, or you should pass an "
                    "Ax search space as the `space` parameter to `AxSearch`, "
                    "or pass a `config` dict to `tune.run()`.")
            if self._mode not in ["min", "max"]:
                raise ValueError(
                    "Please specify the `mode` argument when initializing "
                    "the `AxSearch` object or pass it to `tune.run()`.")
            self._ax.create_experiment(
                parameters=self._space,
                objective_name=self._metric,
                parameter_constraints=self._parameter_constraints,
                outcome_constraints=self._outcome_constraints,
                minimize=self._mode != "max")
        else:
            if any([
                    self._space, self._parameter_constraints,
                    self._outcome_constraints, self._mode, self._metric
            ]):
                raise ValueError(
                    "If you create the Ax experiment yourself, do not pass "
                    "values for these parameters to `AxSearch`: {}.".format([
                        "space",
                        "parameter_constraints",
                        "outcome_constraints",
                        "mode",
                        "metric",
                    ]))

        exp = self._ax.experiment

        # Update mode and metric from experiment if it has been passed
        self._mode = "min" \
            if exp.optimization_config.objective.minimize else "max"
        self._metric = exp.optimization_config.objective.metric.name

        self._parameters = list(exp.parameters)

        if self._ax._enforce_sequential_optimization:
            logger.warning("Detected sequential enforcement. Be sure to use "
                           "a ConcurrencyLimiter.")
Example #12
 def test_default_generation_strategy(self) -> None:
     """Test that Sobol+GPEI is used if no GenerationStrategy is provided."""
     ax = AxClient()
     ax.create_experiment(
         parameters=[
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         objective_name="branin",
         minimize=True,
     )
     self.assertEqual(
         [s.model for s in ax.generation_strategy._steps],
         [Models.SOBOL, Models.GPEI],
     )
     for _ in range(6):
         parameterization, trial_index = ax.get_next_trial()
         x1, x2 = parameterization.get("x1"), parameterization.get("x2")
         ax.complete_trial(trial_index,
                           raw_data={"branin": (branin(x1, x2), 0.0)})
     # Test that Sobol is chosen when all parameters are choice.
     ax = AxClient()
     ax.create_experiment(parameters=[
         {
             "name": "x1",
             "type": "choice",
             "values": [1, 2, 3]
         },
         {
             "name": "x2",
             "type": "choice",
             "values": [1, 2, 3]
         },
     ])
     self.assertEqual([s.model for s in ax.generation_strategy._steps],
                      [Models.SOBOL])
     self.assertEqual(ax.get_recommended_max_parallelism(), [(-1, -1)])
Example #13
 def test_sqa_storage(self):
     init_test_engine_and_session_factory(force_init=True)
     config = SQAConfig()
     encoder = Encoder(config=config)
     decoder = Decoder(config=config)
     db_settings = DBSettings(encoder=encoder, decoder=decoder)
     ax_client = AxClient(db_settings=db_settings)
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
             {"name": "y", "type": "range", "bounds": [0.0, 15.0]},
         ],
         minimize=True,
     )
     for _ in range(5):
         parameters, trial_index = ax_client.get_next_trial()
         ax_client.complete_trial(
             trial_index=trial_index, raw_data=branin(*parameters.values())
         )
     gs = ax_client.generation_strategy
     ax_client = AxClient(db_settings=db_settings)
     ax_client.load_experiment_from_database("test_experiment")
     self.assertEqual(gs, ax_client.generation_strategy)
     with self.assertRaises(ValueError):
         # Overwriting existing experiment.
         ax_client.create_experiment(
             name="test_experiment",
             parameters=[
                 {"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
                 {"name": "y", "type": "range", "bounds": [0.0, 15.0]},
             ],
             minimize=True,
         )
     with self.assertRaises(ValueError):
         # Overwriting existing experiment with overwrite flag with present
         # DB settings. This should fail as we no longer allow overwriting
         # experiments stored in the DB.
         ax_client.create_experiment(
             name="test_experiment",
             parameters=[{"name": "x", "type": "range", "bounds": [-5.0, 10.0]}],
             overwrite_existing_experiment=True,
         )
     # Original experiment should still be in DB and not have been overwritten.
     self.assertEqual(len(ax_client.experiment.trials), 5)
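
Outside of tests, storage is typically initialized against a real database URL before constructing the client; a sketch, assuming the Ax storage API these examples target:

from ax.storage.sqa_store.db import init_engine_and_session_factory

init_engine_and_session_factory(url="sqlite:///ax_experiments.db")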
Example #14
def ax_client_with_explicit_strategy(num_random, num_computed):
    steps = []
    if num_random > 0:
        steps.append(GenerationStep(model=Models.SOBOL, num_arms=num_random))
    if num_computed > 0:
        steps.append(GenerationStep(model=Models.GPEI, num_arms=-1))

    return AxClient(enforce_sequential_optimization=False,
                    generation_strategy=GenerationStrategy(steps))
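
Hypothetical usage: because the GPEI step is added with num_arms=-1 (unlimited), any positive num_computed enables an open-ended model-based phase after the Sobol arms:

# 5 quasi-random Sobol arms, then GPEI until the caller stops.
ax_client = ax_client_with_explicit_strategy(num_random=5, num_computed=1)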
Example #15
 def get_ax_client(self):
     log_paths = self.get_all_log_paths()
     ax_client = None
     if len(log_paths) > 0:
         ax_client = open_log(log_paths)
     if ax_client is None:
         ax_client = AxClient()
         ax_client.create_experiment(
             parameters=self.bayes_params,
             name=self.YR.args.experiment_name,
             minimize=False,
             objective_name=self.YR.args.eval_primary_metric)
     return ax_client
Example #16
 def test_trial_completion(self):
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     params, idx = ax_client.get_next_trial()
     # Can't update before completing.
     with self.assertRaisesRegex(ValueError, ".* not yet"):
         ax_client.update_trial_data(trial_index=idx,
                                     raw_data={"objective": (0, 0.0)})
     ax_client.complete_trial(trial_index=idx,
                              raw_data={"objective": (0, 0.0)})
     # Cannot complete a trial twice, should use `update_trial_data`.
     with self.assertRaisesRegex(ValueError, ".* already been completed"):
         ax_client.complete_trial(trial_index=idx,
                                  raw_data={"objective": (0, 0.0)})
     # Cannot update trial data with observation for a metric it already has.
     with self.assertRaisesRegex(ValueError, ".* contained an observation"):
         ax_client.update_trial_data(trial_index=idx,
                                     raw_data={"objective": (0, 0.0)})
     # Same as above, except the objective name should be inferred.
     with self.assertRaisesRegex(ValueError, ".* contained an observation"):
         ax_client.update_trial_data(trial_index=idx, raw_data=1.0)
     ax_client.update_trial_data(trial_index=idx, raw_data={"m1": (1, 0.0)})
     metrics_in_data = (
         ax_client.experiment.fetch_data().df["metric_name"].values)
     self.assertIn("m1", metrics_in_data)
     self.assertIn("objective", metrics_in_data)
     self.assertEqual(ax_client.get_best_parameters()[0], params)
     params2, idy = ax_client.get_next_trial()
     ax_client.complete_trial(trial_index=idy, raw_data=(-1, 0.0))
     self.assertEqual(ax_client.get_best_parameters()[0], params2)
     params3, idx3 = ax_client.get_next_trial()
     ax_client.complete_trial(trial_index=idx3,
                              raw_data=-2,
                              metadata={"dummy": "test"})
     self.assertEqual(ax_client.get_best_parameters()[0], params3)
     self.assertEqual(
         ax_client.experiment.trials.get(2).run_metadata.get("dummy"),
         "test")
     best_trial_values = ax_client.get_best_parameters()[1]
     self.assertEqual(best_trial_values[0], {"objective": -2.0})
     self.assertTrue(
         math.isnan(best_trial_values[1]["objective"]["objective"]))
Example #17
 def test_no_sqa(self):
     # Pretend we couldn't import sqa_store.structs (this could happen when
     # SQLAlchemy is not installed).
     patcher = patch("ax.service.ax_client.DBSettings", None)
     patcher.start()
     with self.assertRaises(ModuleNotFoundError):
         import ax.storage.sqa_store.structs  # noqa F401
     AxClient()  # Make sure we still can instantiate client w/o db settings.
     # Even with correctly typed DBSettings, `AxClient` instantiation should
     # fail here, because `DBSettings` are mocked to None in `ax_client`.
     db_settings = DBSettings()
     self.assertIsInstance(db_settings, DBSettings)
     with self.assertRaisesRegex(ValueError, "`db_settings` argument should "):
         AxClient(db_settings=db_settings)
     patcher.stop()
     # DBSettings should be defined in `ax_client` now, but incorrectly typed
     # `db_settings` argument should still make instantiation fail.
     with self.assertRaisesRegex(ValueError, "`db_settings` argument should "):
         AxClient(db_settings="badly_typed_db_settings")
Example #18
 def test_keep_generating_without_data(self):
     # Check that the number of arms to generate is normally enforced.
     ax = AxClient()
     ax.create_experiment(
         parameters=[
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     for _ in range(5):
         parameterization, trial_index = ax.get_next_trial()
     with self.assertRaisesRegex(ValueError,
                                 "All trials for current model"):
         ax.get_next_trial()
     # Check that with enforce_sequential_optimization off, we can keep
     # generating.
     ax = AxClient(enforce_sequential_optimization=False)
     ax.create_experiment(
         parameters=[
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         minimize=True,
     )
     for _ in range(10):
         parameterization, trial_index = ax.get_next_trial()
Example #19
    def testAx(self):
        from ray.tune.suggest.ax import AxSearch
        from ax.service.ax_client import AxClient

        converted_config = AxSearch.convert_search_space(self.config)
        client = AxClient()
        client.create_experiment(
            parameters=converted_config, objective_name=self.metric_name, minimize=False
        )
        searcher = AxSearch(ax_client=client)

        self._save(searcher)

        client = AxClient()
        client.create_experiment(
            parameters=converted_config, objective_name=self.metric_name, minimize=False
        )
        searcher = AxSearch(ax_client=client)
        self._restore(searcher)
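
A sketch of what the `_save`/`_restore` helpers exercise: Ray Tune searchers persist and recover their state via checkpoint files (the path is illustrative):

searcher.save("/tmp/ax_searcher_checkpoint.pkl")
new_searcher = AxSearch(ax_client=client)
new_searcher.restore("/tmp/ax_searcher_checkpoint.pkl")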
Example #20
 def test_sqa_storage(self):
     init_test_engine_and_session_factory(force_init=True)
     config = SQAConfig()
     encoder = Encoder(config=config)
     decoder = Decoder(config=config)
     db_settings = DBSettings(encoder=encoder, decoder=decoder)
     ax_client = AxClient(db_settings=db_settings)
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
             {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
         ],
         minimize=True,
     )
     for _ in range(5):
         parameters, trial_index = ax_client.get_next_trial()
         ax_client.complete_trial(
             trial_index=trial_index, raw_data=branin(*parameters.values())
         )
     gs = ax_client.generation_strategy
     ax_client = AxClient(db_settings=db_settings)
     ax_client.load_experiment_from_database("test_experiment")
     self.assertEqual(gs, ax_client.generation_strategy)
     with self.assertRaises(ValueError):
         # Overwriting existing experiment.
         ax_client.create_experiment(
             name="test_experiment",
             parameters=[
                 {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
                 {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
             ],
             minimize=True,
         )
     # Overwriting existing experiment with overwrite flag.
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[{"name": "x1", "type": "range", "bounds": [-5.0, 10.0]}],
         overwrite_existing_experiment=True,
     )
     # There should be no trials, as we just put in a fresh experiment.
     self.assertEqual(len(ax_client.experiment.trials), 0)
Example #21
 def test_fixed_random_seed_reproducibility(self):
     ax_client = AxClient(random_seed=239)
     ax_client.create_experiment(parameters=[
         {
             "name": "x",
             "type": "range",
             "bounds": [-5.0, 10.0]
         },
         {
             "name": "y",
             "type": "range",
             "bounds": [0.0, 15.0]
         },
     ])
     for _ in range(5):
         params, idx = ax_client.get_next_trial()
         ax_client.complete_trial(idx,
                                  branin(params.get("x"), params.get("y")))
     trial_parameters_1 = [
         t.arm.parameters for t in ax_client.experiment.trials.values()
     ]
     ax_client = AxClient(random_seed=239)
     ax_client.create_experiment(parameters=[
         {
             "name": "x",
             "type": "range",
             "bounds": [-5.0, 10.0]
         },
         {
             "name": "y",
             "type": "range",
             "bounds": [0.0, 15.0]
         },
     ])
     for _ in range(5):
         params, idx = ax_client.get_next_trial()
         ax_client.complete_trial(idx,
                                  branin(params.get("x"), params.get("y")))
     trial_parameters_2 = [
         t.arm.parameters for t in ax_client.experiment.trials.values()
     ]
     self.assertEqual(trial_parameters_1, trial_parameters_2)
Example #22
 def test_unnamed_experiment_snapshot(self):
     ax_client = AxClient(random_seed=239)
     ax_client.create_experiment(
         parameters=[
             {"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
             {"name": "y", "type": "range", "bounds": [0.0, 15.0]},
         ]
     )
     serialized = ax_client.to_json_snapshot()
     ax_client = AxClient.from_json_snapshot(serialized)
     self.assertIsNone(ax_client.experiment._name)
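
The same round-trip works via files instead of an in-memory snapshot; a sketch using the corresponding AxClient helpers:

ax_client.save_to_json_file("ax_client_snapshot.json")
restored = AxClient.load_from_json_file("ax_client_snapshot.json")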
Example #23
 def test_default_generation_strategy_continuous(self, _a, _b, _c,
                                                 _d) -> None:
     """Test that Sobol+GPEI is used if no GenerationStrategy is provided."""
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[  # pyre-fixme[6]: expected union that should include
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "y",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         objective_name="a",
         minimize=True,
     )
     self.assertEqual(
         [s.model for s in not_none(ax_client.generation_strategy)._steps],
         [Models.SOBOL, Models.GPEI],
     )
     with self.assertRaisesRegex(ValueError, ".* no trials"):
         ax_client.get_optimization_trace(objective_optimum=branin.fmin)
     for i in range(6):
         parameterization, trial_index = ax_client.get_next_trial()
         x, y = parameterization.get("x"), parameterization.get("y")
         ax_client.complete_trial(
             trial_index,
             raw_data={
                 "a": (
                     checked_cast(
                         float,
                         branin(checked_cast(float, x),
                                checked_cast(float, y)),
                     ),
                     0.0,
                 )
             },
             sample_size=i,
         )
     self.assertEqual(ax_client.generation_strategy.model._model_key,
                      "GPEI")
     ax_client.get_optimization_trace(objective_optimum=branin.fmin)
     ax_client.get_contour_plot()
     ax_client.get_feature_importances()
     trials_df = ax_client.get_trials_data_frame()
     self.assertIn("x", trials_df)
     self.assertIn("y", trials_df)
     self.assertIn("a", trials_df)
     self.assertEqual(len(trials_df), 6)
Example #24
 def test_attach_trial_numpy(self):
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[
             {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
             {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
         ],
         minimize=True,
     )
     params, idx = ax_client.attach_trial(parameters={"x1": 0, "x2": 1})
     ax_client.complete_trial(trial_index=idx, raw_data=np.int32(5))
     self.assertEqual(ax_client.get_best_parameters()[0], params)
Example #25
def initialize(filepath='ax_client_snapshot.json'):
    ax_client = AxClient(verbose_logging=False)
    try:
        ax_client = AxClient.load_from_json_file(filepath=filepath)
    except Exception:
        logging.warning("Could not load existing experiment; starting a new one.")
        ax_client.create_experiment(
            name="hypertune_simulation",
            parameters=parameters,
            objective_name="valid/hitrate",
            outcome_constraints=["test/loglik <= 10000"])
    return ax_client
Example #26
def raytune_ax_train(model_params: dict, config_params: dict):
    depth = [int(d) for d in config_params['ht_depth_range'].split(',')]
    features = [
        float(d) for d in config_params['ht_features_range'].split(',')
    ]
    estimators = [int(d) for d in config_params['ht_est_range'].split(',')]
    experiments = config_params['ht_experiments']
    ax = AxClient(enforce_sequential_optimization=False)

    ax.create_experiment(name="hpo_experiment",
                         parameters=[{
                             "name": "max_depth",
                             "type": "range",
                             "bounds": depth,
                             "parameter_type": ParameterType.INT
                         }, {
                             "name": "max_features",
                             "type": "range",
                             "bounds": features,
                             "parameter_type": ParameterType.FLOAT
                         }, {
                             "name": "n_estimators",
                             "type": "range",
                             "bounds": estimators,
                             "parameter_type": ParameterType.INT
                         }],
                         objective_name="accuracy",
                         minimize=False)

    tune.run(
        run_or_experiment=lambda parameters: ax_train_proxy(
            model_params=model_params,
            config_params=config_params,
            ax_params=parameters),
        num_samples=experiments,
        # Note that the argument here is the `AxClient`.
        search_alg=AxSearch(ax),
        # Set verbose to 1 for status updates, or 2 to also see trial results.
        verbose=1,
        # To use a GPU, specify resources_per_trial={"gpu": 1}.
        resources_per_trial=({"gpu": 1} if 'GPU' in config_params['compute']
                             else {"cpu": 8}))

    print(f"FINISHED RAY TUNE RUNE", flush=True)

    best_parameters, best_values = ax.get_best_parameters()
    means, covariances = best_values
    print("Ax Optimization Results:", flush=True)
    print(best_parameters, flush=True)
    print(best_values, flush=True)

    return means['accuracy']
Example #27
def get_ax_client(YR, bayes_params):
    log_paths = get_all_log_paths(YR.args.root_experiment_folder)
    ax_client = None
    if len(log_paths) > 0:
        ax_client = open_log(log_paths)
    if ax_client is None:
        ax_client = AxClient()
        ax_client.create_experiment(
            parameters=bayes_params,
            name=YR.args.experiment_name,
            minimize=False,
            objective_name=YR.args.eval_metric_for_best_epoch)
    return ax_client
Example #28
 def test_constraint_same_as_objective(self):
     """Check that we do not allow constraints on the objective metric."""
     ax_client = AxClient(
         GenerationStrategy(steps=[GenerationStep(model=Models.SOBOL, num_arms=30)])
     )
     with self.assertRaises(ValueError):
         ax_client.create_experiment(
             name="test_experiment",
             parameters=[
                 {"name": "x3", "type": "fixed", "value": 2, "value_type": "int"}
             ],
             objective_name="test_objective",
             outcome_constraints=["test_objective >= 3"],
         )
Example #29
 def test_relative_oc_without_sq(self):
     """Must specify status quo to have relative outcome constraint."""
     ax_client = AxClient()
     with self.assertRaises(ValueError):
         ax_client.create_experiment(
             name="test_experiment",
             parameters=[
                 {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
                 {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
             ],
             objective_name="test_objective",
             minimize=True,
             outcome_constraints=["some_metric <= 4.0%"],
         )
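
A sketch of the corresponding valid setup: the same relative constraint is accepted once a status quo arm is supplied (parameter values are illustrative; assumes `create_experiment` accepts `status_quo` in the Ax version these examples target):

ax_client.create_experiment(
    name="test_experiment",
    parameters=[
        {"name": "x1", "type": "range", "bounds": [-5.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 15.0]},
    ],
    objective_name="test_objective",
    minimize=True,
    outcome_constraints=["some_metric <= 4.0%"],
    status_quo={"x1": 0.0, "x2": 0.0},
)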
Example #30
 def test_get_model_predictions(self, _predict, _tr_data, _obs_from_data):
     ax_client = AxClient()
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {"name": "x", "type": "range", "bounds": [-5.0, 10.0]},
             {"name": "y", "type": "range", "bounds": [0.0, 15.0]},
         ],
         minimize=True,
         objective_name="a",
     )
     ax_client.get_next_trial()
     ax_client.experiment.trials[0].arm._name = "1_1"
     self.assertEqual(ax_client.get_model_predictions(), {0: {"a": (9.0, 1.0)}})