Example 1
    def update_search_space(self, search_space):
        '''
        Tuners are advised to support updating the search space at run-time.
        If a tuner can only set the search space once, before generating the
        first hyper-parameters, it should document this behaviour explicitly.

        search_space: JSON object created by the experiment owner
        '''
        config = {}
        for key, value in search_space.items():
            v = value.get("_value")
            _type = value['_type']
            if _type == 'choice':
                config[key] = choice(v)
            elif _type == 'randint':
                config[key] = randint(v[0], v[1] - 1)
            elif _type == 'uniform':
                config[key] = uniform(v[0], v[1])
            elif _type == 'quniform':
                config[key] = quniform(v[0], v[1], v[2])
            elif _type == 'loguniform':
                config[key] = loguniform(v[0], v[1])
            elif _type == 'qloguniform':
                config[key] = qloguniform(v[0], v[1], v[2])
            elif _type == 'normal':
                config[key] = randn(v[1], v[2])
            elif _type == 'qnormal':
                config[key] = qrandn(v[1], v[2], v[3])
            else:
                raise ValueError(
                    f'unsupported type in search_space {_type}')
        self._ls.set_search_properties(None, None, config)
        if self._gs is not None:
            self._gs.set_search_properties(None, None, config)
        self._init_search()
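A minimal usage sketch, assuming an instantiated tuner (the `tuner` variable is hypothetical; NNI normally calls this method itself). The search-space layout follows NNI's {"_type": ..., "_value": ...} convention:

    search_space = {
        "lr": {"_type": "loguniform", "_value": [1e-4, 1e-1]},
        "batch_size": {"_type": "choice", "_value": [16, 32, 64]},
    }
    tuner.update_search_space(search_space)  # tuner: an instance of this class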
Example 2
    def update_search_space(self, search_space):
        """Required by NNI.

        Tuners are advised to support updating the search space at run-time.
        If a tuner can only set the search space once, before generating the
        first hyper-parameters, it should document this behaviour explicitly.

        Args:
            search_space: JSON object created by experiment owner.
        """
        config = {}
        for key, value in search_space.items():
            v = value.get("_value")
            _type = value["_type"]
            if _type == "choice":
                config[key] = choice(v)
            elif _type == "randint":
                config[key] = randint(*v)
            elif _type == "uniform":
                config[key] = uniform(*v)
            elif _type == "quniform":
                config[key] = quniform(*v)
            elif _type == "loguniform":
                config[key] = loguniform(*v)
            elif _type == "qloguniform":
                config[key] = qloguniform(*v)
            elif _type == "normal":
                config[key] = randn(*v)
            elif _type == "qnormal":
                config[key] = qrandn(*v)
            else:
                raise ValueError(f"unsupported type in search_space {_type}")
        # low_cost_partial_config was passed to the constructor,
        # i.e. before update_search_space() is called, so reuse it here
        init_config = self._ls.init_config
        add_cost_to_space(config, init_config, self._cat_hp_cost)
        self._ls = self.LocalSearch(
            init_config,
            self._ls.metric,
            self._mode,
            config,
            self._ls.resource_attr,
            self._ls.min_resource,
            self._ls.max_resource,
            self._ls.resource_multiple_factor,
            cost_attr=self.cost_attr,
            seed=self._ls.seed,
        )
        if self._gs is not None:
            self._gs = GlobalSearch(
                space=config,
                metric=self._metric,
                mode=self._mode,
                sampler=self._gs._sampler,
            )
            self._gs.space = config
        self._init_search()
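Because low_cost_partial_config is fixed at construction time while the space itself can be replaced later, the call order matters. A minimal lifecycle sketch, assuming a FLAML BlendSearch-style constructor (argument names follow that API; the values are illustrative):

    searcher = BlendSearch(
        metric="val_loss",
        mode="min",
        low_cost_partial_config={"batch_size": 16},  # fixed at construction
    )
    # NNI may push (or re-push) the search space at run-time:
    searcher.update_search_space(search_space)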
Example 3
    def testTuneSampleAPI(self):
        config = {
            "func": tune.sample_from(lambda spec: spec.config.uniform * 0.01),
            "uniform": tune.uniform(-5, -1),
            "quniform": tune.quniform(3.2, 5.4, 0.2),
            "loguniform": tune.loguniform(1e-4, 1e-2),
            "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
            "choice": tune.choice([2, 3, 4]),
            "randint": tune.randint(-9, 15),
            "qrandint": tune.qrandint(-21, 12, 3),
            "randn": tune.randn(10, 2),
            "qrandn": tune.qrandn(10, 2, 0.2),
        }
        for _, (_, generated) in zip(
                range(1000), generate_variants({
                    "config": config
                })):
            out = generated["config"]

            self.assertAlmostEqual(out["func"], out["uniform"] * 0.01)

            self.assertGreaterEqual(out["uniform"], -5)
            self.assertLess(out["uniform"], -1)

            self.assertGreaterEqual(out["quniform"], 3.2)
            self.assertLessEqual(out["quniform"], 5.4)
            self.assertAlmostEqual(out["quniform"] / 0.2,
                                   round(out["quniform"] / 0.2))

            self.assertGreaterEqual(out["loguniform"], 1e-4)
            self.assertLess(out["loguniform"], 1e-2)

            self.assertGreaterEqual(out["qloguniform"], 1e-4)
            self.assertLessEqual(out["qloguniform"], 1e-1)
            self.assertAlmostEqual(out["qloguniform"] / 5e-5,
                                   round(out["qloguniform"] / 5e-5))

            self.assertIn(out["choice"], [2, 3, 4])

            self.assertGreaterEqual(out["randint"], -9)
            self.assertLess(out["randint"], 15)

            self.assertGreaterEqual(out["qrandint"], -21)
            self.assertLessEqual(out["qrandint"], 12)
            self.assertEqual(out["qrandint"] % 3, 0)

            # Not guaranteed for a normal sample, but a violation is
            # vanishingly improbable (mean 10, sd 2, so 0 and 20 are 5 sigma)
            self.assertGreater(out["randn"], 0)
            self.assertLess(out["randn"], 20)

            self.assertGreater(out["qrandn"], 0)
            self.assertLess(out["qrandn"], 20)
            self.assertAlmostEqual(out["qrandn"] / 0.2,
                                   round(out["qrandn"] / 0.2))
Example 4
def main():
    logging.basicConfig(level=logging.INFO)

    # Ray Tune parameters
    num_samples = 10
    envname = 'AdversarialAntBulletEnv-v0'
    trainingconfig = Path.cwd() / 'trainingconfig.json'
    evaluate_mean_n = 1000  # Number of timesteps over which to evaluate the mean reward
    name_fmt = 'million-bucks_{adv_force}'

    config = {
        # TODO: sample from control once, then different adversarial strengths
        # Range is centered on the force that achieves the closest reward to the control (7.5)
        "adv_force": tune.qrandn(7.5, 2.5, 0.1),
    }

    # https://docs.ray.io/en/master/tune/tutorials/overview.html#which-search-algorithm-scheduler-should-i-choose
    # Use BOHB for larger problems with a small number of hyperparameters
    # search = TuneBOHB(max_concurrent=4, metric="mean_loss", mode="min")
    # sched = HyperBandForBOHB(
    #     time_attr="training_iteration",
    #     max_t=100,
    # )

    # Implicitly use random search if search algo is not specified
    sched = ASHAScheduler(
        time_attr='training_iteration',
        max_t=100,
        grace_period=1,  # Unit is iterations, not timesteps. TODO configure
    )

    # Pass in a Trainable class or function to tune.run.
    local_dir = str(Path.cwd() / "ray")
    logging.info(f'{local_dir=}')
    analysis = tune.run(
        tune.with_parameters(trainable,
                             envname=envname,
                             trainingconfig=trainingconfig,
                             evaluate_mean_n=evaluate_mean_n,
                             name_fmt=name_fmt),
        config=config,
        num_samples=num_samples,
        scheduler=sched,
        local_dir=local_dir,
        metric="robustness",
        mode="max",
        log_to_file=True)
    logging.info(f'best config: {analysis.best_config}')
    logging.info(f'best result: {analysis.best_result}')
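For reference, a trainable compatible with the tune.run call above might look like the sketch below. Only the keyword parameters and the reported robustness metric are implied by that call; the body here is an assumption:

    def trainable(config, envname=None, trainingconfig=None,
                  evaluate_mean_n=None, name_fmt=None):
        adv_force = config["adv_force"]  # sampled by Tune from the qrandn above
        for _ in range(100):  # one report per training_iteration, for ASHA
            score = 0.0  # placeholder: e.g. mean reward over evaluate_mean_n steps
            tune.report(robustness=score)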
Example 5
# __run_tunable_samples_end__

# __search_space_start__
space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 1)}
tune.run(trainable, config=space, num_samples=10)
# __search_space_end__

# __config_start__
config = {
    "uniform": tune.uniform(-5, -1),  # Uniform float between -5 and -1
    "quniform": tune.quniform(3.2, 5.4, 0.2),  # Round to increments of 0.2
    "loguniform": tune.loguniform(1e-4, 1e-1),  # Uniform float in log space
    "qloguniform": tune.qloguniform(1e-4, 1e-1,
                                    5e-5),  # Round to increments of 0.00005
    "randn": tune.randn(10, 2),  # Normal distribution with mean 10 and sd 2
    "qrandn": tune.qrandn(10, 2, 0.2),  # Round to increments of 0.2
    "randint": tune.randint(-9, 15),  # Random integer between -9 and 15
    "qrandint": tune.qrandint(-21, 12,
                              3),  # Round to increments of 3 (includes 12)
    "lograndint": tune.lograndint(1, 10),  # Random integer in log space
    "qlograndint": tune.qlograndint(1, 10, 2),  # Round to increments of 2
    "choice": tune.choice(["a", "b",
                           "c"]),  # Choose one of these options uniformly
    "func": tune.sample_from(
        lambda spec: spec.config.uniform * 0.01),  # Depends on other value
    "grid": tune.grid_search([32, 64, 128]),  # Search over all these values
}
# __config_end__
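Each distribution above can also be sampled directly, which is handy for sanity-checking a range before launching a run; a small sketch using the Domain.sample() method:

    dist = tune.quniform(3.2, 5.4, 0.2)
    print(dist.sample())  # e.g. 4.2; always a multiple of 0.2 within [3.2, 5.4]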

# __bayes_start__
from ray.tune.suggest.bayesopt import BayesOptSearch