Example #1
    def testTuneSampleAPI(self):
        config = {
            "func": tune.sample_from(lambda spec: spec.config.uniform * 0.01),
            "uniform": tune.uniform(-5, -1),
            "quniform": tune.quniform(3.2, 5.4, 0.2),
            "loguniform": tune.loguniform(1e-4, 1e-2),
            "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
            "choice": tune.choice([2, 3, 4]),
            "randint": tune.randint(-9, 15),
            "lograndint": tune.lograndint(1, 10),
            "qrandint": tune.qrandint(-21, 12, 3),
            "qlograndint": tune.qlograndint(2, 20, 2),
            "randn": tune.randn(10, 2),
            "qrandn": tune.qrandn(10, 2, 0.2),
        }
        for _, (_, generated) in zip(range(1000),
                                     generate_variants({"config": config})):
            out = generated["config"]

            self.assertAlmostEqual(out["func"], out["uniform"] * 0.01)

            self.assertGreaterEqual(out["uniform"], -5)
            self.assertLess(out["uniform"], -1)

            self.assertGreaterEqual(out["quniform"], 3.2)
            self.assertLessEqual(out["quniform"], 5.4)
            self.assertAlmostEqual(out["quniform"] / 0.2,
                                   round(out["quniform"] / 0.2))

            self.assertGreaterEqual(out["loguniform"], 1e-4)
            self.assertLess(out["loguniform"], 1e-2)

            self.assertGreaterEqual(out["qloguniform"], 1e-4)
            self.assertLessEqual(out["qloguniform"], 1e-1)
            self.assertAlmostEqual(out["qloguniform"] / 5e-5,
                                   round(out["qloguniform"] / 5e-5))

            self.assertIn(out["choice"], [2, 3, 4])

            self.assertGreaterEqual(out["randint"], -9)
            self.assertLess(out["randint"], 15)
            self.assertTrue(isinstance(out["randint"], int))

            self.assertGreaterEqual(out["lograndint"], 1)
            self.assertLess(out["lograndint"], 10)
            self.assertTrue(isinstance(out["lograndint"], int))

            self.assertGreaterEqual(out["qrandint"], -21)
            self.assertLessEqual(out["qrandint"], 12)
            self.assertEqual(out["qrandint"] % 3, 0)
            self.assertTrue(isinstance(out["qrandint"], int))

            self.assertGreaterEqual(out["qlograndint"], 2)
            self.assertLessEqual(out["qlograndint"], 20)
            self.assertEqual(out["qlograndint"] % 2, 0)
            self.assertTrue(isinstance(out["qlograndint"], int))

            # (0, 20) is mean +/- 5 sigma for randn(10, 2); falling outside is very improbable
            self.assertGreater(out["randn"], 0)
            self.assertLess(out["randn"], 20)

            self.assertGreater(out["qrandn"], 0)
            self.assertLess(out["qrandn"], 20)
            self.assertAlmostEqual(out["qrandn"] / 0.2,
                                   round(out["qrandn"] / 0.2))
Example #2
# __search_space_end__

# __config_start__
config = {
    "uniform": tune.uniform(-5, -1),  # Uniform float between -5 and -1
    "quniform": tune.quniform(3.2, 5.4, 0.2),  # Round to increments of 0.2
    "loguniform": tune.loguniform(1e-4, 1e-1),  # Uniform float in log space
    # Round to increments of 0.00005
    "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
    "randn": tune.randn(10, 2),  # Normal distribution with mean 10 and sd 2
    "qrandn": tune.qrandn(10, 2, 0.2),  # Round to increments of 0.2
    "randint": tune.randint(-9, 15),  # Random integer in [-9, 15)
    # Round to increments of 3 (includes the upper bound 12)
    "qrandint": tune.qrandint(-21, 12, 3),
    "lograndint": tune.lograndint(1, 10),  # Random integer in [1, 10), log space
    # Round to increments of 2 (includes the upper bound 10)
    "qlograndint": tune.qlograndint(1, 10, 2),
    # Choose one of these options uniformly
    "choice": tune.choice(["a", "b", "c"]),
    # Depends on the value sampled for "uniform"
    "func": tune.sample_from(lambda spec: spec.config.uniform * 0.01),
    "grid": tune.grid_search([32, 64, 128]),  # Search over all these values
}
# __config_end__

# __bayes_start__
from ray.tune.suggest.bayesopt import BayesOptSearch

# Define the search space
search_space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 20)}

algo = BayesOptSearch(random_search_steps=4)
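
The snippet ends right after constructing the searcher. Below is a hedged sketch of how the search space and algorithm above might be wired together with the 1.x-era tune.run API this example uses; the objective function, metric name, and sample count are illustrative assumptions, not part of the original code:

from ray import tune


def objective(config):
    # Toy objective over the two parameters defined in search_space above.
    tune.report(score=config["a"] ** 2 + config["b"])


analysis = tune.run(
    objective,
    config=search_space,
    search_alg=algo,
    metric="score",
    mode="min",
    num_samples=10,
)
print("Best config found:", analysis.best_config)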
Example #3
    def decode_tuning_param_config(encoded_tuning_params_config):
        params = json.loads(encoded_tuning_params_config)
        config = {}
        for param in params:
            # Plain ints and strings are constants: pass them through to the
            # trainable unchanged.
            if isinstance(params[param], (int, str)):
                config[param] = params[param]
                continue

            param_data = params[param]

            if "is_trivial_enum" in param_data and param_data[
                    "is_trivial_enum"]:
                config[param] = tune.choice(param_data["values"])
                continue

            d_from = param_data["domain_from"]
            d_to = param_data["domain_to"]
            d_step = param_data["domain_step"]

            if param_data["type"] == "float":
                if "use_log_scale" in param_data and param_data[
                        "use_log_scale"]:
                    if d_step == 0:
                        config[param] = tune.loguniform(d_from, d_to)
                    else:
                        config[param] = tune.qloguniform(d_from, d_to, d_step)
                else:
                    if d_step == 0:
                        config[param] = tune.uniform(d_from, d_to)
                    else:
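                        # Build the quantized grid explicitly and sample it with
                        # tune.choice (see the note in the integer branch below).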
                        first_val = (d_from // d_step + 1) * d_step
                        iters = int(round((d_to - first_val) / d_step + 1, 1))

                        param_domain = [d_from]
                        param_domain += [
                            round(first_val + i * d_step, 3)
                            for i in range(iters)
                        ]
                        config[param] = tune.choice(param_domain)
            else:
                if "use_log_scale" in param_data and param_data[
                        "use_log_scale"]:
                    if d_step == 0:
                        config[param] = tune.lograndint(d_from, d_to + 1)
                    else:
                        # Warning: qlograndint samples log-uniformly, so the
                        # quantized values are not equally likely.
                        config[param] = tune.qlograndint(d_from, d_to, d_step)
                else:
                    if d_step == 0 or d_step == 1:
                        config[param] = tune.randint(d_from, d_to + 1)
                    else:
                        # Ray Tune does not provide a quantized *uniform* integer
                        # distribution, so quantize manually with tune.choice instead.
                        first_val = (d_from // d_step + 1) * d_step
                        iters = int(round((d_to - first_val) / d_step + 1, 1))

                        param_domain = [d_from]
                        param_domain += [
                            first_val + i * d_step for i in range(iters)
                        ]
                        config[param] = tune.choice(param_domain)
        return config
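
For context, a hedged usage sketch of the decoder above, assuming it is reachable as a standalone function (or staticmethod) and that its module imports json and ray.tune; every parameter name and value here is made up for illustration:

import json

encoded = json.dumps({
    "batch_size": 32,  # plain int: passed through as a constant
    "optimizer": {"is_trivial_enum": True, "values": ["adam", "sgd"]},
    "lr": {"type": "float", "use_log_scale": True,
           "domain_from": 1e-4, "domain_to": 1e-1, "domain_step": 0},
    "num_layers": {"type": "int",
                   "domain_from": 2, "domain_to": 8, "domain_step": 2},
})

config = decode_tuning_param_config(encoded)
# Expected result, following the branches above:
#   {"batch_size": 32,
#    "optimizer": tune.choice(["adam", "sgd"]),
#    "lr": tune.loguniform(1e-4, 1e-1),
#    "num_layers": tune.choice([2, 4, 6, 8])}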