def qrandint(lower, upper, q=1):
    '''
    Uniformly sample an integer between lower and upper (both inclusive).
    The result is rounded to the nearest multiple of the granularity q.

    :param lower: Lower bound of the sampling range.
    :param upper: Upper bound of the sampling range.
    :param q: Integer granularity for the increment.
    '''
    return tune.qrandint(lower, upper, q)
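# Quick sanity check for the wrapper above, assuming `from ray import tune`
# is in scope: Ray Tune domain objects expose .sample(), so draws can be
# validated directly; every value lies in [64, 384] and is a multiple of 8.
dist = qrandint(64, 384, q=8)
for _ in range(5):
    value = dist.sample()
    assert 64 <= value <= 384 and value % 8 == 0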
def testTuneSampleAPI(self):
    config = {
        "func": tune.sample_from(lambda spec: spec.config.uniform * 0.01),
        "uniform": tune.uniform(-5, -1),
        "quniform": tune.quniform(3.2, 5.4, 0.2),
        "loguniform": tune.loguniform(1e-4, 1e-2),
        "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
        "choice": tune.choice([2, 3, 4]),
        "randint": tune.randint(-9, 15),
        "qrandint": tune.qrandint(-21, 12, 3),
        "randn": tune.randn(10, 2),
        "qrandn": tune.qrandn(10, 2, 0.2),
    }
    for _, (_, generated) in zip(
            range(1000), generate_variants({"config": config})):
        out = generated["config"]

        self.assertAlmostEqual(out["func"], out["uniform"] * 0.01)

        self.assertGreaterEqual(out["uniform"], -5)
        self.assertLess(out["uniform"], -1)

        self.assertGreaterEqual(out["quniform"], 3.2)
        self.assertLessEqual(out["quniform"], 5.4)
        self.assertAlmostEqual(out["quniform"] / 0.2,
                               round(out["quniform"] / 0.2))

        self.assertGreaterEqual(out["loguniform"], 1e-4)
        self.assertLess(out["loguniform"], 1e-2)

        self.assertGreaterEqual(out["qloguniform"], 1e-4)
        self.assertLessEqual(out["qloguniform"], 1e-1)
        self.assertAlmostEqual(out["qloguniform"] / 5e-5,
                               round(out["qloguniform"] / 5e-5))

        self.assertIn(out["choice"], [2, 3, 4])

        self.assertGreaterEqual(out["randint"], -9)
        self.assertLess(out["randint"], 15)

        self.assertGreaterEqual(out["qrandint"], -21)
        self.assertLessEqual(out["qrandint"], 12)
        self.assertEqual(out["qrandint"] % 3, 0)

        # Very improbable to fall outside (0, 20) with mean 10 and sd 2.
        self.assertGreater(out["randn"], 0)
        self.assertLess(out["randn"], 20)

        self.assertGreater(out["qrandn"], 0)
        self.assertLess(out["qrandn"], 20)
        self.assertAlmostEqual(out["qrandn"] / 0.2,
                               round(out["qrandn"] / 0.2))
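# The same bounds/granularity check can be sketched outside a TestCase,
# assuming generate_variants is importable from
# ray.tune.suggest.variant_generator (the Ray 1.x path; newer releases moved
# it under ray.tune.search.variant_generator).
from ray import tune
from ray.tune.suggest.variant_generator import generate_variants

for _ in range(100):
    for _resolved, spec in generate_variants(
            {"config": {"x": tune.qrandint(-21, 12, 3)}}):
        x = spec["config"]["x"]
        assert -21 <= x <= 12 and x % 3 == 0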
def configure_search_space(config):
    config["preprocessing"] = {
        "remove_pauses": tune.choice([True, False]),
        "pad": tune.choice(["front", "back"]),
        "max_length": tune.choice([-1, 20, 50, 75]),
    }
    config["model"] = {
        "hidden_size": tune.qrandint(64, 384, q=8),
        # "inner_size": tune.choice([32, 64, 128, 256, 512, 1024, 2048]),
        # "inner_size_multiple": tune.qrandint(2, 4, q=2),
        "inner_size_multiple": 2,
        "num_heads": tune.choice([2, 4, 8]),
        # "num_layers": tune.randint(1, 4),
        "num_layers": 3,
        # "prenorm": tune.choice([False, True]),
        "prenorm": False,
        "scalenorm": tune.choice([False, True]),
        # "kernel_size": tune.choice([None, 11, 33]),
        "kernel_size": 33,
        "dropout": tune.uniform(0.2, 0.4),
    }
    # Dependent parameter: see the standalone sketch after this function.
    # config["model"]["inner_size"] = tune.sample_from(
    #     lambda spec: spec.config.hidden_size * np.random.choice([2, 4]))
    # config["inner_size_multiple"] * config["model"]["hidden_size"]

    # config["lr_scheduler"] = tune.choice([False, True])
    config["lr_scheduler"] = True
    # config["lr_schedule"]["patience"] = tune.randint(2, 5)
    config["lr_schedule"]["patience"] = 5
    # config["lr_schedule"]["factor"] = tune.loguniform(0.1, 0.5)
    config["lr_schedule"]["factor"] = 0.2

    # config["optimizer"] = tune.choice(["SGD", "Adam", "AdamW"])
    config["optimizer"] = "AdamW"
    # config["optim"]["lr"] = tune.loguniform(1e-4, 1e-2)
    config["optim"]["lr"] = 1e-4
    # config["optim"]["weight_decay"] = tune.loguniform(1e-4, 1e-1)
    config["optim"]["weight_decay"] = 5e-4

    # config["data"]["batch_size"] = tune.choice([8, 16, 32, 64])
    config["data"]["batch_size"] = tune.choice([8, 16])

    config["trainer"]["gradient_clip_val"] = tune.choice([0, 0.1])
    # config["trainer"]["gradient_clip_val"] = 0.1
    return config
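# Standalone sketch of the dependent-parameter pattern commented out above,
# assuming a flat config (tune.sample_from resolves `spec.config.<key>` for
# top-level keys; the nested "model" dict would need its own flattening).
import numpy as np
from ray import tune

flat_space = {
    "hidden_size": tune.qrandint(64, 384, q=8),
    # inner_size becomes 2x or 4x whatever hidden_size resolves to.
    "inner_size": tune.sample_from(
        lambda spec: spec.config.hidden_size * np.random.choice([2, 4])),
}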
def main():
    ray.init(logging_level=logging.WARNING)
    space = {
        "wandb_dir": os.getcwd(),
        "wandb_tags": "unstable controllable".split(),
        "seed": 42,
        "estimator": "maac",
        "K": 8,
        "B": tune.qrandint(20, 400, 20),
        "optimizer": "SGD",
        "learning_rate": tune.loguniform(1e-5, 1e-2),
        "clip_grad_norm": 100,
    }
    for env_dim in range(10, 11):
        hyperopt_search = HyperOptSearch(
            {"env_dim": env_dim, **space},
            metric="true_value",
            mode="max",
            random_state_seed=42,
        )
        name = f"HparamSearch-Dim{env_dim}"
        analysis = tune.run(
            HparamSearch,
            name=name,
            search_alg=hyperopt_search,
            local_dir="./results",
            stop={"time_total_s": 300},
            num_samples=1000,
        )
        best = analysis.get_best_config("true_value", mode="max")
        with open(os.path.join("./results", name, "search_best.json"), "w") as file:
            json.dump(best, file)
    ray.shutdown()
# __search_space_start__
space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 1)}
tune.run(trainable, config=space, num_samples=10)
# __search_space_end__

# __config_start__
config = {
    "uniform": tune.uniform(-5, -1),  # Uniform float between -5 and -1
    "quniform": tune.quniform(3.2, 5.4, 0.2),  # Round to increments of 0.2
    "loguniform": tune.loguniform(1e-4, 1e-1),  # Uniform float in log space
    "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),  # Round to increments of 0.00005
    "randn": tune.randn(10, 2),  # Normal distribution with mean 10 and sd 2
    "qrandn": tune.qrandn(10, 2, 0.2),  # Round to increments of 0.2
    "randint": tune.randint(-9, 15),  # Random integer between -9 and 15 (15 exclusive)
    "qrandint": tune.qrandint(-21, 12, 3),  # Round to increments of 3 (includes 12)
    "lograndint": tune.lograndint(1, 10),  # Random integer in log space
    "qlograndint": tune.qlograndint(1, 10, 2),  # Round to increments of 2 (includes 10)
    "choice": tune.choice(["a", "b", "c"]),  # Choose one of these options uniformly
    "func": tune.sample_from(
        lambda spec: spec.config.uniform * 0.01),  # Depends on other value
    "grid": tune.grid_search([32, 64, 128]),  # Search over all these values
}
# __config_end__

# __bayes_start__
from ray.tune.suggest.bayesopt import BayesOptSearch

# Define the search space
search_space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 20)}
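# One plausible continuation of the __bayes__ example (the trainable name and
# "mean_loss" metric are assumptions, not part of the excerpt above): pass the
# searcher to tune.run via search_alg so BayesOpt proposes the samples.
algo = BayesOptSearch(metric="mean_loss", mode="min")
tune.run(trainable, config=search_space, search_alg=algo, num_samples=10)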
}, "utterance.cell_type": { "space": "choice", "categories": ["rnn", "gru", "lstm"] }, }, "goal": "maximize", "num_samples": 4, }, } if RAY_AVAILABLE: EXPECTED_SEARCH_SPACE = { "test_1": { "trainer.learning_rate": tune.uniform(0.001, 0.1), "combiner.num_fc_layers": tune.qrandint(3, 6, 3), "utterance.cell_type": tune.grid_search(["rnn", "gru", "lstm"]), }, "test_2": { "trainer.learning_rate": tune.loguniform(0.001, 0.1), "combiner.num_fc_layers": tune.randint(2, 6), "utterance.cell_type": tune.choice(["rnn", "gru", "lstm"]), }, } @pytest.mark.skipif(not RAY_AVAILABLE, reason="Ray is not installed for testing") @pytest.mark.parametrize("key", ["test_1", "test_2"]) def test_grid_strategy(key):
"random_state": 1 } abc_config = { "class": AdaBoostClassifier, # "base_estimator": DecisionTreeClassifier(), # How to use something else than DecisionTreeClassifier? "base_estimator": tune.choice([ SVC(kernel="rbf", gamma=0.05, probability=False, C=23), NuSVC(kernel="rbf", gamma="auto", probability=False, nu=0.4), DecisionTreeClassifier() ]), "n_estimators": tune.qrandint(10, 100, 10), "learning_rate": tune.uniform(0.5, 1.5), "random_state": 1, "algorithm": "SAMME" } knn_config = { "class": KNeighborsClassifier, "n_neighbors": tune.randint(3, 20), "weights": tune.choice(["uniform", "distance"]), "algorithm": tune.choice(["auto", "ball_tree", "kd_tree", "brute"]), "leaf_size": tune.qrandint(10, 60, 5), }