Example #1
    def set_basic_conf(self):
        from dragonfly.opt.gp_bandit import EuclideanGPBandit
        from dragonfly.exd.experiment_caller import EuclideanFunctionCaller
        from dragonfly import load_config

        def cost(space, reporter):
            height, width = space["point"]
            reporter(loss=(height - 14)**2 - abs(width - 3))

        domain_vars = [{
            "name": "height",
            "type": "float",
            "min": -10,
            "max": 10
        }, {
            "name": "width",
            "type": "float",
            "min": 0,
            "max": 20
        }]

        domain_config = load_config({"domain": domain_vars})

        func_caller = EuclideanFunctionCaller(
            None, domain_config.domain.list_of_domains[0])
        optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
        search_alg = DragonflySearch(
            optimizer,
            metric="loss",
            mode="min",
            max_concurrent=1000,  # Here to avoid breaking back-compat.
        )
        return search_alg, cost
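
A minimal sketch (not part of the original test) of how the returned pair could be consumed, assuming ray[tune] is installed; the sample budget below is an arbitrary illustrative value:

from ray import tune

search_alg, cost = set_basic_conf()  # hypothetical standalone call; above it is a test method
analysis = tune.run(
    cost,                   # reporter-style trainable built above
    search_alg=search_alg,  # DragonflySearch already carries metric="loss", mode="min"
    num_samples=10,         # illustrative trial budget
)
print(analysis.get_best_config(metric="loss", mode="min")["point"])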
Example #2
    def testDragonfly(self):
        from ray.tune.suggest.dragonfly import DragonflySearch

        searcher = DragonflySearch(space=self.config,
                                   metric=self.metric_name,
                                   mode="max",
                                   domain="euclidean",
                                   optimizer="random")

        self._save(searcher)

        searcher = DragonflySearch(space=self.config,
                                   metric=self.metric_name,
                                   mode="max",
                                   domain="euclidean",
                                   optimizer="random")
        self._restore(searcher)
Example #3
    def testConvergenceDragonfly(self):
        from ray.tune.suggest.dragonfly import DragonflySearch

        np.random.seed(0)
        searcher = DragonflySearch(domain="euclidean", optimizer="bandit")
        analysis = self._testConvergence(searcher)

        assert len(analysis.trials) < 100
        assert math.isclose(analysis.best_config["x"], 0, abs_tol=1e-5)
Example #4
    def testDragonfly(self):
        from ray.tune.suggest.dragonfly import DragonflySearch

        np.random.seed(1000)  # At least one nan, inf, -inf and float

        out = tune.run(_invalid_objective,
                       search_alg=DragonflySearch(domain="euclidean",
                                                  optimizer="random"),
                       config=self.config,
                       mode="max",
                       num_samples=8,
                       reuse_actors=False)

        best_trial = out.best_trial
        self.assertLessEqual(best_trial.config["point"], 2.0)
Example #5
    def testDragonfly(self):
        from ray.tune.suggest.dragonfly import DragonflySearch

        searcher = DragonflySearch(space=self.space,
                                   metric="metric",
                                   mode="max",
                                   domain="euclidean",
                                   optimizer="bandit")

        point = {
            self.param_name: self.valid_value,
        }

        get_len_X = lambda s: len(s._opt.history.curr_opt_points)  # noqa: E731
        get_len_y = lambda s: len(s._opt.history.curr_opt_vals)  # noqa: E731

        self.run_add_evaluated_point(point, searcher, get_len_X, get_len_y)
        self.run_add_evaluated_trials(searcher, get_len_X, get_len_y)
Example #6
def _test_roberta(method='BlendSearch'):

    max_num_epoch = 100
    num_samples = -1
    time_budget_s = 3600

    search_space = {
        # You can mix constants with search space objects.
        "num_train_epochs": flaml.tune.loguniform(1, max_num_epoch),
        "learning_rate": flaml.tune.loguniform(1e-5, 3e-5),
        "weight_decay": flaml.tune.uniform(0, 0.3),
        "per_device_train_batch_size": flaml.tune.choice([16, 32, 64, 128]),
        "seed": flaml.tune.choice([12, 22, 33, 42]),
    }

    start_time = time.time()
    ray.init(num_cpus=4, num_gpus=4)
    if 'ASHA' == method:
        algo = None
    elif 'BOHB' == method:
        from ray.tune.schedulers import HyperBandForBOHB
        from ray.tune.suggest.bohb import TuneBOHB
        algo = TuneBOHB(max_concurrent=4)
        scheduler = HyperBandForBOHB(max_t=max_num_epoch)
    elif 'Optuna' == method:
        from ray.tune.suggest.optuna import OptunaSearch
        algo = OptunaSearch()
    elif 'CFO' == method:
        from flaml import CFO
        algo = CFO(points_to_evaluate=[{
            "num_train_epochs": 1,
            "per_device_train_batch_size": 128,
        }])
    elif 'BlendSearch' == method:
        from flaml import BlendSearch
        algo = BlendSearch(
            points_to_evaluate=[{
                "num_train_epochs": 1,
                "per_device_train_batch_size": 128,
            }])
    elif 'Dragonfly' == method:
        from ray.tune.suggest.dragonfly import DragonflySearch
        algo = DragonflySearch()
    elif 'SkOpt' == method:
        from ray.tune.suggest.skopt import SkOptSearch
        algo = SkOptSearch()
    elif 'Nevergrad' == method:
        from ray.tune.suggest.nevergrad import NevergradSearch
        import nevergrad as ng
        algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
    elif 'ZOOpt' == method:
        from ray.tune.suggest.zoopt import ZOOptSearch
        algo = ZOOptSearch(budget=num_samples)
    elif 'Ax' == method:
        from ray.tune.suggest.ax import AxSearch
        algo = AxSearch(max_concurrent=3)
    elif 'HyperOpt' == method:
        from ray.tune.suggest.hyperopt import HyperOptSearch
        algo = HyperOptSearch()
        scheduler = None
    if method != 'BOHB':
        from ray.tune.schedulers import ASHAScheduler
        scheduler = ASHAScheduler(max_t=max_num_epoch, grace_period=1)
    scheduler = None  # note: this unconditionally overrides the ASHA scheduler configured above
    analysis = ray.tune.run(train_roberta,
                            metric=HP_METRIC,
                            mode=MODE,
                            resources_per_trial={
                                "gpu": 4,
                                "cpu": 4
                            },
                            config=search_space,
                            local_dir='logs/',
                            num_samples=num_samples,
                            time_budget_s=time_budget_s,
                            keep_checkpoints_num=1,
                            checkpoint_score_attr=HP_METRIC,
                            scheduler=scheduler,
                            search_alg=algo)

    ray.shutdown()

    best_trial = analysis.get_best_trial(HP_METRIC, MODE, "all")
    metric = best_trial.metric_analysis[HP_METRIC][MODE]

    logger.info(f"method={method}")
    logger.info(f"n_trials={len(analysis.trials)}")
    logger.info(f"time={time.time()-start_time}")
    logger.info(f"Best model eval {HP_METRIC}: {metric:.4f}")
    logger.info(f"Best model parameters: {best_trial.config}")
Example #7
    def testConvertDragonfly(self):
        from ray.tune.suggest.dragonfly import DragonflySearch

        config = {
            "a": tune.sample.Categorical([2, 3, 4]).uniform(),
            "b": {
                "x": tune.sample.Integer(0, 5).quantized(2),
                "y": 4,
                "z": tune.sample.Float(1e-4, 1e-2).loguniform()
            }
        }
        with self.assertRaises(ValueError):
            converted_config = DragonflySearch.convert_search_space(config)

        config = {
            "a": 4,
            "b": {
                "z": tune.sample.Float(1e-4, 1e-2).loguniform()
            }
        }
        dragonfly_config = [{
            "name": "b/z",
            "type": "float",
            "min": 1e-4,
            "max": 1e-2
        }]
        converted_config = DragonflySearch.convert_search_space(config)

        np.random.seed(1234)
        searcher1 = DragonflySearch(optimizer="bandit",
                                    domain="euclidean",
                                    space=converted_config,
                                    metric="none")

        config1 = searcher1.suggest("0")

        np.random.seed(1234)
        searcher2 = DragonflySearch(optimizer="bandit",
                                    domain="euclidean",
                                    space=dragonfly_config,
                                    metric="none")
        config2 = searcher2.suggest("0")

        self.assertEqual(config1, config2)
        self.assertLess(config2["point"], 1e-2)

        searcher = DragonflySearch()
        invalid_config = {"a/b": tune.uniform(4.0, 8.0)}
        with self.assertRaises(ValueError):
            searcher.set_search_properties("none", "max", invalid_config)
        invalid_config = {"a": {"b/c": tune.uniform(4.0, 8.0)}}
        with self.assertRaises(ValueError):
            searcher.set_search_properties("none", "max", invalid_config)

        searcher = DragonflySearch(optimizer="bandit",
                                   domain="euclidean",
                                   metric="a",
                                   mode="max")
        analysis = tune.run(_mock_objective,
                            config=config,
                            search_alg=searcher,
                            num_samples=1)
        trial = analysis.trials[0]
        self.assertLess(trial.config["point"], 1e-2)

        mixed_config = {
            "a": tune.uniform(5, 6),
            "b": tune.uniform(8, 9)  # Cannot mix List and Dict
        }
        searcher = DragonflySearch(space=mixed_config,
                                   optimizer="bandit",
                                   domain="euclidean",
                                   metric="a",
                                   mode="max")
        config = searcher.suggest("0")

        self.assertTrue(5 <= config["point"][0] <= 6)
        self.assertTrue(8 <= config["point"][1] <= 9)
Example #8
        "type": "float",
        "min": 0,
        "max": 7
    }, {
        "name": "Li2SO4_vol",
        "type": "float",
        "min": 0,
        "max": 7
    }, {
        "name": "NaClO4_vol",
        "type": "float",
        "min": 0,
        "max": 7
    }]

    domain_config = load_config({"domain": domain_vars})

    func_caller = EuclideanFunctionCaller(
        None, domain_config.domain.list_of_domains[0])
    optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
    algo = DragonflySearch(optimizer,
                           max_concurrent=4,
                           metric="objective",
                           mode="max")
    scheduler = AsyncHyperBandScheduler(metric="objective", mode="max")
    run(objective,
        name="dragonfly_search",
        search_alg=algo,
        scheduler=scheduler,
        **config)
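
The fragment relies on an objective trainable and a config kwargs dict defined earlier in the original script; a minimal, purely illustrative sketch of what they might look like (the scoring formula is made up):

def objective(config, reporter):
    # DragonflySearch delivers the suggested coordinates as config["point"],
    # ordered like the domain variables above (LiNO3_vol, Li2SO4_vol, NaClO4_vol).
    lino3, li2so4, naclo4 = config["point"]
    score = -((lino3 - 3.0) ** 2 + (li2so4 - 2.0) ** 2 + (naclo4 - 1.0) ** 2)  # toy score
    reporter(objective=score)

config = {"num_samples": 10}  # illustrative kwargs expanded into run(...) above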
Example #9
def _test_xgboost(method='BlendSearch'):
    try:
        import ray
    except ImportError:
        return
    if method == 'BlendSearch':
        from flaml import tune
    else:
        from ray import tune
    search_space = {
        # You can mix constants with search space objects.
        "max_depth": tune.randint(1, 8) if method in [
            "BlendSearch", "BOHB", "Optuna"] else tune.randint(1, 9),
        "min_child_weight": tune.choice([1, 2, 3]),
        "subsample": tune.uniform(0.5, 1.0),
        "eta": tune.loguniform(1e-4, 1e-1)
    }
    max_iter = 10
    for num_samples in [256]:
        time_budget_s = 60  # None
        for n_cpu in [8]:
            start_time = time.time()
            ray.init(num_cpus=n_cpu, num_gpus=0)
            if method == 'BlendSearch':
                analysis = tune.run(
                    train_breast_cancer,
                    init_config={
                        "max_depth": 1,
                        "min_child_weight": 3,
                    },
                    cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    },
                    metric="eval-logloss",
                    mode="min",
                    max_resource=max_iter,
                    min_resource=1,
                    report_intermediate_result=True,
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space,
                    local_dir='logs/',
                    num_samples=num_samples*n_cpu,
                    time_budget_s=time_budget_s,
                    use_ray=True)
            else:
                if 'ASHA' == method:
                    algo = None
                elif 'BOHB' == method:
                    from ray.tune.schedulers import HyperBandForBOHB
                    from ray.tune.suggest.bohb import TuneBOHB
                    algo = TuneBOHB(max_concurrent=n_cpu)
                    scheduler = HyperBandForBOHB(max_t=max_iter)
                elif 'Optuna' == method:
                    from ray.tune.suggest.optuna import OptunaSearch
                    algo = OptunaSearch()
                elif 'CFO' == method:
                    from flaml import CFO
                    algo = CFO(points_to_evaluate=[{
                        "max_depth": 1,
                        "min_child_weight": 3,
                    }], cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    })
                elif 'Dragonfly' == method:
                    from ray.tune.suggest.dragonfly import DragonflySearch
                    algo = DragonflySearch()
                elif 'SkOpt' == method:
                    from ray.tune.suggest.skopt import SkOptSearch
                    algo = SkOptSearch()
                elif 'Nevergrad' == method:
                    from ray.tune.suggest.nevergrad import NevergradSearch
                    import nevergrad as ng
                    algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
                elif 'ZOOpt' == method:
                    from ray.tune.suggest.zoopt import ZOOptSearch
                    algo = ZOOptSearch(budget=num_samples*n_cpu)
                elif 'Ax' == method:
                    from ray.tune.suggest.ax import AxSearch
                    algo = AxSearch()
                elif 'HyperOpt' == method:
                    from ray.tune.suggest.hyperopt import HyperOptSearch
                    algo = HyperOptSearch()
                    scheduler = None
                if method != 'BOHB':
                    from ray.tune.schedulers import ASHAScheduler
                    scheduler = ASHAScheduler(
                        max_t=max_iter,
                        grace_period=1)
                analysis = tune.run(
                    train_breast_cancer,
                    metric="eval-logloss",
                    mode="min",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space, local_dir='logs/',
                    num_samples=num_samples*n_cpu, time_budget_s=time_budget_s,
                    scheduler=scheduler, search_alg=algo)
            ray.shutdown()
            # # Load the best model checkpoint
            # best_bst = xgb.Booster()
            # best_bst.load_model(os.path.join(analysis.best_checkpoint,
            #  "model.xgb"))
            best_trial = analysis.get_best_trial("eval-logloss", "min", "all")
            accuracy = 1. - best_trial.metric_analysis["eval-error"]["min"]
            logloss = best_trial.metric_analysis["eval-logloss"]["min"]
            logger.info(f"method={method}")
            logger.info(f"n_samples={num_samples*n_cpu}")
            logger.info(f"time={time.time()-start_time}")
            logger.info(f"Best model eval loss: {logloss:.4f}")
            logger.info(f"Best model total accuracy: {accuracy:.4f}")
            logger.info(f"Best model parameters: {best_trial.config}")
Example #10
def hparams(algorithm, scheduler, num_samples, tensorboard, bare):
    from glob import glob

    import tensorflow.summary
    from tensorflow import random as tfrandom, int64 as tfint64
    from ray import init as init_ray, shutdown as shutdown_ray
    from ray import tune
    from wandb.ray import WandbLogger
    from wandb import sweep as wandbsweep
    from wandb.apis import CommError as wandbCommError

    # fewer summaries are logged if MLENCRYPT_TB is TRUE (for efficiency)
    # TODO: use tf.summary.record_if?
    environ["MLENCRYPT_TB"] = str(tensorboard).upper()
    environ["MLENCRYPT_BARE"] = str(bare).upper()
    if getenv('MLENCRYPT_TB', 'FALSE') == 'TRUE' and \
            getenv('MLENCRYPT_BARE', 'FALSE') == 'TRUE':
        raise ValueError('TensorBoard logging cannot be enabled in bare mode.')

    logdir = f'logs/hparams/{datetime.now()}'

    # "These results show that K = 3 is the optimal choice for the
    # cryptographic application of neural synchronization. K = 1 and K = 2 are
    # too insecure in regard to the geometric attack. And for K > 3 the effort
    # of A and B grows exponentially with increasing L, while the simple attack
    # is quite successful in the limit K -> infinity. Consequently, one should
    # only use Tree Parity Machines with three hidden units for the neural
    # key-exchange protocol." (Ruttor, 2006)
    # https://arxiv.org/pdf/0711.2411.pdf#page=59

    update_rules = [
        'random-same',
        # 'random-different-A-B-E', 'random-different-A-B',
        'hebbian',
        'anti_hebbian',
        'random_walk'
    ]
    K_bounds = {'min': 4, 'max': 8}
    N_bounds = {'min': 4, 'max': 8}
    L_bounds = {'min': 4, 'max': 8}

    # TODO: don't use *_bounds.values() since .values doesn't preserve order

    def get_session_num(logdir):
        current_runs = glob(join(logdir, "run-*"))
        if current_runs:
            last_run_path = current_runs[-1]
            last_run_session_num = int(last_run_path.split('-')[-1])
            return last_run_session_num + 1
        else:  # there are no runs yet, start at 0
            return 0

    def trainable(config, reporter):
        """
        Args:
            config (dict): Parameters provided from the search algorithm
                or variant generation.
        """
        if not isinstance(config['update_rule'], str):
            update_rule = update_rules[int(config['update_rule'])]
        else:
            update_rule = config['update_rule']
        K, N, L = int(config['K']), int(config['N']), int(config['L'])

        run_name = f"run-{get_session_num(logdir)}"
        run_logdir = join(logdir, run_name)
        # for each attack, the TPMs should start with the same weights
        initial_weights_tensors = get_initial_weights(K, N, L)
        training_steps_ls = {}
        eve_scores_ls = {}
        losses_ls = {}
        # for each attack, the TPMs should use the same inputs
        seed = tfrandom.uniform([],
                                minval=0,
                                maxval=tfint64.max,
                                dtype=tfint64).numpy()
        for attack in ['none', 'geometric']:
            initial_weights = {
                tpm: weights_tensor_to_variable(weights, tpm)
                for tpm, weights in initial_weights_tensors.items()
            }
            tfrandom.set_seed(seed)

            if tensorboard:
                attack_logdir = join(run_logdir, attack)
                attack_writer = tensorflow.summary.create_file_writer(
                    attack_logdir)
                with attack_writer.as_default():
                    training_steps, sync_scores, loss = run(
                        update_rule, K, N, L, attack, initial_weights)
            else:
                training_steps, sync_scores, loss = run(
                    update_rule, K, N, L, attack, initial_weights)
            training_steps_ls[attack] = training_steps
            eve_scores_ls[attack] = sync_scores
            losses_ls[attack] = loss
        avg_training_steps = tensorflow.math.reduce_mean(
            list(training_steps_ls.values()))
        avg_eve_score = tensorflow.math.reduce_mean(
            list(eve_scores_ls.values()))
        mean_loss = tensorflow.math.reduce_mean(list(losses_ls.values()))
        reporter(
            avg_training_steps=avg_training_steps.numpy(),
            avg_eve_score=avg_eve_score.numpy(),
            mean_loss=mean_loss.numpy(),
            done=True,
        )

    if algorithm == 'hyperopt':
        from hyperopt import hp as hyperopt
        from hyperopt.pyll.base import scope
        from ray.tune.suggest.hyperopt import HyperOptSearch

        space = {
            'update_rule': hyperopt.choice(
                'update_rule',
                update_rules,
            ),
            'K': scope.int(hyperopt.quniform('K', *K_bounds.values(), q=1)),
            'N': scope.int(hyperopt.quniform('N', *N_bounds.values(), q=1)),
            'L': scope.int(hyperopt.quniform('L', *L_bounds.values(), q=1)),
        }
        algo = HyperOptSearch(
            space,
            metric='mean_loss',
            mode='min',
            points_to_evaluate=[
                {
                    'update_rule': 0,
                    'K': 3,
                    'N': 16,
                    'L': 8
                },
                {
                    'update_rule': 0,
                    'K': 8,
                    'N': 16,
                    'L': 8
                },
                {
                    'update_rule': 0,
                    'K': 8,
                    'N': 16,
                    'L': 128
                },
            ],
        )
    elif algorithm == 'bayesopt':
        from ray.tune.suggest.bayesopt import BayesOptSearch

        space = {
            'update_rule': (0, len(update_rules)),
            'K': tuple(K_bounds.values()),
            'N': tuple(N_bounds.values()),
            'L': tuple(L_bounds.values()),
        }
        algo = BayesOptSearch(
            space,
            metric="mean_loss",
            mode="min",
            # TODO: what is utility_kwargs for and why is it needed?
            utility_kwargs={
                "kind": "ucb",
                "kappa": 2.5,
                "xi": 0.0
            })
    elif algorithm == 'nevergrad':
        from ray.tune.suggest.nevergrad import NevergradSearch
        from nevergrad import optimizers
        from nevergrad import p as ngp

        algo = NevergradSearch(
            optimizers.TwoPointsDE(
                ngp.Instrumentation(
                    update_rule=ngp.Choice(update_rules),
                    K=ngp.Scalar(lower=K_bounds['min'],
                                 upper=K_bounds['max']).set_integer_casting(),
                    N=ngp.Scalar(lower=N_bounds['min'],
                                 upper=N_bounds['max']).set_integer_casting(),
                    L=ngp.Scalar(lower=L_bounds['min'],
                                 upper=L_bounds['max']).set_integer_casting(),
                )),
            None,  # since the optimizer is already instrumented with kwargs
            metric="mean_loss",
            mode="min")
    elif algorithm == 'skopt':
        from skopt import Optimizer
        from ray.tune.suggest.skopt import SkOptSearch

        optimizer = Optimizer([
            update_rules,
            tuple(K_bounds.values()),
            tuple(N_bounds.values()),
            tuple(L_bounds.values())
        ])
        algo = SkOptSearch(
            optimizer,
            ["update_rule", "K", "N", "L"],
            metric="mean_loss",
            mode="min",
            points_to_evaluate=[
                ['random-same', 3, 16, 8],
                ['random-same', 8, 16, 8],
                ['random-same', 8, 16, 128],
            ],
        )
    elif algorithm == 'dragonfly':
        # TODO: doesn't work
        from ray.tune.suggest.dragonfly import DragonflySearch
        from dragonfly.exd.experiment_caller import EuclideanFunctionCaller
        from dragonfly.opt.gp_bandit import EuclideanGPBandit
        # from dragonfly.exd.experiment_caller import CPFunctionCaller
        # from dragonfly.opt.gp_bandit import CPGPBandit
        from dragonfly import load_config

        domain_config = load_config({
            "domain": [
                {
                    "name": "update_rule",
                    "type": "discrete",
                    "dim": 1,
                    "items": update_rules
                },
                {
                    "name": "K",
                    "type": "int",
                    "min": K_bounds['min'],
                    "max": K_bounds['max'],
                    # "dim": 1
                },
                {
                    "name": "N",
                    "type": "int",
                    "min": N_bounds['min'],
                    "max": N_bounds['max'],
                    # "dim": 1
                },
                {
                    "name": "L",
                    "type": "int",
                    "min": L_bounds['min'],
                    "max": L_bounds['max'],
                    # "dim": 1
                }
            ]
        })
        func_caller = EuclideanFunctionCaller(
            None, domain_config.domain.list_of_domains[0])
        optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
        algo = DragonflySearch(
            optimizer,
            metric="mean_loss",
            mode="min",
            points_to_evaluate=[
                ['random-same', 3, 16, 8],
                ['random-same', 8, 16, 8],
                ['random-same', 8, 16, 128],
            ],
        )
    elif algorithm == 'bohb':
        from ConfigSpace import ConfigurationSpace
        from ConfigSpace import hyperparameters as CSH
        from ray.tune.suggest.bohb import TuneBOHB

        config_space = ConfigurationSpace()
        config_space.add_hyperparameter(
            CSH.CategoricalHyperparameter("update_rule", choices=update_rules))
        config_space.add_hyperparameter(
            CSH.UniformIntegerHyperparameter(name='K',
                                             lower=K_bounds['min'],
                                             upper=K_bounds['max']))
        config_space.add_hyperparameter(
            CSH.UniformIntegerHyperparameter(name='N',
                                             lower=N_bounds['min'],
                                             upper=N_bounds['max']))
        config_space.add_hyperparameter(
            CSH.UniformIntegerHyperparameter(name='L',
                                             lower=L_bounds['min'],
                                             upper=L_bounds['max']))
        algo = TuneBOHB(config_space, metric="mean_loss", mode="min")
    elif algorithm == 'zoopt':
        from ray.tune.suggest.zoopt import ZOOptSearch
        from zoopt import ValueType

        space = {
            "update_rule":
            (ValueType.DISCRETE, range(0, len(update_rules)), False),
            "K": (ValueType.DISCRETE,
                  range(K_bounds['min'], K_bounds['max'] + 1), True),
            "N": (ValueType.DISCRETE,
                  range(N_bounds['min'], N_bounds['max'] + 1), True),
            "L": (ValueType.DISCRETE,
                  range(L_bounds['min'], L_bounds['max'] + 1), True),
        }
        # TODO: change budget to a large value
        algo = ZOOptSearch(budget=10,
                           dim_dict=space,
                           metric="mean_loss",
                           mode="min")

    # TODO: use more appropriate arguments for schedulers:
    # https://docs.ray.io/en/master/tune/api_docs/schedulers.html
    if scheduler == 'fifo':
        sched = None  # Tune defaults to FIFO
    elif scheduler == 'pbt':
        from ray.tune.schedulers import PopulationBasedTraining
        from random import randint
        sched = PopulationBasedTraining(
            metric="mean_loss",
            mode="min",
            hyperparam_mutations={
                "update_rule": update_rules,
                "K": lambda: randint(K_bounds['min'], K_bounds['max']),
                "N": lambda: randint(N_bounds['min'], N_bounds['max']),
                "L": lambda: randint(L_bounds['min'], L_bounds['max']),
            })
    elif scheduler == 'ahb' or scheduler == 'asha':
        # https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#asha-tune-schedulers-ashascheduler
        from ray.tune.schedulers import AsyncHyperBandScheduler
        sched = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    elif scheduler == 'hb':
        from ray.tune.schedulers import HyperBandScheduler
        sched = HyperBandScheduler(metric="mean_loss", mode="min")
    elif algorithm == 'bohb' or scheduler == 'bohb':
        from ray.tune.schedulers import HyperBandForBOHB
        sched = HyperBandForBOHB(metric="mean_loss", mode="min")
    elif scheduler == 'msr':
        from ray.tune.schedulers import MedianStoppingRule
        sched = MedianStoppingRule(metric="mean_loss", mode="min")
    init_ray(
        address=getenv("ip_head"),
        redis_password=getenv('redis_password'),
    )
    analysis = tune.run(
        trainable,
        name='mlencrypt_research',
        config={
            "monitor": True,
            "env_config": {
                "wandb": {
                    "project": "mlencrypt-research",
                    "sync_tensorboard": True,
                },
            },
        },
        # resources_per_trial={"cpu": 1, "gpu": 3},
        local_dir='./ray_results',
        export_formats=['csv'],  # TODO: add other formats?
        num_samples=num_samples,
        loggers=[
            tune.logger.JsonLogger, tune.logger.CSVLogger,
            tune.logger.TBXLogger, WandbLogger
        ],
        search_alg=algo,
        scheduler=sched,
        queue_trials=True,
    )
    try:
        wandbsweep(analysis)
    except wandbCommError:
        # see https://docs.wandb.com/sweeps/ray-tune#feature-compatibility
        pass
    best_config = analysis.get_best_config(metric='mean_loss', mode='min')
    print(f"Best config: {best_config}")
    shutdown_ray()
Example #11
def _test_xgboost(method="BlendSearch"):
    try:
        import ray
    except ImportError:
        return
    if method == "BlendSearch":
        from flaml import tune
    else:
        from ray import tune
    search_space = {
        "max_depth": tune.randint(1, 9),
        "min_child_weight": tune.choice([1, 2, 3]),
        "subsample": tune.uniform(0.5, 1.0),
        "eta": tune.loguniform(1e-4, 1e-1),
    }
    max_iter = 10
    for num_samples in [128]:
        time_budget_s = 60
        for n_cpu in [2]:
            start_time = time.time()
            # ray.init(address='auto')
            if method == "BlendSearch":
                analysis = tune.run(
                    train_breast_cancer,
                    config=search_space,
                    low_cost_partial_config={
                        "max_depth": 1,
                    },
                    cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    },
                    metric="eval-logloss",
                    mode="min",
                    max_resource=max_iter,
                    min_resource=1,
                    scheduler="asha",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    local_dir="logs/",
                    num_samples=num_samples * n_cpu,
                    time_budget_s=time_budget_s,
                    use_ray=True,
                )
            else:
                if "ASHA" == method:
                    algo = None
                elif "BOHB" == method:
                    from ray.tune.schedulers import HyperBandForBOHB
                    from ray.tune.suggest.bohb import TuneBOHB

                    algo = TuneBOHB(max_concurrent=n_cpu)
                    scheduler = HyperBandForBOHB(max_t=max_iter)
                elif "Optuna" == method:
                    from ray.tune.suggest.optuna import OptunaSearch

                    algo = OptunaSearch()
                elif "CFO" == method:
                    from flaml import CFO

                    algo = CFO(
                        low_cost_partial_config={
                            "max_depth": 1,
                        },
                        cat_hp_cost={
                            "min_child_weight": [6, 3, 2],
                        },
                    )
                elif "CFOCat" == method:
                    from flaml.searcher.cfo_cat import CFOCat

                    algo = CFOCat(
                        low_cost_partial_config={
                            "max_depth": 1,
                        },
                        cat_hp_cost={
                            "min_child_weight": [6, 3, 2],
                        },
                    )
                elif "Dragonfly" == method:
                    from ray.tune.suggest.dragonfly import DragonflySearch

                    algo = DragonflySearch()
                elif "SkOpt" == method:
                    from ray.tune.suggest.skopt import SkOptSearch

                    algo = SkOptSearch()
                elif "Nevergrad" == method:
                    from ray.tune.suggest.nevergrad import NevergradSearch
                    import nevergrad as ng

                    algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
                elif "ZOOpt" == method:
                    from ray.tune.suggest.zoopt import ZOOptSearch

                    algo = ZOOptSearch(budget=num_samples * n_cpu)
                elif "Ax" == method:
                    from ray.tune.suggest.ax import AxSearch

                    algo = AxSearch()
                elif "HyperOpt" == method:
                    from ray.tune.suggest.hyperopt import HyperOptSearch

                    algo = HyperOptSearch()
                    scheduler = None
                if method != "BOHB":
                    from ray.tune.schedulers import ASHAScheduler

                    scheduler = ASHAScheduler(max_t=max_iter, grace_period=1)
                analysis = tune.run(
                    train_breast_cancer,
                    metric="eval-logloss",
                    mode="min",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space,
                    local_dir="logs/",
                    num_samples=num_samples * n_cpu,
                    time_budget_s=time_budget_s,
                    scheduler=scheduler,
                    search_alg=algo,
                )
            # # Load the best model checkpoint
            # import os
            # best_bst = xgb.Booster()
            # best_bst.load_model(os.path.join(analysis.best_checkpoint,
            #  "model.xgb"))
            best_trial = analysis.get_best_trial("eval-logloss", "min", "all")
            accuracy = 1.0 - best_trial.metric_analysis["eval-error"]["min"]
            logloss = best_trial.metric_analysis["eval-logloss"]["min"]
            logger.info(f"method={method}")
            logger.info(f"n_samples={num_samples*n_cpu}")
            logger.info(f"time={time.time()-start_time}")
            logger.info(f"Best model eval loss: {logloss:.4f}")
            logger.info(f"Best model total accuracy: {accuracy:.4f}")
            logger.info(f"Best model parameters: {best_trial.config}")
Example #12
    #     "max": 7
    # }, {
    #     "name": "Li2SO4_vol",
    #     "type": "float",
    #     "min": 0,
    #     "max": 7
    # }, {
    #     "name": "NaClO4_vol",
    #     "type": "float",
    #     "min": 0,
    #     "max": 7
    # }]

    df_search = DragonflySearch(
        optimizer="bandit",
        domain="euclidean",
        # space=space,  # If you want to set the space manually
    )
    df_search = ConcurrencyLimiter(df_search, max_concurrent=4)

    scheduler = AsyncHyperBandScheduler()
    analysis = tune.run(
        objective,
        metric="objective",
        mode="max",
        name="dragonfly_search",
        search_alg=df_search,
        scheduler=scheduler,
        num_samples=10 if args.smoke_test else 50,
        config={
            "iterations": 100,
Example #13
def _test_distillbert(method='BlendSearch'):

    max_num_epoch = 64
    num_samples = -1
    time_budget_s = 10800

    search_space = {
        # You can mix constants with search space objects.
        "num_train_epochs": flaml.tune.loguniform(1, max_num_epoch),
        "learning_rate": flaml.tune.loguniform(1e-6, 1e-4),
        "adam_beta1": flaml.tune.uniform(0.8, 0.99),
        "adam_beta2": flaml.tune.loguniform(98e-2, 9999e-4),
        "adam_epsilon": flaml.tune.loguniform(1e-9, 1e-7),
    }

    start_time = time.time()
    ray.init(num_cpus=4, num_gpus=4)
    if 'ASHA' == method:
        algo = None
    elif 'BOHB' == method:
        from ray.tune.schedulers import HyperBandForBOHB
        from ray.tune.suggest.bohb import TuneBOHB
        algo = TuneBOHB(max_concurrent=4)
        scheduler = HyperBandForBOHB(max_t=max_num_epoch)
    elif 'Optuna' == method:
        from ray.tune.suggest.optuna import OptunaSearch
        algo = OptunaSearch()
    elif 'CFO' == method:
        from flaml import CFO
        algo = CFO(points_to_evaluate=[{
            "num_train_epochs": 1,
        }])
    elif 'BlendSearch' == method:
        from flaml import BlendSearch
        algo = BlendSearch(points_to_evaluate=[{
            "num_train_epochs": 1,
        }])
    elif 'Dragonfly' == method:
        from ray.tune.suggest.dragonfly import DragonflySearch
        algo = DragonflySearch()
    elif 'SkOpt' == method:
        from ray.tune.suggest.skopt import SkOptSearch
        algo = SkOptSearch()
    elif 'Nevergrad' == method:
        from ray.tune.suggest.nevergrad import NevergradSearch
        import nevergrad as ng
        algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
    elif 'ZOOpt' == method:
        from ray.tune.suggest.zoopt import ZOOptSearch
        algo = ZOOptSearch(budget=num_samples)
    elif 'Ax' == method:
        from ray.tune.suggest.ax import AxSearch
        algo = AxSearch()
    elif 'HyperOpt' == method:
        from ray.tune.suggest.hyperopt import HyperOptSearch
        algo = HyperOptSearch()
        scheduler = None
    if method != 'BOHB':
        from ray.tune.schedulers import ASHAScheduler
        scheduler = ASHAScheduler(max_t=max_num_epoch, grace_period=1)
    scheduler = None  # note: this unconditionally overrides the ASHA scheduler configured above
    analysis = ray.tune.run(
        train_distilbert,
        metric=HP_METRIC,
        mode=MODE,
        # You can add "gpu": 1 to allocate GPUs
        resources_per_trial={"gpu": 1},
        config=search_space,
        local_dir='test/logs/',
        num_samples=num_samples,
        time_budget_s=time_budget_s,
        keep_checkpoints_num=1,
        checkpoint_score_attr=HP_METRIC,
        scheduler=scheduler,
        search_alg=algo)

    ray.shutdown()

    best_trial = analysis.get_best_trial(HP_METRIC, MODE, "all")
    metric = best_trial.metric_analysis[HP_METRIC][MODE]

    logger.info(f"method={method}")
    logger.info(f"n_trials={len(analysis.trials)}")
    logger.info(f"time={time.time()-start_time}")
    logger.info(f"Best model eval {HP_METRIC}: {metric:.4f}")
    logger.info(f"Best model parameters: {best_trial.config}")
Example #14
    # space = [{
    #     "name": "LiNO3_vol",
    #     "type": "float",
    #     "min": 0,
    #     "max": 7
    # }, {
    #     "name": "Li2SO4_vol",
    #     "type": "float",
    #     "min": 0,
    #     "max": 7
    # }, {
    #     "name": "NaClO4_vol",
    #     "type": "float",
    #     "min": 0,
    #     "max": 7
    # }]

    df_search = DragonflySearch(
        optimizer="bandit",
        domain="euclidean",
        # space=space,  # If you want to set the space manually
        metric="objective",
        mode="max")

    scheduler = AsyncHyperBandScheduler(metric="objective", mode="max")
    tune.run(objective,
             name="dragonfly_search",
             search_alg=df_search,
             scheduler=scheduler,
             **tune_kwargs)
Example #15
    domain_vars = [{
        "name": "LiNO3_vol",
        "type": "float",
        "min": 0,
        "max": 7
    }, {
        "name": "Li2SO4_vol",
        "type": "float",
        "min": 0,
        "max": 7
    }, {
        "name": "NaClO4_vol",
        "type": "float",
        "min": 0,
        "max": 7
    }]

    domain_config = load_config({"domain": domain_vars})

    func_caller = EuclideanFunctionCaller(
        None, domain_config.domain.list_of_domains[0])
    optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
    algo = DragonflySearch(optimizer, metric="objective", mode="max")
    scheduler = AsyncHyperBandScheduler(metric="objective", mode="max")
    tune.run(objective,
             name="dragonfly_search",
             search_alg=algo,
             scheduler=scheduler,
             **config)
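
Examples #14 and #15 expand a keyword dict (tune_kwargs / config) that is built earlier in the original scripts; a hedged sketch of its likely shape, with illustrative values:

tune_kwargs = {              # Example #15 passes the same kind of dict under the name "config"
    "num_samples": 10,       # illustrative trial budget
    "config": {
        "iterations": 100,   # mirrors the "iterations" key seen in Example #12
    },
}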