Example #1
    def set_basic_conf(self):
        space = {
            "height": tune.uniform(-100, 100),
            "width": tune.randint(0, 100),
        }

        def cost(param):
            tune.report(loss=(param["height"] - 14) ** 2 - abs(param["width"] - 3))

        search_alg = CFO(
            space=space,
            metric="loss",
            mode="min",
            seed=20,
        )

        return search_alg, cost
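
Elsewhere in the test, the returned pair plugs straight into flaml's tune.run; a minimal usage sketch (budget values are arbitrary, and the call is assumed to run inside the same test class):

from flaml import tune

search_alg, cost = self.set_basic_conf()
analysis = tune.run(
    cost,
    search_alg=search_alg,
    num_samples=10,
    time_budget_s=5,
)
best_trial = analysis.get_best_trial()
print(best_trial.config, best_trial.last_result)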
Example #2
def _test_roberta(method='BlendSearch'):

    max_num_epoch = 100
    num_samples = -1
    time_budget_s = 3600

    search_space = {
        # You can mix constants with search space objects.
        "num_train_epochs": flaml.tune.loguniform(1, max_num_epoch),
        "learning_rate": flaml.tune.loguniform(1e-5, 3e-5),
        "weight_decay": flaml.tune.uniform(0, 0.3),
        "per_device_train_batch_size": flaml.tune.choice([16, 32, 64, 128]),
        "seed": flaml.tune.choice([12, 22, 33, 42]),
    }

    start_time = time.time()
    ray.init(num_cpus=4, num_gpus=4)
    if 'ASHA' == method:
        algo = None
    elif 'BOHB' == method:
        from ray.tune.schedulers import HyperBandForBOHB
        from ray.tune.suggest.bohb import TuneBOHB
        algo = TuneBOHB(max_concurrent=4)
        scheduler = HyperBandForBOHB(max_t=max_num_epoch)
    elif 'Optuna' == method:
        from ray.tune.suggest.optuna import OptunaSearch
        algo = OptunaSearch()
    elif 'CFO' == method:
        from flaml import CFO
        algo = CFO(points_to_evaluate=[{
            "num_train_epochs": 1,
            "per_device_train_batch_size": 128,
        }])
    elif 'BlendSearch' == method:
        from flaml import BlendSearch
        algo = BlendSearch(
            points_to_evaluate=[{
                "num_train_epochs": 1,
                "per_device_train_batch_size": 128,
            }])
    elif 'Dragonfly' == method:
        from ray.tune.suggest.dragonfly import DragonflySearch
        algo = DragonflySearch()
    elif 'SkOpt' == method:
        from ray.tune.suggest.skopt import SkOptSearch
        algo = SkOptSearch()
    elif 'Nevergrad' == method:
        from ray.tune.suggest.nevergrad import NevergradSearch
        import nevergrad as ng
        algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
    elif 'ZOOpt' == method:
        from ray.tune.suggest.zoopt import ZOOptSearch
        algo = ZOOptSearch(budget=num_samples)
    elif 'Ax' == method:
        from ray.tune.suggest.ax import AxSearch
        algo = AxSearch(max_concurrent=3)
    elif 'HyperOpt' == method:
        from ray.tune.suggest.hyperopt import HyperOptSearch
        algo = HyperOptSearch()
        scheduler = None
    if method != 'BOHB':
        from ray.tune.schedulers import ASHAScheduler
        scheduler = ASHAScheduler(max_t=max_num_epoch, grace_period=1)
    scheduler = None  # overrides the above: this run uses no scheduler for any method
    analysis = ray.tune.run(train_roberta,
                            metric=HP_METRIC,
                            mode=MODE,
                            resources_per_trial={
                                "gpu": 4,
                                "cpu": 4
                            },
                            config=search_space,
                            local_dir='logs/',
                            num_samples=num_samples,
                            time_budget_s=time_budget_s,
                            keep_checkpoints_num=1,
                            checkpoint_score_attr=HP_METRIC,
                            scheduler=scheduler,
                            search_alg=algo)

    ray.shutdown()

    best_trial = analysis.get_best_trial(HP_METRIC, MODE, "all")
    metric = best_trial.metric_analysis[HP_METRIC][MODE]

    logger.info(f"method={method}")
    logger.info(f"n_trials={len(analysis.trials)}")
    logger.info(f"time={time.time()-start_time}")
    logger.info(f"Best model eval {HP_METRIC}: {metric:.4f}")
    logger.info(f"Best model parameters: {best_trial.config}")
Example #3
def _test_xgboost(method='BlendSearch'):
    try:
        import ray
    except ImportError:
        return
    if method == 'BlendSearch':
        from flaml import tune
    else:
        from ray import tune
    search_space = {
        # You can mix constants with search space objects.
        "max_depth": tune.randint(1, 8) if method in [
            "BlendSearch", "BOHB", "Optuna"] else tune.randint(1, 9),
        "min_child_weight": tune.choice([1, 2, 3]),
        "subsample": tune.uniform(0.5, 1.0),
        "eta": tune.loguniform(1e-4, 1e-1)
    }
    max_iter = 10
    for num_samples in [256]:
        time_budget_s = 60  # set to None to disable the time budget
        for n_cpu in [8]:
            start_time = time.time()
            ray.init(num_cpus=n_cpu, num_gpus=0)
            if method == 'BlendSearch':
                analysis = tune.run(
                    train_breast_cancer,
                    init_config={
                        "max_depth": 1,
                        "min_child_weight": 3,
                    },
                    cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    },
                    metric="eval-logloss",
                    mode="min",
                    max_resource=max_iter,
                    min_resource=1,
                    report_intermediate_result=True,
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space,
                    local_dir='logs/',
                    num_samples=num_samples*n_cpu,
                    time_budget_s=time_budget_s,
                    use_ray=True)
            else:
                if 'ASHA' == method:
                    algo = None
                elif 'BOHB' == method:
                    from ray.tune.schedulers import HyperBandForBOHB
                    from ray.tune.suggest.bohb import TuneBOHB
                    algo = TuneBOHB(max_concurrent=n_cpu)
                    scheduler = HyperBandForBOHB(max_t=max_iter)
                elif 'Optuna' == method:
                    from ray.tune.suggest.optuna import OptunaSearch
                    algo = OptunaSearch()
                elif 'CFO' == method:
                    from flaml import CFO
                    algo = CFO(points_to_evaluate=[{
                        "max_depth": 1,
                        "min_child_weight": 3,
                    }], cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    })
                elif 'Dragonfly' == method:
                    from ray.tune.suggest.dragonfly import DragonflySearch
                    algo = DragonflySearch()
                elif 'SkOpt' == method:
                    from ray.tune.suggest.skopt import SkOptSearch
                    algo = SkOptSearch()
                elif 'Nevergrad' == method:
                    from ray.tune.suggest.nevergrad import NevergradSearch
                    import nevergrad as ng
                    algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
                elif 'ZOOpt' == method:
                    from ray.tune.suggest.zoopt import ZOOptSearch
                    algo = ZOOptSearch(budget=num_samples*n_cpu)
                elif 'Ax' == method:
                    from ray.tune.suggest.ax import AxSearch
                    algo = AxSearch()
                elif 'HyperOpt' == method:
                    from ray.tune.suggest.hyperopt import HyperOptSearch
                    algo = HyperOptSearch()
                    scheduler = None
                if method != 'BOHB':
                    from ray.tune.schedulers import ASHAScheduler
                    scheduler = ASHAScheduler(
                        max_t=max_iter,
                        grace_period=1)
                analysis = tune.run(
                    train_breast_cancer,
                    metric="eval-logloss",
                    mode="min",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space, local_dir='logs/',
                    num_samples=num_samples*n_cpu, time_budget_s=time_budget_s,
                    scheduler=scheduler, search_alg=algo)
            ray.shutdown()
            # # Load the best model checkpoint
            # best_bst = xgb.Booster()
            # best_bst.load_model(os.path.join(analysis.best_checkpoint,
            #  "model.xgb"))
            best_trial = analysis.get_best_trial("eval-logloss","min","all")
            accuracy = 1. - best_trial.metric_analysis["eval-error"]["min"]
            logloss = best_trial.metric_analysis["eval-logloss"]["min"]
            logger.info(f"method={method}")
            logger.info(f"n_samples={num_samples*n_cpu}")
            logger.info(f"time={time.time()-start_time}")
            logger.info(f"Best model eval loss: {logloss:.4f}")
            logger.info(f"Best model total accuracy: {accuracy:.4f}")
            logger.info(f"Best model parameters: {best_trial.config}")
Example #4
def _test_xgboost(method="BlendSearch"):
    try:
        import ray
    except ImportError:
        return
    if method == "BlendSearch":
        from flaml import tune
    else:
        from ray import tune
    search_space = {
        "max_depth": tune.randint(1, 9),
        "min_child_weight": tune.choice([1, 2, 3]),
        "subsample": tune.uniform(0.5, 1.0),
        "eta": tune.loguniform(1e-4, 1e-1),
    }
    max_iter = 10
    for num_samples in [128]:
        time_budget_s = 60
        for n_cpu in [2]:
            start_time = time.time()
            # ray.init(address='auto')
            if method == "BlendSearch":
                analysis = tune.run(
                    train_breast_cancer,
                    config=search_space,
                    low_cost_partial_config={
                        "max_depth": 1,
                    },
                    cat_hp_cost={
                        "min_child_weight": [6, 3, 2],
                    },
                    metric="eval-logloss",
                    mode="min",
                    max_resource=max_iter,
                    min_resource=1,
                    scheduler="asha",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    local_dir="logs/",
                    num_samples=num_samples * n_cpu,
                    time_budget_s=time_budget_s,
                    use_ray=True,
                )
            else:
                if "ASHA" == method:
                    algo = None
                elif "BOHB" == method:
                    from ray.tune.schedulers import HyperBandForBOHB
                    from ray.tune.suggest.bohb import TuneBOHB

                    algo = TuneBOHB(max_concurrent=n_cpu)
                    scheduler = HyperBandForBOHB(max_t=max_iter)
                elif "Optuna" == method:
                    from ray.tune.suggest.optuna import OptunaSearch

                    algo = OptunaSearch()
                elif "CFO" == method:
                    from flaml import CFO

                    algo = CFO(
                        low_cost_partial_config={
                            "max_depth": 1,
                        },
                        cat_hp_cost={
                            "min_child_weight": [6, 3, 2],
                        },
                    )
                elif "CFOCat" == method:
                    from flaml.searcher.cfo_cat import CFOCat

                    algo = CFOCat(
                        low_cost_partial_config={
                            "max_depth": 1,
                        },
                        cat_hp_cost={
                            "min_child_weight": [6, 3, 2],
                        },
                    )
                elif "Dragonfly" == method:
                    from ray.tune.suggest.dragonfly import DragonflySearch

                    algo = DragonflySearch()
                elif "SkOpt" == method:
                    from ray.tune.suggest.skopt import SkOptSearch

                    algo = SkOptSearch()
                elif "Nevergrad" == method:
                    from ray.tune.suggest.nevergrad import NevergradSearch
                    import nevergrad as ng

                    algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
                elif "ZOOpt" == method:
                    from ray.tune.suggest.zoopt import ZOOptSearch

                    algo = ZOOptSearch(budget=num_samples * n_cpu)
                elif "Ax" == method:
                    from ray.tune.suggest.ax import AxSearch

                    algo = AxSearch()
                elif "HyperOpt" == method:
                    from ray.tune.suggest.hyperopt import HyperOptSearch

                    algo = HyperOptSearch()
                    scheduler = None
                if method != "BOHB":
                    from ray.tune.schedulers import ASHAScheduler

                    scheduler = ASHAScheduler(max_t=max_iter, grace_period=1)
                analysis = tune.run(
                    train_breast_cancer,
                    metric="eval-logloss",
                    mode="min",
                    # You can add "gpu": 0.1 to allocate GPUs
                    resources_per_trial={"cpu": 1},
                    config=search_space,
                    local_dir="logs/",
                    num_samples=num_samples * n_cpu,
                    time_budget_s=time_budget_s,
                    scheduler=scheduler,
                    search_alg=algo,
                )
            # # Load the best model checkpoint
            # import os
            # best_bst = xgb.Booster()
            # best_bst.load_model(os.path.join(analysis.best_checkpoint,
            #  "model.xgb"))
            best_trial = analysis.get_best_trial("eval-logloss", "min", "all")
            accuracy = 1.0 - best_trial.metric_analysis["eval-error"]["min"]
            logloss = best_trial.metric_analysis["eval-logloss"]["min"]
            logger.info(f"method={method}")
            logger.info(f"n_samples={num_samples*n_cpu}")
            logger.info(f"time={time.time()-start_time}")
            logger.info(f"Best model eval loss: {logloss:.4f}")
            logger.info(f"Best model total accuracy: {accuracy:.4f}")
            logger.info(f"Best model parameters: {best_trial.config}")
Example #5
def cifar10_main(method='BlendSearch',
                 num_samples=10,
                 max_num_epochs=100,
                 gpus_per_trial=2):
    data_dir = os.path.abspath("test/data")
    load_data(data_dir)  # Download data for all trials before starting the run
    if method == 'BlendSearch':
        from flaml import tune
    else:
        from ray import tune
    if method in ['BlendSearch', 'BOHB', 'Optuna']:
        config = {
            "l1": tune.randint(2, 8),
            "l2": tune.randint(2, 8),
            "lr": tune.loguniform(1e-4, 1e-1),
            "num_epochs": tune.qloguniform(1, max_num_epochs, q=1),
            "batch_size": tune.randint(1, 4)  #tune.choice([2, 4, 8, 16])
        }
    else:
        config = {
            "l1": tune.randint(2, 9),
            "l2": tune.randint(2, 9),
            "lr": tune.loguniform(1e-4, 1e-1),
            "num_epochs": tune.qloguniform(1, max_num_epochs + 1, q=1),
            "batch_size": tune.randint(1, 5)  #tune.choice([2, 4, 8, 16])
        }
    import ray
    time_budget_s = 3600
    start_time = time.time()
    if method == 'BlendSearch':
        result = tune.run(ray.tune.with_parameters(train_cifar,
                                                   data_dir=data_dir),
                          init_config={
                              "l1": 2,
                              "l2": 2,
                              "num_epochs": 1,
                              "batch_size": 4,
                          },
                          metric="loss",
                          mode="min",
                          max_resource=max_num_epochs,
                          min_resource=1,
                          report_intermediate_result=True,
                          resources_per_trial={
                              "cpu": 2,
                              "gpu": gpus_per_trial
                          },
                          config=config,
                          local_dir='logs/',
                          num_samples=num_samples,
                          time_budget_s=time_budget_s,
                          use_ray=True)
    else:
        if 'ASHA' == method:
            algo = None
        elif 'BOHB' == method:
            from ray.tune.schedulers import HyperBandForBOHB
            from ray.tune.suggest.bohb import TuneBOHB
            algo = TuneBOHB()
            scheduler = HyperBandForBOHB(max_t=max_num_epochs)
        elif 'Optuna' == method:
            from ray.tune.suggest.optuna import OptunaSearch
            algo = OptunaSearch()
        elif 'CFO' == method:
            from flaml import CFO
            algo = CFO(points_to_evaluate=[{
                "l1": 2,
                "l2": 2,
                "num_epochs": 1,
                "batch_size": 4,
            }])
        elif 'Nevergrad' == method:
            from ray.tune.suggest.nevergrad import NevergradSearch
            import nevergrad as ng
            algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
        if method != 'BOHB':
            from ray.tune.schedulers import ASHAScheduler
            scheduler = ASHAScheduler(max_t=max_num_epochs, grace_period=1)
        result = tune.run(tune.with_parameters(train_cifar, data_dir=data_dir),
                          resources_per_trial={
                              "cpu": 2,
                              "gpu": gpus_per_trial
                          },
                          config=config,
                          metric="loss",
                          mode="min",
                          num_samples=num_samples,
                          time_budget_s=time_budget_s,
                          scheduler=scheduler,
                          search_alg=algo)
    ray.shutdown()

    logger.info(f"method={method}")
    logger.info(f"n_samples={num_samples}")
    logger.info(f"time={time.time()-start_time}")
    best_trial = result.get_best_trial("loss", "min", "all")
    logger.info("Best trial config: {}".format(best_trial.config))
    logger.info("Best trial final validation loss: {}".format(
        best_trial.metric_analysis["loss"]["min"]))
    logger.info("Best trial final validation accuracy: {}".format(
        best_trial.metric_analysis["accuracy"]["max"]))

    best_trained_model = Net(2**best_trial.config["l1"],
                             2**best_trial.config["l2"])
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda:0"
        if gpus_per_trial > 1:
            best_trained_model = nn.DataParallel(best_trained_model)
    best_trained_model.to(device)

    checkpoint_path = os.path.join(best_trial.checkpoint.value, "checkpoint")

    model_state, optimizer_state = torch.load(checkpoint_path)
    best_trained_model.load_state_dict(model_state)

    test_acc = _test_accuracy(best_trained_model, device)
    logger.info("Best trial test set accuracy: {}".format(test_acc))
Example #6
def test_nested():
    from flaml import tune, CFO, BlendSearch

    search_space = {
        # test nested search space
        "cost_related": {
            "a": tune.randint(1, 9),
        },
        "b": tune.uniform(0.5, 1.0),
    }

    def simple_func(config):
        obj = (config["cost_related"]["a"] - 4) ** 2 + (
            config["b"] - config["cost_related"]["a"]) ** 2
        tune.report(obj=obj)  # intermediate result
        # final result; "ab" feeds the metric_constraints below
        tune.report(obj=obj, ab=config["cost_related"]["a"] * config["b"])

    analysis = tune.run(
        simple_func,
        search_alg=CFO(
            space=search_space,
            metric="obj",
            mode="min",
            low_cost_partial_config={"cost_related": {
                "a": 1
            }},
            points_to_evaluate=[
                {
                    "b": 0.99,
                    "cost_related": {
                        "a": 3
                    }
                },
                {
                    "b": 0.99,
                    "cost_related": {
                        "a": 2
                    }
                },
                {
                    "cost_related": {
                        "a": 8
                    }
                },
            ],
            metric_constraints=[("ab", "<=", 4)],
        ),
        local_dir="logs/",
        num_samples=-1,
        time_budget_s=1,
    )

    best_trial = analysis.get_best_trial()
    logger.info(f"CFO best config: {best_trial.config}")
    logger.info(f"CFO best result: {best_trial.last_result}")

    analysis = tune.run(
        simple_func,
        search_alg=BlendSearch(
            experimental=True,
            space=search_space,
            metric="obj",
            mode="min",
            low_cost_partial_config={"cost_related": {
                "a": 1
            }},
            points_to_evaluate=[
                {
                    "b": 0.99,
                    "cost_related": {
                        "a": 3
                    }
                },
                {
                    "b": 0.99,
                    "cost_related": {
                        "a": 2
                    }
                },
                {
                    "cost_related": {
                        "a": 8
                    }
                },
            ],
            metric_constraints=[("ab", "<=", 4)],
        ),
        local_dir="logs/",
        num_samples=-1,
        time_budget_s=1,
    )

    best_trial = analysis.get_best_trial()
    logger.info(f"BlendSearch exp best config: {best_trial.config}")
    logger.info(f"BlendSearch exp best result: {best_trial.last_result}")

    points_to_evaluate = [
        {"b": 0.99, "cost_related": {"a": 3}},
        {"b": 0.99, "cost_related": {"a": 2}},
    ]
    analysis = tune.run(
        simple_func,
        config=search_space,
        low_cost_partial_config={"cost_related": {
            "a": 1
        }},
        points_to_evaluate=points_to_evaluate,
        evaluated_rewards=[(config["cost_related"]["a"] - 4)**2 +
                           (config["b"] - config["cost_related"]["a"])**2
                           for config in points_to_evaluate],
        metric="obj",
        mode="min",
        metric_constraints=[("ab", "<=", 4)],
        local_dir="logs/",
        num_samples=-1,
        time_budget_s=1,
    )

    best_trial = analysis.get_best_trial()
    logger.info(f"BlendSearch best config: {best_trial.config}")
    logger.info(f"BlendSearch best result: {best_trial.last_result}")
Example #7
def _test_distillbert(method='BlendSearch'):

    max_num_epoch = 64
    num_samples = -1
    time_budget_s = 10800

    search_space = {
        # You can mix constants with search space objects.
        "num_train_epochs": flaml.tune.loguniform(1, max_num_epoch),
        "learning_rate": flaml.tune.loguniform(1e-6, 1e-4),
        "adam_beta1": flaml.tune.uniform(0.8, 0.99),
        "adam_beta2": flaml.tune.loguniform(98e-2, 9999e-4),
        "adam_epsilon": flaml.tune.loguniform(1e-9, 1e-7),
    }

    start_time = time.time()
    ray.init(num_cpus=4, num_gpus=4)
    if 'ASHA' == method:
        algo = None
    elif 'BOHB' == method:
        from ray.tune.schedulers import HyperBandForBOHB
        from ray.tune.suggest.bohb import TuneBOHB
        algo = TuneBOHB(max_concurrent=4)
        scheduler = HyperBandForBOHB(max_t=max_num_epoch)
    elif 'Optuna' == method:
        from ray.tune.suggest.optuna import OptunaSearch
        algo = OptunaSearch()
    elif 'CFO' == method:
        from flaml import CFO
        algo = CFO(points_to_evaluate=[{
            "num_train_epochs": 1,
        }])
    elif 'BlendSearch' == method:
        from flaml import BlendSearch
        algo = BlendSearch(points_to_evaluate=[{
            "num_train_epochs": 1,
        }])
    elif 'Dragonfly' == method:
        from ray.tune.suggest.dragonfly import DragonflySearch
        algo = DragonflySearch()
    elif 'SkOpt' == method:
        from ray.tune.suggest.skopt import SkOptSearch
        algo = SkOptSearch()
    elif 'Nevergrad' == method:
        from ray.tune.suggest.nevergrad import NevergradSearch
        import nevergrad as ng
        algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
    elif 'ZOOpt' == method:
        from ray.tune.suggest.zoopt import ZOOptSearch
        algo = ZOOptSearch(budget=num_samples)
    elif 'Ax' == method:
        from ray.tune.suggest.ax import AxSearch
        algo = AxSearch()
    elif 'HyperOpt' == method:
        from ray.tune.suggest.hyperopt import HyperOptSearch
        algo = HyperOptSearch()
        scheduler = None
    if method != 'BOHB':
        from ray.tune.schedulers import ASHAScheduler
        scheduler = ASHAScheduler(max_t=max_num_epoch, grace_period=1)
    scheduler = None  # overrides the above: this run uses no scheduler for any method
    analysis = ray.tune.run(
        train_distilbert,
        metric=HP_METRIC,
        mode=MODE,
        # You can add "gpu": 1 to allocate GPUs
        resources_per_trial={"gpu": 1},
        config=search_space,
        local_dir='test/logs/',
        num_samples=num_samples,
        time_budget_s=time_budget_s,
        keep_checkpoints_num=1,
        checkpoint_score_attr=HP_METRIC,
        scheduler=scheduler,
        search_alg=algo)

    ray.shutdown()

    best_trial = analysis.get_best_trial(HP_METRIC, MODE, "all")
    metric = best_trial.metric_analysis[HP_METRIC][MODE]

    logger.info(f"method={method}")
    logger.info(f"n_trials={len(analysis.trials)}")
    logger.info(f"time={time.time()-start_time}")
    logger.info(f"Best model eval {HP_METRIC}: {metric:.4f}")
    logger.info(f"Best model parameters: {best_trial.config}")
Example #8
def cifar10_main(method="BlendSearch",
                 num_samples=10,
                 max_num_epochs=100,
                 gpus_per_trial=1):
    data_dir = os.path.abspath("test/data")
    load_data(data_dir)  # Download data for all trials before starting the run
    if method == "BlendSearch":
        from flaml import tune
    else:
        from ray import tune
    if method in ["BOHB"]:
        config = {
            "l1": tune.randint(2, 8),
            "l2": tune.randint(2, 8),
            "lr": tune.loguniform(1e-4, 1e-1),
            "num_epochs": tune.qloguniform(1, max_num_epochs, q=1),
            "batch_size": tune.randint(1, 4),
        }
    else:
        config = {
            "l1": tune.randint(2, 9),
            "l2": tune.randint(2, 9),
            "lr": tune.loguniform(1e-4, 1e-1),
            "num_epochs": tune.loguniform(1, max_num_epochs),
            "batch_size": tune.randint(1, 5),
        }
    import ray

    time_budget_s = 600
    np.random.seed(7654321)
    start_time = time.time()
    if method == "BlendSearch":
        result = tune.run(
            ray.tune.with_parameters(train_cifar, data_dir=data_dir),
            config=config,
            metric="loss",
            mode="min",
            low_cost_partial_config={"num_epochs": 1},
            max_resource=max_num_epochs,
            min_resource=1,
            scheduler="asha",
            resources_per_trial={
                "cpu": 1,
                "gpu": gpus_per_trial
            },
            local_dir="logs/",
            num_samples=num_samples,
            time_budget_s=time_budget_s,
            use_ray=True,
        )
    else:
        if "ASHA" == method:
            algo = None
        elif "BOHB" == method:
            from ray.tune.schedulers import HyperBandForBOHB
            from ray.tune.suggest.bohb import TuneBOHB

            algo = TuneBOHB()
            scheduler = HyperBandForBOHB(max_t=max_num_epochs)
        elif "Optuna" == method:
            from ray.tune.suggest.optuna import OptunaSearch

            algo = OptunaSearch(seed=10)
        elif "CFO" == method:
            from flaml import CFO

            algo = CFO(low_cost_partial_config={
                "num_epochs": 1,
            })
        elif "Nevergrad" == method:
            from ray.tune.suggest.nevergrad import NevergradSearch
            import nevergrad as ng

            algo = NevergradSearch(optimizer=ng.optimizers.OnePlusOne)
        if method != "BOHB":
            from ray.tune.schedulers import ASHAScheduler

            scheduler = ASHAScheduler(max_t=max_num_epochs, grace_period=1)
        result = tune.run(
            tune.with_parameters(train_cifar, data_dir=data_dir),
            resources_per_trial={
                "cpu": 1,
                "gpu": gpus_per_trial
            },
            config=config,
            metric="loss",
            mode="min",
            num_samples=num_samples,
            time_budget_s=time_budget_s,
            scheduler=scheduler,
            search_alg=algo,
        )
    ray.shutdown()

    logger.info(f"method={method}")
    logger.info(f"#trials={len(result.trials)}")
    logger.info(f"time={time.time()-start_time}")
    best_trial = result.get_best_trial("loss", "min", "all")
    logger.info("Best trial config: {}".format(best_trial.config))
    logger.info("Best trial final validation loss: {}".format(
        best_trial.metric_analysis["loss"]["min"]))
    logger.info("Best trial final validation accuracy: {}".format(
        best_trial.metric_analysis["accuracy"]["max"]))

    best_trained_model = Net(2**best_trial.config["l1"],
                             2**best_trial.config["l2"])
    device = "cpu"
    if torch.cuda.is_available():
        device = "cuda:0"
        if gpus_per_trial > 1:
            best_trained_model = nn.DataParallel(best_trained_model)
    best_trained_model.to(device)

    checkpoint_path = os.path.join(best_trial.checkpoint.value, "checkpoint")

    model_state, optimizer_state = torch.load(checkpoint_path)
    best_trained_model.load_state_dict(model_state)

    test_acc = _test_accuracy(best_trained_model, device)
    logger.info("Best trial test set accuracy: {}".format(test_acc))
Example #9
def test_record_incumbent(method="BlendSearch"):

    if method != "CFOCat":
        search_space = {
            "x1": tune.randint(1, 9),
            "x2": tune.randint(1, 9),
            "x3": tune.randint(1, 9),
            "x4": tune.randint(1, 9),
            "x5": tune.randint(1, 9),
        }
    else:
        search_space = {
            "x1": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
            "x2": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
            "x3": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
            "x4": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
            "x5": tune.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
        }

    max_iter = 100
    num_samples = 128
    time_budget_s = 1
    n_cpu = 1

    if method == "BlendSearch":
        tune.run(
            evaluation_function=rosenbrock_function,
            config=search_space,
            verbose=0,
            metric="funcLoss",
            mode="min",
            max_resource=max_iter,
            min_resource=1,
            local_dir="logs/",
            num_samples=num_samples * n_cpu,
            time_budget_s=time_budget_s,
            use_incumbent_result_in_evaluation=True,
        )
        return
    elif method == "CFO":
        from flaml import CFO

        algo = CFO(
            use_incumbent_result_in_evaluation=True,
        )
    elif method == "CFOCat":
        from flaml.searcher.cfo_cat import CFOCat

        algo = CFOCat(
            use_incumbent_result_in_evaluation=True,
        )
    else:
        raise NotImplementedError
    tune.run(
        evaluation_function=rosenbrock_function,
        metric="funcLoss",
        mode="min",
        config=search_space,
        local_dir="logs/",
        num_samples=num_samples * n_cpu,
        time_budget_s=time_budget_s,
        search_alg=algo,
    )
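
rosenbrock_function comes from the surrounding test module. A sketch of what it plausibly looks like, assuming FLAML forwards the best result so far through an incumbent_result keyword when use_incumbent_result_in_evaluation=True:

def rosenbrock_function(config: dict, incumbent_result=None):
    # 5-D Rosenbrock over x1..x5; incumbent_result is only supplied when
    # the incumbent-forwarding flag is set (assumption)
    xs = [config[f"x{i}"] for i in range(1, 6)]
    loss = sum(
        100 * (xs[i + 1] - xs[i] ** 2) ** 2 + (1 - xs[i]) ** 2
        for i in range(len(xs) - 1)
    )
    tune.report(funcLoss=loss)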
Example #10
def test_define_by_run():
    from flaml.tune.space import (
        unflatten_hierarchical,
        normalize,
        indexof,
        complete_config,
    )

    space = {
        # Sample a float uniformly between -5.0 and -1.0
        "uniform": tune.uniform(-5, -1),
        # Sample a float uniformly between 3.2 and 5.4,
        # rounding to increments of 0.2
        "quniform": tune.quniform(3.2, 5.4, 0.2),
        # Sample a float uniformly between 0.0001 and 0.01, while
        # sampling in log space
        "loguniform": tune.loguniform(1e-4, 1e-2),
        # Sample a float uniformly between 0.0001 and 0.1, while
        # sampling in log space and rounding to increments of 0.00005
        "qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5),
        # Sample a random float from a normal distribution with
        # mean=10 and sd=2
        # "randn": tune.randn(10, 2),
        # Sample a random float from a normal distribution with
        # mean=10 and sd=2, rounding to increments of 0.2
        # "qrandn": tune.qrandn(10, 2, 0.2),
        # Sample an integer uniformly between -9 (inclusive) and 15 (exclusive)
        "randint": tune.randint(-9, 15),
        # Sample an integer uniformly between -21 (inclusive) and 12 (inclusive!),
        # rounding to increments of 3 (includes 12)
        "qrandint": tune.qrandint(-21, 12, 3),
        # Sample an integer uniformly between 1 (inclusive) and 10 (exclusive),
        # while sampling in log space
        "lograndint": tune.lograndint(1, 10),
        # Sample an integer uniformly between 2 (inclusive) and 10 (inclusive!),
        # while sampling in log space and rounding to increments of 2
        "qlograndint": tune.qlograndint(2, 10, 2),
        # Sample an option uniformly from the specified choices
        "choice": tune.choice(["a", "b", "c"]),
        "const": 5,
    }
    choice = {"nested": space}
    bs = BlendSearch(
        space={"c": tune.choice([choice])},
        low_cost_partial_config={"c": choice},
        metric="metric",
        mode="max",
    )
    print(indexof(bs._gs.space["c"], choice))
    print(indexof(bs._gs.space["c"], {"nested": {"const": 1}}))
    config = bs._gs.suggest("t1")
    print(config)
    config = unflatten_hierarchical(config, bs._gs.space)[0]
    print(config)
    print(normalize({"c": [choice]}, bs._gs.space, config, {}, False))
    space["randn"] = tune.randn(10, 2)
    cfo = CFO(
        space={"c": tune.choice([0, choice])},
        metric="metric",
        mode="max",
    )
    for i in range(5):
        cfo.suggest(f"t{i}")
    # print(normalize(config, bs._gs.space, config, {}, False))
    print(complete_config({}, cfo._ls.space, cfo._ls))
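
The cfo.suggest(...) loop above drives the searcher by hand but never reports back. A minimal sketch of the full manual protocol, feeding a toy objective into on_trial_complete:

from flaml import tune, CFO

searcher = CFO(space={"x": tune.uniform(-1, 1)}, metric="metric", mode="max")
for i in range(5):
    trial_id = f"manual{i}"
    config = searcher.suggest(trial_id)
    result = {"metric": -(config["x"] - 0.3) ** 2}  # toy objective
    searcher.on_trial_complete(trial_id, result=result)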