Example #1
    # The head of this list was truncated in the source snippet; the values
    # below are representative warm-start configurations matching the search
    # space defined in tune.run() further down.
    previously_run_params = [
        {"width": 10, "height": 0, "activation": "relu"},
        {"width": 15, "height": -20, "activation": "tanh"},
    ]
    known_rewards = [-189, -1144]

    # maximum number of concurrent trials
    max_concurrent = 8

    algo = HEBOSearch(
        # space=space,  # pass an explicit search space here if desired
        points_to_evaluate=previously_run_params,
        evaluated_rewards=known_rewards,
        random_state_seed=123,  # for reproducibility
        max_concurrent=max_concurrent,
    )

    scheduler = AsyncHyperBandScheduler()

    analysis = tune.run(easy_objective,
                        metric="mean_loss",
                        mode="min",
                        name="hebo_exp_with_warmstart",
                        search_alg=algo,
                        scheduler=scheduler,
                        num_samples=10 if args.smoke_test else 50,
                        config={
                            "steps": 100,
                            "width": tune.uniform(0, 20),
                            "height": tune.uniform(-100, 100),
                            "activation": tune.choice(["relu", "tanh"])
                        })
    print("Best hyperparameters found were: ", analysis.best_config)
Example #2
                gpus=gpus,
                gpu_threshold=0.8)

    # Retraining phase
    if args.retrain:
        best_dict = get_best_info(os.path.join(exp_dir, 'design'),
                                  mode='manual')
        t_config = best_dict['config']
        tr_idx, vl_idx = train_test_split(np.arange(len(dataset)),
                                          test_size=0.2,
                                          stratify=np.array(
                                              [t.y for t in dataset]),
                                          shuffle=True,
                                          random_state=get_seed())
        # Store the split in the retraining config; the original snippet
        # wrote to `config`, which is never passed to run_exp below.
        t_config['tr_idx'], t_config['vl_idx'] = tr_idx.tolist(), vl_idx.tolist()
        # Wrap the fixed output setting so Tune still treats it as a
        # (single-valued) search dimension.
        t_config['out'] = tune.choice([t_config['out']])
        run_exp('test',
                config=t_config,
                n_samples=5,
                p_early={
                    'metric': 'vl_loss',
                    'mode': 'min',
                    'patience': 50
                },
                p_scheduler=None,
                exp_dir=exp_dir,
                chk_score_attr='vl_score',
                log_params={
                    'n_gen': '#gen',
                    'C': 'C',
                    'lr': 'LRate',
Example #3
def tune_transformer(num_samples=8, gpus_per_trial=0, smoke_test=False):
    data_dir_name = "./data" if not smoke_test else "./test_data"
    data_dir = os.path.abspath(os.path.join(os.getcwd(), data_dir_name))
    if not os.path.exists(data_dir):
        os.mkdir(data_dir, 0o755)

    # Change these as needed.
    model_name = "bert-base-uncased" if not smoke_test \
        else "sshleifer/tiny-distilroberta-base"
    task_name = "rte"

    task_data_dir = os.path.join(data_dir, task_name.upper())

    num_labels = glue_tasks_num_labels[task_name]

    config = AutoConfig.from_pretrained(model_name,
                                        num_labels=num_labels,
                                        finetuning_task=task_name)

    # Download and cache tokenizer, model, and features
    print("Downloading and caching Tokenizer")
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Triggers model download to cache
    print("Downloading and caching pre-trained model")
    AutoModelForSequenceClassification.from_pretrained(
        model_name,
        config=config,
    )

    def get_model():
        return AutoModelForSequenceClassification.from_pretrained(
            model_name,
            config=config,
        )

    # Download data.
    download_data(task_name, data_dir)

    data_args = GlueDataTrainingArguments(task_name=task_name,
                                          data_dir=task_data_dir)

    train_dataset = GlueDataset(data_args,
                                tokenizer=tokenizer,
                                mode="train",
                                cache_dir=task_data_dir)
    eval_dataset = GlueDataset(data_args,
                               tokenizer=tokenizer,
                               mode="dev",
                               cache_dir=task_data_dir)

    training_args = TrainingArguments(
        output_dir=".",
        learning_rate=1e-5,  # config
        do_train=True,
        do_eval=True,
        no_cuda=gpus_per_trial <= 0,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        load_best_model_at_end=True,
        num_train_epochs=2,  # config
        max_steps=-1,
        per_device_train_batch_size=16,  # config
        per_device_eval_batch_size=16,  # config
        warmup_steps=0,
        weight_decay=0.1,  # config
        logging_dir="./logs",
        skip_memory_metrics=True,
        report_to="none")

    trainer = Trainer(model_init=get_model,
                      args=training_args,
                      train_dataset=train_dataset,
                      eval_dataset=eval_dataset,
                      compute_metrics=build_compute_metrics_fn(task_name))

    tune_config = {
        "per_device_train_batch_size": 32,
        "per_device_eval_batch_size": 32,
        "num_train_epochs": tune.choice([2, 3, 4, 5]),
        "max_steps": 1 if smoke_test else -1,  # Used for smoke test.
    }

    scheduler = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="eval_acc",
        mode="max",
        perturbation_interval=1,
        hyperparam_mutations={
            "weight_decay": tune.uniform(0.0, 0.3),
            "learning_rate": tune.uniform(1e-5, 5e-5),
            "per_device_train_batch_size": [16, 32, 64],
        })

    reporter = CLIReporter(
        parameter_columns={
            "weight_decay": "w_decay",
            "learning_rate": "lr",
            "per_device_train_batch_size": "train_bs/gpu",
            "num_train_epochs": "num_epochs"
        },
        metric_columns=[
            "eval_acc", "eval_loss", "epoch", "training_iteration"
        ])

    trainer.hyperparameter_search(
        hp_space=lambda _: tune_config,
        backend="ray",
        n_trials=num_samples,
        resources_per_trial={
            "cpu": 1,
            "gpu": gpus_per_trial
        },
        scheduler=scheduler,
        keep_checkpoints_num=1,
        checkpoint_score_attr="training_iteration",
        stop={"training_iteration": 1} if smoke_test else None,
        progress_reporter=reporter,
        local_dir="~/ray_results/",
        name="tune_transformer_pbt",
        log_to_file=True)
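The snippet defines tune_transformer but never calls it; an assumed entry point (not part of the original code) for a quick CPU check could be:

if __name__ == "__main__":
    # Tiny model, one trial; set gpus_per_trial > 0 and
    # smoke_test=False for a real search.
    tune_transformer(num_samples=1, gpus_per_trial=0, smoke_test=True)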
Example #4
import ray
from ray import tune
from inventory_env import InventoryEnv

ray.init()
tune.run(
    "PPO",
    stop={"timesteps_total": 1e6},
    num_samples=5,
    config={
        "env": InventoryEnv,
        "rollout_fragment_length": 40,
        "num_gpus": 1,
        "num_workers": 50,
        "lr": tune.grid_search([0.01, 0.001, 0.0001, 0.00001]),
        "use_gae": tune.choice([True, False]),
        "train_batch_size": tune.choice([5000, 10000, 20000, 40000]),
        "sgd_minibatch_size": tune.choice([128, 1024, 4096, 8192]),
        "num_sgd_iter": tune.choice([5, 10, 30]),
        "vf_loss_coeff": tune.choice([0.1, 1, 10]),
        "vf_share_layers": tune.choice([True, False]),
        "entropy_coeff": tune.choice([0, 0.1, 1]),
        "clip_param": tune.choice([0.05, 0.1, 0.3, 0.5]),
        "vf_clip_param": tune.choice([1, 5, 10]),
        "grad_clip": tune.choice([None, 0.01, 0.1, 1]),
        "kl_target": tune.choice([0.005, 0.01, 0.05]),
        "eager": False,
    },
)
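Note that "lr" is the only tune.grid_search parameter here: Tune expands that four-point grid and, because num_samples=5, repeats it five times, so this call launches 20 PPO trials, each sampling the tune.choice parameters independently.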
Example #5
def parse_search_space(space_file):
    search_space = {}
    if os.path.exists('./{}.json'.format(space_file)):
        with open('./{}.json'.format(space_file), 'r') as f:
            paras_dict = json.load(f)
            for name in paras_dict:
                paras_type = paras_dict[name]['type']
                if paras_type == 'uniform':
                    # fields: type, lower, upper
                    try:
                        search_space[name] = tune.uniform(
                            paras_dict[name]['lower'],
                            paras_dict[name]['upper'])
                    except KeyError:
                        raise TypeError(
                            'The space file does not meet the format '
                            'requirements when parsing uniform type.')
                elif paras_type == 'randn':
                    # fields: type, mean, sd
                    try:
                        search_space[name] = tune.randn(
                            paras_dict[name]['mean'], paras_dict[name]['sd'])
                    except KeyError:
                        raise TypeError(
                            'The space file does not meet the format '
                            'requirements when parsing randn type.')
                elif paras_type == 'randint':
                    # fields: type, lower (optional), upper
                    try:
                        if 'lower' not in paras_dict[name]:
                            search_space[name] = tune.randint(
                                paras_dict[name]['upper'])
                        else:
                            search_space[name] = tune.randint(
                                paras_dict[name]['lower'],
                                paras_dict[name]['upper'])
                    except KeyError:
                        raise TypeError(
                            'The space file does not meet the format '
                            'requirements when parsing randint type.')
                elif paras_type == 'choice':
                    # fields: type, list
                    try:
                        search_space[name] = tune.choice(
                            paras_dict[name]['list'])
                    except KeyError:
                        raise TypeError(
                            'The space file does not meet the format '
                            'requirements when parsing choice type.')
                elif paras_type == 'grid_search':
                    # fields: type, list
                    try:
                        search_space[name] = tune.grid_search(
                            paras_dict[name]['list'])
                    except KeyError:
                        raise TypeError(
                            'The space file does not meet the format '
                            'requirements when parsing grid_search type.')
                else:
                    raise TypeError(
                        'The space file does not meet the format '
                        'requirements when parsing an undefined type.')
    else:
        raise FileNotFoundError(
            'The space file {}.json is not found. Please ensure the config '
            'file is in the root dir and is a JSON file.'.format(space_file))
    return search_space
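For reference, a space file this parser accepts would look like the following (hypothetical space.json; the field names are exactly the ones the code reads):

{
    "lr": {"type": "uniform", "lower": 0.0001, "upper": 0.01},
    "dropout": {"type": "randn", "mean": 0.3, "sd": 0.05},
    "num_layers": {"type": "randint", "lower": 1, "upper": 4},
    "hidden_size": {"type": "choice", "list": [64, 128, 256]},
    "batch_size": {"type": "grid_search", "list": [32, 64]}
}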
Example #6
def create_next(client):
    '''A stateless API for HPO
    '''
    state = client.get_state()
    setting = client.get_settings_dict()
    if state is None:
        # first time call
        try:
            from ray.tune import (uniform, quniform, choice, randint,
                                  qrandint, randn, qrandn, loguniform,
                                  qloguniform)
            from ray.tune.trial import Trial
        except ImportError:
            from ..tune.sample import (uniform, quniform, choice, randint,
                                       qrandint, randn, qrandn, loguniform,
                                       qloguniform)
            from ..tune.trial import Trial
        method = setting.get('method', 'BlendSearch')
        mode = client.get_optimization_mode()
        if mode == 'minimize':
            mode = 'min'
        elif mode == 'maximize':
            mode = 'max'
        metric = client.get_primary_metric()
        hp_space = client.get_hyperparameter_space_dict()
        space = {}
        for key, value in hp_space.items():
            t = value["type"]
            if t == 'continuous':
                space[key] = uniform(value["min_val"], value["max_val"])
            elif t == 'discrete':
                space[key] = choice(value["values"])
            elif t == 'integral':
                space[key] = randint(value["min_val"], value["max_val"])
            elif t == 'quantized_continuous':
                space[key] = quniform(value["min_val"], value["max_val"],
                                      value["step"])
        init_config = setting.get('init_config', None)
        if init_config:
            points_to_evaluate = [init_config]
        else:
            points_to_evaluate = None
        cat_hp_cost = setting.get('cat_hp_cost', None)

        if method == 'BlendSearch':
            Algo = BlendSearch
        elif method == 'CFO':
            Algo = CFO
        else:
            # guard against a NameError further down for unknown settings
            raise ValueError('unsupported method: {}'.format(method))
        algo = Algo(
            mode=mode,
            metric=metric,
            space=space,
            points_to_evaluate=points_to_evaluate,
            cat_hp_cost=cat_hp_cost,
        )
        time_budget_s = setting.get('time_budget_s', None)
        if time_budget_s:
            algo._deadline = time_budget_s + time.time()
        config2trialid = {}
    else:
        algo = state['algo']
        config2trialid = state['config2trialid']
    # update finished trials
    trials_completed = []
    for trial in client.get_trials():
        if trial.end_time is not None:
            signature = algo._ls.config_signature(trial.hp_sample)
            if not algo._result[signature]:
                trials_completed.append((trial.end_time, trial))
    trials_completed.sort()
    for t in trials_completed:
        end_time, trial = t
        trial_id = config2trialid[trial.hp_sample]
        result = {}
        result[algo.metric] = trial.metrics[algo.metric].values[-1]
        result[algo.cost_attr] = (end_time - trial.start_time).total_seconds()
        for key, value in trial.hp_sample.items():
            result['config/'+key] = value
        algo.on_trial_complete(trial_id, result=result)
    # propose new trial
    trial_id = Trial.generate_id()
    config = algo.suggest(trial_id)
    if config:
        config2trialid[config] = trial_id
        client.launch_trial(config)
    client.update_state({'algo': algo, 'config2trialid': config2trialid})
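Because create_next persists the searcher through client.update_state, each call replays newly finished trials into the algorithm and proposes at most one new trial. A driver would therefore just call it in a loop; the client object and its stopping check below are hypothetical:

# `client` must implement the methods used above (get_state,
# get_settings_dict, get_trials, launch_trial, update_state, ...).
while not client.is_done():  # hypothetical stopping condition
    create_next(client)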
Example #7
    # Head of this call was truncated in the source snippet; reconstructed
    # minimally. metric/mode match the get_best_config call at the end.
    pbt_scheduler = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="loss",
        mode="min",
        hyperparam_mutations={
            # distribution for resampling
            "lr": lambda: np.random.uniform(0.001, 1),
            # allow perturbations within this set of categorical values
            "momentum": [0.8, 0.9, 0.99],
        },
    )

    reporter = CLIReporter()
    reporter.add_metric_column("loss", "loss")

    analysis = tune.run(
        Trainable,
        num_samples=4,
        config={
            "lr": tune.choice([0.001, 0.01, 0.1]),
            "momentum": 0.8,
            "batch_size": 128 * args.num_workers,
            "epochs": args.num_epochs,
            "test_mode": args.smoke_test,  # whether to to subset the data
        },
        stop={"training_iteration": 2 if args.smoke_test else 100},
        max_failures=3,  # used for fault tolerance
        checkpoint_freq=3,  # used for fault tolerance
        keep_checkpoints_num=1,  # used for fault tolerance
        verbose=2,
        progress_reporter=reporter,
        scheduler=pbt_scheduler,
    )

    print(analysis.get_best_config(metric="loss", mode="min"))
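Trainable is assumed to be defined earlier in the original file. A minimal class-based stand-in consistent with the config keys, the "loss" metric, and the checkpointing PBT relies on (a sketch, not the original code):

import json
import os

from ray import tune


class Trainable(tune.Trainable):
    def setup(self, config):
        self.lr = config["lr"]
        self.momentum = config["momentum"]

    def step(self):
        # Hypothetical metric: pretend the loss decays over iterations
        return {"loss": 1.0 / (1.0 + self.lr * self.training_iteration)}

    def save_checkpoint(self, checkpoint_dir):
        # PBT's exploit step relies on checkpoints being saved
        with open(os.path.join(checkpoint_dir, "state.json"), "w") as f:
            json.dump({"lr": self.lr, "momentum": self.momentum}, f)
        return checkpoint_dir

    def load_checkpoint(self, checkpoint_dir):
        with open(os.path.join(checkpoint_dir, "state.json")) as f:
            state = json.load(f)
        self.lr, self.momentum = state["lr"], state["momentum"]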
Example #8
)
from autogluon.multimodal.constants import (
    MODEL,
    DATA,
    OPTIMIZATION,
    ENVIRONMENT,
    BINARY,
    MULTICLASS,
)


@pytest.mark.parametrize(
    "hyperparameters, keys_to_filter, expected",
    [
        ({
            "model.abc": tune.choice(["a", "b"])
        }, ["model"], {}),
        ({
            "model.abc": tune.choice(["a", "b"])
        }, ["data"], {
            "model.abc": tune.choice(["a", "b"])
        }),
        ({
            "model.abc": "def"
        }, ["model"], {
            "model.abc": "def"
        }),
        (
            {
                "data.abc.def": tune.choice(["a", "b"]),
                "model.abc": "def",
    "predict_and_get_currency_data_from_web_pound": {
        "task": "predict_and_get_currency_data_from_web_pound",
        "schedule": 60,
    },
    "fit_time_series_model_pound": {
        "task": "fit_time_series_model_pound",
        "schedule": 900,
    },
    "optimize_time_series_model_pound": {
        "task": "optimize_time_series_model_pound",
        "schedule": 3780,
    },
}

SEARCH_SPACE = {
    "gru_units": tune.choice(list(range(20, 120))),
    "rec_dropout": tune.uniform(0.0, 0.4),
    "batch_size": tune.choice([8, 12, 16, 20, 24]),
}

TRAINING_PARAMETER_LIST = ["batch_size"]

CURRENCY_PREDICTOR_COMMON_DICT = {
    # data_from_web
    "get_currency_data":
    webscrap_currency_data_bid_and_time_from_investing_com_bid,
    "pred_fit_optim_time_offset_tuple": (12, 10, 5),
    # fitting_predicting
    "n_points_model": 180,
    "n_points_training": 120,
    "n_steps": 10,
Example #10
    def _setup(self, config):
        [X_train, X_test, y_train, y_test] = get_pinned_object(data_id)

        # `curfc` is assumed to be cuML's RandomForestClassifier,
        # aliased at import time in the original script.
        self.cuml_model = curfc(n_estimators=config.get("estimators", 40),
                                max_depth=config.get("depth", 16),
                                max_features=1.0)
        self.X_cudf_train = cudf.DataFrame.from_pandas(X_train)
        self.X_cudf_test = cudf.DataFrame.from_pandas(X_test)
        self.y_cudf_train = cudf.Series(y_train.values)
        self.y_test = y_test

    def _train(self):
        self.cuml_model.fit(self.X_cudf_train, self.y_cudf_train)
        fil_preds_orig = self.cuml_model.predict(self.X_cudf_test)
        return {"mean_accuracy": accuracy_score(self.y_test, fil_preds_orig)}

    def _stop(self):
        del self.X_cudf_train
        del self.X_cudf_test
        del self.y_cudf_train
        del self.y_test
        del self.cuml_model


analysis = tune.run(CUMLTrainable,
                    resources_per_trial={"gpu": 0.3},
                    num_samples=20,
                    config={"depth": tune.choice(list(range(8, 24)))},
                    stop={"training_iteration": 1},
                    verbose=1)
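The trainable reads its dataset through get_pinned_object(data_id), so data_id must be created before tune.run. A sketch of that setup, assuming Ray's legacy object-pinning helpers and a synthetic scikit-learn dataset in place of the real one:

import pandas as pd
import ray
from ray.tune.utils import pin_in_object_store
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

ray.init()
# Synthetic stand-in for the real dataset (hypothetical)
X, y = make_classification(n_samples=10_000, n_features=20)
X_train, X_test, y_train, y_test = train_test_split(
    pd.DataFrame(X), pd.Series(y), test_size=0.2)
# Pin once in the object store; each trial fetches it in _setup()
data_id = pin_in_object_store([X_train, X_test, y_train, y_test])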