예제 #1
0
import os
from models import dual_encoder_trainer
from test_tube import HyperOptArgumentParser

# hyperparameter parser: 'random_search' samples trials uniformly from the
# option lists declared below
parser = HyperOptArgumentParser(strategy='random_search')

# --------------------------
# build program arguments
# --------------------------

# search space: tunnable=True puts the argument's options into the search;
# the default is used when no trial value overrides it
parser.add_opt_argument_list('--lr_1', default=0.0001, options=[0.0001, 0.0002, 0.0004, 0.0008, 0.001, 0.002], type=float, tunnable=True)
# NOTE(review): default 10 is not in the options list — confirm the intent
parser.add_opt_argument_list('--batch_size', default=10, options=[20, 30, 40, 50], type=int, tunnable=True)
parser.add_opt_argument_list('--embedding_dim', default=320, options=[100, 200, 320, 400], type=int, tunnable=True)
parser.add_opt_argument_list('--max_seq_len', default=50, options=[50, 70, 90, 110], type=int, tunnable=True)

# training params
parser.add_argument('--nb_epochs', default=30, type=int)
parser.add_argument('--optimizer_name', default='adam')
parser.add_argument('--eval_every_n_batches', default=200, type=int)
parser.add_argument('--train_mode', default='train')

# model params
parser.add_argument('--nb_grams', default=2, type=int)

# path params (empty defaults — presumably supplied via CLI; verify at call site)
parser.add_argument('--root_dir', default='')
parser.add_argument('--dataset_train_path', default='')
parser.add_argument('--dataset_test_path', default='')
parser.add_argument('--dataset_val_path', default='')
parser.add_argument('--vocab_path', default='')
예제 #2
0
# build a wrapper around a tng function so we can use the correct gpu
# the optimizer passes in the hyperparams and the job index as arguments
# to the function to optimize
def parallelize_on_gpus(trial_params, job_index_nb):
    """Run one hyperparameter trial pinned to a single GPU.

    :param trial_params: hyperparameter set chosen by the optimizer for this trial
    :param job_index_nb: 0-based index of this job; selects the GPU and staggers start-up
    """
    from time import sleep
    # stagger process start-up: job k waits k seconds before launching
    sleep(job_index_nb * 1)  # Time in seconds.

    # NOTE(review): '3' is skipped and '4' used — presumably this machine's GPU
    # layout; confirm against the actual hardware.
    GPUs = ['0', '1', '2', '4']
    # wrap the index so a job index >= len(GPUs) cannot raise IndexError
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUs[job_index_nb % len(GPUs)]
    main_trainer(trial_params)


if __name__ == '__main__':
    # hyperparameter search space; tunnable=False freezes each arg at its default
    parser = HyperOptArgumentParser(strategy='random_search')
    parser.add_opt_argument_list('--batch_size', default=128, options=[128, 256], type=int, tunnable=False)
    parser.add_opt_argument_list('--dense_1_nb_units', default=30, options=[30, 60], type=int, tunnable=False)
    # NOTE(review): default 18 is not in the options list — harmless while
    # tunnable=False (the default is used directly), but confirm the intent
    parser.add_opt_argument_list('--dense_2_nb_units', default=18, options=[30, 60], type=int, tunnable=False)
    parser.add_argument('--dense_3_nb_units', default=2, type=int)
    parser.add_opt_argument_list('--dropout', default=0.7, options=[0.2, 0.5, 0.7], type=float, tunnable=False)
    parser.add_opt_argument_list('--learning_rate', default=0.0001, options=[0.0001, 0.001, 0.01], type=float, tunnable=False)
    parser.add_opt_argument_list('--time_steps', default=3, options=[3, 5, 7, 9, 11], type=int, help='number of sequential readings', tunnable=False)

    # model params
    # bug fix: type=float (was int) — the default/options are float literals
    # (1e5, ...) and int('1e5') raises ValueError for a command-line value
    parser.add_opt_argument_list('--C', default=1e5, options=[1e5, 1e4, 1e3, 1e2, 1], type=float, tunnable=False)
    parser.add_argument('--nb_trials', default=200, type=int)
    parser.add_argument('--nb_epochs', default=600, type=int)
    parser.add_argument('--nb_classes', default=2, type=int)
    parser.add_argument('--loss', default='binary_crossentropy', type=str)

    # path vars
# build a wrapper around a tng function so we can use the correct gpu
# the optimizer passes in the hyperparams and the job index as arguments
# to the function to optimize
def parallelize_on_gpus(trial_params, job_index_nb):
    """Run one hyperparameter trial pinned to a single GPU.

    :param trial_params: hyperparameter set chosen by the optimizer for this trial
    :param job_index_nb: 0-based index of this job; selects the GPU and staggers start-up
    """
    from time import sleep
    # stagger process start-up: job k waits k seconds before launching
    sleep(job_index_nb * 1)  # Time in seconds.

    # NOTE(review): '3' is skipped and '4' used — presumably this machine's GPU
    # layout; confirm against the actual hardware.
    GPUs = ['0', '1', '2', '4']
    # wrap the index so a job index >= len(GPUs) cannot raise IndexError
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUs[job_index_nb % len(GPUs)]
    main_trainer(trial_params)


if __name__ == '__main__':
    # random_search: each trial samples one value from every tunnable option list
    parser = HyperOptArgumentParser(strategy='random_search')
    # fixed (tunnable=False): always uses the default
    parser.add_opt_argument_list('--time_steps',
                                 default=3,
                                 options=[3, 5, 7, 9, 11],
                                 type=int,
                                 help='number of sequential readings',
                                 tunnable=False)
    parser.add_opt_argument_list('--nb_rnn_units_l1',
                                 default=128,
                                 options=[50, 64, 128],
                                 type=int,
                                 tunnable=True)
    # NOTE(review): default 9 is not in the options list — confirm the intent
    parser.add_opt_argument_list('--nb_rnn_units_l2',
                                 default=9,
                                 options=[50, 64, 128],
                                 type=int,
                                 tunnable=True)
    # NOTE(review): snippet is truncated here — this call has no closing paren
    parser.add_opt_argument_list('--drop_rate',
                                 default=0.2,
                                 options=[0.2, 0.5, 0.7],
# build a wrapper around a tng function so we can use the correct gpu
# the optimizer passes in the hyperparams and the job index as arguments
# to the function to optimize
def parallelize_on_gpus(trial_params, job_index_nb):
    """Run one hyperparameter trial pinned to a single GPU.

    :param trial_params: hyperparameter set chosen by the optimizer for this trial
    :param job_index_nb: 0-based index of this job; selects the GPU and staggers start-up
    """
    from time import sleep
    # stagger process start-up: job k waits k seconds before launching
    sleep(job_index_nb * 1)  # Time in seconds.

    # NOTE(review): '3' is skipped and '4' used — presumably this machine's GPU
    # layout; confirm against the actual hardware.
    GPUs = ['0', '1', '2', '4']
    # wrap the index so a job index >= len(GPUs) cannot raise IndexError
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUs[job_index_nb % len(GPUs)]
    main_trainer(trial_params)


if __name__ == '__main__':
    # hyperparameter search space for the random-forest run
    parser = HyperOptArgumentParser(strategy='random_search')
    parser.add_opt_argument_list('--time_steps', default=3, options=[3, 5, 7, 9, 11], type=int, help='number of sequential readings', tunnable=False)

    # model params
    parser.add_opt_argument_list('--nb_estimators', default=5, options=[5, 10, 20, 50, 100], type=int, tunnable=False)
    parser.add_argument('--nb_trials', default=200, type=int)

    # path vars
    parser.add_argument('--data_path', default='/Users/waf/Developer/temp_floor/floor/data/in_out_classifier_data/data')
    parser.add_argument('--model_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/weights/random_forest')
    parser.add_argument('--tt_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/training_logs/random_forest')
    parser.add_argument('--tt_name', default='random_forest_final_2')
    parser.add_argument('--tt_description', default='hyperopt')
    # bug fix: argparse's type=bool treats ANY non-empty string (even "False")
    # as True; parse the common true spellings explicitly instead
    str2bool = lambda v: str(v).lower() in ('1', 'true', 'yes', 'y')
    parser.add_argument('--debug', default=False, type=str2bool)
    parser.add_argument('--local', default=True, type=str2bool)
    parser.add_json_config_argument('--config', default='/Users/waf/Developer/temp_floor/floor/logs/run_configs/local.json')
예제 #5
0
    # NOTE(review): headerless fragment — the enclosing function's signature and
    # the definitions of tf, out, x, y, hparams, exp are outside this snippet.
    sess = tf.Session()

    # Run the tf op
    for train_step in range(0, 100):
        # feed the (possibly tuned) x_val / y_val hyperparams into the graph
        output = sess.run(out, feed_dict={x: hparams.x_val, y: hparams.y_val})
        # log one metric row per step to the test-tube experiment
        exp.add_metric_row({'fake_err': output})

    # save exp when we're done
    exp.save()


# set up our argparser and make the y_val tunnable
parser = HyperOptArgumentParser(strategy='random_search')
parser.add_argument('--path', default='some/path')
# NOTE(review): default 12 is not in the options list — confirm the intent
parser.add_opt_argument_list('--y_val',
                             default=12,
                             options=[1, 2, 3, 4],
                             tunnable=True)
parser.add_opt_argument_list('--x_val',
                             default=12,
                             options=[20, 12, 30, 45],
                             tunnable=True)
hyperparams = parser.parse_args()

# optimize on 4 gpus at the same time
# each gpu will get 1 experiment with a set of hyperparams
# (gpu_ids order decides which CUDA device each of the 4 workers is pinned to)
hyperparams.optimize_parallel_gpu_cuda(train,
                                       gpu_ids=['1', '0', '3', '2'],
                                       nb_trials=4,
                                       nb_workers=4)
예제 #6
0
# build a wrapper around a tng function so we can use the correct gpu
# the optimizer passes in the hyperparams and the job index as arguments
# to the function to optimize
def parallelize_on_gpus(trial_params, job_index_nb):
    """Run one hyperparameter trial pinned to a single GPU.

    :param trial_params: hyperparameter set chosen by the optimizer for this trial
    :param job_index_nb: 0-based index of this job; selects the GPU and staggers start-up
    """
    from time import sleep
    # stagger process start-up: job k waits k seconds before launching
    sleep(job_index_nb * 1)  # Time in seconds.

    # NOTE(review): '3' is skipped and '4' used — presumably this machine's GPU
    # layout; confirm against the actual hardware.
    GPUs = ['0', '1', '2', '4']
    # wrap the index so a job index >= len(GPUs) cannot raise IndexError
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUs[job_index_nb % len(GPUs)]
    main_trainer(trial_params)


if __name__ == '__main__':
    # hyperparameter search space for the SVM run
    parser = HyperOptArgumentParser(strategy='random_search')
    parser.add_opt_argument_list('--time_steps', default=3, options=[3, 5, 7, 9, 11], type=int, help='number of sequential readings', tunnable=False)

    # model params
    # bug fix: type=float (was int) — gamma's default/options are floats;
    # type=int would truncate them and int('0.00001') raises on CLI input
    parser.add_opt_argument_list('--gamma', default=0.00001, options=[0.00001], type=float, tunnable=False)
    parser.add_opt_argument_list('--kernel', default='rbf', options=['rbf'], type=str, tunnable=False)
    parser.add_opt_argument_list('--C', default=4, options=[4, 5, 6], type=int, tunnable=False)

    parser.add_argument('--nb_trials', default=200, type=int)

    # path vars
    parser.add_argument('--data_path', default='/Users/waf/Developer/temp_floor/floor/data/in_out_classifier_data/data')
    parser.add_argument('--model_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/weights/svm')
    parser.add_argument('--tt_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/training_logs/svm')
    parser.add_argument('--tt_name', default='svm_final_1')
    parser.add_argument('--tt_description', default='hyperopt')
    # bug fix: argparse's type=bool treats ANY non-empty string (even "False")
    # as True; parse the common true spellings explicitly instead
    parser.add_argument('--debug', default=False, type=lambda v: str(v).lower() in ('1', 'true', 'yes', 'y'))
예제 #7
0
from torch.optim import Adam

from utils import ParamDict, ReplayBuffer, Step, np_to_var

# experiment logger for the DDPG run; logs written under save_dir
exp = Experiment(name="ddpg", debug=False, save_dir="logs")

parser = HyperOptArgumentParser(strategy="random_search")

# fixed training-length / capacity params (not part of the search)
parser.add_argument("--buffer_size", default=1_000_000, type=int)
parser.add_argument("--num_steps", default=100_000, type=int)
parser.add_argument("--hidden_size", default=50, type=int)

# batch size searched over powers of two: 32, 64, 128, 256
parser.add_opt_argument_list(
    "--batch_size",
    default=64,
    type=int,
    tunnable=True,
    options=[2 ** i for i in range(5, 9)],
)

parser.add_opt_argument_list(
    "--discount",
    default=0.995,
    type=float,
    tunnable=True,
    options=[0.9, 0.99, 0.995, 0.999],
)

# NOTE(review): snippet is truncated here — the call below has no closing paren
parser.add_opt_argument_range(
    "--target_update",
    default=100,
예제 #8
0
# build a wrapper around a tng function so we can use the correct gpu
# the optimizer passes in the hyperparams and the job index as arguments
# to the function to optimize
def parallelize_on_gpus(trial_params, job_index_nb):
    """Run one hyperparameter trial pinned to a single GPU.

    :param trial_params: hyperparameter set chosen by the optimizer for this trial
    :param job_index_nb: 0-based index of this job; selects the GPU and staggers start-up
    """
    from time import sleep
    # stagger process start-up: job k waits k seconds before launching
    sleep(job_index_nb * 1)  # Time in seconds.

    # NOTE(review): '3' is skipped and '4' used — presumably this machine's GPU
    # layout; confirm against the actual hardware.
    GPUs = ['0', '1', '2', '4']
    # wrap the index so a job index >= len(GPUs) cannot raise IndexError
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUs[job_index_nb % len(GPUs)]
    main_trainer(trial_params)


if __name__ == '__main__':
    # hyperparameter search space for the logistic-regression run
    parser = HyperOptArgumentParser(strategy='random_search')
    parser.add_opt_argument_list('--time_steps', default=3, options=[3, 5, 7, 9, 11], type=int, help='number of sequential readings', tunnable=False)

    # model params
    # bug fix: type=float (was int) — the default/options are float literals
    # (1e5, ...) and int('1e5') raises ValueError for a command-line value
    parser.add_opt_argument_list('--C', default=1e5, options=[1e5, 1e4, 1e3, 1e2, 1], type=float, tunnable=False)
    parser.add_argument('--nb_trials', default=200, type=int)

    # path vars
    parser.add_argument('--data_path', default='/Users/waf/Developer/temp_floor/floor/data/in_out_classifier_data/data')
    parser.add_argument('--model_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/weights/logistic_regression')
    parser.add_argument('--tt_save_path', default='/Users/waf/Developer/temp_floor/floor/logs/training_logs/logistic_regression')
    parser.add_argument('--tt_name', default='logistic_regression_final_1')
    parser.add_argument('--tt_description', default='hyperopt')
    # bug fix: argparse's type=bool treats ANY non-empty string (even "False")
    # as True; parse the common true spellings explicitly instead
    str2bool = lambda v: str(v).lower() in ('1', 'true', 'yes', 'y')
    parser.add_argument('--debug', default=False, type=str2bool)
    parser.add_argument('--local', default=True, type=str2bool)
    parser.add_json_config_argument('--config', default='/Users/waf/Developer/temp_floor/floor/logs/run_configs/local.json')