Example #1
def run_example(FLAGS):
    """
    Run parallel Sherpa optimization over continuous hyperparameter ranges.
    """
    parameters = [
        sherpa.Continuous('log10_learning_rate', [-5, 0]),
        sherpa.Continuous('log2_batch_size', [5, 8]),
        sherpa.Continuous('log2_n_steps', [4, 11]),
        sherpa.Continuous('x_n_opt_epochs', [0, 7]),
        sherpa.Continuous('log10_entcoeff', [-8, -1]),
        sherpa.Continuous('x_gamma', [-4, -1]),
        sherpa.Continuous('cliprange', [0.1, 0.4]),
        sherpa.Continuous('lam', [0.8, 1.0])
    ]

    alg = sherpa.algorithms.RandomSearch(max_num_trials=300)
    alg = sherpa.algorithms.Repeat(alg, 25, agg=True)

    # Run on local machine.
    sched = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           lower_is_better=False,
                           filename='trial.py',
                           scheduler=sched,
                           verbose=0,
                           max_concurrent=FLAGS.max_concurrent,
                           disable_dashboard=True,
                           output_dir='./output_{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S")))
    print('Best results:')
    print(rval)
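The runner above only dispatches trials; the script it points at via filename='trial.py' has to fetch its hyperparameters and report the objective back. A minimal sketch of such a trial script, assuming the standard sherpa.Client interface (the real example trains a reinforcement-learning model; a dummy objective stands in here, and the log-transform decoding simply mirrors the parameter names above):

# trial.py -- minimal sketch, not the original script.
import sherpa

client = sherpa.Client()            # connect to the running Sherpa study
trial = client.get_trial()          # this trial's hyperparameter assignment

# Undo the log transforms implied by the parameter names above (assumption).
lr = 10 ** trial.parameters['log10_learning_rate']
batch_size = int(round(2 ** trial.parameters['log2_batch_size']))

objective = -abs(lr - 3e-4)         # placeholder for the real training reward
client.send_metrics(trial=trial, iteration=1, objective=objective)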
Example #2
def run_example(FLAGS):
    """
    Run parallel Sherpa Bayesian optimization (GPyOpt) over continuous and ordinal hyperparameters.
    """
    
    parameters = [sherpa.Continuous('learning_rate', [1e-5, 5e-1], 'log'),
                  sherpa.Continuous('decay', [1e-8, 1e-2], 'log'),
                  sherpa.Continuous('momentum', [0., 0.99]),
                  sherpa.Continuous('dropout', [0.0001, 0.7]),
                  sherpa.Ordinal('batch_size', [32, 64, 128, 256])]

    algorithm = bayesian_optimization.GPyOpt(max_concurrent=FLAGS.concurrent,
                                             model_type='GP',
                                             acquisition_type='EI',
                                             max_num_trials=100)

    # Run on local machine.
    scheduler = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=algorithm,
                           dashboard_port=FLAGS.port,
                           lower_is_better=False,
                           command='python fashion_mlp.py',
                           scheduler=scheduler,
                           verbose=0,
                           max_concurrent=FLAGS.concurrent,
                           output_dir='./output_gpyopt_{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S")))
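None of these snippets show their imports. Based on the public sherpa package layout, a header along the following lines should cover Examples #1 through #7 (exact module paths can vary between sherpa releases):

import time                                      # timestamped output_dir names

import sherpa
import sherpa.algorithms
from sherpa.algorithms import bayesian_optimization   # GPyOpt wrapper used above
from sherpa.schedulers import LocalScheduler, SGEScheduler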
Example #3
def run_example(FLAGS):
    """
    Run parallel Sherpa optimization with Population Based Training.
    """

    parameters = [
        sherpa.Continuous('learning_rate', [1e-5, 5e-1], 'log'),
        sherpa.Continuous('decay', [1e-8, 1e-2], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('dropout', [0.0001, 0.7]),
        sherpa.Ordinal('batch_size', [32, 64, 128, 256])
    ]

    algorithm = sherpa.algorithms.PopulationBasedTraining(
        num_generations=26,
        population_size=100,
        parameter_range={
            'learning_rate': [1e-10, 9e-1],
            'decay': [1e-10, 9e-1]
        },
        perturbation_factors=(0.8, 1.2))

    # Run on local machine.
    scheduler = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=algorithm,
                           dashboard_port=FLAGS.port,
                           lower_is_better=False,
                           command='python fashion_mlp.py',
                           scheduler=scheduler,
                           verbose=0,
                           max_concurrent=FLAGS.concurrent,
                           output_dir='./output_pbt_{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S")))
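Population Based Training only works if the trial script checkpoints its weights so that successor trials can resume from their parents. A sketch of that pattern, assuming this sherpa version injects 'load_from' and 'save_to' keys into trial.parameters (verify against your release) and using a hypothetical pickle checkpoint:

# PBT checkpointing inside the trial script -- sketch only.
import os
import pickle
import sherpa

client = sherpa.Client()
trial = client.get_trial()
ckpt_dir = './checkpoints'                       # hypothetical checkpoint location
os.makedirs(ckpt_dir, exist_ok=True)

state = {'weights': None}
if trial.parameters.get('load_from', ''):        # resume from the parent trial
    with open(os.path.join(ckpt_dir, trial.parameters['load_from']), 'rb') as f:
        state = pickle.load(f)

# ... train for one generation here, updating state['weights'] ...
val_acc = 0.0                                    # placeholder objective
client.send_metrics(trial=trial, iteration=1, objective=val_acc)

with open(os.path.join(ckpt_dir, trial.parameters['save_to']), 'wb') as f:
    pickle.dump(state, f)                        # checkpoint for the next generation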
Example #4
def run_example(FLAGS):
    """
    Run parallel Sherpa optimization over continuous hyperparameter ranges.
    """
    parameters = [
        sherpa.Continuous('lrinit', [0.001, 0.1], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('lrdecay', [1e-7, 1e-2], 'log'),
        sherpa.Continuous('dropout', [0., 0.5])
    ]

    if FLAGS.algorithm == 'BayesianOptimization':
        print('Running GPyOpt')
        alg = bayesian_optimization.GPyOpt(max_concurrent=FLAGS.max_concurrent,
                                           model_type='GP_MCMC',
                                           acquisition_type='EI_MCMC',
                                           max_num_trials=150)
    elif FLAGS.algorithm == 'LocalSearch':
        print('Running Local Search')
        alg = sherpa.algorithms.LocalSearch(
            seed_configuration={'lrinit': 0.038,
                                'momentum': 0.92,
                                'lrdecay': 0.0001,
                                'dropout': 0.},
            perturbation_factors=(0.9, 1.1))
    else:
        print('Running Random Search')
        alg = sherpa.algorithms.RandomSearch(max_num_trials=150)

    if FLAGS.sge:
        assert FLAGS.env, "For SGE use, you need to set an environment path."
        # Submit to SGE queue.
        env = FLAGS.env  # Script specifying environment variables.
        opt = '-N MNISTExample -P {} -q {} -l {}'.format(
            FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=env, submit_options=opt)
    else:
        # Run on local machine.
        sched = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           lower_is_better=True,
                           filename='trial.py',
                           output_dir='output_{}'.format(FLAGS.studyname),
                           scheduler=sched,
                           max_concurrent=FLAGS.max_concurrent)
    print('Best results:')
    print(rval)
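This example reads several attributes off FLAGS (algorithm, max_concurrent, sge, env, P, q, l, studyname) but the snippets never show how FLAGS is built. A hypothetical argparse setup that matches those attribute names would look like:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='RandomSearch',
                    help='BayesianOptimization, LocalSearch, or RandomSearch')
parser.add_argument('--max_concurrent', type=int, default=4)
parser.add_argument('--studyname', default='mnist')
parser.add_argument('--sge', action='store_true', help='submit trials to an SGE queue')
parser.add_argument('--env', default='', help='script that sets environment variables')
parser.add_argument('--P', default='', help='SGE project name')
parser.add_argument('--q', default='', help='SGE queue name')
parser.add_argument('--l', default='', help='SGE resource request string')
FLAGS = parser.parse_args()

run_example(FLAGS)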
Example #5
def run_example(FLAGS):
    """
    Run parallel Sherpa random search over discrete and continuous hyperparameters.
    """
    # Defaults used by the trial script (kept here for reference):
    #   max_features=20000, batch_size=32, hidden_dim=128,
    #   dropout=0.2, recurrent_dropout=0.2, optimizer='adam'

    parameters = [
        sherpa.Discrete('max_features', [15000, 40000]),
        sherpa.Discrete('batch_size', [10, 150], 'log'),
        sherpa.Discrete('hidden_dim', [100, 500], 'log'),
        sherpa.Continuous('dropout_embedding', [0.0001, 0.5]),
        sherpa.Continuous('dropout_lstm', [0.0001, 0.5]),
        sherpa.Continuous('embedding_regularizer', [1e-12, 1e-6], 'log'),
        sherpa.Continuous('kernel_regularizer', [1e-8, 1e-0], 'log'),
        sherpa.Continuous('recurrent_regularizer', [1e-8, 1e-0], 'log'),
        sherpa.Continuous('lr', [5e-4, 5e-3], scale='log'),
        sherpa.Continuous('decay', [1e-10, 1e-5], scale='log'),
        sherpa.Continuous('rho', [0.5, 0.99])
    ]
    
    
    print('Running Random Search')
    alg = sherpa.algorithms.RandomSearch(max_num_trials=200, repeat=25)

    if FLAGS.sge:
        assert FLAGS.env, "For SGE use, you need to set an environment path."
        # Submit to SGE queue.
        env = FLAGS.env  # Script specifying environment variables.
        opt = '-N LSTMSearch -P {} -q {} -l {} -l gpu=1'.format(FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=env, submit_options=opt)
    else:
        # Run on local machine.
        resources = [int(x) for x in FLAGS.gpus.split(',')]
        sched = LocalScheduler(resources=resources)

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           lower_is_better=False,
                           filename='trials.py',
                           scheduler=sched,
                           verbose=0,
                           max_concurrent=FLAGS.max_concurrent,
                           output_dir='./output_imdb_{}_{}_gpu-{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S"),
                               FLAGS.name,
                               FLAGS.gpus.replace(',', '-')))
    print('Best results:')
    print(rval)
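When running locally, LocalScheduler(resources=...) hands each trial one of the listed GPU ids, and the trial script has to pick that id up and pin itself to the GPU. The sketch below assumes the scheduler exposes the assigned resource through the SHERPA_RESOURCE environment variable; check the variable name against your sherpa version.

# GPU pinning inside trials.py -- sketch only.
import os
import sherpa

gpu_id = os.environ.get('SHERPA_RESOURCE', '0')   # resource assigned by LocalScheduler (assumed)
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)  # restrict this trial to one GPU

client = sherpa.Client()
trial = client.get_trial()
lr = trial.parameters['lr']

# ... build and train the LSTM here, reporting the objective each epoch ...
client.send_metrics(trial=trial, iteration=1, objective=0.0)  # placeholder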
Example #6
def run_example(FLAGS):
    """
    Run parallel Sherpa optimization over a set of discrete hp combinations.
    """
    # Iterate algorithm accepts dictionary containing lists of possible values.
    hp_space = {
        'act': ['tanh', 'relu'],
        'lrinit': [0.1, 0.01],
        'momentum': [0.0],
        'lrdecay': [0.0],
        'arch': [[20, 5], [20, 10], [10, 10, 10]],
        'epochs': [20],
    }
    parameters = sherpa.Parameter.grid(hp_space)

    alg = sherpa.algorithms.GridSearch()
    stopping_rule = sherpa.algorithms.MedianStoppingRule(min_iterations=10,
                                                         min_trials=5)
    f = './bianchini.py'  # Python script to run.
    output_dir = './output'  # All output files are written here.

    if not FLAGS.local:
        # Submit to SGE queue.
        env = FLAGS.env  # Script specifying environment variables.
        opt = '-N example -P {} -q {} -l {}'.format(FLAGS.P, FLAGS.q, FLAGS.l)
        sched = SGEScheduler(environment=env,
                             submit_options=opt,
                             output_dir=output_dir)
    else:
        # Run on local machine.
        sched = LocalScheduler()  # Run on local machine without SGE.

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=alg,
                           stopping_rule=stopping_rule,
                           output_dir=output_dir,
                           lower_is_better=True,
                           filename=f,
                           scheduler=sched,
                           max_concurrent=FLAGS.max_concurrent)
    print()
    print('Best results:')
    print(rval)
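The MedianStoppingRule can only cut a trial short if the trial reports its objective every iteration rather than once at the end. A minimal reporting loop for bianchini.py, assuming the sherpa.Client interface and a placeholder loss:

import sherpa

client = sherpa.Client()
trial = client.get_trial()

for iteration in range(1, trial.parameters['epochs'] + 1):
    loss = 1.0 / iteration           # placeholder for the real training loss
    # Report every iteration so the median stopping rule can act mid-training.
    client.send_metrics(trial=trial, iteration=iteration, objective=loss)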
Example #7
def run_example(FLAGS):
    """
    Run parallel Sherpa optimization with asynchronous successive halving.
    """

    parameters = [
        sherpa.Continuous('learning_rate', [1e-5, 5e-1], 'log'),
        sherpa.Continuous('decay', [1e-8, 1e-2], 'log'),
        sherpa.Continuous('momentum', [0., 0.99]),
        sherpa.Continuous('dropout', [0.0001, 0.7]),
        sherpa.Ordinal('batch_size', [32, 64, 128, 256])
    ]

    # 27 epochs to produce one finished 13-epoch model
    # => 54 epochs to produce one finished 26-epoch model (resource unit = 2 epochs)
    # Random search uses 100 models x 26 epochs = 2600 epoch-models.
    # ASHA can therefore produce 2600 epoch-models // 54 epochs = 48 finished models.
    algorithm = sherpa.algorithms.SuccessiveHalving(r=1,
                                                    R=9,
                                                    eta=3,
                                                    s=0,
                                                    max_finished_configs=48)

    # Run on local machine.
    scheduler = LocalScheduler()

    rval = sherpa.optimize(parameters=parameters,
                           algorithm=algorithm,
                           dashboard_port=FLAGS.port,
                           lower_is_better=False,
                           command='python fashion_mlp.py',
                           scheduler=scheduler,
                           verbose=0,
                           max_concurrent=FLAGS.concurrent,
                           output_dir='./output_successive_halving_{}'.format(
                               time.strftime("%Y-%m-%d--%H-%M-%S")))
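Successive halving hands each trial a training budget and promotes the best configurations to larger budgets, so the trial script must train for its assigned budget and checkpoint for possible promotion. The sketch below assumes this sherpa version exposes 'resource', 'load_from' and 'save_to' in trial.parameters and that one resource unit equals two epochs, per the comment above; verify both against your release.

# ASHA-style trial sketch for fashion_mlp.py -- assumptions noted above.
import sherpa

client = sherpa.Client()
trial = client.get_trial()

epochs = 2 * int(trial.parameters.get('resource', 1))   # resource unit = 2 epochs (assumed)
if trial.parameters.get('load_from', ''):
    pass  # reload the checkpoint written at the previous rung here

for epoch in range(1, epochs + 1):
    val_acc = 0.5 + 0.01 * epoch     # placeholder for real validation accuracy
    client.send_metrics(trial=trial, iteration=epoch, objective=val_acc)

# save a checkpoint under trial.parameters['save_to'] here for promoted trials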