Example #1
import os

from crayai import hpo


def main():

    args = parse_args()

    # Hyperparameters
    params = hpo.Params([
        ['--hidden-dim', 32, [16, 32, 64, 128, 256]],
        ['--n-edge-layers', 4, [1, 2, 4, 8]],
        ['--n-node-layers', 4, [1, 2, 4, 8]],
        ['--weight-decay', 1.e-4, (0., 1e-3)],
        ['--n-graph-iters', 8, (4, 16)],
        ['--real-weight', 3., (1., 6.)],
        ['--lr', 0.001, [1e-5, 1e-4, 1e-3, 1e-2]],
    ])

    # Define the command to be run by the evaluator
    output_dir = "'${SCRATCH}/heptrkx/results/hpo_%s_${SLURM_JOB_ID}_${SLURM_STEP_ID}'" % (
        os.path.basename(args.config).split('.')[0])
    cmd = ('python -u train.py %s' % args.config +
           ' --rank-gpu -d ddp-file --fom best' +
           ' --n-epochs %i' % args.epochs + ' --output-dir %s' % output_dir)

    # SLURM options
    n_nodes = (args.nodes if args.nodes is not None else int(
        os.environ['SLURM_JOB_NUM_NODES']))
    n_tasks_per_node = (args.ntasks_per_node
                        if args.ntasks_per_node is not None else int(
                            os.environ['SLURM_NTASKS_PER_NODE']))
    n_tasks_per_eval = args.nodes_per_eval * n_tasks_per_node
    alloc_args = ('-J hpo %s --time %s' % (args.alloc_args, args.time) +
                  ' --ntasks-per-node %i' % n_tasks_per_node)
    launch_args = '-n %i -u' % n_tasks_per_eval

    # Define the evaluator
    evaluator = hpo.Evaluator(cmd,
                              nodes=n_nodes,
                              workload_manager='slurm',
                              alloc_args=alloc_args,
                              launch_args=launch_args,
                              nodes_per_eval=args.nodes_per_eval,
                              verbose=True)

    # Random search optimizer
    if args.alg == 'random':
        optimizer = hpo.RandomOptimizer(evaluator, num_iters=args.iters)

    # Genetic search optimizer
    else:
        results_file = 'hpo.log'
        optimizer = hpo.GeneticOptimizer(evaluator,
                                         pop_size=args.pop_size,
                                         num_demes=args.demes,
                                         generations=args.generations,
                                         mutation_rate=args.mutation_rate,
                                         crossover_rate=args.crossover_rate,
                                         verbose=True,
                                         log_fn=results_file)

    # Run the Optimizer
    optimizer.optimize(params)
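
The snippet assumes a local parse_args() helper that the excerpt does not show. A minimal sketch of what it would have to expose, inferred purely from the attributes main() reads (every flag default below is an assumption):

import argparse

def parse_args():
    """Hypothetical parser covering every attribute main() reads."""
    p = argparse.ArgumentParser()
    p.add_argument('config', help='training config YAML')
    p.add_argument('--epochs', type=int, default=64)
    p.add_argument('--nodes', type=int, default=None)
    p.add_argument('--ntasks-per-node', type=int, default=None)
    p.add_argument('--nodes-per-eval', type=int, default=1)
    p.add_argument('--alloc-args', default='')
    p.add_argument('--time', default='4:00:00')
    p.add_argument('--alg', choices=['random', 'genetic'], default='genetic')
    p.add_argument('--iters', type=int, default=16)
    p.add_argument('--pop-size', type=int, default=16)
    p.add_argument('--demes', type=int, default=1)
    p.add_argument('--generations', type=int, default=4)
    p.add_argument('--mutation-rate', type=float, default=0.05)
    p.add_argument('--crossover-rate', type=float, default=0.33)
    return p.parse_args()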
Example #2
from crayai import hpo


def main():
    args = parse_args()

    # Set up evaluator
    # configs/hpo_cifar10_cnn.yaml is a scaled-down version of the cifar10 config
    # --hpo is required for train.py to print the FoM
    # --no-output is required to avoid checkpointing
    eval_cmd = 'python ./train.py configs/hpo_cifar10_cnn.yaml --hpo --no-output'
    evaluator = hpo.Evaluator(eval_cmd,
                              nodes=args.num_nodes,
                              verbose=args.verbose)

    # Set up search space for HPs: learning rate and dropout
    params = hpo.Params([['--optimizer lr', 0.001, (1e-6, 1)],
                         ['--dropout', 0.1, (0.0, 0.5)]])

    # Set up genetic optimizer with 8 evaluations/generation and 4 generations
    optimizer = hpo.GeneticOptimizer(evaluator,
                                     generations=4,
                                     pop_size=8,
                                     num_demes=1,
                                     mutation_rate=0.6,
                                     verbose=args.verbose)

    # Optimize the hyperparameters
    optimizer.optimize(params)

    # Print the figure of merit value for the best set of hyperparameters
    print(optimizer.best_fom)
    # Print the best set of hyperparameters found
    print(optimizer.best_params)
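
As the comments note, --hpo makes train.py print the figure of merit, which the evaluator scrapes from the command's stdout. A minimal sketch of that contract, assuming the expected format is a line reading FoM: <value> (check your crayai version's documentation):

# Hypothetical tail of train.py's --hpo code path.
if args.hpo:
    # crayai minimizes the FoM, so report a loss rather than an accuracy.
    val_loss = evaluate(model, valid_loader)  # assumed helper
    print('FoM: %f' % val_loss)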
Example #3
import os
import time

from crayai import hpo


def main():

    args = parse_args()

    # Hardcode some config
    #n_nodes = 4 #32
    #config_file = 'configs/test.yaml'
    #pop_size = 2 #16
    #n_demes = 2 #4
    #n_generations = 4
    #mutation_rate = 0.05
    #crossover_rate = 0.33
    #alloc_args='-J hpo -C haswell -q interactive -t 4:00:00'
    #checkpoint_dir = 'checkpoints'

    # Hyperparameters
    params = hpo.Params([['--lr', 0.001, (1e-6, 0.1)],
                         ['--n-graph-iters', 4, (1, 16)],
                         ['--real-weight', 3., (1., 6.)]])

    # Define the command to be run by the evaluator
    cmd = 'python train.py %s' % args.config
    cmd += ' --fom last --n-epochs 1 --resume --output-dir @checkpoint'

    # Define the evaluator
    result_dir = os.path.expandvars('$SCRATCH/heptrkx/results/pbt_%s' %
                                    time.strftime('%Y%m%d_%H%M%S'))
    evaluator = hpo.Evaluator(cmd,
                              run_path=result_dir,
                              nodes=args.nodes,
                              launcher='wlm',
                              verbose=True,
                              nodes_per_eval=args.nodes_per_eval,
                              checkpoint='checkpoints',
                              alloc_args=args.alloc_args)

    # Define the Optimizer
    optimizer = hpo.GeneticOptimizer(evaluator,
                                     pop_size=args.pop_size,
                                     num_demes=args.demes,
                                     generations=args.generations,
                                     mutation_rate=args.mutation_rate,
                                     crossover_rate=args.crossover_rate,
                                     verbose=True)

    # Run the Optimizer
    optimizer.optimize(params)
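
The @checkpoint token in the command is replaced by the evaluator with a per-genome checkpoint directory (here rooted under checkpoints/), which is what lets --resume --n-epochs 1 turn the genetic search into population-based training: each generation resumes the previous weights for one more epoch. A sketch of the resume hook the training script would need, assuming PyTorch and a checkpoint.pt file name (both assumptions):

import os

import torch

def maybe_resume(model, optimizer, output_dir):
    """Hypothetical hook: reload the latest state if a checkpoint exists."""
    ckpt = os.path.join(output_dir, 'checkpoint.pt')
    if os.path.exists(ckpt):
        state = torch.load(ckpt)
        model.load_state_dict(state['model'])
        optimizer.load_state_dict(state['optimizer'])
    return model, optimizer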
Example #4
import argparse

from crayai import hpo

# src_path and run_path are defined elsewhere in the original script.
argparser = argparse.ArgumentParser()
argparser.add_argument('-N', '--num_nodes', type=int, default=1)
argparser.add_argument('--generations', type=int, default=3)
argparser.add_argument('--num_demes', type=int, default=1)
argparser.add_argument('--pop_size', type=int, default=8)
argparser.add_argument('--mutation_rate', type=float, default=0.5)
argparser.add_argument('--crossover_rate', type=float, default=0.33)
argparser.add_argument('--verbose', action='store_true')
args = argparser.parse_args()

print("------------------------------------------------------------")
print("Genetic HPO Example: LeNet-5 (MNIST) TensorFlow -- Cray Inc.")
print("------------------------------------------------------------")

evaluator = hpo.Evaluator('python3 {0}/mnist.py'.format(src_path),
                          run_path=run_path,
                          src_path=src_path,
                          nodes=args.num_nodes)

optimizer = hpo.GeneticOptimizer(evaluator,
                                 generations=args.generations,
                                 num_demes=args.num_demes,
                                 pop_size=args.pop_size,
                                 mutation_rate=args.mutation_rate,
                                 crossover_rate=args.crossover_rate,
                                 verbose=args.verbose,
                                 log_fn='mnist-topology.log')

params = hpo.Params([["--dropout", 0.5, (0.005, 0.9)],
                     ["--momentum", 1.0e-4, (1.0e-6, 1.0e-2)],
                     ["--c1_sz", 5, (2, 8)], ["--c1_ft", 32, (8, 128)],
                     ["--c2_sz", 5, (2, 8)], ["--c2_ft", 64, (16, 256)],
                     ["--fullyc_sz", 1024, (64, 4096)]])

optimizer.optimize(params)
Example #5
import argparse

from crayai import hpo

argparser = argparse.ArgumentParser()
argparser.add_argument('--generations', type=int, default=250)
argparser.add_argument('--num_demes', type=int, default=1)
argparser.add_argument('--pop_size', type=int, default=100)
argparser.add_argument('--mutation_rate', type=float, default=0.05)
argparser.add_argument('--crossover_rate', type=float, default=0.33)
argparser.add_argument('--verbose', action='store_true')
args = argparser.parse_args()

print("------------------------------------------------------------")
print("Genetic HPO Example: LeNet-5 (MNIST) TensorFlow -- Cray Inc.")
print("------------------------------------------------------------")

evaluator = hpo.Evaluator('python3 source/mnist.py', run_path='runPath')

optimizer = hpo.genetic.Optimizer(evaluator,
                                  generations=args.generations,
                                  num_demes=args.num_demes,
                                  pop_size=args.pop_size,
                                  mutation_rate=args.mutation_rate,
                                  crossover_rate=args.crossover_rate,
                                  verbose=args.verbose,
                                  log_fn='mnist-topology.log')

params = hpo.Params([["--dropout", 0.5, (0.005, 0.9)],
                     ["--momentum", 1.0e-4, (1.0e-6, 1.0e-2)],
                     ["--c1_sz", 5, (2, 8)], ["--c1_ft", 32, (8, 128)],
                     ["--c2_sz", 5, (2, 8)], ["--c2_ft", 64, (16, 256)],
                     ["--fullyc_sz", 1024, (64, 4096)]])
Example #6
#!/usr/bin/env python3
# encoding: utf-8
"""Random optimizer example"""
from crayai import hpo

evaluator = hpo.Evaluator('python source/sin.py', verbose=True)

params = hpo.Params([["-a", 1.0, (-1.0, 1.0)], ["-b", -1.0, (-1.0, 1.0)],
                     ["-c", 1.0, (-1.0, 1.0)], ["-d", -1.0, (-1.0, 1.0)],
                     ["-e", 1.0, (-1.0, 1.0)], ["-f", -1.0, (-1.0, 1.0)],
                     ["-g", 1.0, (-1.0, 1.0)]])

optimizer = hpo.RandomOptimizer(evaluator, num_iters=129, verbose=True)

optimizer.optimize(params)

print(optimizer.best_fom)
print(optimizer.best_params)
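
source/sin.py is not shown. One plausible implementation treats -a through -g as coefficients of a degree-6 polynomial and reports its mean squared error against sin(x) as the figure of merit (everything below, including the FoM: output format, is an assumption):

#!/usr/bin/env python3
"""Hypothetical source/sin.py: FoM = MSE of a polynomial against sin(x)."""
import argparse

import numpy as np

p = argparse.ArgumentParser()
for flag in 'abcdefg':
    p.add_argument('-' + flag, type=float, default=0.0)
args = p.parse_args()

x = np.linspace(-np.pi, np.pi, 1000)
y = sum(getattr(args, f) * x**i for i, f in enumerate('abcdefg'))
print('FoM: %f' % np.mean((y - np.sin(x))**2))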
Example #7
"""Genetic optimizer example:
   minimize both error and time taken when fitting a polynomial to the sin function.
"""
from crayai import hpo

import argparse

parser = argparse.ArgumentParser(description=__doc__,
                                 formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--source',
                    type=str,
                    default='sin-time-accuracy.py',
                    help='source script')

args = parser.parse_args()

evaluator = hpo.Evaluator('python source/{0}'.format(args.source))

params = hpo.Params([["-a", 1.0, (-10.0, 10.0)], ["-b", -1.0, (-10.0, 10.0)],
                     ["-c", 1.0, (-10.0, 10.0)], ["-d", -1.0, (-10.0, 10.0)],
                     ["-e", 1.0, (-10.0, 10.0)], ["-f", -1.0, (-10.0, 10.0)],
                     ["-g", 1.0, (-10.0, 10.0)]])

optimizer = hpo.genetic.Optimizer(evaluator,
                                  verbose=True,
                                  generations=5,
                                  pop_size=10,
                                  log_fn='genetic.log')

optimizer.optimize(params)

print(optimizer.best_fom)
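
The docstring says sin-time-accuracy.py minimizes both error and time, but the optimizer only sees one scalar, so the script has to fold the two objectives into a single FoM itself. A sketch of one way to do that, with the weighting and the helper name as assumptions:

import time

start = time.perf_counter()
error = fit_and_measure_error()  # assumed helper: polynomial-vs-sin error
elapsed = time.perf_counter() - start
# Any monotone combination works; crayai just minimizes the reported number.
print('FoM: %f' % (error + 0.1 * elapsed))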
                         ["--lr", 0.001, (1e-6, 0.1)],
                         ["--real-weight", 3, (1, 5)]])
    config_file = 'configs/agnn.yaml'

## Define Command to be run by the evaluator
cmd = "python train.py %s --crayai --resume --ranks-per-node 1 -v --gpu 0" % config_file
if args.run_pbt:
    cmd += " --pbt_checkpoint @checkpoint"

## Define the evaluator
timestr = time.strftime("%Y%m%d-%H%M%S")
evaluator = hpo.Evaluator(
    cmd,
    run_path="./runs/run%s" % timestr,
    nodes=args.nodes,
    launcher='wlm',
    verbose=True,
    nodes_per_eval=args.nodes_per_eval,
    checkpoint=args.checkpoint if args.run_pbt else '',
    alloc_args="-J agnn-heptrkx --exclusive --time=24:00:00 -C P100 --gres=gpu",
    alloc_jobid=args.alloc_job_id)

## Define the Optimizer
optimizer = hpo.genetic.Optimizer(evaluator,
                                  gens_per_epoch=args.gens_per_epoch,
                                  pop_size=args.pop_size,
                                  num_demes=1,
                                  generations=args.generations,
                                  mutation_rate=args.mutation_rate,
                                  crossover_rate=args.crossover_rate,
                                  verbose=True)

## Run the Optimizer
optimizer.optimize(params)
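
The alloc_jobid argument suggests the evaluator can attach to an allocation you already hold instead of requesting its own. A hypothetical invocation (the driver's flag names are inferred from this snippet's args usage; the script name is made up):

salloc -N 4 -C P100 --gres=gpu --time=24:00:00   # note the job id SLURM prints
python pbt_agnn.py --nodes 4 --alloc_job_id <jobid>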