Example no. 1
0
def get_training_parameters(opt):
    """Build the training-parameter bundle from *opt*.

    Returns a DD with two sub-configs: ``static`` (settings fixed for the
    run: seed, regularization, schedule, clipping, loss) and ``dynamic``
    (settings that may vary: learning rate, batch size, optimizer).
    Optimizer-specific options from ``opt[optimizer]`` are merged into
    the static set.
    """
    train = DD()

    fixed = DD()
    fixed.exp = opt.exp
    fixed.seed = opt.random_seed

    # L2 weight decay; vl2 flag is always enabled
    fixed.l2 = opt.l2
    fixed.vl2 = True

    # learning-rate schedule, e.g. 'warmup_linear' with warmup 0.002
    fixed.lrsched = opt.learning_rate_schedule
    fixed.lrwarm = opt.learning_rate_warmup

    # gradient-clipping threshold
    fixed.clip = opt.clip

    # which loss function to use
    fixed.loss = opt.loss

    varying = DD()
    varying.lr = opt.learning_rate  # learning rate
    varying.bs = opt.batch_size  # batch size
    varying.optim = opt.optimizer  # optimizer name {adam, rmsprop, etc.}

    # merge optimizer-specific options (e.g. rmsprop's alpha, the
    # interpolation-average coefficient) into the static config
    fixed.update(opt[varying.optim])

    train.static = fixed
    train.dynamic = varying

    return train
Example no. 2
0
def get_parameters(opt, exp_type="model"):
    """Assemble the full experiment configuration from *opt*.

    Builds the ``params`` DD (net/train/data/eval sub-configs plus run
    settings) and a small ``meta`` DD (iteration budget and cycle), and
    returns them as a ``(params, meta)`` pair.

    Side effects: sets the module-level globals ``toy``, ``do_gen``,
    ``save``, ``test_save`` and ``save_strategy`` from *opt*, and prints
    the assembled ``params``.

    NOTE(review): *exp_type* is currently unused; it is kept in the
    signature for backward compatibility with existing callers.
    """
    params = DD()

    params.mle = 0
    params.dataset = opt.dataset

    # Sub-configurations.  (The original dead assignment
    # ``params.net = DD()`` was removed: get_net_parameters returns the
    # complete net config and immediately overwrote it.)
    params.net = get_net_parameters(opt)
    params.train = get_training_parameters(opt)

    params.model = params.net.model
    params.exp = opt.exp

    params.data = get_data_parameters(opt, params.exp, params.dataset)
    # eval parameters may depend on the data categories, if any
    params.eval = get_eval_parameters(opt, params.data.get("categories", None))

    meta = DD()

    params.trainer = opt.trainer

    # The iteration budget and cycle length are mirrored in both meta
    # and params — presumably different consumers read each; keep both.
    meta.iterations = int(opt.iterations)
    meta.cycle = opt.cycle
    params.cycle = opt.cycle
    params.iters = int(opt.iterations)

    # Module-level run flags consumed elsewhere in this module.
    global toy
    toy = opt.toy

    global do_gen
    do_gen = opt.do_gen

    global save
    save = opt.save

    global test_save
    test_save = opt.test_save

    global save_strategy
    save_strategy = opt.save_strategy

    print(params)
    return params, meta