def get_eval_parameters(opt, force_categories=None):
    evaluate = DD()

    # Decoding settings for the chosen sampler
    if opt.eval_sampler == "beam":
        evaluate.bs = opt.beam_size
    elif opt.eval_sampler == "greedy":
        evaluate.bs = 1
    elif opt.eval_sampler == "topk":
        evaluate.k = opt.topk_size

    evaluate.smax = opt.gen_seqlength    # maximum generation length
    evaluate.sample = opt.eval_sampler   # sampling strategy
    evaluate.numseq = opt.num_sequences  # number of sequences to generate
    evaluate.gs = opt.generate_sequences
    evaluate.es = opt.evaluate_sequences

    # ATOMIC evaluation can be restricted to a subset of relation categories;
    # force_categories, when given, overrides the categories set in opt
    if opt.dataset == "atomic":
        if "eval_categories" in opt and force_categories is None:
            evaluate.categories = opt.eval_categories
        else:
            evaluate.categories = force_categories

    return evaluate
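
# DD is not defined in this file; it is assumed to be a small attribute-access
# dictionary from elsewhere in the codebase. A minimal sketch compatible with
# the usage in the functions above and below (the real definition may differ):
class DD(dict):
    """dict subclass allowing attribute-style access: d.key is d["key"]."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
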
def get_training_parameters(opt):
    train = DD()

    # Parameters that stay fixed for the whole run
    static = DD()
    static.exp = opt.exp
    static.seed = opt.random_seed
    # weight decay
    static.l2 = opt.l2
    static.vl2 = True
    static.lrsched = opt.learning_rate_schedule  # e.g., 'warmup_linear'
    static.lrwarm = opt.learning_rate_warmup     # e.g., 0.002
    # gradient clipping
    static.clip = opt.clip
    # which loss function to use
    static.loss = opt.loss

    # Parameters that may change during training
    dynamic = DD()
    dynamic.lr = opt.learning_rate  # learning rate
    dynamic.bs = opt.batch_size     # batch size
    # optimizer to use {adam, rmsprop, etc.}
    dynamic.optim = opt.optimizer

    # Merge in the chosen optimizer's hyperparameters
    # (e.g., alpha, the moving-average interpolation factor, for rmsprop)
    static.update(opt[dynamic.optim])

    train.static = static
    train.dynamic = dynamic
    return train
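
# Hypothetical usage sketch. The opt fields and values below are illustrative
# assumptions that mirror the attributes read above; they are not verified
# defaults from this codebase.
if __name__ == "__main__":
    opt = DD()

    # Fields read by get_eval_parameters
    opt.eval_sampler = "greedy"
    opt.gen_seqlength = 40
    opt.num_sequences = 1
    opt.generate_sequences = True
    opt.evaluate_sequences = True
    opt.dataset = "atomic"
    opt.eval_categories = ["xNeed", "xIntent"]

    # Fields read by get_training_parameters
    opt.exp = "generation"
    opt.random_seed = 123
    opt.l2 = 0.01
    opt.learning_rate_schedule = "warmup_linear"
    opt.learning_rate_warmup = 0.002
    opt.clip = 1.0
    opt.loss = "nll"
    opt.learning_rate = 6.25e-5
    opt.batch_size = 64
    opt.optimizer = "adam"
    opt.adam = DD(b1=0.9, b2=0.999, e=1e-8)  # merged into train.static

    print(get_eval_parameters(opt))
    print(get_training_parameters(opt))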