Code Example #1
import torch
from datetime import timedelta

# Project-local helpers (utils, SGLD, LOSS_FACTORY, STOCHASTIC_FACTORY, Trainer,
# get_train_loaders, plot_regression_uncertainty, PLT) and the argparse `parser`
# are assumed to be imported/defined at module level.


def main():
    # Parse CLI arguments; utils.parse_args returns the processed args together
    # with a logging handle and a writer object.
    args = parser.parse_args()
    args, logging, writer = utils.parse_args(args)

    logging.info('# Start Re-training #')

    # Select the loss function for the configured task.
    criterion = LOSS_FACTORY[args.task](args, args.loss_scaling)

    # Build the network from the model factory; only stochastic models are
    # supported by this script.
    if args.model_type == "stochastic":
        model_temp = STOCHASTIC_FACTORY[args.model]
    else:
        raise NotImplementedError("Other models have not been implemented!")
    model = model_temp(args.input_size, args.output_size, args.layers,
                       args.activation, args, True)

    logging.info('## Model created: ##')
    logging.info(model.__repr__())

    logging.info("### Param size = %f MB, Total number of params = %d ###" %
                 utils.count_parameters_in_MB(model, args))

    logging.info('### Loading model to parallel GPUs ###')

    # Profile the model and move it to the available GPU(s).
    utils.profile(model, args, logging)
    model = utils.model_to_gpus(model, args)

    logging.info('### Preparing schedulers and optimizers ###')
    # SGLD (stochastic gradient Langevin dynamics) optimiser with a
    # cosine-annealed learning rate over the full training run.
    optimizer = SGLD(model.parameters(),
                     args.learning_rate,
                     norm_sigma=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epochs)

    logging.info('## Downloading and preparing data ##')
    train_loader, valid_loader = get_train_loaders(args)

    logging.info('## Beginning Training ##')

    trainer = Trainer(model, criterion, optimizer, scheduler, args)

    best_error, train_time, val_time = trainer.train_loop(
        train_loader, valid_loader, logging, writer)

    logging.info(
        '## Finished training, the best observed validation error: {}, total training time: {}, total validation time: {} ##'
        .format(best_error, timedelta(seconds=train_time),
                timedelta(seconds=val_time)))

    logging.info('## Beginning Plotting ##')
    # Rebuild the model in inference mode from the saved checkpoint and draw
    # posterior samples to visualise the predictive uncertainty.
    del model
    with torch.no_grad():
        args.samples = 100
        args.model_path = args.save
        model = model_temp(args.input_size, args.output_size, args.layers,
                           args.activation, args, False)
        model = utils.model_to_gpus(model, args)
        model.eval()
        plot_regression_uncertainty(model, PLT, train_loader, args)
        logging.info('# Finished #')
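
For context, here is a minimal, hypothetical sketch of the module-level setup this snippet assumes. The flag names are inferred from the attributes read above (args.task, args.model_type, args.learning_rate, args.weight_decay, args.epochs, args.save, ...) and will differ from the real project's argument parser.

import argparse

# Hypothetical argument parser; only a subset of the flags used above is shown.
parser = argparse.ArgumentParser(description='Re-train a stochastic model with SGLD')
parser.add_argument('--task', type=str, default='regression')
parser.add_argument('--model_type', type=str, default='stochastic')
parser.add_argument('--model', type=str, default='mlp')
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--save', type=str, default='./experiments')
# ... remaining hyperparameters (input_size, output_size, layers, activation,
# loss_scaling, batch size, seed, ...) would be declared here as well.

if __name__ == '__main__':
    main()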
Code Example #2
import ast
import logging
import os

import numpy as np

# Project-local helpers (utils, Random_NAS, parse_arch_to_darts) are assumed to
# be imported at module level; `wrapper` bundles the parsed args and the model.


def main(wrapper):
    args = wrapper.args
    model = wrapper.model
    save_dir = args.save

    # Warm-start from previously saved weights when a checkpoint is available.
    try:
        wrapper.load()
        logging.info('loaded previously saved weights')
    except Exception as e:
        logging.info('could not load saved weights: %s', e)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info('Args: {}'.format(args))

    if args.eval_only:
        assert save_dir is not None

    # Search budget B: total number of mini-batches the searcher may train for,
    # derived from the epoch count, dataset size and batch size.
    data_size = 25000
    time_steps = 1

    B = int(args.epochs * data_size / args.batch_size / time_steps)

    # Run random architecture search within the budget, or only evaluate
    # previously found architectures when args.eval_only is set.
    searcher = Random_NAS(B, wrapper, args.seed, save_dir)
    logging.info('budget: %d' % (searcher.B))
    if not args.eval_only:
        searcher.run()
        archs = searcher.get_eval_arch(args.randomnas_rounds, args.n_samples)
    else:
        np.random.seed(args.seed + 1)
        archs = searcher.get_eval_arch(2)
    logging.info(archs)
    #arch = ' '.join([str(a) for a in archs[0][0]])
    # Convert the best architecture found into a DARTS-style genotype.
    arch = str(archs[0][0])
    arch = parse_arch_to_darts('cnn', ast.literal_eval(arch), args.space)
    # Persist the chosen architecture for this task.
    with open(os.path.join(args.save, 'arch_{}'.format(args.task_id)),
              'w') as f:
        f.write(str(arch))

    logging.info(str(arch))
    utils.write_yaml_results(args, args.results_file_arch, str(arch))
    return arch
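
For illustration only, a sketch of the wrapper interface this entry point relies on. The attribute and method names (args, model, load) are taken from the usage above; the Namespace fields, the placeholder model, and the checkpoint path are assumptions and will differ from the real search wrapper.

import argparse
import os

import torch
import torch.nn as nn


class SearchWrapper:
    # Hypothetical wrapper bundling the parsed args, the search model and
    # checkpoint loading, mirroring the attributes read in main().

    def __init__(self, args, model):
        self.args = args
        self.model = model

    def load(self):
        # Assumed checkpoint location; restore weights saved by a previous run.
        path = os.path.join(self.args.save, 'weights.pt')
        self.model.load_state_dict(torch.load(path))


if __name__ == '__main__':
    args = argparse.Namespace(save='./search', eval_only=False, epochs=50,
                              batch_size=64, seed=0, randomnas_rounds=1,
                              n_samples=8, space='darts', task_id=0,
                              results_file_arch='results_arch.yaml')
    wrapper = SearchWrapper(args, nn.Linear(10, 10))  # placeholder model
    main(wrapper)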