Code Example #1
import os
import os.path as osp

import torch

# FLAGS, CelebAModel, MNISTModel, ResNetModel and compute_inception are
# assumed to be defined elsewhere in the project this snippet was taken from.


def main():
    logdir = osp.join(FLAGS.logdir, FLAGS.exp)
    sandbox_logdir = osp.join('sandbox_cachedir', FLAGS.exp)

    if not osp.exists(sandbox_logdir):
        os.makedirs(sandbox_logdir)

    # Load the checkpoint for the requested iteration; it also stores the
    # flags the model was trained with.
    model_path = osp.join(logdir, "model_{}.pth".format(FLAGS.resume_iter))
    checkpoint = torch.load(model_path)
    FLAGS_model = checkpoint['FLAGS']

    # Pick the architecture matching the dataset.
    if FLAGS.dataset == "celeba":
        model = CelebAModel(FLAGS_model).eval().cuda()
    elif FLAGS.dataset == "mnist":
        model = MNISTModel(FLAGS_model).eval().cuda()
    else:
        model = ResNetModel(FLAGS_model).eval().cuda()

    # Restore either the EMA weights or the raw weights.
    if FLAGS.ema:
        model.load_state_dict(checkpoint['ema_model_state_dict_0'])
    else:
        model.load_state_dict(checkpoint['model_state_dict_0'])

    model = model.eval()
    compute_inception(model)
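
The loading code above implies a particular checkpoint layout. Below is a minimal sketch of the corresponding save call, limited to the three keys the examples actually read; the ema_model variable and the step counter are hypothetical and only stand in for whatever the training loop uses.

# Hypothetical save-side counterpart to the loading code above. The key names
# come from the checkpoint reads in the example; everything else is assumed.
torch.save({
    'FLAGS': FLAGS,                                    # training-time flags, restored later as FLAGS_model
    'model_state_dict_0': model.state_dict(),          # raw model weights
    'ema_model_state_dict_0': ema_model.state_dict(),  # EMA-averaged weights (hypothetical ema_model)
}, osp.join(logdir, "model_{}.pth".format(step)))      # step is hypothetical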
Code Example #2
# Uses the same imports and project-level names (FLAGS, CelebAModel,
# ResNetModel, energyevalmix, unsup_finetune) as Code Example #1.
def main():
    logdir = osp.join(FLAGS.logdir, FLAGS.exp)
    sandbox_logdir = osp.join('sandbox_cachedir', FLAGS.exp)

    if not osp.exists(sandbox_logdir):
        os.makedirs(sandbox_logdir)

    # Load the checkpoint and the flags the model was trained with.
    model_path = osp.join(logdir, "model_{}.pth".format(FLAGS.resume_iter))
    checkpoint = torch.load(model_path)
    FLAGS_model = checkpoint['FLAGS']

    if FLAGS.dataset == "celeba":
        model = CelebAModel(FLAGS_model).eval().cuda()
    else:
        model = ResNetModel(FLAGS_model).eval().cuda()

    # Optionally skip loading weights to evaluate a randomly initialised model.
    if not FLAGS.random_init:
        if FLAGS.ema:
            model.load_state_dict(checkpoint['ema_model_state_dict_0'])
        else:
            model.load_state_dict(checkpoint['model_state_dict_0'])

    model = model.eval()

    # Dispatch to the requested evaluation task.
    if FLAGS.task == 'mixenergy':
        energyevalmix(model)
    elif FLAGS.task == 'unsup_finetune':
        unsup_finetune(model, FLAGS_model)
Code Example #3
# Same imports as Code Example #1; conceptcombine comes from the surrounding project.
def combine_main(models, resume_iters, select_idx):

    model_list = []

    # Load one CelebAModel (with EMA weights) per (experiment, iteration) pair.
    for model, resume_iter in zip(models, resume_iters):
        model_path = osp.join("cachedir", model, "model_{}.pth".format(resume_iter))
        checkpoint = torch.load(model_path)
        FLAGS_model = checkpoint['FLAGS']
        model_base = CelebAModel(FLAGS_model)
        model_base.load_state_dict(checkpoint['ema_model_state_dict_0'])
        model_base = model_base.cuda()
        model_list.append(model_base)

    # Combine the loaded energy models on the selected concepts.
    conceptcombine(model_list, select_idx)
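
A hypothetical call to combine_main, only to illustrate the expected argument shapes; the experiment names, iteration numbers, and concept indices below are made up and not taken from the original project.

# Hypothetical usage of combine_main defined above; relies on conceptcombine
# from the surrounding project.
combine_main(
    models=["celeba_exp_a", "celeba_exp_b"],  # experiment directories under "cachedir" (made up)
    resume_iters=[24000, 24000],              # checkpoint iterations to load (made up)
    select_idx=[0, 1],                        # concept indices forwarded to conceptcombine (made up)
)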
Code Example #4
import os
from argparse import ArgumentParser

# parse_config, get_loss, get_trainer, CelebAModel and seed_everything are
# assumed to come from the surrounding project (seed_everything may also be
# the PyTorch Lightning helper of the same name).

parser = ArgumentParser()

parser.add_argument('--config', type=str, help='Config file path')
parser.add_argument('--path', type=str, help='Save path')
parser.add_argument('--gpus', type=str, help='Comma-separated GPU ids, e.g. "0,1"')
# argparse's type=bool treats any non-empty string as True, so a flag is used instead.
parser.add_argument('--eval', action='store_true', help='Whether to only run the test stage')

args = parser.parse_args()
seed_everything(1234)  # fix random seeds for reproducibility
debug = False

config = parse_config(args.config)
gpus = [int(x) for x in args.gpus.strip().split(',')]
if not os.path.isdir(args.path):
    os.mkdir(args.path)

# Build the loss and the model from the parsed config.
criterion = get_loss(config['criterion'])
model = CelebAModel(criterion=criterion,
                    config=config,
                    path=args.path,
                    batch_size=config['batch_size'],
                    **config['model'])

# In eval mode, resume from the best (lowest-loss) checkpoint; otherwise
# resume from the latest one and continue training.
trainer = get_trainer(gpus=gpus,
                      path=args.path,
                      debug=debug,
                      resume_mode='min_loss' if args.eval else 'latest',
                      config=config['trainer'])
if not args.eval:
    trainer.fit(model)
trainer.test(model)
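
For reference, the script above reads only four top-level keys from the parsed config. Below is a minimal sketch of that structure, assuming parse_config returns a plain dict; the file format and all values here are assumptions, not taken from the original project.

# Hypothetical shape of the parsed config; only the keys read by the script above are shown.
config = {
    'criterion': 'l1',   # name passed to get_loss (value is an assumption)
    'batch_size': 32,    # forwarded to CelebAModel (value is an assumption)
    'model': {},         # keyword arguments unpacked via **config['model']
    'trainer': {},       # options forwarded to get_trainer
}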