def main(args):
    """Train a conditional Glow model on iCLEVR and report the best test score.

    Expects `args` to carry: gpu_ids, batch_size, seed, num_workers,
    num_channels, num_levels, num_steps, mode, lr, warm_up, resume,
    num_epochs, max_grad_norm.
    """
    # Pick the compute device and scale the batch size by GPU count.
    device = 'cuda' if torch.cuda.is_available() and args.gpu_ids else 'cpu'
    args.batch_size *= max(1, len(args.gpu_ids))

    # Seed every RNG source for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    train_loader = data.DataLoader(
        ICLEVRLoader('./'),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
    )

    # Test-set conditioning labels, as a float tensor on the training device.
    test_condition = torch.Tensor(get_iCLEVR_data('./', 'test')[1]).float().to(device)

    print('Building model..')
    model = Glow(num_channels=args.num_channels,
                 num_levels=args.num_levels,
                 num_steps=args.num_steps,
                 img_shape=(3, 64, 64),
                 mode=args.mode).to(device)
    evaluator = evaluation_model()

    loss_fn = util.NLLLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Linear learning-rate warm-up over the first `args.warm_up` steps.
    scheduler = sched.LambdaLR(optimizer, lambda s: min(1., s / args.warm_up))

    start_epoch = 0
    if args.resume:
        # Restore model/optimizer state from a fixed checkpoint path.
        print('Resuming from checkpoint')
        ckpt = torch.load('savemodel/cINN/checkpoint_18.tar')
        model.load_state_dict(ckpt['net'])
        optimizer.load_state_dict(ckpt['optimizer'])
        global best_loss
        global global_step
        start_epoch = ckpt['epoch']
        global_step = start_epoch * len(train_loader.dataset)

    # Train, evaluating after each epoch and recording the score.
    scores = []
    for epoch in range(start_epoch, start_epoch + args.num_epochs):
        train(epoch, model, train_loader, device, optimizer, scheduler,
              loss_fn, args.max_grad_norm)
        scores.append(test(epoch, model, test_condition, device, evaluator))

    scores = np.asarray(scores)
    print('Best epoch: %d\nBest score: %f' % (np.argmax(scores), np.max(scores)))
# Beta1 hyperparam for Adam optimizers beta1 = 0.5 batch_size = args.batch_size device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") netG = Generator(nc, nz, ngf).to(device) netD = Discriminator((64,64,3), ndf).to(device) # ckt = torch.load('savemodel/checkpoint_149.tar') # netG.load_state_dict(ckt['generator_state_dict']) # netD.load_state_dict(ckt['discriminator_state_dict']) # epoch = ckt['epoch'] + 1 epoch = 0 netG.apply(weights_init) netD.apply(weights_init) evaluation_model = evaluation_model() criterion = nn.BCELoss() # Create batch of latent vectors that we will use to visualize # the progression of the generator real_label = 1. fake_label = 0. optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999)) optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999)) writer = SummaryWriter('logs/' + args.exp_name) train_loader = torch.utils.data.DataLoader(ICLEVRLoader('./'), batch_size = batch_size, shuffle = True) # test_loader = torch.utils.data.DataLoader(ICLEVRLoader('./', mode='test'), batch_size = 2, shuffle = True) test_condition = get_iCLEVR_data('./', 'test')[1] newtest_condition = get_iCLEVR_data('./', 'new_test')[1]
import torch
import torch.nn as nn
from torchvision.utils import save_image, make_grid
from torch.autograd import Variable
import torch.nn.functional as F
from dataset import TestingDataset
from evaluator import evaluation_model
import numpy as np
import pickle
import matplotlib.pyplot as plt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Pretrained classifier used to score the generated images.
evaluator = evaluation_model()

# In[ ]:

# Test-set label conditions; no shuffling so results are reproducible.
test_dataset = TestingDataset('test.json')
test_loader = torch.utils.data.DataLoader(test_dataset, 32, shuffle=False)

# In[ ]:

# NOTE(review): torch.load of a fully pickled model object -- only safe for
# trusted files, and requires the original class definitions to be importable.
netG = torch.load('./models_weight/netG.pkl')

# In[ ]:

for labels in test_loader:
    # Draw a batch of latent vectors z ~ N(0, 1) of shape (32, 104).
    # NOTE(review): Variable is a deprecated no-op wrapper on modern PyTorch,
    # and torch.cuda.FloatTensor assumes CUDA is available -- torch.randn(...,
    # device=device) would be the modern form (but changes the RNG stream).
    z = Variable(torch.cuda.FloatTensor(np.random.normal(0, 1, (32, 104))))
    # img is overwritten each batch; presumably consumed later in the file -- TODO confirm
    img = netG(z, labels.to(device))
# (continuation of a load_state_dict(...) call whose opening precedes this chunk)
torch.load(output_folder + model_name, map_location="cpu")['model'])
# Mark ActNorm layers as initialised so inference skips data-dependent init.
model.set_actnorm_init()
model = model.to(device)
model = model.eval()


def norm_ip(img, min, max):
    """In place: clamp *img* to [min, max], then rescale it into [0, 1].

    NOTE(review): the parameters shadow the builtins ``min``/``max``; kept
    because the parameter names are part of the public signature.
    """
    img.clamp_(min=min, max=max)
    # +1e-5 guards against division by zero when max == min.
    img.add_(-min).div_(max - min + 1e-5)


def norm_range(t, range):
    """Normalise tensor *t* in place into [0, 1].

    Uses *range* = (lo, hi) when given, otherwise t's own min/max.
    NOTE(review): ``range`` shadows the builtin of the same name.
    """
    if range is not None:
        norm_ip(t, range[0], range[1])
    else:
        norm_ip(t, float(t.min()), float(t.max()))


evaluator = evaluation_model(
    "/home/yellow/deep-learning-and-practice/hw7/classifier_weight.pth")

# Score the standard test split: sample images from the Glow reverse pass
# conditioned on the one-hot test labels, normalise each image, then evaluate.
test_conditions = get_test_conditions(hparams['dataroot']).cuda()
predict_x = postprocess(
    model(y_onehot=test_conditions, temperature=1, reverse=True)).float()
for t in predict_x:  # loop over mini-batch dimension
    norm_range(t, None)
score = evaluator.eval(predict_x, test_conditions)
save_image(predict_x.float(), f"score{score:.3f}.png")

# Repeat for the "new test" split.
test_conditions = get_new_test_conditions(hparams['dataroot']).cuda()
predict_x = postprocess(
    model(y_onehot=test_conditions, temperature=1, reverse=True)).float()
for t in predict_x:  # loop over mini-batch dimension
    norm_range(t, None)
newscore = evaluator.eval(predict_x.float(), test_conditions)
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
from dataloader import Data_Loader, one_hot
from torch.utils.data import Dataset, DataLoader
from evaluator import evaluation_model
import json

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# Pretrained classifier used to score generated images.
evm = evaluation_model(device)

# iCLEVR training data.
train_data = Data_Loader("lab5_dataset/iclevr")
train_loader = DataLoader(dataset=train_data, batch_size=32, shuffle=True)

# ---- Hyperparameters ----
batch_size = 32
nz = 25        # presumably the latent vector size -- TODO confirm
nc = 3         # image channels
nclass = 24    # presumably the number of condition classes -- TODO confirm
lr = 0.0002
epochs = 75


class Generator(nn.Module):
    # NOTE: this class definition continues past the end of this chunk.
    def __init__(self):
        super(Generator, self).__init__()