def gen_images(gen_image_num, save_path):
    # Rebuild the generator (note: it takes ngf, not ndf) and restore weights from opt.net_g.
    net_g = netG(opt.imageSize, int(opt.nz), int(opt.nc), int(opt.ngf), int(opt.ngpu))
    net_g.load_state_dict(torch.load(opt.net_g))
    net_g.eval()
    if opt.cuda:
        net_g.cuda()
    for i in range(gen_image_num):
        # Sample one latent vector, generate an image, and rescale it from [-1, 1] to [0, 1].
        noise = torch.FloatTensor(1, int(opt.nz), 1, 1).normal_(0, 1)
        if opt.cuda:
            noise = noise.cuda()
        with torch.no_grad():
            fake = net_g(noise)
        fake = fake.mul(0.5).add(0.5)
        vutils.save_image(
            fake, os.path.join(save_path, 'fake_sample{0}.png'.format(i)))
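# Hypothetical usage of gen_images (not part of the original script): 'gen_samples' is an
# assumed output directory and 16 an arbitrary sample count; opt must already be parsed.
if __name__ == '__main__':
    os.makedirs('gen_samples', exist_ok=True)
    gen_images(gen_image_num=16, save_path='gen_samples')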
def load_model_from_file(path, epoch, model='dis'):
    from models import netD, netG, VAE
    import json

    # Restore the training arguments that were saved alongside the checkpoints.
    with open(os.path.join(path, 'args.json'), 'r') as f:
        old_args = json.load(f)
    old_args = to_attr(old_args)

    if 'gen' in model.lower():
        try:
            # Runs that define z_dim use the VAE generator.
            z_ = old_args.z_dim
            model_ = VAE(old_args)
        except AttributeError:
            # Otherwise fall back to the GAN generator with a default latent size of 100.
            z_ = 100
            model_ = netG(old_args, nz=z_, nc=3 if old_args.no_polar else 2)
    elif 'dis' in model.lower():
        model_ = netD(old_args)
    else:
        raise ValueError('%s is not a valid model name' % model)

    model_.load_state_dict(
        torch.load(os.path.join(path, 'models/%s_%d.pth' % (model, epoch))))
    print('model successfully loaded')
    return model_, epoch
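# A hedged usage example: 'runs/experiment1' and epoch 50 are placeholder values; the loaded
# generator is switched to eval mode before being used for sampling.
gen, loaded_epoch = load_model_from_file('runs/experiment1', epoch=50, model='gen')
gen.eval()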
b_size = real_cpu.size(0)
label.uniform_((1.0 - lsf), 1.0)  # soft labels for real samples (lsf = soft labelling factor)
# label = torch.full((b_size,), real_label, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()

## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.uniform_(0.0, lsf)  # soft labels for fake samples
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()

############################
# (2) Update G network: maximize log(D(G(z)))
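# A minimal sketch of the generator step announced by the header above, assuming the same
# netG, netD, criterion, label, lsf, and optimizerG objects as in the discriminator step.
# It follows the standard DCGAN recipe and is not taken verbatim from this codebase.
netG.zero_grad()
label.uniform_((1.0 - lsf), 1.0)  # the generator wants its fakes scored as real
# Re-classify the fake batch with the freshly updated discriminator
output = netD(fake).view(-1)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimizerG.step()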
# Cubic interpolation through the latent key points, evaluated once per frame.
linfit = interpolate.interp1d(points_lst, noise_samples, kind='cubic', axis=0)

# Generate the frames and store them in a temporary directory.
with torch.no_grad():
    x = linfit(0)
    # Build an array of length [frames * nz] by evaluating the interpolant at each frame index.
    for i in range(1, frames):
        x = np.append(x, linfit(i))
    x = x.reshape(-1, nz, 1, 1)
    z = torch.FloatTensor(x)
    z = z.to(device)
    genImgs = netG(z).detach().cpu()
    for i in range(genImgs.size(0)):
        img_fp = temp_dir + "/" + str(i).zfill(imgs_log10) + ".jpg"
        vutils.save_image(genImgs[i, :, :, :], img_fp, normalize=True)

# Convert the saved images to a movie.
path_str = temp_dir + '/*.jpg'
img_array = []
pathlist = glob.glob(path_str)
pathlist.sort()
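# A hedged continuation of the image-to-movie step, assuming OpenCV (cv2) is available and
# that video_fp and fps are defined elsewhere in the script; the original conversion code is
# not shown here, so this is only a sketch of one common way to finish it.
import cv2

for path in pathlist:
    img_array.append(cv2.imread(path))

height, width, _ = img_array[0].shape
writer = cv2.VideoWriter(video_fp, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
for frame in img_array:
    writer.write(frame)
writer.release()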
def train():
    ## parameters
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = int(opt.nc)
    x = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    one = torch.FloatTensor([1])
    mone = one * -1

    ## dataset
    dataloader = scene_dataset(opt.dataroot, opt.list_file, is_train=True)

    ## build networks
    net_d = netD(opt.imageSize, nz, nc, ndf, ngpu)
    net_d.apply(weights_init)
    net_g = netG(opt.imageSize, nz, nc, ngf, ngpu)
    net_g.apply(weights_init)
    if opt.net_d != '':
        net_d.load_state_dict(torch.load(opt.net_d))
    if opt.net_g != '':
        net_g.load_state_dict(torch.load(opt.net_g))

    ## move everything to the GPU if requested
    if opt.cuda:
        net_d.cuda()
        net_g.cuda()
        x = x.cuda()
        one, mone = one.cuda(), mone.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    ## optimizers (RMSprop, as in the original WGAN setup)
    optimizer_d = optim.RMSprop(net_d.parameters(), lr=opt.lrD)
    optimizer_g = optim.RMSprop(net_g.parameters(), lr=opt.lrG)

    gen_iterations = 0
    for epoch in range(opt.niter):
        data_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):
            ## (1) update the critic
            for p in net_d.parameters():
                p.requires_grad = True
            # train the critic more often early on and every 500 generator steps
            if gen_iterations < 25 or gen_iterations % 500 == 0:
                Diters = 100
            else:
                Diters = opt.Diters
            j = 0
            while j < Diters and i < len(dataloader):
                j += 1
                # weight clipping keeps the critic approximately Lipschitz
                for p in net_d.parameters():
                    p.data.clamp_(opt.clamp_lower, opt.clamp_upper)
                data = next(data_iter)
                i += 1
                real_data, _ = data
                net_d.zero_grad()
                batch_size = real_data.size(0)
                if opt.cuda:
                    real_data = real_data.cuda()
                x.resize_as_(real_data).copy_(real_data)

                errD_real = net_d(x)
                errD_real.backward(one)

                noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
                with torch.no_grad():  # freeze the generator while training the critic
                    fake_data = net_g(noise)
                errD_fake = net_d(fake_data)
                errD_fake.backward(mone)
                errD = errD_real - errD_fake
                optimizer_d.step()

            ## (2) update the generator
            for p in net_d.parameters():
                p.requires_grad = False
            net_g.zero_grad()
            noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
            fake = net_g(noise)
            errG = net_d(fake)
            errG.backward(one)
            optimizer_g.step()
            gen_iterations += 1

            print(
                '[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f' %
                (epoch, opt.niter, i, len(dataloader), gen_iterations,
                 errD.item(), errG.item(), errD_real.item(), errD_fake.item()))

            if gen_iterations % 500 == 0:
                real_data = real_data.mul(0.5).add(0.5)
                vutils.save_image(
                    real_data,
                    '{0}/real_samples_{1}.png'.format(opt.experiment, gen_iterations))
                with torch.no_grad():
                    fake = net_g(fixed_noise)
                fake = fake.mul(0.5).add(0.5)
                vutils.save_image(
                    fake,
                    '{0}/fake_samples_{1}.png'.format(opt.experiment, gen_iterations))

        torch.save(net_g.state_dict(),
                   '{0}/netG_epoch_{1}.pth'.format(opt.experiment, epoch))
        torch.save(net_d.state_dict(),
                   '{0}/netD_epoch_{1}.pth'.format(opt.experiment, epoch))
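# weights_init is referenced above but not defined in this snippet; below is a sketch of the
# common DCGAN-style initializer (normal(0, 0.02) for conv layers, normal(1, 0.02) for
# batch-norm scales). This is an assumption about the intended helper, not code from the repo.
import torch.nn as nn

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)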
import sys

import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.utils as vutils

from config import nz, checkpoint_path
from models import netG

fig_size = int(sys.argv[1])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Restore the trained generator weights and move the model to the chosen device.
checkpoint = torch.load(checkpoint_path, map_location=device)
netG.load_state_dict(checkpoint['netG_state_dict'])
netG.to(device)

# Generate a batch of fake images from fixed noise and show them as a grid.
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
fake_batch = netG(fixed_noise).detach().cpu()

plt.figure(figsize=(fig_size, fig_size))
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(
    vutils.make_grid(fake_batch, padding=0, normalize=True).cpu(), (1, 2, 0)))
plt.show()
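# Headless variant (an assumption, not part of the original script): save the same grid to
# disk with torchvision instead of displaying it with matplotlib.
vutils.save_image(fake_batch, 'fake_images_grid.png', padding=0, normalize=True)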