def main():
    """Load a trained PixelCNN checkpoint and autoregressively sample a grid of images.

    Pixels are generated one at a time in raster order (row, column, channel)
    because each conditional distribution depends on previously sampled pixels.
    The finished grid is written to 'sample.png'.
    """
    save_path = 'models/model_23.pt'
    no_images = 64
    images_size = 32
    images_channels = 3
    # 64 quantization levels: sampled indices in [0, 63] are rescaled to [0, 1].
    num_levels = 64

    # Define and load model.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = PixelCNN().to(device)
    # map_location makes a GPU-saved checkpoint loadable on a CPU-only host;
    # a bare torch.load(save_path) raises in that situation.
    net.load_state_dict(torch.load(save_path, map_location=device))
    net.eval()

    sample = torch.zeros(no_images, images_channels, images_size, images_size).to(device)

    print('-------------------------------------SAMPLING!!!!!---------------------------------')
    # no_grad: sampling needs no autograd graph; saves memory and time over the
    # original version, which tracked gradients for every forward pass.
    with torch.no_grad():
        for i in tqdm(range(images_size)):
            for j in range(images_size):
                for c in range(images_channels):
                    out = net(sample)
                    # assumes net output is (batch, levels, channel, H, W) -- TODO confirm
                    probs = torch.softmax(out[:, :, c, i, j], dim=1)
                    sampled_levels = torch.multinomial(probs, 1).squeeze().float() / (num_levels - 1)
                    sample[:, c, i, j] = sampled_levels

    torchvision.utils.save_image(sample, 'sample.png', nrow=12, padding=0)
# --- one epoch of training followed by evaluation ---
# NOTE(review): this fragment relies on model, train_loader, test_loader,
# optim, crit, BATCH_SIZE, device, and the epoch index `ei` being defined by
# the surrounding script.
train_loss = 0.0  # fix: was accumulated below but never initialized here
eval_loss = 0.0

model.train()
for x, h in tqdm(train_loader):
    optim.zero_grad()
    x, h = x.to(device), h.to(device)
    # Inputs are presumably floats in [0, 1]; targets become 8-bit intensity
    # classes in [0, 255] -- TODO confirm against the data loader.
    target = (x * 255).long()
    pred = model(x, h)
    # Cross-entropy over 256 intensity levels per pixel. Use the actual batch
    # size so a final partial batch does not break the view() (the original
    # hard-coded BATCH_SIZE and would raise on a short last batch).
    loss = crit(pred.view(x.size(0), 256, -1), target.view(x.size(0), -1))
    train_loss += loss.item()
    loss.backward()
    optim.step()

model.eval()
with torch.no_grad():
    for i, (x, h) in enumerate(tqdm(test_loader)):
        # No optim.zero_grad() here: nothing calls backward() under no_grad,
        # so zeroing gradients was pointless work (removed).
        x, h = x.to(device), h.to(device)
        target = (x * 255).long()
        pred = model(x, h)
        loss = crit(pred.view(x.size(0), 256, -1), target.view(x.size(0), -1))
        eval_loss += loss.item()
        if i == 0:
            # Save ground truth next to the argmax reconstruction for a quick
            # visual check each epoch.
            img = torch.cat([target, torch.argmax(pred, dim=1)], dim=0) / 255.0
            torchvision.utils.save_image(img, f"samples/pixelcnn-{ei}.png")
def train(config, mode='cifar10'):
    """Train a PixelCNN++ model with a discretized mixture-of-logistics loss.

    Checkpoints go to models/ and sampled image grids to images/ every
    config.save_interval epochs.

    Args:
        config: namespace with lr, nr_resnet, nr_filters, nr_logistic_mix,
            batch_size, lr_decay, max_epochs, save_interval, load_params.
        mode: dataset selector — 'cifar10' or 'faces' (3x32x32) or 'mnist'
            (1x28x28).

    Raises:
        ValueError: if mode is not one of the supported datasets.
    """
    model_name = 'pcnn_lr:{:.5f}_nr-resnet{}_nr-filters{}'.format(
        config.lr, config.nr_resnet, config.nr_filters)
    try:
        os.makedirs('models')
        os.makedirs('images')
    except OSError:
        pass  # directories already exist

    # Reproducibility: log the seed so a run can be repeated.
    seed = np.random.randint(0, 10000)
    print("Random Seed: ", seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed_all(seed)
    cudnn.benchmark = True

    trainset, train_loader, testset, test_loader, classes = load_data(
        mode=mode, batch_size=config.batch_size)

    if mode == 'cifar10' or mode == 'faces':
        obs = (3, 32, 32)
        loss_op = lambda real, fake: discretized_mix_logistic_loss(real, fake, config.nr_logistic_mix)
        sample_op = lambda x: sample_from_discretized_mix_logistic(x, config.nr_logistic_mix)
    elif mode == 'mnist':
        obs = (1, 28, 28)
        loss_op = lambda real, fake: discretized_mix_logistic_loss_1d(real, fake, config.nr_logistic_mix)
        sample_op = lambda x: sample_from_discretized_mix_logistic_1d(x, config.nr_logistic_mix)
    else:
        # Fail fast: the original fell through and later hit a confusing
        # NameError on `obs`.
        raise ValueError('unknown mode: {}'.format(mode))

    sample_batch_size = 25
    # Model outputs live in [-1, 1]; map back to image range [0, 1].
    rescaling_inv = lambda x: .5 * x + .5

    model = PixelCNN(nr_resnet=config.nr_resnet, nr_filters=config.nr_filters,
                     input_channels=obs[0], nr_logistic_mix=config.nr_logistic_mix).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=config.lr_decay)

    if config.load_params:
        load_part_of_model(model, config.load_params)
        print('model parameters loaded')

    def sample(model):
        # Autoregressive sampling: each pixel (i, j) is drawn conditioned on
        # all previously generated pixels.
        model.train(False)
        data = torch.zeros(sample_batch_size, obs[0], obs[1], obs[2])
        data = data.cuda()
        with tqdm(total=obs[1] * obs[2]) as pbar:
            for i in range(obs[1]):
                for j in range(obs[2]):
                    with torch.no_grad():
                        out = model(data, sample=True)
                        out_sample = sample_op(out)
                        data[:, :, i, j] = out_sample.data[:, :, i, j]
                    pbar.update(1)
        return data

    print('starting training')
    for epoch in range(config.max_epochs):
        model.train()
        torch.cuda.synchronize()
        train_loss = 0.
        time_ = time.time()
        with tqdm(total=len(train_loader)) as pbar:
            for batch_idx, (data, label) in enumerate(train_loader):
                data = data.requires_grad_(True).cuda()
                output = model(data)
                loss = loss_op(data, output)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
                pbar.update(1)
        # batch_idx is zero-based, so the batch count is batch_idx + 1. The
        # original `batch_idx * ...` under-counted by one batch and divided
        # by zero when the loader yields a single batch.
        deno = (batch_idx + 1) * config.batch_size * np.prod(obs)
        print('train loss : %s' % (train_loss / deno), end='\t')

        # Decrease learning rate once per epoch.
        scheduler.step()

        model.eval()
        test_loss = 0.
        with tqdm(total=len(test_loader)) as pbar:
            for batch_idx, (data, _) in enumerate(test_loader):
                data = data.requires_grad_(False).cuda()
                output = model(data)
                loss = loss_op(data, output)
                test_loss += loss.item()
                del loss, output  # release graph/activations promptly
                pbar.update(1)
        deno = (batch_idx + 1) * config.batch_size * np.prod(obs)
        print('test loss : {:.4f}, time : {:.4f}'.format((test_loss / deno), (time.time() - time_)))
        torch.cuda.synchronize()

        if (epoch + 1) % config.save_interval == 0:
            torch.save(model.state_dict(), 'models/{}_{}.pth'.format(model_name, epoch))
            print('sampling...')
            sample_t = sample(model)
            sample_t = rescaling_inv(sample_t)
            save_image(sample_t, 'images/{}_{}.png'.format(model_name, epoch),
                       nrow=5, padding=0)