# --- one training iteration: fit, then periodic rendering / checkpointing ---
# NOTE(review): this chunk is cut from the middle of a training loop; `iter_no`,
# `noise`, `model`, `config`, `all_pc_data`, `sio`, `Image`, and
# `point_cloud_three_views` are provided by the surrounding code.
data = all_pc_data.next_batch(config.batch_size)[0]
pc_gen = model.fit(data, noise, iter_no, config.dis_n_iter)

# Dump the raw generated batch every 1000 iterations for offline inspection.
if iter_no % 1000 == 0:
    sio.savemat('%srender_%d.mat' % (config.render_dir, iter_no),
                {'X_hat': pc_gen})

# save image of point cloud (first sample of the batch, three fixed views)
if iter_no % config.renders_every_iter == 0:
    # assumes 2048 points per cloud with 3 coordinates each -- TODO confirm
    pc_gen = np.reshape(pc_gen[0, :], [2048, 3])
    im_array = point_cloud_three_views(pc_gen)
    img = Image.fromarray(np.uint8(im_array * 255.0))
    img.save('%srender_%d.jpg' % (config.render_dir, iter_no))

# Rolling checkpoint, overwritten in place.
if iter_no % config.save_every_iter == 0:
    model.save_model(config.save_dir + 'model.ckpt')

# Permanent snapshot every 10000 iterations, in its own directory.
if iter_no % 10000 == 0:
    # FIX: makedirs(exist_ok=True) instead of mkdir, so resuming past an
    # iteration whose snapshot directory already exists does not crash.
    os.makedirs(config.save_dir + str(iter_no), exist_ok=True)
    model.save_model(config.save_dir + str(iter_no) + '/model.ckpt')

# Record the last completed iteration so training can resume from it.
# BUG FIX: was `if iter_no % 10:`, which is true on every iteration EXCEPT
# multiples of 10 -- every sibling periodic action here tests `% k == 0`.
if iter_no % 10 == 0:
    with open(config.log_dir + 'start_iter', "w") as text_file:
        text_file.write("%d" % iter_no)

# testing
for test_no in range(config.N_test):
    noise = np.random.normal(size=[config.batch_size, config.z_size], scale=0.2)
    img = model.generate(noise)
# NOTE(review): this chunk is truncated mid-statement at BOTH ends -- it opens
# with the tail of an expression `(num_batches * epoch))` whose beginning is
# not visible, and it ends inside a `novaSet.get_data(...)` call that is
# continued (backslash continuations) past the end of the chunk. Code tokens
# below are kept byte-identical; only layout and comments are touched.
(num_batches * epoch))
# release the training batch before running the test pass
del real_images, nova_params
#calculate testing loss and accuracy
inputs = novaSet.test_info(which_config=[config, 'data'], which_type=config_type)
gen_tst_loss, gen_tst_accur, disc_tst_loss, disc_tst_accur = model.test(
    imgs=inputs[0], data=inputs[1], epoch=epoch)
print("Epoch %d : Gen Test Loss %f, Gen Test Accur %f, Disc Test Loss %f, Disc Test Accur %f, Gen Train Loss %f"\
    % (epoch, gen_tst_loss, gen_tst_accur, disc_tst_loss, disc_tst_accur, training_loss))
del inputs
#every 10 epochs, write the models' parameters to the disk and save images
if (epoch % 10) == 0:
    model.save_model(epoch)
    #save images
    print("Saving Image Summaries")
    # presumably images[0] = real batch, images[1] = fake/generated batch --
    # verify against novaSet.next_batch
    images = novaSet.next_batch(which_config=[config, 'data'],
                                which_type=config_type, batch_size=5)
    model.save_image_summary(r_inputs=images[0], f_inputs=images[1], sum_epoch=epoch)
    del images
if FLAGS.save_imgs:
    images = novaSet.get_data(which_config = ['data'],\
                              which_type = None,\
https://arxiv.org/pdf/1704.00028.pdf ''' if objective == 'max': # We are coming from the discriminator. grad_penalty = lamb * (torch.norm(grad, dim=1) - 1).pow(2).mean() loss = Dx.mean() - DGy.mean() - grad_penalty loss = -1 * loss elif objective == 'min': # We are coming from the generator -- the other terms in the loss don't matter. loss = -DGy.mean() return loss if __name__ == '__main__': # Get the dataloaders for SVHN print('Getting dataloaders...') batch_size = 64 train_loader, valid_loader, test_loader = get_loaders( batch_size=batch_size) # Instantiate and train GAN device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') assert (torch.cuda.is_available()) print("device: ", device) gan = GAN(device=device, batch_size=batch_size) print('Training GAN...') gan.train(test_loader, test_loader, loss_fn=wgan_gp) gan.log_learning_curves() gan.log_d_crossentropy() gan.save_model('gan.pt')