# NOTE(review): this fragment was collapsed onto a single physical line in the
# source; the line breaks and indentation below are a best-effort
# reconstruction — confirm block boundaries against the original file.

# Persist the parsed CLI arguments next to the outputs for reproducibility.
# NOTE(review): the file handle from open() is never closed — should be a
# `with open(...) as f:` block.
json.dump(vars(args), open(os.path.join(args.save_path, 'args.json'), 'w'))

# Plot to a live visdom server when requested; otherwise substitute a no-op
# stand-in so the rest of the script can call viz.* unconditionally.
if args.use_visdom:
    import viz
    viz.setup(args.server, args.port, env=args.exp_name, use_tanh=True)
else:
    viz = MockVisdom()

# Select the spectrally-normalized encoder variant when --use_sn is set.
encoder = SNEncoder if args.use_sn else Encoder

# CIFAR data loaders plus the per-sample tensor shape used to size the nets.
train_loader, test_loader, shape = load_cifar(args.dataset_loc, args.batch_size, args.test_batch_size)

# Generator (Decoder) and discriminator (chosen encoder class), both on GPU.
# NOTE(review): the positional arguments (True, activation, 4) are opaque
# here — their meaning is defined by Decoder/Encoder elsewhere; confirm.
gan = Decoder(shape, args.gen_h_size, args.z_size, True, nn.ReLU(True), 4).cuda()
discriminator = encoder(shape, args.disc_h_size, 1, True, nn.LeakyReLU(0.1, True), 4).cuda()

# Apply the custom weight initialization to both networks.
gan.apply(weights_init)
discriminator.apply(weights_init)

# NOTE(review): presumably a WGAN-style weight-clipping callable with bound
# 0.05 — confirm weight_cliping's semantics (name is also a typo of
# "clipping" upstream).
weight_clip = weight_cliping(0.05)

# Separate Adam optimizers for generator and discriminator, shared hyperparams.
generator_optimizer = optim.Adam(gan.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
discriminator_optimizer = optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))

# t: global step counter — presumably incremented in the (truncated) loop body.
t = 0
for i in range(201):  # 201 epochs, hard-coded
    print('Epoch: %s' % i)
    gan.train()
    discriminator.train()
    # NOTE(review): the training-loop body is cut off here in this fragment;
    # the actual optimization steps continue beyond this view.
# NOTE(review): this fragment was collapsed onto a single physical line and is
# truncated at BOTH edges — it starts mid-`if/elif` chain of a
# `weights_init_normal(m)` function whose `def` line (and presumably a Conv2d
# branch) lies before this view, and it ends mid-`torch.load(` call. The
# indentation below is a best-effort reconstruction; confirm against the
# original file. In particular, the extent of the `if USE_CUDA:` block is a
# guess — verify which statements are conditional.

# DCGAN-style init: conv weights ~ N(0, 0.02); batch-norm scale ~ N(1, 0.02)
# with zero bias.
elif classname.find("Conv1d") != -1:
    torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
    torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
    torch.nn.init.constant_(m.bias.data, 0.0)
elif classname.find("BatchNorm1d") != -1:
    torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
    torch.nn.init.constant_(m.bias.data, 0.0)

# initialize generator and discriminator weights and device
if USE_CUDA:
    print("Using", torch.cuda.device_count(), "GPUs")
    # Wrap for multi-GPU, move to device, then apply the init above.
    gen_net = torch.nn.DataParallel(gen_net)
    gen_net.to(device)
    gen_net.apply(weights_init_normal)
    dis_net = torch.nn.DataParallel(dis_net)
    dis_net.to(device)
    dis_net.apply(weights_init_normal)

# Optionally resume the generator from a checkpoint path given via opt.netG.
if opt.netG != '':
    # map_location keeps GPU-saved tensors loadable on CPU-only machines.
    gen_net.load_state_dict(
        torch.load(
            opt.netG,
            map_location=lambda storage, location: storage)['state_dict'])
    # NOTE(review): the checkpoint is loaded a second time here just to read
    # 'epoch' — reuse one torch.load result instead. This second load also
    # omits map_location, so it would fail on a CPU-only machine for a
    # GPU-saved checkpoint.
    resume_epoch = torch.load(opt.netG)['epoch']
    #print('G loaded')
# Optionally resume the discriminator from opt.netD.
if opt.netD != '':
    dis_net.load_state_dict(
        torch.load(
            opt.netD,
            # NOTE(review): call truncated here in this fragment — the
            # remaining arguments and closing parentheses lie beyond this view.