def save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts):
    """Save translated samples from both generators (X->Y and Y->X).

    Writes one merged grid per direction to opts.sample_dir and also logs it
    to the global tensorboard `logger` (rescaled from [-1, 1] to [0, 1]).
    """
    generated_X = utils.to_data(G_YtoX(fixed_Y))
    generated_Y = utils.to_data(G_XtoY(fixed_X))
    real_X = utils.to_data(fixed_X)
    real_Y = utils.to_data(fixed_Y)

    grid = merge_images(real_X, generated_Y, opts)
    logger.add_image("Fake_Y", img_tensor=(grid + 1) / 2,
                     global_step=iteration, dataformats="HWC")
    out_path = os.path.join(opts.sample_dir,
                            'sample-{:06d}-X-Y.png'.format(iteration))
    imageio.imwrite(out_path, grid)
    print('Saved {}'.format(out_path))

    grid = merge_images(real_Y, generated_X, opts)
    logger.add_image("Fake_X", img_tensor=(grid + 1) / 2,
                     global_step=iteration, dataformats="HWC")
    out_path = os.path.join(opts.sample_dir,
                            'sample-{:06d}-Y-X.png'.format(iteration))
    imageio.imwrite(out_path, grid)
    print('Saved {}'.format(out_path))
def run_env_PPO(policy, env_func, max_steps=100, render=False, stochastic=True, reward_only=False, gamma=0.99):
    """Roll out `policy` in a fresh environment and collect PPO training data.

    Args:
        policy: network whose .forward(state, stochastic) returns (value, action, logprob).
        env_func: zero-arg factory returning a gym-style environment.
        max_steps: number of transitions to collect (ignored when reward_only).
        render: if True, render each step with a short sleep.
        stochastic: passed through to policy.forward (sampling vs. greedy action).
        reward_only: if True, run until the first episode ends and return only
            the scalar reward sum instead of trajectories.
        gamma: discount factor forwarded to calculate_returns.

    Returns:
        Either a scalar reward (reward_only) or the tuple
        (states, actions, rewards, values, logprobs, returns) as numpy arrays.
    """
    env = env_func()
    state = env.reset()
    done = False
    total_reward = 0
    states = []
    actions = []
    rewards = []
    values = []
    logprobs = []
    masks = []
    step = 0
    while True:
        # Stop after max_steps transitions; in reward_only mode we instead
        # run until the episode terminates (the `done` branch below returns).
        if (step == max_steps) and (not reward_only):
            break
        if render:
            env.render()
            time.sleep(0.05)
        value, action, logprob = policy.forward(utils.to_var(state).unsqueeze(0), stochastic)
        # depends on observation space (generally box)
        value, action, logprob = utils.to_data(value), utils.to_data(action), utils.to_data(logprob)
        # TODO: epsilon-greedy??
        new_state, reward, done, _ = env.step(action)
        states.append(state)
        actions.append(action)
        rewards.append(reward)
        values.append(value)
        logprobs.append(logprob)
        # mask is 0 at episode boundaries so returns do not bootstrap across them
        masks.append(1-done)
        total_reward += reward
        state = new_state
        if done:
            if reward_only:
                env.close()
                return total_reward
            else:
                # keep collecting across episode boundaries until max_steps
                state = env.reset()
        step += 1
    env.close()
    states = np.asarray(states)
    actions = np.asarray(actions)
    rewards = np.asarray(rewards)
    values = np.asarray(values)
    logprobs = np.asarray(logprobs)
    masks = np.asarray(masks)
    # Bootstrap the value of the final state unless the last transition
    # ended an episode.
    if done:
        last_value = 0.0
    else:
        last_value, _, _ = policy.forward(utils.to_var(state).unsqueeze(0), stochastic)
        last_value = utils.to_data(last_value)
    returns = calculate_returns(rewards, masks, last_value, gamma)
    if reward_only:
        # NOTE(review): this path looks like leftover debug output, and it
        # returns np.sum(rewards) while the in-loop reward_only exit returns
        # total_reward — confirm the two are meant to agree.
        print(rewards)
        return np.sum(rewards)
    return states, actions, rewards, values.squeeze(), logprobs, returns
def save_samples2(iteration, fake, real, opts, XtoY=True):
    """Save generated samples for one translation direction.

    With opts.save_ind set, fakes and reals are written separately (both to
    the shared dirs and to direction-specific dirs); otherwise a single
    merged real/fake grid goes to opts.sample_dir.
    """
    real, fake = utils.to_data(real), utils.to_data(fake)
    if XtoY:
        name, fake_dir, real_dir, real_name = 'XtoY', opts.x2y_dir, opts.reals_x_dir, 'X'
    else:
        name, fake_dir, real_dir, real_name = 'YtoX', opts.y2x_dir, opts.reals_y_dir, 'Y'

    if opts.save_ind:
        # Save generated images on their own, not paired with reals.
        fake_grid = prep_individual(fake)
        real_grid = prep_individual(real)
        # All reals together
        imageio.imwrite(
            os.path.join(opts.reals_dir, 'real-{:06d}-{}.png'.format(iteration, name)),
            real_grid)
        # All fakes together
        imageio.imwrite(
            os.path.join(opts.sample_dir, 'sample-{:06d}-{}.png'.format(iteration, name)),
            fake_grid)
        # Domain-specific reals
        imageio.imwrite(
            os.path.join(real_dir, 'sample-{:06d}-{}.png'.format(iteration, real_name)),
            real_grid)
        # Domain-specific fakes
        imageio.imwrite(
            os.path.join(fake_dir, 'sample-{:06d}-{}.png'.format(iteration, name)),
            fake_grid)
        print('Saved All!')
    else:
        combined = merge_images(real, fake, opts)
        out_path = os.path.join(opts.sample_dir,
                                'sample-{:06d}-{}.png'.format(iteration, name))
        imageio.imwrite(out_path, combined)
        print('Saved {}'.format(out_path))
def save_image(latent, fileroot):
    """Decode `latent` with the global `model` and write each decoded image
    to generated/<fileroot><index>.png (CHW converted to HWC before saving).
    """
    decoded = utils.to_data(model.decode(latent))
    for idx, img in enumerate(decoded):
        scipy.misc.imsave("generated/" + fileroot + str(idx) + ".png",
                          img.transpose(1, 2, 0))
def main():
    """Test entry point: run the glyph generator + texture generator over an
    input text image, either at a single scale (opts.scale in [0, 1]) or,
    when opts.scale == -1, sweeping the scale from -1 to 1 in opts.scale_step
    increments, and save the resulting image(s) to opts.result_dir.
    """
    # parse options
    parser = TestOptions()
    opts = parser.parse()
    # data loader
    print('--- load data ---')
    text = load_image(opts.text_name, opts.text_type)
    label = opts.scale
    step = opts.scale_step * 2.0
    if opts.gpu:
        text = to_var(text)
    # model
    print('--- load model ---')
    netGlyph = GlyphGenerator(n_layers=6, ngf=32)
    netTexture = TextureGenerator(n_layers=6)
    netGlyph.load_state_dict(torch.load(opts.structure_model))
    netTexture.load_state_dict(torch.load(opts.texture_model))
    if opts.gpu:
        netGlyph.cuda()
        netTexture.cuda()
    netGlyph.eval()
    netTexture.eval()
    print('--- testing ---')
    # perturb the first channel with gaussian noise before stylization
    text[:, 0:1] = gaussian(text[:, 0:1], stddev=0.2)
    if label == -1:  # default value -1: sweep the deformation scale
        scale = -1.0
        # fixed noise reused at every scale so results only differ by scale
        noise = text.data.new(text[:, 0:1].size()).normal_(0, 0.2)
        result = []
        while scale <= 1.0:
            img_str = netGlyph(text, scale)
            img_str[:, 0:1] = torch.clamp(img_str[:, 0:1] + noise, -1, 1)
            result1 = netTexture(img_str).detach()
            result = result + [result1]
            scale = scale + step
    else:
        # map label from [0, 1] to the network's [-1, 1] scale range
        img_str = netGlyph(text, label * 2.0 - 1.0)
        img_str[:, 0:1] = gaussian(img_str[:, 0:1], stddev=0.2)
        result = [netTexture(img_str)]
    if opts.gpu:
        for i in range(len(result)):
            result[i] = to_data(result[i])
    print('--- save ---')
    # directory
    if not os.path.exists(opts.result_dir):
        os.mkdir(opts.result_dir)
    for i in range(len(result)):
        if label == -1:
            result_filename = os.path.join(opts.result_dir, (opts.name + '_' + str(i) + '.png'))
        else:
            result_filename = os.path.join(opts.result_dir, (opts.name + '.png'))
        save_image(result[i][0], result_filename)
def save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts):
    """Save translated samples from both generators (X->Y and Y->X)."""
    def _dump(reals, fakes, template):
        # Merge one real/fake pair into a grid and write it to disk.
        grid = merge_images(reals, fakes, opts)
        out_path = os.path.join(opts.sample_dir, template.format(iteration))
        scipy.misc.imsave(out_path, grid)
        print('Saved {}'.format(out_path))

    fake_X = utils.to_data(G_YtoX(fixed_Y))
    fake_Y = utils.to_data(G_XtoY(fixed_X))
    real_X = utils.to_data(fixed_X)
    real_Y = utils.to_data(fixed_Y)
    _dump(real_X, fake_Y, 'sample-{:06d}-X-Y.png')
    _dump(real_Y, fake_X, 'sample-{:06d}-Y-X.png')
def unperturb_weights(self, new_weights, init_weights):
    """Recover the noise samples used to perturb a set of weights.

    Given perturbed weights \Theta = \bar{\theta} + \sigma*\epsilon and the
    originals \bar{\theta}, returns the per-tensor \epsilon values.

    Args:
        new_weights: iterable of perturbed weight tensors (\Theta).
        init_weights: iterable of the matching unperturbed tensors (\bar{\theta}).

    Returns:
        List of numpy arrays, one \epsilon = (\Theta - \bar{\theta}) / sigma
        per weight tensor, using this object's self.sigma.
    """
    epsilons = []
    # zip instead of enumerate: the index was never used.
    for weight, init_weight in zip(new_weights, init_weights):
        # \sigma*\epsilon
        diff = utils.to_data(weight - init_weight)
        # \epsilon = (\Theta - \bar{\theta}) / \sigma
        epsilons.append(diff / self.sigma)
    return epsilons
def save_reconstructions(iteration, fixed_Y, fixed_X, reconstructed_Y, reconstructed_X, opts):
    """Save cycle reconstructions (X->X and Y->Y) next to their originals."""
    X = utils.to_data(fixed_X)
    rec_X = utils.to_data(reconstructed_X)
    Y = utils.to_data(fixed_Y)
    rec_Y = utils.to_data(reconstructed_Y)

    for originals, recons, template in ((X, rec_X, 'sample-{:06d}-X-X.png'),
                                        (Y, rec_Y, 'sample-{:06d}-Y-Y.png')):
        grid = merge_images(originals, recons, opts)
        out_path = os.path.join(opts.sample_dir, template.format(iteration))
        scipy.misc.imsave(out_path, grid)
        print('Saved {}'.format(out_path))
def save_samples(G, fixed_noise, iteration, opts):
    """Generate images from `fixed_noise` and save them as one grid."""
    images = utils.to_data(G(fixed_noise))
    grid = create_image_grid(images)
    out_path = os.path.join(opts.sample_dir,
                            'sample-{:06d}.png'.format(iteration))
    scipy.misc.imsave(out_path, grid)
    print('Saved {}'.format(out_path))
def save_reconstructions(iteration, images_X, images_Y, reconstructed_X, reconstructed_Y, opts):
    """Save cycle reconstructions for both domains next to their originals."""
    images_X = utils.to_data(images_X)
    reconstructed_X = utils.to_data(reconstructed_X)
    images_Y = utils.to_data(images_Y)
    reconstructed_Y = utils.to_data(reconstructed_Y)

    for originals, recons, template in (
            (images_X, reconstructed_X, 'reconstruction-{:06d}-X.png'),
            (images_Y, reconstructed_Y, 'reconstruction-{:06d}-Y.png')):
        grid = merge_images(originals, recons, opts)
        out_path = os.path.join(opts.sample_dir, template.format(iteration))
        imageio.imwrite(out_path, grid)
        print('Saved {}'.format(out_path))
def save_samples(latent, filename):
    """Decode `latent` with the global model and save the merged grid
    under the 'generated' directory next to this file.
    """
    decoded = utils.to_data(model.decode(latent))
    grid = merge_images(decoded)
    out_path = os.path.join(os.path.dirname(__file__), 'generated', filename)
    scipy.misc.imsave(out_path, grid)
    print('Saved {}'.format(out_path))
def save_samples(iteration, fixed_Y, fixed_X, E_XtoY, E_YtoX, D_X, D_Y, T_XtoY, T_YtoX, opts):
    """Save translated and cycle-reconstructed samples for both domains.

    Translation goes encoder -> decoder; the cycle additionally passes the
    latent through the cross-domain translator T before decoding back.
    """
    fake_X = D_X(E_YtoX(fixed_Y))
    fake_Y = D_Y(E_XtoY(fixed_X))
    cycle_X = D_X(T_YtoX(E_XtoY(fixed_X)))
    cycle_Y = D_Y(T_XtoY(E_YtoX(fixed_Y)))
    X, fake_X, cycle_X = (utils.to_data(t) for t in (fixed_X, fake_X, cycle_X))
    Y, fake_Y, cycle_Y = (utils.to_data(t) for t in (fixed_Y, fake_Y, cycle_Y))
    print("X.shape: ", str(X.shape), "\ty.shape: ", str(fake_Y.shape))

    for reals, generated, template in ((X, fake_Y, 'sample-{:06d}-X-Y.png'),
                                       (Y, fake_X, 'sample-{:06d}-Y-X.png'),
                                       (X, cycle_X, 'sample-{:06d}-X-cycle_X.png'),
                                       (Y, cycle_Y, 'sample-{:06d}-Y-cycle_Y.png')):
        grid = merge_images(reals, generated, opts)
        out_path = os.path.join(opts.sample_dir, template.format(iteration))
        scipy.misc.imsave(out_path, grid)
        print('Saved {}'.format(out_path))
def save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts):
    """Save translations and cycle reconstructions from both generators."""
    fake_X = G_YtoX(fixed_Y)
    fake_Y = G_XtoY(fixed_X)
    cycle_X = G_YtoX(fake_Y)
    cycle_Y = G_XtoY(fake_X)
    X, fake_X, cycle_X = (utils.to_data(t) for t in (fixed_X, fake_X, cycle_X))
    Y, fake_Y, cycle_Y = (utils.to_data(t) for t in (fixed_Y, fake_Y, cycle_Y))

    for reals, generated, template in ((X, fake_Y, 'sample-{:06d}-X-Y.png'),
                                       (Y, fake_X, 'sample-{:06d}-Y-X.png'),
                                       (X, cycle_X, 'sample-{:06d}-X-cycle_X.png'),
                                       (Y, cycle_Y, 'sample-{:06d}-Y-cycle_Y.png')):
        grid = merge_images(reals, generated, opts)
        out_path = os.path.join(opts.sample_dir, template.format(iteration))
        scipy.misc.imsave(out_path, grid)
        print('Saved {}'.format(out_path))
def save_gradient_pic(D, fixed_generated_images, iteration, opts):
    """Visualize the discriminator's input gradients as an image grid.

    Computes d(MSE(D(pic), 1))/d(pic), collapses the gradient to its
    per-pixel L2 norm, and saves the grid to opts.sample_dir.
    """
    pic = utils.to_var(fixed_generated_images)
    pic.requires_grad_(True)
    # MSE of D's output against the "real" target label 1.
    loss = mse_loss(D(pic), 1)
    path = os.path.join(opts.sample_dir, 'gradients-{:06d}.png'.format(iteration))
    loss.backward()
    gradients = utils.to_data(pic.grad)
    # Broadcast the per-pixel gradient L2 norm (reduced over the channel
    # axis) back into every channel, so the saved image shows gradient
    # magnitude uniformly — not only the red channel.
    gradients[:, :] = np.sqrt(np.sum(gradients**2, axis=1, keepdims=True))
    grid = create_image_grid(gradients)
    scipy.misc.imsave(path, grid)
def stylize_text(model, inputs):
    """Render stylized text: glyph deformation followed by texture transfer.

    `inputs` supplies the source image path ("input_image") and deformation
    scale ("scale" in [0, 1], remapped to the network's [-1, 1] range).
    """
    source = to_var(load_image(inputs["input_image"], 1))
    scale = inputs["scale"]
    # Perturb the first channel before each network stage.
    source[:, 0:1] = gaussian(source[:, 0:1], stddev=0.2)
    structure = model['netGlyph'](source, scale * 2.0 - 1.0)
    structure[:, 0:1] = gaussian(structure[:, 0:1], stddev=0.2)
    textured = to_data(model['netTexture'](structure))
    return {"output_image": save_image(textured[0])}
def save_samples(G, fixed_noise, iteration, opts, extra_name):
    """Save a 10-column grid of G(fixed_noise) samples to disk.

    Targets either a mounted Google Drive folder (opts.colab) or a local
    'samples' subdirectory of opts.directory.
    """
    images = utils.to_data(G(fixed_noise))
    grid = create_image_grid(images, ncols=10)
    if opts.colab:
        # Write into the mounted Google Drive folder.
        dir_path = F"/content/gdrive/My Drive/{opts.directory}/samples/"
    else:
        dir_path = os.path.join(opts.directory, 'samples')
    out_path = os.path.join(dir_path,
                            'c{}_sample-{:06d}.png'.format(extra_name, iteration))
    scipy.misc.imsave(out_path, grid)
    if opts.display_debug:
        print('Saved {}'.format(out_path))
def translateDomain(img_x, model):
    """Translate one image to the other domain with `model`.

    Resizes to the module-level `image_size`, normalizes pixels to
    [-0.5, 0.5), runs the generator on the module-level `device`, and
    returns the result in HxWxC layout.
    """
    # Resize and normalize the input image.
    resized = cv2.resize(img_x, image_size)
    normalized = np.asarray((np.true_divide(resized, 255.0) - 0.5), dtype=np.float32)
    # HWC -> CHW, then add a leading batch dimension.
    chw = np.moveaxis(normalized, -1, 0)
    batch = torch.tensor(chw[np.newaxis, ...]).to(device)
    # Inference only.
    model.eval()
    translated = model(batch)
    # CHW -> HWC for the single returned image.
    return np.moveaxis(to_data(translated[0]), 0, 2)
def main():
    """TET-GAN test entry point.

    Runs either content-to-style transfer (opts.c2s == 1, needs a content
    image) or destylization of the style image, then saves the first result
    to opts.result_dir/opts.name.
    """
    # parse options
    parser = TestOptions()
    opts = parser.parse()
    # data loader
    print('--- load data ---')
    style = load_image(opts.style_name)
    if opts.gpu != 0:
        style = to_var(style)
    if opts.c2s == 1:
        # style transfer additionally needs the content image
        content = load_image(opts.content_name, opts.content_type)
        if opts.gpu != 0:
            content = to_var(content)
    # model
    print('--- load model ---')
    tetGAN = TETGAN()
    tetGAN.load_state_dict(torch.load(opts.model))
    if opts.gpu != 0:
        tetGAN.cuda()
    tetGAN.eval()
    print('--- testing ---')
    if opts.c2s == 1:
        result = tetGAN(content, style)
    else:
        # destylization: strip the texture from the style image
        result = tetGAN.desty_forward(style)
    if opts.gpu != 0:
        result = to_data(result)
    print('--- save ---')
    # directory
    result_filename = os.path.join(opts.result_dir, opts.name)
    if not os.path.exists(opts.result_dir):
        os.mkdir(opts.result_dir)
    save_image(result[0], result_filename)
def run_env_ES(weights, policy, env_func, render=False, stochastic=False):
    """Evaluate one evolution-strategies weight sample.

    Deep-copies `policy`, loads `weights` into the copy's parameters, rolls
    out a single episode in a fresh environment, and returns the total
    episode reward.

    Args:
        weights: iterable of weight tensors, aligned with policy.parameters().
        policy: template network; never mutated (a deep copy is used).
        env_func: zero-arg factory returning a gym-style environment.
        render: if True, render each step with a short sleep.
        stochastic: passed through to policy.forward (sampling vs. greedy).

    Returns:
        Accumulated scalar reward of the episode.
    """
    env = env_func()
    cloned_policy = copy.deepcopy(policy)
    # Copy the candidate weights into the clone. A weight may arrive as a
    # plain tensor or as a Parameter/Variable wrapper; unwrap `.data`
    # explicitly instead of the previous bare `except:` fallback, which
    # could mask unrelated errors (shape mismatches, interrupts, ...).
    for param, weight in zip(cloned_policy.parameters(), weights):
        source = weight.data if hasattr(weight, 'data') else weight
        param.data.copy_(source)
    state = env.reset()
    done = False
    total_reward = 0
    # NOTE(review): assumes the environment eventually reports done=True;
    # otherwise this loop never terminates.
    while not done:
        if render:
            env.render()
            time.sleep(0.05)
        action = cloned_policy.forward(utils.to_var(state).unsqueeze(0), stochastic)
        # depends on observation space (generally box)
        action = utils.to_data(action)
        state, reward, done, _ = env.step(action)
        total_reward += reward
    env.close()
    return total_reward
# Evaluation/plotting fragment: restore trained sub-networks from model_dir,
# run the model on its fixed data, plot the final transport map, and dump
# the raw arrays to disk.
model.eps.load_state_dict(torch.load(model_dir+'/eps.pkl'))
model.eps.eval()
if gen:
    model.g.load_state_dict(torch.load(model_dir+'/gen.pkl'))
    model.g.eval()
if config.solver == 'bary_ot':
    # NOTE(review): this branch reloads gen.pkl unconditionally, even when
    # `gen` is false — confirm bary_ot checkpoints always include a generator.
    model.psi.load_state_dict(torch.load(model_dir+'/psi.pkl'))
    model.psi.eval()
    model.g.load_state_dict(torch.load(model_dir+'/gen.pkl'))
    model.g.eval()
fixed_r, fixed_z = model.get_fixed_data()
#visualize_single_plotting(utils.to_data(fixed_z), utils.to_data(fixed_r), None,
#                          exp_dir+'/fixed_source_target_data.png')
# After passing through the model data
images = get_visuals(model, config)
# If there is generator, we should use g(fixed_z) = y for the transported data.
# Otherwise we should use 'TY'.
visualize_single_plotting(utils.to_data(fixed_z), utils.to_data(fixed_r),
                          utils.to_data(images['Y']) if gen else utils.to_data(images['TY']),
                          exp_dir+'/final_transport_plot.png')
# Same plot again as a vector PDF.
visualize_single_plotting(utils.to_data(fixed_z), utils.to_data(fixed_r),
                          utils.to_data(images['Y']) if gen else utils.to_data(images['TY']),
                          exp_dir+'/final_transport_plot.pdf')
data_dict = {'Y': utils.to_data(fixed_z), 'X':utils.to_data(fixed_r),
             'X_pred': utils.to_data(images['Y']) if gen else utils.to_data(images['TY'])}
np.save(exp_dir+'/data.npy', data_dict)
def main():
    """TET-GAN training entry point.

    For the few-shot texture classes ('base_gray_texture' /
    'skeleton_gray_texture') it fine-tunes a pre-trained model on a small
    style set with periodic validation; otherwise it trains from scratch
    on the full 'train' split, checkpointing every other epoch.
    """
    # parse options
    parser = TrainOptions()
    opts = parser.parse()
    # data loader
    print('--- load parameter ---')
    # outer_iter = opts.outer_iter
    # fade_iter = max(1.0, float(outer_iter / 2))
    epochs = opts.epoch
    batchsize = opts.batchsize
    # datasize = opts.datasize
    # datarange = opts.datarange
    augementratio = opts.augementratio
    centercropratio = opts.centercropratio
    # model
    print('--- create model ---')
    tetGAN = TETGAN(gpu=(opts.gpu != 0))
    if opts.gpu != 0:
        tetGAN.cuda()
    tetGAN.init_networks(weights_init)
    num_params = 0
    for param in tetGAN.parameters():
        num_params += param.numel()
    print('Total number of parameters in TET-GAN: %.3f M' % (num_params / 1e6))
    print('--- training ---')
    # few-shot texture fine-tuning vs. full training from scratch
    texture_class = 'base_gray_texture' in opts.dataset_class or 'skeleton_gray_texture' in opts.dataset_class
    if texture_class:
        # start from a pre-trained checkpoint
        tetGAN.load_state_dict(torch.load(opts.model))
        dataset_path = os.path.join(opts.train_path, opts.dataset_class, 'style')
        val_dataset_path = os.path.join(opts.train_path, opts.dataset_class, 'val')
        # class-specific few-shot hyperparameters
        if 'base_gray_texture' in opts.dataset_class:
            few_size = 6
            batchsize = 2
            epochs = 1500
        elif 'skeleton_gray_texture' in opts.dataset_class:
            few_size = 30
            batchsize = 10
            epochs = 300
        fnames = load_trainset_batchfnames_dualnet(dataset_path, batchsize, few_size=few_size)
        val_fnames = sorted(os.listdir(val_dataset_path))
        style_fnames = sorted(os.listdir(dataset_path)[:few_size])
    else:
        dataset_path = os.path.join(opts.train_path, opts.dataset_class, 'train')
        fnames = load_trainset_batchfnames_dualnet(dataset_path, batchsize)
    tetGAN.train()
    train_size = os.listdir(dataset_path)
    print('List of %d styles:' % (len(train_size)))
    result_dir = os.path.join(opts.result_dir, opts.dataset_class)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    for epoch in range(epochs):
        for idx, fname in enumerate(fnames):
            x, y_real, y = prepare_batch(fname, 1, 1, centercropratio, augementratio, opts.gpu)
            losses = tetGAN.one_pass(x[0], None, y[0], None, y_real[0], None, 1, 0)
            # progress report every 100 iterations
            if (idx + 1) % 100 == 0:
                print('Epoch [%d/%d], Iter [%d/%d]' % (epoch + 1, epochs, idx + 1, len(fnames)))
                print(
                    'Lrec: %.3f, Ldadv: %.3f, Ldesty: %.3f, Lsadv: %.3f, Lsty: %.3f' %
                    (losses[0], losses[1], losses[2], losses[3], losses[4]))
        if texture_class and ((epoch + 1) % (epochs / 20)) == 0:
            # checkpoint + validation 20 times over the run
            outname = 'save/' + 'val_epoch' + str(
                epoch + 1) + '_' + opts.dataset_class + '_' + opts.save_model_name
            print('--- save model Epoch [%d/%d] ---' % (epoch + 1, epochs))
            torch.save(tetGAN.state_dict(), outname)
            print('--- validating model [%d/%d] ---' % (epoch + 1, epochs))
            for val_idx, val_fname in enumerate(val_fnames):
                v_fname = os.path.join(val_dataset_path, val_fname)
                # pair each validation content with a random style exemplar
                random.shuffle(style_fnames)
                sty_fname = style_fnames[0]
                s_fname = os.path.join(dataset_path, sty_fname)
                with torch.no_grad():
                    val_content = load_image_dualnet(v_fname, load_type=1)
                    val_sty = load_image_dualnet(s_fname, load_type=0)
                    if opts.gpu != 0:
                        val_content = val_content.cuda()
                        val_sty = val_sty.cuda()
                    result = tetGAN(val_content, val_sty)
                    if opts.gpu != 0:
                        result = to_data(result)
                result_filename = os.path.join(
                    result_dir, str(epoch) + '_' + val_fname)
                print(result_filename)
                save_image(result[0], result_filename)
        elif not texture_class and ((epoch + 1) % 2) == 0:
            # full-training path: checkpoint every second epoch
            outname = 'save/' + 'epoch' + str(epoch + 1) + '_' + opts.save_model_name
            print('--- save model ---')
            torch.save(tetGAN.state_dict(), outname)
# SketchModule inference script: rebuild the destination directory from
# scratch, load a trained checkpoint, and sketch every image found in
# testing_directory.
if os.path.exists(destination_directory):
    print('REMOVING DIRECTORY')
    shutil.rmtree(destination_directory)
print('CREATING DIRECTORY')
os.mkdir(destination_directory)
netSketch = SketchModule(opts.GB_nlayers, opts.DB_nlayers, opts.GB_nf, opts.DB_nf, opts.gpu)
if opts.gpu:
    netSketch.cuda()
netSketch.init_networks(weights_init)
# NOTE(review): train() before loading the checkpoint looks redundant since
# eval() follows immediately — confirm it has no side effect being relied on.
netSketch.train()
netSketch.load_state_dict(torch.load(ckpt_file))
netSketch.eval()
list_of_files = [os.path.join(testing_directory, i) for i in os.listdir(testing_directory)]
for file in list_of_files:
    print('PROCESSING == ', file)
    I = load_image(file)
    I = to_var(I[:,:,:,0:I.size(3)])
    result = netSketch(I, -1.)
    # Map the output from [-1, 1] to [0, 255], convert CHW -> HWC, and save
    # under the same basename in the destination directory.
    Image.fromarray(((to_data(result[0]).numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)).save(os.path.join(destination_directory, file.split('/')[-1]))
def main():
    """PSGAN test entry point for shape synthesis ('SYN') or editing ('EDT').

    Runs the generator at one deformation level (opts.l in [0, 1]) or sweeps
    levels from 0 to 1 when opts.l == -1, refines each output with netF, and
    saves structure / generated / refined images to opts.result_dir.
    """
    # parse options
    parser = TestOptions()
    opts = parser.parse()
    if opts.model_task != 'SYN' and opts.model_task != 'EDT':
        print('%s:unsupported task!' % opts.model_task)
        return
    SYN = opts.model_task == 'SYN'
    # Note: this code is used for 256*256 images
    # To test on 64*64 images, change 'img_size = 256' to 'img_size = 64'
    # change 'netG.load_generator(filepath=opts.model_path, filename=opts.model_name)'
    # to 'netG.G64.load_state_dict(torch.load(your saved 64*64 model, map_location=device))'
    # In models.py, function forward_editing() and forward_synthesis()
    # change 'feature64 = self.G64.forward_feature(input64, l).detach()'
    # to 'outputs = self.G64(input256, l).detach()' and use outputs for final output
    img_size = 256
    # data loader
    print('----load data----')
    img = to_var(load_image(opts.input_name)) if opts.gpu else load_image(
        opts.input_name)
    level = opts.l
    # level == -1 sweeps l from 0 to 1; otherwise a single clamped level is
    # used and step = 100.0 makes the while-loop run exactly once.
    step = max(opts.l_step, 0.1) if level == -1 else 100.0
    l = 0.0 if level == -1 else min(max(level, 0.0), 1.0)
    if SYN:
        S = img
    else:
        # editing input concatenates [image | structure | mask] horizontally
        S = img[:, :, :, img_size:img_size * 2]
        M = img[:, 0:1, :, img_size * 2:img_size * 3]
        I = img[:, :, :, 0:img_size]
    print('----load model----')
    G_channels = 3 if SYN else 7
    netF_Norm = 'None' if SYN else 'BN'
    device = None if opts.gpu else torch.device('cpu')
    netG = PSGAN(G_channels=G_channels, max_dilate=opts.max_dilate,
                 img_size=img_size, gpu=opts.gpu != 0)
    netG.load_generator(filepath=opts.model_path, filename=opts.model_name)
    netF = Pix2pix256(in_channels=G_channels, nef=64, useNorm=netF_Norm)
    netF.load_state_dict(torch.load(opts.load_F_name, map_location=device))
    if opts.gpu:
        netG.cuda()
        netF.cuda()
    netG.eval()
    netF.eval()
    print('----testing----')
    S_gens = []
    I_gens = []
    I_outs = []
    while l <= 1.0:
        if SYN:
            S_gen, I_gen = netG.forward_synthesis(S, l)
            I_out = netF(S_gen).detach()
        else:
            S_gen, I_gen = netG.forward_editing(S, I, M, l)
            I_out = netF(torch.cat((S_gen, I * (1 - M) / 2, M), dim=1)).detach()
        # move results to CPU when running on GPU
        S_gens += [to_data(S_gen) if opts.gpu else S_gen]
        I_gens += [to_data(I_gen) if opts.gpu else I_gen]
        I_outs += [to_data(I_out) if opts.gpu else I_out]
        l += step
    print('----save----')
    if not os.path.exists(opts.result_dir):
        os.mkdir(opts.result_dir)
    for i in range(len(S_gens)):
        result_filename = os.path.join(opts.result_dir, opts.name)
        save_image(S_gens[i][0], result_filename + '-SGEN_' + str(i) + '.png')
        save_image(I_gens[i][0], result_filename + '-IGEN_' + str(i) + '.png')
        save_image(I_outs[i][0], result_filename + '-IOUT_' + str(i) + '.png')
def main():
    """Optimal-transport training driver for the w1 / w2 / bary_ot solvers.

    Sets up the experiment directories and tensorboardX writer, optionally
    computes a discrete-OT assignment benchmark, runs the solver's training
    loop(s) with periodic stats and visualizations, and saves the final
    networks.
    """
    config = Options().parse()
    utils.print_opts(config)
    ## set up folders
    dir_string = './{0}_{1}/trial_{2}/'.format(config.solver, config.data, config.trial) if config.solver != 'w2' else \
        './{0}_gen{2}_{1}/trial_{3}/'.format(config.solver, config.data, config.gen, config.trial)
    exp_dir = dir_string  #os.path.join(config.exp_dir, config.exp_name)
    model_dir = os.path.join(exp_dir, 'models')
    img_dir = os.path.join(exp_dir, 'images')
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    if config.use_tbx:
        # remove old tensorboardX logs
        logs = glob.glob(os.path.join(exp_dir, 'events.out.tfevents.*'))
        if len(logs) > 0:
            os.remove(logs[0])
        tbx_writer = SummaryWriter(exp_dir)
    else:
        tbx_writer = None
    ## initialize data loaders & model
    r_loader, z_loader = get_loader(config)
    if config.solver == 'w1':
        model = W1(config, r_loader, z_loader)
    elif config.solver == 'w2':
        model = W2(config, r_loader, z_loader)
    elif config.solver == 'bary_ot':
        model = BaryOT(config, r_loader, z_loader)
    cudnn.benchmark = True
    networks = model.get_networks(config)
    utils.print_networks(networks)
    fixed_r, fixed_z = model.get_fixed_data()
    utils.visualize_single(utils.to_data(fixed_z), utils.to_data(fixed_r), None,
                           os.path.join(img_dir, 'data.png'),
                           data_range=(-12, 12) if config.data == '8gaussians' else (-6, 6))
    if not config.no_benchmark:
        # Exact discrete-OT assignment used as a reference during training.
        print('computing discrete-OT benchmark...')
        start_time = time.time()
        cost = model.get_cost()
        discrete_tz = utils.solve_assignment(fixed_z, fixed_r, cost, fixed_r.size(0))
        print('Done in %.4f seconds.' % (time.time() - start_time))
        utils.visualize_single(utils.to_data(fixed_z), utils.to_data(fixed_r),
                               utils.to_data(discrete_tz),
                               os.path.join(img_dir, 'assignment.png'))
    ## training
    ## stage 1 (dual stage) of bary_ot
    start_time = time.time()
    if config.solver == 'bary_ot':
        print("Starting: dual stage for %d iters." % config.dual_iters)
        for step in range(config.dual_iters):
            model.train_diter_only(config)
            # report stats every 10 iterations
            if ((step + 1) % 10) == 0:
                stats = model.get_stats(config)
                end_time = time.time()
                stats['disp_time'] = (end_time - start_time) / 60.
                start_time = end_time
                utils.print_out(stats, step + 1, config.dual_iters, tbx_writer)
        print("dual stage complete.")
    ## main training loop of w1 / w2 or stage 2 (map stage) of bary-ot
    map_iters = config.map_iters if config.solver == 'bary_ot' else config.train_iters
    if config.solver == 'bary_ot':
        print("Starting: map stage for %d iters." % map_iters)
    else:
        print("Starting training...")
    for step in range(map_iters):
        model.train_iter(config)
        if ((step + 1) % 10) == 0:
            stats = model.get_stats(config)
            end_time = time.time()
            stats['disp_time'] = (end_time - start_time) / 60.
            start_time = end_time
            if not config.no_benchmark:
                # distance between the learned map and the discrete-OT benchmark
                if config.gen:
                    stats['l2_dist/discrete_T_x--G_x'] = losses.calc_l2(
                        fixed_z, model.g(fixed_z), discrete_tz).data.item()
                else:
                    stats['l2_dist/discrete_T_x--T_x'] = losses.calc_l2(
                        fixed_z, model.get_tx(fixed_z, reverse=True), discrete_tz).data.item()
            utils.print_out(stats, step + 1, map_iters, tbx_writer)
        # visualize at the first step and then every 10000 steps
        if ((step + 1) % 10000) == 0 or step == 0:
            images = model.get_visuals(config)
            utils.visualize_iter(
                images, img_dir, step + 1, config,
                data_range=(-12, 12) if config.data == '8gaussians' else (-6, 6))
    print("Training complete.")
    networks = model.get_networks(config)
    utils.save_networks(networks, model_dir)