# 5x5 normalized box-blur kernel (mean filter). Unused in this visible
# fragment -- presumably applied to images further down. TODO confirm.
kernel = np.ones((5, 5), np.float32) / 25

# Main training loop: keep sampling batches until EPOCHS is reached.
while epoch_num < EPOCHS:
    s = time.time()

    # Epoch progress derived from the global step.
    # NOTE(review): under Python 2 this is integer division when both
    # operands are ints -- confirm whether fractional progress was intended.
    epoch_num = step / (num_train / BATCH_SIZE)

    # Sample a random batch of paired A/B image paths without replacement.
    idx = np.random.choice(np.arange(num_train), BATCH_SIZE, replace=False)
    batchA_paths = trainA_paths[idx]
    batchB_paths = trainB_paths[idx]

    batchA_images = np.empty((BATCH_SIZE, 256, 256, 3), dtype=np.float32)
    batchB_images = np.empty((BATCH_SIZE, 256, 256, 3), dtype=np.float32)

    # i indexes the slot in the batch arrays; the fill/increment happens
    # past this fragment's visible end.
    i = 0
    for a, b in zip(batchA_paths, batchB_paths):
        a_img = data_ops.preprocess(misc.imread(a).astype('float32'))
        b_img = data_ops.preprocess(misc.imread(b).astype('float32'))

        # Data augmentation here - each flip has an independent 50% chance.
        # Both images of a pair are flipped together so they stay aligned.
        if AUGMENT:
            r = random.random()
            # flip image left right
            if r < 0.5:
                a_img = np.fliplr(a_img)
                b_img = np.fliplr(b_img)
            r = random.random()
            # flip image up down
            if r < 0.5:
                a_img = np.flipud(a_img)
                b_img = np.flipud(b_img)
print 'num test:', num_test c = 0 times = [] for img_path in tqdm(test_paths): img_name = ntpath.basename(img_path) img_name = img_name.split('.')[0] batch_images = np.empty((1, 256, 256, 3), dtype=np.float32) a_img = misc.imread(img_path).astype('float32') a_img = misc.imresize(a_img, (256, 256, 3)) a_img = data_ops.preprocess(a_img) batch_images[0, ...] = a_img s = time.time() gen_images = np.asarray( sess.run(gen_image, feed_dict={image_u: batch_images})) tot = time.time() - s times.append(tot) for gen, real in zip(gen_images, batch_images): #misc.imsave(IMAGES_DIR+str(step)+'_'+str(c)+'_real.png', real) #misc.imsave(IMAGES_DIR+str(step)+'_'+str(c)+'_gen.png', gen) misc.imsave(IMAGES_DIR + img_name + '_real.png', real) misc.imsave(IMAGES_DIR + img_name + '_gen.png', gen)
print "Could not restore model" pass step = int(sess.run(global_step)) clean_images = glob.glob('clean/*.*') distorted_images = glob.glob('distorted/*.*') for cimg, dimg in zip(clean_images, distorted_images): batch_cimages = np.empty((1, 256, 256, 3), dtype=np.float32) batch_dimages = np.empty((1, 256, 256, 3), dtype=np.float32) img = misc.imread(cimg).astype('float32') img = misc.imresize(img, (256, 256, 3)) img = data_ops.preprocess(img) batch_cimages[0, ...] = img img = misc.imread(dimg).astype('float32') img = misc.imresize(img, (256, 256, 3)) img = data_ops.preprocess(img) batch_dimages[0, ...] = img # send through decoder and get all layers all_layers = sess.run(layers, feed_dict={image_u:batch_cimages}) print all_layers[0].shape exit() c_embedding = sess.run(embedding, feed_dict={image_u:batch_cimages})