idx = np.random.choice(np.arange(train_len), BATCH_SIZE, replace=False)
batch_z = np.random.normal(-1.0, 1.0, size=[BATCH_SIZE, 100]).astype(np.float32)
batch_y = train_annots[idx]
batch_paths = train_paths[idx]
batch_images = np.empty((BATCH_SIZE, SIZE, SIZE, 3), dtype=np.float32)
bi = 0
for img_p in batch_paths:
    image = misc.imread(img_p)
    if CROP:
        image = data_ops.crop_center(image, 212, 212)
    image = misc.imresize(image, (SIZE, SIZE))
    image = data_ops.normalize(image)
    # randomly flip images left-right or up-down, each with probability 0.5
    if random.random() < 0.5:
        image = np.fliplr(image)
    if random.random() < 0.5:
        image = np.flipud(image)
    batch_images[bi, ...] = image
    bi += 1
sess.run(D_train_op, feed_dict={z: batch_z, y: batch_y, real_images: batch_images, mask: classes})
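These loops lean on a data_ops module that is not shown. Given that the test code later inverts the normalization with (img + 1.) * 127.5, here is a minimal sketch of what normalize and crop_center presumably do; the bodies are assumptions, not taken from the source:

import numpy as np

def normalize(image):
    # Map uint8 pixel values in [0, 255] to floats in [-1, 1],
    # the range a tanh-output generator produces.
    return (image.astype(np.float32) / 127.5) - 1.0

def crop_center(image, out_h, out_w):
    # Cut a centered out_h x out_w window out of an H x W x C image.
    h, w = image.shape[:2]
    top = (h - out_h) // 2
    left = (w - out_w) // 2
    return image[top:top + out_h, left:left + out_w]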
while epoch_num < EPOCHS:
    epoch_num = step / (train_len / BATCH_SIZE)
    start = time.time()
    idx = np.random.choice(np.arange(train_len), BATCH_SIZE, replace=False)
    batch_paths = train_paths[idx]
    batch_a_images = np.empty((BATCH_SIZE, 256, 256, 3), dtype=np.float32)
    batch_b_images = np.empty((BATCH_SIZE, 64, 64, 3), dtype=np.float32)
    i = 0
    for p in batch_paths:
        img = misc.imread(p).astype('float32')
        img_a = misc.imresize(img, (256, 256))
        img_b = misc.imresize(img, (64, 64))
        img_a = data_ops.normalize(img_a)
        img_b = data_ops.normalize(img_b)
        batch_a_images[i, ...] = img_a
        batch_b_images[i, ...] = img_b
        i += 1
    # Flip the whole batch left-right or up-down half the time. Flip along the
    # width/height axes explicitly: np.fliplr on a 4-D (N, H, W, C) batch would
    # flip the height axis, and np.flipud would shuffle the batch dimension.
    if random.random() < 0.5:
        batch_a_images = batch_a_images[:, :, ::-1, :]
        batch_b_images = batch_b_images[:, :, ::-1, :]
    if random.random() < 0.5:
        batch_a_images = batch_a_images[:, ::-1, :, :]
        batch_b_images = batch_b_images[:, ::-1, :, :]
    # update D
    for critic_itr in range(n_critic):
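The excerpt cuts off at the critic loop. A sketch of how such an n_critic loop typically continues in WGAN-style training follows; the op and placeholder names are assumptions, not taken from the source:

# Hypothetical op/placeholder names; the source is truncated here.
# Several critic (D) updates per generator update, then one G update.
for critic_itr in range(n_critic):
    sess.run(D_train_op, feed_dict={real_images: batch_a_images,
                                    small_images: batch_b_images})
sess.run(G_train_op, feed_dict={real_images: batch_a_images,
                                small_images: batch_b_images})
step += 1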
try:
    saver.restore(sess, ckpt.model_checkpoint_path)
    print "Model restored"
except:
    print "Could not restore model"
    raise

test_len = len(test_annots)
print test_len, 'testing images'

for t_img, t_annot, t_gid in zip(test_images, test_annots, test_ids):
    t_img = misc.imread(t_img)
    if CROP:
        t_img = data_ops.crop_center(t_img, 212, 212)
    t_img = misc.imresize(t_img, (SIZE, SIZE))
    t_img = data_ops.normalize(t_img)
    # white canvas: the real image on the left, MAX_GEN generated images after it
    canvas = 255 * np.ones((84, (MAX_GEN + 1) * 74 + 10, 3), dtype=np.uint8)
    start_x = 10
    start_y = 10
    end_y = start_y + 64
    # map [-1, 1] back to [0, 255] uint8
    t_img = (t_img + 1.) * 127.5
    t_img = np.clip(t_img, 0, 255).astype(np.uint8)
    t_img = np.reshape(t_img, (64, 64, -1))
    end_x = start_x + 64
    canvas[start_y:end_y, start_x:end_x, :] = t_img
    start_x = end_x + 10
    # put a line of black pixels in between the real image and generated ones
    canvas[:, end_x + 5] = 0
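The [-1, 1] denormalization above is inlined, and the super-resolution script below repeats it. A small helper mirroring the inline code would keep the clipping and dtype conversion in one place:

import numpy as np

def denormalize(image):
    # Inverse of data_ops.normalize: map [-1, 1] floats to [0, 255] uint8.
    image = (image + 1.) * 127.5
    return np.clip(image, 0, 255).astype(np.uint8)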
print 'train num:', train_len
epoch_num = step / (train_len / BATCH_SIZE)
lr_ = 1e-3
while epoch_num < EPOCHS:
    epoch_num = step / (train_len / BATCH_SIZE)
    idx = np.random.choice(np.arange(train_len), BATCH_SIZE, replace=False)
    batch_z = latents[idx]
    batch_img = images_[idx]
    batch_images = np.empty((BATCH_SIZE, 64, 64, 3), dtype=np.float32)
    i = 0
    for img in batch_img:
        img = data_ops.normalize(misc.imread(img))
        # randomly flip left-right half the time
        if random.random() < 0.5:
            img = np.fliplr(img)
        batch_images[i, ...] = img
        i += 1
    # step-decay learning-rate schedule: drop by 10x at epochs 10, 20, 30, 40
    if epoch_num > 40:
        lr_ = 1e-7
    elif epoch_num > 30:
        lr_ = 1e-6
    elif epoch_num > 20:
        lr_ = 1e-5
    elif epoch_num > 10:
        lr_ = 1e-4
    _, l = sess.run([train_op, loss], feed_dict={images: batch_images, z: batch_z,
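The feed dictionary above is truncated, but lr_ is presumably fed into the graph through a learning-rate placeholder; here is a sketch of that wiring under that assumption (the placeholder name and optimizer choice are hypothetical, not from the source):

import tensorflow as tf

# Hypothetical wiring: a placeholder lets the Python-side schedule drive
# the optimizer's learning rate at each step.
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

_, l = sess.run([train_op, loss],
                feed_dict={images: batch_images, z: batch_z, learning_rate: lr_})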
try:
    saver.restore(sess, ckpt.model_checkpoint_path)
    print "Model restored"
except:
    print "Could not restore model"
    raise

print 'Loading data...'
paths = np.asarray(sorted(glob.glob(IN_DIR + '*.png')))
i = 0
for img_p in tqdm(paths):
    img = misc.imread(img_p)
    img = misc.imresize(img, (64, 64))
    img = data_ops.normalize(img)
    img = np.expand_dims(img, 0)
    gen_img = np.squeeze(np.asarray(sess.run([gen_images], feed_dict={small_images: img})))
    img = np.squeeze(img)
    # map the generated image from [-1, 1] back to [0, 255] uint8
    g_img = (gen_img + 1.) * 127.5
    g_img = np.clip(g_img, 0, 255).astype(np.uint8)
    # also upscale with bicubic interpolation as a baseline and save that too
    b_int = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)
    misc.imsave(OUT_DIR + str(i) + '_real.png', img)
    misc.imsave(OUT_DIR + str(i) + '_interp.png', b_int)
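A portability note: scipy.misc.imread, imresize, and imsave have been removed from modern SciPy releases. If this script needs to run on a current stack, imageio plus cv2 (already used here for the bicubic baseline) provide equivalents; a sketch:

import cv2
import imageio

def load_small(img_p, size=64):
    # imageio.imread replaces the removed scipy.misc.imread;
    # cv2.resize replaces scipy.misc.imresize (cv2 takes (width, height)).
    img = imageio.imread(img_p)
    return cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA)

def save_image(path, img):
    # imageio.imwrite replaces scipy.misc.imsave.
    imageio.imwrite(path, img)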
pkl_file = open(DATA_DIR + 'data.pkl', 'rb')
data = pickle.load(pkl_file)
images_ = np.asarray(data.keys())
t = data.values()
encodings, labels = zip(*t)
encodings = np.asarray(encodings)
labels = np.asarray(labels)

original_image = images_[0]
label = labels[0]
z_ = encodings[0]
original_image = misc.imread(original_image)
original_image = data_ops.normalize(original_image)
z_ = np.expand_dims(z_, 0)
label = np.expand_dims(label, 0)
reconstruction = np.squeeze(sess.run(gen_images, feed_dict={z: z_, y: label}))
misc.imsave(IMAGES_DIR + '000_o.png', original_image)
misc.imsave(IMAGES_DIR + '000_r.png', reconstruction)
print label
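A robustness note: pairing data.keys() with data.values() only lines up because the dict is not modified between the two calls. Iterating items() once makes the pairing explicit; a sketch, assuming each value is the [encoding, label] pair the writer below stores:

items = data.items()
images_ = np.asarray([path for path, _ in items])
encodings = np.asarray([enc for _, (enc, lab) in items])
labels = np.asarray([lab for _, (enc, lab) in items])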
print 'test num:', test_len
info = {}
'''
for x in batch(train_images, BATCH_SIZE):
    if len(x) < 64:
        break
    batch_images = []
    for im in x:
        img = misc.imread(im).astype('float32')
        batch_images.append(img)
    batch_images = np.asarray(batch_images)
    encoding = sess.run([encoded], feed_dict={images: batch_images})[0]
    for ip, e in zip(x, encoding):
        info[ip] = [e]
print info
exit()
'''
# write out a file mapping each image path to its z vector and label
for image_path, label in tqdm(zip(test_images, test_annots)):
    img = data_ops.normalize(misc.imread(image_path))
    batch_images = np.expand_dims(img, 0)
    encoding = sess.run([encoded], feed_dict={images: batch_images})[0]
    info[image_path] = [encoding, label]

# write out the dictionary to a pickle file
p = open(OUTPUT_DIR + 'data.pkl', 'wb')
pickle.dump(info, p)
p.close()
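For reference, reading the file back mirrors the writer: the pickle holds a dict from image path to [encoding, label], which is exactly what the reconstruction script above consumes. A minimal sketch:

import pickle

with open(OUTPUT_DIR + 'data.pkl', 'rb') as f:
    info = pickle.load(f)

for image_path, (encoding, label) in info.items():
    # encoding is the z vector produced by the encoder; label is the annotation
    pass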