# Optionally resume from a saved checkpoint: the first CLI argument is the
# iteration number of the model file to restore (MODEL_DIR/model<N>.ckpt).
# start_iter defaults to 0 so it is always defined, even with no CLI argument
# (previously it was only bound inside the if-branch, risking a NameError).
start_iter = 0
model_filename = None  # None -> train from scratch, no checkpoint restore
if len(sys.argv) >= 2:
    start_iter = int(sys.argv[1])
    model_filename = os.path.join(MODEL_DIR, 'model%d.ckpt' % start_iter)

# Generator code
K.set_learning_phase(1)  # to accommodate Keras layers (training mode)
# Graph inputs. G_Z has 4 channels (presumably image + mask plane — confirm
# against the generator definition); DG_X is a plain 3-channel image batch.
G_Z = tf.placeholder(tf.float32, shape=[None, IMAGE_SZ, IMAGE_SZ, 4], name='G_Z')
DG_X = tf.placeholder(tf.float32, shape=[None, IMAGE_SZ, IMAGE_SZ, 3], name='DG_X')

# Create mask used to blank out the region to be inpainted.
mask = util.load_mask('mask.png')

# Load Places365 data.
# Originally from http://data.csail.mit.edu/places/places365/val_256.tar
data = np.load('places/places_128.npz')
imgs = data['imgs_train']
imgs_p = util.preprocess_images_inpainting(imgs, mask=mask)
test_imgs = data['imgs_test']
test_imgs_p = util.preprocess_images_inpainting(test_imgs, mask=mask)

# Fixed samples for qualitative monitoring during training.
test_img = test_imgs[:N_TEST]
test_img_p = test_imgs_p[:N_TEST]
# np.newaxis keeps a single training image batched (shape [1, H, W, C]).
train_img = imgs[4, np.newaxis]
train_img_p = imgs_p[4, np.newaxis]
"""Composite a screenshot into a device-frame photo.

Usage: python3.6 generate_image.py -device "iPhone 11 Pro.png" -screen "frame.png"

Writes a 4-panel debug figure to fig.png and the final composite to out.png.
"""
import argparse

import cv2  # was used throughout but never imported -> NameError at runtime
import matplotlib.pyplot as plt

from util import load_mask
from util import generate_masked_frame

parser = argparse.ArgumentParser()
parser.add_argument('-device', help='device background')
parser.add_argument('-screen', help='screenshot')
args = parser.parse_args()

# Load device background image; cv2.imread returns None (no exception) on a
# missing/unreadable path, so fail loudly here instead of deep inside cvtColor.
background_bgr = cv2.imread(args.device)
if background_bgr is None:
    raise FileNotFoundError('could not read device image: %r' % args.device)
background = cv2.cvtColor(background_bgr, cv2.COLOR_BGR2RGB)
# Mask detection operates in HSV space.
background_hsv = cv2.cvtColor(background, cv2.COLOR_RGB2HSV)

# Generate the screen mask and its bounding rectangle.
mask, mask_rect = load_mask(background_hsv)

# Apply mask to screenshot.
frame = cv2.imread(args.screen)
if frame is None:
    raise FileNotFoundError('could not read screenshot: %r' % args.screen)
masked_frame, final_image = generate_masked_frame(background, mask, mask_rect, frame)

# Debug figure: background, mask, masked screenshot, final composite.
f, ax = plt.subplots(1, 4, figsize=(20, 10))
ax[0].imshow(background)
ax[1].imshow(mask)
ax[2].imshow(masked_frame)
ax[3].imshow(final_image)
f.savefig("fig.png")

# final_image is RGB; OpenCV expects BGR when writing.
cv2.imwrite("out.png", cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))