def test_image_to_sketch(self):
    """Visual smoke test: display a sample image beside its extracted sketch.

    Manual test -- opens OpenCV windows and blocks until a key is pressed,
    so it needs a display and the sample file on disk.
    """
    source = general_util.imread('12746957.jpg')
    extracted = image_to_sketch(source)
    # imread returns RGB; OpenCV windows expect BGR, hence the conversion.
    cv2.imshow('Input', cv2.cvtColor(source, cv2.COLOR_RGB2BGR).astype(np.uint8))
    cv2.imshow('Sketch', extracted.astype(np.uint8))
    cv2.waitKey(0)
def test_generate_hint_from_image(self):
    """Visual smoke test: display an image beside the color hint generated from it.

    Manual test -- opens OpenCV windows and blocks until a key is pressed.
    """
    source = general_util.imread('face_36_566_115.png')
    hint = generate_hint_from_image(source)
    # Convert RGB -> BGR for display; the hint carries an alpha channel,
    # so it is converted from RGBA.
    cv2.imshow('Input', cv2.cvtColor(source, cv2.COLOR_RGB2BGR).astype(np.uint8))
    cv2.imshow('Hint', cv2.cvtColor(hint.astype(np.uint8), cv2.COLOR_RGBA2BGR))
    cv2.waitKey(0)
def test_get_tf_string_extension(self):
    """Visual smoke test of the TF-graph sketch extractor on a sample image.

    Builds the `sketch_extractor` op inside a CPU-only session, runs it on
    one sample image, and shows input and output side by side.  Manual
    test -- blocks on cv2.waitKey until a key is pressed.
    """
    # Pin the session to CPU so the test also runs on machines without a GPU.
    with self.test_session(config=tf.ConfigProto(device_count={'GPU': 0})) as sess:
        img = general_util.imread('2821993.jpg', dtype=np.uint8)
        # The extractor works on floats in [0, 1]; convert in, run, and
        # convert the result back to uint8 for display.
        sketch = tf.image.convert_image_dtype(
            sketch_extractor(
                tf.image.convert_image_dtype(img, tf.float32),
                "rgb", max_val=1, min_val=0),
            tf.uint8)
        actual_output, = sess.run([sketch])
        # imread gives RGB; OpenCV display expects BGR.
        cv2.imshow('Input', cv2.cvtColor(img, cv2.COLOR_RGB2BGR).astype(np.uint8))
        cv2.imshow('Sketch', actual_output.astype(np.uint8))
        cv2.waitKey(0)
def test_detect_complicated_img(self):
    """Visual check: print the complexity verdict for each training image.

    Iterates over the training images (skipping the first ten), printing
    `detect_complicated_img` for each and showing the image with its
    sketch so the verdict can be eyeballed.  Manual test -- blocks on a
    key press per image.
    """
    all_img_paths = general_util.get_all_image_paths_in_dir(
        '/home/xor/pixiv_images/train_images/')
    for img_path in all_img_paths[10:]:
        print(img_path)
        print(detect_complicated_img(img_path))
        current = general_util.imread(img_path)
        current_sketch = image_to_sketch(current)
        # RGB -> BGR so OpenCV displays the colors correctly.
        cv2.imshow('Input', cv2.cvtColor(current, cv2.COLOR_RGB2BGR).astype(np.uint8))
        cv2.imshow('Sketch', current_sketch.astype(np.uint8))
        cv2.waitKey(0)
def test_generate_training_image_pair(self):
    """Visual check of `generate_training_image_pair` over the test images.

    For each test image (skipping the first ten) it builds a sketch, runs
    the pair generator, and displays the target image, the sketch channel,
    and the hint channels.  Manual test -- blocks on a key press per image.
    """
    all_img_paths = general_util.get_all_image_paths_in_dir(
        '/home/xor/pixiv_images/test_images/')
    for img_path in all_img_paths[10:]:
        print(img_path)
        target = general_util.imread(img_path)
        pair_sketch = image_to_sketch(target)
        pair_sketch, target = generate_training_image_pair(pair_sketch, target)
        # The generated pair appears to be YUV-encoded -- converted for display.
        cv2.imshow('Input', cv2.cvtColor(target, cv2.COLOR_YUV2BGR).astype(np.uint8))
        # Channel 0 holds the sketch; the remaining channels hold the hint.
        cv2.imshow('Sketch', pair_sketch[..., 0].astype(np.uint8))
        cv2.imshow(
            'Hint',
            cv2.cvtColor(pair_sketch[..., 1:], cv2.COLOR_YUV2BGR).astype(np.uint8))
        cv2.waitKey(0)
img_shape = (128, 256) # fig = plt.figure() # ax = plt.axes(xlim=(0, img_shape[1]), ylim=(0, img_shape[0])) # myimg = ax.imshow(np.zeros(img_shape, np.uint8)) # plt.show() myimg = plt.imshow(np.zeros(img_shape, np.uint8)) fig = plt.gcf() image_i = max(a.last_stopped, 0) last_image_i = -1 image_i_after_cleaning = image_i while image_i < num_images: src_path = all_image_paths[image_i] # src_img = mpimg.imread(src_path) src_img = imread(src_path, shape=img_shape, dtype=np.uint8) sketch_part = get_sketch_img(src_img) # Only negate when the human did not perform action 1 or 2. if last_image_i != image_i and detect_need_negate(sketch_part): src_img = combined_image_sketch_negate(src_img) imsave(src_path, src_img) src_img = imread(src_path, shape=img_shape, dtype=np.uint8) sketch_part = get_sketch_img(src_img) assert src_img.shape[0] == img_shape[0] and src_img.shape[ 1] == img_shape[1] # if detect_wierd_distribution(sketch_img=sketch_part): # last_image_i = image_i # image_i += 1 myimg.set_data(src_img) plt.pause(0.1)