def combine_original_and_transferred(images, transferred_2x_image_file_format, combined_image_pattern):
  ret = []
  for i in range(len(images)):
    save_image_path = combined_image_pattern % i
    ret.append(save_image_path)
    if os.path.exists(save_image_path):
      continue
    # Wait up to 5 seconds for the transferred image to appear on disk.
    start_time = time.time()
    transferred_2x_image_file_path = transferred_2x_image_file_format % i
    while not os.path.exists(transferred_2x_image_file_path) and time.time() - start_time < 5:
      time.sleep(1)
    # Retry reading in case the file exists but is still being written.
    transferred_image = None
    while transferred_image is None and time.time() - start_time < 5:
      try:
        transferred_image = util_io.imread(transferred_2x_image_file_path)
      except IOError:
        time.sleep(1)
    if transferred_image is None:
      raise IOError('Cannot read image file %s' % transferred_2x_image_file_path)
    face_image = scipy.misc.imresize(images[i], (transferred_image.shape[0], transferred_image.shape[1]))
    combined_image = np.concatenate((face_image, transferred_image), axis=1)
    util_io.imsave(save_image_path, combined_image)
  return ret

def crop_out(sess, image_tensor, detected_tensors, min_score_thresh=.5,
             visualize_inference=False, category_index=None, feed_dict=None,
             img_np=None, filename=None):
  (detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor) = detected_tensors
  (image, detected_boxes, detected_scores, detected_classes) = sess.run(
      # Modified from tf.get_default_session() to sess.
      [image_tensor, detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor],
      feed_dict=feed_dict)

  # Keep only detections above the score threshold. Boxes are in normalized coordinates.
  detected_boxes = detected_boxes.T
  indices = detected_scores > min_score_thresh
  ymins = detected_boxes[0][indices]
  xmins = detected_boxes[1][indices]
  ymaxs = detected_boxes[2][indices]
  xmaxs = detected_boxes[3][indices]
  class_label = detected_classes[indices].tolist()
  image = image[0]

  # Expand each box around its center to include more context around the detected region.
  v_center = (ymaxs + ymins) / 2
  h_center = (xmaxs + xmins) / 2
  half_height = (ymaxs - ymins) / 2
  half_width = (xmaxs - xmins) / 2
  v_scale = 1.6
  h_scale = 2
  ymins = v_center - v_scale * half_height
  ymaxs = v_center + v_scale * half_height
  xmins = h_center - h_scale * half_width
  xmaxs = h_center + h_scale * half_width
  # The expanded boxes may fall outside the image; clip back to the valid [0, 1] range.
  ymins, xmins, ymaxs, xmaxs = (np.clip(v, 0., 1.) for v in (ymins, xmins, ymaxs, xmaxs))

  eye_count = 0
  for i, c in enumerate(class_label):
    if c == 2:
      # Convert the normalized box to pixel indices and crop out the detected eye region.
      eye_img = image[int(ymins[i] * image.shape[0]):int(ymaxs[i] * image.shape[0]),
                      int(xmins[i] * image.shape[1]):int(xmaxs[i] * image.shape[1])]
      util_io.imsave(filename[:-4] + '_' + str(eye_count) + '.png', eye_img)
      eye_count += 1

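# Note: the box expansion above is just a scaling of each box about its center. A minimal,
# self-contained sketch of the same arithmetic (NumPy only, with a made-up normalized box
# [ymin, xmin, ymax, xmax]; the values are chosen purely for illustration):
import numpy as np

boxes = np.array([[0.30, 0.40, 0.50, 0.60]])
v_scale, h_scale = 1.6, 2.0

center_y = (boxes[:, 0] + boxes[:, 2]) / 2
center_x = (boxes[:, 1] + boxes[:, 3]) / 2
half_h = (boxes[:, 2] - boxes[:, 0]) / 2
half_w = (boxes[:, 3] - boxes[:, 1]) / 2

expanded = np.stack([center_y - v_scale * half_h,
                     center_x - h_scale * half_w,
                     center_y + v_scale * half_h,
                     center_x + h_scale * half_w], axis=1)
print(expanded)  # approximately [[0.24 0.30 0.56 0.70]]
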
def test_imread_and_imsave_utf8(self):
  height = 256
  width = 256
  content_folder = tempfile.mkdtemp()
  image_path = content_folder + u'/骨董屋・三千世界の女主人_12746957.png'
  current_image = np.ones((height, width, 3)) * 255.0
  current_image[0, 0, 0] = 0
  util_io.imsave(image_path, current_image)

  actual_output = util_io.imread(util_io.get_all_image_paths(content_folder + '/')[0])

  expected_answer = np.round(np.array(current_image))
  np.testing.assert_almost_equal(expected_answer, actual_output)

def test_imread_bw(self):
  height = 256
  width = 256
  content_folder = tempfile.mkdtemp()
  image_path = content_folder + u'/骨董屋・三千世界の女主人_12746957.png'
  current_image = np.ones((height, width, 3)) * 255.0
  current_image[0, 0, 0] = 0
  util_io.imsave(image_path, current_image)

  actual_output = util_io.imread(util_io.get_files_in_dir(content_folder + '/')[0], bw=True)

  expected_answer = np.floor(_rgb2gray(np.array(current_image)))
  np.testing.assert_almost_equal(expected_answer, actual_output)

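# Note: `_rgb2gray` is a test helper defined outside this excerpt. A minimal sketch of what it
# presumably computes (a weighted luminance conversion) is shown below; the ITU-R BT.601
# weights are an assumption and may differ from the actual helper.
import numpy as np

def _rgb2gray(rgb):
  # Weighted sum over the RGB channels, returning a (height, width) grayscale array.
  return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
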
def _callback(result_future):
  """Callback function.

  Calculates the statistics for the prediction result.

  Args:
    result_future: Result future of the RPC.
  """
  exception = result_future.exception()
  if exception:
    print(exception)
  else:
    sys.stdout.write('.')
    sys.stdout.flush()
    # TODO: do post-processing using another function.
    response_images = np.reshape(
        np.array(result_future.result().outputs['outputs'].float_val),
        [dim.size for dim in result_future.result().outputs['outputs'].tensor_shape.dim]) * 255.0
    if supervised:
      start = kwargs['start']
      end = kwargs['end']
      subregion_shape = kwargs['subregion_shape']
      # Use of the flag here may cause bugs if it disagrees with the actual subregion shape.
      if subregion_shape[0] != FLAGS.image_hw or subregion_shape[1] != FLAGS.image_hw:
        subregion = scipy.misc.imresize(response_images[0][..., 0],
                                        (subregion_shape[0], subregion_shape[1]))
        subregion = np.expand_dims(subregion, -1)
      else:
        subregion = response_images[0]
      # Paste the predicted subregion back into a copy of the full sketch image.
      output = sketch_image.copy()
      output[start[0]:end[0], start[1]:end[1]] = subregion
    else:
      output = response_images[0]
    # A reader may see a half-written file while imsave is still running. To prevent that,
    # write to a temporary file first and move it into place once the save is done.
    temporary_file = os.path.splitext(output_path)[0] + '.tmp' + os.path.splitext(output_path)[1]
    util_io.imsave(temporary_file, output)
    shutil.move(temporary_file, output_path)

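# Note: the write-then-move trick at the end of the callback generalizes to any image save.
# Below is a minimal standalone sketch of the same pattern; the helper name
# `save_image_atomically` and the use of Pillow instead of util_io are assumptions made for
# illustration only.
import os
import shutil
import tempfile

import numpy as np
from PIL import Image


def save_image_atomically(path, image_array):
  """Writes `image_array` to `path` so readers never observe a half-written file."""
  directory = os.path.dirname(path) or '.'
  # Create the temporary file in the destination directory so the final move stays on one
  # filesystem (a same-directory rename is atomic on POSIX filesystems).
  fd, temporary_file = tempfile.mkstemp(suffix=os.path.splitext(path)[1], dir=directory)
  os.close(fd)
  Image.fromarray(np.uint8(image_array)).save(temporary_file)
  shutil.move(temporary_file, path)
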
def combine_original_and_transferred(images, transferred_2x_image_file_format, combined_image_pattern):
  ret = []
  for i in range(len(images)):
    save_image_path = combined_image_pattern % i
    ret.append(save_image_path)
    if os.path.exists(save_image_path):
      continue
    # TODO: handle the IOError ("cannot identify image file") raised when the image under
    # ./static/images/transferred_faces/ has not been fully written yet.
    transferred_image = util_io.imread(transferred_2x_image_file_format % i)
    face_image = scipy.misc.imresize(images[i], (transferred_image.shape[0], transferred_image.shape[1]))
    combined_image = np.concatenate((face_image, transferred_image), axis=1)
    util_io.imsave(save_image_path, combined_image)
  return ret

def main(_):
  inferer = ImageInferer()
  if FLAGS.input_image_path:
    outputs, image_paths = inferer.infer(FLAGS.input_image_path, return_image_paths=True)
  else:
    print('Generating images conditioned on random vector.')
    assert FLAGS.num_output >= 0, 'you have to specify the `num_output` flag for non-translational generators.'
    outputs, image_paths = inferer.infer(FLAGS.input_image_path, return_image_paths=True,
                                         num_output=FLAGS.num_output)
  if isinstance(outputs, list):
    util_io.touch_folder(FLAGS.output_image_path)
    for i, output in enumerate(outputs):
      util_io.imsave(os.path.join(FLAGS.output_image_path, os.path.basename(image_paths[i])), output)
  else:
    util_io.touch_folder(os.path.dirname(FLAGS.output_image_path))
    util_io.imsave(FLAGS.output_image_path, outputs)

def _callback(result_future):
  """Callback function.

  Calculates the statistics for the prediction result.

  Args:
    result_future: Result future of the RPC.
  """
  exception = result_future.exception()
  if exception:
    print(exception)
  else:
    sys.stdout.write('.')
    sys.stdout.flush()
    # TODO: do post-processing using another function.
    response_images = np.reshape(
        np.array(result_future.result().outputs['outputs'].float_val),
        [dim.size for dim in result_future.result().outputs['outputs'].tensor_shape.dim]) * 255.0
    util_io.imsave(output_path, response_images[0])

def face2face(input_image_path='/root/CSC4001/data/test_face'):
  output_image_path = ('/root/CSC4001/results/face/' +
                       input_image_path.split('/')[-1].split('.')[0] + '.jpg')
  inferer = ImageInferer()
  if input_image_path:
    outputs, image_paths = inferer.infer(input_image_path, return_image_paths=True)
  else:
    print('Generating images conditioned on random vector.')
    # Note: `num_output` must be defined in the enclosing scope for this branch to work.
    assert num_output >= 0, 'you have to specify the `num_output` flag for non-translational generators.'
    outputs, image_paths = inferer.infer(input_image_path, return_image_paths=True, num_output=num_output)
  if isinstance(outputs, list):
    util_io.touch_folder(output_image_path)
    for i, output in enumerate(outputs):
      util_io.imsave(os.path.join(output_image_path, os.path.basename(image_paths[i])), output)
  else:
    util_io.touch_folder(os.path.dirname(output_image_path))
    util_io.imsave(output_image_path, outputs)
  return output_image_path

def crop_face_and_save(self, image_path, save_image_pattern):
  cropped = self.crop_face(image_path)
  for i, cropped_img in enumerate(cropped):
    util_io.imsave(save_image_pattern % i, cropped_img)
  return cropped

def do_inference(self, output_dir, image_path=None, image_np=None):
  # Test double: ignores the inputs and writes a fixed mock image to `output_dir`.
  util_io.imsave(output_dir, self.mock_output_image)
  return output_dir

def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  inference_class = mask_inference if FLAGS.detect_masks else detection_inference

  if not os.path.exists(FLAGS.output_path):
    tf.gfile.MakeDirs(FLAGS.output_path)
  required_flags = ['input_images', 'output_path', 'inference_graph']
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  input_image_paths = []
  for v in FLAGS.input_images.split(','):
    if v:
      input_image_paths += tf.gfile.Glob(v)
  tf.logging.info('Reading input from %d files', len(input_image_paths))

  image_ph, image_tensor = build_input()
  tf.logging.info('Reading graph and building model...')
  detected_tensors = inference_class.build_inference_graph(
      image_tensor, FLAGS.inference_graph, override_num_detections=FLAGS.override_num_detections)

  tf.logging.info('Running inference and writing output to {}'.format(FLAGS.output_path))
  sess.run(tf.local_variables_initializer())
  for i, image_path in enumerate(input_image_paths):
    image_np = util_io.imread(image_path)
    result = inference_class.infer_detections(
        sess, image_tensor, detected_tensors,
        min_score_thresh=FLAGS.min_score_thresh,
        visualize_inference=FLAGS.visualize_inference,
        feed_dict={image_ph: image_np})

    if FLAGS.output_cropped_image:
      if FLAGS.only_output_cropped_single_object and len(result['detection_score']) == 1:
        num_outputs = 1
      else:
        num_outputs = len(result['detection_score'])
      for crop_i in range(num_outputs):
        if result['detection_score'][crop_i] > FLAGS.min_score_thresh:
          base, ext = os.path.splitext(os.path.basename(image_path))
          output_crop = os.path.join(FLAGS.output_path, base + '_crop_%d.png' % crop_i)
          idims = image_np.shape  # np array with shape (height, width, num_color(1, 3, or 4)).
          # Bounding boxes are normalized; convert to pixel coordinates clamped to the image size.
          min_x = int(min(round(result['detection_bbox_xmin'][crop_i] * idims[1]), idims[1]))
          max_x = int(min(round(result['detection_bbox_xmax'][crop_i] * idims[1]), idims[1]))
          min_y = int(min(round(result['detection_bbox_ymin'][crop_i] * idims[0]), idims[0]))
          max_y = int(min(round(result['detection_bbox_ymax'][crop_i] * idims[0]), idims[0]))
          image_cropped = image_np[min_y:max_y, min_x:max_x, :]
          util_io.imsave(output_crop, image_cropped)

    if FLAGS.visualize_inference:
      output_image = os.path.join(FLAGS.output_path, os.path.basename(image_path))
      util_io.imsave(output_image, result['annotated_image'])
      del result['annotated_image']  # No need to write the image to json.

    if FLAGS.detect_masks:
      base, ext = os.path.splitext(os.path.basename(image_path))
      for mask_i in range(len(result['detected_masks'])):
        # Stored as png to preserve accurate mask values.
        output_mask = os.path.join(FLAGS.output_path, base + '_mask_%d' % mask_i + '.png')
        util_io.imsave(output_mask, np.array(result['detected_masks'][mask_i]) * 255)
      del result['detected_masks']  # Storing masks in json is pretty space consuming.

    output_file = os.path.join(FLAGS.output_path,
                               os.path.splitext(os.path.basename(image_path))[0] + '.json')
    with open(output_file, 'w') as f:
      json.dump(result, f)

    tf.logging.log_every_n(tf.logging.INFO, 'Processed %d/%d images...', 10, i, len(input_image_paths))
  print('Finished processing all images in data set.')

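# Note: the crop arithmetic above converts normalized box coordinates into pixel indices
# clamped to the image size. A standalone illustration with made-up values (the box and the
# image dimensions are hypothetical):
import numpy as np

bbox = {'ymin': 0.25, 'xmin': 0.10, 'ymax': 0.75, 'xmax': 1.05}  # xmax overshoots on purpose.
height, width = 480, 640

min_y = int(min(round(bbox['ymin'] * height), height))  # 120
max_y = int(min(round(bbox['ymax'] * height), height))  # 360
min_x = int(min(round(bbox['xmin'] * width), width))    # 64
max_x = int(min(round(bbox['xmax'] * width), width))    # 640, clamped from 672

image_np = np.zeros((height, width, 3), dtype=np.uint8)
print(image_np[min_y:max_y, min_x:max_x, :].shape)  # (240, 576, 3)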