def main(_):
    # Enable Verbose Logging
    tf.logging.set_verbosity(tf.logging.INFO)

    # Check if all required flags are present
    required_flags = ['image', 'output_path', 'inference_graph']
    for flag_name in required_flags:
        if not getattr(FLAGS, flag_name):
            raise ValueError('Flag --{} is required'.format(flag_name))

    # Load the category index: a dictionary mapping integer ids to category
    # dicts, e.g. {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}

    category_index_from_labelmap = label_map_util.create_category_index_from_labelmap(
        FLAGS.path_protofile)

    with tf.Session() as sess:
        input_path = FLAGS.image
        tf.logging.info('Reading input from %s', input_path)

        # Obtain image tensor
        image_tensor = load_image(input_path)

        # Build the inference graph
        tf.logging.info('Reading graph and building model...')
        (detected_boxes_tensor, detected_scores_tensor,
         detected_labels_tensor) = detection_inference.build_inference_graph(
             image_tensor, FLAGS.inference_graph)

        # Run the graph to get detections
        (detected_boxes, detected_scores, detected_labels) = sess.run(
            [detected_boxes_tensor, detected_scores_tensor,
             detected_labels_tensor])

        # Detected boxes have the form [ymin, xmin, ymax, xmax]

        input_image = np.squeeze(sess.run(image_tensor))

        # Draw bounding boxes for detections above the confidence threshold
        ii = np.where(detected_scores > FLAGS.confidence)[0]
        for i in ii:
            ymin, xmin, ymax, xmax = detected_boxes[i]

            category = category_index_from_labelmap[detected_labels[i]]['name']

            vis_utils.draw_bounding_box_on_image_array(
                input_image, ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax,
                display_str_list=(category,), color='MediumPurple')

        vis_utils.save_image_array_as_png(input_image, FLAGS.output_path)
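# load_image is not defined in this snippet. A minimal sketch consistent with
# its use above (an assumption, not the original helper): decode an image file
# into the [1, H, W, 3] uint8 tensor the detection graph expects.
def load_image(path):
    image_bytes = tf.read_file(path)
    image = tf.image.decode_image(image_bytes, channels=3)
    return tf.expand_dims(image, axis=0)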
Example #2
def main():
    # load the images and file names
    (file_names, numpy_images) = load_images()

    # create a graph and category indices that will detect cars and pedestrians from a frozen inference graph
    kitti_detection_graph = load_model(PATH_TO_FROZEN_GRAPH_1)
    kitti_category_indices = load_colloquial_labels(PATH_TO_LABELS_1)

    # create a graph and category indices that will detect signs from a frozen inference graph
    signs_detection_graph = load_model(PATH_TO_FROZEN_GRAPH_2)
    signs_category_indices = load_colloquial_labels(PATH_TO_LABELS_2)

    # create sessions for each graph
    session1 = tf.Session(graph=kitti_detection_graph)
    session2 = tf.Session(graph=signs_detection_graph)

    # create threads that will perform object detections for each graph in parallel
    thread1 = Thread(target=worker_runnable, args=(session1, numpy_images, kitti_list))
    thread2 = Thread(target=worker_runnable, args=(session2, numpy_images, signs_list))

    # start the threads
    thread1.start()
    thread2.start()

    # serialize the recombination and writing of the output images
    thread1.join()
    thread2.join()

    # for each result, apply the detections of each graph and save the output image
    for i in range(len(kitti_list)):
        mod_image = vis_util.visualize_boxes_and_labels_on_image_array(
            numpy_images[i],
            np.squeeze(kitti_list[i][0]),
            np.squeeze(kitti_list[i][2]).astype(np.int32),
            np.squeeze(kitti_list[i][1]),
            kitti_category_indices,
            use_normalized_coordinates=True,
            line_thickness=8)

        mod_image = vis_util.visualize_boxes_and_labels_on_image_array(
            mod_image,
            np.squeeze(signs_list[i][0]),
            np.squeeze(signs_list[i][2]).astype(np.int32),
            np.squeeze(signs_list[i][1]),
            signs_category_indices,
            use_normalized_coordinates=True,
            line_thickness=8)

        # save the output image
        vis_util.save_image_array_as_png(
            mod_image, os.path.join("output/", os.path.basename(file_names[i])))
Example #3
def main(_):
  host, port = FLAGS.server.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

  # Download the input image
  response = requests.get(FLAGS.image, stream=True)

  if response.status_code == 200:
    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model_name
    request.model_spec.signature_name = FLAGS.signature_name
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(response.content, shape=[1]))
    result = stub.Predict(request, 10.0)  # 10 secs timeout

    image = Image.open(BytesIO(response.content))
    image_np = load_image_into_numpy_array(image)
    boxes = np.array(result.outputs['detection_boxes'].float_val).reshape(
        result.outputs['detection_boxes'].tensor_shape.dim[0].size,
        result.outputs['detection_boxes'].tensor_shape.dim[1].size,
        result.outputs['detection_boxes'].tensor_shape.dim[2].size
    )
    classes = np.array(result.outputs['detection_classes'].float_val)
    scores = np.array(result.outputs['detection_scores'].float_val)

    label_map = label_map_util.load_labelmap(FLAGS.label_map_path)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=FLAGS.num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)
    vis_util.save_image_array_as_png(
        image_np, FLAGS.save_path + "/output-" + FLAGS.image.split('/')[-1])
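# load_image_into_numpy_array (used in this and the following examples) is the
# standard helper from the TF Object Detection tutorial:
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)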
Example #4
def run_output(image_path, output_path):
    image = Image.open(image_path)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    detection_graph = graph_loader()
    output_dict = run_inference_for_single_image(image_np_expanded,
                                                 detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=5)
    vis_util.save_image_array_as_png(image_np, output_path)
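# graph_loader and run_inference_for_single_image are assumed above. A compact
# sketch of the inference helper in the style of the TF OD tutorial (masks
# omitted; the image is expected with a leading batch dimension, as above):
def run_inference_for_single_image(image, graph):
    with graph.as_default():
        with tf.Session() as sess:
            tensor_dict = {
                name: graph.get_tensor_by_name(name + ':0')
                for name in ['num_detections', 'detection_boxes',
                             'detection_scores', 'detection_classes']}
            output_dict = sess.run(
                tensor_dict,
                feed_dict={graph.get_tensor_by_name('image_tensor:0'): image})
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.int64)
    return output_dict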
Example #5
def main():
    detection_graph = load_model()
    category_index = load_colloquial_labels()

    # For every jpg in the img/ directory
    for file_name in glob.glob('img/*.jpg'):
        # Load the image
        image = Image.open(file_name)
        image_np = load_image_into_numpy_array(image)
        # Actual detection results
        output_dict = execute_inference_for_image(image_np, detection_graph)
        # Visualize the results of a detection.
        modified_image = vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
        vis_util.save_image_array_as_png(
            modified_image, os.path.join("output/",
                                         os.path.basename(file_name)))
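# load_model and load_colloquial_labels are assumed in Examples #2 and #5.
# Typical implementations, sketched here as an assumption (the path constants
# are placeholders):
def load_model(path_to_frozen_graph=PATH_TO_FROZEN_GRAPH):
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(path_to_frozen_graph, 'rb') as f:
            graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    return detection_graph

def load_colloquial_labels(path_to_labels=PATH_TO_LABELS):
    return label_map_util.create_category_index_from_labelmap(
        path_to_labels, use_display_name=True)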
Example #6
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on TensorBoard. It optionally also
  writes images to a directory. In the case of a missing entry in the label
  map, the class name is shown as "N/A" in the visualization.

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score; for
      display, scores are assumed to be probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualizations are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)

  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)

  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)

  if export_dir:
    # Modified from the stock exporter: instead of naming the file
    # 'export-{tag}-{key}.png', use the basename of the image key (a file
    # path) with a '.png' extension.
    if keep_image_id_for_visualization_export and result_dict[
        fields.InputDataFields().key]:
      export_path = os.path.join(
          export_dir,
          result_dict[fields.InputDataFields().key].decode('ASCII')
          .split('/')[-1] + '.png')
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))

    vis_utils.save_image_array_as_png(image, export_path)

  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)

  logging.info('Detection visualizations written to summary with tag %s.', tag)
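# A hedged usage sketch for visualize_detection_results (names and values are
# illustrative; boxes are absolute pixel coordinates since the function calls
# the visualizer with use_normalized_coordinates=False):
#
#   result_dict = {
#       'original_image': image_np[np.newaxis, ...],  # [1, H, W, 3]
#       'detection_boxes': boxes,                     # [N, 4]
#       'detection_scores': scores,                   # [N]
#       'detection_classes': classes,                 # [N]
#   }
#   visualize_detection_results(result_dict, tag='image_0', global_step=1000,
#                               categories=categories,
#                               summary_dir='/tmp/summaries',
#                               export_dir='/tmp/exports')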
Example #7
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                min_score_thresh=.5,
                                max_num_predictions=20):
    """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on TensorBoard. It optionally also
  writes images to a directory. In the case of a missing entry in the label
  map, the class name is shown as "N/A" in the visualization.

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score; for
      display, scores are assumed to be probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualizations are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
    if not set([
            'original_image', 'detection_boxes', 'detection_scores',
            'detection_classes'
    ]).issubset(set(result_dict.keys())):
        raise ValueError('result_dict does not contain all expected keys.')
    if show_groundtruth and 'groundtruth_boxes' not in result_dict:
        raise ValueError(
            'If show_groundtruth is enabled, result_dict must contain '
            'groundtruth_boxes.')
    logging.info('Creating detection visualizations.')
    category_index = label_map_util.create_category_index(categories)

    image = np.squeeze(result_dict['original_image'], axis=0)
    detection_boxes = result_dict['detection_boxes']
    detection_scores = result_dict['detection_scores']
    detection_classes = np.int32((result_dict['detection_classes']))
    detection_keypoints = result_dict.get('detection_keypoints', None)
    detection_masks = result_dict.get('detection_masks', None)

    # Plot groundtruth underneath detections
    if show_groundtruth:
        groundtruth_boxes = result_dict['groundtruth_boxes']
        groundtruth_keypoints = result_dict.get('groundtruth_keypoints', None)
        vis_utils.visualize_boxes_and_labels_on_image_array(
            image,
            groundtruth_boxes,
            None,
            None,
            category_index,
            keypoints=groundtruth_keypoints,
            use_normalized_coordinates=False,
            max_boxes_to_draw=None)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image,
        detection_boxes,
        detection_classes,
        detection_scores,
        category_index,
        instance_masks=detection_masks,
        keypoints=detection_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=max_num_predictions,
        min_score_thresh=min_score_thresh,
        agnostic_mode=agnostic_mode)

    if export_dir:
        export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
        vis_utils.save_image_array_as_png(image, export_path)

    summary = tf.Summary(value=[
        tf.Summary.Value(
            tag=tag,
            image=tf.Summary.Image(
                encoded_image_string=vis_utils.encode_image_array_as_png_str(
                    image)))
    ])
    summary_writer = tf.summary.FileWriter(summary_dir)
    summary_writer.add_summary(summary, global_step)
    summary_writer.close()

    logging.info('Detection visualizations written to summary with tag %s.',
                 tag)
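Example #8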
                    x_stack=np.hstack((x_stack,x_))
                    y_stack=np.hstack((y_stack,y_))
                    detected_boxes_stack=np.vstack((detected_boxes_stack,detected_boxes))
                    detected_scores_stack=np.vstack((detected_scores_stack,detected_scores))

                # Visualize some high-confidence images
                if FLAGS.vis_path and detected_scores.any():
                    if np.max(detected_scores)>FLAGS.vis_threshold:
                        for j in range(FLAGS.batch_size):
                            if np.max(detected_scores[j])>FLAGS.vis_threshold:
                                # Draw boxes
                                boxes_ii=np.where(detected_scores[j]>FLAGS.vis_threshold)
                                boxes=detected_boxes[j][boxes_ii]
                                vis_utils.draw_bounding_boxes_on_image_array(image[j],boxes)
                                # Save
                                vis_utils.save_image_array_as_png(image[j],os.path.join(FLAGS.vis_path,str(x_[j])+"-"+str(y_[j])+".png"))

        except tf.errors.OutOfRangeError:
            # Catch exceptions
            tf.logging.info('Finished processing records')

        finally:
            if len(sat_gen.map):
                sat_gen.write(x_stack,y_stack,(1024,1024),detected_boxes_stack,detected_scores_stack,batch_size=FLAGS.batch_size,stack_size=int(len(x_stack)/FLAGS.batch_size),count=True)
                tf.logging.info('Finished writing residual stacks')
            end=time.time()
            tf.logging.info("Elapsed time {}".format(end-start))

if __name__ == '__main__':
    tf.app.run(main=main)
Example #9
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on TensorBoard. It optionally also
  writes images to a directory. In the case of a missing entry in the label
  map, the class name is shown as "N/A" in the visualization.

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score; for
      display, scores are assumed to be probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualizations are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)

  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)

  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)

  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)

  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)

  logging.info('Detection visualizations written to summary with tag %s.', tag)
Example #10
def test(_):
    start=time.time()

    tf.logging.set_verbosity(tf.logging.DEBUG)

    # Check FLAGS
    SatellitleGeneratorTest.check_flags(required_flags=['input_path', 'inference_graph'])

    # Initialise and load shard
    if FLAGS.test_size == -1:
        # Use default test size of 10
        sat_gen = SatellitleGeneratorTest(FLAGS.input_path, FLAGS.batch_size)
    else:
        sat_gen = SatellitleGeneratorTest(FLAGS.input_path, FLAGS.batch_size, FLAGS.test_size)
    sat_gen.load_shard(FLAGS.shard_path, FLAGS.shard)

    # Test a sample patch read
    coord=(sat_gen.map[0][0],sat_gen.map[1][0])
    sat_gen.read_patch_test(coord,FLAGS.input_path)

    # Setup dataset object
    sat_gen.setup_data()

    # Turn on log_device_placement for verbosity in ops
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)) as sess:
        sess.run(sat_gen.iter.initializer)

        if FLAGS.test_images:
            # Drain the iterator once to verify the image-reading pipeline
            x, y, image_tensors = sat_gen.iter.get_next()
            try:
                while True:
                    sess.run([x, y, image_tensors])
            except tf.errors.OutOfRangeError:
                pass

            sys.exit(1)

        # Read and Fill Graph
        tf.logging.info('Reading graph and building model...')
        with tf.gfile.Open(FLAGS.inference_graph, 'rb') as graph_def_file:
            graph_content = graph_def_file.read()

        # Fetch the input tensors and build the graph once, outside the loop;
        # each sess.run below pulls the next batch from the iterator.
        x, y, image_tensors = sat_gen.iter.get_next()
        (num_detections_tensor, detected_boxes_tensor, detected_scores_tensor,
         detected_labels_tensor) = sat_gen.build_inference_graph(
             image_tensors, graph_content)

        try:
            for counter in itertools.count():
                tf.logging.info('Reading image no. {} shard {}'.format(counter, FLAGS.shard))


                tf.logging.info('Running inference')

                (detected_boxes, detected_scores, detected_classes, num_detections) = sess.run(
                    [detected_boxes_tensor, detected_scores_tensor,
                     detected_labels_tensor, num_detections_tensor])

                tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,counter)

                # Keep only detections above the score threshold
                ii = np.where(detected_scores > FLAGS.threshold)
                detected_scores = detected_scores[ii]
                detected_boxes = detected_boxes[ii]
                detected_classes = detected_classes[ii]

                tf.logging.debug("DETETED SCORES .{}".format((detected_scores)))
                tf.logging.debug("DETECTED BOXES {}".format(len(detected_boxes)))

                x_,y_=sess.run([x,y])
                sat_gen.write(x_,y_,(1024,1024),detected_boxes,detected_scores,count=counter)

                # Visualize some high-confidence images
                if FLAGS.vis_path and detected_scores.any():
                    if np.max(detected_scores)>FLAGS.vis_threshold:
                        image=np.squeeze(sess.run(image_tensors))
                        # Draw boxes
                        vis_utils.draw_bounding_boxes_on_image_array(image, detected_boxes)
                        # Save
                        x_=x_[0]
                        y_=y_[0]
                        vis_utils.save_image_array_as_png(image,os.path.join(FLAGS.vis_path,str(x_)+"-"+str(y_)+".png"))

        except tf.errors.OutOfRangeError:
            # Catch exceptions
            tf.logging.info('Finished processing records')

        finally:
            end=time.time()
            tf.logging.info("Elapsed time {}".format(end-start))
Example #11
def main(_):
    byte_tensor=tf.read_file(IMG_FILE)
    image_rgb = tf.image.decode_jpeg(byte_tensor)
    image_tensor = tf.stack([image_rgb])
    #print("image_tensor's shape="+str(tf.shape(image_tensor)))
    with tf.Session() as sess:

        tf.logging.info('Importing model file:{}'.format(PB_FILE))

        with tf.gfile.Open(PB_FILE, 'rb') as graph_def_file:
            graph_content = graph_def_file.read()
        graph_def = tf.GraphDef()
        graph_def.MergeFromString(graph_content)

        tf.import_graph_def(
            graph_def, name='', input_map={'image_tensor': image_tensor})

        g = tf.get_default_graph()

        num_detections_tensor = tf.squeeze(
            g.get_tensor_by_name('num_detections:0'), 0)
        num_detections_tensor = tf.cast(num_detections_tensor, tf.int32)

        detected_boxes_tensor = tf.squeeze(
            g.get_tensor_by_name('detection_boxes:0'), 0)
        detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor]

        detected_scores_tensor = tf.squeeze(
            g.get_tensor_by_name('detection_scores:0'), 0)
        detected_scores_tensor = detected_scores_tensor[:num_detections_tensor]

        detected_labels_tensor = tf.squeeze(
            g.get_tensor_by_name('detection_classes:0'), 0)
        detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64)
        detected_labels_tensor = detected_labels_tensor[:num_detections_tensor]

        sess.run(tf.local_variables_initializer())
        tf.train.start_queue_runners()
        (detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor) = sess.run(
            [detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor])
        array = image_rgb.eval()
    #print("detected_number:"+str(num_detections_tensor.eval()))
    #print("detected_number:{}".format(len(detected_boxes_tensor)))

    for c, b, s in zip(detected_labels_tensor, detected_boxes_tensor, detected_scores_tensor):
        print("class={}, score={}, box={}".format(c, s, b))
    cat = create_simple_category_index()
    vis_utils.visualize_boxes_and_labels_on_image_array(array,
                                                        detected_boxes_tensor,
                                                        detected_labels_tensor,
                                                        detected_scores_tensor,
                                                        cat,
                                                        use_normalized_coordinates=True,
                                                        max_boxes_to_draw=100,
                                                        min_score_thresh=0.2)
    vis_utils.save_image_array_as_png(array,"/home/keyong/Documents/ssd/result.png")
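Example #12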
def main(_):
    start=time.time()

    tf.logging.set_verbosity(tf.logging.INFO)

    # Check FLAGS
    SatellitleGenerator.check_flags(required_flags=['input_path', 'inference_graph'])

    # Initialise and load shard
    sat_gen=SatellitleGenerator(FLAGS.input_path,FLAGS.batch_size,FLAGS.test_size)

    if FLAGS.residue_run:
        print("residue run")
        sat_gen.load_residues(FLAGS.output_path)

    sat_gen.load_shard(FLAGS.shard_path,FLAGS.shard)

    if FLAGS.restore_from_json:
        message=sat_gen.restore_from_json(os.path.join(FLAGS.output_path,'result-'+str(FLAGS.shard)+'.json'))

        if message:
            tf.logging.info("Finished all evaluations here. Exitting")
            didnotrun=True
            sys.exit(0)
        else:
            didnotrun=False
    else:
        didnotrun=False
    # Test a sample patch read
    '''
    coord=(sat_gen.map[0][0],sat_gen.map[1][0])
    sat_gen.read_patch_test(coord,FLAGS.input_path)
    '''
    # Setup dataset object
    sat_gen.setup_data()

    # Session configs
    config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)
    config.gpu_options.allow_growth = True
    # Turn on log_device_placement for verbosity in ops

    # Covered PNGs
    total_covered=[]

    with tf.Session(config=config) as sess:
        sess.run(sat_gen.iter.initializer)

        # Read and Fill Graph
        tf.logging.info('Reading graph and building model...')
        with tf.gfile.Open(FLAGS.inference_graph, 'rb') as graph_def_file:
            graph_content = graph_def_file.read()

        x,y,image_tensors,covered=sat_gen.iter.get_next()

        (num_detections_tensor,detected_boxes_tensor, detected_scores_tensor,
         detected_labels_tensor) = sat_gen.build_inference_graph(
             image_tensors, graph_content)

        # Chrome tracing
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        try:
            for counter in itertools.count():
                tf.logging.info('Reading image no. {} shard {}'.format(counter, FLAGS.shard))

                tf.logging.info('Running inference')

                if counter%10==0:
                    (detected_boxes, detected_scores, num_detections,x_,y_,image,covered_) = sess.run([detected_boxes_tensor,\
                     detected_scores_tensor,num_detections_tensor,x,y,image_tensors,covered],
                     options=options, run_metadata=run_metadata)

                else:
                    (detected_boxes, detected_scores, num_detections,x_,y_,image,covered_) = sess.run([detected_boxes_tensor,\
                     detected_scores_tensor,num_detections_tensor,x,y,image_tensors,covered])

                tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 10,counter)

                total_covered.append(covered_)

                if counter==0:
                    # Initialise stacks
                    tf.logging.info("Initialsied stacks")
                    x_stack=x_
                    y_stack=y_
                    detected_boxes_stack=detected_boxes
                    detected_scores_stack=detected_scores

                # Calibrate to write at counter = FLAGS.write_every-1, 2*FLAGS.write_every-1, ...
                if (counter)%FLAGS.write_every==FLAGS.write_every-1 :
                    # Write to JSON
                    tf.logging.info("Writting to JSON SHARD {}".format(FLAGS.shard) )
                    sat_gen.write(x_stack,y_stack,(1024,1024),detected_boxes_stack,detected_scores_stack,count=counter,batch_size=FLAGS.batch_size,stack_size=FLAGS.write_every)
                    x_stack=x_
                    y_stack=y_
                    detected_boxes_stack=detected_boxes
                    detected_scores_stack=detected_scores
                    np.save(FLAGS.total_covered,np.array(total_covered).flatten().astype(str))

                else:
                    x_stack=np.hstack((x_stack,x_))
                    y_stack=np.hstack((y_stack,y_))
                    detected_boxes_stack=np.vstack((detected_boxes_stack,detected_boxes))
                    detected_scores_stack=np.vstack((detected_scores_stack,detected_scores))

                # Visualize some high-confidence images
                if FLAGS.vis_path and detected_scores.any():
                    if np.max(detected_scores)>FLAGS.vis_threshold:
                        for j in range(FLAGS.batch_size):
                            if np.max(detected_scores[j])>FLAGS.vis_threshold:
                                # Draw boxes
                                boxes_ii=np.where(detected_scores[j]>FLAGS.vis_threshold)
                                boxes=detected_boxes[j][boxes_ii]
                                vis_utils.draw_bounding_boxes_on_image_array(image[j],boxes)
                                # Save
                                vis_utils.save_image_array_as_png(image[j],os.path.join(FLAGS.vis_path,str(x_[j])+"-"+str(y_[j])+".png"))
                '''
                # Chrome Profiling trace
                if counter%100==0:
                    fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                    chrome_trace = fetched_timeline.generate_chrome_trace_format()
                    with open(os.path.join(FLAGS.trace_path,'timeline_'+str(counter)+'.json'), 'w') as f:
                        f.write(chrome_trace)
                '''

        except tf.errors.OutOfRangeError:
            # Catch exceptions
            tf.logging.info('Finished processing records')

        finally:
            if didnotrun:
                pass
            else:
                # print(x_stack.shape)
                sat_gen.write(x_stack,y_stack,(1024,1024),detected_boxes_stack,detected_scores_stack,batch_size=FLAGS.batch_size,stack_size=int(len(x_stack)/FLAGS.batch_size),count=True)
                tf.logging.info('Finished writing residual stacks of size {}'.format(len(x_stack)))
            end=time.time()
            tf.logging.info("Elapsed time {}".format(end-start))
Example #13
def main(_):
    start = time.time()

    tf.logging.set_verbosity(tf.logging.INFO)

    # Check FLAGS
    CellImagesRun.check_flags(required_flags=['input_path', 'inference_graph'])

    # Initialise and load shard
    cell_run = CellImagesRun(FLAGS.input_path, FLAGS.batch_size)

    if FLAGS.test:
        cell_run.map = cell_run.map[:10]

    # Setup dataset object
    cell_run.setup_data()

    # Turn on log_device_placement for verbosity in ops
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        sess.run(cell_run.iter.initializer)

        # Read and Fill Graph
        tf.logging.info('Reading graph and building model...')
        with tf.gfile.Open(FLAGS.inference_graph, 'rb') as graph_def_file:
            graph_content = graph_def_file.read()

        id, image_tensors = cell_run.iter.get_next()

        (num_detections_tensor, detected_boxes_tensor, detected_scores_tensor,
         detected_labels_tensor) = cell_run.build_inference_graph(
             image_tensors, graph_content)

        try:
            for counter in itertools.count():
                tf.logging.info('Reading Image No; \t {} '.format(counter))

                tf.logging.info('Running inference')

                try:
                    (detected_boxes, detected_scores, detected_classes, num_detections, id_, image) = sess.run(
                        [detected_boxes_tensor, detected_scores_tensor,
                         detected_labels_tensor, num_detections_tensor, id, image_tensors])
                except tf.errors.OutOfRangeError:
                    raise
                except Exception:
                    # Skip records that fail to process, but advance the iterator
                    sess.run([id])
                    continue

                tf.logging.log_every_n(tf.logging.INFO,
                                       'Processed %d images...', 10, counter)

                # Threshold scores
                ii = np.where(detected_scores > FLAGS.threshold)
                detected_scores = detected_scores[ii]
                detected_boxes = detected_boxes[ii]
                detected_classes = detected_classes[ii]

                tf.logging.debug("DETETED SCORES .{}".format(
                    (detected_scores)))
                tf.logging.debug("DETECTED BOXES {}".format(
                    len(detected_boxes)))

                # Visualize some high-confidence images
                if FLAGS.vis_path and detected_scores.any():
                    if np.max(detected_scores) > FLAGS.vis_threshold:
                        image = np.squeeze(image)
                        # Draw boxes
                        id_ = id_[0].decode("utf-8")
                        vis_utils.draw_bounding_boxes_on_image_array(
                            image, detected_boxes)
                        vis_utils.save_image_array_as_png(
                            image, os.path.join(FLAGS.vis_path, id_))

        except tf.errors.OutOfRangeError:
            # Catch exceptions
            tf.logging.info('Finished processing records')

        finally:
            end = time.time()
            tf.logging.info("Elapsed time {}".format(end - start))