Example #1
 def restore_pb_model_inference_image_string(self, image_path_pattern,
                                             output_dir):
     """Perform inference for the given saved model."""
     return_elements = ["image_files:0", "detections:0"]
     graph = tf.Graph()
     pb_file = os.path.join(self.saved_model_dir, "efficientdet-d0.pb")
     return_tensors = self.read_pb_return_tensors(graph, pb_file,
                                                  return_elements)
     with tf.Session(graph=graph) as sess:
         for file_name in os.listdir(image_path_pattern):
             file_path = os.path.join(image_path_pattern, file_name)
             print(file_path)
             raw_images = Image.open(file_path)
             raw_data_encode = tf.gfile.FastGFile(file_path, 'rb').read()
             pred_bbox = sess.run(
                 [return_tensors[1]],
                 feed_dict={return_tensors[0]: [raw_data_encode]})
             for i, output_np in enumerate(pred_bbox[0]):
                 # output_np has format [image_id, x, y, width, height, score, class]
                 boxes = output_np[:, 1:5]
                 classes = output_np[:, 6].astype(int)
                 scores = output_np[:, 5]
                 boxes[:, [0, 1, 2, 3]] = boxes[:, [1, 0, 3, 2]]
                 boxes[:, 2:4] += boxes[:, 0:2]
                 img = inference.visualize_image(raw_images, boxes, classes,
                                                 scores,
                                                 inference.coco_id_mapping)
                 output_image_path = os.path.join(output_dir,
                                                  "output_" + file_name)
                 Image.fromarray(img).save(output_image_path)
                 logging.info('writing file to %s', output_image_path)
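The read_pb_return_tensors helper used above is not part of this excerpt. Under the TF1 graph API that the example relies on, a minimal sketch (an assumption, not the project's actual implementation) could look like this:

 def read_pb_return_tensors(self, graph, pb_file, return_elements):
     """Import a frozen GraphDef and return the requested tensors by name."""
     with tf.gfile.FastGFile(pb_file, 'rb') as f:
         graph_def = tf.GraphDef()
         graph_def.ParseFromString(f.read())
     with graph.as_default():
         return_tensors = tf.import_graph_def(
             graph_def, return_elements=return_elements)
     return return_tensors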
Example #2
  def draw_inference(self, epoch):
    self.model.set_weights(self.train_model.get_weights())
    boxes, scores, classes, valid_len = self.inference()
    length = valid_len[0]
    image = inference.visualize_image(
        self.sample_image[0],
        boxes[0].numpy()[:length],
        classes[0].numpy().astype(int)[:length],
        scores[0].numpy()[:length],
        min_score_thresh=self.min_score_thresh,
        max_boxes_to_draw=self.max_boxes_to_draw)

    with self.file_writer.as_default():
      tf.summary.image('Test image', tf.expand_dims(image, axis=0), step=epoch)
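The inference() helper is not shown in this excerpt; example #4 below performs the equivalent step by calling the model directly on the cached sample image, so a minimal sketch of the missing method (an assumption) might be:

  def inference(self):
    # Forward pass of the eval copy of the model on the stored sample batch.
    return self.model(self.sample_image, training=False)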
Example #3
def main(_):
    # pylint: disable=line-too-long
    # Prepare images and checkpoints: please run these commands in shell.
    # !mkdir tmp
    # !wget https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png -O tmp/img.png
    # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz -O tmp/efficientdet-d0.tar.gz
    # !tar zxf tmp/efficientdet-d0.tar.gz -C tmp
    imgs = [np.array(Image.open(FLAGS.image_path))]
    nms_score_thresh, nms_max_output_size = 0.4, 100

    # Create model config.
    config = hparams_config.get_efficientdet_config('efficientdet-d0')
    config.is_training_bn = False
    config.image_size = '1920x1280'
    config.nms_configs.score_thresh = nms_score_thresh
    config.nms_configs.max_output_size = nms_max_output_size

    # Use 'mixed_float16' if running on GPUs.
    policy = tf.keras.mixed_precision.experimental.Policy('float32')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    tf.config.experimental_run_functions_eagerly(FLAGS.debug)

    # Create and run the model.
    model = efficientdet_keras.EfficientDetModel(config=config)
    height, width = utils.parse_image_size(config['image_size'])
    model.build((1, height, width, 3))
    model.load_weights(FLAGS.checkpoint)
    model.summary()

    @tf.function
    def f(imgs):
        return model(imgs, training=False, post_mode='global')

    boxes, scores, classes, valid_len = f(imgs)

    # Visualize results.
    for i, img in enumerate(imgs):
        length = valid_len[i]
        img = inference.visualize_image(img,
                                        boxes[i].numpy()[:length],
                                        classes[i].numpy().astype(int)[:length],
                                        scores[i].numpy()[:length],
                                        min_score_thresh=nms_score_thresh,
                                        max_boxes_to_draw=nms_max_output_size)
        output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
        Image.fromarray(img).save(output_image_path)
        print('writing annotated image to ', output_image_path)
Example #4
    def draw_inference(self, epoch):
        self.model.set_weights(self.train_model.get_weights())
        boxes, scores, classes, valid_len = (self.model(self.sample_image,
                                                        training=False))
        length = valid_len[0]
        image = inference.visualize_image(
            self.sample_image[0],
            boxes[0].numpy()[:length],
            classes[0].numpy().astype(int)[:length],
            scores[0].numpy()[:length],
            min_score_thresh=self.model.config.nms_configs['score_thresh'],
            max_boxes_to_draw=self.model.config.nms_configs['max_output_size'])

        with self.file_writer.as_default():
            tf.summary.image("Test image",
                             tf.expand_dims(image, axis=0),
                             step=epoch)
Example #5
  def _draw_inference(self, step):
    self.model.__class__ = efficientdet_keras.EfficientDetModel
    results = self.model(self.sample_image, training=False)
    boxes, scores, classes, valid_len = tf.nest.map_structure(np.array, results)
    length = valid_len[0]
    image = inference.visualize_image(
        self.sample_image[0],
        boxes[0][:length],
        classes[0].astype(int)[:length],
        scores[0][:length],
        label_map=self.model.config.label_map,
        min_score_thresh=self.min_score_thresh,
        max_boxes_to_draw=self.max_boxes_to_draw)

    with self.file_writer.as_default():
      tf.summary.image('Test image', tf.expand_dims(image, axis=0), step=step)
    self.model.__class__ = efficientdet_keras.EfficientDetNet
Example #6
 def saved_model_inference(self, image_path_pattern, output_dir):
   """Perform inference for the given saved model."""
   with tf.Session() as sess:
     tf.saved_model.load(sess, ['serve'], self.saved_model_dir)
     raw_images = []
     image = Image.open(image_path_pattern)
     raw_images.append(np.array(image))
     outputs_np = sess.run('detections:0', {'image_arrays:0': raw_images})
     for i, output_np in enumerate(outputs_np):
       # output_np has format [image_id, y, x, height, width, score, class]
       boxes = output_np[:, 1:5]
       classes = output_np[:, 6].astype(int)
       scores = output_np[:, 5]
       boxes[:, 2:4] += boxes[:, 0:2]
       img = inference.visualize_image(
           raw_images[i], boxes, classes, scores, inference.coco_id_mapping)
       output_image_path = os.path.join(output_dir, str(i) + '.jpg')
       Image.fromarray(img).save(output_image_path)
       logging.info('writing file to %s', output_image_path)
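The only box arithmetic here is boxes[:, 2:4] += boxes[:, 0:2], which turns the [ymin, xmin, height, width] slice of each detection row into the [ymin, xmin, ymax, xmax] corners that visualize_image draws (example #1 does the same after first swapping the x/y and width/height columns). A tiny standalone illustration:

import numpy as np

boxes = np.array([[10., 20., 30., 40.]])  # [ymin, xmin, height, width]
boxes[:, 2:4] += boxes[:, 0:2]            # -> [ymin, xmin, ymax, xmax]
print(boxes)                              # [[10. 20. 40. 60.]]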
Example #7
def main(_):

  # pylint: disable=line-too-long
  # Prepare images and checkpoints: please run these commands in shell.
  # !mkdir tmp
  # !wget https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png -O tmp/img.png
  # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz -O tmp/efficientdet-d0.tar.gz
  # !tar zxf tmp/efficientdet-d0.tar.gz -C tmp
  imgs = [np.array(Image.open(FLAGS.image_path))] * 2
  # Create model config.
  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.is_training_bn = False
  config.image_size = '1920x1280'
  config.nms_configs.score_thresh = 0.4
  config.nms_configs.max_output_size = 100
  config.override(FLAGS.hparams)

  # Use 'mixed_float16' if running on GPUs.
  policy = tf.keras.mixed_precision.Policy('float32')
  tf.keras.mixed_precision.set_global_policy(policy)
  tf.config.run_functions_eagerly(FLAGS.debug)

  # Create and run the model.
  model = efficientdet_keras.EfficientDetModel(config=config)
  model.build((None, None, None, 3))
  model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))
  model.summary()

  class ExportModel(tf.Module):

    def __init__(self, model):
      super().__init__()
      self.model = model

    @tf.function
    def f(self, imgs):
      return self.model(imgs, training=False, post_mode='global')

  imgs = tf.convert_to_tensor(imgs, dtype=tf.uint8)
  export_model = ExportModel(model)
  if FLAGS.saved_model_dir:
    tf.saved_model.save(
        export_model,
        FLAGS.saved_model_dir,
        signatures=export_model.f.get_concrete_function(
            tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.uint8)))
    export_model = tf.saved_model.load(FLAGS.saved_model_dir)

  boxes, scores, classes, valid_len = export_model.f(imgs)

  # Visualize results.
  for i, img in enumerate(imgs):
    length = valid_len[i]
    img = inference.visualize_image(
        img,
        boxes[i].numpy()[:length],
        classes[i].numpy().astype(int)[:length],
        scores[i].numpy()[:length],
        label_map=config.label_map,
        min_score_thresh=config.nms_configs.score_thresh,
        max_boxes_to_draw=config.nms_configs.max_output_size)
    output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
    Image.fromarray(img).save(output_image_path)
    print('writing annotated image to %s' % output_image_path)
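Once exported, the SavedModel can also be driven purely through its serving signature, without the ExportModel class. A minimal sketch, assuming the default 'serving_default' key (what tf.saved_model.save assigns when a single concrete function is passed) and an input keyword named after the tf.function argument imgs; saved_model_cli show --dir <saved_model_dir> --all prints the exact names:

import tensorflow as tf

loaded = tf.saved_model.load(FLAGS.saved_model_dir)
serve_fn = loaded.signatures['serving_default']
# Signature functions take keyword arguments and return a dict of output tensors.
outputs = serve_fn(imgs=tf.zeros([1, 1280, 1920, 3], dtype=tf.uint8))
print({name: t.shape for name, t in outputs.items()})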
Example #8
def main(_):
    img = Image.open(FLAGS.image_path)
    imgs = [np.array(img)]
    # Create model config.
    config = hparams_config.get_efficientdet_config(FLAGS.model_name)
    config.is_training_bn = False
    # config.image_size = '640x640'
    # config.nms_configs.score_thresh = 0.01
    config.nms_configs.score_thresh = 0.4
    config.nms_configs.max_output_size = 100
    config.override(FLAGS.hparams)

    # Use 'mixed_float16' if running on GPUs.
    policy = tf.keras.mixed_precision.experimental.Policy('float32')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    tf.config.experimental_run_functions_eagerly(FLAGS.debug)

    # Create model
    model = efficientdet_keras.EfficientDetNet(config=config)
    target_size = utils.parse_image_size(config.image_size)
    target_size = target_size + (3, )
    model_inputs = tf.keras.Input(shape=target_size)
    model(model_inputs, False)
    model.summary()

    # output layers detailed
    # for i in model.layers:
    #   print(i.name, i.input, i.output)

    model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir))

    # create new model to access intermediate layers
    effdet_model = tf.keras.Model(
        inputs=model.input,
        outputs=[
            model.get_layer(name='class_net').output,
            model.get_layer(name='box_net').output,
            model.backbone.layers[-3].output  # last layer
        ])

    # is only used for pre- and post-processing methods
    effdet_methods = efficientdet_keras.EfficientDetModel(config=config)

    # input image preprocessing
    imgs = tf.convert_to_tensor(imgs)
    inputs, scales = effdet_methods._preprocessing(imgs, config.image_size,
                                                   'infer')

    with tf.GradientTape() as tape:
        # Compute activations of the last conv layer and make the tape watch it
        cls_outputs, box_outputs, efficientnet_last_layer = effdet_model(
            inputs, False)

    # save gradients
    grads = None
    if FLAGS.gradient_type == 'cls':
        grads = tape.gradient(cls_outputs, efficientnet_last_layer)
    elif FLAGS.gradient_type == 'box':
        grads = tape.gradient(box_outputs, efficientnet_last_layer)

    assert grads is not None
    grad_cam(grads, efficientnet_last_layer[0], img, imgs[0],
             FLAGS.gradient_type)

    ### bounding box visualization ###
    boxes, scores, classes, valid_len = effdet_methods._postprocess(
        cls_outputs, box_outputs, scales)

    # Visualize results.
    for i, img in enumerate(imgs):
        length = valid_len[i]
        img = inference.visualize_image(
            img,
            boxes[i].numpy()[:length],
            classes[i].numpy().astype(int)[:length],
            scores[i].numpy()[:length],
            min_score_thresh=config.nms_configs.score_thresh,
            max_boxes_to_draw=config.nms_configs.max_output_size)
        output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
        Image.fromarray(img).save(output_image_path)
        print('writing annotated image to ', output_image_path)
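The grad_cam helper invoked before the bounding-box section is not included in this excerpt. Below is a minimal sketch of the standard Grad-CAM recipe with an argument list mirroring the call site; the body is an assumption, not the project's implementation:

import numpy as np
import tensorflow as tf
from PIL import Image


def grad_cam(grads, conv_output, pil_img, raw_img, tag):
    """Overlay a Grad-CAM heatmap on raw_img and save it (pil_img unused here)."""
    raw = np.asarray(raw_img)[..., :3]
    # Channel weights: global-average-pool the gradients over height and width.
    weights = tf.reduce_mean(grads[0], axis=(0, 1))                  # [channels]
    # Weighted sum of the feature maps, with ReLU to keep positive evidence only.
    cam = tf.nn.relu(tf.reduce_sum(conv_output * weights, axis=-1))  # [h, w]
    cam = cam / (tf.reduce_max(cam) + 1e-8)
    # Upsample the map to the input resolution and blend it with the raw image.
    cam = tf.image.resize(cam[..., tf.newaxis], raw.shape[:2])[..., 0]
    heatmap = np.uint8(255 * cam.numpy())
    overlay = (0.6 * raw.astype(np.float32) +
               0.4 * np.stack([heatmap] * 3, axis=-1)).astype(np.uint8)
    Image.fromarray(overlay).save('grad_cam_%s.jpg' % tag)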
Example #9
File: infer.py  Project: sendit2me/automl
# !mkdir tmp
# !wget https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png -O tmp/img.png
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz -O tmp/efficientdet-d0.tar.gz
# !tar zxf tmp/efficientdet-d0.tar.gz -C tmp
imgs = [np.array(Image.open('tmp/img.png'))]
nms_score_thresh, nms_max_output_size = 0.4, 100

# Create model.
config = hparams_config.get_efficientdet_config('efficientdet-d0')
config.is_training_bn = False
config.image_size = '1920x1280'
config.nms_configs.score_thresh = nms_score_thresh
config.nms_configs.max_output_size = nms_max_output_size
model = efficientdet_keras.EfficientDetModel(config=config)
model.build((1, 1280, 1920, 3))
model.load_weights('tmp/efficientdet-d0/model')
boxes, scores, classes, valid_len = model(imgs)
model.summary()

# Visualize results.
for i, img in enumerate(imgs):
    img = inference.visualize_image(img,
                                    boxes[i].numpy(),
                                    classes[i].numpy().astype(int),
                                    scores[i].numpy(),
                                    min_score_thresh=nms_score_thresh,
                                    max_boxes_to_draw=nms_max_output_size)
    output_image_path = os.path.join('tmp/', str(i) + '.jpg')
    Image.fromarray(img).save(output_image_path)
    print('writing annotated image to %s' % output_image_path)