Example No. 1
    def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(self.model_name,
                                         self.ckpt_path,
                                         batch_size=self.batch_size,
                                         enable_ema=self.enable_ema,
                                         use_xla=self.use_xla,
                                         data_format=self.data_format,
                                         **kwargs)
        driver.load(self.saved_model_dir)

        batch_size = self.batch_size
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        num_batches = len(all_files) // batch_size

        for i in range(num_batches):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            raw_images = [np.array(Image.open(f)) for f in batch_files]
            detections_bs = driver.serve_images(raw_images)
            for j in range(len(raw_images)):
                img = driver.visualize(raw_images[j], detections_bs[j],
                                       **kwargs)
                img_id = str(i * batch_size + j)
                output_image_path = os.path.join(output_dir, img_id + '.jpg')
                Image.fromarray(img).save(output_image_path)
                logging.info('writing file to %s', output_image_path)
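Note that num_batches = len(all_files) // batch_size silently drops up to batch_size - 1 trailing images. Later examples on this page ceil-divide and pad the last batch instead; a minimal sketch of that variant, reusing the names above:

    # Ceil division keeps the final partial batch; pad it up to batch_size
    # before calling serve_images, as the padded examples further down do.
    num_batches = (len(all_files) + batch_size - 1) // batch_size
    # e.g. 10 files with batch_size=4 -> 3 batches, the last padded from 2 to 4.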
Example No. 2
    def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)

        # Serving time batch size should be fixed.
        batch_size = self.batch_size or 1
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        print('all_files=', all_files)
        num_batches = len(all_files) // batch_size

        for i in range(num_batches):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            raw_images = [np.array(Image.open(f)) for f in batch_files]
            detections_bs = driver.serve_images(raw_images)
            for j in range(len(raw_images)):
                img = driver.visualize(raw_images[j], detections_bs[j],
                                       **kwargs)
                img_id = str(i * batch_size + j)
                output_image_path = os.path.join(output_dir, img_id + '.jpg')
                Image.fromarray(img).save(output_image_path)
                logging.info('writing file to %s', output_image_path)
Example No. 3
 def export_saved_model(self, **kwargs):
     tf.enable_resource_variables()
     driver = inference.ServingDriver(self.model_name, self.ckpt_path,
                                      self.image_size)
     driver.build(min_score_thresh=kwargs.get('min_score_thresh', 0.2),
                  max_boxes_to_draw=kwargs.get('max_boxes_to_draw', 50))
     driver.export(self.saved_model_dir)
Example No. 4
 def export_saved_model(self, **kwargs):
     """Export a saved model for inference."""
     tf.enable_resource_variables()
     driver = inference.ServingDriver(self.model_name,
                                      self.ckpt_path,
                                      enable_ema=self.enable_ema)
     driver.build(params_override=self.model_overrides,
                  min_score_thresh=kwargs.get('min_score_thresh', 0.2),
                  max_boxes_to_draw=kwargs.get('max_boxes_to_draw', 50))
     driver.export(self.saved_model_dir)
Example No. 5
 def saved_model_benchmark(self, image_path_pattern):
     """Perform inference for the given saved model."""
     driver = inference.ServingDriver(self.model_name,
                                      self.ckpt_path,
                                      enable_ema=self.enable_ema)
     driver.load(self.saved_model_dir)
     raw_images = []
     image = Image.open(image_path_pattern)
     raw_images.append(np.array(image))
     driver.benchmark(raw_images)
Example No. 6
    def saved_model_inference(self, image_dir, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)

        # Serving time batch size should be fixed.
        batch_size = self.batch_size or 1
        # Note: tf.io.gfile.listdir(image_dir) is an alternative for
        # non-local (e.g. GCS) paths.
        all_files = [os.path.join(image_dir, image)
                     for image in os.listdir(image_dir)]
        print('all_files=', all_files)
        num_batches = (len(all_files) + batch_size - 1) // batch_size

        for i in range(num_batches):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            height, width = self.model_config.image_size
            images = [Image.open(f) for f in batch_files]
            if len(set([m.size for m in images])) > 1:
                # Resize only if images in the same batch have different sizes.
                # PIL's Image.resize takes a single (width, height) tuple.
                images = [m.resize((width, height)) for m in images]
            raw_images = [np.array(m) for m in images]
            size_before_pad = len(raw_images)
            if size_before_pad < batch_size:
                padding_size = batch_size - size_before_pad
                raw_images += [np.zeros_like(raw_images[0])] * padding_size

            detections_bs = driver.serve_images(raw_images)
            for j in range(size_before_pad):
                img = driver.visualize(raw_images[j], detections_bs[j],
                                       **kwargs)
                img_id = str(i * batch_size + j)
                output_image_path = os.path.join(output_dir, img_id + '.jpg')
                Image.fromarray(img).save(output_image_path)
                logging.info('writing file to %s', output_image_path)
                # Write the detection tensor to a text file.
                output_tensor_path = os.path.join(output_dir, img_id + '.txt')
                with open(output_tensor_path, 'w') as f:
                    f.write('detections_bs\n')
                    f.write('# Array shape: {0}\n'.format(
                        detections_bs[j].shape))
                    f.write(
                        '[image_id, ymin, xmin, ymax, xmax, score, class]\n')
                    # Pass the open handle: np.savetxt(output_tensor_path, ...)
                    # would reopen the file and overwrite the header above.
                    np.savetxt(f, detections_bs[j])
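The text files written above can be read back with NumPy by skipping the three hand-written header lines; a minimal sketch (the path is illustrative):

    import numpy as np

    # Read back one saved detection tensor; skip the 3 header lines.
    detections = np.loadtxt('/tmp/out/0.txt', skiprows=3)
    # Each row is [image_id, ymin, xmin, ymax, xmax, score, class].
    boxes, scores = detections[:, 1:5], detections[:, 5]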
Example No. 7
 def export_saved_model(self, **kwargs):
     """Export a saved model for inference."""
     tf.enable_resource_variables()
     driver = inference.ServingDriver(self.model_name,
                                      self.ckpt_path,
                                      enable_ema=self.enable_ema,
                                      use_xla=self.use_xla,
                                      data_format=self.data_format,
                                      **kwargs)
     driver.build(params_override=self.model_overrides)
     driver.export(self.saved_model_dir)
Example No. 8
 def export_saved_model(self, **kwargs):
     """Export a saved model for inference."""
     tf.enable_resource_variables()
     driver = inference.ServingDriver(
         self.model_name,
         self.ckpt_path,
         batch_size=self.batch_size,
         use_xla=self.use_xla,
         model_params=self.model_config.as_dict(),
         **kwargs)
     driver.build()
     driver.export(self.saved_model_dir, self.tflite_path, self.tensorrt)
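Once exported, the SavedModel can be reloaded outside the driver for a quick sanity check; a hedged sketch, assuming a TF2-style runtime and an illustrative path:

    import tensorflow as tf

    # Reload the exported model and list its serving signatures.
    loaded = tf.saved_model.load('/tmp/saved_model')
    print(list(loaded.signatures.keys()))  # typically ['serving_default']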
Example No. 9
 def saved_model_benchmark(self, image_path_pattern, **kwargs):
     """Perform inference for the given saved model."""
     driver = inference.ServingDriver(self.model_name,
                                      self.ckpt_path,
                                      enable_ema=self.enable_ema,
                                      use_xla=self.use_xla,
                                      data_format=self.data_format,
                                      **kwargs)
     driver.load(self.saved_model_dir)
     raw_images = []
     image = Image.open(image_path_pattern)
     raw_images.append(np.array(image))
     driver.benchmark(raw_images, FLAGS.trace_filename)
Example No. 10
 def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
     """Perform inference for the given saved model."""
     driver = inference.ServingDriver(self.model_name,
                                      self.ckpt_path,
                                      enable_ema=self.enable_ema)
     driver.load(self.saved_model_dir)
     raw_images = []
     image = Image.open(image_path_pattern)
     raw_images.append(np.array(image))
     detections_bs = driver.serve_images(raw_images)
     for i, detections in enumerate(detections_bs):
         img = driver.visualize(raw_images[i], detections, **kwargs)
         output_image_path = os.path.join(output_dir, str(i) + '.jpg')
         Image.fromarray(img).save(output_image_path)
         logging.info('writing file to %s', output_image_path)
Example No. 11
    def saved_model_video(self, video_path: Text, output_video: Text,
                          **kwargs):
        """Perform video inference for the given saved model."""
        import cv2  # pylint: disable=g-import-not-at-top

        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=1,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
        )
        driver.load(self.saved_model_dir)

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print("Error opening input video: {}".format(video_path))

        out_ptr = None
        if output_video:
            frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
            out_ptr = cv2.VideoWriter(
                output_video,
                cv2.VideoWriter_fourcc("m", "p", "4", "v"),
                25,
                (frame_width, frame_height),
            )

        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            if not ret:
                break

            raw_frames = [np.array(frame)]
            detections_bs = driver.serve_images(raw_frames)
            new_frame = driver.visualize(raw_frames[0], detections_bs[0],
                                         **kwargs)

            if out_ptr:
                # write frame into output file.
                out_ptr.write(new_frame)
            else:
                # show the frame online, mainly used for real-time speed test.
                cv2.imshow("Frame", new_frame)
                # Press Q on the keyboard to exit.
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break

        # Release the capture and writer handles once the loop ends.
        cap.release()
        if out_ptr:
            out_ptr.release()
        cv2.destroyAllWindows()
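The writer above hardcodes 25 fps; a hedged variant reads the frame rate from the input capture instead, falling back when the property is unset (names as in the example):

    # Match the output frame rate to the input video; 25 is the fallback.
    fps = cap.get(cv2.CAP_PROP_FPS) or 25
    out_ptr = cv2.VideoWriter(output_video,
                              cv2.VideoWriter_fourcc(*"mp4v"),
                              fps, (frame_width, frame_height))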
Example No. 12
 def saved_model_benchmark(self, image_path_pattern, **kwargs):
   """Perform inference for the given saved model."""
   driver = inference.ServingDriver(
       self.model_name,
       self.ckpt_path,
       batch_size=self.batch_size,
       use_xla=self.use_xla,
       model_params=self.model_config.as_dict(),
       **kwargs)
   driver.load(self.saved_model_dir)
   raw_images = []
   all_files = list(tf.io.gfile.glob(image_path_pattern))
   if len(all_files) < self.batch_size:
     all_files = all_files * (self.batch_size // len(all_files) + 1)
   raw_images = [np.array(Image.open(f)) for f in all_files[:self.batch_size]]
   driver.benchmark(raw_images, FLAGS.trace_filename)
Example No. 13
 def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
     """Perform inference for the given saved model."""
     with tf.Session() as sess:
         tf.saved_model.load(sess, ['serve'], self.saved_model_dir)
         raw_images = []
         image = Image.open(image_path_pattern)
         raw_images.append(np.array(image))
         detections_bs = sess.run('detections:0',
                                  {'image_arrays:0': raw_images})
         driver = inference.ServingDriver(self.model_name, self.ckpt_path,
                                          self.image_size)
         for i, detections in enumerate(detections_bs):
             print('detections[:10]=', detections[:10])
             img = driver.visualize(raw_images[i], detections, **kwargs)
             output_image_path = os.path.join(output_dir, str(i) + '.jpg')
             Image.fromarray(img).save(output_image_path)
             logging.info('writing file to %s', output_image_path)
Example No. 14
    def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)

        # Serving time batch size should be fixed.
        batch_size = self.batch_size or 1
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        print('all_files=', all_files)
        num_batches = (len(all_files) + batch_size - 1) // batch_size

        for i in range(num_batches):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            height, width = self.model_config.image_size
            images = [Image.open(f) for f in batch_files]
            if len(set([m.size for m in images])) > 1:
                # Resize only if images in the same batch have different sizes.
                # PIL's Image.resize takes a single (width, height) tuple.
                images = [m.resize((width, height)) for m in images]
            raw_images = [np.array(m) for m in images]
            size_before_pad = len(raw_images)
            if size_before_pad < batch_size:
                padding_size = batch_size - size_before_pad
                raw_images += [np.zeros_like(raw_images[0])] * padding_size

            detections_bs = driver.serve_images(raw_images)
            for j in range(size_before_pad):
                img = driver.visualize(raw_images[j], detections_bs[j],
                                       **kwargs)
                img_id = str(i * batch_size + j)
                output_image_path = os.path.join(output_dir, img_id + '.jpg')
                Image.fromarray(img).save(output_image_path)
                print('writing file to %s' % output_image_path)
        # Note: only the detections from the final batch are saved here.
        print(detections_bs.dtype)
        np.save("filename.npy", detections_bs)
        return detections_bs
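The .npy file saved above holds only the final batch's detections; a minimal sketch of reading it back (the threshold is illustrative):

    import numpy as np

    detections_bs = np.load('filename.npy')  # shape [batch, max_boxes, 7]
    # Column 5 is the score; keep only confident boxes.
    keep = detections_bs[..., 5] > 0.4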
Example No. 15
def main(_):
    # Build the detection config and apply command-line overrides.
    model_config = hparams_config.get_detection_config(FLAGS.model_name)
    model_config.override(FLAGS.hparams)  # Add custom overrides
    model_config.is_training_bn = False
    model_config.image_size = utils.parse_image_size(model_config.image_size)
    model_config.nms_configs.score_thresh = FLAGS.min_score_thresh
    model_config.nms_configs.method = FLAGS.nms_method
    model_config.nms_configs.max_output_size = FLAGS.max_boxes_to_draw

    client = microserviceclient.MicroserviceClient("efficientdetservice_" +
                                                   FLAGS.service_name)
    driver = inference.ServingDriver(FLAGS.model_name,
                                     FLAGS.ckpt_path,
                                     batch_size=1,
                                     use_xla=FLAGS.use_xla,
                                     model_params=model_config.as_dict())
    driver.load(FLAGS.saved_model_dir)

    def on_binaryNotification_handler(methodName, payload):
        nonlocal driver
        nonlocal client
        if methodName == "doInferencePlease":
            frame = cv2.imdecode(
                np.asarray(bytearray(payload), dtype=np.uint8),
                cv2.IMREAD_COLOR)
            raw_frames = [np.array(frame)]
            detections_bs = driver.serve_images(raw_frames)
            client.notify("inferenceResult", driver.to_json(detections_bs[0]))

            #new_frame = driver.visualize(raw_frames[0], detections_bs[0], min_score_thresh=FLAGS.min_score_thresh, max_boxes_to_draw=FLAGS.max_boxes_to_draw, line_thickness=FLAGS.line_thickness )
            #res, boxedFrame = cv2.imencode('.jpg', new_frame)
            #client.binaryNotify("inferenceResult", boxedFrame.tobytes())

    client.on_binaryNotification = on_binaryNotification_handler
    client.start()

    try:
        while True:
            time.sleep(0.10)
    except KeyboardInterrupt:
        # Without this, client.stop() below was unreachable.
        pass

    client.stop()
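The handler decodes JPEG bytes with cv2.imdecode, so a sending client would encode frames the same way the commented-out reply path does; a hedged sketch (the file name is illustrative):

    import cv2

    # Encode a frame as JPEG bytes for the 'doInferencePlease' payload.
    frame = cv2.imread('example.jpg')
    ok, buf = cv2.imencode('.jpg', frame)
    payload = buf.tobytes()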
Example No. 16
    def saved_model_webcam(self, webcam_idx, **kwargs):
        """Perform webcam inference for the given saved model."""
        import cv2  # pylint: disable=g-import-not-at-top

        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=1,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict())
        driver.load(self.saved_model_dir)

        cap = cv2.VideoCapture(int(webcam_idx))
        if not cap.isOpened():
            print('Error opening webcam {}'.format(webcam_idx))

        err_cnt = 0
        while cap.isOpened():
            # Capture frame-by-frame
            ret, frame = cap.read()
            if not ret:
                if err_cnt < 20:
                    err_cnt += 1
                    continue
                else:
                    print('Error retrieving images: {}'.format('webcam'))
                    break

            raw_frames = [np.array(frame)]
            detections_bs = driver.serve_images(raw_frames)

            json_str = driver.to_json(detections_bs[0])

            new_frame = driver.visualize(raw_frames[0], detections_bs[0],
                                         **kwargs)

            # Show the frame online, mainly used for real-time speed tests.
            cv2.imshow('Frame', new_frame)
            # Press Q on the keyboard to exit.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Release the webcam handle once the loop ends.
        cap.release()
        cv2.destroyAllWindows()
Example No. 17
 def export_pb_model_image_arrays(self):
     driver = inference.ServingDriver(self.model_name, self.ckpt_path)
     driver.build()
     driver.save_inference_pb_model_from_raw_image(self.saved_model_dir)
Example No. 18
    def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)

        # Serving-time batch size should be fixed; see the newer version of
        # Google's repo.
        batch_size = self.batch_size or 1
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        print('all_files=', all_files)
        if not FLAGS.big_image:
            num_batches = len(all_files) // batch_size

            for i in range(num_batches):
                batch_files = all_files[i * batch_size:(i + 1) * batch_size]
                raw_images = [np.array(Image.open(f)) for f in batch_files]
                detections_bs = driver.serve_images(raw_images)
                for j in range(len(raw_images)):
                    img = driver.visualize(raw_images[j], detections_bs[j],
                                           **kwargs)
                    img_id = str(i * batch_size + j)
                    output_image_path = os.path.join(output_dir,
                                                     img_id + '.jpg')
                    Image.fromarray(img).save(output_image_path)
                    logging.info('writing file to %s', output_image_path)
        else:
            for f in all_files:
                with Image.open(f) as im:
                    img_name = os.path.basename(f)
                    image_width = im.width
                    image_height = im.height
                    imagette_width = self.model_config.image_size[0]
                    imagette_height = self.model_config.image_size[1]
                    divx = round(image_width / imagette_width)
                    divy = round(image_height / imagette_height)
                    size = (imagette_width * divx, imagette_height * divy)
                    im_resized = im.resize(size)
                    imagettes = []
                    if FLAGS.raster:
                        raster_image = np.zeros(size[::-1])
                    eval_image = Image.new('RGB', size)
                    eval_time = 0
                    for x in range(divx):
                        for y in range(divy):
                            imagettes = [(np.array(
                                im_resized.crop(
                                    (x * imagette_width, y * imagette_height,
                                     (x + 1) * imagette_width,
                                     (y + 1) * imagette_height))))]
                            # driver.batch_size = len(imagettes)  # TODO: handle batch size
                            start = time.time()
                            detections_bs = driver.serve_images(imagettes)
                            eval_time += time.time() - start
                            if FLAGS.raster:
                                raster_imagette2 = np.zeros(
                                    shape=(imagette_height, imagette_width))
                                for detection_bs in detections_bs[0]:
                                    ymin = detection_bs[1]
                                    xmin = detection_bs[2]
                                    ymax = detection_bs[3]
                                    xmax = detection_bs[4]
                                    raster_imagette = np.zeros(
                                        shape=(imagette_height,
                                               imagette_width))
                                    raster_imagette[
                                        int(ymin):int(ymax),
                                        int(xmin):int(xmax)] = detection_bs[5]
                                    raster_imagette2 = np.maximum(
                                        raster_imagette2, raster_imagette)
                                raster_image[y * imagette_height:(y + 1) *
                                             imagette_height,
                                             x * imagette_width:(x + 1) *
                                             imagette_width] = raster_imagette2
                            eval_imagette = driver.visualize(
                                imagettes[0], detections_bs[0], **kwargs)
                            eval_image.paste(
                                Image.fromarray(eval_imagette),
                                (x * imagette_width, y * imagette_height,
                                 (x + 1) * imagette_width,
                                 (y + 1) * imagette_height))
                    print("EfficientDet detection in %s seconds ---" %
                          (eval_time))
                    if FLAGS.raster:
                        raster_image = (raster_image > driver.min_score_thresh
                                        ) * raster_image * 255
                        raster_image = raster_image.astype(np.uint8)
                        output_raster_path = os.path.join(
                            output_dir, img_name[:-4] +
                            '_EfficientDet_detection_raster.png')
                        ras_img = Image.fromarray(raster_image)
                        ras_img2 = ras_img.resize(im.size)
                        ras_img2.save(output_raster_path, "PNG")
                    output_image_path = os.path.join(
                        output_dir,
                        img_name[:-4] + '_EfficientDet_detection.jpg')
                    eval_image_resized = eval_image.resize(
                        (im.width, im.height))
                    eval_image_resized.save(output_image_path)
                    logging.info('writing file to %s', output_image_path)
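For intuition, the tiling arithmetic above snaps the input to an integer grid of model-sized tiles; a quick worked sketch with illustrative sizes:

    image_width, image_height = 3000, 2000        # input image (illustrative)
    imagette_width, imagette_height = 512, 512    # model_config.image_size
    divx = round(image_width / imagette_width)    # 6 tiles across
    divy = round(image_height / imagette_height)  # 4 tiles down
    size = (imagette_width * divx, imagette_height * divy)  # (3072, 2048)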
Example No. 19
  def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
    """Perform inference for the given saved model."""
    driver = inference.ServingDriver(
        self.model_name,
        self.ckpt_path,
        batch_size=self.batch_size,
        use_xla=self.use_xla,
        model_params=self.model_config.as_dict(),
        **kwargs)
    driver.load(self.saved_model_dir)

    # Serving time batch size should be fixed.
    batch_size = self.batch_size or 1
    all_files = list(tf.io.gfile.glob(image_path_pattern))
    print('all_files=', all_files)
    num_batches = (len(all_files) + batch_size - 1) // batch_size

    parent_dirname = os.path.dirname(image_path_pattern)
    model = os.path.basename(os.path.normpath(parent_dirname))

    output_folder = os.path.join(output_dir, model)

    os.makedirs(output_folder, exist_ok=True)
    os.makedirs('logs', exist_ok=True)
    log = open('logs/' + model + '.csv', 'w')
    log.write('name,num_boxes,ymin,xmin,ymax,xmax,score,class\n')
  
    for i in range(num_batches):
      batch_files = all_files[i * batch_size:(i + 1) * batch_size]
      height, width = self.model_config.image_size
      
      images = [Image.open(f) for f in batch_files]
      if len(set([m.size for m in images])) > 1:
        # Resize only if images in the same batch have different sizes.
        # PIL's Image.resize takes a single (width, height) tuple.
        images = [m.resize((width, height)) for m in images]
      raw_images = [np.array(m) for m in images]
      size_before_pad = len(raw_images)
      if size_before_pad < batch_size:
        padding_size = batch_size - size_before_pad
        raw_images += [np.zeros_like(raw_images[0])] * padding_size
      print([all_files[i * batch_size + j].split('/')[-1] for j in range(size_before_pad)])
      detections_bs = driver.serve_images(raw_images)
      for j in range(size_before_pad):
        img_id = i * batch_size + j
        filename = all_files[img_id].split('/')[-1]
        output_image_path = os.path.join(output_folder, filename)
        # if os.path.exists(output_image_path):
        #     continue
        # print(detections_bs[j])
        # detections_bs --> [image_id, ymin, xmin, ymax, xmax, score, class]
        pts = self.convert_array(detections_bs[j])

        img = driver.visualize(raw_images[j], detections_bs[j], **kwargs)
        
        Image.fromarray(img).save(output_image_path)
        for pt in pts:
          # print(pt)
          # if (pts == pt).all(-1).sum() != 1:
          #   continue
          log.write('{},{},{},{},{},{},{},{}\n'.format(
              filename, len(pts), pt[1], pt[2], pt[3], pt[4], pt[5], pt[6]))
        if len(pts) == 0:
          # Keep the row width consistent with the 8-column header above.
          log.write('{},0,-1,-1,-1,-1,-1,-1\n'.format(filename))
        print('writing file to %s' % output_image_path)
    log.close()
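The CSV log written above can be consumed directly; a minimal sketch with the csv module (the path is illustrative; rows with score -1 mark images without detections):

    import csv

    with open('logs/model.csv') as f:
        for row in csv.DictReader(f):
            # Columns: name, num_boxes, ymin, xmin, ymax, xmax, score, class
            if float(row['score']) > 0:
                print(row['name'], row['class'], row['score'])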
Example No. 20
    def saved_model_inference(self, image_path_pattern, output_dir,
                              bbox_crop_margin_ratio, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)
        # Serving time batch size should be fixed.
        batch_size = self.batch_size or 1
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        # print('all_files=', all_files)
        num_batches = (len(all_files) + batch_size - 1) // batch_size
        detected_dir = os.path.join(output_dir, "detected")
        no_detected_dir = os.path.join(output_dir, "no_detected")
        crop_dir = os.path.join(output_dir, "crop")
        os.makedirs(detected_dir, exist_ok=True)
        os.makedirs(no_detected_dir, exist_ok=True)
        os.makedirs(crop_dir, exist_ok=True)
        for i in range(num_batches):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            height, width = self.model_config.image_size
            images = [Image.open(f).convert("RGB") for f in batch_files]
            if len(set([m.size for m in images])) > 1:
                # Resize only if images in the same batch have different sizes.
                # PIL's Image.resize expects (width, height), while
                # model_config.image_size is (height, width).
                images = [m.resize((width, height)) for m in images]
            raw_images = [np.array(m) for m in images]
            size_before_pad = len(raw_images)
            if size_before_pad < batch_size:
                padding_size = batch_size - size_before_pad
                raw_images += [np.zeros_like(raw_images[0])] * padding_size

            detections_bs = driver.serve_images(raw_images)
            print(detections_bs)
            for j in range(size_before_pad):
                if len(detections_bs[j]) == 0:
                    shutil.copy(batch_files[j], no_detected_dir)
                else:
                    prediction = detections_bs[j]
                    boxes = prediction[:, 1:5]
                    classes = prediction[:, 6].astype(int)
                    scores = prediction[:, 5]
                    im = Image.open(batch_files[j]).convert("RGB")
                    w, h = im.size
                    crop_cnt = 0
                    for k, box in enumerate(boxes):
                        if scores[k] < kwargs["min_score_thresh"]:
                            continue
                        crop_cnt += 1
                        if bbox_crop_margin_ratio != 0.0:
                            bw = box[3] - box[1]
                            bh = box[2] - box[0]
                            box[1] = max(
                                round(box[1] - (bw * bbox_crop_margin_ratio)),
                                0)
                            box[3] = min(
                                round(box[3] + (bw * bbox_crop_margin_ratio)),
                                w)
                            box[0] = max(
                                round(box[0] - (bh * bbox_crop_margin_ratio)),
                                0)
                            box[2] = min(
                                round(box[2] + (bh * bbox_crop_margin_ratio)),
                                h)
                        crop_im = im.crop((box[1], box[0], box[3], box[2]))
                        image_file_name = os.path.splitext(
                            os.path.basename(batch_files[j]))[0]
                        target_crop_dir = os.path.join(crop_dir,
                                                       str(classes[k]))
                        os.makedirs(target_crop_dir, exist_ok=True)
                        # Index by box (k), not batch slot (j), so multiple
                        # crops from one image do not overwrite each other.
                        output_image_path = os.path.join(
                            target_crop_dir,
                            "{}_{}.jpg".format(image_file_name, k))
                        crop_im.save(output_image_path)

                    if crop_cnt < 1:
                        shutil.copy(batch_files[j], no_detected_dir)
                        continue

                    img = driver.visualize(raw_images[j], detections_bs[j],
                                           **kwargs)
                    # img_id = str(i * batch_size + j)
                    output_image_path = os.path.join(
                        detected_dir, os.path.basename(batch_files[j]))
                    Image.fromarray(img).save(output_image_path)
                    logging.info('writing file to %s', output_image_path)
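A hypothetical invocation of this method; inspector stands in for an instance of the surrounding class, and all values are illustrative:

    inspector.saved_model_inference('images/*.jpg', '/tmp/sorted',
                                    bbox_crop_margin_ratio=0.1,
                                    min_score_thresh=0.4)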
Example No. 21
 def export_saved_model(self):
     driver = inference.ServingDriver(self.model_name, self.ckpt_path)
     driver.build()
     driver.export(self.saved_model_dir)
Example No. 22
    def agnosticCoco(self, input_image, coco_json, **kwargs):
        import json
        import pickle
        from coco_eval import coco_eval

        with open(coco_json) as f:
            imageAnnotations = json.load(f)
        evals = {}

        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)

        driver.load(self.saved_model_dir)

        for cat in imageAnnotations['categories']:
            id, name = cat['id'], cat['name']
            print('Doing', id, name)
            image_file_names = [
                os.path.join(input_image, img['file_name'])
                for img in imageAnnotations['images']
                if img['categories'] == [id]
            ]
            if not image_file_names:
                print(id, name, 'has no images')
                continue

            # Serving time batch size should be fixed.
            batch_size = self.batch_size or 1
            all_files = image_file_names
            print('all_files=', all_files)
            num_batches = (len(all_files) + batch_size - 1) // batch_size
            results = []

            for i in range(num_batches):
                batch_files = all_files[i * batch_size:(i + 1) * batch_size]
                fnames = [f for f in batch_files]
                height, width = self.model_config.image_size
                images = [Image.open(f) for f in batch_files]
                # if len(set([m.size for m in images])) > 1:
                #     # Resize only if images in the same batch have different sizes.
                #     images = [m.resize(height, width) for m in images]
                raw_images = [np.array(m) for m in images]
                # size_before_pad = len(raw_images)
                # if size_before_pad < batch_size:
                #     padding_size = batch_size - size_before_pad
                #     raw_images += [np.zeros_like(raw_images[0])] * padding_size
                print(fnames)
                try:
                    detections_bs = driver.serve_images(raw_images)
                except Exception:  # serving failed; skip this batch
                    continue

                # Note: only the first image of each batch is read out here.
                boxes = detections_bs[0, :, 1:5]
                classes = detections_bs[0, :, 6].astype(int)
                scores = detections_bs[0, :, 5]

                res = dict(file=fnames[0],
                           height=height,
                           width=width,
                           detections=[])
                # creating compatible boxes:
                for j in range(boxes.shape[0]):
                    if scores[j] > kwargs['min_score_thresh']:
                        res['detections'].append(
                            dict(conf=scores[j],
                                 category=int(classes[j]),
                                 bbox=boxes[j, :]))
                results.append(res)

            # results = load_and_run_detector_output(model_file=flag.checkpoint_folder,
            #                                 image_file_names=image_file_names,
            #                                 confidence_threshold=0.4)
            if results:
                # Convert the annotations to the right class.
                coco_metrics = coco_eval(results,
                                         coco_json.replace(
                                             'animals', 'animal'),
                                         save=True,
                                         boxTransform='oid')
                evals[id] = (name, coco_metrics)
                results = {}

                with open('out/evals.pkl', 'wb') as f:
                    pickle.dump(evals, f, pickle.HIGHEST_PROTOCOL)

                print(evals[id])
                results = []
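This method assumes a COCO-style JSON that additionally carries a per-image categories list; a minimal sketch of the shape it reads (all values illustrative):

    coco_example = {
        'categories': [{'id': 1, 'name': 'cat'}],
        'images': [{'file_name': 'img_0001.jpg', 'categories': [1]}],
    }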
Example No. 23
    def saved_model_inference(self, image_path_pattern, output_dir, **kwargs):
        """Perform inference for the given saved model."""
        driver = inference.ServingDriver(
            self.model_name,
            self.ckpt_path,
            batch_size=self.batch_size,
            use_xla=self.use_xla,
            model_params=self.model_config.as_dict(),
            **kwargs)
        driver.load(self.saved_model_dir)

        # Serving time batch size should be fixed.
        batch_size = self.batch_size or 1
        all_files = list(tf.io.gfile.glob(image_path_pattern))
        num_batches = (len(all_files) + batch_size - 1) // batch_size

        for i in tqdm(range(num_batches)):
            batch_files = all_files[i * batch_size:(i + 1) * batch_size]
            height, width = self.model_config.image_size
            images = []
            img_path = []
            for f in batch_files:
                images.append(Image.open(f))
                img_path.append(f)

            if len(set([m.size for m in images])) > 1:
                # Resize only if images in the same batch have different sizes.
                # PIL's Image.resize takes a single (width, height) tuple.
                images = [m.resize((width, height)) for m in images]
            raw_images = [np.array(m) for m in images]
            size_before_pad = len(raw_images)
            if size_before_pad < batch_size:
                padding_size = batch_size - size_before_pad
                raw_images += [np.zeros_like(raw_images[0])] * padding_size

            detections_bs = driver.serve_images(raw_images)
            for j in range(size_before_pad):
                img = driver.visualize(raw_images[j], detections_bs[j],
                                       **kwargs)
                img_id = str(i * batch_size + j)

                image_file_name = os.path.basename(img_path[j])
                folder_name = os.path.basename(os.path.dirname(img_path[j]))

                output_image_path = os.path.join(output_dir, 'png',
                                                 folder_name, image_file_name)
                output_image_path1 = os.path.join(output_dir, 'png',
                                                  folder_name)

                #print(image_file_name)

                boxes = detections_bs[j][:, 1:5]
                classes = detections_bs[j][:, 6].astype(int)
                scores = detections_bs[j][:, 5]

                box_path = output_dir + "/boxes"
                img_box_name = image_file_name[:image_file_name.rindex('.')]
                if not os.path.exists(box_path):
                    os.makedirs(box_path)

                with open(os.path.join(box_path, img_box_name) + '.txt',
                          'a') as box_log:
                    for x in detections_bs[j]:
                        if x[5] > kwargs.get('min_score_thresh'):
                            # Row layout:
                            # [image_id, ymin, xmin, ymax, xmax, score, class]
                            boxstr = '{} {} {} {} {}'.format(
                                x[5],  # confidence
                                x[2],  # xmin
                                x[1],  # ymin
                                x[4],  # xmax
                                x[3])  # ymax
                            box_log.write('tumor ' + boxstr + '\n')
                            if not os.path.exists(output_image_path1):
                                os.makedirs(output_image_path1)
                            # Note: this re-saves the same visualization for
                            # every qualifying box; once per image suffices.
                            Image.fromarray(img).save(output_image_path)
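Each line of the box files written above reads 'tumor <score> <xmin> <ymin> <xmax> <ymax>'; a minimal parsing sketch (the path is illustrative):

    # Parse one per-image box file back into floats.
    with open('/tmp/out/boxes/case_001.txt') as f:
        for line in f:
            label, score, xmin, ymin, xmax, ymax = line.split()
            print(label, float(score), float(xmin), float(ymin))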