Example 1 (score: 0)
    def get_sample(self, idx):
        """Load and preprocess one image from the dataset list.

        Reads the image at index ``idx``, converts it to float32, and
        resizes it so the short edge matches ``cfg.short_edge_size``
        (capped at ``cfg.max_size``).

        Returns a tuple of (resized image, average rescale factor,
        base file name without extension, original (height, width)).
        """
        cfg = self.cfg
        path = self.imgs[idx]

        base_name = os.path.splitext(os.path.basename(path))[0]

        image = cv2.imread(path).astype("float32")

        resized = resizeImage(image, cfg.short_edge_size, cfg.max_size)

        # Mean of the height and width rescale ratios.
        h_ratio = resized.shape[0] * 1.0 / image.shape[0]
        w_ratio = resized.shape[1] * 1.0 / image.shape[1]
        scale = (h_ratio + w_ratio) / 2.0

        return resized, scale, base_name, (image.shape[0], image.shape[1])
  def _run(self):
    """Worker loop: read up to ``self.num_frame`` frames from the capture,
    preprocess every ``frame_gap``-th one, and push results onto
    ``self.queue`` until done or ``self.stop_signal`` is set.
    """
    cfg = self.cfg

    frame_count = 0
    while frame_count < self.num_frame:
      # Exit promptly if another thread requested shutdown.
      if self.stop_signal.is_set():
        return

      if self.is_moviepy:
        suc = True
        # NOTE(review): next() on an exhausted moviepy frame iterator raises
        # StopIteration instead of setting suc=False — confirm the caller
        # handles that, since this loop only guards against suc being False.
        frame = next(self.vcap)
      else:
        suc, frame = self.vcap.read()
      if not suc:
        # Unreadable frame: still advance the counter so the loop terminates.
        frame_count += 1
        continue

      # Sample only every frame_gap-th frame; skip the rest.
      if frame_count % self.frame_gap != 0:
        frame_count += 1
        continue

      # process the frames
      if self.is_moviepy:
        # moviepy ask ffmpeg to get rgb24
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
      im = frame.astype("float32")

      resized_image = resizeImage(im, cfg.short_edge_size, cfg.max_size)

      # Average of the height and width rescale ratios.
      scale = (resized_image.shape[0] * 1.0 / im.shape[0] + \
               resized_image.shape[1] * 1.0 / im.shape[1]) / 2.0

      # Blocks when the queue is full — provides back-pressure on this reader.
      self.queue.put((resized_image, scale, frame_count), block=True)

      frame_count += 1

    # All frames consumed: wait for the queue to drain before returning.
    self._wait_queue()
    if self.stop_signal.is_set():
      # We're done
      return
Example 3 (score: 0)
				# skip some frame if frame_gap >1
				if cur_frame % args.frame_gap != 0:
					cur_frame+=1
					continue

				# 4. run detection on the frame stack if there is enough

				im = frame.astype("float32")

				if args.test_frame_extraction:
					frame_file = os.path.join(video_out_path, "%d.jpg"%cur_frame)
					cv2.imwrite(frame_file, im)
					cur_frame+=1
					continue

				resized_image = resizeImage(im, args.short_edge_size, args.max_size)

				scale = (resized_image.shape[0]*1.0/im.shape[0] + resized_image.shape[1]*1.0/im.shape[1])/2.0

				feed_dict = model.get_feed_dict_forward(resized_image)

				if args.get_box_feat:
					sess_input = [model.final_boxes, model.final_labels, model.final_probs, model.fpn_box_feat]

					final_boxes, final_labels, final_probs, box_feats = sess.run(sess_input,feed_dict=feed_dict)
					assert len(box_feats) == len(final_boxes)
					# save the box feature first

					featfile = os.path.join(feat_out_path, "%d.npy"%(cur_frame))
					np.save(featfile, box_feats)
				else: