def predict_on_json(predictions, annotation_path, classes, show=False, conf_threshold=None, dump_output_video=False,
                    path_to_output_video='output.avi', width=640, height=480, fps=30):
  """Post-process per-image predictions against a JSON annotation file.

  For each prediction, the matching annotation (image path, size, id) is
  looked up by position, detections are extracted with ``process_image``
  and drawn on the source image.  Optionally shows each frame (ESC stops
  early) and/or dumps an XVID video of the rendered frames.

  Args:
    predictions: iterable of per-image prediction arrays; columns after the
      first are passed to ``process_image``.
    annotation_path: path to the JSON annotation consumed by
      ``ObjectDetectorJson.json_iterator``.
    classes: class list forwarded to the iterator and ``process_image``.
    show: if True, display each rendered frame in a window.
    conf_threshold: confidence cutoff forwarded to ``process_image``.
    dump_output_video: if True, write rendered frames to a video file.
    path_to_output_video, width, height, fps: output video parameters.

  Returns:
    Flat list of all detections accumulated over the processed images.
  """
  generator, _ = ObjectDetectorJson.json_iterator(annotation_path, classes)
  annotations = [pickle.loads(item) for item in generator()]

  detections = []
  video_writer = None
  if dump_output_video:
    video_writer = cv2.VideoWriter(path_to_output_video,
                                   cv2.VideoWriter_fourcc(*'XVID'),
                                   fps, (int(width), int(height)))

  for idx, prediction in enumerate(predictions):
    ann = annotations[idx]
    size = ann['image_size']
    dets = process_image(prediction[:, 1:], size, ann['image_id'], conf_threshold, classes)
    detections.extend(dets)

    frame = cv2.resize(cv2.imread(ann['image']), tuple(size))
    rendered = draw_detections(frame, dets)

    if show:
      cv2.imshow('detections', rendered)
      # ESC aborts the loop early; detections gathered so far are returned.
      if cv2.waitKey(10) == 27:
        break
    if video_writer is not None:
      video_writer.write(cv2.resize(rendered, (width, height)))

  if video_writer is not None:
    video_writer.release()

  return detections
Example #2
0
  def sample_data(json_path, num_samples, input_shape, classes, seed=666):
    """Randomly pick ``num_samples`` annotated items and transform them.

    Items are read from the JSON annotation at ``json_path``, sampled
    reproducibly with the given ``seed``, and each selected item is run
    through the evaluation-time (non-training) transform for the model's
    input shape.

    Args:
      json_path: path to the JSON annotation file.
      num_samples: number of items to draw; 0 returns None.
      input_shape: model input shape; its first two dims define the transform.
      classes: class list forwarded to the annotation helpers.
      seed: RNG seed for reproducible sampling.

    Returns:
      List of transformed items (with original images attached), or None
      when ``num_samples`` is 0.
    """
    if num_samples == 0:
      return None

    iterator, _ = ObjectDetectorJson.json_iterator(json_path, classes)
    items = list(iterator())

    ObjectDetectorJson.init_cache(json_path, cache_type='NONE', classes=classes)

    # Deterministic sampling of item indices.
    chosen = random.Random(seed).sample(range(len(items)), num_samples)

    _, params = MobileNetSSD.create_transform_parameters(*input_shape[:2])
    transformer = AnnotatedDataTransformer(params, is_training=False)

    return [ObjectDetectorJson.transform_fn(items[i], transformer, add_original_image=True)
            for i in chosen]
Example #3
0
def predict_on_json(predictions,
                    annotation_path,
                    classes,
                    show=False,
                    conf_threshold=None,
                    dump_output_video=False,
                    path_to_output_video='output.avi',
                    width=640,
                    height=480,
                    fps=30):
    """Post-process predictions against JSON annotations; optionally render.

    NOTE(review): this is a duplicate of the 2-space-indented definition at
    the top of this file; in a single module the later definition shadows
    the earlier one — confirm which copy is intended to survive.

    Args:
        predictions: iterable of per-image prediction arrays; columns after
            the first are handed to ``process_image``.
        annotation_path: JSON annotation file consumed by
            ``ObjectDetectorJson.json_iterator``.
        classes: class list forwarded to the iterator and ``process_image``.
        show: display each rendered frame; ESC (key code 27) stops early.
        conf_threshold: confidence cutoff forwarded to ``process_image``.
        dump_output_video: write rendered frames to an XVID video file.
        path_to_output_video: output video path.
        width: output video frame width in pixels.
        height: output video frame height in pixels.
        fps: output video frame rate.

    Returns:
        Flat list of all detections accumulated over the processed images.
    """
    annotation_generator, _ = ObjectDetectorJson.json_iterator(
        annotation_path, classes)
    # Iterator yields pickled annotation dicts; deserialize them all up front.
    annotation_data = [pickle.loads(x) for x in annotation_generator()]

    output = []
    if dump_output_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(path_to_output_video, fourcc, fps,
                              (int(width), int(height)))
    for i, pred in enumerate(predictions):
        # Predictions and annotations are paired by position; raises
        # IndexError if there are more predictions than annotations.
        annotation = annotation_data[i]
        image_size = annotation['image_size']
        img_path = annotation['image']
        img_id = annotation['image_id']
        # First prediction column is dropped before post-processing.
        det = process_image(pred[:, 1:], image_size, img_id, conf_threshold,
                            classes)
        output.extend(det)
        # NOTE(review): cv2.imread returns None for an unreadable path, which
        # would make the resize below raise — assumes all paths are valid.
        frame = cv2.imread(img_path)
        frame = cv2.resize(frame, tuple(image_size))
        img = draw_detections(frame, det)
        if show:
            cv2.imshow('detections', img)
            key = cv2.waitKey(10)
            if key == 27:
                # ESC pressed: abort; detections gathered so far are returned.
                break
        if dump_output_video:
            img_resized = cv2.resize(img, (width, height))
            out.write(img_resized)
    if dump_output_video:
        out.release()

    return output