def test_pipeline():
  dataset, dsp = test_yolo_input_task()

  drawer = utils.DrawBoxes(labels=coco.get_coco_names(), thickness=1)
  for l, (i, j) in enumerate(dataset):

    # boxes = box_ops.xcycwh_to_yxyx(j['bbox'])
    # j["bbox"] = boxes
    i = drawer(i, j)  # tf.image.draw_bounding_boxes(i, boxes, [[1.0, 0.0, 1.0]])

    gt = j['grid_form']

    obj3 = gt['3'][..., 4]
    obj4 = gt['4'][..., 4]
    obj5 = gt['5'][..., 4]

    fig, axe = plt.subplots(1, 4)

    axe[0].imshow(i[0])
    axe[1].imshow(obj3[0].numpy())
    axe[2].imshow(obj4[0].numpy())
    axe[3].imshow(obj5[0].numpy())

    fig.set_size_inches(18.5, 6.5, forward=True)
    plt.tight_layout()
    plt.show()

    if l >= 30:
      break
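
# The commented-out lines in test_pipeline reference box_ops.xcycwh_to_yxyx.
# For reference, here is a minimal sketch of that conversion (assuming
# normalized (x_center, y_center, width, height) boxes and that tf is already
# imported); the repo's box_ops module is the actual implementation.
def xcycwh_to_yxyx_sketch(box):
  """Convert [..., 4] boxes from (xc, yc, w, h) to (ymin, xmin, ymax, xmax)."""
  xc, yc, w, h = tf.split(box, 4, axis=-1)
  return tf.concat(
      [yc - h / 2.0, xc - w / 2.0, yc + h / 2.0, xc + w / 2.0], axis=-1)
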
def process_model(model, process_size):
  drawer = utils.DrawBoxes(classes=80, labels=coco.get_coco_names())

  def run(image):
    image_ = tf.convert_to_tensor(image)
    image_ = tf.image.resize(image_, (process_size, process_size))
    image_ = tf.expand_dims(image_, axis=0)
    pred = model.predict(image_)
    image = drawer(image, pred)
    return image

  return run
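
# A hedged usage sketch for process_model. Loading a Keras model from the
# "saved_models/v4/tiny" path that appears later in this file is an
# assumption (there it is consumed by TensorRT), as is the webcam source.
def demo_process_model():
  model = tf.keras.models.load_model("saved_models/v4/tiny")
  runner = process_model(model, process_size=416)
  cap = cv2.VideoCapture(0)
  ok, frame = cap.read()
  if ok:
    annotated = runner(frame)  # frame with boxes and labels drawn
  cap.release()
  return annotated if ok else None
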
def build_model(version):
    if version == "v4":
        config = exp_cfg.YoloTask(model=exp_cfg.Yolo(
            base="v4",
            min_level=3,
            norm_activation=exp_cfg.common.NormActivation(activation="mish"),
            #_boxes = ['(10, 14)', '(23, 27)', '(37, 58)', '(81, 82)', '(135, 169)', '(344, 319)'],
            _boxes=[
                "(12, 16)", "(19, 36)", "(40, 28)", "(36, 75)", "(76, 55)",
                "(72, 146)", "(142, 110)", "(192, 243)", "(459, 401)"
            ],
        ))
    elif "tiny" in version:
        config = exp_cfg.YoloTask(model=exp_cfg.Yolo(
            base=version,
            min_level=4,
            norm_activation=exp_cfg.common.NormActivation(activation="leaky"),
            _boxes=[
                "(10, 14)", "(23, 27)", "(37, 58)", "(81, 82)", "(135, 169)",
                "(344, 319)"
            ],
            #_boxes = ['(12, 16)', '(19, 36)', '(40, 28)', '(36, 75)','(76, 55)', '(72, 146)', '(142, 110)', '(192, 243)','(459, 401)'],
        ))
    else:
        config = exp_cfg.YoloTask(model=exp_cfg.Yolo(
            base=version,
            min_level=3,
            norm_activation=exp_cfg.common.NormActivation(activation="leaky"),
            #_boxes = ['(10, 14)', '(23, 27)', '(37, 58)', '(81, 82)', '(135, 169)', '(344, 319)'],
            _boxes=[
                "(10, 13)", "(16, 30)", "(33, 23)", "(30, 61)", "(62, 45)",
                "(59, 119)", "(116, 90)", "(156, 198)", "(373, 326)"
            ],
        ))

    task = YoloTask(config)
    model = task.build_model()
    task.initialize(model)

    pfn = ms.preprocess_fn
    pofn = utils.DrawBoxes(classes=80,
                           labels=coco.get_coco_names(),
                           display_names=True,
                           thickness=2)
    server_t = ms.ModelServer(model=model,
                              preprocess_fn=pfn,
                              postprocess_fn=pofn,
                              wait_time=0.00001,
                              max_batch=5)
    return server_t
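
# A sketch of driving the ModelServer returned by build_model. The put()
# retry loop is inferred from the run() function below (put appears to
# return False while the batch queue is full); treat the API details as
# assumptions, and the test-image path as hypothetical.
def demo_build_model():
    server = build_model("v4")
    server.start()
    frame = cv2.imread("videos/frame.jpg")  # hypothetical test image
    while not server.put(frame):
        time.sleep(server.wait_time)
    server.close()
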
def run(model, video, disp_h, wait_time, max_batch, que_size):
    max_batch = 5 if max_batch is None else max_batch
    pfn = preprocess_fn
    pofn = utils.DrawBoxes(classes=80,
                           labels=coco.get_coco_names(),
                           display_names=True,
                           thickness=2)

    server = ModelServer(model=model,
                         preprocess_fn=pfn,
                         postprocess_fn=pofn,
                         wait_time=wait_time,
                         max_batch=max_batch)
    video = video_t.VideoServer(video,
                                wait_time=0.00000001,
                                que=que_size,
                                disp_h=disp_h)
    display = video_t.DisplayThread(server,
                                    alpha=0.9,
                                    wait_time=0.000001,
                                    fix_wt=False)
    server.start()
    video.start()
    display.start()

    # issue: at some point the pipeline gets
    # bottlenecked by the reading thread.
    try:
        while (video.running and display.running):
            frame = video.get()
            if not isinstance(frame, type(None)):
                while not server.put(frame):
                    time.sleep(server.wait_time)
            time.sleep(server.wait_time)
    except Exception as e:
        print(e)
        traceback.print_exc()

    server.close()
    display.close()
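
# Example invocation of run(), reusing values that appear elsewhere in this
# file ("videos/nyc.mp4", disp_h=480, que_size=1000). `model` is assumed to
# be the Keras model produced inside build_model() above (task.build_model()
# followed by task.initialize(model)), not the wrapped ModelServer.
def demo_run(model):
    run(model,
        video="videos/nyc.mp4",
        disp_h=480,
        wait_time=0.00001,
        max_batch=5,
        que_size=1000)
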
  def __init__(self,
               file_name,
               model=None,
               preprocess_function=None,
               process_width=416,
               process_height=416,
               disp_h=720,
               classes=80,
               labels=None,
               print_conf=True,
               max_batch=None,
               wait_time=None,
               preprocess_with_gpu=False,
               scale_que=1,
               policy='float16',
               gpu_device='/GPU:0',
               preprocess_gpu='/GPU:0'):
    self._cap = cv2.VideoCapture(file_name)
    if not self._cap.isOpened():
      raise IOError('video file was not found')

    # support for ANSI characters in Windows
    support_windows()

    self._file = file_name
    self._fps = 120000000

    self._gpu_device = gpu_device
    if preprocess_with_gpu:
      self._pre_process_device = preprocess_gpu
    else:
      self._pre_process_device = '/CPU:0'

    # self._cap.set(3, int(960))
    # self._cap.set(4, int(720))
    # self._cap.set(cv2.CAP_PROP_FPS, int(30))
    print(self._cap.get(3), self._cap.get(4))  # props 3 and 4: frame width, height

    self._preprocess_function = preprocess_function
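    # scale the display height to disp_h and the width to preserve the source
    # aspect ratio, e.g. a 1920x1080 source with disp_h=720 displays at 1280x720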
    self._height = int(self._cap.get(4)) if disp_h is None else disp_h
    self._og_height = int(self._cap.get(4))
    self._width = int(self._cap.get(3) * (self._height / self._og_height))
    self._classes = classes
    self._p_width = process_width
    self._p_height = process_height
    self._policy = policy
    self._model = model

    # fast, but as close to one-to-one as possible
    if max_batch is None:
      if file_name == 0:
        self._batch_size = 5  # 40 fps, more consistent frame to frame
      else:
        # faster but more potential for delay from input to output
        if tf.keras.mixed_precision.experimental.global_policy(
        ).name == 'mixed_float16' or tf.keras.mixed_precision.experimental.global_policy(
        ).name == 'float16':
          # self._batch_size = 9  # 45 fps: faster but less frame-to-frame consistent; more frames may be loaded than can be consumed downstream
          self._batch_size = 5
        else:
          self._batch_size = 3

        if process_width > 416 or process_height > 416:
          self._batch_size = 3
    else:
      self._batch_size = max_batch

    self._colors = gen_colors(self._classes)

    if labels is None:
      self._labels = get_coco_names(
          path='yolo/dataloaders/dataset_specs/coco.names')
    else:
      self._labels = labels

    self._draw_fn = utils.DrawBoxes(
        classes=classes,
        labels=self._labels,
        display_names=print_conf,
        thickness=1)
    #get_draw_fn(self._colors, self._labels, print_conf)

    self._load_que = Queue(self._batch_size * scale_que)
    self._display_que = Queue(1 * scale_que)
    self._running = True

    # wait_time == 'dynamic' means the interval is tuned at runtime
    # (presumably from the measured display fps); otherwise a fixed interval
    # is derived from the batch size via utils.get_wait_time
    self._dynamic_wt = wait_time == 'dynamic'
    if not self._dynamic_wt:
      self._wait_time = utils.get_wait_time(wait_time, max_batch)
    else:
      self._wait_time = 0.0001

    self._read_fps = 1
    self._prev_display_fps = 1
    self._display_fps = 1
    self._latency = -1
    self._batch_proc = 1
    self._frames = 1
    self._obj_detected = -1
    return
    #name = "saved_models/v4/tflite-regualr-no-nms"
    #name = "saved_models/v4/tflite-tiny-no-nms"
    name = "saved_models/v4/tiny"
    new_name = f"{name}_tensorrt"
    model = trt.TensorRT(
        saved_model=new_name,
        save_new_path=new_name,
        max_workspace_size_bytes=4000000000,
        max_batch_size=5)  # , precision_mode="INT8", use_calibration=True)
    model.compile()
    model.summary()
    model.set_postprocessor_fn(func)  # `func` is defined elsewhere in the original file

    pfn = preprocess_fn
    pofn = utils.DrawBoxes(classes=80,
                           labels=coco.get_coco_names(),
                           display_names=True,
                           thickness=2)

    server = ModelServer(model=model,
                         preprocess_fn=pfn,
                         postprocess_fn=pofn,
                         wait_time=0.00001,
                         max_batch=5)
    video = video_t.VideoServer("videos/nyc.mp4",
                                wait_time=0.00000001,
                                que=1000,
                                disp_h=480)
    display = video_t.DisplayThread(server,
                                    alpha=0.9,
                                    wait_time=0.000001,
                                    fix_wt=False)
    # the original snippet is truncated here; the closing fix_wt argument is
    # assumed from the identical DisplayThread call in run() above
    # ---- separate snippet: __init__ of a webcam-to-virtual-camera class
    # (the enclosing class definition was not captured in this listing) ----
    def __init__(
            self,
            model,
            file_name=0,  # ,"/dev/video0",
            fake_cam_id='/dev/video20',
            preprocess_function=None,
            process_width=416,
            process_height=416,
            disp_h=720,
            classes=80,
            labels=None,
            print_conf=True,
            max_batch=None,
            wait_time=None,
            preprocess_with_gpu=False,
            scale_que=1,
            fps=30,
            policy='float16',
            gpu_device='/GPU:0',
            preprocess_gpu='/GPU:0'):
        self._cap = cv2.VideoCapture(file_name)  # + cv2.CAP_DSHOW)

        if not self._cap.isOpened():
            raise IOError('video file was not found')

        # support for ANSI characters in Windows
        support_windows()

        self._file = file_name
        self._fps = 120000000

        self._gpu_device = gpu_device
        if preprocess_with_gpu:
            self._pre_process_device = preprocess_gpu
        else:
            self._pre_process_device = '/CPU:0'

        # request MJPG frames: many UVC webcams only reach higher frame rates
        # (e.g. 30 fps at 1280x720) with MJPG rather than raw YUYV
        fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        self._cap.set(cv2.CAP_PROP_FOURCC, fourcc)
        self._cap.set(3, int(1280))
        self._cap.set(4, int(720))
        self._cap.set(cv2.CAP_PROP_FPS, int(fps))
        print(self._cap.get(3), self._cap.get(4))  # width/height actually granted by the driver

        self._preprocess_function = preprocess_function
        self._height = int(self._cap.get(4)) if disp_h is None else disp_h
        self._og_height = int(self._cap.get(4))
        self._width = int(self._cap.get(3) * (self._height / self._og_height))
        self._classes = classes
        self._p_width = process_width
        self._p_height = process_height
        self._policy = policy
        self._model = model

        print(self._width)
        # pyfakewebcam streams to a v4l2loopback device; the target node must
        # exist beforehand (e.g. `sudo modprobe v4l2loopback video_nr=20`).
        # Use the fake_cam_id parameter instead of a hard-coded device path.
        self._fake_cam = pyfakewebcam.FakeWebcam(
            fake_cam_id, 960, 720)  # self._width, self._height

        # fast, but as close to one-to-one as possible
        if max_batch is None:
            if file_name == 0:
                self._batch_size = 5  # 40 fps, more consistent frame to frame
            else:
                # faster but more potential for delay from input to output
                if tf.keras.mixed_precision.experimental.global_policy(
                ).name == 'mixed_float16' or tf.keras.mixed_precision.experimental.global_policy(
                ).name == 'float16':
                    # self._batch_size = 9  # 45 fps: faster but less frame-to-frame consistent; more frames may be loaded than can be consumed downstream
                    self._batch_size = 5
                else:
                    self._batch_size = 3

                if process_width > 416 or process_height > 416:
                    self._batch_size = 3
        else:
            self._batch_size = max_batch

        self._colors = gen_colors(self._classes)

        if labels is None:
            self._labels = get_coco_names(
                path='yolo/dataloaders/dataset_specs/coco.names')
        else:
            self._labels = labels

        self._draw_fn = get_draw_fn(self._colors, self._labels, print_conf)

        self._load_que = Queue(self._batch_size * scale_que)
        self._display_que = Queue(1 * scale_que)
        self._running = True
        if self._batch_size != 1:
            self._wait_time = 0.0015 * self._batch_size if wait_time is None else wait_time  # 0.05 default
        else:
            self._wait_time = 0.0001

        self._read_fps = 1
        self._display_fps = 1
        self._latency = -1
        self._batch_proc = 1
        self._frames = 1
        self._obj_detected = -1
        return