Example no. 1
    def _safety_store_txt(self, xyxy, frame_id, cls, conf_score):
        try:
            txt_save_path = self.txt_bbox_path + "frame-%s" % str(frame_id)
            if self.opt.save_txt:
                save_txt(txt_save_path, self.opt.txt_format, bbox_xyxy=xyxy, cls=cls, conf=conf_score)
        except Exception as e:
            print("\n[%s] Unable to store bbox txt of frame-%s: %s" % (get_current_time(), str(frame_id), e))
Example no. 2
    def _process_detection(self, image4yolo, raw_img, this_frame_id):

        # Get detections
        ts_det = time.time()
        image4yolo = torch.from_numpy(image4yolo).to(self.device)
        if image4yolo.ndimension() == 3:
            image4yolo = image4yolo.unsqueeze(0)
        try:
            self.pred = self.model(image4yolo)[0]
        except Exception as e:
            print("~~ ERROR: ", e)
        t_inference = (time.time() - ts_det) * 1000  # to ms

        # Latency: Inference
        print('[%s] DONE Inference of frame-%s (%.3f ms)' %
              (get_current_time(), str(this_frame_id), t_inference))

        # Default: Disabled
        if self.opt.half:
            self.pred = self.pred.float()

        # Apply NMS: Non-Maximum Suppression
        # ts_nms = time.time()
        # Removes detections with object confidence scores below 'conf_thres'
        self.pred = non_max_suppression(self.pred,
                                        self.opt.conf_thres,
                                        self.opt.iou_thres,
                                        classes=self.opt.classes,
                                        agnostic=self.opt.agnostic_nms)
        # print('\n # Total Non-Maximum Suppression (NMS) time: (%.3fs)' % (time.time() - ts_nms))

        # Apply Classifier: Default DISABLED
        if self.classify:
            self.pred = apply_classifier(self.pred, self.modelc, image4yolo,
                                         raw_img)

        # Process detections
        # p = path, s = string for printing, im0 = image (matrix)

        try:
            for i, det in enumerate(self.pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(image4yolo.shape[2:], det[:, :4],
                                              raw_img.shape).round()

                    # Export results: Raw image OR BBox image OR Crop image OR BBox txt
                    # if self.opt.dump_raw_img or self.opt.dump_bbox_img or self.opt.dump_crop_img or self.opt.save_txt:
                    if self.opt.cv_out:
                        self._manage_detection_results(det, raw_img,
                                                       this_frame_id)

        except Exception as e:
            print("ERROR Plotting: ", e)
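
Note: non_max_suppression here returns one tensor per image whose rows are [x1, y1, x2, y2, conf, cls] (the same layout Example no. 9 unpacks). Below is a minimal, standalone sketch of consuming that output; it assumes non_max_suppression, scale_coords, and a names list mapping class ids to labels are in scope as in the snippet above, and the function name postprocess is hypothetical:

    def postprocess(pred_raw, input_tensor, raw_img, names, conf_thres=0.3, iou_thres=0.5):
        # Filter raw predictions, then map boxes back to the original image resolution.
        pred = non_max_suppression(pred_raw, conf_thres, iou_thres)
        for det in pred:  # one tensor per image in the batch
            if det is None or not len(det):
                continue
            det[:, :4] = scale_coords(input_tensor.shape[2:], det[:, :4], raw_img.shape).round()
            for *xyxy, conf, cls in det:
                x1, y1, x2, y2 = (int(v) for v in xyxy)
                print("%s %.2f at (%d, %d, %d, %d)" % (names[int(cls)], conf, x1, y1, x2, y2))
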
Example no. 3
    def _save_cropped_img(self, xyxy, im0, idx, cls, frame_id, ext):
        try:
            if self.opt.dump_crop_img:
                numpy_xyxy = torch2numpy(xyxy, int)
                xywh = np_xyxy2xywh(numpy_xyxy)
                crop_save_path = self.crop_img_path + "frame-%s_[cls=%s][idx=%s]%s" % (
                    str(frame_id), cls, str(idx), ext)
                crop_image(crop_save_path, im0, xywh)
        except Exception as e:
            print("\n[%s] Unable to crop image of frame-%s: %s" % (get_current_time(), str(frame_id), e))
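
The helpers torch2numpy, np_xyxy2xywh, and crop_image above are repo-specific. For reference, here is a sketch of an equivalent crop using plain NumPy slicing and OpenCV; it assumes im0 is an HxWx3 BGR array and xyxy holds pixel coordinates, and the function name crop_xyxy is hypothetical:

    import cv2

    def crop_xyxy(im0, xyxy, save_path):
        # Clamp the box to the image bounds, then slice rows (y) first, columns (x) second.
        x1, y1, x2, y2 = (int(v) for v in xyxy)
        h, w = im0.shape[:2]
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        crop = im0[y1:y2, x1:x2]
        cv2.imwrite(save_path, crop)
        return crop
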
Example no. 4
    def run(self):
        print("[%s] YOLO App is running now ..." % get_current_time())
        if not self.opt.is_source_stream:
            print("\n[%s] Reading folder data from path: `%s`" %
                  (get_current_time(), self.opt.source))
            self._read_from_folder()  # it prints the collected image paths
            self._detect_from_folder()
        else:
            print("\n[%s] Reading video streaming from URL: `%s`" %
                  (get_current_time(), self.opt.source))

            while self.is_running:
                try:
                    self._read_from_streaming()
                    self._detect_from_video_streaming()
                except Exception as e:
                    print(
                        "\nUnable to communicate with the streaming source (%s). Restarting . . ."
                        % e
                    )
Example no. 5
    def detect(self, raw_img, frame_id):
        print("\n[%s] Starting YOLOv3 for frame-%s" %
              (get_current_time(), frame_id))

        # Save the raw input frame before running detection
        self._save_results(raw_img, frame_id, is_raw=True)

        # Padded resize
        image4yolo = letterbox(raw_img, new_shape=self.img_size)[0]

        # Convert
        image4yolo = image4yolo[:, :, ::-1].transpose(
            2, 0, 1)  # BGR to RGB, to 3x416x416
        image4yolo = np.ascontiguousarray(image4yolo,
                                          dtype=np.float16 if self.half else
                                          np.float32)  # uint8 to fp16/fp32
        image4yolo /= 255.0  # 0 - 255 to 0.0 - 1.0

        # Start processing image
        print("[%s] Received frame-%d" % (get_current_time(), int(frame_id)))
        self._process_detection(image4yolo, raw_img, frame_id)
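
The preprocessing above is the usual YOLO input pipeline: letterbox resize, BGR to RGB, HWC to CHW, then normalization. A standalone sketch with a dummy frame, assuming letterbox from the repo's utilities is importable as in the snippet:

    import numpy as np
    import torch

    raw_img = np.zeros((480, 640, 3), dtype=np.uint8)    # dummy BGR frame in place of a real capture

    img = letterbox(raw_img, new_shape=416)[0]            # pad/resize, keeps aspect ratio
    img = img[:, :, ::-1].transpose(2, 0, 1)              # BGR -> RGB, HWC -> CHW
    img = np.ascontiguousarray(img, dtype=np.float32)     # uint8 -> fp32
    img /= 255.0                                          # [0, 255] -> [0.0, 1.0]
    tensor = torch.from_numpy(img).unsqueeze(0)           # add batch dim: 1 x 3 x H x W
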
Example no. 6
    def _detection_handler(self, ret, img0, frame_id):
        is_break = False

        if ret and not is_break:
            # Force stop after self.opt.max_frames frames; only active when self.opt.is_limited is set
            if self.opt.is_limited and frame_id == (self.opt.max_frames + 1):
                print(
                    "\n[%s] Detection is automatically halted; Total processed frames: `%s frames`"
                    % (get_current_time(), (frame_id - 1)))
                self.is_running = False
                is_break = True

            if not is_break:
                # Feed this image into YOLO network
                print("\n[%s] Feed frame-%s into YOLO network" %
                      (get_current_time(), str(frame_id)))
                self.yolo.detect(img0, frame_id)

        else:
            print("Invalid frame received.")
            print("There are probably no more frames to read.")
            is_break = True

        return is_break
Example no. 7
    def _detect_from_folder(self):
        for i in range(len(self.dataset)):
            received_frame_id = i + 1
            path, img, im0s, vid_cap = self.dataset[i][0], self.dataset[i][1], \
                                       self.dataset[i][2], self.dataset[i][3]

            # ret mimics the boolean returned when grabbing a frame from a video capture;
            # frame is the current image to be processed
            try:
                ret, frame = True, im0s
                if self._detection_handler(ret, frame, received_frame_id):
                    break

            except Exception as e:
                print(" ---- e:", e)
                break

        print("\n[%s] No more frames to show." % get_current_time())
Example no. 8
    def __initialize_configurations(self):
        print("\n[%s] Initialize YOLO Configuration" % get_current_time())
        t0_load_weight = time.time()
        self.__load_weight()
        t_load_weight = time.time() - t0_load_weight
        print(".. Load `weight` in (%.3fs)" % t_load_weight)

        # Latency: Load YOLOv3 Weight
        print('Latency [Load `weight`]: (%.5fs)' % t_load_weight)

        t0_load_eval = time.time()
        self.__eval_model()
        t_load_eval_model = time.time() - t0_load_eval
        print(".. Load function `eval_model` in (%.3fs)" % t_load_eval_model)

        # Latency: Execute Evaluation Model
        print('Latency [Load `Eval Model`]: (%.5fs)' % t_load_eval_model)

        self.__get_names_colors()
Example no. 9
    def _manage_detection_results(self, det, raw_img, this_frame_id):
        """
        Optional post-detection actions: save cropped files, bbox txt, and bbox images.
        """
        t0_copy_image = time.time()
        original_img = raw_img.copy()
        t1_copy_image = (time.time() - t0_copy_image) * 1000  # to ms
        print('[%s] Latency of copying image data of frame-%s (%.3f ms)' %
              (get_current_time(), str(this_frame_id), t1_copy_image))

        # TODO: This loop could be parallelized to improve performance further.
        # Draw the BBox information onto the image
        idx_detected = 0
        bbox_data = []
        for *xyxy, conf, cls in det:
            numpy_xyxy = get_det_xyxy(xyxy)
            this_label = '%s %.2f' % (self.names[int(cls)], conf)
            this_color = self.colors[int(cls)]
            idx_detected += 1

            # Save cropped files
            self._save_cropped_img(xyxy, original_img, idx_detected,
                                   self.names[int(cls)], this_frame_id,
                                   self.opt.file_ext)
            # Save bbox information
            self._safety_store_txt(xyxy, this_frame_id, self.names[int(cls)],
                                   str(round(float(conf), 2)))

            this_bbox = {
                "obj_idx": idx_detected,
                "xyxy": [str(val) for val in numpy_xyxy],
                "label": this_label,
                "color": [str(color) for color in this_color]
            }
            bbox_data.append(this_bbox)
            plot_one_box(xyxy, raw_img, label=this_label, color=this_color)

        # Save BBox image
        self._save_results(raw_img, this_frame_id)
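
plot_one_box comes from the same YOLO utilities as the other helpers. If it is not available, a minimal OpenCV equivalent looks like the sketch below (the function name draw_box is hypothetical; color, thickness, and font are arbitrary choices):

    import cv2

    def draw_box(img, xyxy, label, color=(0, 255, 0), thickness=2):
        # Draw the rectangle, then put the label just above its top-left corner.
        x1, y1, x2, y2 = (int(v) for v in xyxy)
        cv2.rectangle(img, (x1, y1), (x2, y2), color, thickness)
        cv2.putText(img, label, (x1, max(15, y1 - 5)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)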