Example #1
    def run_detection(self, image, encoder, frame_id):
        # Detect objects, then build DeepSORT detections with appearance features.
        boxes, confidence, classes = self.detect_image(image)
        features = encoder(image, boxes)
        detections = [
            Detection(bbox, score, cls, feature)
            for bbox, score, cls, feature in zip(boxes, confidence, classes,
                                                 features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(
            boxes, self.cfg.DEEPSORT.NMS_MAX_OVERLAP, scores)
        detections = [detections[i] for i in indices]

        # Keep only detections whose centroid falls inside the ROI polygon.
        detections_in_ROI = []
        print("[INFO] detected: ", len(detections))
        for det in detections:
            bbox = det.to_tlbr()
            centroid_det = (int((bbox[0] + bbox[2]) // 2),
                            int((bbox[1] + bbox[3]) // 2))
            if check_in_polygon(centroid_det, self.polygon_ROI):
                detections_in_ROI.append(det)

        print("-----------------")
        return detections, detections_in_ROI
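
All of these snippets rely on a check_in_polygon helper that is not shown here. Since Example #3 builds its regions with shapely's Polygon, a minimal sketch consistent with that usage might be (the real helper may differ):

from shapely.geometry import Point, Polygon

def check_in_polygon(point, polygon):
    # Sketch only: True when the (x, y) point lies inside the shapely Polygon.
    return polygon.contains(Point(point[0], point[1]))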
Example #2
    def read_detection(self, image, frame_info, encoder, frame_id):
        detect_folder_path = self.args.read_detect
        detect_file_path = os.path.join(detect_folder_path,
                                        frame_info + ".txt")

        # Each frame has its own text file of detections, one per line:
        # four bbox coordinates, a confidence score, and a class id.
        with open(detect_file_path, 'r') as detect_file:
            lines = detect_file.readlines()

        boxes = []
        confidence = []
        classes = []

        for line in lines:
            detect = line.split()

            bbox = [
                int(detect[0]),
                int(detect[1]),
                int(detect[2]),
                int(detect[3])
            ]
            score = float(detect[4])
            class_id = int(detect[5])

            boxes.append(bbox)
            confidence.append(score)
            classes.append(class_id)

        features = encoder(image, boxes)
        detections = [
            Detection(bbox, score, cls, feature)
            for bbox, score, cls, feature in zip(boxes, confidence, classes,
                                                 features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(
            boxes, self.cfg.DEEPSORT.NMS_MAX_OVERLAP, scores)
        detections = [detections[i] for i in indices]

        # Keep only detections whose centroid falls inside the ROI polygon.
        detections_in_ROI = []
        for det in detections:
            bbox = det.to_tlbr()
            centroid_det = (int((bbox[0] + bbox[2]) // 2),
                            int((bbox[1] + bbox[3]) // 2))
            if check_in_polygon(centroid_det, self.polygon_ROI):
                detections_in_ROI.append(det)
        print("[INFO] detections in ROI: ", len(detections_in_ROI))
        print("-----------------")
        return detections, detections_in_ROI
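
For reference, the per-frame file that read_detection expects holds one detection per line: four integer bbox coordinates (top-left/bottom-right, matching the logger in Example #5), a float score, and an integer class id. A small illustrative writer producing that layout (names and values are hypothetical):

import os

def write_detection_file(folder, frame_info, boxes, scores, classes):
    # Illustrative: writes 'x1 y1 x2 y2 score class_id' per line.
    path = os.path.join(folder, frame_info + ".txt")
    with open(path, "w") as f:
        for bbox, score, cls in zip(boxes, scores, classes):
            f.write("{} {} {} {} {} {}\n".format(
                int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]),
                float(score), int(cls)))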
Example #3
    def find_MOI_candidate(self, region_list, centroid_list):
        # A movement (MOI) becomes a candidate when at least PIM_THRESHOLD
        # of the track's centroids fall inside that movement's region.
        MOI_candidate = []
        if not centroid_list:
            return MOI_candidate
        for index, region in enumerate(region_list):
            region = Polygon(region)
            centroids_in_region = [
                centroid for centroid in centroid_list
                if check_in_polygon(centroid, region)
            ]
            percent_point = len(centroids_in_region) / len(centroid_list)
            if percent_point >= self.cfg.CAM.PIM_THRESHOLD:
                MOI_candidate.append(index + 1)
        return MOI_candidate
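
To make the threshold test concrete, here is a self-contained version of the same point-in-region vote with toy data (regions, points, and threshold are illustrative, not values from the project):

from shapely.geometry import Point, Polygon

region_list = [
    [(0, 0), (10, 0), (10, 10), (0, 10)],    # region for movement 1
    [(10, 0), (20, 0), (20, 10), (10, 10)],  # region for movement 2
]
centroid_list = [(1, 1), (2, 3), (4, 5), (12, 5)]
PIM_THRESHOLD = 0.5  # stand-in for cfg.CAM.PIM_THRESHOLD

candidates = []
for index, region in enumerate(region_list):
    poly = Polygon(region)
    inside = [c for c in centroid_list if poly.contains(Point(c))]
    if len(inside) / len(centroid_list) >= PIM_THRESHOLD:
        candidates.append(index + 1)

print(candidates)  # [1]: three of the four centroids fall in region 1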
Example #4
    def counting(self,
                 count_frame,
                 cropped_frame,
                 _frame,
                 objs_dict,
                 counted_obj,
                 arr_cnt_class,
                 clf_model=None,
                 clf_labels=None):
        vehicles_detection_list = []
        frame_id = count_frame
        class_id = None
        for (track_id, info_obj) in objs_dict.items():
            centroid = info_obj['centroid']

            # Skip track ids that have already been counted.
            if int(track_id) in counted_obj:
                continue

            # Count an uncounted object once its centroid has left the ROI
            # (it must have entered the ROI first, i.e. flag_in_out == 1).
            if len(centroid) != 0 and not check_in_polygon(
                    centroid,
                    self.polygon_ROI) and info_obj['flag_in_out'] == 1:
                info_obj['point_out'] = centroid
                if self.use_classify:
                    # Crop the object by its best bbox for classification.
                    bbox = info_obj['best_bbox']
                    obj_img = cropped_frame[int(bbox[1]):int(bbox[3]),
                                            int(bbox[0]):int(bbox[2]), :]
                    class_id = self.run_classifier(clf_model, clf_labels,
                                                   obj_img)
                    if class_id == -1:
                        continue
                else:
                    class_id = info_obj['class_id']

                # MOI of the object, from its entry and exit points.
                moi, _ = MOI.compute_MOI(self.cfg, info_obj['point_in'],
                                         info_obj['point_out'])

                counted_obj.append(int(track_id))
                class_id = self.compare_class(class_id)
                arr_cnt_class[class_id][moi - 1] += 1
                print("[INFO] arr_cnt_class: \n", arr_cnt_class)
                vehicles_detection_list.append((frame_id, moi, class_id + 1))

        print("--------------")
        return _frame, arr_cnt_class, vehicles_detection_list
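
MOI.compute_MOI itself is not shown in these examples. As a rough illustration of the cosine variant that Example #6 falls back on, here is a hypothetical sketch that matches a track's entry-to-exit displacement against one reference direction vector per movement; the moi_vectors argument is an assumed stand-in for whatever the config actually stores:

import numpy as np

def compute_MOI_cosine_sketch(moi_vectors, point_in, point_out):
    # Hypothetical: pick the movement whose reference direction has the
    # highest cosine similarity with the track's displacement vector.
    move = np.asarray(point_out, dtype=float) - np.asarray(point_in, dtype=float)
    norm = np.linalg.norm(move)
    if norm == 0:
        return 0  # no displacement, no movement assigned
    move /= norm
    sims = [float(np.dot(move, np.asarray(v, dtype=float) / np.linalg.norm(v)))
            for v in moi_vectors]
    return int(np.argmax(sims)) + 1  # movements are 1-indexed above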
Example #5
    def run_detection(self, image, encoder, tracking, frame_id):
        boxes, confidence, classes = self.detector(image)
        features = encoder(image, boxes)
        detections = [
            Detection(bbox, score, cls, feature)
            for bbox, score, cls, feature in zip(boxes, confidence, classes,
                                                 features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(
            boxes, self.cfg.DEEPSORT.NMS_MAX_OVERLAP, scores)
        detections = [detections[i] for i in indices]

        # Keep only detections whose centroid falls inside the tracking ROI.
        detections_in_ROI = []
        print("[INFO] detected: ", len(detections))
        for det in detections:
            bbox = det.to_tlbr()
            centroid_det = (int((bbox[0] + bbox[2]) // 2),
                            int((bbox[1] + bbox[3]) // 2))
            if check_in_polygon(centroid_det, self.TRACKING_ROI):
                detections_in_ROI.append(det)
        print("[INFO] detections in ROI: ", len(detections_in_ROI))

        # Log all detections for this frame, one per line.
        logFile = os.path.join(log_detected_cam_dir,
                               'frame_' + str(frame_id) + '.txt')
        with open(logFile, "a+") as f:
            for det in detections:
                bbox = det.to_tlbr()
                if len(classes) > 0:
                    cls = det.cls
                    f.write("{} {} {} {} {} {}\n".format(
                        int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]),
                        round(det.confidence * 100, 2), cls))

        print("-----------------")
        return detections, detections_in_ROI
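
Note that each log line written here uses the same six-field layout (x1 y1 x2 y2 score class_id) that read_detection in Example #2 parses back, so a detection pass can be cached to disk and replayed later; the one wrinkle is that the score is logged as a percentage while read_detection reads it back as a raw float.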
Example #6
    def counting(self,
                 count_frame,
                 cropped_frame,
                 _frame,
                 objs_dict,
                 counted_obj,
                 arr_cnt_class,
                 clf_model=None,
                 clf_labels=None):
        vehicles_detection_list = []
        frame_id = count_frame
        class_id = None
        cv2.putText(_frame, "Frame ID: {}".format(str(frame_id)), (1000, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
        for (track_id, info_obj) in objs_dict.items():
            centroid = info_obj['centroid']

            # Skip track ids that have already been counted.
            if int(track_id) in counted_obj:
                continue

            # Count an uncounted object once it has entered the ROI
            # (flag_in_out == 1) and either its track has gone stale for
            # more than two frames or its centroid has left the ROI.
            if (info_obj['last_frame'] + 2 < count_frame
                    and info_obj['flag_in_out'] == 1) or (
                        not check_in_polygon(centroid, self.polygon_ROI)
                        and info_obj['flag_in_out'] == 1):
                info_obj['point_out'] = centroid
                class_id = self.voting_class(info_obj['class_list'])

                # Ignore a special class that is not part of the contest.
                if class_id == 4:
                    continue

                # Compute the MOI of the object.
                if self.count_method == 1:
                    moi = MOI.compute_MOI_cosine(self.cfg,
                                                 info_obj['point_in'],
                                                 info_obj['point_out'])
                elif self.count_method == 2:
                    moi, _, _ = MOI.compute_MOI(self.cfg, info_obj['point_in'],
                                                info_obj['point_out'])
                elif self.count_method == 3:
                    # Fall back to the cosine matcher when the first method
                    # is ambiguous (no match or multiple matches).
                    moi, _, count = MOI.compute_MOI(self.cfg,
                                                    info_obj['point_in'],
                                                    info_obj['point_out'])
                    if count == 0 or count > 1:
                        moi = MOI.compute_MOI_cosine(self.cfg,
                                                     info_obj['point_in'],
                                                     info_obj['point_out'])
                elif self.count_method == 4:
                    # Restrict matching to the MOI candidates suggested by
                    # the regions the track actually passed through.
                    MOI_candidate = self.find_MOI_candidate(
                        self.cfg.CAM.ROI_SPLIT_REGION,
                        info_obj['centroid_list'])
                    moi, count = MOI.compute_MOI_from_candidate(
                        self.cfg, info_obj['point_in'], info_obj['point_out'],
                        MOI_candidate)
                    if count == 0 or count > 1:
                        moi = MOI.compute_MOI_cosine_from_candidate(
                            self.cfg, info_obj['point_in'],
                            info_obj['point_out'], MOI_candidate)

                # Mark the object as counted.
                counted_obj.append(int(track_id))

                if moi > 0:
                    info_obj['moi'] = moi
                    # Drop tracks that are too short for this movement.
                    track_distance = math.sqrt(
                        (info_obj['point_out'][0] -
                         info_obj['point_in'][0])**2 +
                        (info_obj['point_out'][1] -
                         info_obj['point_in'][1])**2)
                    if track_distance < self.cfg.CAM.D_THRESHOLD[moi - 1]:
                        continue

                    if self.args.frame_estimate:
                        # Estimate the frame at which the object crosses the
                        # ROI exit line instead of using the current frame.
                        distance_point_line = self.distance_point2roi(
                            centroid, self.cfg.CAM.LINE_OUT_ROI[moi - 1][0],
                            self.cfg.CAM.LINE_OUT_ROI[moi - 1][1])
                        info_obj['frame'] = frame_id + self.estimate_frame(
                            info_obj['centroid_deque'][0],
                            info_obj['centroid_deque'][-1], moi,
                            info_obj['last_bbox'], distance_point_line)
                    else:
                        info_obj['frame'] = frame_id

                    # Visualize the object as it leaves the ROI.
                    if info_obj['frame'] == frame_id:
                        cv2.circle(_frame,
                                   (int(centroid[0]), int(centroid[1])), 12,
                                   self.color_list[moi - 1], -1)
                        cv2.putText(_frame,
                                    str(class_id + 1) + '.' + str(track_id),
                                    (int(centroid[0]) - 3, int(centroid[1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 2)

                    arr_cnt_class[class_id][moi - 1] += 1
                    vehicles_detection_list.append(
                        (info_obj['frame'], moi, class_id + 1))

        print("--------------")
        return _frame, arr_cnt_class, vehicles_detection_list
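
voting_class is not shown either; its use above suggests a majority vote over the per-frame class predictions accumulated in class_list. A minimal sketch under that assumption:

from collections import Counter

def voting_class_sketch(class_list):
    # Assumed behavior: most frequent class id across the track's frames;
    # -1 when the track has no predictions.
    if not class_list:
        return -1
    return Counter(class_list).most_common(1)[0][0]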
Example #7
    def draw_tracking(self, image, tracker, tracking, detections, frame_id,
                      objs_dict):
        if tracking:
            # Advance the tracker and associate the new detections.
            tracker.predict()
            tracker.update(detections)
            print("[INFO] track in ROI: ", len(tracker.tracks))

            # Draw raw detection boxes in red.
            for det in detections:
                bbox_det = det.to_tlbr()
                cv2.rectangle(image, (int(bbox_det[0]), int(bbox_det[1])),
                              (int(bbox_det[2]), int(bbox_det[3])),
                              (0, 0, 255), 1)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue

                bbox = track.to_tlbr()
                centroid = (int((bbox[0] + bbox[2]) // 2),
                            int((bbox[1] + bbox[3]) // 2))

                if track.track_id not in objs_dict:
                    objs_dict[track.track_id] = {
                        'flag_in_out': 0,
                        'best_bbox': track.det_best_bbox,
                        'best_bboxconf': track.det_confidence,
                        'class_id': track.det_class,
                        # frame when the vehicle leaves the ROI (frame estimate)
                        'frame': -1,
                        'centroid_list': [],
                        'class_list': []
                    }

                # Record the first point (x, y) at which the object enters the ROI.
                if len(centroid) != 0 and check_in_polygon(
                        centroid, self.polygon_ROI) and objs_dict[
                            track.track_id]['flag_in_out'] == 0:
                    objs_dict[track.track_id].update({
                        'flag_in_out': 1,
                        'point_in': centroid,
                        'point_out': None,
                        'frame_in': frame_id
                    })

                # Keep the highest-confidence bbox (and its class) seen so far.
                if objs_dict[
                        track.track_id]['best_bboxconf'] < track.det_confidence:
                    objs_dict[track.track_id].update({
                        'best_bbox': track.det_best_bbox,
                        'best_bboxconf': track.det_confidence,
                        'class_id': track.det_class
                    })

                # Update the object's per-frame state.
                objs_dict[track.track_id]['centroid'] = centroid
                objs_dict[track.track_id]['centroid_list'].append(centroid)
                objs_dict[track.track_id]['last_bbox'] = bbox
                objs_dict[track.track_id]['last_frame'] = frame_id
                objs_dict[track.track_id]['class_list'].append(track.det_class)

                # Draw the track box, label, and centroid.
                cv2.rectangle(image, (int(bbox[0]), int(bbox[1]) - 15),
                              (int(bbox[0] + 50), int(bbox[1])),
                              (255, 255, 255), -1)
                cv2.rectangle(image, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 1)
                cv2.putText(
                    image,
                    str(track.det_class + 1) + "." + str(track.track_id),
                    (int(bbox[0]), int(bbox[1]) - 1), 0, 0.5, (0, 0, 0), 1)
                cv2.circle(image, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

                # Draw the track's trajectory line.
                image = track.draw_track_line(image)

        print("----------------")
        return image, objs_dict
Example #8
    def draw_tracking(self, image, tracker, tracking, detections, frame_id,
                      objs_dict):
        if tracking:
            # Advance the tracker and associate the new detections.
            tracker.predict()
            tracker.update(detections)
            print("[INFO] track in ROI: ", len(tracker.tracks))

            logFile = os.path.join(log_tracking_cam_dir,
                                   'frame_' + str(frame_id) + '.txt')

            with open(logFile, "a+") as f:
                # NOTE: zipping tracks with detections assumes both lists stay
                # index-aligned after tracker.update(); the later revision in
                # Example #7 reads the detection data from the track instead.
                for (track, det) in zip(tracker.tracks, detections):
                    bbox_det = det.to_tlbr()
                    cv2.rectangle(image, (int(bbox_det[0]), int(bbox_det[1])),
                                  (int(bbox_det[2]), int(bbox_det[3])),
                                  (0, 0, 255), 2)

                    if not track.is_confirmed() or track.time_since_update > 1:
                        continue
                    bbox = track.to_tlbr()
                    centroid = (int((bbox[0] + bbox[2]) // 2),
                                int((bbox[1] + bbox[3]) // 2))

                    if track.track_id not in objs_dict:
                        objs_dict[track.track_id] = {
                            'flag_in_out': 0,
                            'best_bbox': det.to_tlbr(),
                            'best_bboxconf': det.confidence,
                            'class_id': det.cls
                        }

                    # Record the first point (x, y) at which the object enters the ROI.
                    if len(centroid) != 0 and check_in_polygon(
                            centroid, self.polygon_ROI) and objs_dict[
                                track.track_id]['flag_in_out'] == 0:
                        objs_dict[track.track_id].update({
                            'flag_in_out': 1,
                            'point_in': centroid,
                            'point_out': None
                        })

                    # Keep the highest-confidence bbox (and its class) seen so far.
                    if objs_dict[
                            track.track_id]['best_bboxconf'] < det.confidence:
                        objs_dict[track.track_id].update({
                            'best_bbox': det.to_tlbr(),
                            'best_bboxconf': det.confidence,
                            'class_id': det.cls
                        })

                    # Update the object's position for this frame.
                    objs_dict[track.track_id]['centroid'] = centroid

                    # Draw the track box, id, and centroid.
                    cv2.rectangle(image, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])),
                                  (255, 255, 255), 2)
                    cv2.putText(image, "ID: " + str(track.track_id),
                                (int(bbox[0]), int(bbox[1])), 0,
                                1e-3 * image.shape[0], (0, 255, 0), 1)
                    cv2.circle(image, (centroid[0], centroid[1]), 4,
                               (0, 255, 0), -1)

                    # Draw the track's trajectory line.
                    image = track.draw_track_line(image)

                    class_name = det.cls
                    print("[INFO] class_name: ", class_name)
                    # Write the track's bbox and class to the log file.
                    f.write("{} {} {} {} {}\n".format(int(bbox[0]),
                                                      int(bbox[1]),
                                                      int(bbox[2]),
                                                      int(bbox[3]),
                                                      class_name))

        print("----------------")
        return image, objs_dict
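
Taken together, the methods above suggest a per-frame loop of detect, track, then count. The sketch below is a hypothetical composition, not the project's actual driver; the counter and tracker objects, the count-array shape, and the argument wiring are assumptions based only on the signatures shown:

import cv2
import numpy as np

def run_pipeline_sketch(counter, tracker, encoder, video_path):
    # Hypothetical driver loop composing run_detection, draw_tracking,
    # and counting from the examples above.
    cap = cv2.VideoCapture(video_path)
    objs_dict, counted_obj = {}, []
    arr_cnt_class = np.zeros((4, 6), dtype=int)  # classes x movements, shape assumed
    frame_id = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frame_id += 1
        # detect -> track -> count, per frame
        _, detections_in_ROI = counter.run_detection(frame, encoder, True,
                                                     frame_id)
        frame, objs_dict = counter.draw_tracking(frame, tracker, True,
                                                 detections_in_ROI, frame_id,
                                                 objs_dict)
        frame, arr_cnt_class, _ = counter.counting(frame_id, frame, frame,
                                                   objs_dict, counted_obj,
                                                   arr_cnt_class)
    cap.release()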