Example #1
    def update(self, bbox_xywh, confidences, ori_img):
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        features = self._get_features(bbox_xywh, ori_img)
        detections = [
            Detection(bbox_xywh[i], conf, features[i])
            for i, conf in enumerate(confidences) if conf > self.min_confidence
        ]

        # run non-maximum suppression
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = non_max_suppression(boxes, self.nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)

        # output bbox identities
        outputs = []
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            box = track.to_tlwh()
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            track_id = track.track_id
            outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=int))
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        return outputs
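A minimal driver sketch for an update() method with this signature. The DeepSort wrapper class, the detector callable, and the file paths are hypothetical names used only for illustration; they are not defined in the example above.

    # Hypothetical driver loop; DeepSort, detector and the file paths are assumptions.
    import cv2
    import numpy as np

    tracker = DeepSort("checkpoint/ckpt.t7")   # assumed wrapper exposing update()
    cap = cv2.VideoCapture("input.mp4")

    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # assumed detector returning center-format boxes (cx, cy, w, h) and scores
        bbox_xywh, confidences = detector(frame)
        outputs = tracker.update(np.asarray(bbox_xywh), np.asarray(confidences), frame)
        for x1, y1, x2, y2, track_id in outputs:   # empty when nothing is tracked
            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
            cv2.putText(frame, str(track_id), (int(x1), int(y1) - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    cap.release()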
Example #2
    def update(self, bbox_xywh, confidences, ori_img, all_name, start_time):
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        features = self._get_features(bbox_xywh, ori_img)
        detections = [
            Detection(bbox_xywh[i], conf, features[i], all_name[i], start_time)
            for i, conf in enumerate(confidences) if conf > self.min_confidence
        ]

        # run non-maximum suppression
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])

        # non-maximum suppression (NMS)
        indices = non_max_suppression(boxes, self.nms_max_overlap, scores)

        detections = [detections[i] for i in indices]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections, start_time)
        # output bbox identities
        outputs = []

        return_name = []
        stay_time_all = []
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                # skip tracks that are not confirmed or have not been updated recently
                continue
            # only output tracks that violate the dwell-time limit
            if (start_time - track.start_time) < self.bad_time:
                continue
            box = track.to_tlwh()
            x1, y1, x2, y2 = self._xywh_to_xyxy(box)
            track_id = track.track_id

            class_name = track.class_name
            # print(class_name)
            outputs.append(np.array([x1, y1, x2, y2, track_id], dtype=int))
            return_name.append(class_name)
            part = [class_name + str(track_id), start_time - track.start_time]
            stay_time_all.append(part)
        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)
        # both tentative and confirmed tracks could have their dwell time recorded
        # stay_time_all=[ [track.class_name,track.track_id,start_time-track.start_time] \
        #                 for track in self.tracker.tracks \
        #                 if(start_time-track.start_time)>self.bad_time and track.is_confirmed]
        return outputs, return_name, stay_time_all
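A hedged sketch of how this variant's extra inputs and outputs might be driven. It assumes start_time is a wall-clock timestamp taken at frame capture, self.bad_time is a dwell-time threshold in the same unit, and all_name carries the per-detection class labels; tracker, detector and frame are hypothetical names, not part of the example above.

    # Hypothetical per-frame call; tracker, detector and frame are assumptions.
    import time

    frame_time = time.time()   # wall-clock timestamp for this frame
    bbox_xywh, confidences, all_name = detector(frame)
    outputs, names, stay_time_all = tracker.update(
        bbox_xywh, confidences, frame, all_name, frame_time)

    # stay_time_all holds [class_name + track_id, dwell_time] pairs for the
    # tracks whose dwell time exceeded the threshold
    for label, dwell in stay_time_all:
        print("{} has stayed for {:.1f} s".format(label, dwell))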
Example #3
    def update(self, bbox_xywh, confidences, class_num, ori_img):
        self.height, self.width = ori_img.shape[:2]
        # generate detections
        detections = []
        try:
            features = self._get_features(bbox_xywh, ori_img)
            for i, conf in enumerate(confidences):
                if conf >= self.min_confidence and features.any():
                    # the Detection class is defined in detection.py
                    detections.append(
                        Detection(bbox_xywh[i], conf, class_num[i],
                                  features[i]))
        except Exception as ex:
            # TODO Error: OpenCV(4.1.1) /io/opencv/modules/imgproc/src/resize.cpp:3720: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
            print("{} Error: {}".format(
                time.strftime("%H:%M:%S", time.localtime()), ex))
            # print('Error or video finish ')

        # run non-maximum suppression
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = non_max_suppression(boxes, self.nms_max_overlap,
                                      scores)  # e.g. indices = [0] or [0, 1]
        detections = [detections[i]
                      for i in indices]  # keep only the detections selected by NMS
        # print(detections[0].confidence)
        # confidence: 0.5057685971260071
        # print(detections)
        # [bbox_xywh: [1508.47619629  483.33926392   34.95910645   77.69906616],
        #  confidence: 0.5140249729156494,
        #  bbox_xywh: [1678.99377441  526.4251709    36.55554199   80.11364746],
        #  confidence: 0.5057685971260071]

        # update tracker
        self.tracker.predict()
        # the detections passed in here are the NMS-filtered subset
        self.tracker.update(detections)
        # print("confidence {}".format(detections[0].confidence))

        # output bbox identities
        # each Track object stores its own state
        outputs = []
        # self.tracker.tracks holds the list of Track instances
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            box = track.to_tlwh()  # (top-left x, top-left y, width, height), refreshed every frame
            x1, y1, x2, y2 = self._xywh_to_xyxy_centernet(box)  # convert to box corner coordinates

            # draw the motion trajectory
            # trajectory point: center of the detection box
            center = (int((x1 + x2) / 2), int((y1 + y2) / 2))  # record the box center for every frame
            # alternative: use the bottom of the detection box instead
            # center = (int((x1 + x2) / 2), int(y2))

            points[track.track_id].append(center)  # FIFO queue of trajectory centers per track id
            # print(points[1][-1])  # inspect the latest stored center of track id 1
            # for j in range(1, len(points[track.track_id])):
            #     if points[track.track_id][j - 1] is None or points[track.track_id][j] is None:
            #        continue
            #     # thickness = int(np.sqrt(32 / float(j + 1)) * 2)  # thick at the first point, then the line gradually thins
            #     cv2.line(ori_img,(points[track.track_id][j-1]), (points[track.track_id][j]),(8,196,255),thickness = 3,lineType=cv2.LINE_AA)

            track_id = track.track_id
            confidence = track.confidence * 100  # track confidence as a percentage
            cls_num = track.class_num
            # print("track_id {} confidence {}".format(track_id, confidence))

            outputs.append(
                np.array([x1, y1, x2, y2, track_id, confidence, cls_num],
                         dtype=int))

        if len(outputs) > 0:
            outputs = np.stack(outputs, axis=0)

        return outputs, points
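points is used above but not defined in this snippet. Consistent with the FIFO-queue comment and the commented-out cv2.line code, a plausible module-level definition and a trail-drawing helper (both assumptions, not taken from the source) could look like this:

    from collections import defaultdict, deque

    import cv2

    # one bounded FIFO of center points per track id; maxlen=32 is an arbitrary choice
    points = defaultdict(lambda: deque(maxlen=32))

    def draw_trail(img, track_id):
        """Draw the stored trajectory of one track as connected line segments."""
        pts = points[track_id]
        for j in range(1, len(pts)):
            if pts[j - 1] is None or pts[j] is None:
                continue
            cv2.line(img, pts[j - 1], pts[j], (8, 196, 255),
                     thickness=3, lineType=cv2.LINE_AA)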