Example #1
def associate_detections_to_trackers(bbox_points, features, track_boxs, tracker_features, iou_threshold=0.3):
    result = bbox_points
    iou_matrix = np.zeros((len(bbox_points), len(track_boxs)), dtype=np.float32)
    # pose_matrix = np.zeros((len(bbox_points),len(track_boxs)),dtype=np.float32)
    feat_matrix = np.zeros((len(bbox_points), len(track_boxs)), dtype=np.float32)
    # dists = matching.iou_distance(bbox_points, track_boxs)

    for d in range(len(bbox_points)):
        for t in range(len(track_boxs)):
            # pose_matrix[d,t] = get_pose_matching_score(bbox_points[d], track_boxs[t], trackers[t])
            iou_matrix[d, t] = iou(bbox_points[d], track_boxs[t])

            # feat_matrix[d,t] = euclidean_dist(features[d].unsqueeze(0), tracker_features[t].unsqueeze(0))

    dists = -iou_matrix
    matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.7)
    for cur, pre in matches:
        # print(cur,pre)
        result[cur] = track_boxs[pre]
    return result
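
Example #1 leaves two helpers undefined: iou over (x1, y1, x2, y2) boxes and matching.linear_assignment. The minimal sketch below is a stand-in under those assumptions; the repositories these snippets come from typically solve the assignment with lap.lapjv, and scipy's Hungarian solver is used here purely for illustration.

import numpy as np
from scipy.optimize import linear_sum_assignment


def iou(box_a, box_b):
    """IoU of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0


def linear_assignment(cost_matrix, thresh):
    """Hungarian matching; pairs whose cost exceeds thresh stay unmatched."""
    if cost_matrix.size == 0:
        return (np.empty((0, 2), dtype=int),
                np.arange(cost_matrix.shape[0]),
                np.arange(cost_matrix.shape[1]))
    rows, cols = linear_sum_assignment(cost_matrix)
    matches = [[r, c] for r, c in zip(rows, cols) if cost_matrix[r, c] <= thresh]
    matched_r = {r for r, _ in matches}
    matched_c = {c for _, c in matches}
    u_rows = np.array([r for r in range(cost_matrix.shape[0]) if r not in matched_r], dtype=int)
    u_cols = np.array([c for c in range(cost_matrix.shape[1]) if c not in matched_c], dtype=int)
    return np.array(matches, dtype=int).reshape(-1, 2), u_rows, u_cols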
Example #2
    def update(self, im_blob, img0):
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        width = img0.shape[1]
        height = img0.shape[0]
        inp_height = im_blob.shape[2]
        inp_width = im_blob.shape[3]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {
            'c': c,
            's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio
        }
        ''' Step 1: Network forward, get detections & embeddings'''
        with torch.no_grad():
            output = self.model(im_blob)[-1]
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            id_feature = output['id']
            id_feature = F.normalize(id_feature, dim=1)

            reg = output['reg'] if self.opt.reg_offset else None
            dets, inds = mot_decode(hm,
                                    wh,
                                    reg=reg,
                                    cat_spec_wh=self.opt.cat_spec_wh,
                                    K=self.opt.K)
            id_feature = _tranpose_and_gather_feat(id_feature, inds)
            id_feature = id_feature.squeeze(0)
            id_feature = id_feature.cpu().numpy()

        dets = self.post_process(dets, meta)
        dets = self.merge_outputs([dets])[1]

        remain_inds = dets[:, 4] > self.opt.conf_thres
        dets = dets[remain_inds]
        id_feature = id_feature[remain_inds]

        # vis
        '''
        for i in range(0, dets.shape[0]):
            bbox = dets[i][0:4]
            cv2.rectangle(img0, (bbox[0], bbox[1]),
                          (bbox[2], bbox[3]),
                          (0, 255, 0), 2)
        cv2.imshow('dets', img0)
        cv2.waitKey(0)
        id0 = id0-1
        '''

        if len(dets) > 0:
            '''Detections'''
            detections = [
                STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30)
                for (tlbrs, f) in zip(dets[:, :5], id_feature)
            ]
        else:
            detections = []
        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        ''' Step 2: First association, with embedding'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        #for strack in strack_pool:
        #strack.predict()
        STrack.multi_predict(strack_pool)
        dists = matching.embedding_distance(strack_pool, detections)
        #dists = matching.gate_cost_matrix(self.kalman_filter, dists, strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool,
                                     detections)
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.7)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        ''' Step 3: Second association, with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [
            strack_pool[i] for i in u_track
            if strack_pool[i].state == TrackState.Tracked
        ]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(
            dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.det_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)
        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        # print('Remained match {} s'.format(t4 - t3))

        self.tracked_stracks = [
            t for t in self.tracked_stracks if t.state == TrackState.Tracked
        ]
        self.tracked_stracks = joint_stracks(self.tracked_stracks,
                                             activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks,
                                             refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks,
                                        self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks,
                                        self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
            self.tracked_stracks, self.lost_stracks)
        # keep only activated tracks for output
        output_stracks = [
            track for track in self.tracked_stracks if track.is_activated
        ]

        print('===========Frame {}=========='.format(self.frame_id))
        print('Activated: {}'.format(
            [track.track_id for track in activated_starcks]))
        print('Refind: {}'.format([track.track_id
                                   for track in refind_stracks]))
        print('Lost: {}'.format([track.track_id for track in lost_stracks]))
        print('Removed: {}'.format(
            [track.track_id for track in removed_stracks]))

        return output_stracks
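
The first association in Example #2 fuses an appearance cost with Kalman motion gating. The sketch below covers only the appearance part, assuming each STrack keeps a smoothed embedding in smooth_feat and each detection keeps its embedding in curr_feat; matching.embedding_distance in FairMOT/JDE-style code is typically equivalent up to implementation details.

import numpy as np
from scipy.spatial.distance import cdist


def embedding_distance(tracks, detections, metric='cosine'):
    """Cosine-distance cost matrix between track and detection embeddings."""
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([det.curr_feat for det in detections], dtype=np.float32)
    trk_features = np.asarray([trk.smooth_feat for trk in tracks], dtype=np.float32)
    # cosine distance lies in [0, 2]; clip at 0 to keep costs non-negative
    cost_matrix = np.maximum(0.0, cdist(trk_features, det_features, metric=metric))
    return cost_matrix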
Example #3
    def update(self, im_blob, img0):

        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        width = img0.shape[1]
        height = img0.shape[0]
        inp_height = im_blob.shape[2]
        inp_width = im_blob.shape[3]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {
            'c': c,
            's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio
        }
        ''' Step 1: Network forward, get detections & embeddings'''
        output = self.model.execute(im_blob)

        # sigmoid
        hm = 1 / (1 + np.exp(-output[0]))
        wh = output[1]
        reg = output[2]
        id_feature = output[3]
        '''
        dets: n * 6 matrix where n is number of detections
              6 elements:
                bbox_top_left x, y; bbox_bottom_right x, y; conf_score; class
        inds: indices of detections in the flattened output map (152*272)
        '''
        dets, inds = mot_decode(hm, wh, reg, conf_thres=self.opt.conf_thres)

        # n * 128, where 128 is id feature vector size
        id_feature = get_feat_from_idx(id_feature, inds)

        # rescale boxes to the original image and drop detections with small bboxes
        dets = self.post_process(dets, meta)
        dets = self.merge_outputs([dets])[1]

        if len(dets) > 0:
            '''Detections'''
            # dets[:, :5]: bboxes (4), score (1)
            detections = [
                STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30)
                for (tlbrs, f) in zip(dets[:, :5], id_feature)
            ]
        else:
            detections = []
        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        ''' Step 2: First association, with embedding'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)

        # Predict the current location with KF
        STrack.multi_predict(strack_pool)

        # strack_pool:  tracklets carried over from previous frames
        # detections:   detections in the current frame
        dists = matching.embedding_distance(strack_pool, detections)

        #dists = matching.iou_distance(strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool,
                                     detections)

        # matches:      [(idx into strack_pool, idx into detections)]
        # u_track:      indices of unmatched tracks
        # u_detection:  indices of unmatched detections
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.4)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        ''' Step 3: Second association, with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [
            strack_pool[i] for i in u_track
            if strack_pool[i].state == TrackState.Tracked
        ]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(
            dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.det_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)
        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        # print('Remained match {} s'.format(t4 - t3))

        self.tracked_stracks = [
            t for t in self.tracked_stracks if t.state == TrackState.Tracked
        ]
        self.tracked_stracks = joint_stracks(self.tracked_stracks,
                                             activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks,
                                             refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks,
                                        self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks,
                                        self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(
            self.tracked_stracks, self.lost_stracks)
        # keep only activated tracks for output
        output_stracks = [
            track for track in self.tracked_stracks if track.is_activated
        ]

        return output_stracks
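
Example #3 gathers one 128-dimensional embedding per detection via get_feat_from_idx. A plausible numpy implementation is sketched below, assuming id_feature has shape (1, C, H, W) and inds holds flat indices into the H*W output grid (152*272 here); the original helper may differ in details such as normalisation.

import numpy as np


def get_feat_from_idx(id_feature, inds):
    """Gather one C-dimensional embedding per flat index from a (1, C, H, W) map."""
    _, c, h, w = id_feature.shape
    flat = id_feature.reshape(c, h * w)      # (C, H*W)
    feats = flat[:, np.ravel(inds)].T        # (n, C)
    # L2-normalise each embedding, matching the F.normalize step in Example #2
    norms = np.linalg.norm(feats, axis=1, keepdims=True)
    return feats / np.maximum(norms, 1e-12)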
Example #4
 def match(self, img, detections, features, frm_id):
     # get track state: split current tracks into still-tracked and lost sets
     is_lost_arr = np.array([track.is_lost for track in self.tracks], dtype=bool)
     tracked_track_idx = np.where(~is_lost_arr)[0]
     lost_track_idx = np.where(is_lost_arr)[0]
     tracked_stracks = [self.tracks[x] for x in tracked_track_idx]
     lost_stracks = [self.tracks[x] for x in lost_track_idx]
     print(len(self.tracks))
     self.tracks = []
     ## 1 First match: associate the currently tracked tracks with detections
     dists = matching.nearest_reid_distance(tracked_stracks,
                                            detections,
                                            features,
                                            metric='euclidean')
     # the Kalman filter does not change the costs here; it only gates out pairs that are too far apart
     dists = matching.gate_cost_matrix(self.kf, dists, tracked_stracks,
                                       detections)
     # matches has format [(track_idx, det_idx)]
     matches, u_track, u_detection = matching.linear_assignment(
         dists, thresh=self.min_ap_dist)
     for itracked, idet in matches:
         print "first match is ", tracked_stracks[itracked].track_id
         tracked_stracks[itracked].update(detections[idet], features[idet],
                                          frm_id, self.kf)
         self.tracks.append(tracked_stracks[itracked])
     detections = [detections[idet] for idet in u_detection]
     features = [features[idet] for idet in u_detection]
     ## 2 Match the lost tracks
     dists = matching.nearest_reid_distance(lost_stracks,
                                            detections,
                                            features,
                                            metric='euclidean')
     dists = matching.gate_cost_matrix(self.kf, dists, lost_stracks,
                                       detections)
     matches, u_lost, u_detection = matching.linear_assignment(
         dists, thresh=self.min_ap_dist)
     for itracked, idet in matches:
         print "second match is ", lost_stracks[itracked].track_id
         lost_stracks[itracked].update(detections[idet], features[idet],
                                       frm_id, self.kf)
         self.tracks.append(lost_stracks[itracked])
     ## 3 Re-match the tracks left unmatched in step 1,
     ## this time using IOU matching
     detections = [detections[i] for i in u_detection]
     features = [features[idet] for idet in u_detection]
     r_tracked_stracks = [tracked_stracks[idx] for idx in u_track]
     dists = matching.iou_distance(r_tracked_stracks, detections)
     matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                thresh=0.7)
     for itracked, idet in matches:
         print "third match is ", r_tracked_stracks[itracked].track_id
         r_tracked_stracks[itracked].update(detections[idet],
                                            features[idet], frm_id, self.kf)
         self.tracks.append(r_tracked_stracks[itracked])
     # update the tracks that are still unmatched
     for idx in u_track:
         r_tracked_stracks[idx].update(frm_id=frm_id, kalman_filter=self.kf)
         self.tracks.append(r_tracked_stracks[idx])
     detections = [detections[i] for i in u_detection]
     features = [features[idet] for idet in u_detection]
     ## 4 Re-match the lost tracks left unmatched in step 2
     r_lost_stracks = [lost_stracks[idx] for idx in u_lost]
     dists = matching.iou_distance(r_lost_stracks, detections)
     matches, u_unconfirmed, u_detection = matching.linear_assignment(
         dists, thresh=0.7)
     for itracked, idet in matches:
         print "forth match is ", r_lost_stracks[itracked].track_id
         r_lost_stracks[itracked].update(detections[idet], features[idet],
                                         frm_id, self.kf)
         self.tracks.append(r_lost_stracks[itracked])
     # update the remaining unmatched lost tracks and drop those lost for longer than max_lost_track_time
     for idx in u_unconfirmed:
         r_lost_stracks[idx].update(frm_id=frm_id, kalman_filter=self.kf)
         if r_lost_stracks[idx].age >= P['max_lost_track_time']:
             continue
         self.tracks.append(r_lost_stracks[idx])
     detections = [detections[i] for i in u_detection]
     features = [features[idet] for idet in u_detection]
     # create new tracks for the remaining detections
     for det, fea in zip(detections, features):
         self.tracks.append(
             BaseTrack(det, fea, frm_id, self.track_id, self.kf))
         self.track_id += 1
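
Both re-ID matching rounds in Example #4 gate the cost matrix with matching.gate_cost_matrix before linear assignment. The sketch below shows the usual DeepSORT/JDE-style gating, under the assumption that each track exposes mean and covariance, detections provide a to_xyah() measurement, and the Kalman filter provides gating_distance(); chi2inv95 is the 95% chi-square quantile table used as the gate.

import numpy as np

# 95% chi-square quantiles for 1..4 degrees of freedom (DeepSORT-style table)
chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877}


def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):
    """Set the cost to infinity for pairs whose Mahalanobis distance exceeds the gate."""
    if cost_matrix.size == 0:
        return cost_matrix
    gating_dim = 2 if only_position else 4
    gating_threshold = chi2inv95[gating_dim]
    measurements = np.asarray([det.to_xyah() for det in detections])
    for row, track in enumerate(tracks):
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        cost_matrix[row, gating_distance > gating_threshold] = np.inf
    return cost_matrix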
Example #5
    def update(self, detections=None):
        self.frame_id += 1
        activated_tracks = []
        refind_tracks = []
        lost_tracks = []
        removed_tracks = []

        if not detections:  # treat None as an empty detection list
            detections = []

        ''' Add newly detected tracklets to tracked_tracks'''
        unconfirmed = []
        tracked_tracks = []  # type: list[Track]
        for track in self.tracked_tracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_tracks.append(track)

        ''' Matching with embedding'''
        track_pool = join_tracklists(tracked_tracks, self.lost_tracks)
        # Predict location with Kalman Filter
        # for track in track_pool:
        #     track.predict()  #Predict Individually each track
        Track.multi_predict(track_pool)  # Predict Together all Tracks
        # Get Embedding Distance
        dists = embedding_distance(track_pool, detections)
        # dists = gate_cost_matrix(self.kalman_filter, dists, track_pool, detections)
        dists = fuse_motion(
            self.kalman_filter, dists, track_pool, detections)
        matches, u_track, u_detection = linear_assignment(
            dists, thresh=0.7)

        for track_idx, det_idx in matches:
            track = track_pool[track_idx]
            det = detections[det_idx]
            if self.lsh:
                self.lsh.index(det.curr_feat, track.track_id)  # LSH Indexing

            if track.state == TrackState.Tracked:
                track.update(detections[det_idx], self.frame_id)
                activated_tracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_tracks.append(track)

        '''LSH Similarity - Higher Effect'''

        if self.lsh and self.lsh_mode == 2:
            unmatchedDetections = [(detections[i], i) for i in u_detection]
            unmatchedTracks = [(track_pool[j], j)
                               for j in u_track if track_pool[j].state == TrackState.Tracked]
            matches, u_track, u_detection = checklshash(
                self.lsh, unmatchedDetections, unmatchedTracks)
            for track_idx, det_idx in matches:
                track = [t for t, i in unmatchedTracks if i == track_idx][0]
                det = detections[det_idx]
                self.lsh.index(det.curr_feat, track.track_id)  # LSH Indexing
                if track.state == TrackState.Tracked:
                    track.update(det, self.frame_id)
                    activated_tracks.append(track)
                else:
                    track.re_activate(det, self.frame_id, new_id=False)
                    refind_tracks.append(track)

        ''' Matching with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_tracks = [track_pool[i]
                            for i in u_track if track_pool[i].state == TrackState.Tracked]
        dists = iou_distance(r_tracked_tracks, detections)
        matches, u_track, u_detection = linear_assignment(
            dists, thresh=0.5)

        for track_idx, det_idx in matches:
            track = r_tracked_tracks[track_idx]
            det = detections[det_idx]
            if self.lsh:
                self.lsh.index(det.curr_feat, track.track_id)  # LSH Indexing
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_tracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_tracks.append(track)

        '''LSH Similarity - Reduced Effect'''
        if self.lsh and self.lsh_mode == 1:
            unmatchedDetections = [(detections[i], i) for i in u_detection]
            unmatchedTracks = [(r_tracked_tracks[j], j) for j in u_track]
            matches, u_unconfirmed, u_detection = checklshash(
                self.lsh, unmatchedDetections, unmatchedTracks)
            for track_idx, det_idx in matches:
                track = [t for t, i in unmatchedTracks if i == track_idx][0]
                det = detections[det_idx]
                self.lsh.index(det.curr_feat, track.track_id)  # LSH Indexing
                if track.state == TrackState.Tracked:
                    track.update(det, self.frame_id)
                    activated_tracks.append(track)
                else:
                    track.re_activate(det, self.frame_id, new_id=False)
                    refind_tracks.append(track)

        # Mark unmatched tracks as lost
        for it in u_track:
            track = r_tracked_tracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_tracks.append(track)

        '''New unconfirmed tracks'''
        detections = [detections[i] for i in u_detection]
        dists = iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = linear_assignment(
            dists, thresh=0.7)
        for track_idx, det_idx in matches:
            unconfirmed[track_idx].update(detections[det_idx], self.frame_id)
            activated_tracks.append(unconfirmed[track_idx])

        '''Remove unconfirmed tracks'''
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_tracks.append(track)

        """ Initialise new tracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.det_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_tracks.append(track)

        """ Mark Track as lost"""
        for track in self.lost_tracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_tracks.append(track)

        # print('Remained match {} s'.format(t4 - t3))

        self.tracked_tracks = [
            t for t in self.tracked_tracks if t.state == TrackState.Tracked]
        self.tracked_tracks = join_tracklists(
            self.tracked_tracks, activated_tracks)
        self.tracked_tracks = join_tracklists(
            self.tracked_tracks, refind_tracks)
        self.lost_tracks = remove_from_tracklists(
            self.lost_tracks, self.tracked_tracks)
        self.lost_tracks.extend(lost_tracks)
        self.lost_tracks = remove_from_tracklists(
            self.lost_tracks, self.removed_tracks)
        self.removed_tracks.extend(removed_tracks)
        self.tracked_tracks, self.lost_tracks = remove_duplicate_tracks(
            self.tracked_tracks, self.lost_tracks)
        # keep only activated tracks for output
        output_tracks = [
            track for track in self.tracked_tracks if track.is_activated]

        logger.debug(
            '-----------Frame No. {}-----------'.format(self.frame_id))
        logger.debug('Active: {}'.format(
            [track.track_id for track in activated_tracks]))
        logger.debug('ReFound: {}'.format(
            [track.track_id for track in refind_tracks]))
        logger.debug('Lost: {}'.format(
            [track.track_id for track in lost_tracks]))
        logger.debug('Deleted: {}'.format(
            [track.track_id for track in removed_tracks]))

        return output_tracks
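
Example #5 relies on the bookkeeping helpers join_tracklists, remove_from_tracklists and remove_duplicate_tracks, which are not shown. The first two are sketched below under the assumption that they behave like JDE's joint_stracks/sub_stracks, i.e. a union and a difference keyed on track_id.

def join_tracklists(tlista, tlistb):
    """Union of two track lists, keeping the first occurrence of each track_id."""
    seen = set()
    res = []
    for t in list(tlista) + list(tlistb):
        if t.track_id not in seen:
            seen.add(t.track_id)
            res.append(t)
    return res


def remove_from_tracklists(tlista, tlistb):
    """Tracks of tlista whose track_id does not appear in tlistb."""
    ids_b = {t.track_id for t in tlistb}
    return [t for t in tlista if t.track_id not in ids_b]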