コード例 #1
0
    def __init__(self, track_id, box, vx, vy, ax, ay):
        """Set up a track around an 8-state / 4-measurement Kalman filter.

        track_id: identifier assigned to this track.
        box: initial detection; .left/.bottom/.right/.top and .frame are read.
        vx, vy, ax, ay: transition coefficients written into F; they appear to
            act as dt-like gains coupling position->velocity (vx, vy) and
            velocity->acceleration (ax, ay) -- TODO confirm against
            convert_bbox_to_z's state layout.
        """
        self.track_id = track_id
        self.boxes = [box]  # full box history for this track
        self.kf = KalmanFilter(dim_x=8, dim_z=4)
        # State transition matrix: rows 0-3 are the measured quantities,
        # rows 4-7 the hidden velocity/acceleration terms.
        self.kf.F = np.array(
            [[1, 0, 0, 0, vx, 0, 0, 0],
             [0, 1, 0, 0, 0, vy, 0, 0],
             [0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, ax, 0],
             [0, 0, 0, 0, 0, 1, 0, ay],
             [0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 1]])
        # Measurement function: only the first four state entries are observed.
        self.kf.H = np.array(
            [[1, 0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0]])

        # Inflate measurement noise on the last two observed components and
        # damp process noise on the unobserved half of the state.
        self.kf.R[2:, 2:] *= 10.
        self.kf.P[4:, 4:] *= 1000.  # give high uncertainty to the unobservable initial velocities
        self.kf.P *= 10.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01
        # Seed the observable state from the detection corners; the
        # (left, bottom, right, top) order must match convert_bbox_to_z.
        self.kf.x[:4] = convert_bbox_to_z(np.array([box.left, box.bottom, box.right, box.top]))
        self.kf.predict()
        self.last_frame = box.frame  # frame index of the newest box
        self.real_boxes = 1  # boxes backed by real detections
        self.counted = False  # whether this track has been counted yet
        self.phantom_boxes = 0  # current run of predicted-only boxes
        self.max_phantom_boxes = 0  # longest such run observed
コード例 #2
0
ファイル: mot_tracker.py プロジェクト: intelli8786/MOTDE
    def __init__(self,
                 min_cls_score=0.4,
                 min_ap_dist=0.64,
                 max_time_lost=30,
                 use_tracking=True,
                 use_refind=True,
                 models=None,
                 configs=None):
        """Configure the online tracker.

        min_cls_score: minimum classifier confidence to keep a detection.
        min_ap_dist: maximum appearance distance for association.
        max_time_lost: frames a lost track survives before removal.
        use_tracking / use_refind: enable the tracking and re-find stages.
        models, configs: optional dicts of models / configuration; None
            (the default) means a fresh empty dict per instance.
        """
        # Bug fix: the previous ``models={}`` / ``configs={}`` defaults were
        # mutable default arguments shared by every instance, so mutating one
        # tracker's dict would leak state into all other trackers.
        self.models = {} if models is None else models
        self.configs = {} if configs is None else configs

        self.min_cls_score = min_cls_score
        self.min_ap_dist = min_ap_dist
        self.max_time_lost = max_time_lost

        self.kalman_filter = KalmanFilter()

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.use_refind = use_refind
        self.use_tracking = use_tracking
        self.classifier = PatchClassifier()
        self.reid_model = load_reid_model()

        self.frame_id = 0
コード例 #3
0
    def __init__(self, opt, frame_rate=30):
        """Set up the Darknet detector with Apex mixed precision ('O3') and
        the tracker bookkeeping state.

        opt: options namespace; .cfg, .weights, .conf_thres and .track_buffer
            are read here.
        frame_rate: source video frame rate; scales the track buffer.
        """
        self.opt = opt
        self.model = Darknet(opt.cfg)
        # load_darknet_weights(self.model, opt.weights)

        # Restore checkpoint weights, mapping tensors directly onto the GPU.
        self.model.load_state_dict(torch.load(opt.weights,
                                              map_location='cuda')['model'],
                                   strict=False)
        self.model.to('cuda')
        opt_level = 'O3'  # ChangeHere
        # amp.initialize requires an optimizer argument even for
        # inference-only use; this SGD instance is otherwise unused.
        optimizer = torch.optim.SGD(self.model.parameters(),
                                    lr=0.01,
                                    momentum=0.9)
        self.model, optimizer = amp.initialize(self.model,
                                               optimizer,
                                               opt_level=opt_level)
        # NOTE(review): rebinding drops the optimizer reference right after
        # amp.initialize -- presumably to free its state for inference;
        # confirm intent before removing.
        optimizer = []
        self.model.cuda().eval()

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        # Lost-track lifetime scales with frame rate (30 fps baseline).
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size

        self.kalman_filter = KalmanFilter()
コード例 #4
0
    def __init__(self, min_det_score=0.4, min_cls_dist=0.5, max_time_lost=30,
        use_tracking=True):
        """Configure the tracker.

        min_det_score: detection confidence threshold.
        min_cls_dist: template-matching (classifier) distance threshold.
        max_time_lost: frames a lost track survives before removal.
        use_tracking: enable the single-object tracking stage.
        """
        self.min_det_score = min_det_score
        self.min_cls_dist = min_cls_dist
        self.max_time_lost = max_time_lost
        self.kalman_filter = KalmanFilter()
        # Track pools, grouped by lifecycle state.
        self.tracked_stracks, self.lost_stracks = [], []
        self.removed_stracks = []

        self.use_tracking = use_tracking
        self.classifier = PatchClassifier()

        self.frame_id = 0
コード例 #5
0
def extrapolate_metadata(directory_path, database_name, mongo_container):
    """Kalman-smooth image EXIF geo-metadata and persist it to MongoDB."""
    metadata = load_exif_data(directory_path)

    # Seed the filter from the first image: position in UTM, zero velocity,
    # and the reported GPS accuracy as initial position noise.
    first = metadata[0]
    first_utm = utm.from_latlon(
        first.get("latitude", None),
        first.get("longitude", None))
    accuracy = first.get("accuracy", None)

    kalman = KalmanFilter()
    kalman.initialize(
        init_data=np.array([first_utm[0], first_utm[1], 0, 0]).T,
        init_noise_vec=np.array([accuracy, accuracy, 2, 2]).T,
        start_timestamp=first.get("time", None))

    smoothed = update_using_kalman(kalman, metadata)

    # Write the smoothed coordinates back to the geographic database.
    geographic_db = GeographicMongoDB(database_name, mongo_container)
    insert_data_to_mongo(geographic_db, smoothed)
コード例 #6
0
    def __init__(self, opt, frame_rate=30):
        """Restore the detector from a checkpoint and initialize tracker
        bookkeeping (track pools, thresholds, Kalman filter)."""
        self.opt = opt
        # Build the detector and load a training checkpoint onto the CPU
        # first, then move the model to the GPU for evaluation.
        net = Darknet(opt.cfg)
        checkpoint = torch.load(opt.weights, map_location='cpu')['model']
        net.load_state_dict(checkpoint, strict=False)
        self.model = net.cuda().eval()

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        # Buffer length scales with frame rate relative to a 30 fps baseline.
        window = int(frame_rate / 30.0 * opt.track_buffer)
        self.buffer_size = window
        self.max_time_lost = window

        self.kalman_filter = KalmanFilter()
コード例 #7
0
ファイル: multitracker.py プロジェクト: Ascend/samples
    def __init__(self, opt, model, frame_rate=30):
        """Wrap an already-built model with tracker bookkeeping state."""
        self.opt = opt
        print('Creating model...')
        self.model = model

        # Track pools grouped by lifecycle state.
        self.tracked_stracks, self.lost_stracks = [], []
        self.removed_stracks = []

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        # Buffer length scales with frame rate relative to a 30 fps baseline.
        window = int(frame_rate / 30.0 * opt.track_buffer)
        self.buffer_size = window
        self.max_time_lost = window
        self.max_per_image = opt.K
        # Per-channel normalization constants, reshaped to broadcast over HWC.
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
コード例 #8
0
ファイル: mot_tracker.py プロジェクト: kibernetika-ai/MOTDT
    def __init__(self, **kwargs):
        """Configure the tracker entirely from keyword settings.

        Expected keys: tracker_min_cls_score, tracker_min_ap_dist,
        tracker_max_time_lost, tracker_no_refind, tracker_no_tracking,
        tracker_squeezenet_ckpt, tracker_googlenet_ckpt.
        """
        cfg = kwargs
        self.min_cls_score = cfg['tracker_min_cls_score']
        self.min_ap_dist = cfg['tracker_min_ap_dist']
        self.max_time_lost = cfg['tracker_max_time_lost']

        self.kalman_filter = KalmanFilter()

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        # The CLI exposes negative flags; invert them into positive switches.
        self.use_refind = not cfg['tracker_no_refind']
        self.use_tracking = not cfg['tracker_no_tracking']
        self.classifier = PatchClassifier(
            ckpt=cfg['tracker_squeezenet_ckpt'])
        self.reid_model = load_reid_model(
            ckpt=cfg['tracker_googlenet_ckpt'])

        self.frame_id = 0
コード例 #9
0
    def __init__(self, opt, frame_rate=30):
        """Build the joint detection-and-embedding model and the tracker's
        bookkeeping state."""
        self.opt = opt
        # Joint model selected by name from the registry.
        self.jde = get_joint_model(opt.joint_model)(opt)

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        # Buffer length scales with frame rate relative to a 30 fps baseline.
        window = int(frame_rate / 30.0 * opt.track_buffer)
        self.buffer_size = window
        self.max_time_lost = window

        self.kalman_filter = KalmanFilter()
コード例 #10
0
ファイル: multitracker.py プロジェクト: dqkqd/aihcmc
    def __init__(self,
                 max_age=10,
                 buffer_size=30,
                 det_thresh=0.5,
                 thresh1=0.7,
                 thresh2=0.5,
                 thresh3=0.7):
        """Configure the tracker.

        max_age: frames a lost track survives before removal.
        buffer_size: feature/history buffer length.
        det_thresh: detection confidence threshold.
        thresh1/thresh2/thresh3: association thresholds for the cascade.
        """
        self.det_thresh = det_thresh
        self.thresh1, self.thresh2, self.thresh3 = thresh1, thresh2, thresh3

        # Track pools grouped by lifecycle state.
        self.tracked_stracks, self.lost_stracks = [], []
        self.removed_stracks = []

        self.frame_id = 0
        self.buffer_size = buffer_size
        self.max_time_lost = max_age
        self.kalman_filter = KalmanFilter()
コード例 #11
0
    def __init__(self,
                 min_cls_score=0.4,
                 min_ap_dist=0.64,
                 max_time_lost=30,
                 use_tracking=True,
                 use_refind=True,
                 metric_net=False,
                 ide=False,
                 metric_net_ckpt='/home/houyz/Code/DeepCC/src/hyper_score/logs/1fps_train_IDE_40/GT/metric_net_L2_1200.pth.tar'):
        """Configure the tracker.

        min_cls_score: minimum classifier confidence to keep a detection.
        min_ap_dist: maximum appearance distance for association (overridden
            below when ide or metric_net is enabled).
        max_time_lost: frames a lost track survives before removal.
        use_tracking / use_refind: enable the tracking and re-find stages.
        metric_net: use a learned metric network for appearance distance.
        ide: use IDE re-ID features (256-dim) instead of the default 512-dim.
        metric_net_ckpt: checkpoint path for the metric network. Previously a
            hard-coded absolute path; now a parameter with that path as the
            default, so existing callers are unaffected.
        """
        self.min_cls_score = min_cls_score
        self.min_ap_dist = min_ap_dist
        self.max_time_lost = max_time_lost

        self.kalman_filter = KalmanFilter()

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.use_refind = use_refind
        self.use_tracking = use_tracking
        self.classifier = PatchClassifier()
        self.reid_model = load_reid_model(ide=ide)
        if ide:
            # IDE features live on a different distance scale.
            self.min_ap_dist = 25
        if metric_net:
            # Learned metric replaces the plain appearance distance.
            self.metric_net = MetricNet(feature_dim=512 if not ide else 256,
                                        num_class=2).cuda()
            checkpoint = torch.load(metric_net_ckpt)
            model_dict = checkpoint['state_dict']
            self.metric_net.load_state_dict(model_dict)
            self.metric_net.eval()
            self.min_ap_dist = 1
        else:
            self.metric_net = None

        self.frame_id = 0
コード例 #12
0
    def __init__(self, opt, args):
        """Build the re-ID appearance model and tracker bookkeeping state.

        opt: options namespace; .arch, .frame_rate, .conf_thres,
            .track_buffer and .loadmodel are read here.
        args: runtime namespace with .gpus (list) and .device.

        Raises:
            ValueError: if opt.arch names an unsupported architecture.
                (Previously an unknown arch fell through both branches and
                crashed later with an unhelpful NameError on ``m``.)
        """
        self.opt = opt
        self.num_joints = 17
        self.frame_rate = opt.frame_rate
        if self.opt.arch == "res50-fc512":
            m = resnet50_fc512(num_classes=1, pretrained=False)
        elif self.opt.arch == "osnet_ain":
            m = osnet_ain_x1_0(num_classes=1, pretrained=False)
        else:
            raise ValueError(
                "unsupported arch: {!r}".format(self.opt.arch))
        # DataParallel is used even for a single GPU -- presumably so the
        # checkpoint key layout matches load_pretrained_weights; confirm.
        if len(args.gpus) > 1:
            self.model = nn.DataParallel(m, device_ids=args.gpus).to(args.device).eval()
        else:
            self.model = nn.DataParallel(m).to(args.device).eval()
        load_pretrained_weights(self.model, self.opt.loadmodel)

        # Track pools grouped by lifecycle state.
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        # Buffer length scales with frame rate relative to a 30 fps baseline.
        self.buffer_size = int(self.frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size

        self.kalman_filter = KalmanFilter()
コード例 #13
0
class STrack(BaseTrack):
    """Kalman-filtered tracklet with a smoothed appearance embedding.

    The motion state handed to the project KalmanFilter is the 8-vector
    (x, y, a, h, vx, vy, va, vh): box center, aspect ratio w/h, height,
    and their velocities.
    """

    # One filter instance shared by all tracks for the batched
    # multi_predict() path.
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, temp_feat, buffer_size=30):
        """Create a not-yet-activated track from a single detection.

        tlwh: (top-left x, top-left y, width, height) detection box.
        score: detection confidence.
        temp_feat: appearance embedding of this detection.
        buffer_size: length of the recent-feature deque.
        """
        # wait activate
        # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented drop-in replacement.
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        # When an STrack is initialized for the first time, state is
        # TrackState.New (set by BaseTrack) and is_activated stays False
        # until the track is confirmed in a later frame.
        self.is_activated = False  # BaseTrack __init__, state = TrackState.New
        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        # NOTE(review): update_features() appends to self.features *before*
        # the instance deque below exists, so the first feature lands on
        # whatever ``features`` attribute BaseTrack declares -- confirm that
        # ordering is intentional before changing it.
        self.update_features(temp_feat)
        self.features = deque([], maxlen=buffer_size)
        self.alpha = 0.9  # momentum of the feature moving average

    def update_features(self, feat):
        """L2-normalize feat and fold it into the smoothed feature."""
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            # Moving average of per-frame features with momentum alpha.
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        """Advance the Kalman state one frame."""
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            # For non-tracked (lost) tracks the height velocity is zeroed.
            # Per the authors: continually predicting lost tracklets raises
            # recall, but they tend to drift, causing wrong assignments and
            # more ID switches overall; zeroing this component reduced ID
            # switches in practice. (Adapted from longcw/MOTDT.)
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Vectorized predict() over several tracks via the shared filter."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0  # same height-velocity zeroing as predict()
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # is_activated deliberately stays False here; it flips to True only
        # once the track is matched again in a consecutive frame.
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        """Revive a lost track using a fresh detection."""
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )

        self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # A track becomes activated once it is associated with another
        # observation (via IoU distance) in consecutive frames.
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()

    def update(self, new_track, frame_id, update_feature=True):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        if update_feature:
            self.update_features(new_track.curr_feat)

    @property
    #@jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
                width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]  # aspect * height -> width
        ret[:2] -= ret[2:] / 2  # center -> top-left corner
        return ret

    @property
    #@jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    #@jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        """Current box in (center x, center y, aspect ratio, height) form."""
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    #@jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        """Convert (min x, min y, max x, max y) to (x, y, width, height)."""
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    #@jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        """Convert (x, y, width, height) to (min x, min y, max x, max y)."""
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
コード例 #14
0
class Track:
    """A counted object track backed by an 8-state Kalman filter.

    Real detections extend the track via update_real(); when no detection is
    available, update_phantom() extends it with the filter's own prediction
    and records how long the track has coasted.
    """

    def __init__(self, track_id, box, vx, vy, ax, ay):
        """Initialize the Kalman filter from the first detection box.

        vx, vy, ax, ay appear to act as dt-like transition gains coupling
        position->velocity and velocity->acceleration -- TODO confirm.
        """
        self.track_id = track_id
        self.boxes = [box]  # full box history
        self.kf = KalmanFilter(dim_x=8, dim_z=4)
        # Transition matrix: rows 0-3 observed, rows 4-7 hidden dynamics.
        self.kf.F = np.array(
            [[1, 0, 0, 0, vx, 0, 0, 0],
             [0, 1, 0, 0, 0, vy, 0, 0],
             [0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, ax, 0],
             [0, 0, 0, 0, 0, 1, 0, ay],
             [0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 1]])
        # Only the first four state entries are measured.
        self.kf.H = np.array(
            [[1, 0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0]])

        self.kf.R[2:, 2:] *= 10.
        self.kf.P[4:, 4:] *= 1000.  # give high uncertainty to the unobservable initial velocities
        self.kf.P *= 10.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01
        # Seed state from detection corners in (left, bottom, right, top)
        # order -- must match what convert_bbox_to_z expects.
        self.kf.x[:4] = convert_bbox_to_z(np.array([box.left, box.bottom, box.right, box.top]))
        self.kf.predict()
        self.last_frame = box.frame  # frame index of the newest box
        self.real_boxes = 1  # boxes backed by real detections
        self.counted = False  # whether this track was already counted
        self.phantom_boxes = 0  # current run of predicted-only boxes
        self.max_phantom_boxes = 0  # longest such run seen

    def update_phantom(self):
        """Extend the track one frame using only the filter's prediction."""
        predicted = self.kf.x
        b = convert_x_to_bbox(predicted)
        # Phantom boxes are flagged (True) and carry zero confidence.
        new_box = Box(self.track_id, b[0], b[1], b[2], b[3], self.last_frame, True, 0, "")
        self.last_frame += 1
        self.boxes.append(new_box)
        # Feed the prediction back as a pseudo-measurement to keep the
        # filter cycle going while no detection is available.
        self.kf.update(predicted[:4])
        self.kf.predict()
        self.phantom_boxes += 1
        if self.phantom_boxes > self.max_phantom_boxes:
            self.max_phantom_boxes = self.phantom_boxes

    def update_real(self, box, non_decrease, real_detection):
        """Extend the track with a detection.

        If the detection's area shrank below ``non_decrease`` times the
        previous box's area, the filter's prediction is substituted for the
        (presumably truncated) detection.
        box: new detection; non_decrease: minimum allowed area ratio;
        real_detection: confidence threshold for counting a real box.
        """
        if len(self.boxes) > 0:
            prev = self.boxes[-1]
            ratio = (box.right - box.left) * (box.top - box.bottom) / (
                    (prev.right - prev.left) * (prev.top - prev.bottom))
            if ratio < non_decrease:
                # NOTE(review): here the state is sliced (self.kf.x[:4])
                # before convert_x_to_bbox, while update_phantom passes the
                # full state vector -- confirm both are equivalent.
                predicted = convert_x_to_bbox(self.kf.x[:4])
                box = Box(self.track_id, predicted[0], predicted[1], predicted[2], predicted[3], self.last_frame, True, 0, "")

        self.boxes.append(box)
        if box.confidence >= real_detection:
            self.real_boxes += 1
        self.phantom_boxes = 0  # a real detection resets the coasting run
        self.last_frame = box.frame
        self.kf.update(convert_bbox_to_z(np.array([box.left, box.bottom, box.right, box.top])))
        self.kf.predict()

    def get_prediction(self):
        """Return the filter's current box estimate as a bbox."""
        return convert_x_to_bbox(self.kf.x[:4])

    def get_max_phantoms(self):
        """Return the longest run of consecutive phantom boxes seen."""
        return self.max_phantom_boxes
コード例 #15
0
ファイル: mot_tracker.py プロジェクト: intelli8786/MOTDE
 def clear(self):
     """Reset the tracker: empty every track pool, restart frame numbering
     and replace the Kalman filter with a fresh instance."""
     self.tracked_stracks, self.lost_stracks = [], []
     self.removed_stracks = []
     self.frame_id = 0
     self.kalman_filter = KalmanFilter()
コード例 #16
0
def update_using_kalman(kalman: KalmanFilter, metadata):
    """Smooth a sequence of geotagged image metadata with a Kalman filter.

    kalman: an initialized filter exposing update_estimations(),
        predict_values() and a .predicted_data attribute.
    metadata: list of dicts; each is expected to carry at least 'time',
        'latitude', 'longitude', 'speed', 'accuracy' and 'path' -- TODO
        confirm against load_exif_data's output schema.
    Returns a list of dicts with smoothed 'latitude'/'longitude' plus the
    original 'path', one entry per input item after the first.
    """
    output_data = []
    last_point = metadata[0]
    # NOTE: from_latlon returns (easting, northing, zone number, zone
    # letter); last_utm initially holds the full 4-tuple but is later
    # re-assigned as a 2-element slice.
    last_utm = utm.from_latlon(last_point.get("latitude", None),
                               last_point.get("longitude", None))
    start_time = last_point['time']
    # Counts consecutive samples sharing the same GPS timestamp so that
    # intermediate frames can be extrapolated at a 0.25 s spacing.
    similar_data_count = 1
    for data in metadata[1:]:
        if data["time"] != last_point["time"]:
            # Fresh GPS fix: feed a measurement into the filter.
            data_utm = utm.from_latlon(data.get("latitude", None),
                                       data.get("longitude", None))
            if data["speed"] is not None:
                # Direction from the previous fix scaled by reported speed.
                speed_vec = normalize_vec(
                    np.array([data_utm[0], data_utm[1]]) -
                    np.array(last_utm)) * data["speed"]
                new_data = np.array(
                    [data_utm[0], data_utm[1], speed_vec[0], speed_vec[1]]).T
            else:
                # No speed reading: position-only measurement.
                new_data = np.array([data_utm[0], data_utm[1]]).T
            last_utm = data_utm[:2]

            if data["accuracy"] is not None:
                # Split the reported accuracy over the two axes (~1/sqrt(2)).
                noise_vec = [data["accuracy"] * 0.71, data["accuracy"] * 0.71]
            else:
                noise_vec = None

            kalman.update_estimations(new_data=new_data,
                                      measurement_noise=noise_vec,
                                      estimate_time=data["time"])
            last_point = data
            print(last_point['time'] - start_time, similar_data_count,
                  data.get("latitude", None), data.get("longitude", None))
            similar_data_count = 1
            # Convert the filtered UTM position back to lat/lng using the
            # zone of the current fix.
            predicted_data = kalman.predicted_data
            predicted_utm = predicted_data[:2]
            predicted_lat_lng = utm.to_latlon(predicted_utm[0],
                                              predicted_utm[1], data_utm[2],
                                              data_utm[3])
            output_data.append({
                'latitude': predicted_lat_lng[0],
                'longitude': predicted_lat_lng[1],
                'path': data['path']
            })
        else:
            # Same GPS timestamp as before: no new measurement, so advance
            # the filter to a synthetic time 0.25 s further per repeat.
            new_time = data['time'] + 0.25 * similar_data_count
            data_utm = utm.from_latlon(data.get("latitude", None),
                                       data.get("longitude", None))
            last_utm = data_utm[:2]
            kalman.predict_values(new_time)
            predicted_data = kalman.predicted_data
            predicted_utm = predicted_data[:2]
            predicted_lat_lng = utm.to_latlon(predicted_utm[0],
                                              predicted_utm[1], data_utm[2],
                                              data_utm[3])
            output_data.append({
                'latitude': predicted_lat_lng[0],
                'longitude': predicted_lat_lng[1],
                'path': data['path']
            })
            similar_data_count += 1
    return output_data
コード例 #17
0
class STrack(BaseTrack):
    """Kalman-filtered tracklet carrying pose, crop and source-file metadata
    alongside a smoothed appearance embedding.

    Motion state is the 8-vector (x, y, a, h, vx, vy, va, vh): box center,
    aspect ratio w/h, height, and their velocities.
    """

    # One filter instance shared by all tracks for batched multi_predict().
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, temp_feat, pose, crop_box, file_name, ps, buffer_size=30):
        """Create a not-yet-activated track from a single detection.

        tlwh: (top-left x, top-left y, width, height) box.
        score: detection confidence; ps: detector score stored separately.
        temp_feat: appearance embedding; pose / crop_box / file_name:
            auxiliary per-detection metadata carried along with the track.
        buffer_size: length of the recent-feature deque.
        """
        # wait activate
        # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented drop-in replacement.
        self._tlwh = np.asarray(tlwh, dtype=float)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        # NOTE(review): update_features() appends to self.features before
        # the instance deque below exists, so the first feature lands on
        # whatever ``features`` attribute BaseTrack declares -- confirm.
        self.update_features(temp_feat)
        self.features = deque([], maxlen=buffer_size)
        self.alpha = 0.9  # momentum of the feature moving average
        self.pose = pose
        self.detscore = ps
        self.crop_box = crop_box
        self.file_name = file_name

    def update_features(self, feat):
        """L2-normalize feat and fold it into the smoothed feature."""
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            # Moving average of per-frame features with momentum alpha.
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        """Advance the Kalman state one frame; lost tracks have their height
        velocity zeroed to limit drift."""
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Vectorized predict() over several tracks via the shared filter."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0  # same height-velocity zeroing as predict()
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # is_activated deliberately stays False until the track is matched
        # again in a consecutive frame.
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        """Revive a lost track using a fresh detection, copying its
        auxiliary metadata."""
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )

        self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.pose = new_track.pose
        self.detscore = new_track.detscore
        self.crop_box = new_track.crop_box
        self.file_name = new_track.file_name

    def update(self, new_track, frame_id, update_feature=True):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1
        # Carry the latest auxiliary metadata forward with the track.
        self.pose = new_track.pose
        self.detscore = new_track.detscore
        self.crop_box = new_track.crop_box
        self.file_name = new_track.file_name
        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        if update_feature:
            self.update_features(new_track.curr_feat)

    @property
    #@jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
                width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]  # aspect * height -> width
        ret[:2] -= ret[2:] / 2  # center -> top-left corner
        return ret

    @property
    #@jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    #@jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        """Current box in (center x, center y, aspect ratio, height) form."""
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    #@jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        """Convert (min x, min y, max x, max y) to (x, y, width, height)."""
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    #@jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        """Convert (x, y, width, height) to (min x, min y, max x, max y)."""
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
コード例 #18
0
ファイル: movie_demo.py プロジェクト: agorji/driver-assist
if __name__ == '__main__':
    database_name = "kalmanUpdated"
    container_name = "nice_weather_thusis_filisur"
    gpx_file_address = "data/Trackmovies/Thusis_Filisur_20200828/movie - Copy.gpx"
    video_address = "data/Trackmovies/Thusis_Filisur_20200828/movie.mp4"
    cv.namedWindow("Good Weather", cv.WINDOW_NORMAL)
    cv.resizeWindow("Good Weather", 640, 480)
    cv.namedWindow("Bad Weather", cv.WINDOW_NORMAL)
    cv.resizeWindow("Bad Weather", 640, 480)
    clock = pygame.time.Clock()

    geographic_db = GeographicMongoDB(database_name, container_name)

    with open(gpx_file_address) as gpx_file:
        gpx_data = gpxpy.parse(gpx_file)
    video_kalman = KalmanFilter(location_noise=4, speed_noise=3)
    way_points = gpx_data.waypoints
    utm_data = utm.from_latlon(way_points[0].latitude, way_points[0].longitude)
    print(utm_data)
    video = cv.VideoCapture(video_address)
    fake_timestamp = way_points[0].time.timestamp()
    fake_time_start = fake_timestamp
    start_real_time = time.time()
    prev_time = start_real_time
    video_kalman.initialize(
        np.array([utm_data[0], utm_data[1], 0, 0]).T,
        np.array([4, 4, 3, 3]).T, start_real_time)
    index = 1
    video.set(1, 100)
    current_image = ""
    while index < len(way_points):