Code example #1
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

# Project-specific helpers (Detector, KalmanFilter, letterbox_image, INPUT_SIZE,
# box2xyah, matching_cascade, create_tracker, visualize_results) are assumed to be
# importable from the surrounding project.


def main(video_path,
         model_path,
         track_target=0,
         visualize=True):
    """run video prediction

    Args:
        video_path:     video path
        model_path:     model path
        track_target:   0-person; 1-bicycle; 2-car; 7-truck
        visualize:      whether visualize tracking list

    """

    detector = Detector(model_path=model_path)
    kalman_filter = KalmanFilter()
    capture = cv2.VideoCapture(video_path)
    height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)

    # tracking list
    tracking_list = []
    label_count = 0
    is_first_frame = True

    while True:
        success, frame = capture.read()

        if not success:
            capture.release()
            break

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # convert to Image object
        frame_pil = Image.fromarray(np.uint8(frame))
        new_frame = letterbox_image(frame_pil, INPUT_SIZE)
        image_array = np.expand_dims(np.array(new_frame, dtype='float32') / 255.0, axis=0)
        image_shape = np.expand_dims(np.array([height, width], dtype='float32'), axis=0)
        image_constant = tf.constant(image_array, dtype=tf.float32)
        image_shape = tf.constant(image_shape, dtype=tf.float32)

        # detect image
        results = detector.detect(image_constant, image_shape)
        pred_results = list(results.values())
        boxes = pred_results[0].numpy()
        # scores = pred_results[1].numpy()  # confidence scores, unused here
        classes = pred_results[2].numpy()

        # find tracking targets
        track_id = np.where(classes == track_target)[0]
        track_boxes = boxes[track_id]
        num_tracks = len(track_boxes)
        if num_tracks > 0:
            # convert boxes to the xyah measurement format expected by the Kalman filter
            track_boxes = box2xyah(track_boxes)
            track_boxes = [track_box for track_box in track_boxes]

        if not is_first_frame:
            # start tracking
            tracking_list, label_count = matching_cascade(tracking_list, track_boxes,
                                                          kalman_filter, label_count)

        if is_first_frame and (num_tracks > 0):
            is_first_frame = False

            for i in range(num_tracks):
                # initialize first frame
                mean_init, cov_init = kalman_filter.initiate(measurement=track_boxes[i])
                # create tracker
                new_tracker = create_tracker(mean=mean_init,
                                             cov=cov_init,
                                             detection=track_boxes[i])
                tracking_list.append(new_tracker)

        if visualize:
            # visualize results
            img = visualize_results(tracking_list, height, frame)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imshow('avoid invasion', img)
            key = cv2.waitKey(30) & 0xff
            if key == 27:
                capture.release()
                break

    cv2.destroyAllWindows()
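A minimal sketch of how `main` might be invoked, assuming the project-specific helpers it uses are available; the file paths and class id below are placeholders, not values from the original code.

if __name__ == '__main__':
    # Placeholder paths: point these at your own video and detection model.
    main(video_path='input.mp4',
         model_path='saved_model/',
         track_target=2,      # 2 = car, per the class mapping in the docstring
         visualize=True)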
Code example #2
import numpy as np

# Project-specific dependencies (KalmanFilter, Track, matching_cascade, iou_cost,
# min_cost_matching, cfgs) are assumed to be importable from the surrounding project.


class Tracker(object):
    """
    This is the multi-target tracker.

    Parameters
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        A distance metric for measurement-to-track association.
    max_iou_distance : float
        Gating threshold for the IoU-based association stage.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of consecutive detections before the track is confirmed. The
        track state is set to `Deleted` if a miss occurs within the first
        `n_init` frames.

    Attributes
    ----------
    metric : nn_matching.NearestNeighborDistanceMetric
        The distance metric used for measurement-to-track association.
    max_age : int
        Maximum number of consecutive misses before a track is deleted.
    n_init : int
        Number of frames that a track remains in initialization phase.
    kf : kalman_filter.KalmanFilter
        A Kalman filter to filter target trajectories in image space.
    tracks : List[Track]
        The list of active tracks at the current time step.

    """
    def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):
        self.metric = metric
        self.max_iou_distance = max_iou_distance
        self.max_age = max_age
        self.n_init = n_init

        self.kf = KalmanFilter()
        self.tracks = []
        self._next_id = 1
        self.last_confirm_id = 1

    def predict(self):
        """Propagate track state distributions one time step forward.

        This function should be called once every time step, before `update`.
        """
        if cfgs.debug:
            print('tracker pred:', len(self.tracks))
        for track in self.tracks:
            track.predict(self.kf)
            #print('run predict')

    def update(self, detections):
        """Perform measurement update and track management.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            A list of detections at the current time step.

        """
        if cfgs.debug:
            print('update detections:', len(detections))
        # Run matching cascade.
        matches, unmatched_tracks, unmatched_detections = \
            self._match(detections)
        if cfgs.debug:
            print('unmatched tracks (mark missed):', len(unmatched_tracks))
            print('unmatched detections (init new tracks):', len(unmatched_detections))
        # Update track set.
        for track_idx, detection_idx in matches:
            #print("run match")
            self.last_confirm_id = self.tracks[track_idx].update(
                self.kf, detections[detection_idx], self.last_confirm_id)
        for track_idx in unmatched_tracks:
            self.tracks[track_idx].mark_missed()
            #print("run unmatch_track for delete")
        for detection_idx in unmatched_detections:
            #print("run unmatch_det for init track")
            self._initiate_track(detections[detection_idx])
        self.tracks = [t for t in self.tracks if not t.is_deleted()]

        # Update distance metric.
        active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
        if cfgs.debug:
            print('active targets:', len(active_targets))
        features, targets = [], []
        for track in self.tracks:
            #print('track_feature',len(track.features))
            if not track.is_confirmed():
                continue
            features += track.features
            targets += [track.track_id for _ in track.features]
            track.features = []
        self.metric.partial_fit(np.asarray(features), np.asarray(targets),
                                active_targets)

    def _match(self, detections):
        # Split track set into confirmed and unconfirmed tracks.
        confirmed_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_confirmed()
        ]
        unconfirmed_tracks = [
            i for i, t in enumerate(self.tracks) if not t.is_confirmed()
        ]
        if cfgs.debug:
            print('confirm and unconfirm:', len(confirmed_tracks),
                  len(unconfirmed_tracks))
        # Associate confirmed tracks using appearance features.
        matches_a, unmatched_tracks_a, unmatched_detections = \
            matching_cascade(self.metric, self.kf, self.metric.matching_threshold,
                             self.max_age, self.tracks, detections, confirmed_tracks)
        # Associate remaining tracks together with unconfirmed tracks using IOU.
        iou_track_candidates = unconfirmed_tracks + [
            k for k in unmatched_tracks_a
            if self.tracks[k].time_since_update == 1
        ]
        unmatched_tracks_a = [
            k for k in unmatched_tracks_a
            if self.tracks[k].time_since_update != 1
        ]
        cost_matrix = iou_cost(self.tracks, detections, iou_track_candidates,
                               unmatched_detections)
        matches_b, unmatched_tracks_b, unmatched_detections = \
            min_cost_matching(
                cost_matrix, self.max_iou_distance, self.tracks,
                detections, iou_track_candidates, unmatched_detections)
        if cfgs.debug:
            print('iou match candidates (unconfirmed + unmatched tracks):',
                  iou_track_candidates)
            print('match_a and match_b:', len(matches_a), len(matches_b))
            print('a:', matches_a)
            print('b:', matches_b)
        matches = matches_a + matches_b
        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
        return matches, unmatched_tracks, unmatched_detections

    def _initiate_track(self, detection):
        mean, covariance = self.kf.initiate(detection.to_xyah())
        self.tracks.append(
            Track(mean, covariance, self._next_id, self.n_init, self.max_age,
                  detection.feature))
        self._next_id += 1
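A sketch of the per-frame driving loop implied by the docstrings above: `predict` is called once per time step, followed by `update` with the current detections. The metric construction follows the `nn_matching.NearestNeighborDistanceMetric` type named in the docstring; the frame iterable and the `detections_for_frame` helper are placeholders for whatever detector feeds this tracker.

# Placeholder wiring; nn_matching is the deep_sort module named in the docstring,
# and detections_for_frame is a hypothetical helper returning Detection objects.
metric = nn_matching.NearestNeighborDistanceMetric('cosine', 0.2)  # cosine distance, threshold 0.2
tracker = Tracker(metric, max_iou_distance=0.7, max_age=70, n_init=3)

for frame in frames:                          # frames: any iterable of video frames
    detections = detections_for_frame(frame)  # List[deep_sort.detection.Detection]
    tracker.predict()                         # propagate every track one step forward
    tracker.update(detections)                # match, update, create and delete tracks
    for track in tracker.tracks:
        if track.is_confirmed():
            print('confirmed track id:', track.track_id)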
Code example #3
import random

# Project-specific helpers (KalmanFilter, BoudingBox, _restoreStandardValue) are
# assumed to be importable from the surrounding project.


class Track:
    def __init__(self, bb, id_, max_miss_):
        # bb: initial bounding box; id_: track id; max_miss_: allowed consecutive misses
        self.track = [bb]
        self.alive = True
        self.kf = KalmanFilter()
        self.id_track = id_
        self.mean, self.cov = self.kf.initiate(bb.asp_ratio())
        self.num_miss = 0
        self.num_max_miss = max_miss_
        self.project_mean = 0
        self.color = (int(random.random() * 256), int(random.random() * 256),
                      int(random.random() * 256))

    def last(self):
        return self.track[-1]

    def get_last_mean(self):
        return self.mean[0:4]

    def append(self, bb):
        # Run one Kalman cycle with the new measurement, then store the
        # filtered (approximated) box for this frame.
        self.kalman_steps(bb.asp_ratio())
        approx_bb = BoudingBox(coord=_restoreStandardValue(self.get_last_mean()),
                               frame=bb.getIDFrame())
        self.track.append(approx_bb)

    def kalman_steps(self, dets):
        # One Kalman cycle: predict the prior, then update with the measurement.
        self.mean, self.cov = self.kf.predict(self.mean, self.cov)
        self.mean, self.cov = self.kf.update(self.mean, self.cov, dets)

    def getTrack(self):
        return self.track

    def getBBFrame(self, frame_id):
        for bb in self.track:
            if bb.getIDFrame() == frame_id:
                return bb

    def __len__(self):
        return len(self.track)

    def is_alive(self):
        return self.alive

    def ressurect(self):
        self.alive = True

    def project_position(self):
        # Coast: feed the current state estimate back in as a pseudo-measurement
        # for the next frame.
        self.append(
            BoudingBox(_restoreStandardValue(self.mean[:4]),
                       self.last().getIDFrame() + 1))

    def missed(self):
        # No detection matched this frame: coast on the prediction and
        # kill the track once the miss count exceeds the limit.
        self.num_miss += 1
        self.project_position()
        if self.num_miss > self.num_max_miss:
            self.kill()

    def kill(self):
        self.alive = False

    def getID(self):
        return self.id_track

    def getcolor(self):
        return self.color
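A usage sketch for this Track class, assuming the project's BoudingBox behaves as it is used above (constructed from a coordinate array plus a frame id, and exposing asp_ratio() and getIDFrame()); the coordinates below are placeholder numbers in whatever format the project's BoudingBox expects.

# Placeholder detections; BoudingBox and its coordinate format come from the project.
first_bb = BoudingBox(coord=[100, 50, 40, 80], frame=0)
track = Track(first_bb, id_=0, max_miss_=30)         # 30-frame miss budget

# A matched detection in the next frame is filtered through the Kalman steps.
track.append(BoudingBox(coord=[104, 52, 40, 82], frame=1))

# No match in the following frame: coast on the prediction and count the miss.
track.missed()

print(track.getID(), len(track), track.is_alive())   # id, number of boxes, alive flag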