Example #1
    def track(self, detections):
        self.frame += 1
        self.tracks_active[self.frame] = {}
        # clear tracks stored for old frames
        self.clean_old_tracks()

        dets = [det for det in detections if det['score'] >= self.sigma_l]

        for id_, track in self.retrieve_tracks():
            if len(dets) > 0:
                # get det with highest iou
                best_match = max(dets,
                                 key=lambda x: iou(track['bbox'], x['bbox']))
                if iou(track['bbox'], best_match['bbox']) >= self.sigma_iou:
                    self.tracks_active[self.frame][id_] = best_match
                    # remove the best matching detection from the remaining detections
                    del dets[dets.index(best_match)]

        # create new tracks
        for det in dets:
            self.id_count += 1
            self.tracks_active[self.frame][self.id_count] = det

        # return the tracks for the current frame
        return self.tracks_active[self.frame]
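
All of the snippets on this page call an iou() helper that is not shown. A minimal sketch of such a helper, assuming axis-aligned boxes given as (x1, y1, x2, y2) corners (some examples use other box formats, so adapt as needed):

def iou(bbox1, bbox2):
    """Intersection over union of two axis-aligned (x1, y1, x2, y2) boxes."""
    # corners of the intersection rectangle
    x1 = max(bbox1[0], bbox2[0])
    y1 = max(bbox1[1], bbox2[1])
    x2 = min(bbox1[2], bbox2[2])
    y2 = min(bbox1[3], bbox2[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1])
    area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1])
    union = area1 + area2 - inter
    return inter / union if union > 0 else 0.0
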
Example #2
def track_iou(detections, sigma_l, sigma_h, sigma_iou, t_min):
    """
    Simple IOU based tracker.
    See "High-Speed Tracking-by-Detection Without Using Image Information by E. Bochinski, V. Eiselein, T. Sikora" for
    more information.

    Args:
         detections (list): list of detections per frame, usually generated by util.load_mot
         sigma_l (float): low detection threshold.
         sigma_h (float): high detection threshold.
         sigma_iou (float): IOU threshold.
         t_min (float): minimum track length in frames.

    Returns:
        list: list of tracks.
    """

    tracks_active = []
    tracks_finished = []

    for frame_num, detections_frame in enumerate(detections, start=1):
        # apply low threshold to detections
        dets = [det for det in detections_frame if det['score'] >= sigma_l]

        updated_tracks = []
        for track in tracks_active:
            if len(dets) > 0:
                # get det with highest iou
                best_match = max(dets, key=lambda x: iou(track['bboxes'][-1], x['bbox']))
                if iou(track['bboxes'][-1], best_match['bbox']) >= sigma_iou:
                    track['bboxes'].append(best_match['bbox'])
                    track['max_score'] = max(track['max_score'], best_match['score'])
                    track['class'] = best_match['class']

                    updated_tracks.append(track)

                    # remove the best matching detection from the remaining detections
                    del dets[dets.index(best_match)]

            # if track was not updated
            if len(updated_tracks) == 0 or track is not updated_tracks[-1]:
                # finish track when the conditions are met
                if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min:
                    tracks_finished.append(track)

        # create new tracks
        new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'],
                       'start_frame': frame_num, 'class': det['class']} for det in dets]
        tracks_active = updated_tracks + new_tracks

    # finish all remaining active tracks
    tracks_finished += [track for track in tracks_active
                        if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min]

    return tracks_finished
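
A minimal usage sketch for the tracker above; the detection dicts are hand-built here for illustration and in practice usually come from util.load_mot, as the docstring notes:

# two frames with one detection each, in the format the snippet expects
frame1 = [{'bbox': (10, 10, 50, 50), 'score': 0.9, 'class': 'car'}]
frame2 = [{'bbox': (12, 11, 52, 51), 'score': 0.8, 'class': 'car'}]

tracks = track_iou([frame1, frame2], sigma_l=0.3, sigma_h=0.5, sigma_iou=0.5, t_min=2)
for t in tracks:
    print(t['start_frame'], t['class'], t['max_score'], len(t['bboxes']))
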
Example #3
def track_iou(detections, sigma_l, sigma_h, sigma_iou, t_min):
    """
    Simple IOU based tracker.
    See "High-Speed Tracking-by-Detection Without Using Image Information by E. Bochinski, V. Eiselein, T. Sikora" for
    more information.

    Args:
         detections (list): list of detections per frame, usually generated by util.load_mot
         sigma_l (float): low detection threshold.
         sigma_h (float): high detection threshold.
         sigma_iou (float): IOU threshold.
         t_min (float): minimum track length in frames.

    Returns:
        list: list of tracks.
    """

    tracks_active = []
    tracks_finished = []

    for frame_num, detections_frame in enumerate(detections, start=1):
        # apply low threshold to detections
        dets = [det for det in detections_frame if det['score'] >= sigma_l]

        updated_tracks = []
        for track in tracks_active:
            if len(dets) > 0:
                # get det with highest iou
                best_match = max(dets, key=lambda x: iou(track['bboxes'][-1], x['bbox']))
                if iou(track['bboxes'][-1], best_match['bbox']) >= sigma_iou:
                    track['bboxes'].append(best_match['bbox'])
                    track['max_score'] = max(track['max_score'], best_match['score'])

                    updated_tracks.append(track)

                    # remove the best matching detection from the remaining detections
                    del dets[dets.index(best_match)]

            # if track was not updated
            if len(updated_tracks) == 0 or track is not updated_tracks[-1]:
                # finish track when the conditions are met
                if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min:
                    tracks_finished.append(track)

        # create new tracks
        new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'], 'start_frame': frame_num} for det in dets]
        tracks_active = updated_tracks + new_tracks

    # finish all remaining active tracks
    tracks_finished += [track for track in tracks_active
                        if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min]

    return tracks_finished
Example #4
def track_iou(pre_tracker, detections, sigma_iou):

    current_tracker = []
    number = 0
    best_data = 0.0

    if not pre_tracker and not detections:
        return pre_tracker, "wait"
    elif not pre_tracker:
        print("pre")
        pre_tracker = detections[0]
        return pre_tracker, None
    elif not detections:
        return pre_tracker, "wait"
    else:
        print("curr")
        current_tracker = detections

    for k in range(len(current_tracker)):
        if iou(pre_tracker['bbox'], current_tracker[k]['bbox']) >= best_data:
            best_data = iou(pre_tracker['bbox'], current_tracker[k]['bbox'])
            number = k

    print("pre print")
    print(pre_tracker)
    print("curr print[number]")
    print(current_tracker[number])

    if best_data >= sigma_iou:
        finish = current_tracker[number]
        pre_tracker = current_tracker[number]

    else:
        finish = None

    return pre_tracker, finish
Example #5
def get_bg_proposals(object_proposals, annot):
    annot_rect = util.get_annot_rect(annot)

    bg_proposals = []
    for obj_proposal in object_proposals:
        if util.iou(obj_proposal, annot_rect) <= 0.5:
            bg_proposals.append(obj_proposal)
    return bg_proposals
Example #7
def ratio_tracker(current_tracker, p_tracker):
    best_iou_ratio = 0.0  # best IOU ratio found so far
    best_data = []  # detection with the best ratio

    for c_tracker in current_tracker:  # find the detection in the current frame that best matches the previous track
        iou_ratio = iou(p_tracker['bbox'], c_tracker['bbox'])

        if iou_ratio >= best_iou_ratio:
            best_iou_ratio = iou_ratio
            best_data = c_tracker

    return best_data, best_iou_ratio
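
A small usage sketch, assuming the same single-object setup as Example #4: p_tracker holds the previous frame's matched detection and current_tracker the detections of the current frame (the bbox format must match whatever iou() expects):

p_tracker = {'bbox': (100, 100, 180, 220)}
current_tracker = [{'bbox': (300, 90, 380, 210)},    # a different object
                   {'bbox': (104, 102, 184, 224)}]   # overlaps the previous box

best_data, best_iou_ratio = ratio_tracker(current_tracker, p_tracker)
if best_iou_ratio >= 0.5:   # plays the role of sigma_iou in the other examples
    p_tracker = best_data   # carry the match forward to the next frame
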
Example #8
def active_criteria(x, tracks):
    """
    Take a matching candidate and a track, offset the track's last bounding box by the predicted offset,
    and compute the IOU between them.

    Args:
        x (list [roi, bbox, score]): a detection from this frame.
        tracks (list [[frames], Kalman_filter]): a track containing all of its frames and the Kalman filter associated with it.

    Returns:
        float: IOU between the detection's roi and the track's offset-predicted roi.
    """
    ofdx, ofdy, _, _ = tracks[0][-1]['pred_state'] - tracks[0][-1]['cur_state']
    offset_vector = np.array([ofdy, ofdx, ofdy, ofdx])
    offset_roi = tracks[0][-1]['roi'] + offset_vector

    th = iou(x['roi'], offset_roi)

    return th
Example #9
    def _pair_bboxes(self, bboxes_true, bboxes_new, change_colors=False):
        pairs = []
        for i_true, bb_true in enumerate(bboxes_true):
            match = None

            # Calculate overlap of bounding boxes.
            iou_max = 0.0
            for i_new, bb_new in enumerate(bboxes_new):
                iou = util.iou(bb_true, bb_new)
                if iou > iou_max:
                    iou_max = iou
                    match = i_new

            if match is not None:
                pairs.append((i_true, match))

        # Check for missing.
        if len(pairs) < len(bboxes_true):
            matched_true = [x[0] for x in pairs]
            matched_new = [x[1] for x in pairs]
            for i_true, bb_true in enumerate(bboxes_true):
                if i_true in matched_true:
                    continue

                match = None
                d_min = numpy.inf
                for i_new, bb_new in enumerate(bboxes_new):
                    # Skip already matched new bounding boxes.
                    if i_new in matched_new:
                        continue

                    d = util.bbox_distance(bb_true, bb_new)
                    if d < d_min:
                        d_min = d
                        match = i_new

                if match is not None:
                    pairs.append((i_true, match))
                    matched_true.append(i_true)

            # Update colors if still not all matched.
            if change_colors and len(pairs) < len(bboxes_true):
                colors_new = []
                for ci in [x[0] for x in sorted(pairs, key=lambda x: x[1])]:
                    colors_new.append(self._bbox_color[ci])
                self._bbox_color = numpy.array(colors_new)

        return pairs
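
This method also relies on util.bbox_distance, which is not shown. A plausible sketch (an assumption, not necessarily the repository's actual helper) that measures the distance between box centers for (x1, y1, x2, y2) boxes:

import numpy


def bbox_distance(bb1, bb2):
    """Euclidean distance between the centers of two (x1, y1, x2, y2) boxes."""
    cx1, cy1 = (bb1[0] + bb1[2]) / 2.0, (bb1[1] + bb1[3]) / 2.0
    cx2, cy2 = (bb2[0] + bb2[2]) / 2.0, (bb2[1] + bb2[3]) / 2.0
    return float(numpy.hypot(cx1 - cx2, cy1 - cy2))
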
Example #10
def associate(tracks, detections, sigma_iou):
    """ perform association between tracks and detections in a frame.
    Args:
        tracks (list): input tracks
        detections (list): input detections
        sigma_iou (float): minimum intersection-over-union of a valid association

    Returns:
        (tuple): tuple containing:

        track_ids (numpy.array): 1D array with indexes of the tracks
        det_ids (numpy.array): 1D array of the associated indexes of the detections
    """
    costs = np.empty(shape=(len(tracks), len(detections)), dtype=np.float32)
    for row, track in enumerate(tracks):
        for col, detection in enumerate(detections):
            costs[row, col] = 1 - iou(track['bboxes'][-1], detection['bbox'])

    costs = np.nan_to_num(costs)  # nan_to_num returns a copy, so keep the result
    costs[costs > 1 - sigma_iou] = np.nan
    track_ids, det_ids = solve_dense(costs)
    return track_ids, det_ids
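
solve_dense here appears to be the dense linear assignment solver from the lapsolver package (an assumption based on the call signature); NaN entries mark pairings that may not be assigned, so only associations with IOU >= sigma_iou can be returned. A usage sketch with one track and two detections:

import numpy as np
from lapsolver import solve_dense  # pip install lapsolver

tracks = [{'bboxes': [(10, 10, 50, 50)]}]
detections = [{'bbox': (200, 200, 240, 240)},   # far away, IOU ~ 0
              {'bbox': (12, 11, 52, 51)}]       # large overlap with the track

track_ids, det_ids = associate(tracks, detections, sigma_iou=0.5)
for t, d in zip(track_ids, det_ids):
    print('track', t, '-> detection', d)        # expected: track 0 -> detection 1
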
Example #11
def dist_func(v1, v2, epsilon=1e-6, mode='iou'):
    """

    :param v1: n-d numpy array
    :param v2: n-d numpy array, same shape with v1
    :param epsilon:
    :param mode:
    :return:
    """
    if mode == 'euclid':
        sm = np.sum((v1 - v2)**2)

        return (sm + epsilon)**0.5
    elif mode == 'iou':
        x1, y1 = v1
        x2, y2 = v2

        box1 = [960 - x1 / 2, 600 - y1 / 2, 960 + x1 / 2, 600 + y1 / 2]
        box2 = [960 - x2 / 2, 600 - y2 / 2, 960 + x2 / 2, 600 + y2 / 2]

        return 1 - iou(box1, box2, encode=False)
    else:
        raise Exception('Unrecognized mode {}'.format(mode))
Example #12
def main():
    img_list = []
    param_list = []
    results = []
    for _ in range(1000):
        params, img = noisy_circle(200, 50, 2)
        # normalize image intensities to [0, 1]
        img_list.append(img / img.max())
        param_list.append(params)
    param_list = np.array(param_list)
    img_list = np.array(img_list)

    # pass all samples as one batch for runtime
    detected = find_circle(img_list[:, :, :, np.newaxis])

    # rescale predictions back into the data range (200x200 image, radius in [10, 50])
    detected[:, :2] = detected[:, :2] * 200
    detected[:, 2] = 10 + detected[:, 2] * 40

    for i in range(1000):
        results.append(iou(param_list[i], detected[i]))
    results = np.array(results)
    print((results > 0.7).mean())
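
In this example iou() compares circles given as (row, col, radius) rather than boxes. A hedged sketch of such a circle IOU, using the standard lens-area formula for the intersection of two circles (an illustration, not necessarily the helper used by the original challenge code):

import numpy as np


def circle_iou(c1, c2):
    """IOU of two circles given as (row, col, radius)."""
    r1, r2 = c1[2], c2[2]
    d = np.hypot(c1[0] - c2[0], c1[1] - c2[1])
    if d >= r1 + r2:              # disjoint circles
        inter = 0.0
    elif d <= abs(r1 - r2):       # one circle contains the other
        inter = np.pi * min(r1, r2) ** 2
    else:                         # partial overlap: area of the circular lens
        inter = (r1 ** 2 * np.arccos((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1))
                 + r2 ** 2 * np.arccos((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2))
                 - 0.5 * np.sqrt((-d + r1 + r2) * (d + r1 - r2)
                                 * (d - r1 + r2) * (d + r1 + r2)))
    union = np.pi * r1 ** 2 + np.pi * r2 ** 2 - inter
    return inter / union if union > 0 else 0.0
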
Example #13
    def run(self):
        self.vc = cv2.VideoCapture(self.mot_video_dir)

        sigma_l = 0.4
        sigma_h = 0.5
        sigma_iou = 0.2
        t_min = 2

        # load bounding boxes.
        detections = load_mot(self.mot_det_dir)

        tracks_finished = []

        # set the color of the object randomly.
        color_for_boundingbox = [(13 * i % 255, (255 - 5 * i) % 255, (240 + 10 * i) % 255) for i in range(0, 51)]

        # run algorithm.
        for frame_num, detections_frame in enumerate(detections, start=1):
            # apply low threshold to detections

            dets = [det for det in detections_frame if det['score'] >= sigma_l]

            updated_tracks = []

            for track in self.tracks_active:
                if len(dets) > 0:
                    # get det with highest iou
                    best_match = max(dets, key=lambda x: iou(track['bboxes'][-1], x['bbox']))
                    if iou(track['bboxes'][-1], best_match['bbox']) >= sigma_iou:
                        track['bboxes'].append(best_match['bbox'])
                        track['max_score'] = max(track['max_score'], best_match['score'])

                        updated_tracks.append(track)

                        # remove the best matching detection from the remaining detections
                        del dets[dets.index(best_match)]

                # if track was not updated
                if len(updated_tracks) == 0 or track is not updated_tracks[-1]:
                    # finish track when the conditions are met
                    if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min:
                        tracks_finished.append(track)

            # create new tracks
            new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'], 'start_frame': frame_num,
                           'color': color_for_boundingbox[(len(self.tracks_active) + random.randint(0, 51)) % 51]}
                          for det in dets]
            self.tracks_active = updated_tracks + new_tracks

            self.retval, current_frame = self.vc.read()
            labeled_frame = show_tracking_results(current_frame, self.tracks_active)
            rgb_frame = convert_cvimage_to_qimage(labeled_frame)
            self.mot_signal.emit(rgb_frame)


        # finish all remaining active tracks
        tracks_finished += [track for track in self.tracks_active
                            if track['max_score'] >= sigma_h and len(track['bboxes']) >= t_min]
        output_path = os.path.dirname(self.mot_video_dir)
        save_to_csv(output_path, tracks_finished)
        self.vc.release()
Example #14
def track_viou(frames_path, detections, sigma_l, sigma_h, sigma_iou, t_min,
               ttl, tracker_type, keep_upper_height_ratio):
    """ V-IOU Tracker.
    See "Extending IOU Based Multi-Object Tracking by Visual Information by E. Bochinski, T. Senst, T. Sikora" for
    more information.

    Args:
         frames_path (str): path to ALL frames.
                            string must contain a placeholder like {:07d} to be replaced with the frame numbers.
         detections (list): list of detections per frame, usually generated by util.load_mot
         sigma_l (float): low detection threshold.
         sigma_h (float): high detection threshold.
         sigma_iou (float): IOU threshold.
         t_min (float): minimum track length in frames.
         ttl (float): maximum number of frames to perform visual tracking.
                      this can fill 'gaps' of up to 2*ttl frames (ttl times forward and backward).
         tracker_type (str): name of the visual tracker to use. see VisTracker for more details.
         keep_upper_height_ratio (float): float between 0.0 and 1.0 that determines the ratio of height of the object
                                          to track to the total height of the object used for visual tracking.

    Returns:
        list: list of tracks.
    """
    if tracker_type == 'NONE':
        assert ttl == 1, "ttl should not be larger than 1 if no visual tracker is selected"

    tracks_active = []
    tracks_extendable = []
    tracks_finished = []
    frame_buffer = []

    for frame_num, detections_frame in enumerate(tqdm(detections), start=1):
        # load frame and put into buffer
        frame_path = frames_path.format(frame_num)
        frame = cv2.imread(frame_path)
        assert frame is not None, "could not read '{}'".format(frame_path)
        frame_buffer.append(frame)
        if len(frame_buffer) > ttl + 1:
            frame_buffer.pop(0)

        # apply low threshold to detections
        dets = [det for det in detections_frame if det['score'] >= sigma_l]

        track_ids, det_ids = associate(tracks_active, dets, sigma_iou)
        updated_tracks = []
        for track_id, det_id in zip(track_ids, det_ids):
            tracks_active[track_id]['bboxes'].append(dets[det_id]['bbox'])
            tracks_active[track_id]['max_score'] = max(
                tracks_active[track_id]['max_score'], dets[det_id]['score'])
            tracks_active[track_id]['classes'].append(dets[det_id]['class'])
            tracks_active[track_id]['det_counter'] += 1

            if tracks_active[track_id]['ttl'] != ttl:
                # reset visual tracker if active
                tracks_active[track_id]['ttl'] = ttl
                tracks_active[track_id]['visual_tracker'] = None

            updated_tracks.append(tracks_active[track_id])

        tracks_not_updated = [
            tracks_active[idx] for idx in set(range(len(
                tracks_active))).difference(set(track_ids))
        ]

        for track in tracks_not_updated:
            if track['ttl'] > 0:
                if track['ttl'] == ttl:
                    # init visual tracker
                    track['visual_tracker'] = VisTracker(
                        tracker_type, track['bboxes'][-1], frame_buffer[-2],
                        keep_upper_height_ratio)
                # viou forward update
                ok, bbox = track['visual_tracker'].update(frame)

                if not ok:
                    # visual update failed, track can still be extended
                    tracks_extendable.append(track)
                    continue

                track['ttl'] -= 1
                track['bboxes'].append(bbox)
                updated_tracks.append(track)
            else:
                tracks_extendable.append(track)

        # update the list of extendable tracks. tracks that are too old are moved to the finished_tracks. this should
        # not be necessary but may improve the performance for large numbers of tracks (e.g. for MOT19)
        tracks_extendable_updated = []
        for track in tracks_extendable:
            if track['start_frame'] + len(
                    track['bboxes']) + ttl - track['ttl'] >= frame_num:
                tracks_extendable_updated.append(track)
            elif track['max_score'] >= sigma_h and track[
                    'det_counter'] >= t_min:
                tracks_finished.append(track)
        tracks_extendable = tracks_extendable_updated

        new_dets = [
            dets[idx] for idx in set(range(len(dets))).difference(set(det_ids))
        ]
        dets_for_new = []

        for det in new_dets:
            finished = False
            # go backwards and track visually
            boxes = []
            vis_tracker = VisTracker(tracker_type, det['bbox'], frame,
                                     keep_upper_height_ratio)

            for f in reversed(frame_buffer[:-1]):
                ok, bbox = vis_tracker.update(f)
                if not ok:
                    # can not go further back as the visual tracker failed
                    break
                boxes.append(bbox)

                # sorting is not really necessary but helps to avoid different behaviour for different orderings
                # preferring longer tracks for extension seems intuitive, LAP solving might be better
                for track in sorted(tracks_extendable,
                                    key=lambda x: len(x['bboxes']),
                                    reverse=True):

                    offset = track['start_frame'] + len(
                        track['bboxes']) + len(boxes) - frame_num
                    # association not optimal (LAP solving might be better)
                    # association is performed at the same frame, not adjacent ones
                    if 1 <= offset <= ttl - track['ttl'] and iou(
                            track['bboxes'][-offset], bbox) >= sigma_iou:
                        if offset > 1:
                            # remove existing visually tracked boxes behind the matching frame
                            track['bboxes'] = track['bboxes'][:-offset + 1]
                        track['bboxes'] += list(reversed(boxes))[1:]
                        track['bboxes'].append(det['bbox'])
                        track['max_score'] = max(track['max_score'],
                                                 det['score'])
                        track['classes'].append(det['class'])
                        track['ttl'] = ttl
                        track['visual_tracker'] = None

                        tracks_extendable.remove(track)
                        if track in tracks_finished:
                            del tracks_finished[tracks_finished.index(track)]
                        updated_tracks.append(track)

                        finished = True
                        break
                if finished:
                    break
            if not finished:
                dets_for_new.append(det)

        # create new tracks
        new_tracks = [{
            'bboxes': [det['bbox']],
            'max_score': det['score'],
            'start_frame': frame_num,
            'ttl': ttl,
            'classes': [det['class']],
            'det_counter': 1,
            'visual_tracker': None
        } for det in dets_for_new]
        tracks_active = []
        for track in updated_tracks + new_tracks:
            if track['ttl'] == 0:
                tracks_extendable.append(track)
            else:
                tracks_active.append(track)

    # finish all remaining active and extendable tracks
    tracks_finished = tracks_finished + \
                      [track for track in tracks_active + tracks_extendable
                       if track['max_score'] >= sigma_h and track['det_counter'] >= t_min]

    # remove last visually tracked frames and compute the track classes
    for track in tracks_finished:
        if ttl != track['ttl']:
            track['bboxes'] = track['bboxes'][:-(ttl - track['ttl'])]
        track['class'] = max(set(track['classes']), key=track['classes'].count)

        del track['visual_tracker']

    return tracks_finished
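
A hedged usage sketch for track_viou; the paths, frame-name pattern, and tracker name below are assumptions to adapt to your data (util.load_mot is the loader mentioned in the docstring, and VisTracker selects an OpenCV visual tracker by name):

from util import load_mot  # assumed helper from the same repository

detections = load_mot('data/MOT17-04/det/det.txt')
tracks = track_viou('data/MOT17-04/img1/{:06d}.jpg', detections,
                    sigma_l=0.3, sigma_h=0.5, sigma_iou=0.5, t_min=4,
                    ttl=3, tracker_type='MEDIANFLOW', keep_upper_height_ratio=1.0)
print(len(tracks), 'tracks finished')
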
Example #15
def main():
    args = tracker_args()
    frame_count = 0
    vid_loc = './data/video.mov'
    print('Track-before-Detect with Neural Networks')
    print('[1] Creating Frame Data')
    input_data(vid_loc)
    #create bootstrap detection
    print('[2] Creating Bootstrap Detection')
    detections = detection('./data/img_frames/1' + '.png', FAR, 0)
    #enter loop where tracker is fed detections, detector fed tracks, and threshold evaluated as this changes
    print('[3] Run T-b-D over frames')
    while (frame_count < 100):
        #grab a set amt. of frames from ./data/frames and move it to args.frames_path
        #delete args.frame_path first
        print('[*] Stage Frame Cluster')
        for root, dirs, files in os.walk(args.frames_path):
            for file in files:
                os.remove(os.path.join(root, file))
        # grab the next 100 frames from ./data/img_frames, offset by frame_count
        for i in range(100):
            src = './data/img_frames/' + str(i + frame_count) + '.png'
            copyfile(src,
                     args.frames_path + '/' + str(i + frame_count) + '.png')
        frame_count += 100
        #call tracks now
        print('[*] Track Frame Cluster')
        #testing here
        print(detections)
        tracks = tracker(args, detections)
        #call detection() on last image in args.frames_path
        print('[*] Detect Frame Cluster')
        detections = detection(
            args.frames_path + '/' + str(frame_count - 1) + '.png', FAR,
            frame_count - 1)
        #call detect and then mesh detect coord. over track coord. and compare, avoid detect() interepret time.
        #array of detection bounding boxes
        print('[*] Evaluate T-b-D Performance')
        dt_boxes = []
        for dt in range(len(detections)):
            temp = []
            temp.append(detections[dt][2])
            temp.append(detections[dt][3])
            temp.append(detections[dt][4])
            temp.append(detections[dt][5])
            dt_boxes.append(temp)
        ious = []
        if (len(tracks) == 0):
            print('There are no tracks')
        else:
            for track in range(len(tracks)):
                track_cmp = []
                #track_cmp.append(tracks[track][2])
                print(tracks[track])
                track_cmp.append(tracks[track]['x'])
                track_cmp.append(tracks[track]['y'])
                track_cmp.append(tracks[track]['w'])
                track_cmp.append(tracks[track]['h'])
                for dt in range(len(dt_boxes)):
                    # compare the current track against all detection boxes
                    tx1, ty1 = track_cmp[0], track_cmp[1]
                    tx2, ty2 = track_cmp[0] + track_cmp[2], track_cmp[1] + track_cmp[3]
                    bbox1 = np.asarray([tx1, ty1, tx2, ty2])
                    dx1, dy1 = float(dt_boxes[dt][0]), float(dt_boxes[dt][1])
                    dx2 = float(dt_boxes[dt][0]) + float(dt_boxes[dt][2])
                    dy2 = float(dt_boxes[dt][1]) + float(dt_boxes[dt][3])
                    bbox2 = np.asarray([dx1, dy1, dx2, dy2])
                    intovunion = iou(bbox1, bbox2)
                    if intovunion > args.sigma_l:
                        ious.append(intovunion)
        # call ev_thresh() to see if FAR should be updated
        print('[*] Update False Alarm Rate Threshold')
        ev_thresh(len(ious), len(tracks))
    print('Track-before-Detect Complete!')