Example #1
def task1_2(paths, ovthresh=0.5):
    gt = read_annotations(paths['gt'], grouped=True, use_parked=True)
    det = read_detections(paths['det'])

    rec, prec, ap = voc_eval(det, gt, ovthresh, use_confidence=True)
    print('AP:', ap)

    return
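A minimal calling sketch (assumed usage; the file paths are placeholders for the project's ground-truth and detection files):

paths = {
    'gt': 'data/annotations.xml',    # hypothetical ground-truth file
    'det': 'data/detections.txt',    # hypothetical detections file
}
task1_2(paths, ovthresh=0.5)  # prints the AP at IoU threshold 0.5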
Example #2
def evaluate_mtmc(reid_path, data_path):
    accumulator = mm.MOTAccumulator(auto_id=True)
    for cam in sorted(os.listdir(data_path)):

        vidcap = cv2.VideoCapture(os.path.join(data_path, cam, 'vdo.avi'))
        num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        gt = read_detections(os.path.join(data_path, cam, 'gt/gt.txt'),
                             grouped=True)
        det_reid = read_detections(os.path.join(reid_path, cam,
                                                'overlap_reid_detections.txt'),
                                   grouped=True)

        accumulator = eval_tracking(num_frames, gt, det_reid, accumulator, cam)

    mh = mm.metrics.create()
    return mh.compute(accumulator,
                      metrics=['precision', 'recall', 'idp', 'idr', 'idf1'],
                      name='acc')
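A hedged driver sketch, assuming mm is the motmetrics package and the directory layout matches the paths built inside the function (the sequence folders are placeholders):

summary = evaluate_mtmc(reid_path='results/reid/S03', data_path='data/S03')
print(summary)  # pandas DataFrame with precision, recall and the ID metrics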
Example #3
def task1_1(paths, show, noise_params):

    gt = read_annotations(paths['gt'], grouped=False, use_parked=True)
    det = read_detections(paths['det'], grouped=True)

    grouped_gt = group_by_frame(gt)

    # if we want to replicate results
    # np.random.seed(10)

    cap = cv2.VideoCapture(paths['video'])
    # cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)  # to start from frame #frame_id
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if noise_params['add']:
        noisy_gt = add_noise(gt, noise_params, num_frames)
        grouped_noisy_gt = group_by_frame(noisy_gt)

    for frame_id in range(num_frames):
        _, frame = cap.read()

        if show['gt']:
            frame = draw_boxes(frame, grouped_gt[frame_id], color='g')

        if show['det']:
            frame = draw_boxes(frame, det[frame_id], color='b', det=True)

        if show['noisy'] and noise_params['add']:  # noisy boxes exist only if noise was added
            frame = draw_boxes(frame, grouped_noisy_gt[frame_id], color='r')

        cv2.imshow('frame', frame)
        if cv2.waitKey() == ord('q'):  # press q to quit
            break

    cv2.destroyAllWindows()

    return
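A sketch of the option dicts the function expects; the keys are inferred from the accesses above, and the noise fields mirror the fuller dict shown in Example #8:

show = {'gt': True, 'det': True, 'noisy': False}  # which box sets to draw
noise_params = {'add': False}  # when True, add_noise() also needs the fields from Example #8
task1_1(paths, show, noise_params)  # paths needs 'gt', 'det' and 'video' entries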
Example #4
def create_csv_patches(video_path, det_path, patches_path, sequence, camera, writer):
    vidcap = cv2.VideoCapture(video_path)
    num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Total frames: ", num_frames)

    det = read_detections(det_path, grouped=True)

    for frame_id in tqdm(range(num_frames), desc='Creating patches of seq ' + sequence + '/' + camera):
        _, frame = vidcap.read()
        if frame_id in det:
            det_bboxes = det[frame_id]

            for box in det_bboxes:
                # crop_img = frame[int(box.ytl):int(box.ybr), int(box.xtl):int(box.xbr)]
                # cv2.imwrite(patches_path + f"/{str(box.id)}_{sequence}_{camera}_{str(frame_id)}.jpg",
                #             crop_img.astype(int))

                filename = str(box.id) + '_' + sequence + '_' + camera + '_' + str(frame_id) + '.jpg'
                writer.writerow([filename, str(box.id), sequence, camera, str(frame_id), str(box.xtl), str(box.ytl),
                                 str(box.xbr), str(box.ybr), str(box.center[0]), str(box.center[1])])

    return
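The writer argument is only used through writer.writerow(...), so a csv.writer is the natural fit; a minimal calling sketch under that assumption (paths and the header row are placeholders):

import csv

with open('patches/metadata.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['filename', 'id', 'sequence', 'camera', 'frame',
                     'xtl', 'ytl', 'xbr', 'ybr', 'cx', 'cy'])  # assumed header
    create_csv_patches('data/S03/c010/vdo.avi', 'data/S03/c010/det/det.txt',
                       'patches', 'S03', 'c010', writer)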
Example #5
        print('Train frames: ', train_len)

        backSub = cv2.bgsegm.createBackgroundSubtractorMOG(history=100, nmixtures=2, backgroundRatio=0.7)
        backSub = train_sota(vidcap, train_len, backSub)

        vidcap = cv2.VideoCapture(params['video_path'])
        test_len = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        print('Test frames: ', test_len)

        ap, detections = eval_sota(vidcap, test_len, backSub, params, init_frame=0, return_detections=True)
        print('AP: ', ap)

        save_detections(detections, det_path, frame_count)

        # gt = read_annotations(params['gt_path'], grouped=True, use_parked=True)
        gt = read_detections(os.path.join(args.data_path, args.seq, 'gt/gt.txt'), grouped=True)
        gt = fill_gt(gt, frame_count)
        det = read_detections(det_path, grouped=False)

        rec, prec, ap = voc_eval(det, gt, 0.5, use_confidence=True)
        print('AP from loaded detections: ', ap)

    else:
        model = 'COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml'
        print('[INFO] Using model: ', model)

        weights_path = './W3/results/task1_2_all/faster_rcnn/lr_0_001_iter_5000_batch_512'

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(model))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
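The fragment stops right after the score threshold; a hedged sketch of how such a detectron2 config is typically completed (the checkpoint file name is an assumption):

from detectron2.engine import DefaultPredictor

cfg.MODEL.WEIGHTS = os.path.join(weights_path, 'model_final.pth')  # assumed checkpoint name
predictor = DefaultPredictor(cfg)
# outputs = predictor(frame)  # per-frame inference; boxes live in outputs['instances']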
Example #6
def eval_sota(vidcap,
              test_len,
              backSub,
              params,
              init_frame=535,
              return_detections=False):
    print("Evaluating SOTA")
    # gt = read_annotations(params["gt_path"], grouped=True, use_parked=False)
    gt = read_detections(params['gt_path'], grouped=True)
    frame_id = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))

    detections = []
    annotations = {}

    # read the ROI mask once; it is constant across frames
    roi = cv2.imread(params['roi_path'], cv2.IMREAD_GRAYSCALE) / 255

    for t in tqdm(range(test_len)):

        _, frame = vidcap.read()

        segmentation = backSub.apply(frame)

        segmentation = segmentation * roi
        segmentation = postprocess_fg(segmentation)
        det_bboxes = fg_bboxes(segmentation, frame_id, params)
        detections += det_bboxes

        segmentation = cv2.cvtColor(segmentation.astype(np.uint8),
                                    cv2.COLOR_GRAY2RGB)

        gt_bboxes = []
        if frame_id in gt:
            gt_bboxes = gt[frame_id]
        annotations[frame_id] = gt_bboxes

        text_bboxes = "nbb"  # "no bounding boxes" tag for the saved filename
        if params['show_boxes']:
            segmentation = draw_boxes(image=segmentation,
                                      boxes=gt_bboxes,
                                      color='g',
                                      linewidth=3)
            cv2.rectangle(frame, (10, 2), (120, 20), (255, 255, 255), -1)
            cv2.putText(
                frame, params["sota_method"] + " - " +
                str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            segmentation = draw_boxes(image=segmentation,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3)
            cv2.imshow("Segmentation mask with detected boxes and gt",
                       segmentation)
            cv2.imshow('Frame', frame)
            text_bboxes = ""

        if params['save_results'] and 1169 <= frame_id < 1229:  # if frame_id >= 535 and frame_id < 550
            cv2.imwrite(
                params['results_path'] +
                f"seg_{frame_id}_pp_{params['sota_method']}_{text_bboxes}.bmp",
                segmentation.astype(np.uint8))

            if cv2.waitKey() == ord('q'):  # press q to quit
                break

        if params['save_results'] and 1169 <= frame_id < 1229:

            segmentation = draw_boxes(image=segmentation,
                                      boxes=gt_bboxes,
                                      color='g',
                                      linewidth=3)
            segmentation = draw_boxes(image=segmentation,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3)

            cv2.imwrite(f"seg/{str(frame_id)}.png", segmentation.astype(int))

        frame_id += 1

    detections = temporal_filter(group_by_frame(detections),
                                 init=init_frame,
                                 end=frame_id)
    rec, prec, ap = voc_evaluation.voc_eval(detections,
                                            annotations,
                                            ovthresh=0.5,
                                            use_confidence=False)

    if return_detections:
        return ap, group_by_frame(detections)
    else:
        return ap
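Example #5 pairs this with a train_sota helper that is not shown here; a plausible sketch of what it does (an assumption, not the authors' code):

def train_sota(vidcap, train_len, backSub):
    # warm up the background model on the first train_len frames
    for _ in range(train_len):
        ret, frame = vidcap.read()
        if not ret:
            break
        backSub.apply(frame)  # updates the model; the returned mask is discarded
    return backSub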
Example #7
def eval_tracking_MaximumOverlap(vidcap, test_len, params, opticalFlow=None):

    print("Evaluating Tracking")
    gt = read_annotations(params["gt_path"], grouped=True, use_parked=True)
    det = read_detections(params["det_path"], grouped=True, confidenceThr=0.4)
    frame_id = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))
    first_frame_id = frame_id
    print('Starting at frame', frame_id)

    detections = []
    annotations = {}
    list_positions = {}

    center_seen_last5frames = {}
    id_seen_last5frames = {}

    tracking = Tracking()
    det_bboxes_old = -1

    old_frame = None

    # Create an accumulator that will be updated during each frame
    accumulator = mm.MOTAccumulator(auto_id=True)

    for t in tqdm(range((train_len + test_len) - first_frame_id)):

        _, frame = vidcap.read()
        # cv2.imshow('Frame', frame)
        # keyboard = cv2.waitKey(30)

        flow_aux = np.zeros(shape=(frame.shape[0], frame.shape[1], 2))

        if params['use_optical_flow'] and old_frame is not None:
            for d in det_bboxes_old:
                flow = computeOpticalFlow(old_frame,
                                          frame,
                                          d,
                                          option=params['optical_flow_option'])
                d.flow = [flow[0], -flow[1]]

                flow_aux[int(d.ytl):int(d.ybr),
                         int(d.xtl):int(d.xbr), :] = flow

            plot_flow(
                old_frame[:, :, [2, 1, 0]],
                flow_aux,
                step=16,
                fname='/home/oscar/workspace/master/modules/m6/project/mcv-m6-2021-team4/W4/OF_BB/'
                      + f"tracking_{frame_id}_IoU.png")

        det_bboxes = det[frame_id]
        det_bboxes = tracking.set_frame_ids(det_bboxes, det_bboxes_old)
        detections += det_bboxes

        id_seen = []
        gt_bboxes = []
        if frame_id in gt:
            gt_bboxes = gt[frame_id]
        annotations[frame_id] = gt_bboxes

        objs = [bbox.center for bbox in gt_bboxes]
        hyps = [bbox.center for bbox in det_bboxes]

        for object_bb in det_bboxes:
            if t < 5:
                id_seen_last5frames[object_bb.id] = object_bb.id
                center_seen_last5frames[object_bb.id] = object_bb.center

            if object_bb.id in list_positions:
                list_positions[object_bb.id].append(
                    [int(x) for x in object_bb.center])
            else:
                id_seen.append(object_bb)
                list_positions[object_bb.id] = [[int(x) for x in object_bb.center]]

        # to detect parked cars: if a newly seen box sits close to a position
        # seen in the first 5 frames, reuse that old id
        for bbox in id_seen:
            for idx in id_seen_last5frames:
                if idx != bbox.id:
                    center = [center_seen_last5frames[idx]]
                    mse = np.square(
                        np.subtract(np.array(center),
                                    np.array([int(x)
                                              for x in bbox.center]))).mean()
                    if mse < 300:
                        bbox.id = idx

        accumulator.update(
            [bbox.id for bbox in gt_bboxes],   # ground-truth ids in this frame
            [bbox.id for bbox in det_bboxes],  # detector hypothesis ids in this frame
            mm.distances.norm2squared_matrix(objs, hyps)  # pairwise gt-to-hypothesis distances
        )

        if params['show_boxes']:
            drawed_frame_aux = draw_boxes(image=frame,
                                          boxes=det_bboxes,
                                          color='r',
                                          linewidth=3,
                                          det=False,
                                          boxIds=True,
                                          tracker=list_positions)
            drawed_frame = deepcopy(drawed_frame_aux)
            if det_bboxes_old != -1:
                drawed_frame = draw_boxes_old(image=drawed_frame,
                                              boxes=det_bboxes_old,
                                              color='r',
                                              linewidth=3,
                                              det=False,
                                              boxIds=True,
                                              tracker=list_positions)
            cv2.rectangle(drawed_frame, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame, str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)),
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            cv2.imshow('Frame', drawed_frame)
            cv2.waitKey(30)
            cv2.imwrite(
                params['results_path'] + f"tracking_{frame_id}_IoU.jpg",
                drawed_frame.astype(np.uint8))

            drawed_frame2 = deepcopy(drawed_frame_aux)
            if det_bboxes_old != -1:
                drawed_frame2 = draw_boxes_old(image=drawed_frame2,
                                               boxes=det_bboxes_old,
                                               color='r',
                                               linewidth=3,
                                               det=False,
                                               boxIds=True,
                                               tracker=list_positions,
                                               shifted=True)
            cv2.rectangle(drawed_frame2, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame2,
                        str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            cv2.imshow('Frame', drawed_frame2)
            cv2.waitKey(30)
            cv2.imwrite(
                './W4/OF_shifted_BB/' + f"tracking_{frame_id}_IoU.jpg",
                drawed_frame2.astype(np.uint8))

        if params['save_results'] and (355 + 535) <= frame_id < (410 + 535):  # if frame_id >= 535 and frame_id < 550
            drawed_frame = frame
            drawed_frame = draw_boxes(image=drawed_frame,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3,
                                      det=False,
                                      boxIds=True,
                                      tracker=list_positions)
            if det_bboxes_old != -1:
                drawed_frame = draw_boxes_old(image=drawed_frame,
                                              boxes=det_bboxes_old,
                                              color='r',
                                              linewidth=3,
                                              det=False,
                                              boxIds=True,
                                              tracker=list_positions)
            cv2.rectangle(drawed_frame, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame, str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)),
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

            cv2.imwrite(
                params['results_path'] + f"tracking_{frame_id}_IoU.jpg",
                drawed_frame.astype(np.uint8))

        frame_id += 1
        old_frame = frame
        det_bboxes_old = det_bboxes

    mh = mm.metrics.create()
    summary = mh.compute(accumulator,
                         metrics=['precision', 'recall', 'idp', 'idr', 'idf1'],
                         name='acc')
    print(summary)
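A self-contained sketch of the motmetrics accumulator pattern used above, stripped of the tracking logic (toy ids and 2-D centers, so it runs standalone):

import motmetrics as mm

acc = mm.MOTAccumulator(auto_id=True)
# frame 1: two ground-truth objects, two hypotheses
acc.update([1, 2], [1, 2],
           mm.distances.norm2squared_matrix([[10, 10], [50, 50]],
                                            [[12, 11], [48, 52]]))
# frame 2: the second hypothesis is missing
acc.update([1, 2], [1],
           mm.distances.norm2squared_matrix([[11, 11], [51, 51]], [[11, 12]]))

mh = mm.metrics.create()
print(mh.compute(acc, metrics=['precision', 'recall', 'idf1'], name='acc'))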
Example #8
def task2(gt_path, det_path, video_path, results_path):
    plot_frames_path = os.path.join(results_path, 'plot_frames/')
    video_frames_path = os.path.join(results_path, 'video_frames/')

    print(plot_frames_path)

    # If folder doesn't exist -> create it
    os.makedirs(plot_frames_path, exist_ok=True)
    os.makedirs(video_frames_path, exist_ok=True)

    show_det = True
    show_noisy = False

    gt = read_annotations(gt_path, grouped=False, use_parked=True)
    det = read_detections(det_path, grouped=True)

    grouped_gt = group_by_frame(gt)

    noise_params = {
        'add': False,
        'drop': 0.0,
        'generate_close': 0.0,
        'generate_random': 0.0,
        'type': 'specific',  # options: 'specific', 'gaussian', None
        'std': 40,  # pixels
        'position': False,
        'size': True,
        'keep_ratio': True
    }

    if noise_params['add']:
        noisy_gt = add_noise(gt, noise_params)
        grouped_noisy_gt = group_by_frame(noisy_gt)

    cap = cv2.VideoCapture(video_path)
    # cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)  # to start from frame #frame_id
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    iou_list = {}

    for frame_id in range(20):  # only the first 20 frames are processed
        _, frame = cap.read()

        frame = draw_boxes(frame, grouped_gt[frame_id], color='g')

        if show_det:
            frame = draw_boxes(frame, det[frame_id], color='b', det=True)
            frame_iou = mean_iou(det[frame_id], grouped_gt[frame_id], sort=True)

        if show_noisy:
            frame = draw_boxes(frame, grouped_noisy_gt[frame_id], color='r')
            frame_iou = mean_iou(grouped_noisy_gt[frame_id], grouped_gt[frame_id])

        iou_list[frame_id] = frame_iou

        plot = plot_iou(iou_list, num_frames)

        '''
        if show:
            fig.show()
            cv2.imshow('frame', frame)
            if cv2.waitKey() == 113:  # press q to quit
                break
        '''
        imageio.imwrite(video_frames_path + '{}.png'.format(frame_id), frame)
        plot.savefig(plot_frames_path + 'iou_{}.png'.format(frame_id))
        plt.close(plot)

    save_gif(plot_frames_path, results_path + 'iou.gif')
    save_gif(video_frames_path, results_path + 'bbox.gif')
    # cv2.destroyAllWindows()

    return
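save_gif is not shown in these examples; a plausible implementation sketch with imageio (an assumption, consistent with the frames being written via imageio.imwrite):

import os
import imageio

def save_gif(frames_dir, gif_path, frame_duration=0.1):
    # assumed helper: stitch the numbered PNG frames into a gif, in frame order
    files = sorted(os.listdir(frames_dir),
                   key=lambda f: int(''.join(filter(str.isdigit, f))))
    frames = [imageio.imread(os.path.join(frames_dir, f)) for f in files]
    imageio.mimsave(gif_path, frames, duration=frame_duration)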
Example #9
    return mh.compute(accumulator,
                      metrics=['precision', 'recall', 'idp', 'idr', 'idf1'],
                      name='acc')


if __name__ == "__main__":
    args = parse_args()
    cams_path = os.path.join(args.reid_path, args.seq)

    print('[INFO] Evaluating MTMC tracking, sequence ', args.seq)
    accumulator = mm.MOTAccumulator(auto_id=True)
    for cam in sorted(os.listdir(cams_path)):

        vidcap = cv2.VideoCapture(
            os.path.join(args.data_path, args.seq, cam, 'vdo.avi'))
        num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        gt = read_detections(os.path.join(args.data_path, args.seq, cam,
                                          'gt/gt.txt'),
                             grouped=True)
        det_reid = read_detections(os.path.join(cams_path, cam,
                                                'overlap_reid_detections.txt'),
                                   grouped=True)

        accumulator = eval_tracking(num_frames, gt, det_reid, accumulator, cam)

    mh = mm.metrics.create()
    summary = mh.compute(accumulator,
                         metrics=['precision', 'recall', 'idp', 'idr', 'idf1'],
                         name='acc')
    print(summary)
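parse_args is not shown either; a minimal sketch covering the three attributes this driver reads (reid_path, data_path, seq), assumed to be plain argparse:

import argparse

def parse_args():
    # assumed argument set, inferred from the attribute accesses above
    parser = argparse.ArgumentParser(description='Evaluate MTMC tracking')
    parser.add_argument('--reid_path', default='results/reid')
    parser.add_argument('--data_path', default='data')
    parser.add_argument('--seq', default='S03')
    return parser.parse_args()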
Example #10
        'c011': 84,  # 8.457s * 10 fps 
        'c012': 59,  # 5.879s * 10 fps 
        'c013': 0,
        'c014': 50,  # 5.042s * 10 fps 
        'c015': 68  # 8.492s * 8 fps 
    }

    cam_dicts = {}

    for cam in cams:
        vidcap = cv2.VideoCapture(params['data_path'] +
                                  "{}\\vdo.avi".format(cam))
        #vidcap.set(cv2.CAP_PROP_POS_FRAMES, frame_offset[cam])

        det = read_detections(params['data_path'] +
                              "{}\\overlap_reid_detections.txt".format(cam),
                              grouped=True,
                              confidenceThr=0.4)

        cam_dicts[cam] = [
            vidcap, int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)), det
        ]

        print("Cam: {}, Total frames: {}, fps: {}, frame_offset: {}".format(
            cam, cam_dicts[cam][1], 10 / fps_ratio[cam], frame_offset[cam]))

    frame_id = 0
    frames = {}
    last_frame = np.zeros(shape=(1080, 1920, 3))
    ids = []
    while True:
        if frame_id < 400:
Example #11
    return filtered_detections



if __name__ == '__main__':
    args = parse_args()

    det_path = 'results/task1_1/' + args.model + '/detections.txt'

    if args.detect:
        det_path = model_detect[args.model](args.video_path)

    if args.visualize:
        gt = read_annotations(args.gt_path, grouped=True, use_parked=True)
        det = read_detections(det_path, grouped=False)

        det = group_by_frame(filter_by_conf(det, conf_thr=args.min_conf))

        vidcap = cv2.VideoCapture(args.video_path)
        # vidcap.set(cv2.CAP_PROP_POS_FRAMES, initial_frame)  # to start from frame #frame_id
        num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        for frame_id in range(num_frames):
            _, frame = vidcap.read()

            if 1755 <= frame_id <= 1835:
                frame = draw_boxes(frame, gt[frame_id], color='g')
                frame = draw_boxes(frame, det[frame_id], color='b', det=True)

                cv2.imshow('frame', frame)