Example #1
def task1_1(paths, show, noise_params):
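    """Visualize ground-truth, detection and (optional) noisy-GT boxes frame by frame."""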

    gt = read_annotations(paths['gt'], grouped=False, use_parked=True)
    det = read_detections(paths['det'], grouped=True)

    grouped_gt = group_by_frame(gt)

    # if we want to replicate results
    # np.random.seed(10)

    cap = cv2.VideoCapture(paths['video'])
    # cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)  # to start from frame #frame_id
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if noise_params['add']:
        noisy_gt = add_noise(gt, noise_params, num_frames)
        grouped_noisy_gt = group_by_frame(noisy_gt)

    for frame_id in range(num_frames):
        ret, frame = cap.read()
        if not ret:
            break

        if show['gt']:
            frame = draw_boxes(frame, grouped_gt[frame_id], color='g')

        if show['det']:
            frame = draw_boxes(frame, det[frame_id], color='b', det=True)

        if show['noisy'] and noise_params['add']:  # noisy GT only exists when noise was added
            frame = draw_boxes(frame, grouped_noisy_gt[frame_id], color='r')

        cv2.imshow('frame', frame)
        if cv2.waitKey() == ord('q'):  # press q to quit
            break

    cv2.destroyAllWindows()

    return
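
# Note: the helpers used above (read_annotations, read_detections, group_by_frame,
# add_noise, draw_boxes) are defined elsewhere in the repo. For reference, a
# minimal sketch of what group_by_frame might look like -- assuming each box
# object exposes a `frame` attribute (hypothetical field name):
from collections import defaultdict

def group_by_frame_sketch(boxes):
    """Bucket a flat list of boxes into {frame_id: [boxes]} (illustrative only)."""
    grouped = defaultdict(list)
    for box in boxes:
        grouped[box.frame].append(box)  # `box.frame` is an assumed attribute
    return grouped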
Example #2
def eval_sota(vidcap,
              test_len,
              backSub,
              params,
              init_frame=535,
              return_detections=False):
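    """Run the `backSub` background-subtraction model over `test_len` frames and
    return the VOC average precision of the resulting detections (optionally the
    grouped detections as well)."""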
    print("Evaluating SOTA")
    # gt = read_annotations(params["gt_path"], grouped=True, use_parked=False)
    gt = read_detections(params['gt_path'], grouped=True)
    frame_id = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))

    detections = []
    annotations = {}

    # the ROI mask is constant, so read it once instead of on every frame
    roi = cv2.imread(params['roi_path'], cv2.IMREAD_GRAYSCALE) / 255

    for t in tqdm(range(test_len)):

        ret, frame = vidcap.read()
        if not ret:
            break

        segmentation = backSub.apply(frame)

        segmentation = segmentation * roi
        segmentation = postprocess_fg(segmentation)
        det_bboxes = fg_bboxes(segmentation, frame_id, params)
        detections += det_bboxes

        segmentation = cv2.cvtColor(segmentation.astype(np.uint8),
                                    cv2.COLOR_GRAY2RGB)

        gt_bboxes = []
        if frame_id in gt:
            gt_bboxes = gt[frame_id]
        annotations[frame_id] = gt_bboxes

        text_bboxes = "nbb"  #no bouning boxes
        if params['show_boxes']:
            segmentation = draw_boxes(image=segmentation,
                                      boxes=gt_bboxes,
                                      color='g',
                                      linewidth=3)
            cv2.rectangle(frame, (10, 2), (120, 20), (255, 255, 255), -1)
            cv2.putText(
                frame, params["sota_method"] + " - " +
                str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            segmentation = draw_boxes(image=segmentation,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3)
            cv2.imshow("Segmentation mask with detected boxes and gt",
                       segmentation)
            cv2.imshow('Frame', frame)
            text_bboxes = ""

        if params['save_results'] and 1169 <= frame_id < 1229:  # if frame_id >= 535 and frame_id < 550
            cv2.imwrite(
                params['results_path'] +
                f"seg_{frame_id}_pp_{params['sota_method']}_{text_bboxes}.bmp",
                segmentation.astype(np.uint8))

            segmentation = draw_boxes(image=segmentation,
                                      boxes=gt_bboxes,
                                      color='g',
                                      linewidth=3)
            segmentation = draw_boxes(image=segmentation,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3)

            cv2.imwrite(f"seg/{frame_id}.png", segmentation.astype(np.uint8))

        frame_id += 1

    detections = temporal_filter(group_by_frame(detections),
                                 init=init_frame,
                                 end=frame_id)
    rec, prec, ap = voc_evaluation.voc_eval(detections,
                                            annotations,
                                            ovthresh=0.5,
                                            use_confidence=False)

    if return_detections:
        return ap, group_by_frame(detections)
    else:
        return ap
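
# fg_bboxes is defined elsewhere in the repo; the usual approach is to run
# contour / connected-component analysis on the binary mask and emit one box
# per blob. A minimal sketch under that assumption (the `min_area` key is
# hypothetical, not necessarily in the repo's params):
def fg_bboxes_sketch(mask, frame_id, params):
    """Extract (frame_id, x1, y1, x2, y2) boxes from a binary foreground mask."""
    contours, _ = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w * h >= params.get('min_area', 0):  # skip tiny noise blobs
            boxes.append((frame_id, x, y, x + w, y + h))
    return boxes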
Example #3
def eval(vidcap, frame_size, mean, std, params):
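    """Run the configured background-estimation method over
    params['num_frames_eval'] frames and return the VOC average precision."""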
    gt = read_annotations(params['gt_path'], grouped=True, use_parked=False)
    init_frame = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))
    frame_id = init_frame
    detections = []
    annotations = {}

    # the ROI mask is constant, so read it once instead of on every frame
    roi = cv2.imread(params['roi_path'], cv2.IMREAD_GRAYSCALE) / 255

    for t in tqdm(range(params['num_frames_eval'])):
        ret, frame = vidcap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, color_space[params['color_space']][0])
        if params['color_space'] == 'H':
            H, S, V = np.split(frame, 3, axis=2)
            frame = np.squeeze(H)
        elif params['color_space'] == 'L':
            L, A, B = np.split(frame, 3, axis=2)
            frame = np.squeeze(L)
        elif params['color_space'] == 'CbCr':
            Y, Cb, Cr = np.split(frame, 3, axis=2)
            frame = np.dstack((Cb, Cr))

        segmentation, mean, std = bg_est_method[params['bg_est']](frame,
                                                                  frame_size,
                                                                  mean, std,
                                                                  params)
        segmentation = segmentation * roi
        segmentation = postprocess_fg(segmentation)

        if params['save_results'] and 1169 <= frame_id < 1229:  # if frame_id >= 535 and frame_id < 550
            cv2.imwrite(
                params['results_path'] +
                f"seg_{frame_id}_pp_{params['alpha']}.bmp",
                segmentation.astype(np.uint8))

        det_bboxes = fg_bboxes(segmentation, frame_id, params)
        detections += det_bboxes

        gt_bboxes = []
        if frame_id in gt:
            gt_bboxes = gt[frame_id]
        annotations[frame_id] = gt_bboxes

        if params['show_boxes']:
            seg = cv2.cvtColor(segmentation.astype(np.uint8),
                               cv2.COLOR_GRAY2RGB)
            seg_boxes = draw_boxes(image=seg,
                                   boxes=det_bboxes,
                                   color='r',
                                   linewidth=3)
            seg_boxes = draw_boxes(image=seg_boxes,
                                   boxes=gt_bboxes,
                                   color='g',
                                   linewidth=3)

            cv2.imshow("Segmentation mask with detected boxes and gt",
                       seg_boxes)
            if cv2.waitKey() == ord('q'):  # press q to quit
                break

        frame_id += 1

    detections = temporal_filter(group_by_frame(detections),
                                 init=init_frame,
                                 end=frame_id)
    rec, prec, ap = voc_evaluation.voc_eval(detections,
                                            annotations,
                                            ovthresh=0.5,
                                            use_confidence=False)

    return ap
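
# postprocess_fg is defined elsewhere; a common cleanup recipe is morphological
# opening (remove speckle noise) followed by closing (fill small holes). A
# minimal sketch with illustrative kernel sizes, not the repo's actual values:
def postprocess_fg_sketch(mask):
    """Denoise a binary foreground mask with open + close morphology."""
    mask = mask.astype(np.uint8)
    open_k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, open_k)    # drop speckles
    close_k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, close_k)  # fill small holes
    return mask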
Example #4
def task2(gt_path, det_path, video_path, results_path):
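    """Overlay GT vs. detection boxes on the video, track per-frame mean IoU,
    and export both streams as GIFs under results_path."""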
    plot_frames_path = os.path.join(results_path, 'plot_frames/')
    video_frames_path = os.path.join(results_path, 'video_frames/')

    print(f'Saving plot frames to {plot_frames_path}')

    # If folder doesn't exist -> create it
    os.makedirs(plot_frames_path, exist_ok=True)
    os.makedirs(video_frames_path, exist_ok=True)

    show_det = True
    show_noisy = False

    gt = read_annotations(gt_path, grouped=False, use_parked=True)
    det = read_detections(det_path, grouped=True)

    grouped_gt = group_by_frame(gt)

    noise_params = {
        'add': False,
        'drop': 0.0,
        'generate_close': 0.0,
        'generate_random': 0.0,
        'type': 'specific',  # options: 'specific', 'gaussian', None
        'std': 40,  # pixels
        'position': False,
        'size': True,
        'keep_ratio': True
    }

    if noise_params['add']:
        noisy_gt = add_noise(gt, noise_params)
        grouped_noisy_gt = group_by_frame(noisy_gt)

    cap = cv2.VideoCapture(video_path)
    # cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)  # to start from frame #frame_id
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    iou_list = {}

    for frame_id in range(20):  # only the first 20 frames are processed
        ret, frame = cap.read()
        if not ret:
            break

        frame_iou = 0.0  # default when neither detections nor noisy GT are evaluated

        frame = draw_boxes(frame, grouped_gt[frame_id], color='g')

        if show_det:
            frame = draw_boxes(frame, det[frame_id], color='b', det=True)
            frame_iou = mean_iou(det[frame_id], grouped_gt[frame_id], sort=True)

        if show_noisy and noise_params['add']:  # noisy GT only exists when noise was added
            frame = draw_boxes(frame, grouped_noisy_gt[frame_id], color='r')
            frame_iou = mean_iou(grouped_noisy_gt[frame_id], grouped_gt[frame_id])

        iou_list[frame_id] = frame_iou

        plot = plot_iou(iou_list, num_frames)

        '''
        if show:
            fig.show()
            cv2.imshow('frame', frame)
            if cv2.waitKey() == 113:  # press q to quit
                break
        '''
        imageio.imwrite(os.path.join(video_frames_path, f'{frame_id}.png'), frame)
        plot.savefig(os.path.join(plot_frames_path, f'iou_{frame_id}.png'))
        plt.close(plot)

    save_gif(plot_frames_path, os.path.join(results_path, 'iou.gif'))
    save_gif(video_frames_path, os.path.join(results_path, 'bbox.gif'))
    # cv2.destroyAllWindows()

    return
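
# mean_iou builds on plain box IoU; for reference, the standard computation for
# corner-format (x1, y1, x2, y2) boxes -- assuming that is the repo's box layout:
def iou_sketch(box_a, box_b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0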
Example #5


if __name__ == '__main__':
    args = parse_args()

    det_path = 'results/task1_1/' + args.model + '/detections.txt'

    if args.detect:
        det_path = model_detect[args.model](args.video_path)

    if args.visualize:
        gt = read_annotations(args.gt_path, grouped=True, use_parked=True)
        det = read_detections(det_path, grouped=False)

        det = group_by_frame(filter_by_conf(det, conf_thr=args.min_conf))

        vidcap = cv2.VideoCapture(args.video_path)
        # vidcap.set(cv2.CAP_PROP_POS_FRAMES, initial_frame)  # to start from frame #frame_id
        num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        for frame_id in range(num_frames):
            ret, frame = vidcap.read()
            if not ret:
                break

            if 1755 <= frame_id <= 1835:
                frame = draw_boxes(frame, gt[frame_id], color='g')
                frame = draw_boxes(frame, det[frame_id], color='b', det=True)

                cv2.imshow('frame', frame)
                if cv2.waitKey() == ord('q'):  # press q to quit
                    break

        vidcap.release()
        cv2.destroyAllWindows()
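
# filter_by_conf is defined elsewhere; a minimal sketch, assuming each
# detection exposes a `confidence` attribute (hypothetical field name):
def filter_by_conf_sketch(detections, conf_thr=0.5):
    """Keep only detections whose confidence meets the threshold."""
    return [d for d in detections if d.confidence >= conf_thr]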