def _draw_debug_boxes(mask, detections, gts):
    """Debug helper: show *mask* with detections (green) and GT (blue) boxes."""
    canvas = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    # NOTE(review): boxes are drawn as (top_left[1], top_left[0]) — the
    # project apparently stores coordinates as (row, col) and cv2 wants
    # (x, y); confirm against Detection's definition.
    for detection in detections:
        cv2.rectangle(
            canvas,
            (int(detection.top_left[1]), int(detection.top_left[0])),
            (int(detection.get_bottom_right()[1]),
             int(detection.get_bottom_right()[0])), (0, 255, 0), 5)
    for gt in gts:
        cv2.rectangle(canvas,
                      (int(gt.top_left[1]), int(gt.top_left[0])),
                      (int(gt.get_bottom_right()[1]),
                       int(gt.get_bottom_right()[0])), (255, 0, 0), 5)
    cv2.imshow('f', canvas)
    cv2.waitKey()


def week2_adaptive_hsv(video: Video, debug=False) -> Iterator[Frame]:
    """Adaptive Gaussian background subtraction in HSV space.

    The first 25% of the sequence trains a per-pixel background model; the
    following frames are segmented with an adaptive model (alpha=1.75,
    rho=0.01). Each iteration yields ``(image, mask, Frame)`` where the
    Frame carries both the found detections and the ground truth.

    :param video: input video sequence.
    :param debug: when True, show intermediate masks and boxes in a window.
    """
    # 2141 is the frame count of sequence S03/c010; the first quarter is
    # reserved for building the background model.
    train_frames = int(2141 * 0.25)

    model_mean, model_std = get_background_model(video,
                                                 train_frames,
                                                 total_frames=train_frames,
                                                 pixel_value=PixelValue.HSV)

    ground_truth = read_detections(
        '../datasets/AICity_data/train/S03/c010/gt/gt.txt')

    frame_id = train_frames
    # Region-of-interest mask: pixels outside the ROI are zeroed below.
    roi = cv2.cvtColor(
        cv2.imread('../datasets/AICity_data/train/S03/c010/roi.jpg'),
        cv2.COLOR_BGR2GRAY)
    for im, mask in gaussian_model_adaptive(video,
                                            train_frames,
                                            model_mean,
                                            model_std,
                                            total_frames=int(2141 * 0.10),
                                            pixel_value=PixelValue.HSV,
                                            alpha=1.75,
                                            rho=0.01):
        mask = mask & roi
        if debug:
            cv2.imshow('f', mask)
            cv2.waitKey()
        # Morphology: opening removes small noise specks, closing fills
        # holes inside the remaining blobs.
        mask = opening(mask, 7)
        if debug:
            cv2.imshow('f', mask)
            cv2.waitKey()
        mask = closing(mask, 35)
        if debug:
            cv2.imshow('f', mask)
            cv2.waitKey()
        mask, detections = find_boxes(mask)

        frame = Frame(frame_id)
        frame.detections = detections
        frame.ground_truth = ground_truth[frame_id]

        if debug:
            _draw_debug_boxes(mask, detections, ground_truth[frame_id])

        yield im, mask, frame

        frame_id += 1
# Example #2
def compute_map_gt_det(det_file, gt_file):
    """Compute mean average precision of a detection file against ground truth.

    :param det_file: path to the detections file (read_detections format).
    :param gt_file: path to the ground-truth file (same format).
    :returns: the mAP over all frames, ignoring class labels.
    """
    det_list = read_detections(det_file)
    gt_list = read_detections(gt_file)

    frames = []
    # enumerate instead of range(len(...)); indexing gt_list keeps the
    # original behavior (IndexError) if the two files disagree in length.
    for i, detections in enumerate(det_list):
        frame = Frame(i)
        frame.detections = detections
        frame.ground_truth = gt_list[i]
        frames.append(frame)

    # Classes are ignored so detector labels need not match the GT labels.
    return mean_average_precision(frames, ignore_classes=True)
def main():
    """Run Kalman tracking over mask-rcnn detections and print track ids."""
    start_frame = 1440
    end_frame = 1789

    gt = read_annotations('../annotations', start_frame, end_frame)

    alg = 'mask_rcnn'
    detections = read_detections('../datasets/AICity_data/train/S03/c010/det/det_{0}.txt'.format(alg))

    kalman = KalmanTracking()
    for frame_id in range(start_frame, end_frame):
        frame = Frame(frame_id)
        frame.detections = detections[frame_id]
        # Ground truth is indexed from 0, so shift by the first frame.
        frame.ground_truth = gt[frame_id - start_frame]
        kalman(frame)
        ids = seq(frame.detections).map(lambda d: d.id).to_list()
        print(ids)
# Example #4
def main():
    """Measure IoU between a hand-made ground-truth box and altered copies."""
    image = cv2.imread(
        "../datasets/AICity_data_S03_c010_1440/frame_1440.jpg")

    # Single car box: top-left corner plus size derived from its corners.
    top_left = [995, 410]
    box_width = 1241 - 995
    box_height = 605 - 410
    ground_truth = [Detection('', 'car', top_left, box_width, box_height)]

    # Detections are produced by perturbing the ground truth.
    frame = Frame(0, ground_truth)
    frame.detections = alter_detections(ground_truth)

    plot_frame(image, frame)
    iou = frame.get_detection_iou()
    iou_mean = frame.get_detection_iou_mean()
    print("IOU: ", iou, "IOU mean", iou_mean)
# Example #5
def week2_soa(video: Video, debug=False) -> Iterator[Frame]:
    """Background subtraction baseline using OpenCV's MOG2 subtractor.

    Yields ``(image, mask, Frame)`` per frame, where the Frame carries the
    detections found on the post-processed foreground mask together with
    the ground truth for that frame.

    :param video: input video sequence.
    :param debug: unused here; kept for signature parity with the other
        week2_* pipelines.
    """
    th = 150  # threshold on MOG2 output (also drops shadow pixels < 255)
    frame_id = 0
    fgbg = cv.createBackgroundSubtractorMOG2()

    ground_truth = read_detections(
        '../datasets/AICity_data/train/S03/c010/gt/gt.txt')
    # Region-of-interest mask: pixels outside the ROI are zeroed below.
    roi = cv.cvtColor(
        cv.imread('../datasets/AICity_data/train/S03/c010/roi.jpg'),
        cv.COLOR_BGR2GRAY)

    for im in tqdm(video.get_frames(),
                   total=2141,
                   file=sys.stdout,
                   desc='Training model...'):
        mask = fgbg.apply(im)
        mask[mask < th] = 0
        # BUGFIX(review): the original had `mask.astype(np.uint8) * 255`
        # here with the result discarded — a no-op statement, removed.

        mask = mask & roi

        # Opening removes speckle noise; closing fills holes in blobs.
        mask = opening(mask, 5)
        mask = closing(mask, 25)

        mask, detections = find_boxes(mask)

        frame = Frame(frame_id)
        frame.detections = detections
        frame.ground_truth = ground_truth[frame_id]

        frame_id += 1

        yield im, mask, frame