# Example 1
def export_calib_session(session, args, json_name, bcp=False):
    """Export a camera-calibration JSON for one session.

    Loads per-box vanishing-point (VP) detections, keeps only detections with
    score above ``args.conf``, estimates the focal length and horizon from the
    filtered VPs, and saves the resulting calibration to the results folder.

    Args:
        session: session identifier (e.g. 'S09').
        args: namespace with at least ``bcp_path``/``bcs_path``, ``conf``
            and ``debug`` attributes.
        json_name: base name of the VP-detection JSON to load.
        bcp: when True use the BCP directory layout, otherwise BCS.

    Returns:
        Tuple ``(counts_vp1, counts_vp2)``: length-4 histograms of which
        scale had the lowest predicted variance per detection, or zeros when
        the detections carry no 'pred_vars' field.
    """
    if bcp:
        vp_json_path = os.path.join(args.bcp_path, 'data', session, '{}.json'.format(json_name))
        calib_json_path = os.path.join(args.bcp_path, 'results', session, 'system_{}_{}c.json'.format(json_name, args.conf))
    else:
        vp_json_path = os.path.join(args.bcs_path, 'dataset', session, '{}.json'.format(json_name))
        calib_json_path = os.path.join(args.bcs_path, 'results', session, 'system_{}_{}c.json'.format(json_name, args.conf))

    # Principal point (image center + 0.5): S09-S11 appear to use a
    # 1280x800 resolution, the rest 1920x1080 -- TODO confirm against dataset.
    if session in ('S09', 'S10', 'S11'):
        pp = np.array([640.5, 400.5])
    else:
        pp = np.array([960.5, 540.5])

    print("Starting for session {}".format(session))

    with open(vp_json_path, 'r') as f:
        vp_data = json.load(f)

    if bcp:
        vp_data = filter_vp(vp_data)

    # Guard against empty detection lists before probing vp_data[0].
    if vp_data and 'pred_vars' in vp_data[0].keys():
        pred_vars = np.array([item['pred_vars'] for item in vp_data if item['score'] > args.conf])
        best_scales_vp1 = np.argmin(pred_vars[:, :, 0], axis=-1)
        best_scales_vp2 = np.argmin(pred_vars[:, :, 1], axis=-1)
        # np.bincount with minlength=4 (unlike np.unique) always yields a
        # length-4 histogram even when a scale never wins, keeping the
        # return shape consistent with the zeros fallback below.
        counts_vp1 = np.bincount(best_scales_vp1, minlength=4)
        counts_vp2 = np.bincount(best_scales_vp2, minlength=4)
    else:
        counts_vp1 = np.zeros(4)
        counts_vp2 = np.zeros(4)

    # Keep only detections above the confidence threshold.
    vp1 = np.array([item['vp1'] for item in vp_data if item['score'] > args.conf])
    vp2 = np.array([item['vp2'] for item in vp_data if item['score'] > args.conf])

    # Focal length from the orthogonality constraint of the two VPs around
    # the principal point; VP pairs with a positive dot product yield NaN
    # and are dropped below.
    f = np.sqrt(-np.sum((vp1 - pp[np.newaxis, :]) * (vp2 - pp[np.newaxis, :]), axis=1))

    valid = ~np.isnan(f)
    vp1 = vp1[valid]
    vp2 = vp2[valid]
    f = f[valid]

    if args.debug:
        show_vps(np.array(vp1), np.array(vp2), session)

    med_f = np.nanmedian(f)

    # Horizon line through each VP pair: slope m, intercepts b1/b2.
    m = (vp1[:, 1] - vp2[:, 1]) / (vp1[:, 0] - vp2[:, 0])
    b1 = vp1[:, 1] - m * vp1[:, 0]
    b2 = vp2[:, 1] - m * vp2[:, 0]

    med_m = np.nanmedian(m)
    med_k = np.nanmedian(np.concatenate([b1, b2]))

    vp1_calib, vp2_calib = get_calib_vp(vp1, med_m, med_k, med_f, pp)

    if args.debug:
        show_vps(np.array([vp1_calib]), np.array([vp2_calib]), session)

    # License-plate endpoint pairs (when present) fix the metric scale.
    lp1 = np.array([item['lp1'] for item in vp_data if "lp1" in item and item['lp1'] is not None and item['score'] > args.conf])
    lp2 = np.array([item['lp2'] for item in vp_data if "lp2" in item and item['lp2'] is not None and item['score'] > args.conf])

    if len(lp1) == 0:
        calib_dict = get_calib_dict(vp1_calib, vp2_calib, pp)
        save(calib_json_path, calib_dict)
    else:
        projector = get_projector(vp1_calib, vp2_calib, pp)
        dists = [np.linalg.norm(projector(p1) - projector(p2))
                 for p1, p2 in zip(lp1, lp2)]
        med_dist = np.nanmedian(dists)
        # 0.52 presumably is the real-world license-plate width in meters
        # -- TODO confirm.
        scale = 0.52 / med_dist
        calib_dict = get_calib_dict(vp1_calib, vp2_calib, pp, scale=scale)
        save(calib_json_path, calib_dict)

    return counts_vp1, counts_vp2
def detect_session(detector, model_dir_name, data_path, session, args):
    """Run vanishing-point detection over one recorded session video.

    Reads pre-computed object detections for the session, advances the video
    ``args.skip`` frames per detection entry so the frame counters stay in
    sync with the detection JSON, feeds every box to the batch VP detector,
    and periodically dumps the accumulated output list to JSON.

    Args:
        detector: model passed through to BatchVPDetectorReg.
        model_dir_name: used in the output JSON file name.
        data_path: dataset root containing 'dataset/<session>/'.
        session: session identifier.
        args: namespace with ``mask``, ``resume``, ``skip``, ``debug`` and
            ``dump_every`` attributes.

    Raises:
        Exception: if the stored frame counter does not match the video
            position (wrong ``args.skip``).
    """
    batch_vp_detector = BatchVPDetectorReg(detector, args)

    print("Starting object detection for ", session)
    cap = cv2.VideoCapture(
        os.path.join(data_path, 'dataset', session, 'video.avi'))
    # DO NOT REMOVE OTHERWISE FRAMES WILL NOT SYNC
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)

    print("Video loaded!")

    if args.mask:
        json_path = os.path.join(data_path, 'dataset', session,
                                 'detections_mask.json')
        output_json_name = 'VPout_{}_r{}_mask.json'.format(
            model_dir_name, args.resume)
    else:
        json_path = os.path.join(data_path, 'dataset', session,
                                 'detections.json')
        output_json_name = 'VPout_{}_r{}.json'.format(model_dir_name,
                                                      args.resume)

    output_json_path = os.path.join(data_path, 'dataset', session,
                                    output_json_name)

    with open(json_path, 'r') as f:
        detection_data = json.load(f)

    total_box_count = sum(len(item['boxes']) for item in detection_data)
    print("Loaded {} bounding boxes for {} frames".format(
        total_box_count, len(detection_data)))
    box_cnt = 0

    start_time = time.time()

    try:
        for detection_cnt, detection in enumerate(detection_data):
            # Initialize before the read loop: with args.skip == 0 the loop
            # never runs and 'ret'/'frame' would otherwise be unbound
            # (NameError); treat that case as end-of-stream instead.
            ret, frame = False, None
            for _ in range(args.skip):
                ret, frame = cap.read()

            if not ret or frame is None:
                break

            frame_cnt_orig = cap.get(cv2.CAP_PROP_POS_FRAMES)
            frame_cnt = detection['frame_cnt']

            if frame_cnt != frame_cnt_orig:
                raise Exception(
                    "Frames from OD do not match frames now! Wrong skip param?")

            boxes = detection['boxes']
            scores = detection['scores']
            if args.mask:
                masks = detection['masks']

            if args.debug:
                show_debug(np.copy(frame), boxes)

            for i in range(len(boxes)):
                box_cnt += 1

                if args.mask:
                    batch_vp_detector.process(frame,
                                              boxes[i],
                                              scores[i],
                                              frame_cnt=frame_cnt,
                                              mask=masks[i])
                else:
                    batch_vp_detector.process(frame,
                                              boxes[i],
                                              scores[i],
                                              frame_cnt=frame_cnt)

            # Periodic checkpointing so partial results survive crashes.
            if args.dump_every != 0 and detection_cnt % args.dump_every == 0:
                print("Saving at detection ", detection_cnt)
                save(output_json_path, batch_vp_detector.output_list)

            remaining_seconds = (time.time() - start_time) / (box_cnt + 1) * (
                total_box_count - box_cnt)
            print('Frame {}, Box: {} / {}, ETA: {}'.format(
                frame_cnt, box_cnt, total_box_count,
                datetime.timedelta(seconds=remaining_seconds)))

        batch_vp_detector.finalize()
        print("Saving at box ", box_cnt)
        save(output_json_path, batch_vp_detector.output_list)
        print("Finished session: {} with {} boxes".format(session,
                                                          total_box_count))
    finally:
        # Release the capture even if the sync check above raises.
        cap.release()
def detect_session(detector, model_dir_name, data_path, session, args):
    """Run vanishing-point detection over per-frame images of one session.

    NOTE(review): this redefines ``detect_session`` declared earlier in the
    file; if both live in the same module the later definition shadows the
    earlier one — consider renaming (e.g. ``detect_session_bcp``).

    Reads pre-computed detections (optionally with masks), loads each frame
    image from disk, filters boxes via ``filter_boxes_bcp``, feeds every
    surviving box to the batch VP detector, and periodically dumps the
    accumulated output to JSON.

    Args:
        detector: model passed through to BatchVPDetectorReg.
        model_dir_name: used in the output JSON file name.
        data_path: dataset root containing 'data/<session>/' and
            'frames/<session>/'.
        session: session identifier.
        args: namespace with ``mask``, ``resume``, ``max_frames``, ``debug``
            and ``dump_every`` attributes.
    """
    batch_vp_detector = BatchVPDetectorReg(detector, args)

    print("Starting vp detection for ", session)

    if args.mask:
        output_json_name = 'VPout_{}_r{}_mask.json'.format(model_dir_name, args.resume)
        json_path = os.path.join(data_path, 'data', session, 'detections_mask.json')
    else:
        output_json_name = 'VPout_{}_r{}.json'.format(model_dir_name, args.resume)
        json_path = os.path.join(data_path, 'data', session, 'detections.json')

    with open(json_path, 'r') as f:
        detection_data = json.load(f)

    detection_data = detection_data[:args.max_frames]

    output_json_path = os.path.join(data_path, 'data', session, output_json_name)

    total_box_count = sum(len(item['boxes']) for item in detection_data)
    print("Loaded {} bounding boxes for {} frames".format(total_box_count, len(detection_data)))
    box_cnt = 0

    start_time = time.time()

    prev_edge = None
    masks = None

    for detection_cnt, detection in enumerate(detection_data):
        frame_filename = detection['filename']
        frame_path = os.path.join(data_path, 'frames', session, frame_filename)
        frame = cv2.imread(frame_path)
        if frame is None:
            # cv2.imread returns None on a missing/unreadable file; fail fast
            # with a clear message instead of an opaque crash downstream.
            raise FileNotFoundError("Could not read frame image: {}".format(frame_path))
        frame_cnt = detection['frame_cnt']

        boxes = detection['boxes']
        scores = detection['scores']
        del detection['boxes']
        del detection['scores']

        box_cnt += len(boxes)

        if args.mask:
            masks = detection['masks']
            del detection['masks']

        # NOTE(review): in non-mask mode 'masks' carries over whatever
        # filter_boxes_bcp returned for the previous frame — verify that
        # this carry-over is intended.
        boxes, scores, masks, prev_edge = filter_boxes_bcp(boxes, scores, frame, prev_edge, masks=masks)

        if args.debug:
            show_debug(np.copy(frame), boxes)

        for i in range(len(boxes)):
            if args.mask:
                batch_vp_detector.process(frame, boxes[i], scores[i], frame_cnt=frame_cnt, frame_filename=frame_filename, mask=masks[i])
            else:
                batch_vp_detector.process(frame, boxes[i], scores[i], frame_cnt=frame_cnt, frame_filename=frame_filename)

        # Periodic checkpointing so partial results survive crashes.
        if args.dump_every != 0 and detection_cnt % args.dump_every == 0:
            print("Saving at detection ", detection_cnt)
            save(output_json_path, batch_vp_detector.output_list)

        remaining_seconds = (time.time() - start_time) / (box_cnt + 1) * (total_box_count - box_cnt)
        print('{} : {}, Box: {} / {}, ETA: {}'.format(frame_cnt, frame_filename, box_cnt, total_box_count, datetime.timedelta(seconds=remaining_seconds)))

    batch_vp_detector.finalize()
    print("Saving at box ", box_cnt)
    save(output_json_path, batch_vp_detector.output_list)
    print("Finished session: {} with {} boxes".format(session, total_box_count))