Example #1
def _load(pred_file, label_file, mask=True):
    pred_data = json_load(pred_file)
    print('Loaded predictions for %d frames from %s' % (len(pred_data), pred_file))

    label_data = json_load(label_file)
    print('Loaded labels for %d frames from %s' % (len(label_data), label_file))

    # find common subset
    common = list()
    for k, v in label_data.items():
        i = int(os.path.splitext(k)[0])
        common.append(
            [k, i]
        )
    print('Found %d frames in common' % len(common))

    pred_xyz, gt_xyz, gt_vis = list(), list(), list()
    for k, i in common:
        gt_vis.append(label_data[k]['vis3d'])
        gt_xyz.append(label_data[k]['xyz'])
        pred_xyz.append(pred_data[i]['kp_xyz'])
    pred_xyz, gt_xyz, gt_vis = np.array(pred_xyz), np.array(gt_xyz), np.array(gt_vis)
    pred_xyz = np.reshape(pred_xyz, gt_xyz.shape)

    # mask to only valid ones
    if mask:
        m = gt_vis > 0.5
        pred_xyz_m = pred_xyz[m]
        gt_xyz_m = gt_xyz[m]
    else:
        # without masking return the full arrays (avoids returning undefined names)
        pred_xyz_m, gt_xyz_m = pred_xyz, gt_xyz

    return pred_xyz_m, gt_xyz_m, common, pred_xyz, gt_xyz
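All of the examples on this page load their inputs through the project's json_load helper (and write results with json_dump). A minimal sketch of what these wrappers presumably look like, assuming they are thin layers over the standard json module; the verbose flag mirrors how they are called further down:

import json

def json_load(path, verbose=False):
    """ Read a JSON file and return the parsed object. """
    with open(path, 'r') as fi:
        data = json.load(fi)
    if verbose:
        print('Loaded: %s' % path)
    return data

def json_dump(path, data, verbose=False):
    """ Write data to path as JSON. """
    with open(path, 'w') as fo:
        json.dump(data, fo)
    if verbose:
        print('Saved: %s' % path)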
Example #2
    def __init__(self, bb_file=None, pose_file=None, viewer_file=None):
        """ Possibly load model from file. """
        if bb_file is None:
            bb_file = 'config/bb_network.cfg.json'
        if pose_file is None:
            pose_file = 'config/pose_network.cfg.json'
        if viewer_file is None:
            viewer_file = 'config/viewer.cfg.json'

        self.bb = json_load(bb_file)
        self.pose = json_load(pose_file)
        self.viewer = json_load(viewer_file)
Example #3
def _show_marker_det_img_folder(marker_path, img_data_path, det_file_name,
                                block):
    # load detections
    det_file = os.path.join(img_data_path, det_file_name)
    print('\tDetection file: %s' % det_file)
    assert os.path.exists(det_file), 'Could not find detection file.'
    det = json_load(det_file)

    # check for image files
    img_list = find_images(img_data_path)
    print('Found %d images for marker detection.' % len(img_list))

    # sanity check
    assert len(det['p2d']) == len(
        img_list), 'Number of detections and number of images differs.'
    assert len(det['pid']) == len(
        img_list), 'Number of detections and number of images differs.'

    # set up detector
    detector = BoardDetector(marker_path)

    # show
    for idx, img_p in enumerate(img_list):
        img_file = os.path.basename(img_p)
        if img_file not in det['files']:
            print('No detection available for: %s' % img_file)
            continue
        img = cv2.imread(img_p)
        detector.draw_board(img,
                            np.array(det['p2d'][idx]),
                            np.array(det['pid'][idx]),
                            block=block)
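find_images is not part of this listing; a plausible sketch, assuming it simply globs the folder for common image extensions and returns a sorted list (the extension set is a guess):

import glob
import os

def find_images(path):
    """ Return a sorted list of image files found in path. """
    files = list()
    for ext in ('*.jpg', '*.JPG', '*.jpeg', '*.png', '*.bmp'):
        files.extend(glob.glob(os.path.join(path, ext)))
    return sorted(files)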
Example #4
def test_calib_M_dist():
    from calib_M import calc_extrinsics
    K, dist, M = calc_extrinsics(
        './data/calib_test_data/marker_32h11b2_4x4x_7cm.json',
        'data/calib_test_data/rendered/M_dist_test/',
        'cam%d',
        'run%03d',
        det_file_name=None,
        calib_file_name=None,
        calib_out_file_name=None,
        estimate_dist=True,
        dist_complexity=1,
        optimize_distortion=False,
        cache=False,
        verbose=0)

    calib_gt = json_load(
        'data/calib_test_data/rendered/M_dist_test/calib.json')
    for cid in range(len(K)):
        cam_name = 'cam%d' % cid
        _same(calib_gt['K'][cam_name], K[cid], rtol=0.01)
        _same(calib_gt['dist'][cam_name], dist[cid], atol=0.01, rtol=0.5)
        _same(calib_gt['M'][cam_name],
              np.linalg.inv(M[cid]),
              atol=0.01,
              rtol=0.01)

    print('SUCCESS: test_calib_M_dist')
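The calibration tests compare against ground truth with a small _same helper that is not shown here. Judging from the atol/rtol keywords it most likely wraps numpy's allclose-style testing; a minimal sketch under that assumption:

import numpy as np

def _same(a, b, atol=1e-8, rtol=1e-5):
    """ Assert that two array-like values agree within the given tolerances. """
    np.testing.assert_allclose(np.array(a), np.array(b), atol=atol, rtol=rtol)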
Example #5
def read_video_sample(vid_files,
                      fid,
                      cam_range,
                      calib_file,
                      read_dist=True,
                      read_parallel=False):
    # read calib
    calib_path = os.path.join(os.path.dirname(vid_files[0]), calib_file)
    calib = json_load(calib_path)

    K_list = [np.array(calib['K']['cam%d' % cid]) for cid in cam_range]
    M_list = [
        np.linalg.inv(np.array(calib['M']['cam%d' % cid])) for cid in cam_range
    ]
    if read_dist:
        dist_list = [
            np.array(calib['dist']['cam%d' % cid]) for cid in cam_range
        ]

    # read image
    img_list = list()
    if read_parallel:
        img_list = Parallel(n_jobs=len(cam_range))(
            delayed(read_vid_frame)(vid, fid) for vid in vid_files)

    else:
        for vid in vid_files:
            img_list.append(read_vid_frame(vid, fid))

    if read_dist:
        return img_list, K_list, M_list, dist_list
    return img_list, K_list, M_list
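read_vid_frame is assumed to seek into the video and return a single frame; a hypothetical OpenCV-based implementation along those lines:

import cv2

def read_vid_frame(vid_file, fid):
    """ Open vid_file, seek to frame fid and return it as a BGR numpy array. """
    cap = cv2.VideoCapture(vid_file)
    cap.set(cv2.CAP_PROP_POS_FRAMES, fid)
    ret, img = cap.read()
    cap.release()
    assert ret, 'Could not read frame %d from %s' % (fid, vid_file)
    return img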
Example #6
def _get_pred_bb(pred_file, idx):
    global predictions_bb_cache

    if pred_file not in predictions_bb_cache.keys():
        # load calib
        predictions_bb_cache[pred_file] = json_load(pred_file)

    return predictions_bb_cache[pred_file][idx]['boxes'], predictions_bb_cache[pred_file][idx]['xyz']
Example #7
def _get_calib(calib_id, calib_path):
    global calib_cache

    if calib_cache is None:
        # load calib
        calib_cache = json_load(calib_path)

    return calib_cache[calib_id]
Example #8
def _get_calib_videos(calib_path):
    global calib_cache_videos

    if calib_path not in calib_cache_videos.keys():
        # load calib
        calib_cache_videos[calib_path] = json_load(calib_path)

    return calib_cache_videos[calib_path]
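The three cached loaders above expect module-level cache variables to exist before the first call. Based on how they are checked (dict membership for two of them, an is-None test in _get_calib), the globals are presumably initialized like this:

# Hypothetical module-level initialization assumed by the helpers above.
predictions_bb_cache = dict()   # keyed by prediction file path
calib_cache = None              # _get_calib loads the file on first use
calib_cache_videos = dict()     # keyed by calib file path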
Example #9
    def __init__(self, model_file):
        """ Possibly load model from file. """
        self.model_file = model_file
        model_data = json_load(model_file)
        self.keypoints, self.limbs, self.viewer, self.coord_frames, self.body_angles = self.load_def(
            model_data['skeleton'])
        self.datasets, self.bb_models, self.pose_models, self.preprocessing = self.load_data(
            model_data['data'])
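Since load_def and load_data (shown in later examples) each receive a file path and call json_load themselves, the model_file passed to this constructor presumably holds little more than two paths. A hypothetical example of its content; both file names are made up:

# Hypothetical content of a model file for the constructor above.
model_cfg = {
    'skeleton': 'config/skeleton.cfg.json',
    'data': 'config/data.cfg.json',
}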
Example #10
def merge_all_index_files(model):
    # create output structures
    dataset_index_labeled, calib_all = defaultdict(list), list()

    # Iter all recordings and check which output files exist
    set_names = list()
    for db in model.datasets:
        ident = get_ident(db)
        out_path = os.path.join(model.preprocessing['data_storage'], ident)
        set_names.append(db['db_set'])

        # check for record file
        file_out_rec = os.path.join(
            out_path, model.preprocessing['index_file_name'] % db['db_set'])
        if os.path.exists(file_out_rec):
            data = json_load(file_out_rec)
            data = update_calib_id(data, len(calib_all))
            dataset_index_labeled[db['db_set']].extend(data)

        # calib file
        calib_file = os.path.join(out_path, model.preprocessing['calib_file'])
        if os.path.exists(calib_file):
            calib_all.extend(json_load(calib_file))

    # Save merged indices
    for set_name in set(set_names):
        file_out = model.preprocessing['index_file_name'] % set_name
        if len(dataset_index_labeled[set_name]) > 0:
            json_dump(
                os.path.join(model.preprocessing['data_storage'], file_out),
                dataset_index_labeled[set_name])
            print(
                'Saved %d samples to %s' %
                (len(dataset_index_labeled[set_name]),
                 os.path.join(model.preprocessing['data_storage'], file_out)))

    # Save merged cam calibs
    if len(calib_all) > 0:
        json_dump(
            os.path.join(model.preprocessing['data_storage'],
                         model.preprocessing['calib_file']), calib_all)
Example #11
def test_calib_K_no_dist():
    from calib_K import calc_intrinsics
    K, dist = calc_intrinsics(
        './data/calib_test_data/marker_32h11b2_4x4x_7cm.json',
        'data/calib_test_data/rendered/K_test/cam0/',
        det_file_name=None,
        estimate_dist=False)

    calib_gt = json_load('data/calib_test_data/rendered/K_test/calib.json')
    _same(calib_gt['K'], K, rtol=0.01)

    print('SUCCESS: test_calib_K_no_dist')
Example #12
def test_calib_K_dist2():
    from calib_K import calc_intrinsics
    K, dist = calc_intrinsics(
        './data/calib_test_data/marker_32h11b2_4x4x_7cm.json',
        'data/calib_test_data/rendered/K_dist_test2/cam0/',
        det_file_name=None,
        estimate_dist=True,
        dist_complexity=2)
    calib_gt = json_load(
        'data/calib_test_data/rendered/K_dist_test2/calib.json')
    _same(calib_gt['K'], K, rtol=0.01)
    _same(calib_gt['dist'], dist, rtol=0.05)

    print('SUCCESS: test_calib_K_dist2')
Example #13
def detect_marker(marker_path,
                  data_path,
                  output_file=None,
                  cache=False,
                  verbose=0):
    # check if folder/image or video case
    if os.path.isdir(data_path):
        # folder case
        base_dir = data_path
    else:
        # video case
        base_dir = os.path.dirname(data_path)

    # check for existing detection file
    if output_file is not None:
        det_file = os.path.join(base_dir, output_file)
        if cache and os.path.exists(det_file):
            if verbose > 0:
                print('Loading detection from: %s' % det_file)
            return json_load(det_file)

    if verbose > 0:
        print('Detecting markers on:')
        print('\tData path: %s' % data_path)
        print('\tMarker file: %s' % marker_path)

    if os.path.isdir(data_path):
        if verbose > 0:
            print('\tAssuming: Folder of images.')
        points2d, point_ids, img_shape, files, base_dir = _detect_marker_img_folder(
            marker_path, data_path, verbose)

    else:
        if verbose > 0:
            print('\tAssuming: Video file.')
        points2d, point_ids, img_shape, files, base_dir = _detect_marker_video(
            marker_path, data_path)

    # save detections
    det = {
        'p2d': points2d,
        'pid': point_ids,
        'img_shape': img_shape,
        'files': files
    }

    if output_file is not None:
        json_dump(det_file, det, verbose=verbose > 0)

    return det
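A hypothetical call to detect_marker on one of the image folders used by the tests on this page; the output file name 'detections.json' is made up:

det = detect_marker('./data/calib_test_data/marker_32h11b2_4x4x_7cm.json',
                    'data/calib_test_data/rendered/K_test/cam0/',
                    output_file='detections.json',
                    cache=True,
                    verbose=1)
print('Frames with detections: %d' % len(det['p2d']))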
Example #14
def test_board_pose_estimator(show=False):
    """ Test detecting Boards in images. """
    import cv2
    from utils.general_util import json_load
    from core.BoardDetector import BoardDetector
    img_list = ['./data/calib_test_data/real/april_board_tags_sample.JPG']
    assert os.path.exists(img_list[0]), 'Image file not found.'

    detector = BoardDetector(
        './data/calib_test_data/marker_32h11b2_4x4x_7cm.json')
    point_coords_frames, point_ids_frames = detector.process_image_batch(
        img_list)
    gt1 = json_load('data/calib_test_data/real/gt_det1.json')
    _same(point_coords_frames[0], gt1['c'])
    _same(point_ids_frames[0], gt1['i'])

    if show:
        for img_path, points, point_ids in zip(img_list, point_coords_frames,
                                               point_ids_frames):
            image = cv2.imread(img_path)
            detector.draw_board(image, points, point_ids, linewidth=2)

    detector = BoardDetector(
        './data/calib_test_data/marker_16h5b1_4x4x_15cm.json')
    point_coords_frames, point_ids_frames = detector.process_image_batch(
        img_list)
    gt2 = json_load('data/calib_test_data/real/gt_det2.json')
    _same(point_coords_frames[0], gt2['c'])
    _same(point_ids_frames[0], gt2['i'])

    if show:
        for img_path, points, point_ids in zip(img_list, point_coords_frames,
                                               point_ids_frames):
            image = cv2.imread(img_path)
            detector.draw_board(image, points, point_ids, linewidth=2)

    print('SUCCESS: test_board_pose_estimator')
Example #15
    def __init__(self, marker_def_file,
                 num_parallel_jobs=10, downsampling=1):

        # load marker info from file
        marker_def = json_load(marker_def_file)
        self.marker_dim = (marker_def['n_y'], marker_def['n_x'])
        marker_type = marker_def['family']
        assert marker_type in ['36h11', '16h5'], 'Marker family not implemented.'
        black_border = int(marker_def['border'])
        self.tag_size = marker_def['tsize']  # marker size in m
        self.tag_spacing = marker_def['tspace']  # space between tags in m
        self.offset = marker_def['offset']  # offset between front and back in m
        self.double = marker_def['double']  # is it a double sided tag?

        self.tag_detector_batch = PyRunAprilDetectorBatch(marker_type, black_border, num_parallel_jobs, 1.0/downsampling,
                                                          draw=False)
        self.object_points = self.get_april_tag_points()
Example #16
    def load_data(self, data_file):
        """ Load model from file. """
        data = json_load(data_file)

        assert 'bb_networks' in data.keys(), 'Missing needed field.'
        assert 'pose_networks' in data.keys(), 'Missing needed field.'
        assert 'datasets' in data.keys(), 'Missing needed field.'
        db_field = [
            'anno', 'calib', 'cam_range', 'db_set', 'frame_dir', 'path',
            'vid_file'
        ]
        assert all([
            all([k in d.keys() for k in db_field]) for d in data['datasets']
        ]), 'Missing needed field.'

        return data['datasets'], data['bb_networks'], data[
            'pose_networks'], data['preprocessing']
Example #17
def _estimate_plane(plane_file):
    """ Estimates a plane (normal and point) from a given set of annotated points, assuming all point form a single plane. """
    plane_data = json_load(plane_file)
    plane = None
    for k, v in plane_data.items():
        if 'xyz' in v.keys() and 'vis3d' in v.keys():
            xyz = np.array(v['xyz'])
            vis3d = np.array(v['vis3d'])
            if np.sum(vis3d) >= 3:
                points = xyz[vis3d > 0.5]
                p, n = _fit_plane(points)

                if np.dot(n, np.array([0.0, -1.0, 0.0])) < 0.0:
                    # normal should roughly point in -y direction
                    n *= -1.0
                plane = p, n

    return plane
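_fit_plane is not included in this listing. Its usage above (returning a point on the plane and a normal) matches the common least-squares fit that takes the centroid as the point and the smallest right singular vector as the normal; a sketch under that assumption:

import numpy as np

def _fit_plane(points):
    """ Fit a plane to an Nx3 point set; return (point_on_plane, unit_normal). """
    p = points.mean(axis=0)
    _, _, vh = np.linalg.svd(points - p)
    n = vh[-1]
    return p, n / np.linalg.norm(n)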
Example #18
def load_calib(calib_path):
    # load file
    calib = json_load(calib_path)

    # find id's
    cid_list = list()
    for cid in range(1024):
        if 'cam%d' % cid in calib['K'].keys():
            cid_list.append(cid)

    # bring in different layout
    calib_out = {'K': list(), 'dist': list(), 'M': list()}

    for cid in cid_list:
        calib_out['K'].append(np.array(calib['K']['cam%d' % cid]))
        calib_out['dist'].append(np.array(calib['dist']['cam%d' % cid]))
        calib_out['M'].append(np.array(calib['M']['cam%d' % cid]))

    return calib_out
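A short usage sketch for load_calib, reusing a calibration file path that appears in the tests above (whether that exact file exists locally is an assumption):

calib = load_calib('data/calib_test_data/rendered/M_dist_test/calib.json')
for cid, (K, dist, M) in enumerate(zip(calib['K'], calib['dist'], calib['M'])):
    print('cam%d: K %s, dist %s, M %s' % (cid, K.shape, dist.shape, M.shape))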
Example #19
def _show_marker_det_video(marker_path, video_path, det_file_name, block):
    # load detections
    data_path = os.path.dirname(video_path)
    det_file = os.path.join(data_path, det_file_name)
    print('\tDetection file: %s' % det_file)
    assert os.path.exists(det_file), 'Could not find detection file.'
    det = json_load(det_file)

    # check video path
    video = cv2.VideoCapture(video_path)
    num_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
    print('Found video with %d frames.' % num_frames)

    # sanity check
    assert len(
        det['p2d']
    ) == num_frames, 'Number of detections and number of frames differs.'
    assert len(
        det['pid']
    ) == num_frames, 'Number of detections and number of frames differs.'

    # set up detector
    detector = BoardDetector(marker_path)

    # show
    idx = 0
    while True:
        if not video.isOpened():
            break
        ret, img = video.read()

        if not ret:
            break

        detector.draw_board(img,
                            np.array(det['p2d'][idx]),
                            np.array(det['pid'][idx]),
                            block=block)

        idx += 1
Example #20
    def load_def(self, def_file):
        """ Load model from file. """
        data = json_load(def_file)
        self.data = data
        return data['keypoints'], data['limbs'], data['viewer'], data[
            'coord_frames'], data['body_angles']
Example #21
                        action='store_true',
                        help='If set, saves data.')

    args = parser.parse_args()

    # load model data
    model = Model(args.model)

    # sanity check input
    assert os.path.exists(
        args.pose_pred_file), 'Given pose prediction file was not found.'
    if args.plane:
        assert os.path.exists(
            args.plane_file), 'Given plane definition file was not found.'

    # output file to save results to
    output_file_name = os.path.join(os.path.dirname(args.pose_pred_file),
                                    args.file_out_name)
    print('Output file: %s' % output_file_name)

    # load pose data
    pose_pred = json_load(args.pose_pred_file)

    # run analyse
    variables = analyse(args, model, pose_pred)

    if args.save:
        print('Saving file...')
        # save calculated variables
        json_dump(output_file_name, variables, verbose=True)
Example #22
def calc_intrinsics(marker_path,
                    data_path,
                    det_file_name,
                    output_file=None,
                    estimate_dist=True,
                    dist_complexity=5,
                    cache=False,
                    verbose=0):
    if os.path.isdir(data_path):
        base_dir = data_path
    else:
        base_dir = os.path.dirname(data_path)

    # try to load precomputed
    if output_file is not None:
        calib_file = os.path.join(base_dir, output_file)
        if cache and os.path.exists(calib_file):
            if verbose > 0:
                print('Loading intrinsic calibration from: %s' % calib_file)
            calib = json_load(calib_file)
            return np.array(calib['K']), np.array(calib['dist'])

    if verbose > 0:
        print('Calculating intrinsic calibration for:')
        print('\tData path: %s' % data_path)
        print('\tMarker file: %s' % marker_path)

    # set up detector and estimator
    detector = BoardDetector(marker_path)

    if os.path.isdir(data_path):
        if verbose > 0:
            print('\tAssuming: Folder of images.')
        base_dir = data_path
    else:
        if verbose > 0:
            print('\tAssuming: Video file.')
        base_dir = os.path.dirname(data_path)

    # check for detections
    if det_file_name is None:
        det = detect_marker(marker_path,
                            data_path,
                            cache=cache,
                            verbose=verbose - 1)
    else:
        detections_file = os.path.join(base_dir, det_file_name)

        if not os.path.exists(detections_file):
            if verbose > 1:
                print(
                    'Could not locate marker detections. Running detector now and saving them to folder.'
                )
            det = detect_marker(marker_path,
                                data_path,
                                det_file_name,
                                verbose=verbose - 1)

        else:
            det = json_load(detections_file)

    # give points unique ids
    max_num_pts = len(detector.object_points)
    p2d, pid, p3dm, fid = enumerate_points(detector, det['p2d'], det['pid'],
                                           max_num_pts)
    if verbose > 0:
        print('Found %d unique points to estimate intrinsics from.' %
              pid.shape[0])

    # estimate intrinsics
    K, dist = estimate_intrinsics(p2d,
                                  fid,
                                  p3dm,
                                  det['img_shape'],
                                  estimate_dist=estimate_dist,
                                  dist_complexity=dist_complexity,
                                  verbose=verbose)

    # save intrinsics
    if output_file is not None:
        calib = {'K': K, 'dist': dist}
        json_dump(calib_file, calib, verbose=verbose > 0)
    return K, dist
Example #23
    # args.traj_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat506_200306/trigg_Rat506_200306_2.json'

    # args.video_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/G372_190325/run001_cam1.mp4'
    # args.pred_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/G372_190325/pred_run001__00.json'
    # args.pred_dlc_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/G372_190325/pred_dlc_unlabeled_G372_190325.json'

    # args.video_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat487_200308/run060_cam1.avi'
    # args.pred_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat487_200308/pred_run060__00.json'
    # args.pred_dlc_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat487_200308/pred_dlc_unlabeled_Rat487_200308.json'

    # args.video_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat480_190823/run005_cam1.avi'
    # args.pred_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat480_190823/Rat480_190823_run005_pred_pose_ours_al1.json'
    # args.pred_dlc_file = '/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat480_190823/Rat480_190823_run005_pred_pose_dlc_al1.json'

    # load poses
    d = json_load(args.pred_file)
    pose_ours = list()
    for x in d:
        pose_ours.append(x.get('kp_xyz', None))

    # ours_raw = json_load(args.pred_file)
    # pose_ours = list()
    # k = list(ours_raw.keys())[0].split(':')[0]  # this will not win the prize for the most readable code line
    # for i in range(len(ours_raw)):
    #     pose_ours.append(ours_raw['%s:%d' % (k, i)].get('kp_xyz', None))

    dlc_raw = json_load(args.pred_dlc_file)
    pose_dlc = list()
    k = list(dlc_raw.keys())[0].split(':')[
        0]  # this will not win the prize for the most readable code line
    for i in range(len(dlc_raw)):
        pose_dlc.append(dlc_raw['%s:%d' % (k, i)].get('kp_xyz', None))
Example #24
def load_data(pose_var_file, laser_data_file):
    pose_vars = json_load(pose_var_file)
    laser_data = json_load(laser_data_file)['frames']
    return pose_vars, laser_data
Example #25
                        help='How to tell the run id'
                        ' from a given file name.')
    parser.add_argument('--rec_fmt',
                        type=str,
                        default='%s_cam%d.avi',
                        help='Format string for the recording file name.')
    parser.add_argument('--max_cams',
                        type=int,
                        default=64,
                        help='Maximal number of cams we search for.')
    args = parser.parse_args()

    predictions = None
    if args.file.endswith('.json'):
        # load predictions
        predictions = json_load(args.file, verbose=True)

    # extract run from file name
    base_path, run_id = parse_file_name(args.file, args.run_wildcard)

    # Parse videos
    video_list = [
        (i,
         os.path.join(base_path,
                      args.rec_fmt % (args.run_wildcard % run_id, i)))
        for i in range(args.max_cams)
    ]
    video_list = [v for v in video_list if os.path.exists(v[1])]
    cam_range, video_list = [v[0]
                             for v in video_list], [v[1] for v in video_list]
Example #26
def _eval(this_mode, train_rat_list, eval_rat_list, margin=20):
    print('\nTYPE', this_mode)
    rows = [
        'train \\ eval', 'fg acc', 'bg acc', 'avg acc', 'avg (weighted) acc',
        'fg/bg cnt'
    ]
    # rows.extend(eval_rat_list)
    fg_acc_table = PrettyTable(rows)

    # aligned_pred = defaultdict(list)
    pred_all = list()
    for train_rat, eval_rat in zip(train_rat_list, eval_rat_list):
        # load data
        pred_file = os.path.join(
            BASE_PATH,
            '%s_%s' % (eval_rat, REC_NAME_DICT[this_mode][eval_rat]),
            'pred_full_t%s_e%s_.json' % (train_rat, eval_rat))

        laser_data = os.path.join(os.path.dirname(pred_file), 'times.json')

        # assert os.path.exists(args.pred_cls), 'Pred file not found.'
        pred_data = json_load(pred_file)
        y_true, y_pred = np.array(pred_data['gt']), np.array(pred_data['pred'])

        assert os.path.exists(laser_data), 'Laser data file not found.'
        laser_data = json_load(laser_data)['frames']

        # overall accuracy
        acc = np.mean(y_true == y_pred)
        # print('Overall accuracy', acc)

        # fg/bg accuracy
        m = y_true < 0.0
        bg_acc = np.mean(y_true[m] == y_pred[m])

        m = y_true > 0.0
        fg_acc = np.mean(y_true[m] == y_pred[m])
        # print('FG accuracy', acc)

        # accumulate aligned predictions
        for i in laser_data:
            # s, e = i, i+150
            s, e = i - margin, i + 150 + margin
            pred_all.append(y_pred[s:e])

        fg_cnt = np.sum(y_true > 0.0)
        bg_cnt = np.sum(y_true < 0.0)
        avg_acc = 0.5 * (fg_acc + bg_acc)
        d = [
            train_rat,
            '%.1f' % (fg_acc * 100.0),
            '%.1f' % (bg_acc * 100.0),
            '%.1f' % (avg_acc * 100.0),
            '%.1f' % (acc * 100.0),
            '%d/%d' % (fg_cnt, bg_cnt)
        ]
        fg_acc_table.add_row(d)

    pred_all = (np.mean(pred_all, 0) + 1.0) / 2.0
    pred_stim = (this_mode, np.arange(pred_all.shape[0]) - margin, pred_all)
    return pred_stim, fg_acc_table
Example #27
    def btn_load_clicked(self):
        self.anno = json_load(self.anno_file)
        self.update_frame_view_by_anno()
        self.update_visibility()
Example #28
def preproc_data(model):
    """ Preprocess labeled data so we can train networks with it. """
    print('Running preprocessing for:', model)
    print('Saving to output folder:', model.preprocessing['data_storage'])

    # Init output structures
    calib_all = list()
    for i, db in enumerate(model.datasets):
        dataset_index = defaultdict(list)
        ident = get_ident(db)
        print('Preprocessing dataset entry %d: %s' % (i, ident))

        # where we want to save the processed frames
        output_path = os.path.join(model.preprocessing['data_storage'], ident)

        # check if we previously dealt with this record
        if os.path.exists(output_path):
            print(' > This record was already preprocessed.')
            continue

        # check base path existence
        if not os.path.exists(db['path']):
            print(' > Base path not found: %s' % db['path'])
            continue

        # check calib file
        calib_file_path = os.path.join(db['path'], db['calib'])
        if not os.path.exists(calib_file_path):
            print(' > Calib file not found: %s' % calib_file_path)
            continue
        calib_all.append(
            load_calib_data(calib_file_path, return_cam2world=False))

        # check annotation file
        anno_file = os.path.join(db['path'], db['frame_dir'], db['anno'])
        if os.path.exists(anno_file):
            print(' > Loading annotations from %s' % anno_file)
            anno = json_load(anno_file)
            print(' > Got %d annotations' % len(anno))

        else:
            print(' > Can\'t find annotation file: %s' % anno_file)
            print(' > Assuming dataset is not labeled.')
            continue

        if check_if_labeled(anno):
            print(' > Found labeled sequence: %s' %
                  os.path.join(db['path'], db['frame_dir']))
            cnt = sum([len(x) for x in dataset_index.values()])
            this_index = process_labeled(model, cnt, output_path, db, anno,
                                         calib_all)
            print(' > Adding %d samples to labeled set %s' %
                  (len(this_index), db['db_set']))
            dataset_index[db['db_set']].extend(this_index)
        else:
            print(
                ' > Sequence appears to be unlabeled (e.g. the annotation file is empty).'
            )

        if len(dataset_index[db['db_set']]) > 0:
            file_out_rec = os.path.join(
                output_path,
                model.preprocessing['index_file_name'] % db['db_set'])
            json_dump(file_out_rec, dataset_index[db['db_set']])
            print(' > Saved %d samples to %s' %
                  (len(dataset_index[db['db_set']]), file_out_rec))

            # save Calib file
            json_dump(
                os.path.join(output_path, model.preprocessing['calib_file']),
                calib_all)

    merge_all_index_files(model)
Example #29
    # parse given input
    video_list, K_list, \
    dist_list, M_list,\
    pred_file_name = parse_input(args.video,
                                 args.cam_wildcard, args.run_wildcard, args.max_cams,
                                 args.calib_file_name,
                                 find_last_existant)
    print('Found %d video files to make predictions: %s' %
          (len(video_list), video_list[0]))
    print('Predictions will be saved to: %s' % pred_file_name)

    # load bb annotations
    if args.pred_file is not None:
        pred_file_name = args.pred_file
    predictions = json_load(pred_file_name)

    # create video readers
    video_readers = [
        VideoReaderFast(v, lambda x, K=K: preprocess(x, K, img_size=800))
        for v, K, dist in zip(video_list, K_list, dist_list)
    ]

    # start them
    for v in video_readers:
        if args.start_fid is not None:
            v.set_fid(args.start_fid)
        else:
            args.start_fid = 0
        v.start()
Example #30
def show_intrinsic_calib(marker_path, data_path, det_file_name,
                         calib_file_name, show_size):
    print('Showing marker detections for:')
    print('\tData path: %s' % data_path)
    print('\tMarker file: %s' % marker_path)

    if os.path.isdir(data_path):
        print('\tAssuming: Folder of images.')
        base_dir = data_path
    else:
        print('\tAssuming: Video file.')
        base_dir = os.path.dirname(data_path)

    det_file = os.path.join(base_dir, det_file_name)
    calib_file = os.path.join(base_dir, calib_file_name)
    print('\tDetection file: %s' % det_file)
    print('\tCalib file: %s' % calib_file)

    # load detections
    assert os.path.exists(det_file), 'Could not find detection file.'
    det = json_load(det_file)

    # load calibration
    assert os.path.exists(calib_file), 'Could not find calibration file.'
    calib = json_load(calib_file)
    K, dist = np.array(calib['K']), np.array(calib['dist'])

    img = _read_first_frame(data_path, det)

    # set up detector
    detector = BoardDetector(marker_path)

    # calculate statistics
    err, angle, depths = list(), list(), list()
    for p2d, pid in tqdm(zip(det['p2d'], det['pid']),
                         total=len(det['p2d']),
                         desc='Calculating stats'):
        if len(pid) == 0:
            continue

        p3d_m = detector.object_points[pid]
        a, d, e = _calc_board_stats(np.array(p2d), p3d_m, K, dist)
        angle.append(a)
        depths.extend(d)
        err.extend(e)

    # Print reprojection error
    err = np.array(err)
    print('Reprojection error: min=%.2f, mean=%.2f, max=%.2f (px)' %
          (err.min(), err.mean(), err.max()))

    # show error and depth distribution
    angle = np.array(angle)
    angle = angle[~np.isnan(angle)]
    depths = np.array(depths)

    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    ax1.hist(depths), ax1.set_title('distance to camera'), ax1.set_xlim(
        [0, depths.max()])
    ax2.hist(angle), ax2.set_title('angle wrt camera'), ax2.set_xlim([0, 180])
    stats_img = fig2data(fig)
    cv2.imshow('stats', stats_img[:, :, ::-1])
    cv2.waitKey(100)

    # show image space coverage
    pts_all = np.concatenate([d for d in det['p2d'] if len(d) > 0], 0)
    print('Showing %d detected points' % pts_all.shape[0])
    _show_coverage(img, pts_all, show_size)

    return det
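fig2data converts the matplotlib figure into an array that cv2.imshow can display (the caller flips RGB to BGR). A plausible implementation, assuming an Agg-style canvas:

import numpy as np

def fig2data(fig):
    """ Render a matplotlib figure and return it as an RGB uint8 array. """
    fig.canvas.draw()
    buf = np.asarray(fig.canvas.buffer_rgba())
    return buf[:, :, :3].copy()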