Example #1
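These snippets (Examples #1-#10) are excerpted from a larger pipeline and omit their imports. A minimal sketch of the imports they appear to rely on; the aniposelib import path is an assumption, and helpers such as natural_keys, true_basename, get_video_name, get_cam_name, load_offsets_dict and get_projected_points come from the surrounding project and are not shown here:

import os
import pickle
from glob import glob
from collections import defaultdict

import numpy as np
import pandas as pd
import toml
from tqdm import tqdm

# assumed import path for CameraGroup
from aniposelib.cameras import CameraGroup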
def process_peter(scheme, optim, calib_folder, video_folder, pose_3d_folder,
                  video_3d_folder, out_folder, cam_regex, video_ext):

    vid_fnames_2d = glob(os.path.join(video_folder, "*." + video_ext))
    vid_fnames_3d = glob(os.path.join(video_3d_folder, "*." + video_ext))
    vid_fnames_3d = sorted(vid_fnames_3d, key=natural_keys)

    fnames_2d = defaultdict(list)
    for vid in vid_fnames_2d:
        vidname = tools.videoname_from_regex(cam_regex, vid)
        fnames_2d[vidname].append(vid)

    fnames_3d = defaultdict(list)
    for vid in vid_fnames_3d:
        vidname = true_basename(vid)
        fnames_3d[vidname].append(vid)

    calib_fname = os.path.join(calib_folder, 'calibration.toml')
    cgroup = CameraGroup.load(calib_fname)

    if len(vid_fnames_3d) > 0:
        os.makedirs(out_folder, exist_ok=True)

    for vid_fname in vid_fnames_3d:
        basename = true_basename(vid_fname)

        out_fname = os.path.join(out_folder, basename + "." + video_ext)
        pose_fname = os.path.join(pose_3d_folder, basename + ".csv")

        if not os.path.exists(pose_fname):
            print(out_fname, 'missing 3d data')
            continue

        if len(fnames_2d[basename]) == 0:
            print(out_fname, 'missing 2d videos')
            continue

        if len(fnames_3d[basename]) == 0:
            print(out_fname, 'missing 3d videos')
            continue

        fname_3d_current = fnames_3d[basename][0]
        fnames_2d_current = fnames_2d[basename]
        fnames_2d_current = sorted(fnames_2d_current, key=natural_keys)
        cam_names = [
            tools.camname_from_regex(cam_regex, fname)
            for fname in fnames_2d_current
        ]
        offsets_dict = load_offsets_dict({}, cam_names, video_folder)
        cgroup_subset = cgroup.subset_cameras_names(cam_names)

        visualize_combined(scheme, optim, pose_fname, cgroup_subset,
                           offsets_dict, fnames_2d_current, fname_3d_current,
                           out_fname)
Example #2
def get_all_videos_fnames(config):
    vids_fnames = process_all(config, get_videos_fnames)
    cam_videos = defaultdict(list)

    for key, (session_path, fnames) in vids_fnames.items():
        for fname in fnames:
            # print(fname)
            vidname = get_video_name(config, fname)
            k = (key, session_path, vidname)
            cam_videos[k].append(fname)

    vid_names = sorted(cam_videos.keys(), key=lambda x: natural_keys(x[2]))

    all_fnames = []
    calib_fnames = []
    # cam_names ends up holding the camera names of the last processed video
    # set; all sets are assumed to share the same cameras
    cam_names = []

    for name in tqdm(vid_names, desc='get videos', ncols=80):
        (key, session_path, vidname) = name
        fnames = sorted(cam_videos[name], key=natural_keys)
        cam_names = [get_cam_name(config, f) for f in fnames]

        cgroup = None
        calib_folder = find_calibration_folder(config, session_path)
        if calib_folder is not None:
            calib_fname = os.path.join(
                calib_folder, config['pipeline']['calibration_results'],
                'calibration.toml')
            if os.path.exists(calib_fname):
                cgroup = CameraGroup.load(calib_fname)

        # pose_fname = os.path.join(session_path, config['pipeline']['pose_3d'],
        #                           vidname+'.csv')

        if cgroup is None:  # or not os.path.exists(pose_fname):
            continue

        calib_fnames.append(calib_fname)
        all_fnames.append(fnames)

    normal_count = max([len(x) for x in all_fnames], default=0)
    bad_num = np.sum([len(x) != normal_count for x in all_fnames])
    if bad_num > 0:
        print(
            'W: ignored {} sets of videos with inconsistent number of cameras'.
            format(bad_num))
        # drop the inconsistent sets, keeping calib_fnames aligned with all_fnames
        keep = [len(x) == normal_count for x in all_fnames]
        all_fnames = [x for x, k in zip(all_fnames, keep) if k]
        calib_fnames = [c for c, k in zip(calib_fnames, keep) if k]

    out = {
        'fnames': all_fnames,
        'calib_fnames': calib_fnames,
        'cam_names': cam_names
    }
    return out
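The returned dictionary keeps 'fnames' and 'calib_fnames' aligned by index, so each set of per-camera videos can be paired with the calibration it was recorded under. A minimal sketch of consuming it (config here is whatever configuration object this pipeline passes around):

videos = get_all_videos_fnames(config)
for fnames, calib_fname in zip(videos['fnames'], videos['calib_fnames']):
    cgroup = CameraGroup.load(calib_fname)
    print(len(fnames), 'videos calibrated by', calib_fname)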
Example #3
def load_2d_projections(session_path, fname):
    calib_fname = os.path.join(session_path, "Calibration", "calibration.toml")
    cgroup = CameraGroup.load(calib_fname)

    config_fname = os.path.join(session_path, "config.toml")
    config = toml.load(config_fname)

    data = pd.read_csv(fname)

    M = np.identity(3)
    center = np.zeros(3)
    for i in range(3):
        center[i] = np.mean(data['center_{}'.format(i)])
        for j in range(3):
            M[i, j] = np.mean(data['M_{}{}'.format(i, j)])

    cols = [x for x in data.columns if '_error' in x]
    bodyparts = [c.replace('_error', '') for c in cols]

    vecs = []
    for bp in bodyparts:
        vec = np.array(data[[bp+'_x', bp+'_y', bp+'_z']])
        vecs.append(vec)
    p3d = np.array(vecs).swapaxes(0, 1)

    # project to 2d
    n_cams = len(cgroup.cameras)
    n_frames, n_joints, _ = p3d.shape

    all_points_flat = p3d.reshape(-1, 3)
    # undo the coordinate-frame adjustment (M, center) stored in the CSV,
    # recovering the points in the original calibration frame
    all_points_flat_t = (all_points_flat + center).dot(np.linalg.inv(M.T))

    points_2d_proj_flat = cgroup.project(all_points_flat_t)
    points_2d_proj = points_2d_proj_flat.reshape(n_cams, n_frames, n_joints, 2)

    # points_2d_proj = points_2d_proj.swapaxes(0, 1)
    cam_names = cgroup.get_names()
    offsets = [config['cameras'][name]['offset'] for name in cam_names]

    for i in range(n_cams):
        dx = offsets[i][0]
        dy = offsets[i][1]
        points_2d_proj[i, :, :, 0] -= dx
        points_2d_proj[i, :, :, 1] -= dy

    points_2d_proj = np.int32(np.round(points_2d_proj))
    out = dict()
    for i, cname in enumerate(cam_names):
        out[cname] = points_2d_proj[i].tolist()

    return out
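The expression (all_points_flat + center).dot(np.linalg.inv(M.T)) above undoes the coordinate-frame adjustment whose M_ij and center_i columns are written out in Example #10. A small self-contained check of that round trip, assuming the forward adjustment is p_adj = p.dot(M.T) - center (the algebraic inverse of the line above):

import numpy as np

# a rotation about the z-axis and an offset stand in for the stored (M, center)
theta = 0.3
M = np.array([[np.cos(theta), -np.sin(theta), 0],
              [np.sin(theta),  np.cos(theta), 0],
              [0,              0,             1]])
center = np.array([10.0, -2.0, 5.0])

p = np.random.randn(4, 3)                          # points in the calibration frame
p_adj = p.dot(M.T) - center                        # assumed forward adjustment
p_back = (p_adj + center).dot(np.linalg.inv(M.T))  # inverse, as used above

assert np.allclose(p, p_back)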
Example #4
def load_2d_projections(session, folders, fname):
    config = get_config(session)

    pipeline_calibration_videos = config.get('pipeline', {}).get('calibration_videos', 'calibration')
    # 'prefix' and 'safe_join' are assumed to be defined at module level
    # (a configured data root and a path-joining helper, respectively)
    search_path = os.path.normpath(safe_join(prefix, session, *folders))
    calib_folder = find_calibration_folder(config, search_path)
    calib_fname = safe_join(calib_folder, pipeline_calibration_videos, "calibration.toml")
    cgroup = CameraGroup.load(os.path.normpath(calib_fname))

    offsets_dict = load_offsets_dict(config, cgroup.get_names())

    bodyparts, points_2d_proj, all_scores = get_projected_points(
        config, fname, cgroup, offsets_dict)

    cam_names = cgroup.get_names()

    points_2d_proj[~np.isfinite(points_2d_proj)] = 0
    points_2d_proj = np.int32(np.round(points_2d_proj))
    out = dict()
    for i, cname in enumerate(cam_names):
        out[cname] = points_2d_proj[i].swapaxes(0,1).tolist()

    return out
Example #5
def process_session(config, session_path):
    pipeline_videos_raw = config['pipeline']['videos_raw']
    pipeline_pose_3d = config['pipeline']['pose_3d']
    pipeline_pose_2d_projected = config['pipeline']['pose_2d_projected']

    video_ext = config['video_extension']

    vid_fnames_2d = glob(
        os.path.join(session_path, pipeline_videos_raw, "*." + video_ext))
    vid_fnames_2d = sorted(vid_fnames_2d, key=natural_keys)

    pose_fnames_3d = glob(os.path.join(session_path, pipeline_pose_3d,
                                       "*.csv"))
    pose_fnames_3d = sorted(pose_fnames_3d, key=natural_keys)

    if len(pose_fnames_3d) == 0:
        return

    fnames_2d = defaultdict(list)
    for vid in vid_fnames_2d:
        vidname = get_video_name(config, vid)
        fnames_2d[vidname].append(vid)

    fnames_3d = defaultdict(list)
    for fname in pose_fnames_3d:
        vidname = true_basename(fname)
        fnames_3d[vidname].append(fname)

    cgroup = None
    calib_folder = find_calibration_folder(config, session_path)
    if calib_folder is not None:
        calib_fname = os.path.join(calib_folder,
                                   config['pipeline']['calibration_results'],
                                   'calibration.toml')
        if os.path.exists(calib_fname):
            cgroup = CameraGroup.load(calib_fname)

    if cgroup is None:
        print(
            'session {}: no calibration found, skipping'.format(session_path))
        return

    outdir = os.path.join(session_path, pipeline_pose_2d_projected)
    os.makedirs(outdir, exist_ok=True)

    for pose_fname in pose_fnames_3d:
        basename = true_basename(pose_fname)

        if len(fnames_2d[basename]) == 0:
            print(pose_fname, 'missing raw videos')
            continue

        fname_3d_current = pose_fname
        fnames_2d_current = fnames_2d[basename]
        fnames_2d_current = sorted(fnames_2d_current, key=natural_keys)

        out_fnames = [
            os.path.join(outdir,
                         true_basename(fname) + '.h5')
            for fname in fnames_2d_current
        ]

        if all([os.path.exists(f) for f in out_fnames]):
            continue

        print(pose_fname)

        cam_names = [
            get_cam_name(config, fname) for fname in fnames_2d_current
        ]

        video_folder = os.path.join(session_path, pipeline_videos_raw)
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)

        cgroup_subset = cgroup.subset_cameras_names(cam_names)

        bodyparts, points_2d_proj, all_scores = get_projected_points(
            config, fname_3d_current, cgroup_subset, offsets_dict)

        metadata = {
            'scorer': 'scorer',
            'bodyparts': bodyparts,
            'index': np.arange(points_2d_proj.shape[2])
        }

        n_cams, n_joints, n_frames, _ = points_2d_proj.shape

        pts = np.zeros((n_frames, n_joints, 3), dtype='float64')

        for cix, (cname, outname) in enumerate(zip(cam_names, out_fnames)):
            pts[:, :, :2] = points_2d_proj[cix].swapaxes(0, 1)
            pts[:, :, 2] = all_scores.T
            write_pose_2d(pts, metadata, outname)
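The final loop converts the projected points from the (n_cams, n_joints, n_frames, 2) layout unpacked above into the (n_frames, n_joints, 3) layout passed to write_pose_2d, with the detection score in the third column. A shape-only sketch with dummy arrays (the sizes are illustrative):

import numpy as np

n_cams, n_joints, n_frames = 3, 5, 100
points_2d_proj = np.zeros((n_cams, n_joints, n_frames, 2))
all_scores = np.zeros((n_joints, n_frames))

pts = np.zeros((n_frames, n_joints, 3), dtype='float64')
pts[:, :, :2] = points_2d_proj[0].swapaxes(0, 1)  # (n_joints, n_frames, 2) -> (n_frames, n_joints, 2)
pts[:, :, 2] = all_scores.T                       # scores fill the third column
print(pts.shape)                                  # (100, 5, 3)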
Example #6
def process_session(config, session_path):
    # filtered = config['filter']['enabled']
    # if filtered:
    #     pipeline_videos_labeled_2d = config['pipeline']['videos_labeled_2d_filter']
    # else:
    #     pipeline_videos_labeled_2d = config['pipeline']['videos_labeled_2d']
    pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d']
    pipeline_videos_raw = config['pipeline']['videos_raw']
    # pipeline_angles = config['pipeline']['angles']

    if config['filter3d']['enabled']:
        pipeline_pose_3d = config['pipeline']['pose_3d_filter']
    else:
        pipeline_pose_3d = config['pipeline']['pose_3d']
    pipeline_videos_combined = config['pipeline']['videos_combined']

    video_ext = config['video_extension']

    vid_fnames_2d = glob(
        os.path.join(session_path, pipeline_videos_raw, "*." + video_ext))

    # vid_fnames_2d = glob(os.path.join(session_path,
    #                                   pipeline_videos_labeled_2d, "*.avi"))

    vid_fnames_3d = glob(
        os.path.join(session_path, pipeline_videos_labeled_3d, "*.mp4"))
    vid_fnames_3d = sorted(vid_fnames_3d, key=natural_keys)

    fnames_2d = defaultdict(list)
    for vid in vid_fnames_2d:
        vidname = get_video_name(config, vid)
        fnames_2d[vidname].append(vid)

    fnames_3d = defaultdict(list)
    for vid in vid_fnames_3d:
        vidname = true_basename(vid)
        fnames_3d[vidname].append(vid)

    cgroup = None
    calib_folder = find_calibration_folder(config, session_path)
    if calib_folder is not None:
        calib_fname = os.path.join(calib_folder,
                                   config['pipeline']['calibration_results'],
                                   'calibration.toml')
        if os.path.exists(calib_fname):
            cgroup = CameraGroup.load(calib_fname)

    # angle_fnames = glob(os.path.join(session_path,
    #                                  pipeline_angles, '*.csv'))
    # angle_fnames = sorted(angle_fnames, key=natural_keys)

    outdir = os.path.join(session_path, pipeline_videos_combined)

    if len(vid_fnames_3d) > 0:
        os.makedirs(outdir, exist_ok=True)

    for vid_fname in vid_fnames_3d:
        basename = true_basename(vid_fname)

        out_fname = os.path.join(outdir, basename + '.mp4')
        pose_fname = os.path.join(session_path, pipeline_pose_3d,
                                  basename + '.csv')

        if os.path.exists(out_fname) and \
           abs(get_nframes(out_fname) - get_nframes(vid_fname)) < 100:
            continue

        if not os.path.exists(pose_fname):
            print(out_fname, 'missing 3d data')
            continue

        if len(fnames_2d[basename]) == 0:
            print(out_fname, 'missing 2d videos')
            continue

        if len(fnames_3d[basename]) == 0:
            print(out_fname, 'missing 3d videos')
            continue

        fname_3d_current = fnames_3d[basename][0]
        fnames_2d_current = fnames_2d[basename]
        fnames_2d_current = sorted(fnames_2d_current, key=natural_keys)

        cam_names = [
            get_cam_name(config, fname) for fname in fnames_2d_current
        ]

        print(out_fname)

        video_folder = os.path.join(session_path, pipeline_videos_raw)
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)

        cgroup_subset = cgroup.subset_cameras_names(cam_names)

        visualize_combined(config, pose_fname, cgroup_subset, offsets_dict,
                           fnames_2d_current, fname_3d_current, out_fname)
Example #7
def process_session(config, session_path):
    pipeline_calibration_videos = config['pipeline']['calibration_videos']
    pipeline_calibration_results = config['pipeline']['calibration_results']
    video_ext = config['video_extension']

    print(session_path)

    calibration_path = find_calibration_folder(config, session_path)

    if calibration_path is None:
        return

    videos = glob(
        os.path.join(calibration_path, pipeline_calibration_videos,
                     '*.' + video_ext))
    videos = sorted(videos)

    cam_videos = defaultdict(list)
    cam_names = set()
    for vid in videos:
        name = get_cam_name(config, vid)
        cam_videos[name].append(vid)
        cam_names.add(name)

    cam_names = sorted(cam_names)

    video_list = [sorted(cam_videos[cname]) for cname in cam_names]

    outname_base = 'calibration.toml'
    outdir = os.path.join(calibration_path, pipeline_calibration_results)
    outname = os.path.join(outdir, outname_base)

    print(outname)
    skip_calib = False
    init_stuff = True
    error = None

    if os.path.exists(outname):
        cgroup = CameraGroup.load(outname)
        if (not config['calibration']['animal_calibration']) or \
           ('adjusted' in cgroup.metadata and cgroup.metadata['adjusted']):
            return
        else:
            skip_calib = True
            if 'error' in cgroup.metadata:
                error = cgroup.metadata['error']
            else:
                error = None
        init_stuff = False
    elif config['calibration']['calibration_init'] is not None:
        calib_path = os.path.join(config['path'],
                                  config['calibration']['calibration_init'])
        print('loading calibration from: {}'.format(calib_path))
        cgroup = CameraGroup.load(calib_path)
        init_stuff = False
        skip_calib = len(videos) == 0
    else:
        if len(videos) == 0:
            print('no videos or calibration file found, continuing...')
            return
        cgroup = CameraGroup.from_names(cam_names,
                                        config['calibration']['fisheye'])

    board = get_calibration_board(config)

    if not skip_calib:
        rows_fname = os.path.join(outdir, 'detections.pickle')
        if os.path.exists(rows_fname):
            with open(rows_fname, 'rb') as f:
                all_rows = pickle.load(f)
        else:
            all_rows = cgroup.get_rows_videos(video_list, board)
            with open(rows_fname, 'wb') as f:
                pickle.dump(all_rows, f)

        cgroup.set_camera_sizes_videos(video_list)

        cgroup.calibrate_rows(all_rows,
                              board,
                              init_extrinsics=init_stuff,
                              init_intrinsics=init_stuff,
                              max_nfev=100,
                              n_iters=2,
                              n_samp_iter=100,
                              n_samp_full=300,
                              verbose=True)
        error = cgroup.calibrate_rows(all_rows,
                                      board,
                                      init_intrinsics=False,
                                      init_extrinsics=False,
                                      max_nfev=100,
                                      n_iters=10,
                                      n_samp_iter=100,
                                      n_samp_full=1000,
                                      verbose=True)

    cgroup.metadata['adjusted'] = False
    if error is not None:
        cgroup.metadata['error'] = float(error)
    cgroup.dump(outname)

    if config['calibration']['animal_calibration']:
        all_points, all_scores, all_cam_names = load_2d_data(
            config, calibration_path)
        imgp = process_points_for_calibration(all_points, all_scores)
        # error = cgroup.bundle_adjust(imgp, threshold=10, ftol=1e-4, loss='huber')
        cgroup = cgroup.subset_cameras_names(all_cam_names)
        error = cgroup.bundle_adjust_iter(imgp,
                                          ftol=1e-4,
                                          n_iters=10,
                                          n_samp_iter=300,
                                          n_samp_full=1000,
                                          max_nfev=500,
                                          verbose=True)
        cgroup.metadata['adjusted'] = True
        cgroup.metadata['error'] = float(error)

    cgroup.dump(outname)
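The calibration is dumped to calibration.toml once after board calibration and, if animal_calibration is enabled, once more after the bundle adjustment. A minimal sketch of loading the result back and inspecting the metadata set above, using only methods already shown in these examples (the path is hypothetical, and metadata is assumed to behave like a plain dict, as its use above suggests):

# hypothetical location of a calibration written by process_session
calib_fname = 'session1/calibration/calibration.toml'

cgroup = CameraGroup.load(calib_fname)
print(cgroup.get_names())                      # camera names in this calibration
print(cgroup.metadata.get('adjusted', False))  # True after animal-based bundle adjustment
print(cgroup.metadata.get('error'))            # reprojection error recorded by calibration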
Example #8
def get_errors_group(config, group, scorer=None):
    if config['filter3d']['enabled']:
        pipeline_pose_3d = config['pipeline']['pose_3d_filter']
    else:
        pipeline_pose_3d = config['pipeline']['pose_3d']

    metadatas = dict()
    fnames_dict = dict()
    cam_names = []

    for cname, folder in group:
        metadata_fname = os.path.join('labeled-data', folder,
                                      'anipose_metadata.csv')
        if scorer is None:
            labels_fname = sorted(
                glob(os.path.join('labeled-data', folder,
                                  'CollectedData*.h5')))[0]
        else:
            labels_fname = os.path.join('labeled-data', folder,
                                        'CollectedData_{}.h5'.format(scorer))

        metadatas[cname] = pd.read_csv(metadata_fname)
        fnames_dict[cname] = labels_fname
        cam_names.append(cname)

    cam_names = sorted(cam_names)

    ## TODO: will have to modify this for custom offset per session
    offsets_dict = load_offsets_dict(config, cam_names)
    print(offsets_dict)

    out = load_pose2d_fnames(fnames_dict, offsets_dict, cam_names)

    points_labeled = out['points']
    bodyparts = out['bodyparts']

    metadata = metadatas[cam_names[0]]

    n_frames = len(metadata)
    n_joints = len(bodyparts)

    calib_fnames = np.array(metadata['calib'])
    points_3d_pred = np.full((n_frames, n_joints, 3), np.nan, 'float')
    points_3d_labeled = np.full((n_frames, n_joints, 3), np.nan, 'float')
    reproj_err_pred = np.full((n_frames, n_joints), np.nan, 'float')
    reproj_err_labeled = np.full((n_frames, n_joints), np.nan, 'float')

    # get predicted 3D points
    paths_3d = []
    curr_path = None
    curr_pose = None
    curr_fnum = None
    for i in range(n_frames):
        row = metadata.iloc[i]
        fname = row['video']
        fnum = row['framenum']
        prefix = os.path.dirname(os.path.dirname(fname))
        vidname = get_video_name(config, fname)
        pose_path = os.path.join(prefix, pipeline_pose_3d, vidname + '.csv')
        paths_3d.append(pose_path)
        if curr_path != pose_path:
            curr_pose = pd.read_csv(pose_path)
            curr_fnum = np.array(curr_pose['fnum'])
            curr_path = pose_path
        try:
            ix = np.where(curr_fnum == fnum)[0][0]
        except IndexError:
            print("W: frame {} not found in 3D data for video {}".format(
                fnum, fname))
            continue
        row = curr_pose.iloc[ix]
        M, center = get_transform(row)
        pts = np.array([(row[bp + '_x'], row[bp + '_y'], row[bp + '_z'])
                        for bp in bodyparts])
        pts_t = (pts + center).dot(np.linalg.inv(M.T))
        points_3d_pred[i] = pts_t
        reproj_err_pred[i] = [row[bp + '_error'] for bp in bodyparts]

    # triangulate 3D points from labeled points
    # get reprojection errors as well
    curr_cgroup = None
    curr_calib_fname = None
    for i in range(n_frames):
        calib_fname = calib_fnames[i]
        if curr_calib_fname != calib_fname:
            print(calib_fname)
            curr_cgroup = CameraGroup.load(calib_fname)
            curr_cgroup = curr_cgroup.subset_cameras_names(cam_names)
            print(curr_cgroup.get_names())
            curr_calib_fname = calib_fname
        pts = points_labeled[:, i]
        p3d = curr_cgroup.triangulate(pts)
        points_3d_labeled[i] = p3d
        reproj_err_labeled[i] = curr_cgroup.reprojection_error(p3d,
                                                               pts,
                                                               mean=True)

    # get L2 and reprojection errors
    errors = np.linalg.norm(points_3d_labeled - points_3d_pred, axis=2)

    # get angles
    vecs_pred = dict()
    vecs_lab = dict()
    for bp_ix, bp in enumerate(bodyparts):
        vecs_lab[bp] = points_3d_labeled[:, bp_ix]
        vecs_pred[bp] = points_3d_pred[:, bp_ix]
    angles = config.get('angles', dict())
    # angle_names = sorted(angles.keys())
    angles_pred = get_angles(vecs_pred, angles)
    angles_lab = get_angles(vecs_lab, angles)
    angle_names = sorted(angles_pred.keys())

    # save into dataframe
    out = pd.DataFrame()
    out['pose_path'] = paths_3d
    out['framenum'] = metadata['framenum']
    out['calib'] = metadata['calib']
    out['img'] = metadata['img']
    out['video'] = metadata['video']
    for ang_name in angle_names:
        out[ang_name + '_lab'] = angles_lab[ang_name]
        out[ang_name + '_pred'] = angles_pred[ang_name]
        out[ang_name + '_error'] = angles_pred[ang_name] - angles_lab[ang_name]
    for bp_ix, bp in enumerate(bodyparts):
        out[bp + '_x_lab'] = points_3d_labeled[:, bp_ix, 0]
        out[bp + '_y_lab'] = points_3d_labeled[:, bp_ix, 1]
        out[bp + '_z_lab'] = points_3d_labeled[:, bp_ix, 2]
        out[bp + '_reprojerr_lab'] = reproj_err_labeled[:, bp_ix]
        out[bp + '_x_pred'] = points_3d_pred[:, bp_ix, 0]
        out[bp + '_y_pred'] = points_3d_pred[:, bp_ix, 1]
        out[bp + '_z_pred'] = points_3d_pred[:, bp_ix, 2]
        out[bp + '_reprojerr_pred'] = reproj_err_pred[:, bp_ix]
        out[bp + '_error'] = errors[:, bp_ix]

    return out
Example #9
def load_2d_data(config):
    pose_fnames = process_all(config, get_pose2d_fnames)
    cam_videos = defaultdict(list)

    for key, (session_path, fnames) in pose_fnames.items():
        for fname in fnames:
            # print(fname)
            vidname = get_video_name(config, fname)
            k = (key, session_path, vidname)
            cam_videos[k].append(fname)

    vid_names = sorted(cam_videos.keys())

    all_points = []
    all_scores = []
    all_proj = []
    all_fnames = []
    calib_fnames = []

    for name in tqdm(vid_names, desc='load points', ncols=80):
        # for name in vid_names:
        (key, session_path, vidname) = name
        fnames = sorted(cam_videos[name])
        cam_names = [get_cam_name(config, f) for f in fnames]
        fname_dict = dict(zip(cam_names, fnames))

        cgroup = None
        calib_folder = find_calibration_folder(config, session_path)
        if calib_folder is not None:
            calib_fname = os.path.join(
                calib_folder, config['pipeline']['calibration_results'],
                'calibration.toml')
            if os.path.exists(calib_fname):
                cgroup = CameraGroup.load(calib_fname)

        pose_fname = os.path.join(session_path, config['pipeline']['pose_3d'],
                                  vidname + '.csv')

        if cgroup is None or not os.path.exists(pose_fname):
            continue

        calib_fnames.append(calib_fname)

        video_folder = os.path.join(session_path,
                                    config['pipeline']['videos_raw'])
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)
        out = load_pose2d_fnames(fname_dict)
        points_raw = out['points']
        scores = out['scores']
        bodyparts = out['bodyparts']

        vid_fnames = [
            os.path.join(session_path, config['pipeline']['videos_raw'],
                         true_basename(f) + '.' + config['video_extension'])
            for f in fnames
        ]

        points_proj = get_projected_points(bodyparts, pose_fname, cgroup,
                                           offsets_dict)

        all_points.append(points_raw)
        all_scores.append(scores)
        all_proj.append(points_proj)
        all_fnames.append(vid_fnames)

    # note: cam_names and bodyparts reflect the last processed video set
    # (assumed to be consistent across all sessions)
    out = {
        'points': all_points,
        'scores': all_scores,
        'proj': all_proj,
        'fnames': all_fnames,
        'cam_names': cam_names,
        'calib_fnames': calib_fnames,
        'bodyparts': bodyparts
    }
    return out
Example #10
def triangulate(config,
                calib_folder, video_folder, pose_folder,
                fname_dict, output_fname):

    cam_names = sorted(fname_dict.keys())

    calib_fname = os.path.join(calib_folder, 'calibration.toml')
    cgroup = CameraGroup.load(calib_fname)

    offsets_dict = load_offsets_dict(config, cam_names, video_folder)

    out = load_pose2d_fnames(fname_dict, offsets_dict, cam_names)
    all_points_raw = out['points']
    all_scores = out['scores']
    bodyparts = out['bodyparts']

    cgroup = cgroup.subset_cameras_names(cam_names)

    n_cams, n_frames, n_joints, _ = all_points_raw.shape

    bad = all_scores < config['triangulation']['score_threshold']
    all_points_raw[bad] = np.nan

    if config['triangulation']['optim']:
        constraints = load_constraints(config, bodyparts)
        constraints_weak = load_constraints(config, bodyparts, 'constraints_weak')

        points_2d = all_points_raw
        scores_2d = all_scores

        points_shaped = points_2d.reshape(n_cams, n_frames*n_joints, 2)
        if config['triangulation']['ransac']:
            points_3d_init, _, _, _ = cgroup.triangulate_ransac(points_shaped, progress=True)
        else:
            points_3d_init = cgroup.triangulate(points_shaped, progress=True)
        points_3d_init = points_3d_init.reshape((n_frames, n_joints, 3))

        c = np.isfinite(points_3d_init[:, :, 0])
        if np.sum(c) < 20:
            print("warning: not enough 3D points to run optimization")
            points_3d = points_3d_init
        else:
            points_3d = cgroup.optim_points(
                points_2d, points_3d_init,
                constraints=constraints,
                constraints_weak=constraints_weak,
                # scores=scores_2d,
                scale_smooth=config['triangulation']['scale_smooth'],
                scale_length=config['triangulation']['scale_length'],
                scale_length_weak=config['triangulation']['scale_length_weak'],
                n_deriv_smooth=config['triangulation']['n_deriv_smooth'],
                reproj_error_threshold=config['triangulation']['reproj_error_threshold'],
                verbose=True)

        points_2d_flat = points_2d.reshape(n_cams, -1, 2)
        points_3d_flat = points_3d.reshape(-1, 3)

        errors = cgroup.reprojection_error(
            points_3d_flat, points_2d_flat, mean=True)
        good_points = ~np.isnan(all_points_raw[:, :, :, 0])
        num_cams = np.sum(good_points, axis=0).astype('float')

        all_points_3d = points_3d
        all_errors = errors.reshape(n_frames, n_joints)

        all_scores[~good_points] = 2
        scores_3d = np.min(all_scores, axis=0)

        scores_3d[num_cams < 1] = np.nan
        all_errors[num_cams < 1] = np.nan

    else:
        points_2d = all_points_raw.reshape(n_cams, n_frames*n_joints, 2)
        if config['triangulation']['ransac']:
            points_3d, picked, p2ds, errors = cgroup.triangulate_ransac(
                points_2d, min_cams=3, progress=True)

            all_points_picked = p2ds.reshape(n_cams, n_frames, n_joints, 2)
            good_points = ~np.isnan(all_points_picked[:, :, :, 0])

            num_cams = np.sum(np.sum(picked, axis=0), axis=1)\
                         .reshape(n_frames, n_joints)\
                         .astype('float')
        else:
            points_3d = cgroup.triangulate(points_2d, progress=True)
            errors = cgroup.reprojection_error(points_3d, points_2d, mean=True)
            good_points = ~np.isnan(all_points_raw[:, :, :, 0])
            num_cams = np.sum(good_points, axis=0).astype('float')

        all_points_3d = points_3d.reshape(n_frames, n_joints, 3)
        all_errors = errors.reshape(n_frames, n_joints)

        all_scores[~good_points] = 2
        scores_3d = np.min(all_scores, axis=0)

        scores_3d[num_cams < 2] = np.nan
        all_errors[num_cams < 2] = np.nan
        num_cams[num_cams < 2] = np.nan

    if 'reference_point' in config['triangulation'] and 'axes' in config['triangulation']:
        all_points_3d_adj, M, center = correct_coordinate_frame(config, all_points_3d, bodyparts)
    else:
        all_points_3d_adj = all_points_3d
        M = np.identity(3)
        center = np.zeros(3)

    dout = pd.DataFrame()
    for bp_num, bp in enumerate(bodyparts):
        for ax_num, axis in enumerate(['x','y','z']):
            dout[bp + '_' + axis] = all_points_3d_adj[:, bp_num, ax_num]
        dout[bp + '_error'] = all_errors[:, bp_num]
        dout[bp + '_ncams'] = num_cams[:, bp_num]
        dout[bp + '_score'] = scores_3d[:, bp_num]

    for i in range(3):
        for j in range(3):
            dout['M_{}{}'.format(i, j)] = M[i, j]

    for i in range(3):
        dout['center_{}'.format(i)] = center[i]

    dout['fnum'] = np.arange(n_frames)

    dout.to_csv(output_fname, index=False)
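A minimal sketch of calling triangulate as defined above. Every path, camera name and file name is hypothetical, the config carries only the keys this function and its visible branches read ('optim' and 'ransac' are disabled so the constrained-optimization and RANSAC branches are skipped), and the helpers it relies on (load_offsets_dict, load_pose2d_fnames) are assumed to come from the surrounding project:

# illustrative configuration: only the keys read by triangulate()
config = {
    'triangulation': {
        'score_threshold': 0.5,
        'optim': False,
        'ransac': False,
    },
    'cameras': {},  # per-camera offsets, if any, consumed by load_offsets_dict
}

# hypothetical per-camera 2D pose files
fname_dict = {
    'camA': 'pose-2d/vid-camA.h5',
    'camB': 'pose-2d/vid-camB.h5',
    'camC': 'pose-2d/vid-camC.h5',
}

triangulate(config,
            calib_folder='calibration',   # must contain calibration.toml
            video_folder='videos-raw',
            pose_folder='pose-2d',
            fname_dict=fname_dict,
            output_fname='pose-3d/vid.csv')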