Example #1
def get_all_videos_fnames(config):
    vids_fnames = process_all(config, get_videos_fnames)
    cam_videos = defaultdict(list)

    for key, (session_path, fnames) in vids_fnames.items():
        for fname in fnames:
            vidname = get_video_name(config, fname)
            k = (key, session_path, vidname)
            cam_videos[k].append(fname)

    vid_names = sorted(cam_videos.keys(), key=lambda x: natural_keys(x[2]))

    all_fnames = []
    calib_fnames = []

    for name in tqdm(vid_names, desc='get videos', ncols=80):
        (key, session_path, vidname) = name
        fnames = sorted(cam_videos[name], key=natural_keys)
        cam_names = [get_cam_name(config, f) for f in fnames]

        cgroup = None
        calib_folder = find_calibration_folder(config, session_path)
        if calib_folder is not None:
            calib_fname = os.path.join(
                calib_folder, config['pipeline']['calibration_results'],
                'calibration.toml')
            if os.path.exists(calib_fname):
                cgroup = CameraGroup.load(calib_fname)

        # pose_fname = os.path.join(session_path, config['pipeline']['pose_3d'],
        #                           vidname+'.csv')

        if cgroup is None:  # or not os.path.exists(pose_fname):
            continue

        calib_fnames.append(calib_fname)
        all_fnames.append(fnames)

    out = {
        'fnames': all_fnames,
        'calib_fnames': calib_fnames,
        # cam_names is taken from the last video processed; the camera set is
        # assumed to be the same across sessions
        'cam_names': cam_names
    }
    return out
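For reference, a minimal sketch of consuming the returned dictionary, pairing each synchronized video set with its session's calibration file (the loop below is an illustration, not part of the original pipeline):

# hypothetical usage sketch
videos = get_all_videos_fnames(config)
for fnames, calib_fname in zip(videos['fnames'], videos['calib_fnames']):
    cgroup = CameraGroup.load(calib_fname)  # calibrated cameras for this session
    print(len(fnames), 'videos ->', calib_fname)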
Example #2
def process_session(config, session_path):
    pipeline_calibration_videos = config['pipeline']['calibration_videos']
    pipeline_calibration_results = config['pipeline']['calibration_results']
    video_ext = config['video_extension']

    calibration_path = find_calibration_folder(config, session_path)

    if calibration_path is None:
        return

    videos = glob(
        os.path.join(calibration_path, pipeline_calibration_videos,
                     '*.' + video_ext))
    videos = sorted(videos)

    cam_videos = defaultdict(list)
    cam_names = set()
    for vid in videos:
        name = get_cam_name(config, vid)
        cam_videos[name].append(vid)
        cam_names.add(name)

    cam_names = sorted(cam_names)

    video_list = [sorted(cam_videos[cname]) for cname in cam_names]

    outname_base = 'calibration.toml'
    outdir = os.path.join(calibration_path, pipeline_calibration_results)
    outname = os.path.join(outdir, outname_base)

    print(outname)
    skip_calib = False
    init_stuff = True
    error = None  # reprojection error from a previous calibration, if available

    if os.path.exists(outname):
        cgroup = CameraGroup.load(outname)
        if (not config['calibration']['animal_calibration']) or \
           ('adjusted' in cgroup.metadata and cgroup.metadata['adjusted']):
            return
        else:
            skip_calib = True
            if 'error' in cgroup.metadata:
                error = cgroup.metadata['error']
            else:
                error = None
        init_stuff = False
    elif config['calibration']['calibration_init'] is not None:
        calib_path = os.path.join(config['path'],
                                  config['calibration']['calibration_init'])
        print('loading calibration from: {}'.format(calib_path))
        cgroup = CameraGroup.load(calib_path)
        init_stuff = False
        skip_calib = len(videos) == 0
    else:
        if len(videos) == 0:
            print('no videos or calibration file found, continuing...')
            return
        cgroup = CameraGroup.from_names(cam_names,
                                        config['calibration']['fisheye'])

    board = get_calibration_board(config)

    if not skip_calib:
        rows_fname = os.path.join(outdir, 'detections.pickle')
        if os.path.exists(rows_fname):
            with open(rows_fname, 'rb') as f:
                all_rows = pickle.load(f)
        else:
            all_rows = cgroup.get_rows_videos(video_list, board)
            with open(rows_fname, 'wb') as f:
                pickle.dump(all_rows, f)

        cgroup.set_camera_sizes_videos(video_list)
        error = cgroup.calibrate_rows(all_rows,
                                      board,
                                      init_intrinsics=init_stuff,
                                      init_extrinsics=init_stuff,
                                      ftol=1e-4,
                                      max_nfev=500,
                                      n_iters=10,
                                      error_threshold=1,
                                      end_mu=1,
                                      n_samp_iter=100,
                                      n_samp_full=2000)

    cgroup.metadata['adjusted'] = False
    if error is not None:
        cgroup.metadata['error'] = float(error)
    cgroup.dump(outname)

    if config['calibration']['animal_calibration']:
        all_points, all_scores = load_2d_data(config, calibration_path)
        imgp = process_points_for_calibration(all_points, all_scores)
        # error = cgroup.bundle_adjust(imgp, threshold=10, ftol=1e-4, loss='huber')
        error = cgroup.bundle_adjust_iter(imgp,
                                          ftol=1e-4,
                                          n_iters=10,
                                          n_samp_iter=300,
                                          n_samp_full=1000,
                                          max_nfev=500,
                                          verbose=True)
        cgroup.metadata['adjusted'] = True
        cgroup.metadata['error'] = float(error)

    cgroup.dump(outname)
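These per-session functions are typically run over every session in a project; below is a hedged sketch of such a driver, assuming process_all(config, func) calls func(config, session_path) for each session, as its use in Examples #1 and #3 suggests:

# hypothetical top-level entry point
def calibrate_all(config):
    process_all(config, process_session)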
Example #3
def load_2d_data(config):
    pose_fnames = process_all(config, get_pose2d_fnames)
    cam_videos = defaultdict(list)

    for key, (session_path, fnames) in pose_fnames.items():
        for fname in fnames:
            vidname = get_video_name(config, fname)
            k = (key, session_path, vidname)
            cam_videos[k].append(fname)

    vid_names = sorted(cam_videos.keys())

    all_points = []
    all_scores = []
    all_proj = []
    all_fnames = []
    calib_fnames = []

    for name in tqdm(vid_names, desc='load points', ncols=80):
        (key, session_path, vidname) = name
        fnames = sorted(cam_videos[name])
        cam_names = [get_cam_name(config, f) for f in fnames]
        fname_dict = dict(zip(cam_names, fnames))

        cgroup = None
        calib_folder = find_calibration_folder(config, session_path)
        if calib_folder is not None:
            calib_fname = os.path.join(
                calib_folder, config['pipeline']['calibration_results'],
                'calibration.toml')
            if os.path.exists(calib_fname):
                cgroup = CameraGroup.load(calib_fname)

        pose_fname = os.path.join(session_path, config['pipeline']['pose_3d'],
                                  vidname + '.csv')

        if cgroup is None or not os.path.exists(pose_fname):
            continue

        calib_fnames.append(calib_fname)

        video_folder = os.path.join(session_path,
                                    config['pipeline']['videos_raw'])
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)
        out = load_pose2d_fnames(fname_dict)
        points_raw = out['points']
        scores = out['scores']
        bodyparts = out['bodyparts']

        vid_fnames = [
            os.path.join(session_path, config['pipeline']['videos_raw'],
                         true_basename(f) + '.' + config['video_extension'])
            for f in fnames
        ]

        points_proj = get_projected_points(bodyparts, pose_fname, cgroup,
                                           offsets_dict)

        all_points.append(points_raw)
        all_scores.append(scores)
        all_proj.append(points_proj)
        all_fnames.append(vid_fnames)

    out = {
        'points': all_points,
        'scores': all_scores,
        'proj': all_proj,
        'fnames': all_fnames,
        # cam_names and bodyparts come from the last video processed and are
        # assumed to be consistent across sessions
        'cam_names': cam_names,
        'calib_fnames': calib_fnames,
        'bodyparts': bodyparts
    }
    return out
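A hedged sketch of consuming this dictionary, e.g. comparing detected and reprojected 2D points per video; it assumes, purely for illustration, that 'points' and 'proj' share the (n_cams, n_frames, n_joints, 2) layout produced by load_pose2d_fnames:

# hypothetical usage sketch
data = load_2d_data(config)
for points, proj, fnames in zip(data['points'], data['proj'], data['fnames']):
    # mean pixel distance between detections and reprojections for this video set
    dist = np.nanmean(np.linalg.norm(points - proj, axis=-1))
    print(fnames[0], 'mean 2D reprojection distance: {:.2f} px'.format(dist))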
Example #4
def triangulate(config,
                calib_folder, video_folder, pose_folder,
                fname_dict, output_fname):

    cam_names = sorted(fname_dict.keys())

    calib_fname = os.path.join(calib_folder, 'calibration.toml')
    cgroup = CameraGroup.load(calib_fname)

    offsets_dict = load_offsets_dict(config, cam_names, video_folder)

    out = load_pose2d_fnames(fname_dict, offsets_dict)
    all_points_raw = out['points']
    all_scores = out['scores']
    bodyparts = out['bodyparts']

    n_cams, n_frames, n_joints, _ = all_points_raw.shape

    # drop detections below the configured confidence threshold
    bad = all_scores < config['triangulation']['score_threshold']
    all_points_raw[bad] = np.nan

    if config['triangulation']['optim']:
        constraints = load_constraints(config, bodyparts)
        constraints_weak = load_constraints(config, bodyparts, 'constraints_weak')

        points_2d = all_points_raw
        scores_2d = all_scores

        points_3d = cgroup.triangulate_optim(
            points_2d,
            constraints=constraints,
            constraints_weak=constraints_weak,
            # scores=scores_2d,
            scale_smooth=config['triangulation']['scale_smooth'],
            scale_length=config['triangulation']['scale_length'],
            scale_length_weak=config['triangulation']['scale_length_weak'],
            n_deriv_smooth=config['triangulation']['n_deriv_smooth'],
            reproj_error_threshold=config['triangulation']['reproj_error_threshold'],
            init_progress=True, verbose=True)

        points_2d_flat = points_2d.reshape(n_cams, -1, 2)
        points_3d_flat = points_3d.reshape(-1, 3)

        errors = cgroup.reprojection_error(
            points_3d_flat, points_2d_flat, mean=True)
        good_points = ~np.isnan(all_points_raw[:, :, :, 0])
        num_cams = np.sum(good_points, axis=0).astype('float')

        all_points_3d = points_3d
        all_errors = errors.reshape(n_frames, n_joints)

        all_scores[~good_points] = 2
        scores_3d = np.min(all_scores, axis=0)

        scores_3d[num_cams < 1] = np.nan
        all_errors[num_cams < 1] = np.nan

    else:
        points_2d = all_points_raw.reshape(n_cams, n_frames*n_joints, 2)
        if config['triangulation']['ransac']:
            points_3d, picked, p2ds, errors = cgroup.triangulate_ransac(
                points_2d, min_cams=3, progress=True)

            all_points_picked = p2ds.reshape(n_cams, n_frames, n_joints, 2)
            good_points = ~np.isnan(all_points_picked[:, :, :, 0])

            num_cams = np.sum(np.sum(picked, axis=0), axis=1)\
                         .reshape(n_frames, n_joints)\
                         .astype('float')
        else:
            points_3d = cgroup.triangulate(points_2d, progress=True)
            errors = cgroup.reprojection_error(points_3d, points_2d, mean=True)
            good_points = ~np.isnan(all_points_raw[:, :, :, 0])
            num_cams = np.sum(good_points, axis=0).astype('float')

        all_points_3d = points_3d.reshape(n_frames, n_joints, 3)
        all_errors = errors.reshape(n_frames, n_joints)

        all_scores[~good_points] = 2
        scores_3d = np.min(all_scores, axis=0)

        scores_3d[num_cams < 2] = np.nan
        all_errors[num_cams < 2] = np.nan
        num_cams[num_cams < 2] = np.nan

    if 'reference_point' in config['triangulation'] and 'axes' in config['triangulation']:
        all_points_3d_adj, M, center = correct_coordinate_frame(config, all_points_3d, bodyparts)
    else:
        all_points_3d_adj = all_points_3d
        M = np.identity(3)
        center = np.zeros(3)

    dout = pd.DataFrame()
    for bp_num, bp in enumerate(bodyparts):
        for ax_num, axis in enumerate(['x','y','z']):
            dout[bp + '_' + axis] = all_points_3d_adj[:, bp_num, ax_num]
        dout[bp + '_error'] = all_errors[:, bp_num]
        dout[bp + '_ncams'] = num_cams[:, bp_num]
        dout[bp + '_score'] = scores_3d[:, bp_num]

    for i in range(3):
        for j in range(3):
            dout['M_{}{}'.format(i, j)] = M[i, j]

    for i in range(3):
        dout['center_{}'.format(i)] = center[i]

    dout['fnum'] = np.arange(n_frames)

    dout.to_csv(output_fname, index=False)
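A hedged calling sketch for triangulate; the folder layout and camera names ('camA', 'camB') are placeholders, but the shape of fname_dict (camera name mapped to a 2D pose file) follows its use with load_pose2d_fnames above:

# hypothetical invocation
fname_dict = {
    'camA': 'session1/pose-2d/vid1-camA.h5',
    'camB': 'session1/pose-2d/vid1-camB.h5',
}
triangulate(config,
            calib_folder='session1/calibration',
            video_folder='session1/videos-raw',
            pose_folder='session1/pose-2d',
            fname_dict=fname_dict,
            output_fname='session1/pose-3d/vid1.csv')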
Example #5
def process_session(config, session_path):
    # filtered = config['filter']['enabled']
    # if filtered:
    #     pipeline_videos_labeled_2d = config['pipeline']['videos_labeled_2d_filter']
    # else:
    #     pipeline_videos_labeled_2d = config['pipeline']['videos_labeled_2d']
    pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d']
    pipeline_videos_raw = config['pipeline']['videos_raw']
    pipeline_angles = config['pipeline']['angles']
    pipeline_pose_3d = config['pipeline']['pose_3d']
    pipeline_videos_combined = config['pipeline']['videos_combined']

    video_ext = config['video_extension']

    vid_fnames_2d = glob(
        os.path.join(session_path, pipeline_videos_raw, "*." + video_ext))

    # vid_fnames_2d = glob(os.path.join(session_path,
    #                                   pipeline_videos_labeled_2d, "*.avi"))

    vid_fnames_3d = glob(
        os.path.join(session_path, pipeline_videos_labeled_3d, "*.avi"))
    vid_fnames_3d = sorted(vid_fnames_3d, key=natural_keys)

    fnames_2d = defaultdict(list)
    for vid in vid_fnames_2d:
        vidname = get_video_name(config, vid)
        fnames_2d[vidname].append(vid)

    fnames_3d = defaultdict(list)
    for vid in vid_fnames_3d:
        vidname = true_basename(vid)
        fnames_3d[vidname].append(vid)

    cgroup = None
    calib_folder = find_calibration_folder(config, session_path)
    if calib_folder is not None:
        calib_fname = os.path.join(calib_folder,
                                   config['pipeline']['calibration_results'],
                                   'calibration.toml')
        if os.path.exists(calib_fname):
            cgroup = CameraGroup.load(calib_fname)

    # angle_fnames = glob(os.path.join(session_path,
    #                                  pipeline_angles, '*.csv'))
    # angle_fnames = sorted(angle_fnames, key=natural_keys)

    outdir = os.path.join(session_path, pipeline_videos_combined)

    if len(vid_fnames_3d) > 0:
        os.makedirs(outdir, exist_ok=True)

    for vid_fname in vid_fnames_3d:
        basename = true_basename(vid_fname)

        out_fname = os.path.join(outdir, basename + '.avi')
        pose_fname = os.path.join(session_path, pipeline_pose_3d,
                                  basename + '.csv')

        if os.path.exists(out_fname) and \
           abs(get_nframes(out_fname) - get_nframes(vid_fname)) < 100:
            continue

        if len(fnames_2d[basename]) == 0:
            print(out_fname, 'missing 2d videos')
            continue

        if len(fnames_3d[basename]) == 0:
            print(out_fname, 'missing 3d videos')
            continue

        fname_3d_current = fnames_3d[basename][0]
        fnames_2d_current = fnames_2d[basename]
        fnames_2d_current = sorted(fnames_2d_current, key=natural_keys)

        cam_names = [
            get_cam_name(config, fname) for fname in fnames_2d_current
        ]

        print(out_fname)

        video_folder = os.path.join(session_path, pipeline_videos_raw)
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)

        cgroup_subset = cgroup.subset_cameras_names(cam_names)

        visualize_combined(config, pose_fname, cgroup_subset, offsets_dict,
                           fnames_2d_current, fname_3d_current, out_fname)
Example #6
def process_session(config, session_path):
    pipeline_videos_raw = config['pipeline']['videos_raw']
    pipeline_pose_3d = config['pipeline']['pose_3d']
    pipeline_pose_2d_projected = config['pipeline']['pose_2d_projected']

    video_ext = config['video_extension']

    vid_fnames_2d = glob(
        os.path.join(session_path, pipeline_videos_raw, "*." + video_ext))
    vid_fnames_2d = sorted(vid_fnames_2d, key=natural_keys)

    pose_fnames_3d = glob(os.path.join(session_path, pipeline_pose_3d,
                                       "*.csv"))

    if len(pose_fnames_3d) == 0:
        return

    fnames_2d = defaultdict(list)
    for vid in vid_fnames_2d:
        vidname = get_video_name(config, vid)
        fnames_2d[vidname].append(vid)

    fnames_3d = defaultdict(list)
    for fname in pose_fnames_3d:
        vidname = true_basename(fname)
        fnames_3d[vidname].append(fname)

    cgroup = None
    calib_folder = find_calibration_folder(config, session_path)
    if calib_folder is not None:
        calib_fname = os.path.join(calib_folder,
                                   config['pipeline']['calibration_results'],
                                   'calibration.toml')
        if os.path.exists(calib_fname):
            cgroup = CameraGroup.load(calib_fname)

    if cgroup is None:
        print(
            'session {}: no calibration found, skipping'.format(session_path))
        return

    outdir = os.path.join(session_path, pipeline_pose_2d_projected)
    os.makedirs(outdir, exist_ok=True)

    for pose_fname in pose_fnames_3d:
        basename = true_basename(pose_fname)

        if len(fnames_2d[basename]) == 0:
            print(pose_fname, 'missing raw videos')
            continue

        fname_3d_current = pose_fname
        fnames_2d_current = fnames_2d[basename]
        fnames_2d_current = sorted(fnames_2d_current, key=natural_keys)

        out_fnames = [
            os.path.join(outdir,
                         true_basename(fname) + '.h5')
            for fname in fnames_2d_current
        ]

        if all([os.path.exists(f) for f in out_fnames]):
            continue

        print(pose_fname)

        cam_names = [
            get_cam_name(config, fname) for fname in fnames_2d_current
        ]

        video_folder = os.path.join(session_path, pipeline_videos_raw)
        offsets_dict = load_offsets_dict(config, cam_names, video_folder)

        cgroup_subset = cgroup.subset_cameras_names(cam_names)

        bodyparts, points_2d_proj, all_scores = get_projected_points(
            config, fname_3d_current, cgroup_subset, offsets_dict)

        metadata = {
            'scorer': 'scorer',
            'bodyparts': bodyparts,
            'index': np.arange(points_2d_proj.shape[2])
        }

        n_cams, n_joints, n_frames, _ = points_2d_proj.shape

        # per-camera output array: (n_frames, n_joints, [x, y, score])
        pts = np.zeros((n_frames, n_joints, 3), dtype='float64')

        for cix, (cname, outname) in enumerate(zip(cam_names, out_fnames)):
            # (n_joints, n_frames, 2) -> (n_frames, n_joints, 2)
            pts[:, :, :2] = points_2d_proj[cix].swapaxes(0, 1)
            pts[:, :, 2] = all_scores.T
            write_pose_2d(pts, metadata, outname)
Example #7
def get_errors_group(config, group):
    pipeline_pose_3d = config['pipeline']['pose_3d']

    metadatas = dict()
    fnames_dict = dict()
    cam_names = []

    for cname, folder in group:
        metadata_fname = os.path.join('labeled-data', folder,
                                      'anipose_metadata.csv')
        labels_fname = glob(
            os.path.join('labeled-data', folder, 'CollectedData*.h5'))[0]
        metadatas[cname] = pd.read_csv(metadata_fname)
        fnames_dict[cname] = labels_fname
        cam_names.append(cname)

    cam_names = sorted(cam_names)

    ## TODO: will have to modify this for custom offset per session
    offsets_dict = load_offsets_dict(config, cam_names)

    out = load_pose2d_fnames(fnames_dict, offsets_dict)

    points_labeled = out['points']
    bodyparts = out['bodyparts']

    metadata = metadatas[cam_names[0]]

    n_frames = len(metadata)
    n_joints = len(bodyparts)

    calib_fnames = np.array(metadata['calib'])
    points_3d_pred = np.full((n_frames, n_joints, 3), np.nan, 'float')
    points_3d_labeled = np.full((n_frames, n_joints, 3), np.nan, 'float')
    reproj_err_pred = np.full((n_frames, n_joints), np.nan, 'float')
    reproj_err_labeled = np.full((n_frames, n_joints), np.nan, 'float')

    # get predicted 3D points
    paths_3d = []
    curr_path = None
    curr_pose = None
    curr_fnum = None
    for i in range(n_frames):
        row = metadata.iloc[i]
        fname = row['video']
        fnum = row['framenum']
        prefix = os.path.dirname(os.path.dirname(fname))
        vidname = get_video_name(config, fname)
        pose_path = os.path.join(prefix, pipeline_pose_3d, vidname + '.csv')
        paths_3d.append(pose_path)
        if curr_path != pose_path:
            curr_pose = pd.read_csv(pose_path)
            curr_fnum = np.array(curr_pose['fnum'])
            curr_path = pose_path
        try:
            ix = np.where(curr_fnum == fnum)[0][0]
        except IndexError:
            print("W: frame {} not found in 3D data for video {}".format(
                fnum, fname))
            continue
        row = curr_pose.iloc[ix]
        M, center = get_transform(row)
        pts = np.array([(row[bp + '_x'], row[bp + '_y'], row[bp + '_z'])
                        for bp in bodyparts])
        pts_t = (pts + center).dot(np.linalg.inv(M.T))
        points_3d_pred[i] = pts_t
        reproj_err_pred[i] = [row[bp + '_error'] for bp in bodyparts]

    # triangulate 3D points from labeled points
    # get reprojection errors as well
    curr_cgroup = None
    curr_calib_fname = None
    for i in range(n_frames):
        calib_fname = calib_fnames[i]
        if curr_calib_fname != calib_fname:
            curr_cgroup = CameraGroup.load(calib_fname)
            curr_calib_fname = calib_fname
        pts = points_labeled[:, i]
        p3d = curr_cgroup.triangulate(pts)
        points_3d_labeled[i] = p3d
        reproj_err_labeled[i] = curr_cgroup.reprojection_error(p3d,
                                                               pts,
                                                               mean=True)

    # get L2 and reprojection errors
    errors = np.linalg.norm(points_3d_labeled - points_3d_pred, axis=2)

    # get angles
    vecs_pred = dict()
    vecs_lab = dict()
    for bp_ix, bp in enumerate(bodyparts):
        vecs_lab[bp] = points_3d_labeled[:, bp_ix]
        vecs_pred[bp] = points_3d_pred[:, bp_ix]
    angles = config.get('angles', dict())
    angle_names = sorted(angles.keys())
    angles_pred = get_angles_vecs(vecs_pred, angles)
    angles_lab = get_angles_vecs(vecs_lab, angles)

    # save into dataframe
    out = pd.DataFrame()
    out['pose_path'] = paths_3d
    out['framenum'] = metadata['framenum']
    out['calib'] = metadata['calib']
    for ang_name in angle_names:
        out[ang_name + '_lab'] = angles_lab[ang_name]
        out[ang_name + '_pred'] = angles_pred[ang_name]
        out[ang_name + '_error'] = angles_pred[ang_name] - angles_lab[ang_name]
    for bp_ix, bp in enumerate(bodyparts):
        out[bp + '_x_lab'] = points_3d_labeled[:, bp_ix, 0]
        out[bp + '_y_lab'] = points_3d_labeled[:, bp_ix, 1]
        out[bp + '_z_lab'] = points_3d_labeled[:, bp_ix, 2]
        out[bp + '_reprojerr_lab'] = reproj_err_labeled[:, bp_ix]
        out[bp + '_x_pred'] = points_3d_pred[:, bp_ix, 0]
        out[bp + '_y_pred'] = points_3d_pred[:, bp_ix, 1]
        out[bp + '_z_pred'] = points_3d_pred[:, bp_ix, 2]
        out[bp + '_reprojerr_pred'] = reproj_err_pred[:, bp_ix]
        out[bp + '_error'] = errors[:, bp_ix]

    return out
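A hedged follow-up sketch summarizing the returned table; the column selection relies on the '_error' suffix used in the function, while the aggregation itself is only an illustration:

# hypothetical summary of labeled-vs-predicted errors
df = get_errors_group(config, group)
err_cols = [c for c in df.columns if c.endswith('_error')]
print(df[err_cols].mean())  # mean error per bodypart / angle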