Code example #1
import argparse
import os

import cv2

import calib  # project-local calibration I/O helpers (import name assumed from usage)
import feat   # project-local keypoint/feature I/O helpers (import name assumed from usage)


def main():
    parser = argparse.ArgumentParser(description="Calibrate using keypoint matches")
    parser.add_argument('pattern_key_fname', type=str, help='reference pattern keypoint file')
    parser.add_argument('num_frames', type=int, help='Number of scene frames')
    parser.add_argument('scene_key_fmt', type=str, help='Printf-formatted string representing keypoint files from N scene frames')
    parser.add_argument('match_fmt', type=str, help='Printf-formatted string representing keypoint match files')
    parser.add_argument('example_image', type=str, help='Example image to get dimensions from')
    parser.add_argument('out_fname', type=str, help='filename for intrinsic calibrated camera')
    parser.add_argument('--min_matches', default=20, type=int, help='omit frames with fewer than N matches')

    args = parser.parse_args()

    pattern_keys = feat.read_features(args.pattern_key_fname)[0]

    fnames = [(args.scene_key_fmt % x, args.match_fmt % x) for x in range(1,args.num_frames+1)]
    fnames = [x for x in fnames if os.path.isfile(x[0])]

    if len(fnames) == 0:
        print "No matching keypoint files"
        exit(1)

    missing = next((x[1] for x in fnames if not os.path.isfile(x[1])), None)
    if missing is not None:
        print "File not found: %s" % missing
        exit(1)

    [frame_fnames, match_fnames] = zip(*fnames)

    print "reading keypoints from %d frames" % len(frame_fnames)
    frames_keys = [feat.read_features(x)[0] for x in frame_fnames]
    print "reading matches from %d frames" % len(match_fnames)
    frames_matches = [feat.read_matches(x) for x in match_fnames]

    img = cv2.imread(args.example_image)
    img_size = (img.shape[1], img.shape[0])


    [obj_pts, image_pts] = to_calibration_data(frames_matches, pattern_keys, frames_keys, args.min_matches)

    cam = calibrate_intrinsic(obj_pts, image_pts, img_size)

    calib.write_intrinsic_camera(args.out_fname, cam)
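
The snippet above calls two helpers, to_calibration_data and calibrate_intrinsic, that are defined elsewhere in the project. The following is a minimal sketch of what they might look like, assuming the same imports as above plus numpy as np, and the calib.keypoint_matches_to_calibration_data / calib.Intrinsic_camera helpers that code example #2 uses; the signatures in the original project may differ.

import numpy as np

def to_calibration_data(frames_matches, pattern_keys, frames_keys, min_matches):
    # Sketch (assumed helper): convert per-frame keypoint matches into
    # object-point / image-point lists, skipping frames with too few matches.
    pattern_kp_mat = np.array([k.pt for k in pattern_keys], np.float32)
    obj_pts = []
    image_pts = []
    for matches, keys in zip(frames_matches, frames_keys):
        if len(matches) < min_matches:
            continue
        kp_mat = np.array([k.pt for k in keys], np.float32)
        o, p = calib.keypoint_matches_to_calibration_data(matches, pattern_kp_mat, kp_mat)
        obj_pts.append(o)
        image_pts.append(p)
    return obj_pts, image_pts

def calibrate_intrinsic(obj_pts, image_pts, img_size):
    # Sketch (assumed helper): run OpenCV calibration and wrap the result.
    error, K, distortion, rvecs, tvecs = cv2.calibrateCamera(
        obj_pts, image_pts, img_size, None, None)
    cam = calib.Intrinsic_camera()
    cam.K = K
    cam.distortion = distortion
    cam.image_size = img_size
    return cam
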
Code example #2
import argparse
import os

import cv2
import numpy as np

import calib  # project-local calibration I/O helpers (import name assumed from usage)
import feat   # project-local keypoint/feature I/O helpers (import name assumed from usage)


def main():
    parser = argparse.ArgumentParser(description="Match keypoints and calibrate camera")
    parser.add_argument('pattern_key_fname', type=str, help='reference pattern keypoint file')
    parser.add_argument('num_frames', type=int, help='Number of scene frames')
    parser.add_argument('scene_key_fmt', type=str, help='Printf-formatted string representing keypoint files from N scene frames')
    parser.add_argument('example_image', type=str, help='Example image (used to get dimensions)')
    parser.add_argument('out_matches_fmt', type=str, help='printf string for output matches filenames')
    parser.add_argument('out_camera_fname', type=str, help='output camera intrinsic calibration filename')
    parser.add_argument('--num_iterations', nargs='?', type=int, default=3, help='number of iterations of match & homography re-estimation (2-3 is usually good)')
    parser.add_argument('--lowe_threshold', nargs='?', type=float, default=0.6, help='Use David Lowe\'s ratio criterion for pruning bad matches')
    parser.add_argument('--homography_threshold', nargs='?', type=float, default=0, help='fit a homography to matches and prune by this reprojection error threshold (zero ignores)')
    parser.add_argument('--H_det_threshold', nargs='?', type=float, default=0, help='ignore frame if fitted homography has too small of a determinant.')
    parser.add_argument('--f_threshold', nargs='?', type=float, default=2, help='fit a fundamental matrix to matches and prune by this error threshold (zero ignores)')
    parser.add_argument('--min_matches', nargs='?', type=int, default=25, help='Minimum number of matches in a frame to be accepted into calibration set')

    args = parser.parse_args()

    [pattern_keys, pattern_descs] = feat.read_features(args.pattern_key_fname)

    if args.num_frames == 1:
        frame_fnames = [args.scene_key_fmt]
        out_match_fnames = [args.out_matches_fmt]
    else:      
        I = range(1, args.num_frames+1)
        frame_fnames = [args.scene_key_fmt % x for x in I]
        out_match_fnames = [args.out_matches_fmt % x for x in I]

    tmp = [(a,b) for (a,b) in zip(frame_fnames, out_match_fnames) if os.path.isfile(a)]

    if len(tmp) == 0:
        print "No files found matching %s" % args.scene_key_fmt
        exit(1)

    [frame_fnames, out_match_fnames] = zip(*tmp)

    print "reading keypoints from %d frames" % len(frame_fnames)
    frames_features = [feat.read_features(x) for x in frame_fnames]
    [frames_keys, frames_descs] = zip(*frames_features)

    img = cv2.imread(args.example_image)
    img_size = (img.shape[1], img.shape[0])

    distortion = np.zeros((1,5))

    pattern_kp_mat = np.array([x.pt for x in pattern_keys], np.float32)
    frame_kp_mats = [np.array([x.pt for x in keys], np.float32) for keys in frames_keys]
    fixed_kp_mats = [x.copy() for x in frame_kp_mats]

    frames_matches = [feat.match_features(pattern_descs, descs, args.lowe_threshold) for descs in frames_descs]

    for i in range(0, args.num_iterations):
        print "Iteration %d" % i
        homo_matches = []
        homo_keys = []
        for [kp_mat, fixed_kp_mat, matches] in zip(frame_kp_mats, fixed_kp_mats, frames_matches):
            if len(matches) < args.min_matches:
                homo_matches.append([])
                homo_keys.append([])
                continue
            filtered_matches = matches
            if args.homography_threshold > 0:
                [filtered_matches, H] = feat.filter_matches_by_homography(
                        pattern_kp_mat,
                        fixed_kp_mat,
                        filtered_matches,
                        args.homography_threshold)
                det_H = np.linalg.det(H)
                if args.H_det_threshold > 0 and det_H < args.H_det_threshold:
                    filtered_matches = []
                print "%d matches after homography filter" % len(filtered_matches)
            if args.f_threshold > 0 and len(filtered_matches) > 4:
                filtered_matches = feat.filter_matches_by_fundamental_matrix(
                        pattern_kp_mat,
                        fixed_kp_mat,
                        filtered_matches,
                        args.f_threshold)
                print "%d matches after fundamental matrix filter" % len(filtered_matches)

            homo_matches.append(filtered_matches)
            homo_keys.append(kp_mat)

        # gather calibration data
        tmp = [(x,y) for (x,y) in zip(homo_matches, homo_keys) if len(x) >= args.min_matches]

        if len(tmp) == 0:
            print "Error: No frames with sufficient matches found"
            exit(1)

        print "using %d frames for calibration after pruning bad frames" % len(tmp)
        tmp = [calib.keypoint_matches_to_calibration_data(m, pattern_kp_mat, k) \
                for (m,k) in tmp]
        [obj_pts, img_pts] = zip(*tmp)

        # perform calibration
        [error, K, distortion, rvecs, tvecs] = cv2.calibrateCamera(obj_pts, img_pts, img_size, None, None)

        if i < args.num_iterations-1:
            # undistort image points, which will improve
            # homography filtering next iteration
            fixed_kp_mats = []
            for kp_mat in frame_kp_mats:
                
                tmp = np.reshape(kp_mat, (-1, 1, 2))
                if kp_mat.shape[0] == 0:
                    tmp = kp_mat
                else:
                    tmp = cv2.undistortPoints(tmp, K, distortion, P=K)

                fixed_kp_mats.append(np.reshape(tmp, (-1, 2)))
     
    out_matchset = [x if len(x) >= args.min_matches else [] for x in homo_matches]
    assert(len(out_match_fnames) == len(out_matchset))
    for fname, m in zip(out_match_fnames, out_matchset):
        feat.write_matches(fname, m)

    cam = calib.Intrinsic_camera()
    cam.K = K
    cam.distortion = distortion
    cam.image_size = img_size
    calib.write_intrinsic_camera(args.out_camera_fname, cam)
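
For reference, a hypothetical invocation of code example #2 (the script name, file names, and threshold values below are made up for illustration; each printf-formatted argument is expanded once per frame index, so frame_%04d.key becomes frame_0001.key, frame_0002.key, and so on):

python match_and_calibrate.py pattern.key 30 frame_%04d.key example.png \
    matches_%04d.txt camera.yaml \
    --num_iterations 3 --lowe_threshold 0.6 --homography_threshold 5 --min_matches 25
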
Code example #3
import argparse
import os

import cv2
import numpy as np

import calib  # project-local calibration I/O helpers (import name assumed from usage)
import feat   # project-local keypoint/feature I/O helpers (import name assumed from usage)


def main():
    parser = argparse.ArgumentParser(
        description="Match keypoints and calibrate camera")
    parser.add_argument('pattern_key_fname',
                        type=str,
                        help='reference pattern keypoint file')
    parser.add_argument('num_frames', type=int, help='Number of scene frames')
    parser.add_argument(
        'scene_key_fmt',
        type=str,
        help=
        'Printf-formatted string representing keypoint files from N scene frames'
    )
    parser.add_argument('example_image',
                        type=str,
                        help='Example image (used to get dimensions)')
    parser.add_argument('out_matches_fmt',
                        type=str,
                        help='printf string for output matches filenames')
    parser.add_argument('out_camera_fname',
                        type=str,
                        help='output camera intrinsic calibration filename')
    parser.add_argument(
        '--num_iterations',
        nargs='?',
        type=int,
        default=3,
        help=
        'number of iterations of match & homography re-estimation (2-3 is usually good)'
    )
    parser.add_argument(
        '--lowe_threshold',
        nargs='?',
        type=float,
        default=0.6,
        help='Use David Lowe\'s ratio criterion for pruning bad matches')
    parser.add_argument(
        '--homography_threshold',
        nargs='?',
        type=float,
        default=0,
        help=
        'fit a homography to matches and prune by this reprojection error threshold (zero ignores)'
    )
    parser.add_argument(
        '--H_det_threshold',
        nargs='?',
        type=float,
        default=0,
        help='ignore frame if fitted homography has too small of a determinant.'
    )
    parser.add_argument(
        '--f_threshold',
        nargs='?',
        type=float,
        default=2,
        help=
        'fit a fundamental matrix to matches and prune by this error threshold (zero ignores)'
    )
    parser.add_argument(
        '--min_matches',
        nargs='?',
        type=int,
        default=25,
        help=
        'Minimum number of matches in a frame to be accepted into calibration set'
    )

    args = parser.parse_args()

    [pattern_keys, pattern_descs] = feat.read_features(args.pattern_key_fname)

    if args.num_frames == 1:
        frame_fnames = [args.scene_key_fmt]
        out_match_fnames = [args.out_matches_fmt]
    else:
        I = range(1, args.num_frames + 1)
        frame_fnames = [args.scene_key_fmt % x for x in I]
        out_match_fnames = [args.out_matches_fmt % x for x in I]

    tmp = [(a, b) for (a, b) in zip(frame_fnames, out_match_fnames)
           if os.path.isfile(a)]

    if len(tmp) == 0:
        print "No files found matching %s" % args.scene_key_fmt
        exit(1)

    [frame_fnames, out_match_fnames] = zip(*tmp)

    print "reading keypoints from %d frames" % len(frame_fnames)
    frames_features = [feat.read_features(x) for x in frame_fnames]
    [frames_keys, frames_descs] = zip(*frames_features)

    img = cv2.imread(args.example_image)
    img_size = (img.shape[1], img.shape[0])

    distortion = np.zeros((1, 5))

    pattern_kp_mat = np.array([x.pt for x in pattern_keys], np.float32)
    frame_kp_mats = [
        np.array([x.pt for x in keys], np.float32) for keys in frames_keys
    ]
    fixed_kp_mats = [x.copy() for x in frame_kp_mats]

    frames_matches = [
        feat.match_features(pattern_descs, descs, args.lowe_threshold)
        for descs in frames_descs
    ]

    for i in range(0, args.num_iterations):
        print "Iteration %d" % i
        homo_matches = []
        homo_keys = []
        for [kp_mat, fixed_kp_mat,
             matches] in zip(frame_kp_mats, fixed_kp_mats, frames_matches):
            if len(matches) < args.min_matches:
                homo_matches.append([])
                homo_keys.append([])
                continue
            filtered_matches = matches
            if args.homography_threshold > 0:
                [filtered_matches, H] = feat.filter_matches_by_homography(
                    pattern_kp_mat, fixed_kp_mat, filtered_matches,
                    args.homography_threshold)
                det_H = np.linalg.det(H)
                if args.H_det_threshold > 0 and det_H < args.H_det_threshold:
                    filtered_matches = []
                print "%d matches after homography filter" % len(
                    filtered_matches)
            if args.f_threshold > 0 and len(filtered_matches) > 4:
                filtered_matches = feat.filter_matches_by_fundamental_matrix(
                    pattern_kp_mat, fixed_kp_mat, filtered_matches,
                    args.f_threshold)
                print "%d matches after fundamental matrix filter" % len(
                    filtered_matches)

            homo_matches.append(filtered_matches)
            homo_keys.append(kp_mat)

        # gather calibration data
        tmp = [(x, y) for (x, y) in zip(homo_matches, homo_keys)
               if len(x) >= args.min_matches]

        if len(tmp) == 0:
            print "Error: No frames with sufficient matches found"
            exit(1)

        print "using %d frames for calibration after pruning bad frames" % len(
            tmp)
        tmp = [calib.keypoint_matches_to_calibration_data(m, pattern_kp_mat, k) \
                for (m,k) in tmp]
        [obj_pts, img_pts] = zip(*tmp)

        # perform calibration
        [error, K, distortion, rvecs,
         tvecs] = cv2.calibrateCamera(obj_pts, img_pts, img_size, None, None)

        if i < args.num_iterations - 1:
            # undistort image points, which will improve
            # homography filtering next iteration
            fixed_kp_mats = []
            for kp_mat in frame_kp_mats:

                tmp = np.reshape(kp_mat, (-1, 1, 2))
                if kp_mat.shape[0] == 0:
                    tmp = kp_mat
                else:
                    tmp = cv2.undistortPoints(tmp, K, distortion, P=K)

                fixed_kp_mats.append(np.reshape(tmp, (-1, 2)))

    out_matchset = [
        x if len(x) >= args.min_matches else [] for x in homo_matches
    ]
    assert (len(out_match_fnames) == len(out_matchset))
    for fname, m in zip(out_match_fnames, out_matchset):
        feat.write_matches(fname, m)

    cam = calib.Intrinsic_camera()
    cam.K = K
    cam.distortion = distortion
    cam.image_size = img_size
    calib.write_intrinsic_camera(args.out_camera_fname, cam)
Code example #4
import argparse
import os

import cv2

import calib  # project-local calibration I/O helpers (import name assumed from usage)
import feat   # project-local keypoint/feature I/O helpers (import name assumed from usage)


def main():
    parser = argparse.ArgumentParser(
        description="Calibrate using keypoint matches")
    parser.add_argument('pattern_key_fname',
                        type=str,
                        help='reference pattern keypoint file')
    parser.add_argument('num_frames', type=int, help='Number of scene frames')
    parser.add_argument(
        'scene_key_fmt',
        type=str,
        help=
        'Printf-formatted string representing keypoint files from N scene frames'
    )
    parser.add_argument(
        'match_fmt',
        type=str,
        help='Printf-formatted string representing keypoint match files')
    parser.add_argument('example_image',
                        type=str,
                        help='Example image to get dimensions from')
    parser.add_argument('out_fname',
                        type=str,
                        help='filename for intrinsic calibrated camera')
    parser.add_argument('--min_matches',
                        default=20,
                        type=int,
                        help='omit frames with fewer than N matches')

    args = parser.parse_args()

    pattern_keys = feat.read_features(args.pattern_key_fname)[0]

    fnames = [(args.scene_key_fmt % x, args.match_fmt % x)
              for x in range(1, args.num_frames + 1)]
    fnames = [x for x in fnames if os.path.isfile(x[0])]

    if len(fnames) == 0:
        print "No matching keypoint files"
        exit(1)

    missing = next((x[1] for x in fnames if not os.path.isfile(x[1])), None)
    if missing is not None:
        print "File not found: %s" % missing
        exit(1)

    [frame_fnames, match_fnames] = zip(*fnames)

    print "reading keypoints from %d frames" % len(frame_fnames)
    frames_keys = [feat.read_features(x)[0] for x in frame_fnames]
    print "reading matches from %d frames" % len(match_fnames)
    frames_matches = [feat.read_matches(x) for x in match_fnames]

    img = cv2.imread(args.example_image)
    img_size = (img.shape[1], img.shape[0])

    [obj_pts, image_pts] = to_calibration_data(frames_matches, pattern_keys,
                                               frames_keys, args.min_matches)

    cam = calibrate_intrinsic(obj_pts, image_pts, img_size)

    calib.write_intrinsic_camera(args.out_fname, cam)
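
Code examples #1 and #4 instead expect the keypoint-match files to exist already (for instance written by example #2), so a hypothetical invocation (script and file names made up for illustration) might look like:

python calibrate_from_matches.py pattern.key 30 frame_%04d.key matches_%04d.txt \
    example.png camera.yaml --min_matches 20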