Code Example #1
File: test.py  Project: i2r-nglx/pycv
def compute_and_save_K_Rt():
    H = compute_homography()
    K = camera.my_calibration((2048, 1152))
    cam1, box = compute_camera_matrix_1(K, H)
    cam2 = compute_camera_matrix_2(cam1, K, box, H)
    with open('../../data/ar_camera_mag.pkl', 'wb') as f:
        pickle.dump(K, f)
        pickle.dump(dot(linalg.inv(K), cam2.P), f)
Code Example #2
File: test.py  Project: ak352/pycv
def compute_and_save_K_Rt():
    H = compute_homography()
    K = camera.my_calibration((2048, 1152))
    cam1, box = compute_camera_matrix_1(K, H)
    cam2 = compute_camera_matrix_2(cam1, K, box, H)
    with open('../../data/ar_camera_mag.pkl','wb') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
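Both examples above pickle the calibration matrix K followed by the normalized pose inv(K) * P into ar_camera_mag.pkl. As a rough illustration only (not part of either project), the two objects can be read back in the same order and recombined into a full projection matrix, assuming the same PCV-style camera module is importable:

import pickle
from numpy import dot
import camera  # the PCV-style camera module used in the examples above (assumed importable)

with open('../../data/ar_camera_mag.pkl', 'rb') as f:
    K = pickle.load(f)    # 3x3 calibration matrix
    Rt = pickle.load(f)   # 3x4 normalized pose, i.e. dot(linalg.inv(K), cam2.P)

cam = camera.Camera(dot(K, Rt))  # reassemble the projection matrix P = K [R | t]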
Code Example #3
File: test_cube.py  Project: ak352/pycv
def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3]+"sift")

    # compute features                                                        
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])

    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography                                        
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print(len(ndx2))
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)


    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration(Image.open(im2).size)
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print(dot(H,box_cam1))
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print(homography.normalize(dot(dot(H,cam1.P),point)))
    print(cam2.project(point))

    import pickle
    with open('%s.pkl' % ims[1][:-4],'wb') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
Code Example #4
def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3] + "sift")

    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0], sifts[0])

    l0, d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1], sifts[1])
    l1, d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print(len(ndx2))
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H, ransac_data = homography.H_from_ransac(fp, tp, model)

    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration(Image.open(im2).size)
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0, 0, 0.1], 0.1)
    # project bottom square in first image
    cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))
    # use H to transfer points to the second image
    print(dot(H, box_cam1))
    box_trans = homography.normalize(dot(H, box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H, cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1, 1, 0, 1]).T
    print(homography.normalize(dot(dot(H, cam1.P), point)))
    print(cam2.project(point))

    import pickle
    with open('%s.pkl' % ims[1][:-4], 'wb') as f:
        pickle.dump(K, f)
        pickle.dump(dot(linalg.inv(K), cam2.P), f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
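A hypothetical call of get_krt(), using the image names that appear in the commented-out lines inside the function; both arguments are image file paths, and K plus inv(K) * P end up pickled next to the second image. This is a sketch only and assumes the surrounding script's imports (sift, homography, camera, cube, numpy, PIL) are available:

# Sketch: compute features, homography and cameras, and dump K and inv(K)*P.
get_krt('../../data/book_frontal.JPG', '../../data/book_perspective.JPG')

import pickle
with open('../../data/book_perspective.pkl', 'rb') as f:
    K = pickle.load(f)     # calibration matrix
    Rt = pickle.load(f)    # inv(K) * P of the second camera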
Code Example #5
  pickle.dump(matches, open('out_ch05_recover_match.pickle', 'wb'))
matches = pickle.load(open('out_ch05_recover_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print('{} matches'.format(len(ndx)))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')

# Estimate E.
model = sfm.RansacModel()
# Note that x2n is passed as first parameter, since F_from_ransac() and friends
# compute the F matrix mapping from the 2nd parameter to the first, and the
# code below gives camera 1 the identity transform.
E, inliers = sfm.F_from_ransac(x2n, x1n, model)

tic.k('ransacd')
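The snippet stops right after RANSAC. For orientation only, here is a rough sketch of the step that usually follows in this pipeline, assuming the same PCV-style sfm module, whose compute_P_from_essential() returns four candidate poses and whose triangulate() does linear triangulation:

from numpy import array

# Camera 1 gets the identity pose, matching the comment above the E estimate.
P1 = array([[1.0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
# Four possible decompositions of E into a pose for camera 2.
P2_candidates = sfm.compute_P_from_essential(E)

# The correct candidate is the one that puts the triangulated inlier points in
# front of both cameras; that selection loop is omitted in this sketch, and
# depending on the argument order used for F_from_ransac() above, the roles of
# x1n and x2n may need to be swapped here.
P2 = P2_candidates[0]  # placeholder choice
X = sfm.triangulate(x1n[:, inliers], x2n[:, inliers], P1, P2)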
Code Example #6
#sift.plot_matches(im0, im1, l0, l1, matches, show_below=False)
#show()

ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

#model = homography.RansacModel()
#H = homography.H_from_ransac(fp, tp, model)
# Not enough matches for H_from_ransac(), but all matches happen to be correct
# anyways, so no need for that.
H = homography.H_from_points(fp, tp)


K = camera.my_calibration(im0.shape[:2])

# How big the cube appears depends on the z translation used in the cam1 line below.
box = cube_points([0, 0, 0.1], 0.1)

# project bottom square in first image
cam1 = camera.Camera(numpy.hstack((K, numpy.dot(K, array([[0], [0], [-1]])))))
box_cam1 = cam1.project(homography.make_homog(box[:, :5]))

# transfer points to second image
box_trans = homography.normalize(numpy.dot(H, box_cam1))

# compute second camera matrix
cam2 = camera.Camera(numpy.dot(H, cam1.P))

# H * P transforms points in z = 0 correctly, so its first two columns are usable;
# the third (z) column is not constrained by the planar homography and has to be
# rebuilt, as in get_krt() above (Examples #3 and #4).
Code Example #7
#gray()
#sift.plot_matches(im0, im1, l0, l1, matches, show_below=False)
#show()

ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

#model = homography.RansacModel()
#H = homography.H_from_ransac(fp, tp, model)
# Not enough matches for H_from_ransac(), but all matches happen to be correct
# anyways, so no need for that.
H = homography.H_from_points(fp, tp)

K = camera.my_calibration(im0.shape[:2])

# How big the cube appears depends on the z translation used in the cam1 line below.
box = cube_points([0, 0, 0.1], 0.1)

# project bottom square in first image
cam1 = camera.Camera(numpy.hstack((K, numpy.dot(K, array([[0], [0], [-1]])))))
box_cam1 = cam1.project(homography.make_homog(box[:, :5]))

# transfer points to second image
box_trans = homography.normalize(numpy.dot(H, box_cam1))

# compute second camera matrix
cam2 = camera.Camera(numpy.dot(H, cam1.P))

# H * P transforms points in z = 0 correctly, so its first two columns are usable;
# the third (z) column is not constrained by the planar homography and has to be
# rebuilt, as sketched below.
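Examples #6 and #7 are both cut off at this comment. The missing step is the same re-orthogonalization that get_krt() performs in Examples #3 and #4, sketched here in the numpy-prefixed style of this snippet:

# Keep the first two columns of inv(K) * cam2.P (they are determined by H),
# rebuild the third as the cross product of the first two (the z column is not
# constrained by the planar homography), then re-apply K.
A = numpy.dot(numpy.linalg.inv(K), cam2.P[:, :3])
A = numpy.array([A[:, 0], A[:, 1], numpy.cross(A[:, 0], A[:, 1])]).T
cam2.P[:, :3] = numpy.dot(K, A)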
Code Example #8
File: ch05_recover_3d.py  Project: EJHortala/books-2
    pickle.dump(matches, open('out_ch05_recover_match.pickle', 'wb'))
matches = pickle.load(open('out_ch05_recover_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print('{} matches'.format(len(ndx)))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')

# Estimate E.
model = sfm.RansacModel()
# Note that x2n is passed as first parameter, since F_from_ransac() and friends
# compute the F matrix mapping from the 2nd parameter to the first, and the
# code below gives camera 1 the identity transform.
E, inliers = sfm.F_from_ransac(x2n, x1n, model)

tic.k('ransacd')