def convert_points(j, matches):
    """Convert the matches for image pair j into homogeneous coordinate points."""
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(kp1[ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(kp2[ndx2, :2].T)
    return fp, tp
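Most of the examples on this page rely on homography.make_homog from the PCV ("Programming Computer Vision with Python") codebase. For readers without that module, it is essentially a one-liner that stacks a row of ones under a d x n point array; a minimal stand-in, assuming only NumPy, would be:

import numpy as np

def make_homog(points):
    """Convert a d x n array of points to homogeneous coordinates
    by appending a row of ones (result is (d+1) x n)."""
    return np.vstack((points, np.ones((1, points.shape[1]))))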
Example #2
def convert_points(matches, l, j):
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(l[j + 1][ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(l[j][ndx2, :2].T)

    return fp, tp
Example #3
def convert_points(j, matches):
    """Convert the matches for image pair j into homogeneous coordinate points."""
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(kp1[ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(kp2[ndx2, :2].T)
    return fp, tp
Example #4
def convert_points(matches,kp1,kp2):
#     ndx = matches.nonzero()[0]
    fp=np.array([kp1[m.queryIdx].pt for m in matches])
    fp = homography.make_homog(fp.T)
#     ndx2 = [int(matches[i]) for i in ndx]
    tp=np.array([kp2[m.trainIdx].pt for m in matches])
    tp = homography.make_homog(tp.T)
    return fp,tp
Example #5
def convert_points(j):
	ndx = matches[j].nonzero()[0]
	fp = homography.make_homog(l[j+1][ndx,:2].T) 
	ndx2 = [int(matches[j][i]) for i in ndx]
	tp = homography.make_homog(l[j][ndx2,:2].T) 
	# switch x and y - TODO this should move elsewhere
	fp = vstack([fp[1],fp[0],fp[2]])
	tp = vstack([tp[1],tp[0],tp[2]])
	return fp,tp
Example #6
def convert_points(j):
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(l[j + 1][ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(l[j][ndx2, :2].T)

    # switch x and y - TODO this should move elsewhere
    fp = vstack([fp[1], fp[0], fp[2]])
    tp = vstack([tp[1], tp[0], tp[2]])
    return fp, tp
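In the PCV panorama-stitching chapter, these convert_points variants feed homography.H_from_ransac pairwise. A hedged usage sketch (assuming matches, l and the module imports are already set up as in the examples above):

model = homography.RansacModel()

# H maps points of image j+1 into image j (fp comes from l[j+1], tp from l[j])
fp, tp = convert_points(1)
H_12 = homography.H_from_ransac(fp, tp, model)[0]

fp, tp = convert_points(0)
H_01 = homography.H_from_ransac(fp, tp, model)[0]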
Example #7
def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3]+"sift")

    # compute features                                                        
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])

    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography                                        
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)


    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print dot(H,box_cam1)
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print homography.normalize(dot(dot(H,cam1.P),point))
    print cam2.project(point)

    import pickle
    with open('%s.pkl' % ims[1][:-4],'w') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
Example #8
def get_krt(im1, im2):
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3] + "sift")

    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0], sifts[0])

    l0, d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1], sifts[1])
    l1, d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()
    H, ransac_data = homography.H_from_ransac(fp, tp, model)

    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0, 0, 0.1], 0.1)
    # project bottom square in first image
    cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))
    # use H to transfer points to the second image
    print dot(H, box_cam1)
    box_trans = homography.normalize(dot(H, box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H, cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1, 1, 0, 1]).T
    print homography.normalize(dot(dot(H, cam1.P), point))
    print cam2.project(point)

    import pickle
    with open('%s.pkl' % ims[1][:-4], 'w') as f:
        pickle.dump(K, f)
        pickle.dump(dot(linalg.inv(K), cam2.P), f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
Example #9
def get_camera_params(sz, H):
    """
    Get camera parameters: K, Rt.
    """
    # camera calibration
    K = my_calibration((sz))
    #print(K)
    # 3D points at plane z=0 with sides of length 0.2
    box = cube_points([0, 0, 0.1], 0.1)

    # project bottom square in first image
    cam1 = Camera(np.hstack((K, np.dot(K, np.array([[0], [0], [-1]])))))

    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))

    # use H to transfer points to the second image
    box_trans = homography.normalize(np.dot(H, box_cam1))

    # compute the second camera matrix from cam1 and H
    cam2 = Camera(np.dot(H, cam1.P))
    A = np.dot(linalg.inv(K), cam2.P[:, :3])
    A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = np.dot(K, A)

    Rt = np.dot(linalg.inv(K), cam2.P)

    return K, Rt
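The cam2.P reconstruction above exploits the fact that, for a plane-induced homography, the first two columns of inv(K)·P2[:, :3] approximate the first two columns of the rotation, so the missing third column can be rebuilt as their cross product. A self-contained restatement of just that step (a sketch, assuming NumPy and a 3x4 P2):

import numpy as np

def rectify_projection(K, P2):
    """Force the rotation part of P2 = K [R | t] toward a proper rotation:
    keep the first two columns of inv(K) P2[:, :3] and rebuild the third
    column as their cross product, as done in the examples above."""
    A = np.dot(np.linalg.inv(K), P2[:, :3])
    A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1])]).T
    P2 = P2.copy()
    P2[:, :3] = np.dot(K, A)
    return P2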
Example #10
    def setUp(self):
        points = array([
            [-1.1, -1.1, -1.1],
            [1.4, -1.4, -1.4],
            [-1.5, 1.5, -1],
            [1, 1.8, -1],
            [-1.2, -1.2, 1.2],
            [1.3, -1.3, 1.3],
            [-1.6, 1.6, 1],
            [1, 1.7, 1],
        ])
        points = homography.make_homog(points.T)

        P = hstack((eye(3), array([[0], [0], [0]])))
        cam = camera.Camera(P)
        self.x = cam.project(points)

        r = [0.05, 0.1, 0.15]
        rot = camera.rotation_matrix(r)
        cam.P = dot(cam.P, rot)
        cam.P[:, 3] = array([1, 0, 0])
        self.x2 = cam.project(points)
        print self.x2.shape
        print self.x2

        K, R, t = cam.factor()
        self.expectedE = dot(sfm.skew(t), R)
        self.expectedE /= self.expectedE[2, 2]
Example #11
def compute_camera_matrix_1(K, H):
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0, 0, 0.1], 0.1)
    # project bottom square in first image
    cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:, :5]))
    # use H to transfer points to the second image

    box_trans = homography.normalize(dot(H, box_cam1))
    return cam1, box
Example #12
File: test.py Project: ak352/pycv
def compute_camera_matrix_1(K, H):
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image

    box_trans = homography.normalize(dot(H,box_cam1))
    return cam1, box
Example #13
def get_H(im0_path, im1_path):
    """
    Get the Homography matrix.
    """
    # compute features
    sift.process_image(im0_path, 'im0.sift')
    l0, d0 = sift.read_features_from_file('im0.sift')

    sift.process_image(im1_path, 'im1.sift')
    l1, d1 = sift.read_features_from_file('im1.sift')

    # match features and estimate homography
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2, :2].T)

    model = homography.RansacModel()
    H = homography.H_from_ransac(fp, tp, model)[0]

    return H
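For comparison (not part of the original project), OpenCV can perform the same RANSAC homography fit directly; a hedged sketch assuming fp and tp are the 3 x N homogeneous arrays built as in get_H above:

import cv2
import numpy as np

# cv2.findHomography expects N x 2 pixel coordinates, so drop the homogeneous row
src = np.float32(fp[:2].T)
dst = np.float32(tp[:2].T)
H_cv, inlier_mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)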
Example #14
def compute_homography():
    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    im1 = '../../data/space_front.jpg'
    im2 = '../../data/space_perspective.jpg'
    im1 = '../../data/mag_front.jpg'
    im2 = '../../data/mag_perspective.jpg'
    ims = [im1, im2]
    sifts = []
    for k in range(2):
        sifts.append(ims[k][:-4] + ".sift")
    l0, d0 = sift.read_features_from_file(sifts[0])
    l1, d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography
    matches = sift.match_twosided(d0, d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx, :2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2, :2].T)
    model = homography.RansacModel()

    H, ransac_data = homography.H_from_ransac(fp, tp, model)
    return H
Example #15
File: test.py Project: ak352/pycv
def compute_homography():
    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    im1='../../data/space_front.jpg'
    im2='../../data/space_perspective.jpg'
    im1='../../data/mag_front.jpg'
    im2='../../data/mag_perspective.jpg'
    ims = [im1, im2]
    sifts = []
    for k in range(2):
        sifts.append(ims[k][:-4]+".sift")
    l0,d0 = sift.read_features_from_file(sifts[0])
    l1,d1 = sift.read_features_from_file(sifts[1])

    # match features and estimate homography
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()

    H,ransac_data = homography.H_from_ransac(fp,tp,model)
    return H
Example #16
def compute_camera_matrix_2(cam1, K, box, H):
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H, cam1.P))
    A = dot(linalg.inv(K), cam2.P[:, :3])
    A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = dot(K, A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1, 1, 0, 1]).T
    print(homography.normalize(dot(dot(H, cam1.P), point)))
    print(cam2.project(point))

    #H is correct as seen by the plots but cam2.P does not seem to be correct
    #test cam2.P
    return cam2
Example #17
File: test.py Project: ak352/pycv
def compute_camera_matrix_2(cam1, K, box, H):
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print(homography.normalize(dot(dot(H, cam1.P),point)))
    print(cam2.project(point))

    #H is correct as seen by the plots but cam2.P does not seem to be correct
    #test cam2.P
    return cam2
Example #18
  def setUp(self):
    points = array([
      [-1.1, -1.1, -1.1], [ 1.4, -1.4, -1.4], [-1.5,  1.5, -1], [ 1,  1.8, -1],
      [-1.2, -1.2,  1.2], [ 1.3, -1.3,  1.3], [-1.6,  1.6,  1], [ 1,  1.7,  1],
      ])
    points = homography.make_homog(points.T)

    P = hstack((eye(3), array([[0], [0], [0]])))
    cam = camera.Camera(P)
    self.x = cam.project(points)

    r = [0.05, 0.1, 0.15]
    rot = camera.rotation_matrix(r)
    cam.P = dot(cam.P, rot)
    cam.P[:, 3] = array([1, 0, 0])
    self.x2 = cam.project(points)

    K, R, t = cam.factor()
    self.expectedE = dot(sfm.skew(t), R)
    self.expectedE /= self.expectedE[2, 2]
Example #19
# ---

import numpy as np
import matplotlib.pyplot as plt
import cv2
# custom modules
import camera
import image_processing as ip
import desc_val as dv
import homography
import sfm

im1, im2, points2D, points3D, corr, P = sfm.load_merton_data(show_detail=True)

# make the 3D points homogeneous and project them
X = homography.make_homog(points3D)
x = P[0].project(X)

points2D[0]

# +
# draw the points on top of image 1
ip.show_img(im1, figsize=(6, 4))
plt.plot(points2D[0][0], points2D[0][1], '*')
plt.show()

ip.show_img(im1, figsize=(6, 4))
plt.plot(x[0], x[1], '*')
plt.show()
# -
Example #20
im1 = array(Image.open('out_ch4pics/h_image.jpg'))
#sift.plot_features(im1, l1, circle=True)
#show()

if not os.path.exists('out_ch04_markerpose.pickle'):
    matches = sift.match(d0, d1)
    pickle.dump(matches, open('out_ch04_markerpose.pickle', 'wb'))
matches = pickle.load(open('out_ch04_markerpose.pickle', 'rb'))

#figure()
#gray()
#sift.plot_matches(im0, im1, l0, l1, matches, show_below=False)
#show()

ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

#model = homography.RansacModel()
#H = homography.H_from_ransac(fp, tp, model)
# Not enough matches for H_from_ransac(), but all matches happen to be correct
# anyways, so no need for that.
H = homography.H_from_points(fp, tp)

K = camera.my_calibration(im0.shape[:2])

# How big this appears depends on the z translation in cam1 in the cam1 line.
box = cube_points([0, 0, 0.1], 0.1)

# project bottom square in first image
Example #21
l1, d1 = sift.detectAndCompute(im2_gray, None)

#%%
''' Match points from img1 to points in img2 '''
bf = cv2.BFMatcher(crossCheck=True)
matches = bf.match(d0, d1)

#https://stackoverflow.com/questions/30716610/how-to-get-pixel-coordinates-from-feature-matching-in-opencv-python
kp0 = [l0[mat.queryIdx].pt for mat in matches]
kp1 = [l1[mat.trainIdx].pt for mat in matches]
kp0 = np.array(kp0)
kp1 = np.array(kp1)

#%%
''' Compute from and to points '''
fp = homography.make_homog(kp0[:100, :2].T)
tp = homography.make_homog(kp1[:100, :2].T)

model = homography.RansacModel()
H, _ = homography.H_from_ransac(fp, tp, model)

#%%
''' Draw cone '''


def cube_points(c, wid):
    """ Creates a list of points for plotting
    a cube with plot. (the first 5 points are
    the bottom square, some sides repeated). """
    p = []
    #bottom
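The listing cuts the helper off here. For reference, the rest of cube_points as it appears in the PCV book code is roughly the following (reproduced from memory, so treat it as a sketch rather than this project's exact source):

def cube_points(c, wid):
    """ Creates a list of points for plotting a cube with plot.
    (the first 5 points are the bottom square, some sides repeated). """
    p = []
    # bottom
    p.append([c[0] - wid, c[1] - wid, c[2] - wid])
    p.append([c[0] - wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] - wid, c[2] - wid])
    p.append([c[0] - wid, c[1] - wid, c[2] - wid])  # same as first, closes the square
    # top
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] + wid])
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    # vertical sides
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] - wid])
    return np.array(p).T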
Example #22
def processInput():
    print ""
    if inputArgs.left == "" or inputArgs.right == "":
        print "Missing images!"
        quit()

    # here we go ...

    # load image pair - we might need them for later
    img_l = cv2.imread(inputArgs.left)
    img_r = cv2.imread(inputArgs.right)

    if img_l is None or img_r is None:
        print "Missing images!"
        quit()

    ### git them points - calibration
    mkp_l = []
    mkp_r = []

    file_kp = open(inputArgs.nameCalibration, 'r')

    if file_kp == None:
        print "Missing matching points file"
        quit()

    for line in file_kp:
        l_r = line.split(';')
        mkp_l.append(
            [float(l_r[0].split(',')[0]),
             float(l_r[0].split(',')[1])])
        mkp_r.append(
            [float(l_r[1].split(',')[0]),
             float(l_r[1].split(',')[1])])
    file_kp.close()

    mkp_l = np.float32(mkp_l)
    mkp_r = np.float32(mkp_r)

    ### git them points - reconstruction
    mkp_l_p = []
    mkp_r_p = []

    file_kp = open(inputArgs.namePoints, 'r')

    if file_kp == None:
        print "Missing matching points file"
        quit()

    for line in file_kp:
        l_r = line.split(';')
        mkp_l_p.append(
            [float(l_r[0].split(',')[0]),
             float(l_r[0].split(',')[1])])
        mkp_r_p.append(
            [float(l_r[1].split(',')[0]),
             float(l_r[1].split(',')[1])])
    file_kp.close()

    mkp_l_p = np.float32(mkp_l_p)
    mkp_r_p = np.float32(mkp_r_p)

    ### git them calibrations - left
    K_l = []
    file_c_l = open(inputArgs.leftCalibration, 'r')

    if file_c_l == None:
        print "Missing left calibration file"
        quit()

    for line in file_c_l:
        c_l = line.split(' ')
        K_l.append([float(c_l[0]), float(c_l[1]), float(c_l[2])])
    file_c_l.close()
    K_l = np.float32(K_l)

    ### git them calibrations - right
    K_r = []
    file_c_r = open(inputArgs.rightCalibration, 'r')

    if file_c_r == None:
        print "Missing right calibration file"
        quit()

    for line in file_c_r:
        c_l = line.split(' ')
        K_r.append([float(c_l[0]), float(c_l[1]), float(c_l[2])])
    file_c_r.close()
    K_r = np.float32(K_r)

    ### ok, now we start work

    # make homogeneous and normalize with inv(K)
    x1 = homography.make_homog(mkp_l_p.T)
    x2 = homography.make_homog(mkp_r_p.T)

    x1n = dot(inv(K_l), x1)
    x2n = dot(inv(K_r), x2)

    # compute E (E = (K_r)T * F * K_l)
    #F, mask = cv2.findFundamentalMat(mkp_l, mkp_r, cv2.FM_8POINT)
    F, mask = cv2.findFundamentalMat(mkp_l, mkp_r, cv2.FM_RANSAC, 1, 0.99)

    # we select only inlier points - most of the time this makes it worse
    #mkp_l = mkp_l[mask.ravel()==1]
    #mkp_r = mkp_r[mask.ravel()==1]

    E = K_r.transpose()
    E = E.dot(F)
    E = E.dot(K_l)

    # compute camera matrices (P2 will be list of four solutions)
    P1 = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
    P2 = sfm.compute_P_from_essential(E)

    # pick the solution with points in front of cameras
    ind = 0
    maxres = 0

    for i in range(4):
        # triangulate inliers and compute depth for each camera
        X = sfm.triangulate(x1n, x2n, P1, P2[i])
        d1 = dot(P1, X)[2]
        d2 = dot(P2[i], X)[2]
        if sum(d1 > 0) + sum(d2 > 0) > maxres:
            maxres = sum(d1 > 0) + sum(d2 > 0)
            ind = i
            infront = (d1 > 0) & (d2 > 0)

    # triangulate inliers and remove points not in front of both cameras
    X = sfm.triangulate(x1n, x2n, P1, P2[ind])
    X = X[:, infront]

    # visualization
    if inputArgs.debug == 1:
        # draw points
        img_tmp = img_l.copy()
        for kp in mkp_l_p:
            x = int(kp[0])
            y = int(kp[1])
            cv2.circle(img_tmp, (x, y), 3, (0, 0, 255))

        cv2.imwrite(inputArgs.namePoints + ".jpg", img_tmp)

    # 3D plot
    out_points = []
    out_colors = []

    for i in range(len(X[0])):
        out_points.append([X[0][i], X[1][i], X[2][i]])
        #out_colors.append(img_l[int(x1[1][i])][int(x1[0][i])])
        out_colors.append([
            img_l[int(x1[1][i])][int(x1[0][i])][2],
            img_l[int(x1[1][i])][int(x1[0][i])][1],
            img_l[int(x1[1][i])][int(x1[0][i])][0]
        ])

    out_points = np.float32(out_points)
    out_colors = np.float32(out_colors)
    write_ply(inputArgs.namePoints + ".ply", out_points, out_colors)
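write_ply is not defined in this excerpt; one common implementation that matches the call above is the helper from OpenCV's stereo_match.py sample (a sketch, since the original project may use its own variant):

ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''

def write_ply(fn, verts, colors):
    # flatten to N x 3 arrays, interleave XYZ with RGB, then write ASCII PLY
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])
    with open(fn, 'w') as f:
        f.write(ply_header % dict(vert_num=len(verts)))
        np.savetxt(f, verts, fmt='%f %f %f %d %d %d')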
Example #23
    if (np.all(homography_matrix == None)):
        ip.imwrite(file_path, frame)
        continue

    # project the bottom square into the first image
    # define the camera object
    # a negative z translation is used to move the cube toward the viewer;
    # since we go X -> x_q -> x_t, a movement along the z axis
    # shows up as a scaling in x_t
    P = np.hstack((K, K @ np.array([[0], [0], [-5]])))
    cam1 = camera.Camera(P)

    # the first points are the bottom square
    pts = homography.make_homog(box[:, :5])
    box_cam1 = cam1.project(pts)

    #     img_for_edit=img_query.copy()
    #     for i in range(5):
    #         circle_pt=tuple([int(pt) for pt in box_cam1[:2,i]])
    #         cv2.circle(img_for_edit,circle_pt,50,(255,255,255),thickness=-1)
    #     ip.show_img(img_for_edit,show_axis=True)

    # compute the second camera matrix from cam1 and H
    P2 = homography_matrix @ cam1.P
    cam2 = camera.Camera(P2)

    # since P = K (R|t), P[:, :3] is K R,
    # so multiplying the first three columns of P by inv(K) recovers R
Example #24
import sys
sys.path.append("../homography")
sys.path.append("../camera")
sys.path.append("../local_image_descriptors")
import homography
import camera
import sift
# compute features
im1 = "../../data/book_frontal.JPG"
im2 = "../../data/book_perspective.JPG"

sift.process_image(im1,'../../data/im0.sift')
l0,d0 = sift.read_features_from_file('../../data/im0.sift')
sift.process_image(im2,'../../data/im1.sift')
l1,d1 = sift.read_features_from_file('../../data/im1.sift')
# match features and estimate homography
matches = sift.match_twosided(d0,d1)
ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx,:2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2,:2].T)
model = homography.RansacModel()
H = homography.H_from_ransac(fp,tp,model)

Example #25
for i in range(len(imname)):
  l[i], d[i] = sift.read_or_compute(imname[i], siftname[i], histeq)

tic.k('loaded sifts')

print '{} / {} features'.format(len(d[0]), len(d[1]))
if not os.path.exists('out_ch05_recover_match.pickle'):
  #matches = sift.match(d[0], d[1])
  matches = sift.match_twosided(d[0], d[1])
  pickle.dump(matches, open('out_ch05_recover_match.pickle', 'wb'))
matches = pickle.load(open('out_ch05_recover_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print '{} matches'.format(len(ndx))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')
Example #26
from numpy import *
from matplotlib.pylab import *
# calibration
K = array([[2394,0,932],[0,2398,628],[0,0,1]])
print "load images and compute features"
im1 = array(Image.open('../data/alcatraz1.jpg')) 
sift.process_image('../data/alcatraz1.jpg','im1.sift')
l1,d1 = sift.read_features_from_file('im1.sift')
im2 = array(Image.open('../data/alcatraz2.jpg'))
sift.process_image('../data/alcatraz2.jpg','im2.sift')
l2,d2 = sift.read_features_from_file('im2.sift')
print "match features"
matches = sift.match_twosided(d1,d2)
ndx = matches.nonzero()[0]
# make homogeneous and normalize with inv(K)
x1 = homography.make_homog(l1[ndx,:2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l2[ndx2,:2].T)
x1n = dot(inv(K),x1)
x2n = dot(inv(K),x2)
print "estimate E with RANSAC"
model = sfm.RansacModel()
E,inliers = sfm.F_from_ransac(x1n,x2n,model)
print "compute camera matrices (P2 will be list of four solutions)"
P1 = array([[1,0,0,0],[0,1,0,0],[0,0,1,0]]) 
P2 = sfm.compute_P_from_essential(E)
print "pick the solution with points in front of cameras"
ind = 0
maxres = 0
for i in range(4):
    # triangulate inliers and compute depth for each camera
Example #27
k = array([[2394, 0, 932], [0, 2398, 628], [0, 0, 1]])

# load images and compute features
im1 = array(Image.open('Building_1.jpg'))
sift.process_image('Building_1.jpg', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')

im2 = array(Image.open('Building_2.jpg'))
sift.process_image('Building_2.jpg', 'im2.sift')
l2, d2 = sift.read_features_from_file('im2.sift')

# match features
matches = sift.match_twosided(d1, d2)
ndx = matches.nonzero()[0]

# make homogeneous and normalize with inv(k)
x1 = homography.make_homog(l1[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l2[ndx2, :2].T)

x1n = dot(inv(k), x1)
x2n = dot(inv(k), x2)

# estimate E with RANSAC
model = sfm.RansacModel()
E, inliers = sfm.F_from_ransac(x1n, x2n, model)

# compute camera matrices (p2 will be list of four solutions)
P1 = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
P2 = sfm.compute_P_from_essential(E)
Example #28
for i in range(len(imname)):
    l[i], d[i] = sift.read_or_compute(imname[i], siftname[i], histeq)

tic.k('loaded sifts')

print '{} / {} features'.format(len(d[0]), len(d[1]))
if not os.path.exists('out_ch05_recover_match.pickle'):
    #matches = sift.match(d[0], d[1])
    matches = sift.match_twosided(d[0], d[1])
    pickle.dump(matches, open('out_ch05_recover_match.pickle', 'wb'))
matches = pickle.load(open('out_ch05_recover_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print '{} matches'.format(len(ndx))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')
Example #29
l, d = {}, {}
for i in range(len(imname)):
    l[i], d[i] = sift.read_or_compute(imname[i], siftname[i])

tic.k('loaded sifts')

if not os.path.exists('out_ch03_ex07_match.pickle'):
    matches = sift.match(d[1], d[0])
    pickle.dump(matches, open('out_ch03_ex07_match.pickle', 'wb'))
matches = pickle.load(open('out_ch03_ex07_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
fp = homography.make_homog(l[1][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l[0][ndx2, :2].T)

tic.k('converted')

im1 = array(Image.open(imname[0]).convert('L'))
im2 = array(Image.open(imname[1]).convert('L'))
if len(im1.shape) == 2:
    gray()
imshow(im1)

tic.k('imloaded')

model = homography.AffineRansacModel()
Example #30
l, d = {}, {}
for i in range(len(imname)):
  l[i], d[i] = sift.read_or_compute(imname[i], siftname[i])

tic.k('loaded sifts')


if not os.path.exists('out_ch03_ex06_match.pickle'):
  matches = sift.match(d[1], d[0])
  pickle.dump(matches, open('out_ch03_ex06_match.pickle', 'wb'))
matches = pickle.load(open('out_ch03_ex06_match.pickle', 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
fp = homography.make_homog(l[1][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l[0][ndx2, :2].T)


tic.k('converted')

im1 = array(Image.open(imname[0]).convert('L'))
im2 = array(Image.open(imname[1]).convert('L'))
if len(im1.shape) == 2:
  gray()
imshow(im1)

tic.k('imloaded')

model = homography.RansacModel()
Example #31
featlist = [imlist[i][:-3] + "sift" for i in range(imcount)]

with open("vocabulary.pkl", "rb") as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher("test.db", voc)

query_imid = 50
res_count = 20

res = [w[1] for w in searcher.query(imlist[query_imid])[:res_count]]
print "regular results for query %d:" % query_imid, res

# Rerank by trying to fit a homography.
q_locs, q_descr = sift.read_features_from_file(featlist[query_imid])
fp = homography.make_homog(q_locs[:, :2].T)

model = homography.RansacModel()

rank = {}
for ndx in res[1:]:
    locs, descr = sift.read_features_from_file(featlist[ndx - 1])  # res is 1-based

    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = [int(matches[i]) for i in ind]
    tp = homography.make_homog(locs[:, :2].T)

    try:
        H, inliers = homography.H_from_ransac(fp[:, ind], tp[:, ind2], model, match_threshold=4)
    except:
Example #32
if len(sys.argv) != 2:
  print 'usage: %prog image.jpeg'
  sys.exit(1)

imname = sys.argv[1]
im = array(Image.open(imname))
imshow(im)
corners = array(ginput(4))
# FIXME: sort corners, currently expects tl, bl, br, tr order.

# Fake more realistic side lengths.
h = 400
w = int(np.linalg.norm(corners[3] - corners[0]) /
        np.linalg.norm(corners[1] - corners[0]) * h)

corners = homography.make_homog(corners.T)
normalized = homography.make_homog(array([[0, 0, w, w], [0, h, h, 0]]))
H = homography.H_from_points(normalized, corners)

#normalized = homography.make_homog(array([[0, 0, 1, 1], [0, 1, 1, 0]]))
#print homography.H_from_points(normalized, corners)
#print H

if True:
  overshoot = 0.8  # Adds a 80% border around the selected area.
  s = (2 * overshoot + 1)
  H = dot(H, array([[s, 0, -w*overshoot],
                    [0, s, -h*overshoot],
                    [0, 0, 1]]))

mapped = imtools.Htransform(im, H, (h, w))
Example #33
tic.k('loaded sifts')

print '{} / {} features'.format(len(d[0]), len(d[1]))

immd5 = md5.md5(''.join(imname)).hexdigest()
matchcache = 'out_ch05ex02_cache_matches_%s.pickle' % immd5
if not os.path.exists(matchcache):
    #matches = sift.match(d[0], d[1])
    matches = sift.match_twosided(d[0], d[1])
    pickle.dump(matches, open(matchcache, 'wb'))
matches = pickle.load(open(matchcache, 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print '{} matches'.format(len(ndx))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')
Example #34
im1 = array(Image.open(imname).convert('L'))
l1, d1 = sift.read_features_from_file('im1.sift')

figure()
gray()
sift.plot_features(im1, l1, circle=True)
show()





# match features
matches = sift.match_twosided(d0, d1)
ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)

print 'fp', fp
print 'tp', tp

model = homography.RansacModel()
H = homography.H_from_ransac(fp, tp, model)

# compute the camera calibration matrix
K = my_calibration((747, 1000))

# 3D points on the z=0 plane with sides of length 0.2
box = cube_points([0, 0, 0.1], 0.1)
Example #35

    # We use OpenCV instead of the calculus of the homography present in the book
    H = find_homography(kp0, desc0, kp1, desc1)

    # Note: always resize image to 747 x 1000 or change the K below
    # camera calibration
    K = my_calibration((480,640))

    # 3D points at plane z=0 with sides of length 0.2
    box = cube_points([0,0,0.1],0.1)

    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))


    # use H to transfer points to the second image
    box_trans = homography.normalize(dot(H,box_cam1))

    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)

    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))

    print(box_cam2)
Example #36
import homography
import camera
import sift

#compute the feature
sift.process_image('/home/wangkai/Pictures/book_frontal.jpg','im0.sift')
l0,d0 = sift.read_features_from_file('im0.sift')

sift.process_image('/home/wangkai/Pictures/book_prespective.jpg','im1.sift')
l1,d1 = sift.read_features_from_file('im1.sift')

#match the features, compute the homography
matches = sift.match_twosided(d0,d1)
ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx,:2].T)
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2,:2].T)

model = homography.RansacModel()
H = homography.H_from_ransac(fp,tp,model)

Example #37
from numpy import *
import homography

f = array(([1, 1], [2, 1], [3, 2], [4, 4]))
t = f + 1
f = f.T
f = homography.make_homog(f)
t = t.T
t = homography.make_homog(t)

#print f, t
print homography.H_from_points(f, t)
Example #38
    K[0, 2] = 0.5 * col
    K[1, 2] = 0.5 * row
    return K
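Only the tail of my_calibration survives in this excerpt. The full helper from the PCV book is roughly the following (assuming from numpy import * as in the surrounding code; the focal-length constants are hard-coded for the book author's camera, so treat this as a sketch and adjust for your own):

def my_calibration(sz):
    """ Approximate calibration matrix, scaled to image size sz = (rows, cols). """
    row, col = sz
    fx = 2555 * col / 2592
    fy = 2586 * row / 1936
    K = diag([fx, fy, 1])
    K[0, 2] = 0.5 * col
    K[1, 2] = 0.5 * row
    return K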


sift.process_image('book_frontal.JPG', 'im0.sift')
l0, d0 = sift.read_features_from_file('im0.sift')
sift.process_image('book_perspective.JPG', 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')

# match features and compute the homography matrix

# match features and estimate homography
matches = sift.match_twosided(d0, d1)  # match
ndx = matches.nonzero()[0]
fp = homography.make_homog(l0[ndx, :2].T)  #vstack
ndx2 = [int(matches[i]) for i in ndx]
tp = homography.make_homog(l1[ndx2, :2].T)  #vstack

model = homography.RansacModel()
H, inliers = homography.H_from_ransac(fp, tp, model)  # discard mismatches

# camera calibration
K = my_calibration((747, 1000))  # initialize camera focal length / calibration

# 3D points at plane z=0 with sides of length 0.2
box = cube_points([0, 0, 0.1], 0.1)

# project bottom square in first image
cam1 = camera.Camera(hstack((K, dot(K, array([[0], [0], [-1]])))))  # camera initialization
# first points are the bottom square
Example #39
    H = homo.H_from_ransac(obj1, obj2, model)

    # # Draw first 60 matches.
    # img3 = img2
    # img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:60], img3)

    # cv2.imshow('result', img3)
    # cv2.imwrite('result.jpg', img3)
    # cv2.waitKey()

    # bottom square on the z = 0 plane with sides of length 0.2
    Box = cube_points([0, 0, 0.1], 0.1)

    cam1 = Cli.Camera(
        numpy.hstack((K, numpy.dot(K, numpy.array([[0], [0], [-1]])))))
    box_cam1 = cam1.project(homo.make_homog(Box[:, :5]))

    # use H to transfer the points into the second image
    box_trans = homo.normalize(numpy.dot(H[0], box_cam1))

    # compute the second camera matrix from cam1 and H
    cam2 = Cli.Camera(numpy.dot(H[0], cam1.P))
    A = numpy.dot(numpy.linalg.inv(K), cam2.P[:, :3])
    A = numpy.array([A[:, 0], A[:, 1], numpy.cross(A[:, 0], A[:, 1])]).T
    cam2.P[:, :3] = numpy.dot(K, A)

    # project with the second camera matrix
    box_cam2 = cam2.project(homo.make_homog(Box))

    # test: projecting a point on z=0 should give the same point
    point = numpy.array([[1, 1, 0, 1]]).T
Example #40
# cap the number of matches inspected at the number available
if num_matches < num_inspect:
    num_inspect = num_matches
x1 = []
x2 = []
for i in range(num_inspect):
    m = good_matches[i]
    k1 = kp1[m.queryIdx]
    x1.append(k1.pt)

    k2 = kp2[m.trainIdx]
    x2.append(k2.pt)

# make homogeneous and normalize with inv(K)
x1 = np.array(x1).T
x1 = homography.make_homog(x1)

x2 = np.array(x2).T
x2 = homography.make_homog(x2)

x1n = np.linalg.inv(K) @ x1
x2n = np.linalg.inv(K) @ x2

# estimate E with RANSAC
model = sfm.RansacModel()
E, inliers = sfm.F_from_ransac(x1n, x2n, model)

# compute the camera matrices
P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
P2 = sfm.compute_P_from_essential(E)
Example #41
tic.k('loaded sifts')

print '{} / {} features'.format(len(d[0]), len(d[1]))

immd5 = md5.md5(''.join(imname)).hexdigest()
matchcache = 'out_ch05ex02_cache_matches_%s.pickle' % immd5
if not os.path.exists(matchcache):
  #matches = sift.match(d[0], d[1])
  matches = sift.match_twosided(d[0], d[1])
  pickle.dump(matches, open(matchcache, 'wb'))
matches = pickle.load(open(matchcache, 'rb'))

tic.k('matched')

ndx = matches.nonzero()[0]
x1 = homography.make_homog(l[0][ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l[1][ndx2, :2].T)

print '{} matches'.format(len(ndx))

image = [numpy.array(Image.open(name)) for name in imname]

# calibration (FIXME?)
K = camera.my_calibration(image[0].shape[:2])

# Normalize with inv(K) (allows metric reconstruction).
x1n = numpy.dot(numpy.linalg.inv(K), x1)
x2n = numpy.dot(numpy.linalg.inv(K), x2)

tic.k('normalized')
Example #42
File: test.py Project: ak352/pycv
from numpy import *
import homography

f = array(([1,1], [2,1], [3,2], [4,4]))
t = f+1
f = f.T
f = homography.make_homog(f)
t = t.T
t = homography.make_homog(t)


#print f, t
print homography.H_from_points(f,t)
Example #43
def convert_points(j):
    ndx = matches[j].nonzero()[0]
    fp = homography.make_homog(l[j + 1][ndx, :2].T)
    ndx2 = [int(matches[j][i]) for i in ndx]
    tp = homography.make_homog(l[j][ndx2, :2].T)
    return fp, tp
Example #44
featlist = [imlist[i][:-3] + 'sift' for i in range(imcount)]

with open('vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

searcher = imagesearch.Searcher('test.db', voc)

query_imid = 50
res_count = 20

res = [w[1] for w in searcher.query(imlist[query_imid])[:res_count]]
print 'regular results for query %d:' % query_imid, res

# Rerank by trying to fit a homography.
q_locs, q_descr = sift.read_features_from_file(featlist[query_imid])
fp = homography.make_homog(q_locs[:, :2].T)

model = homography.RansacModel()

rank = {}
for ndx in res[1:]:
    locs, descr = sift.read_features_from_file(featlist[ndx -
                                                        1])  # res is 1-based

    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = [int(matches[i]) for i in ind]
    tp = homography.make_homog(locs[:, :2].T)

    try:
        H, inliers = homography.H_from_ransac(fp[:, ind],
Example #45
im2_path = './e2.jpg'

im1 = np.array(Image.open(im1_path))
sift.process_image(im1_path, 'im1.sift')
l1, d1 = sift.read_features_from_file('im1.sift')

im2 = np.array(Image.open(im2_path))
sift.process_image(im2_path, 'im2.sift')
l2, d2 = sift.read_features_from_file('im2.sift')

# match features
matches = sift.match_twosided(d1, d2)
ndx = matches.nonzero()[0]

# make homogeneous and normalize with inv(K)
x1 = homography.make_homog(l1[ndx, :2].T)
ndx2 = [int(matches[i]) for i in ndx]
x2 = homography.make_homog(l2[ndx2, :2].T)

x1n = np.dot(np.linalg.inv(K), x1)
x2n = np.dot(np.linalg.inv(K), x2)

# estimate E with RANSAC
model = sfm.RansacModel()
E, inliers = sfm.F_from_ransac(x1n, x2n, model)

# compute camera matrices (P2 will be list of four solutions)
P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
P2 = sfm.compute_P_from_essential(E)

# pick the solution with points in front of cameras
Example #46
from pylab import *
import numpy

import camera
import homography

points = homography.make_homog(loadtxt('house.p3d').T)

P = hstack((eye(3), array([[0], [0], [-10]])))
cam = camera.Camera(P)
x = cam.project(points)

figure()
plot(x[0], x[1], 'k.')


r = 0.05 * numpy.random.rand(3)
rot = camera.rotation_matrix(r)

figure()
for t in range(20):
  cam.P = dot(cam.P, rot)
  x = cam.project(points)
  plot(x[0], x[1], 'k.')

show()