Ejemplo n.º 1
0
def main():
    """Estimate F for one consecutive image pair and compare with OpenCV.

    Loads two raw Bayer frames, demosaics and undistorts them, matches
    features, then prints our 8-point fundamental matrix next to
    cv2.findFundamentalMat's result.
    """
    img_path = '../Data/stereo/centre/'
    frames = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    idx = 100
    # Read unchanged (-1) to keep the single-channel Bayer mosaic,
    # demosaic to BGR, then undistort with the camera model's LUT.
    img1 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)
    img2 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx + 1], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogenize: (N, 2) pixel coordinates -> (N, 3) with a trailing 1.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    print(x1.shape)
    print(x2.shape)

    F = EstimateFundamentalMatrix(x1, x2)
    print("Calculated")
    print(F)
    print("CV2")
    F, _ = cv2.findFundamentalMat(x1, x2)
    print(F)
def main():
    """Compare our F -> E -> pose pipeline against OpenCV's decomposition.

    Runs RANSAC inlier selection on matched features, derives the essential
    matrix from our fundamental matrix, extracts the four candidate poses,
    and prints them next to cv2.findEssentialMat/decomposeEssentialMat.
    """
    img_path = '../Data/stereo/centre/'
    frames = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    idx = 100
    # Load two consecutive frames: raw Bayer read, demosaic, undistort.
    img1 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)
    img2 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx + 1], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogeneous coordinates for the epipolar machinery.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # One correspondence per row: [u1 v1 1 u2 v2 1].
    correspondences = np.hstack([x1, x2])
    x1_in, x2_in = RANSAC.getInliersRANSAC(
        correspondences,
        threshold=(0.005),
        size=8,
        num_inliers=0.6 * correspondences.shape[0],
        num_iters=200)

    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)
    print("Essential Matrix")
    print(ess_mtx)

    C, R = ExtractCameraPose(ess_mtx)
    print("Pose Orientation :")
    print(R)

    # OpenCV reference: estimate E from the same inliers (pixel coords only).
    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)

    R1, R2, T = cv2.decomposeEssentialMat(E_act[0])
    print("OpenCV R1")
    print(R1)
    print("OpenCV R2")
    print(R2)

    print("Calculated Pose Position :")
    print(C)
    print("Opencv T")
    print(T.T)
Ejemplo n.º 3
0
def main():
    """Triangulate inlier matches under all four candidate camera poses.

    Builds the F -> E -> pose chain for one image pair, linearly triangulates
    the inliers for each of the four (C, R) candidates, and prints the shape
    of the stacked result before exiting.
    """
    img_path = '../Data/stereo/centre/'
    frames = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    idx = 100
    # Raw Bayer read (-1), demosaic to BGR, undistort via LUT.
    img1 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)
    img2 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx + 1], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogenize both point sets.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    correspondences = np.hstack([x1, x2])
    x1_in, x2_in = RANSAC.getInliersRANSAC(
        correspondences,
        threshold=(0.005),
        size=8,
        num_inliers=0.6 * correspondences.shape[0],
        num_iters=200)

    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)

    C, R = cameraPose.ExtractCameraPose(ess_mtx)

    # First camera fixed at the origin; triangulate against each candidate.
    triangulated = [
        linearTriangulation(K, np.zeros(3), C[j], np.eye(3), R[j], x1_in,
                            x2_in) for j in range(4)
    ]
    X_tri = np.array(triangulated)
    print(X_tri.shape)

    sys.exit(0)
Ejemplo n.º 4
0
def main():
    """Print our essential matrix next to OpenCV's for one image pair.

    Same front end as the other examples (load, demosaic, undistort, match,
    RANSAC), then compares EssentialMatrixFromFundamentalMatrix with
    cv2.findEssentialMat.
    """
    img_path = '../Data/stereo/centre/'
    frames = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    idx = 100
    # Raw Bayer read, demosaic, undistort — both frames of the pair.
    img1 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)
    img2 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx + 1], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogeneous coordinates.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    correspondences = np.hstack([x1, x2])
    x1_in, x2_in = RANSAC.getInliersRANSAC(
        correspondences,
        threshold=(0.005),
        size=8,
        num_inliers=0.6 * correspondences.shape[0],
        num_iters=1000)

    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    E = EssentialMatrixFromFundamentalMatrix(fund_mtx, K)
    print("Essential Matrix")
    print(E)

    # OpenCV reference using the same inliers (pixel coordinates only).
    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)
    print("E actual")
    print(E_act[0])
Ejemplo n.º 5
0
def main():
    """Epipolar-line visualization: compare OpenCV's F against our RANSAC F.

    Estimates the fundamental matrix two ways (cv2.findFundamentalMat and our
    RANSAC + 8-point pipeline), then draws epipolar lines for a few matches
    from each in a 2x2 matplotlib grid (top row: OpenCV F, bottom row: ours).
    """

    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 150
    # Frame i: raw Bayer read (-1 = IMREAD_UNCHANGED), demosaic to BGR,
    # undistort with the camera model's lookup table.
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BAYER_GR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BAYER_GR2BGR)
    img2 = UndistortImage(img2, LUT)

    print(img1.shape)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogenize: (N, 2) pixel coordinates -> (N, 3) with a trailing 1.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # print(x1.shape)
    # print(x2.shape)
    # One correspondence per row: [u1 v1 1 u2 v2 1].
    features = np.hstack([x1, x2])
    # getInliersRANSAC(features,threshold=(0.07),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
    # NOTE(review): called unqualified here but as RANSAC.getInliersRANSAC in
    # the other examples — confirm which import style this module uses.
    x1_in, x2_in = getInliersRANSAC(features,
                                    threshold=(0.005),
                                    size=8,
                                    num_inliers=0.6 * features.shape[0],
                                    num_iters=500)
    print(x1_in.shape)
    # drawFeatures(img1,img2,x1,x2,'r',0.3)
    # drawFeatures(img1,img2,x1_in,x2_in,'g',1)
    # plt.show()

    # sys.exit(0)

    # Our estimate, fitted on the RANSAC inliers only.
    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)

    # x1 = x1_in
    # x2 = x2_in

    # print("CV2 Fundamental Matrix :",correct_fund_mtx[0])

    print("Fundamental Matrix RANSAC:")
    print(fund_mtx)

    # img1 =cv2.imread('../Data/1.jpg')
    # img2 =cv2.imread('../Data/2.jpg')

    # Integer pixel coordinates for cv2.computeCorrespondEpilines below.
    pts1 = np.int32(x1[:, :2])
    pts2 = np.int32(x2[:, :2])
    # NOTE(review): x1/x2 are homogeneous (N, 3) here while findFundamentalMat
    # documents (N, 2) point arrays — confirm this works on the OpenCV version
    # in use (the sibling examples pass the same shape).
    F, _ = cv2.findFundamentalMat(x1, x2, cv2.RANSAC)

    # All matches in red, RANSAC inliers overlaid in green.
    drawFeatures(img1, img2, x1, x2, 'r', 0.1)
    drawFeatures(img1, img2, x1_in, x2_in, 'g', 0.5)
    plt.show()

    print("F")
    print(F)

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image — using OpenCV's F.
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    # lines1 = [lines1[15]]
    # img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
    # Only matches 25..29 are drawn to keep the figure readable.
    img5, img6 = drawlines(img1, img2, lines1[25:30], pts1[25:30], pts2[25:30])

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image — using OpenCV's F.
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    # lines2 = [lines2[25]]
    # img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
    img3, img4 = drawlines(img2, img1, lines2[25:30], pts2[25:30], pts1[25:30])

    # Top row of the 2x2 grid: OpenCV's F.
    plt.subplot(221), plt.imshow(img5)
    plt.subplot(222), plt.imshow(img3)
    # plt.show()

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image — same thing with our fund_mtx.
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, fund_mtx)
    lines1 = lines1.reshape(-1, 3)
    # lines1 = [lines1[25]]
    # img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
    img5, img6 = drawlines(img1, img2, lines1[25:30], pts1[25:30], pts2[25:30])

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image — with our fund_mtx.
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, fund_mtx)
    lines2 = lines2.reshape(-1, 3)
    # lines2 = [lines2[25]]
    # img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
    img3, img4 = drawlines(img2, img1, lines2[25:30], pts2[25:30], pts1[25:30])

    # Bottom row of the grid: our RANSAC-estimated F.
    plt.subplot(223), plt.imshow(img5)
    plt.subplot(224), plt.imshow(img3)
    plt.show()
Ejemplo n.º 6
0
def main():
    """Incremental visual odometry over the image sequence.

    For every third frame pair: match features, reject outliers with RANSAC,
    chain F -> E -> candidate poses, disambiguate the pose by triangulation
    (cheirality), accumulate the camera position, and plot the trajectory
    (x vs z) with matplotlib.
    """
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')
    # Intrinsics are loop-invariant; build K once instead of every iteration.
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    pt_old = np.zeros((3, 1))     # last accumulated camera position
    pt_old_cv = np.zeros((3, 1))  # unused; left from a removed OpenCV cross-check

    i = 15
    # BUGFIX: the original condition `i < len(imgs)` let `i += 3` below (and
    # the read of imgs[i + 1]) run past the end of the list, raising
    # IndexError on the final iterations.
    while i + 4 < len(imgs):
        i = i + 3

        print(i)
        # BUGFIX: read with -1 (IMREAD_UNCHANGED) like the other examples;
        # the default color read returns a 3-channel image, which
        # cv2.cvtColor(..., COLOR_BAYER_GR2BGR) rejects (expects 1 channel).
        img1 = cv2.imread(img_path + imgs[i], -1)
        img1 = cv2.cvtColor(img1, cv2.COLOR_BAYER_GR2BGR)
        img1 = UndistortImage(img1, LUT)

        img2 = cv2.imread(img_path + imgs[i + 1], -1)
        img2 = cv2.cvtColor(img2, cv2.COLOR_BAYER_GR2BGR)
        img2 = UndistortImage(img2, LUT)

        x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

        # Homogeneous coordinates for the epipolar estimation.
        x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
        x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

        features = np.hstack([x1, x2])
        x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                               threshold=(0.002),
                                               size=8,
                                               num_inliers=0.6 *
                                               features.shape[0],
                                               num_iters=200)
        fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)

        ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)

        C, R = cameraPose.ExtractCameraPose(ess_mtx)

        # Triangulate the inliers under each of the four (C, R) candidates,
        # then let disambiguateCameraPose pick the physically valid one.
        X_tri = []
        for j in range(4):
            X_tri.append(
                triangulation.linearTriangulation(K, np.zeros(3), C[j],
                                                  np.eye(3), R[j], x1_in,
                                                  x2_in))
        X_tri = np.array(X_tri)
        C_c, R_c = disambiguateCameraPose.disambiguateCameraPose(X_tri, C, R)
        C_c = np.reshape(C_c, (3, 1))
        # Translation from E is only known up to sign; force forward motion.
        if C_c[2] < 0:
            C_c *= -1

        # Accumulate the pose: rotate the previous position, then translate.
        pt_new = np.matmul(R_c, pt_old)
        pt_new += C_c

        # Crude outlier rejection: discard per-axis jumps larger than 2 units
        # by reverting that axis to the previous value.
        for axis in range(3):
            if abs(pt_new[axis] - pt_old[axis]) > 2:
                pt_new[axis] = copy.copy(pt_old[axis])

        print("Old point :", pt_old.T)
        print("New point :", pt_new.T)

        # Plot the segment from the previous to the new position (x vs z).
        plt.figure("Calculated")
        plt.plot([pt_old[0], pt_new[0]], [pt_old[2], pt_new[2]])
        pt_old = copy.copy(pt_new)

        # NOTE(review): plt.show() blocks until the window is closed on every
        # iteration; plt.show(block=False) or plt.draw() is likely intended
        # for a live trajectory plot — confirm before changing.
        plt.show()
        plt.pause(0.00001)

        print("!-----------------!")
Ejemplo n.º 7
0
def main():
    """Disambiguate the camera pose for one pair and compare with OpenCV.

    Runs the F -> E -> four-pose chain, triangulates to select the valid
    (C, R) via cheirality, and prints the result next to
    cv2.findEssentialMat + cv2.recoverPose.
    """
    img_path = '../Data/stereo/centre/'
    frames = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    idx = 100
    # Raw Bayer read (-1), demosaic to BGR, undistort via the camera LUT.
    img1 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)
    img2 = UndistortImage(
        cv2.cvtColor(cv2.imread(img_path + frames[idx + 1], -1),
                     cv2.COLOR_BAYER_GR2BGR), LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    # Homogeneous coordinates.
    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    correspondences = np.hstack([x1, x2])
    x1_in, x2_in = RANSAC.getInliersRANSAC(
        correspondences,
        threshold=(0.005),
        size=8,
        num_inliers=0.6 * correspondences.shape[0],
        num_iters=1000)
    print(x1_in.shape)

    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)

    C, R = cameraPose.ExtractCameraPose(ess_mtx)

    print("Available R")
    print(R)

    # Triangulate under each candidate pose (first camera at the origin),
    # then pick the valid one by cheirality.
    candidates = [
        triangulation.linearTriangulation(K, np.zeros(3), C[j], np.eye(3),
                                          R[j], x1_in, x2_in)
        for j in range(4)
    ]
    X_tri = np.array(candidates)
    C_c, R_c = disambiguateCameraPose(X_tri, C, R)

    print("Pose Position :")
    print(C_c)
    print("Pose Orientation :")
    print(R_c)

    # OpenCV reference: estimate E and recover the pose from the inliers.
    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)
    _, R, T, _ = cv2.recoverPose(E_act[0], x1_in[:, :2], x2_in[:, :2])

    print("---")
    print("CV2 Pose Position :")
    print(T.T)
    print("CV2 Pose Orientation :")
    print(R)
    print("<----------------->")