Example #1
def main():

    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')
    i = 100
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    img2 = UndistortImage(img2, LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    print(x1.shape)
    print(x2.shape)

    F = EstimateFundamentalMatrix(x1, x2)
    print("Calculated")
    print(F)
    # print(F_n)
    print("CV2")
    F_cv, _ = cv2.findFundamentalMat(x1[:, :2], x2[:, :2])  # OpenCV expects 2-D points
    print(F_cv)
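
Several of these examples call a shared EstimateFundamentalMatrix helper that is not shown. A minimal normalized eight-point sketch, assuming x1 and x2 are N x 3 homogeneous pixel coordinates as constructed above, might look like:

import numpy as np

def EstimateFundamentalMatrix(x1, x2):
    # Hartley normalization for numerical stability.
    def normalize(x):
        mean = x[:, :2].mean(axis=0)
        scale = np.sqrt(2) / np.mean(np.linalg.norm(x[:, :2] - mean, axis=1))
        T = np.array([[scale, 0, -scale * mean[0]],
                      [0, scale, -scale * mean[1]],
                      [0, 0, 1]])
        return (T @ x.T).T, T

    x1n, T1 = normalize(x1)
    x2n, T2 = normalize(x2)

    # Linear system A f = 0 from the epipolar constraint x2^T F x1 = 0.
    A = np.array([[u2 * u1, u2 * v1, u2, v2 * u1, v2 * v1, v2, u1, v1, 1]
                  for (u1, v1, _), (u2, v2, _) in zip(x1n, x2n)])

    # Least-squares solution: singular vector of the smallest singular value.
    _, _, Vt = np.linalg.svd(A)
    F = Vt[-1].reshape(3, 3)

    # Enforce rank 2 by zeroing the smallest singular value.
    U, S, Vt = np.linalg.svd(F)
    S[2] = 0
    F = U @ np.diag(S) @ Vt

    # Undo the normalization and fix the scale.
    F = T2.T @ F @ T1
    return F / F[2, 2]
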
Example #2

def main():
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 100
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    img2 = UndistortImage(img2, LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # print(x1.shape)
    # print(x2.shape)
    features = np.hstack([x1, x2])
    # getInliersRANSAC(features,threshold=(0.07),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
    x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                           threshold=0.005,
                                           size=8,
                                           num_inliers=0.6 * features.shape[0],
                                           num_iters=200)
    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)
    print("Essential Matrix")
    print(ess_mtx)

    C, R = ExtractCameraPose(ess_mtx)
    print("Pose Orientation :")
    print(R)

    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)
    # _,R,T,_ = cv2.recoverPose(E_act[0],x1_in[:,:2],x2_in[:,:2])

    # print("Pose Position :")
    # print(T.T)
    # print("Pose Orientation :")
    # print(R)

    R1, R2, T = cv2.decomposeEssentialMat(E_act[0])
    print("OpenCV R1")
    print(R1)
    print("OpenCV R2")
    print(R2)

    print("Calculated Pose Position :")
    print(C)
    print("Opencv T")
    print(T.T)
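
The essential-matrix and pose helpers used above are also external. Under the usual conventions (E = K^T F K with its singular values forced to (1, 1, 0), then the four-way SVD decomposition of E), plausible sketches are:

import numpy as np

def EssentialMatrixFromFundamentalMatrix(F, K):
    E = K.T @ F @ K
    # Project onto the space of valid essential matrices:
    # singular values (1, 1, 0).
    U, _, Vt = np.linalg.svd(E)
    return U @ np.diag([1, 1, 0]) @ Vt

def ExtractCameraPose(E):
    U, _, Vt = np.linalg.svd(E)
    W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
    # Four candidate (C, R) pairs; the cheirality check picks one later.
    Cs = [U[:, 2], -U[:, 2], U[:, 2], -U[:, 2]]
    Rs = [U @ W @ Vt, U @ W @ Vt, U @ W.T @ Vt, U @ W.T @ Vt]
    # Keep det(R) = +1 by flipping the sign of both C and R when needed.
    for i in range(4):
        if np.linalg.det(Rs[i]) < 0:
            Rs[i], Cs[i] = -Rs[i], -Cs[i]
    return np.array(Cs), np.array(Rs)
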
Example #3
def image_process(H_init, p_0, all_images):
    data_points = []

    for index in range(20, len(all_images) - 1):
        print("image_number = ", index)

        img1 = cv2.imread("stereo/centre/" + str(all_images[index]),
                          0)  #loading first image in greyscale
        img2 = cv2.imread("stereo/centre/" + str(all_images[index + 1]),
                          0)  #loading second image in greyscale

        colorimage1 = cv2.cvtColor(
            img1,
            cv2.COLOR_BayerGR2BGR)  #converting first image from BayerGR to BGR
        colorimage2 = cv2.cvtColor(
            img2, cv2.COLOR_BayerGR2BGR
        )  #converting second image from BayerGR to BGR

        undistortedimage1 = UndistortImage(
            colorimage1, LUT)  #undistorting first image using LUT
        undistortedimage2 = UndistortImage(
            colorimage2, LUT)  #undistorting second image using LUT

        gray1 = cv2.cvtColor(undistortedimage1,
                             cv2.COLOR_BGR2GRAY)  #converting back to grayscale
        gray2 = cv2.cvtColor(undistortedimage2,
                             cv2.COLOR_BGR2GRAY)  #converting back to grayscale

        #  Finding proper matches between two frames
        features1, features2 = SIFT_matches(gray1, gray2)

        #using inbuilt function to directly compute F_matrix after RANSAC
        FinalFundamentalMatrix, m = cv2.findFundamentalMat(
            features1, features2, cv2.FM_RANSAC)
        features1 = features1[m.ravel() == 1]
        features2 = features2[m.ravel() == 1]

        # Calculating the Essential matrix
        E_matrix = K.T @ FinalFundamentalMatrix @ K

        # Obtaining final Rotational and Translation pose for cameras
        retval, rot_mat, T, mask = cv2.recoverPose(E_matrix, features1,
                                                   features2, K)

        H_init = H_init @ H_matrix(
            rot_mat, T
        )  #matmul between H_init(Ident 4x4) and H_matrix that was calculated using new Rot and Trans matrix
        p_projection = H_init @ p_0  #new projection point calculated

        data_points.append([p_projection[0][0], -p_projection[2][0]])

        plt.scatter(p_projection[0][0], -p_projection[2][0], color='g')
    return data_points
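
H_matrix is not shown; it is presumably the usual 4 x 4 homogeneous transform built from the recovered rotation and translation, roughly:

import numpy as np

def H_matrix(R, T):
    # Stack [R | T] over [0 0 0 1] to get the 4x4 homogeneous pose.
    H = np.eye(4)
    H[:3, :3] = R
    H[:3, 3] = T.ravel()
    return H
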
Example #4

def data_prep(data_img, x):  # NOTE: `x` (frame index) was undefined in the original; assumed parameter
    img_1 = cv2.cvtColor(data_img[x], cv2.COLOR_BayerGR2BGR)
    img_2 = cv2.cvtColor(data_img[x + 1], cv2.COLOR_BayerGR2BGR)
    fx, fy, cx, cy, G_C_1, LUT1 = ReadCameraModel('Oxford_dataset/model')
    undimg_1 = UndistortImage(img_1, LUT1)
    undimg_1 = cv2.cvtColor(undimg_1, cv2.COLOR_BGR2GRAY)
    imgeqi_1 = cv2.equalizeHist(undimg_1)
    fx, fy, cx, cy, G_C_2, LUT2 = ReadCameraModel('Oxford_dataset/model')  # same model re-read; one call would suffice
    undimg_2 = UndistortImage(img_2, LUT2)
    undimg_2 = cv2.cvtColor(undimg_2, cv2.COLOR_BGR2GRAY)
    imgeqi_2 = cv2.equalizeHist(undimg_2)

    return fx, fy, cx, cy, imgeqi_1, imgeqi_2
Example #5
    def preprocess(self, image, nextimage):
        rgb1 = cv2.cvtColor(image, cv2.COLOR_BayerGR2BGR)
        rgb2 = cv2.cvtColor(nextimage, cv2.COLOR_BayerGR2BGR)
        undimg_1 = UndistortImage(rgb1, self.LUT1)
        undimg_1 = cv2.cvtColor(undimg_1, cv2.COLOR_BGR2GRAY)
        undimg_2 = UndistortImage(rgb2, self.LUT2)
        undimg_2 = cv2.cvtColor(undimg_2, cv2.COLOR_BGR2GRAY)
        imgeqi_1 = cv2.equalizeHist(undimg_1)
        # imgeqi_1 = (imgeqi_1[0:800, 0:1280].copy())
        imgeqi_2 = cv2.equalizeHist(undimg_2)
        # imgeqi_2 = (imgeqi_2[0:800, 0:1280].copy())

        return imgeqi_1, imgeqi_2
Example #6
def preprocess_data(img_path, name, LUT):
    im = cv2.imread(os.path.join(img_path, name), 0)
    BGR_im = cv2.cvtColor(im, cv2.COLOR_BayerGR2BGR)
    un_im = UndistortImage(BGR_im, LUT)
    un_im = cv2.cvtColor(un_im, cv2.COLOR_BGR2GRAY)
    un_im = un_im[200:650, :]

    return un_im
Example #7
def main():
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 100
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    img2 = UndistortImage(img2, LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    features = np.hstack([x1, x2])
    x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                           threshold=0.005,
                                           size=8,
                                           num_inliers=0.6 * features.shape[0],
                                           num_iters=200)
    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)

    C, R = cameraPose.ExtractCameraPose(ess_mtx)

    X_tri = []
    for j in range(4):
        X_tri.append(
            linearTriangulation(K, np.zeros(3), C[j], np.eye(3), R[j], x1_in,
                                x2_in))
    X_tri = np.array(X_tri)
    print(X_tri.shape)

    sys.exit(0)
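
linearTriangulation is another external helper. A standard DLT sketch matching the call above (first camera at C1 = 0 with R1 = I, homogeneous image points) could be:

import numpy as np

def linearTriangulation(K, C1, C2, R1, R2, x1, x2):
    # Projection matrices P = K R [I | -C] for each camera.
    P1 = K @ R1 @ np.hstack([np.eye(3), -C1.reshape(3, 1)])
    P2 = K @ R2 @ np.hstack([np.eye(3), -C2.reshape(3, 1)])

    X = []
    for p1, p2 in zip(x1, x2):
        # Each image point contributes two rows via the constraint
        # x cross (P X) = 0.
        A = np.vstack([p1[0] * P1[2] - P1[0],
                       p1[1] * P1[2] - P1[1],
                       p2[0] * P2[2] - P2[0],
                       p2[1] * P2[2] - P2[1]])
        _, _, Vt = np.linalg.svd(A)
        X.append(Vt[-1] / Vt[-1][3])  # dehomogenize
    return np.array(X)
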
Example #8
def main():
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 100
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    img2 = UndistortImage(img2, LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # print(x1.shape)
    # print(x2.shape)
    features = np.hstack([x1, x2])
    # getInliersRANSAC(features,threshold=(0.07),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
    x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                           threshold=0.005,
                                           size=8,
                                           num_inliers=0.6 * features.shape[0],
                                           num_iters=1000)
    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    E = EssentialMatrixFromFundamentalMatrix(fund_mtx, K)
    print("Essential Matrix")
    print(E)

    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)
    print("E actual")
    print(E_act[0])
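
getInliersRANSAC is likewise not shown. Given how it is called (features is the N x 6 stack [x1 | x2] of homogeneous points, threshold bounds the algebraic epipolar error), one plausible sketch, reusing the EstimateFundamentalMatrix sketched under Example #1:

import numpy as np

def getInliersRANSAC(features, threshold, size, num_inliers, num_iters):
    x1, x2 = features[:, :3], features[:, 3:]
    best_mask, best_count = None, -1
    for _ in range(num_iters):
        # Fit F to a random minimal sample of `size` correspondences.
        idx = np.random.choice(len(features), size, replace=False)
        F = EstimateFundamentalMatrix(x1[idx], x2[idx])
        # Algebraic epipolar error |x2^T F x1| for every correspondence.
        err = np.abs(np.sum(x2 * (F @ x1.T).T, axis=1))
        mask = err < threshold
        if mask.sum() > best_count:
            best_mask, best_count = mask, mask.sum()
        if best_count >= num_inliers:
            break  # early exit once enough inliers are found
    return x1[best_mask], x2[best_mask]
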
Example #9
def preprocess_data(img_path, name, LUT):
    im = cv2.imread(os.path.join(img_path, name), 0)
    BGR_im = cv2.cvtColor(im, cv2.COLOR_BayerGR2BGR)
    un_im = UndistortImage(BGR_im, LUT)
    # un_im = un_im[200:650, :]

    # cv2.imshow("image", un_im)
    # if cv2.waitKey(0) & 0xff == 27:
    # 	cv2.destroyAllWindows()

    # h, w = im.shape
    # dim = (int(0.6*w), int(0.6*h))
    # un_im = cv2.resize(un_im, dim)

    return un_im
Example #10
def undistortImage(path_to_model, path_to_images):
    '''
    Undistort the dataset images and write them to ./undistort/
    :param path_to_model: directory containing the camera model files
    :param path_to_images: directory containing the raw Bayer .png frames
    '''

    # Read camera parameters
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel(path_to_model)

    # iterate through each image, convert to BGR, undistort(function takes all channels in input)
    images = glob.glob(path_to_images + "/*.png")
    images.sort()

    for cnt, image in enumerate(images):
        frame = cv2.imread(image, -1)
        frame_RGB = cv2.cvtColor(frame, cv2.COLOR_BayerGR2BGR)
        undistorted_image = UndistortImage(frame_RGB, LUT)
        cv2.imwrite("./undistort/frame" + str(cnt) + ".png", undistorted_image)
Example #11
def raw2Undistorted(img_path, LUT):
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    img = cv2.cvtColor(img, cv2.COLOR_BayerGR2BGR)
    img = UndistortImage(img, LUT)

    return img
Example #12
print(t_f[1, 0])

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

for i in range(len(filenames) - 1):
    if i <= 40:
        continue

    f = filenames[i]
    f1 = filenames[i + 1]
    img1 = cv2.imread(f, 0)
    img2 = cv2.imread(f1, 0)

    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2RGB)
    img1 = UndistortImage(img1, LUT)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2RGB)
    img2 = UndistortImage(img2, LUT)


    img1GrayScale = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    img2GrayScale = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    pts1, pts2 = FindCorrespondence(img1GrayScale, img2GrayScale)
    corr_list = np.concatenate((pts1, pts2), 1)

    F, mI = RANSACforF(pts1, pts2, corr_list)

    E = EstimateEssentialMatrix(cameraMatrix, F)


    P1, P2, P3, P4, (C1, R1), (C2, R2), (C3, R3), (C4, R4) = EstimateCameraPose(E, cameraMatrix)
Example #13
h = np.eye(4)
current_pos = np.zeros((3, 1))
current_rot = np.eye(3)
while a < (len(car_images) - 1):
    # for i in range(len(car_images)):
    #imageRaw1 = cv2.imread(images[i], cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH)
    #rgb1 = cv2.cvtColor(imageRaw1, cv2.COLOR_BAYER_GR2BGR)
    rgb1 = cv2.cvtColor(car_images[a], cv2.COLOR_BayerGR2BGR)

    #imageRaw2 = cv2.imread(frame2, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH)
    #rgb2 = cv2.cvtColor(imageRaw2, cv2.COLOR_BAYER_GR2BGR)
    rgb2 = cv2.cvtColor(car_images[a + 1], cv2.COLOR_BayerGR2BGR)

    fx, fy, cx, cy, G_camera_image1, LUT1 = ReadCameraModel(
        'Oxford_dataset/model')
    undistorted_image1 = UndistortImage(rgb1, LUT1)
    undistorted_image1 = cv2.cvtColor(undistorted_image1, cv2.COLOR_BGR2GRAY)

    eqimage1 = cv2.equalizeHist(undistorted_image1)

    fx, fy, cx, cy, G_camera_image2, LUT2 = ReadCameraModel(
        'Oxford_dataset/model')
    undistorted_image2 = UndistortImage(rgb2, LUT2)
    undistorted_image2 = cv2.cvtColor(undistorted_image2, cv2.COLOR_BGR2GRAY)

    eqimage2 = cv2.equalizeHist(undistorted_image2)

    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    # Initiate STAR detector
    orb = cv2.ORB_create()
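
The snippet ends right after the detector is created. The usual continuation, detecting ORB features in both equalized frames and matching them with a Hamming-distance brute-force matcher, would be roughly (variable names follow the snippet):

    # Detect and describe features in both equalized frames.
    kp1, des1 = orb.detectAndCompute(eqimage1, None)
    kp2, des2 = orb.detectAndCompute(eqimage2, None)

    # Brute-force matching with Hamming distance (appropriate for ORB),
    # best matches first.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

    pts1 = np.float32([kp1[m.queryIdx].pt for m in matches])
    pts2 = np.float32([kp2[m.trainIdx].pt for m in matches])
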
Example #14
def image_process(img):
    color_image = cv2.cvtColor(img, cv2.COLOR_BayerGR2BGR)
    color_image_undistorted = UndistortImage(color_image, LUT)
    gray_image = cv2.cvtColor(color_image_undistorted, cv2.COLOR_BGR2GRAY)
    return gray_image
Example #15
for i in range(0, N):
    img_current_frame = cv2.imread(centre_path[i], 0)
    img_next_frame = cv2.imread(centre_path[i + 1], 0)
    # Mask the car bonnet with a black rectangle; cv2.rectangle expects
    # integer corner coordinates.
    img_current_frame = cv2.rectangle(
        img_current_frame, (50, np.shape(img_current_frame)[0]),
        (1250, 800), (0, 0, 0), -1)
    img_next_frame = cv2.rectangle(
        img_next_frame, (50, np.shape(img_next_frame)[0]),
        (1250, 800), (0, 0, 0), -1)
    color_img = cv2.cvtColor(img_current_frame, cv2.COLOR_BayerGR2RGB)
    color_img_next = cv2.cvtColor(img_next_frame, cv2.COLOR_BayerGR2RGB)

    undist_img = UndistortImage(color_img, LUT)
    undist_img_next = UndistortImage(color_img_next, LUT)
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(undist_img, None)
    kp2, des2 = sift.detectAndCompute(undist_img_next, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
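
After building the FLANN matcher, these pipelines typically filter matches with Lowe's ratio test before estimating the fundamental matrix; a sketch of that step, continuing with the snippet's kp1/des1/kp2/des2:

    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only when it is clearly better
    # than the second-best candidate.
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]

    if len(good) > MIN_MATCH_COUNT:
        pts1 = np.float32([kp1[m.queryIdx].pt for m in good])
        pts2 = np.float32([kp2[m.trainIdx].pt for m in good])
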
Example #16
images = []

# Read and save camera intrinsic parameters
fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel(modelDirectory)
'''
K = np.array([[fx , 0 , cx],[0 , fy , cy],[0 , 0 , 1]]) 
f = open('Camera Intrinsics.txt','a')
f.write('K: ' + '\n' + str(K) + '\n\n')
f.write('G_camera_image: ' + '\n' + str(G_camera_image))
f.close()
'''

# Process all images in the image directory
for imageName in os.listdir(imageDirectory):
    rawImage = cv2.imread(imageDirectory + imageName)

    # imread loads the Bayer frame as three identical channels; collapse it
    # back to one channel, then demosaic into a BGR image
    gray = cv2.cvtColor(rawImage, cv2.COLOR_BGR2GRAY)
    imageBGR = cv2.cvtColor(gray, cv2.COLOR_BayerGR2BGR)

    # Undistort BGR image
    undistortedImage = UndistortImage(imageBGR, LUT)

    # Save to dataset folder
    cv2.imwrite('dataset/' + imageName, undistortedImage)




Example #17

import glob
import os

import cv2
from ReadCameraModel import ReadCameraModel
from UndistortImage import UndistortImage

img = glob.glob('./stereo/centre/*.png')
img.sort()
print(len(img), "images found")
data = []
final = []
fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel("./model")
for i in range(len(img)):
    temp = cv2.imread(img[i], 0)
    data.append(temp)
count = 0
EXTN = ".png"
for j in range(len(data)):
    count = count + 1
    # if(count>50):
    # break
    pic = data[j]
    print(count)
    tempo = cv2.cvtColor(pic, cv2.COLOR_BayerGR2BGR)
    temp_img = UndistortImage(tempo, LUT)
    filename = str(count).zfill(4) + EXTN
    cv2.imwrite("./undistorted_input_images/frame%s" % filename, temp_img)
    # print(temp_img)
    final.append(temp_img)
cv2.imshow("image", final[0])
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #18
def undistortImageToGray(img, LUT):
    colorimage = cv2.cvtColor(img, cv2.COLOR_BayerGR2BGR)
    undistortedimage = UndistortImage(colorimage, LUT)
    gray = cv2.cvtColor(undistortedimage, cv2.COLOR_BGR2GRAY)
    # equ = cv2.equalizeHist(gray1)
    return gray
Example #19
def main():
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')
    pt_old = np.zeros((3, 1))
    pt_old_cv = np.zeros((3, 1))
    i = 15
    while i < len(imgs) - 4:  # leave room for the i + 3 step and the i + 1 lookahead
        i = i + 3

        print(i)
        img1 = cv2.imread(img_path + imgs[i], -1)  # -1 keeps the single-channel Bayer image
        img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
        img1 = UndistortImage(img1, LUT)

        img2 = cv2.imread(img_path + imgs[i + 1], -1)
        img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
        img2 = UndistortImage(img2, LUT)

        x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

        x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
        x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

        features = np.hstack([x1, x2])
        x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                               threshold=0.002,
                                               size=8,
                                               num_inliers=0.6 * features.shape[0],
                                               num_iters=200)
        # print("Inliers :",x1_in.shape[0])
        fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
        K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

        ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)

        C, R = cameraPose.ExtractCameraPose(ess_mtx)

        X_tri = []
        for j in range(4):
            X_tri.append(
                triangulation.linearTriangulation(K, np.zeros(3), C[j],
                                                  np.eye(3), R[j], x1_in,
                                                  x2_in))
        X_tri = np.array(X_tri)
        # print(X_tri.shape)
        C_c, R_c = disambiguateCameraPose.disambiguateCameraPose(X_tri, C, R)
        C_c = np.reshape(C_c, (3, 1))
        if C_c[2] < 0:
            C_c *= -1

        # print("Pose Position :")
        # print(C_c.T)
        # print("Pose Orientation :")
        # print(R_c)

        pt_new = np.matmul(R_c, pt_old)
        pt_new += C_c

        if abs(pt_new[0] - pt_old[0]) > 2:
            pt_new[0] = copy.copy(pt_old[0])
        if abs(pt_new[1] - pt_old[1]) > 2:
            pt_new[1] = copy.copy(pt_old[1])
        if abs(pt_new[2] - pt_old[2]) > 2:
            pt_new[2] = copy.copy(pt_old[2])

        print("Old point :", pt_old.T)
        print("New point :", pt_new.T)

        plt.figure("Calculated")
        plt.plot([pt_old[0], pt_new[0]], [pt_old[2], pt_new[2]])
        pt_old = copy.copy(pt_new)

        # E_act = cv2.findEssentialMat(x1[:,:2],x2[:,:2],K)
        # _,R,T,_ = cv2.recoverPose(E_act[0],x1[:,:2],x2[:,:2])

        # pt_new_cv = np.matmul(R,pt_old_cv)
        # pt_new_cv += T

        # if abs(pt_new_cv[0]-pt_old_cv[0]) >0.5:
        # 	pt_new_cv[0] = copy.copy(pt_old_cv[0])
        # if abs(pt_new_cv[1]-pt_old_cv[1]) >0.5:
        # 	pt_new_cv[1] = copy.copy(pt_old_cv[1])

        # print("Act :",plot_act)
        # print("Calc :",plot_calc)
        # print("---")
        # print("CV2 Pose Position :")
        # print(T.T)
        # print("CV2 Pose Orientation :")
        # print(R)
        #
        # print("Pose Position cv2:")
        # print(T.T)
        # print("Pose Orientation cv2 :")
        # print(R)

        # print("Old point :",pt_old_cv.T)
        # print("New point :",pt_new_cv.T)
        # plt.figure("OpenCV")
        # plt.plot([pt_old_cv[0],pt_new_cv[0]],[pt_old_cv[2],pt_new_cv[2]])
        # pt_old_cv = copy.copy(pt_new_cv)

        plt.draw()  # plt.show() would block the loop; draw and pause instead
        plt.pause(0.00001)

        print("!-----------------!")
Example #20
    if Translation_final[2] > 0:
        Translation_final = -Translation_final

    return Rotation_final, Translation_final


H_Start = np.identity(4)
p_0 = np.array([[0, 0, 0, 1]]).T
flag = 0

data_points = []
for index in range(19, len(frames) - 1):
    print(frames[index], index)
    img1 = cv2.imread("stereo/centre/" + str(frames[index]), 0)
    colorimage1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    undistortedimage1 = UndistortImage(colorimage1, LUT)
    gray1 = cv2.cvtColor(undistortedimage1, cv2.COLOR_BGR2GRAY)

    img2 = cv2.imread("stereo/centre/" + str(frames[index + 1]), 0)
    colorimage2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    undistortedimage2 = UndistortImage(colorimage2, LUT)
    gray2 = cv2.cvtColor(undistortedimage2, cv2.COLOR_BGR2GRAY)

    grayImage1 = gray1[200:650, 0:1280]
    grayImage2 = gray2[200:650, 0:1280]

    sift = cv2.xfeatures2d.SIFT_create()

    kp1, des1 = sift.detectAndCompute(grayImage1, None)
    kp2, des2 = sift.detectAndCompute(grayImage2, None)
Example #21
def main():
    # Preprocess Images
    start_t = time.time()
    H_init = np.identity(4)
    p0 = np.array([[0, 0, 0, 1]]).T
    data = []
    for i in range(18, len(f) - 1):
        print(i)
        img1 = cv2.imread(path + str(f[i]), 0)
        # cv2.imshow("",img1)
        # cv2.waitKey(0)
        # Frame 1
        bgr_img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
        undist_img1 = UndistortImage(bgr_img1, LUT)
        gray_img1 = cv2.cvtColor(undist_img1, cv2.COLOR_BGR2GRAY)

        # Frame 2
        img2 = cv2.imread(path + str(f[i + 1]), 0)
        bgr_img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
        undist_img2 = UndistortImage(bgr_img2, LUT)
        gray_img2 = cv2.cvtColor(undist_img2, cv2.COLOR_BGR2GRAY)

        gray_img1 = gray_img1[250:600, 0:1280]
        gray_img2 = gray_img2[250:600, 0:1280]

        # Find Points of Correspondence

        f1, f2 = findpts(gray_img1, gray_img2)

        for l in range(0, len(f1)):
            a = (int(f1[l][0]), int(f1[l][1]))
            sift_img = cv2.circle(gray_img2, a, 1, (255, 0, 0), 3)
        cv2.imwrite(os.path.join(wrframe, str(i) + '.jpg'), bgr_img1)
        #sift_img = plt.imread(sift_img)
        # plt.imshow("",sift_img)
        # cv2.waitKey(0)
        # #print(f2)

        # Determine F

        F, i1, i2 = findF(f1, f2)

        # Determine E
        E = findE(K, F)

        #print(E)

        # Find T and R
        R_list, T_list = decomp(E)

        # Find Best estimates for R and T
        R, T = cheirality_check(R_list, T_list, i1, i2)

        # Find Homogeneous Matrix and Camera Centre for Each frame

        H = get_homogeneous(R, T)
        H_init = H_init @ H
        p = H_init @ p0

        print('X = ', p[0])
        print('Y = ', p[2])

        # Write the Co-ordinates in a text file
        #data.append([p[0][0], -p[2][0]])
        file1.write(str(p[0][0]) + ",")
        file1.write(str(-p[2][0]) + "\n")

        # Plot the Trajectory
        # plt.yticks(np.arange(-200,800,100))
        # plt.xticks(np.arange(0,1000,100))
        plt.scatter(p[0][0], -p[2][0], color='b')
        plt.savefig(os.path.join(pathplot, str(i) + '.png'))
    file1.close()
    end_t = time.time()
    delta = (end_t - start_t) / 60
    print("Time taken: ", delta)
    plt.show()
Example #22
    images.append(image)
    images.sort()

fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('model/')
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

init = np.identity(4)
origin = np.array([[0, 0, 0, 1]]).T

trajectory_points = []

for ind in range(19, len(images) - 1):
    print(ind)
    img1 = cv2.imread(path + str(images[ind]), 0)
    first_image = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    first_image_undistorted = UndistortImage(first_image, LUT)
    first_image_gray = cv2.cvtColor(first_image_undistorted,
                                    cv2.COLOR_BGR2GRAY)

    # Reading next image
    img2 = cv2.imread(path + str(images[ind + 1]), 0)
    next_image = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    next_image_undistorted = UndistortImage(next_image, LUT)
    next_image_gray = cv2.cvtColor(next_image_undistorted, cv2.COLOR_BGR2GRAY)

    # Cropping the area of interest from the current image
    first_image_final = first_image_gray[200:650, 0:1280]
    # Cropping the area of interest from the next frame
    next_image_final = next_image_gray[200:650, 0:1280]

    sift = cv2.xfeatures2d.SIFT_create()
Example #23
import cv2
import glob
import numpy as np
from ReadCameraModel import ReadCameraModel
from UndistortImage import UndistortImage

model_dir = 'Oxford_dataset/model/'
fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel(model_dir)

count = 0
for img in sorted(glob.glob('Oxford_dataset/stereo/centre/*.png')):  # sort so frames stay in order
    image = cv2.imread(img, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_ANYDEPTH)
    color_image = cv2.cvtColor(image, cv2.COLOR_BayerGR2BGR)
    dist = UndistortImage(color_image, LUT)
    cv2.imwrite('data/' + '%d.png' % count, dist)
    count = count + 1

K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
Example #24
def main():

    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 150
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    img2 = UndistortImage(img2, LUT)

    print(img1.shape)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # print(x1.shape)
    # print(x2.shape)
    features = np.hstack([x1, x2])
    # getInliersRANSAC(features,threshold=(0.07),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
    x1_in, x2_in = getInliersRANSAC(features,
                                    threshold=0.005,
                                    size=8,
                                    num_inliers=0.6 * features.shape[0],
                                    num_iters=500)
    print(x1_in.shape)
    # drawFeatures(img1,img2,x1,x2,'r',0.3)
    # drawFeatures(img1,img2,x1_in,x2_in,'g',1)
    # plt.show()

    # sys.exit(0)

    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)

    # x1 = x1_in
    # x2 = x2_in

    # print("CV2 Fundamental Matrix :",correct_fund_mtx[0])

    print("Fundamental Matrix RANSAC:")
    print(fund_mtx)

    # img1 =cv2.imread('../Data/1.jpg')
    # img2 =cv2.imread('../Data/2.jpg')

    pts1 = np.int32(x1[:, :2])
    pts2 = np.int32(x2[:, :2])
    F, _ = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)  # use the 2-D points, not homogeneous ones

    drawFeatures(img1, img2, x1, x2, 'r', 0.1)
    drawFeatures(img1, img2, x1_in, x2_in, 'g', 0.5)
    plt.show()

    print("F")
    print(F)

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    # lines1 = [lines1[15]]
    # img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
    img5, img6 = drawlines(img1, img2, lines1[25:30], pts1[25:30], pts2[25:30])

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    # lines2 = [lines2[25]]
    # img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
    img3, img4 = drawlines(img2, img1, lines2[25:30], pts2[25:30], pts1[25:30])

    plt.subplot(221), plt.imshow(img5)
    plt.subplot(222), plt.imshow(img3)
    # plt.show()

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, fund_mtx)
    lines1 = lines1.reshape(-1, 3)
    # lines1 = [lines1[25]]
    # img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)
    img5, img6 = drawlines(img1, img2, lines1[25:30], pts1[25:30], pts2[25:30])

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, fund_mtx)
    lines2 = lines2.reshape(-1, 3)
    # lines2 = [lines2[25]]
    # img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)
    img3, img4 = drawlines(img2, img1, lines2[25:30], pts2[25:30], pts1[25:30])

    plt.subplot(223), plt.imshow(img5)
    plt.subplot(224), plt.imshow(img3)
    plt.show()
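
drawlines is the epiline-drawing helper from the OpenCV epipolar-geometry tutorial; for completeness, a sketch of what is assumed here:

import cv2
import numpy as np

def drawlines(img1, img2, lines, pts1, pts2):
    # Draw each epiline a*x + b*y + c = 0 across img1 and mark the
    # corresponding points on both images.
    r, c = img1.shape[:2]
    for (a, b, co), pt1, pt2 in zip(lines, pts1, pts2):
        color = tuple(np.random.randint(0, 255, 3).tolist())
        x0, y0 = 0, int(-co / b)
        x1, y1 = c, int(-(co + a * c) / b)
        img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
        img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
    return img1, img2
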
Example #25
                it += 1
                continue

            # elif it > 5 and it % 2 != 0:
            #     it += 1
            #     continue

            elif it > 700:
                it += 1
                break
            print('Iteration: ', it)

            # load image
            img = cv2.imread(filepath, 0)
            img = cv2.cvtColor(img, cv2.COLOR_BayerGR2BGR)
            img_orig = UndistortImage(img, LUT)
            img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY)

            if (it == 1):
                img_orig1 = img_orig
                img1 = img
                it = 2
                orb_kp1, orb_des1 = orb.detectAndCompute(img1, None)
                continue

            it += 1
            img2 = img
            img_orig2 = img_orig

            # continue
            # find orb features

Example #26

lastH = np.identity(4)  # initial camera pose is the identity matrix
origin = np.array([[0, 0, 0, 1]]).T

l = []  # Variable for storing all the trajectory points

for index in range(19, len(images) - 1):  # Looping over all the images

    img1 = cv2.imread("stereo/centre/" + str(images[index]),
                      0)  # Read current image
    colorimage1 = cv2.cvtColor(
        img1,
        cv2.COLOR_BayerGR2BGR)  # Convert image from bayer format to BGR format
    undistortedimage1 = UndistortImage(colorimage1,
                                       LUT)  # Undistort the current image
    gray1 = cv2.cvtColor(
        undistortedimage1,
        cv2.COLOR_BGR2GRAY)  # Convert the undistorted image in grayscale

    img2 = cv2.imread("stereo/centre/" + str(images[index + 1]),
                      0)  # Read next image
    colorimage2 = cv2.cvtColor(
        img2,
        cv2.COLOR_BayerGR2BGR)  # Convert image from bayer format to BGR format
    undistortedimage2 = UndistortImage(colorimage2,
                                       LUT)  # Undistort the next image
    gray2 = cv2.cvtColor(
        undistortedimage2,
        cv2.COLOR_BGR2GRAY)  # Convert the undistorted image in grayscale
Example #27

# Main

# Preprocess Images
start_t = time.time()
H_init = np.identity(4)
p0 = np.array([[0, 0, 0, 1]]).T
data = []
for i in range(19, 25):
    print(i)
    img1 = cv2.imread(path + str(f[i]), 0)
    # cv2.imshow("",img1)
    # cv2.waitKey(0)
    bgr_img1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    undist_img1 = UndistortImage(bgr_img1, LUT)
    gray_img1 = cv2.cvtColor(undist_img1, cv2.COLOR_BGR2GRAY)

    img2 = cv2.imread(path + str(f[i + 1]), 0)
    bgr_img2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    undist_img2 = UndistortImage(bgr_img2, LUT)
    gray_img2 = cv2.cvtColor(undist_img2, cv2.COLOR_BGR2GRAY)

    gray_img1 = gray_img1[200:650, 0:1280]
    gray_img2 = gray_img2[200:650, 0:1280]

    # Find Points of Correspondence

    f1, f2 = findpts(gray_img1, gray_img2)

    F, m = cv2.findFundamentalMat(f1, f2, cv2.FM_RANSAC)
Example #28
    if Translation_final[2] > 0:
        Translation_final = -Translation_final

    return Rotation_final, Translation_final


H_Start = np.identity(4)
p_0 = np.array([[0, 0, 0, 1]]).T
flag = 0

data_points = []
for index in range(18, len(frames) - 1):
    print(frames[index], index)
    img1 = cv2.imread(pathimage + str(frames[index]), 0)
    distorted_image1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
    undistorted_image1 = UndistortImage(distorted_image1, LUT)
    grayscale_1 = cv2.cvtColor(undistorted_image1, cv2.COLOR_BGR2GRAY)

    img2 = cv2.imread(pathimage + str(frames[index + 1]), 0)
    distorted_image2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
    undistorted_image2 = UndistortImage(distorted_image2, LUT)
    grayscale_2 = cv2.cvtColor(undistorted_image2, cv2.COLOR_BGR2GRAY)

    frame_image1 = grayscale_1[200:650, 0:1280]
    frame_image2 = grayscale_2[200:650, 0:1280]

    sift = cv2.xfeatures2d.SIFT_create()

    keypoints_1, descriptor_1 = sift.detectAndCompute(frame_image1, None)
    keypoints_2, descriptor_2 = sift.detectAndCompute(frame_image2, None)
Example #29
def undistortImg(img):
    colorimage = cv2.cvtColor(img, cv2.COLOR_BayerGR2BGR)
    undistortedimage = UndistortImage(colorimage, LUT)
    gray = cv2.cvtColor(undistortedimage, cv2.COLOR_BGR2GRAY)

    return gray
Example #30
def main():
	img_path = '../Data/stereo/centre/'
	imgs = sorted(os.listdir(img_path))
	fx,fy,cx,cy,G_camera_image,LUT = ReadCameraModel('../Data/model')

	i = 100
	img1 = cv2.imread(img_path+imgs[i],-1)
	img1 = cv2.cvtColor(img1,cv2.COLOR_BayerGR2BGR)
	img1 = UndistortImage(img1,LUT)

	img2 = cv2.imread(img_path+imgs[i+1],-1)
	img2 = cv2.cvtColor(img2,cv2.COLOR_BayerGR2BGR)
	img2 = UndistortImage(img2,LUT)


	x1,x2 = utils.getMatchingFeaturePoints(img1,img2)

	x1 = np.hstack([x1,np.ones((x1.shape[0],1))])
	x2 = np.hstack([x2,np.ones((x2.shape[0],1))])

	features = np.hstack([x1,x2])
	x1_in,x2_in = RANSAC.getInliersRANSAC(features,threshold=0.005,size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
	print(x1_in.shape)
	fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in,x2_in)
	K = np.array([[fx,0,cx],[0,fy,cy],[0,0,1]])

	ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx,K)

	
	C,R = cameraPose.ExtractCameraPose(ess_mtx)
	# print("Available C :")
	# print(C)

	print("Available R")
	print(R)

	X_tri = []
	for j in range(4):
		X_tri.append(triangulation.linearTriangulation(K,np.zeros(3),C[j],np.eye(3),R[j],x1_in,x2_in))
	X_tri = np.array(X_tri)
	# print(X_tri.shape)
	C_c,R_c = disambiguateCameraPose(X_tri,C,R)

	print("Pose Position :")
	print(C_c)
	print("Pose Orientation :")
	print(R_c)	

	# angle = utils.rotationMatrixToEulerAngles(R_c)
	# print("Angle :")
	# print(angle)

	E_act = cv2.findEssentialMat(x1_in[:,:2],x2_in[:,:2],K)
	_,R,T,_ = cv2.recoverPose(E_act[0],x1_in[:,:2],x2_in[:,:2])

	print("---")
	print("CV2 Pose Position :")
	print(T.T)
	print("CV2 Pose Orientation :")
	print(R)	
	# angle = utils.rotationMatrixToEulerAngles(R)
	# print("CV2 Ang;e:")
	# print(angle)
	print("<----------------->")