Example #1
    def visualize_epipolar_lines(self, img1, img2, p1, p2, E, save_path):
        """
        This function visualizes the epipolar lines
        img1, img2: are the two images
        p1, p2: are the good keypoints
        E: Essential matrix
        save_path: destination to save the visualization image
        """
        # get fundamental matrix
        F, mask_fdm = cv2.findFundamentalMat(p1, p2, cv2.RANSAC)
        p1_selected = p1[mask_fdm.ravel() == 1]
        p2_selected = p2[mask_fdm.ravel() == 1]

        # draw lines
        lines1 = cv2.computeCorrespondEpilines(p2_selected.reshape(-1, 1, 2),
                                               2, F).reshape(-1, 3)
        img5, _ = self.drawlines(img1, img2, lines1, p1_selected, p2_selected,
                                 100)

        lines2 = cv2.computeCorrespondEpilines(p1_selected.reshape(-1, 1, 2),
                                               1, F).reshape(-1, 3)
        img3, _ = self.drawlines(img2, img1, lines2, p2_selected, p1_selected,
                                 100)
        canvas = np.concatenate((img5, img3), axis=1)
        cv2.imwrite(save_path, canvas)
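Note: most of the examples on this page call an external drawlines helper that is not shown next to them (Example #31 embeds one variant inline, and individual signatures differ, e.g. the extra trailing argument in Example #1). A minimal sketch of that helper, adapted from the copy nested in Example #31:

import cv2
import numpy as np

def drawlines(img1, img2, lines, pts1, pts2):
    ''' img1 - image on which we draw the epilines for the points in img2
        lines - corresponding epilines, one (a, b, c) row per point '''
    r, c = img1.shape[:2]
    if img1.ndim == 2:
        # promote grayscale inputs to BGR so the colored lines are visible
        img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
        img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for ln, pt1, pt2 in zip(lines, pts1, pts2):
        color = tuple(np.random.randint(0, 255, 3).tolist())
        # intersect a*x + b*y + c = 0 with the left (x = 0) and right (x = c) borders
        x0, y0 = map(int, [0, -ln[2] / ln[1]])
        x1, y1 = map(int, [c, -(ln[2] + ln[0] * c) / ln[1]])
        img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv2.circle(img1, tuple(np.int32(pt1)), 5, color, -1)
        img2 = cv2.circle(img2, tuple(np.int32(pt2)), 5, color, -1)
    return img1, img2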
Example #2
def draw_epipolar_lines(F,
                        img1,
                        img2,
                        pts1,
                        pts2,
                        display=ct.DONT_DISPLAY_PLOT):
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

    im.write(ct.POSE_WRITE_PATH + "epipolar_lines1.jpg", img5)
    im.write(ct.POSE_WRITE_PATH + "epipolar_lines2.jpg", img3)

    if display:
        plt.subplot(121), plt.imshow(img5)
        plt.subplot(122), plt.imshow(img3)
        plt.show()
Example #3
def drawEpilinesAndMatches(imgL, imgR, pts1, pts2, kp1, kp2, good, F):

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img3_L = imgL.copy()  # work on copies so the input images stay unmarked
    img3_R = imgR.copy()
    img3_L, img3_R = drawlines(img3_L, img3_R, lines1, pts1, pts2)

    fig = plt.figure()
    plt.subplot(121)
    plt.imshow(imgL), plt.title(
        'Input L (no lines should be written on these variables?)')
    plt.subplot(122)
    plt.imshow(imgR), plt.title(
        'Input R (no lines should be written on these variables?)')
    plt.show()

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img4_L = imgL.copy()
    img4_R = imgR.copy()
    img4_L, img4_R = drawlines(img4_L, img4_R, lines2, pts2, pts1)

    # cv2.drawMatchesKnn expects list of lists as matches.
    # http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    imgDummy = np.zeros((1, 1))
    img5 = cv2.drawMatches(imgL, kp1, imgR, kp2, good[:10], imgDummy)

    return img3_L, img3_R, img4_L, img4_R, img5, lines1, lines2
Example #4
def draw_epipolar_lines(img1, img2, corrs, F):
    pts1, pts2 = [], []
    dists = []
    for i in range(corrs.shape[0]):
        x_1, y_1, x_2, y_2 = corrs[i]
        pts1.append((y_1, x_1))
        pts2.append((y_2, x_2))
    pts1 = np.array(pts1)
    pts2 = np.array(pts2)
    # scale [0, 1] floats to 8-bit; 255 avoids uint8 overflow at exactly 1.0
    img1 = (img1.copy()*255.).astype(np.uint8)
    img2 = (img2.copy()*255.).astype(np.uint8)

    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
    lines1 = lines1.reshape(-1,3)
    img1_out,img2_out = drawlines(img1,img2,lines1,pts1,pts2)

    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
    lines2 = lines2.reshape(-1,3)
    img4_out,img3_out = drawlines(img2,img1,lines2,pts2,pts1)

    img1_out = cv2.cvtColor(img1_out, cv2.COLOR_BGR2RGB)
    img2_out = cv2.cvtColor(img2_out, cv2.COLOR_BGR2RGB)
    img3_out = cv2.cvtColor(img3_out, cv2.COLOR_BGR2RGB)
    img4_out = cv2.cvtColor(img4_out, cv2.COLOR_BGR2RGB)
    return img1_out, img2_out, img3_out, img4_out
Example #5
def compute_distances_kpts_to_epilines(points_i, points_j,
                                       F: np.ndarray) -> np.ndarray:
    if F is None:
        return (np.zeros((points_i.shape[0], 2)) + np.inf)

    # Epipolar lines in image I of the points in image J
    lines_i = cv2.computeCorrespondEpilines(points_j.reshape(-1, 1, 2), 2,
                                            F).reshape(-1, 3)

    # Epipolar lines in image J of the points in image I
    lines_j = cv2.computeCorrespondEpilines(points_i.reshape(-1, 1, 2), 1,
                                            F).reshape(-1, 3)

    dist = []
    for k in range(points_i.shape[0]):
        # Params for image i
        xi, yi = points_i[k]
        ai, bi, ci = lines_i[k]

        # Params for image j
        xj, yj = points_j[k]
        aj, bj, cj = lines_j[k]

        di = np.abs(ai * xi + bi * yi + ci) / np.sqrt(ai * ai + bi * bi)
        dj = np.abs(aj * xj + bj * yj + cj) / np.sqrt(aj * aj + bj * bj)

        dist.append((di, dj))

    dist = np.array(dist)
    return dist
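The loop above evaluates the point-to-line distance |a*x + b*y + c| / sqrt(a^2 + b^2) one correspondence at a time. A vectorized sketch of the same computation, assuming points arrive as (N, 2) arrays and epilines as (N, 3) arrays of (a, b, c):

import numpy as np

def point_to_line_distances(points, lines):
    # points: (N, 2) array of (x, y); lines: (N, 3) array of (a, b, c)
    num = np.abs(lines[:, 0] * points[:, 0]
                 + lines[:, 1] * points[:, 1]
                 + lines[:, 2])
    return num / np.hypot(lines[:, 0], lines[:, 1])

# dist = np.stack([point_to_line_distances(points_i, lines_i),
#                  point_to_line_distances(points_j, lines_j)], axis=1)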
Example #6
    def draw_epipolar_lines(self, feat_mode="SURF"):
        """Draws epipolar lines

            This method computes and draws the epipolar lines of the two
            loaded images.

            :param feat_mode: whether to use rich descriptors for feature
                              matching ("surf") or optic flow ("flow")
        """
        self._extract_keypoints(feat_mode)
        self._find_fundamental_matrix()
        # Find epilines corresponding to points in right image (second image)
        # and drawing its lines on left image
        pts2re = self.match_pts2.reshape(-1, 1, 2)
        lines1 = cv2.computeCorrespondEpilines(pts2re, 2, self.F)
        lines1 = lines1.reshape(-1, 3)
        img3, img4 = self._draw_epipolar_lines_helper(self.img1, self.img2,
                                                      lines1, self.match_pts1,
                                                      self.match_pts2)

        # Find epilines corresponding to points in left image (first image) and
        # drawing its lines on right image
        pts1re = self.match_pts1.reshape(-1, 1, 2)
        lines2 = cv2.computeCorrespondEpilines(pts1re, 1, self.F)
        lines2 = lines2.reshape(-1, 3)
        img1, img2 = self._draw_epipolar_lines_helper(self.img2, self.img1,
                                                      lines2, self.match_pts2,
                                                      self.match_pts1)

        cv2.imshow("left", img1)
        cv2.imshow("right", img3)
        cv2.waitKey()
Example #7
    def DrawCorrespondEpilines(self, src, dst, pts1, pts2, F):
        """Draw Correspond Epilines on the image by Fundamental matrix

        Parameters
        ----------
        src : ndarray
            A HxW(x3) matrix of opencv image
        dst : ndarray
            A HxW(x3) matrix of opencv image
        pts1: List of cv2.KeyPoint
            keypoints matched of src image
        pts2: List of cv2.KeyPoint
            keypoints matched of dst image
        F: ndarray
            A 3x3 Matrix of Fundamental Matrix

        Returns
        -------
        draw: ndarray
            A HxW matrix of opencv image
        """
        lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
        lines1 = lines1.reshape(-1, 3)
        img1 = drawlines(src, lines1, pts1, pts2)

        lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
        lines2 = lines2.reshape(-1, 3)
        img2 = drawlines(dst, lines2, pts2, pts1)
        return img1, img2
Example #8
def draw_lines_show(img1, img2, uvmat, F):

    # Compute the epilines corresponding to the points in the right (second) image
    # and draw the computed epilines on the left image
    pts1 = np.array([[i[0], i[1]] for i in uvmat])

    pts2 = np.array([[i[2], i[3]] for i in uvmat])
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = draw_lines(img1, img2, lines1, pts1, pts2)

    # Compute the epilines corresponding to the points in the left (first) image
    # and draw the computed epilines on the right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = draw_lines(img2, img1, lines2, pts2, pts1)

    # Display the results
    #plt.subplot(121), plt.imshow(img5)
    #plt.subplot(122), plt.imshow(img3)
    #plt.savefig("epipolar.jpg")
    plt.imshow(img5)
    plt.savefig("epipolar_img1.jpg")
    plt.imshow(img3)
    plt.savefig("epipolar_img2.jpg")
Example #9
def draw_epilines(img1, img2, pt1, pt2, fmat):
    #convert the image to BGR form
    img1 = cvtColor(img1,COLOR_RGB2BGR)
    img2 = cvtColor(img2,COLOR_RGB2BGR)
    
    #get the size of the two images to calculate the end points
    weight1 = img1.shape[1]
    weight2 = img2.shape[1]
    
    #make the float into int
    pt1_x = np.int32(pt1[0])
    pt1_y = np.int32(pt1[1])
    p1 = (pt1_x,pt1_y)
    
    pt2_x = np.int32(pt2[0])
    pt2_y = np.int32(pt2[1])
    p2 = (pt2_x,pt2_y)
    
    #create point set to calculate the epilines
    pts_1 = [[p1[0],p1[1]],[0,0]]               
    pts_1 = np.matrix(pts_1)
    
    pts_2 = [[p2[0],p2[1]],[0,0]]               
    pts_2 = np.matrix(pts_2)
    
    #calculate epilines (whichImage is the index of the image the points come from)
    lines_2 = computeCorrespondEpilines(pts_1.reshape(-1,1,2), 1,fmat)
    lines_1 = computeCorrespondEpilines(pts_2.reshape(-1,1,2), 2,fmat)
    
    #draw2
    #start point in draw 2
    x_start_2 = 0
    y_start_2 = np.int32(-lines_2[0][0,2] /lines_2[0][0,1])
    pt_start_2 = (x_start_2, y_start_2)
    
    #end point in draw 2
    x_end_2 = weight2
    y_end_2 = np.int32((-lines_2[0][0,2] - lines_2[0][0,0] * x_end_2) /lines_2[0][0,1])
    pt_end_2 = (x_end_2, y_end_2)
    
    image_2 = circle(img2, p2 , 20, (0, 255, 0), -1)
    image_2 = line(img2, pt_start_2, pt_end_2, (0, 255, 0), 3) 
    draw2 = image_2
    
    #draw1
    #start point in draw 1
    x_start_1 = 0
    y_start_1 = np.int32(-lines_1[0][0,2] /lines_1[0][0,1])
    pt_start_1 = (x_start_1, y_start_1)
    
    #end point in draw 1
    x_end_1 = weight1
    y_end_1 = np.int32((-lines_1[0][0,2] - lines_1[0][0,0] * x_end_1) /lines_1[0][0,1])
    pt_end_1 = (x_end_1, y_end_1)
    
    image_1 = circle(img1, p1 , 20, (0, 255, 0), -1)
    image_1 = line(img1, pt_start_1, pt_end_1, (0, 255, 0), 3) 
    draw1 = image_1
    
    return draw1, draw2
Example #10
def draw_epilines(img1, img2, pts1, pts2, F12):
    # get epilines in the img1 w.r.t points in the img2
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F12)
    lines1 = lines1.reshape(-1, 3)
    # get epilines in the img2 w.r.t points in the img1
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F12)
    lines2 = lines2.reshape(-1, 3)

    h1, w1 = img1.shape[0], img1.shape[1]
    h2, w2 = img2.shape[0], img2.shape[1]

    num_pts = len(pts1)
    colors = np.random.randint(0, 255, (num_pts, 3))
    for r1, r2, pt1, pt2, color in zip(lines1, lines2, pts1, pts2, colors):
        color = tuple(color.tolist())
        # two end points of epiline in img1
        x1_l, y1_l = map(int, [0, -r1[2] / r1[1]])
        x1_r, y1_r = map(int, [w1, -(r1[2] + r1[0] * w1) / r1[1]])
        # two end points of epiline in img2
        x2_l, y2_l = map(int, [0, -r2[2] / r2[1]])
        x2_r, y2_r = map(int, [w2, -(r2[2] + r2[0] * w2) / r2[1]])
        # draw line
        img1 = cv2.line(img1, (x1_l, y1_l), (x1_r, y1_r), color, 1)
        img2 = cv2.line(img2, (x2_l, y2_l), (x2_r, y2_r), color, 1)
        # draw point
        img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
        img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
    return img1, img2
Example #12
def rectify(image_1,
            image_2):  # Rectify the images to make epipolar lines parallel

    pts_1, pts_2, H1, H2, F = features(image_1, image_2)

    pts_1 = np.int32(pts_1)
    pts_2 = np.int32(pts_2)

    rec1 = cv2.warpPerspective(image_1, H1, (720, 480))
    rec2 = cv2.warpPerspective(image_2, H2, (720, 480))

    F_rec = np.dot(np.dot(np.linalg.inv(H2).T, F), np.linalg.inv(H1))

    lines_1 = cv2.computeCorrespondEpilines(pts_1.reshape(-1, 1, 2), 1,
                                            F_rec).reshape(-1, 3)
    lines_2 = cv2.computeCorrespondEpilines(pts_2.reshape(-1, 1, 2), 2,
                                            F_rec).reshape(-1, 3)

    draw_epiline(rec1, lines_2)
    draw_epiline(rec2, lines_1)

    h_stack = np.hstack((rec1, rec2))
    cv2.imshow('rectify', h_stack)

    return rec1, rec2
Example #13
def display_epilines(img1, img2, x1, x2, F):
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    cv2.namedWindow('epipolar lines and matches at img1', cv2.WINDOW_NORMAL)
    cv2.namedWindow('epipolar lines and matches at img2', cv2.WINDOW_NORMAL)

    lines1 = cv2.computeCorrespondEpilines(x2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img3, img4 = draw_lines(img1, img2, lines1, x1, x2)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(x1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img5, img6 = draw_lines(img2, img1, lines2, x2, x1)

    if opencv:
        cv2.imshow('epipolar lines and matches at img1', img3)
        cv2.imshow('epipolar lines and matches at img2', img5)
        # ASCII(q) = 113, ASCII(esc) = 27, ASCII(space) = 32
        while True:
            key = cv2.waitKey(0) & 0xFF
            if key == 113 or key == 27:
                cv2.destroyWindow('epipolar lines and matches at img1')
                cv2.destroyWindow('epipolar lines and matches at img2')
                break
    else:
        plt.subplot(121), plt.imshow(img3)
        plt.subplot(122), plt.imshow(img5)
        plt.show()
Example #14
def createLine(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        print("Creating Line for " + param[0])
        print(param[1])
        if param[0] == "Frame1":
            imageIndex = 1
            otherIndex = 2
        else:
            imageIndex = 2
            otherIndex = 1

        color = (randrange(256), randrange(256) , randrange(256))

        point = np.float32([x, y])  # computeCorrespondEpilines expects float points
        line = cv2.computeCorrespondEpilines(point.reshape(-1,1,2), imageIndex, param[1])
        line = line[0][0]
        size = param[2].shape[1]
        x0, y0 = map(int, [0, -line[2]/line[1]])
        x1, y1 = map(int, [size, -(line[2]+(line[0]*float(size)))/line[1]])
        cv2.line(param[2], (x0, y0),(x1, y1),color, 2)

        point = np.float32([x1, y1])
        line = cv2.computeCorrespondEpilines(point.reshape(-1,1,2), otherIndex, param[1])
        line = line[0][0]
        x0, y0 = map(int, [0, -line[2]/line[1]])
        x1, y1 = map(int, [size, -(line[2]+(line[0]*float(size)))/line[1]])
        cv2.line(param[3], (x0, y0),(x1, y1),color, 2)
Example #15
def show_epilines(image1, img_points1, image2, img_points2, fundamental_mat):
    # Get the epilines of both images
    epipolarline_img1 = cv2.computeCorrespondEpilines(img_points1, 1, fundamental_mat).reshape(-1,3)
    epipolarline_img2 = cv2.computeCorrespondEpilines(img_points2, 2, fundamental_mat).reshape(-1,3)
    # Draw the epipolar lines
    # Epipolar lines of the first image drawn over the second
    epip1, epip2 = draw_epilines(image1, img_points1, image2, img_points2, epipolarline_img2)
    canvas1 = np.zeros((epip1.shape[0],epip1.shape[1]+epip2.shape[1], 3), dtype = np.uint8)
    fx.insert_img_into_other(img_src=epip2, img_dest=canvas1,
                          pixel_left_top_row=0, pixel_left_top_col=0,
                          substitute=True)
    fx.insert_img_into_other(img_src=epip1, img_dest=canvas1,
                          pixel_left_top_row=0, pixel_left_top_col=epip1.shape[1],
                          substitute=True)
    
    # Epipolar lines of the second image drawn over the first
    epip3, epip4 = draw_epilines(image2, img_points2, image1, img_points1, epipolarline_img1)
    canvas2 = np.zeros((epip3.shape[0],epip3.shape[1]+epip4.shape[1], 3), dtype = np.uint8)
    fx.insert_img_into_other(img_src=epip3, img_dest=canvas2,
                          pixel_left_top_row=0, pixel_left_top_col=0,
                          substitute=True)
    fx.insert_img_into_other(img_src=epip4, img_dest=canvas2,
                          pixel_left_top_row=0, pixel_left_top_col=epip3.shape[1],
                          substitute=True)
    # Show both images
    fx.show_img(canvas1, 'Epilineas')
    fx.show_img(canvas2, 'Epilineas')
    
    return epipolarline_img1, epipolarline_img2
Example #16
def compute_and_drawlines(img1, img2, pts1, pts2, F):
    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    print(pts1.shape)
    lines1 = lines1.reshape(-1, 3)
    print('Shape of lines: ', lines1.shape)
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    ''' img1 - image on which we draw the epilines for the points in img2
    lines - corresponding epilines '''
    r, c = img1.shape
    img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
    img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
    '''for r,pt1,pt2 in zip(lines1,pts1,pts2):
        color = tuple(np.random.randint(0,255,3).tolist())
        x0,y0 = map(int, [0, -r[2]/r[1] ])
        x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
        img1 = cv.line(img1, (x0,y0), (x1,y1), color,1)
        img1 = cv.circle(img1,tuple(pt1),5,color,-1)
        img2 = cv.circle(img2,tuple(pt2),5,color,-1)

    imgplot = plt.imshow(img1)
    #plt.show()
    imgplot = plt.imshow(img2)
    #plt.show()'''
    return (lines1, lines2)
Example #17
def draw_epilines(img1, img2, pt1, pt2, fmat):
    #...
    # whichImage is the index of the image the point belongs to
    left_line = computeCorrespondEpilines(
        np.array(pt1).reshape(-1, 1, 2), 1, fmat)
    left_line = left_line.reshape(3, -1)
    right_line = computeCorrespondEpilines(
        np.array(pt2).reshape(-1, 1, 2), 2, fmat)
    right_line = right_line.reshape(3, -1)
    r1, k1, r2, k2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1]
    val1, val2 = [0, int(-1 * right_line[2] / right_line[1])]
    val3, val4 = [
        k1, int(-(right_line[2] + right_line[0] * k1) / right_line[1])
    ]
    fig1 = line(img1, (val1, val2), (val3, val4), color=(0, 0, 0), thickness=4)
    fig1 = circle(img1, (int(pt1[0]), int(pt1[1])),
                  radius=10,
                  color=(200, 50, 40),
                  thickness=-5)
    val5, val6 = [0, int(-left_line[2] / left_line[1])]
    val7, val8 = [k2, int(-(left_line[2] + left_line[0] * k2) / left_line[1])]
    fig2 = line(img2, (val5, val6), (val7, val8), color=(0, 0, 0), thickness=4)
    fig2 = circle(img2, (int(pt2[0]), int(pt2[1])),
                  radius=10,
                  color=(200, 50, 40),
                  thickness=-5)
    return fig1, fig2
Example #18
def do_epilines(F,
                left_img,
                left_idx,
                left_pts,
                right_img,
                right_idx,
                right_pts,
                prefix='epilines',
                p=dflt_params):
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv.computeCorrespondEpilines(right_pts.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    print_message(f"Computed Epilines lines1:\n{lines1}", params=p)
    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv.computeCorrespondEpilines(left_pts.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    print_message(f"Computed Epilines lines2:\n{lines2}", params=p)

    epiline_images = []
    left_lines, _ = cv_drawlines(left_img, right_img, lines1, left_pts,
                                 right_pts)
    epiline_images.append((left_lines, f"{prefix}_{left_idx}-{right_idx}.bmp"))
    right_lines, _ = cv_drawlines(right_img, left_img, lines2, right_pts,
                                  left_pts)
    epiline_images.append(
        (right_lines, f"{prefix}_{right_idx}-{left_idx}.bmp"))

    output = [cv_save(f, i, params=p) for i, f in epiline_images]

    return output
Example #19
def knnmatchandfundamentalmatrix(image1, image2, keypoints_tsucuba_left,
                                 descriptors_tsucuba_left,
                                 keypoints_tsucuba_right,
                                 descriptors_tsucuba_right):
    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(descriptors_tsucuba_left,
                               descriptors_tsucuba_right,
                               k=2)

    good_match = []
    good_match_new = []
    good_match_random = []
    pts1 = []
    pts2 = []
    new_pts1 = []
    new_pts2 = []
    for i, j in matches:
        if i.distance < 0.75 * j.distance:
            good_match.append([i])
            good_match_new.append(i)
            pts2.append(keypoints_tsucuba_right[i.trainIdx].pt)
            pts1.append(keypoints_tsucuba_left[i.queryIdx].pt)

    knn_image = cv2.drawMatchesKnn(image1, keypoints_tsucuba_left, image2,
                                   keypoints_tsucuba_right, good_match, None)
    cv2.imwrite('task2_matches_knn.jpg', knn_image)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.RANSAC)
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    print(F)
    for i in range(10):
        rand = random.randint(0, len(pts1) - 1)
        new_pts1.append(pts1[rand])
        new_pts2.append(pts2[rand])

    new_pts1 = np.int32(new_pts1)
    new_pts2 = np.int32(new_pts2)
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(new_pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(image1, image2, lines1, new_pts1, new_pts2)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(new_pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(image2, image1, lines2, new_pts2, new_pts1)

    cv2.imwrite('task2_epi_right.jpg', img3)
    cv2.imwrite('task2_epi_left.jpg', img5)

    stereo = cv2.StereoBM_create(numDisparities=96, blockSize=31)
    disparity = stereo.compute(image1, image2)
    cv2.imwrite('task2_disparity.jpg', disparity)

    return
Example #20
def compute_fundamental_matrix(img1_points, img2_points, img1=[], img2=[]):
    '''
    By default openCV uses RANSAC with an 8 point sample.
    F is the fundamental matrix.
    Mask is a column vector with as many rows as points in imgi_points, if mask[j] == 0,
    then img1_points[j] and img2_points[j] are no longer considered as true correspondences.
    '''
    F, mask = cv.findFundamentalMat(img1_points, img2_points)
    # Getting rid of the outliers.
    img1_points = img1_points[mask.flatten() == 1]
    img2_points = img2_points[mask.flatten() == 1]

    # Computing epipolar lines in order to compute each epipole.
    lines_img1 = cv.computeCorrespondEpilines(img2_points.reshape(-1, 1, 2), 2,
                                              F)
    lines_img2 = cv.computeCorrespondEpilines(img1_points.reshape(-1, 1, 2), 1,
                                              F)
    # Computing the epipole in the first image.
    epipole1 = np.cross(lines_img1[0], lines_img1[1]).flatten()
    if len(img1) != 0:
        draw_epilines(img1, lines_img1, name='epilines1.png')
    # Computing the epipole in the second image.
    epipole2 = np.cross(lines_img2[0], lines_img2[1]).flatten()
    if len(img2) != 0:
        draw_epilines(img2, lines_img2, name='epilines2.png')
    return F, img1_points.T, img2_points.T, epipole1, epipole2
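Example #20 relies on the homogeneous-coordinates identity that two lines l1 and l2 intersect at their cross product l1 x l2, and on the fact that every epipolar line in an image passes through that image's epipole. A quick sanity-check sketch (using the variables from the function above):

import numpy as np

# each epiline l should satisfy l . e = 0 at the epipole,
# up to floating-point error and the epipole's homogeneous scale
lines = lines_img1.reshape(-1, 3)
print(np.abs(lines @ epipole1).max())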
Example #21
    def __epipolarsAverageDistanceTest (self, F1, F2):
        logger.info ('\nepipolarsAverageDistanceTest\n')
        logger.debug ('F1: ' + str (F1) + '\nF2: ' + '\ntype of F1: ' + str (type (F1)) + '\ntype of F2: ' + str (type (F2)))

        testPoints = np.int32 ([(rand.randint (0, 1000), rand.randint (0, 1000)) for i in range (1000)]).reshape (-1,1,2)
        lines1 = cv2.computeCorrespondEpilines (testPoints, 1, F1).reshape (-1,3)
        lines2 = cv2.computeCorrespondEpilines (testPoints, 1, F2).reshape (-1,3)
    
        logger.debug ('testPoints: ' + str (testPoints[:10]))
        logger.debug ('lines1:' + str (lines1[:10]) + '\nlines2:' + str (lines2[:10]))

        points1 = [-line[2] / line[1] for line in lines1]
        points2 = [-line[2] / line[1] for line in lines2]

        logger.debug ('points1: ' + str (points1[:10]))
        logger.debug ('points2: ' + str (points2[:10]))

        distances = [abs (point1 - point2) for (point1, point2) in zip (points1, points2)]
        logger.info ('distances: ' + str (distances[:10]))

        average1 = np.mean (distances)
        average2 = np.mean ([abs (distance - average1) for distance in distances])

        logger.debug ('average1: ' + str (average1))
        logger.debug ('average2: ' + str (average2))

        return (average1, average2)
Example #22
def draw_epipolar_lines(im1, im2, F, points1, points2):
    """
    We followed the official opencv tutorial:
    https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.html
    Draws the epipolar lines for the best fundamental matrix F and the keypoints for `img1` and `img2`.
    :param im1:
    :param im2:
    :param F:
    :param points1:
    :param points2:
    :return:
    """
    # convert from RGB to BGR (OpenCV's channel order)
    im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2BGR)
    im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2BGR)
    # cv2.circle needs int points and not float -> type conversion

    points1 = points1.astype(int)
    points2 = points2.astype(int)
    # get epilines for first image that correspond to second image and draw them on the left
    lines1 = cv2.computeCorrespondEpilines(points2.reshape(-1, 1, 2), 2, F)
    # from list of lists to list
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = plot_epipolar_line(im1, im2, lines1, points1, points2)
    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(points1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = plot_epipolar_line(im2, im1, lines2, points2, points1)
    # plot the two images with the epipolar lines
    plt.subplot(121)
    plt.imshow(img5)
    plt.subplot(122)
    plt.imshow(img3)
    plt.show()
Example #23
def check_calibration(image_points_left, image_points_right, left_camMat,
                      right_camMat, leftDistC, rightDistC, F, object_point,
                      img_count):

    sides = "left", "right"
    which_image = {sides[0]: 1, sides[1]: 2}
    undistorted, lines = {}, {}

    undistorted["left"] = cv2.undistortPoints(
        np.concatenate(image_points_left).reshape(-1, 1, 2),
        left_camMat,
        leftDistC,
        P=left_camMat)
    lines["left"] = cv2.computeCorrespondEpilines(undistorted["left"],
                                                  which_image["left"], F)

    undistorted["right"] = cv2.undistortPoints(
        np.concatenate(image_points_right).reshape(-1, 1, 2),
        right_camMat,
        leftDistC,
        P=right_camMat)
    lines["right"] = cv2.computeCorrespondEpilines(undistorted["right"],
                                                   which_image["right"], F)

    total_error = 0
    this_side, other_side = sides
    for side in sides:
        for i in range(len(undistorted[side])):
            total_error += abs(
                undistorted[this_side][i][0][0] * lines[other_side][i][0][0] +
                undistorted[this_side][i][0][1] * lines[other_side][i][0][1] +
                lines[other_side][i][0][2])
        other_side, this_side = sides
    total_points = img_count * len(object_point)
    return total_error / total_points
Example #24
def drawEpilines(img1, img2, pts1, pts2, F):
    # Rectify images
    ret, h1, h2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F,
                                                (img1.shape[1], img1.shape[0]))

    img1_ = img1.copy()
    img2_ = img2.copy()

    # Calculate and draw the epiplines in img1
    lines1 = cv2.computeCorrespondEpilines(pts2, 2, F)
    lines1 = lines1.reshape(-1, 3)
    imgLeft = drawlines(img1_, lines1, pts1, pts2)

    # Calculate and draw the epiplines in img2
    lines2 = cv2.computeCorrespondEpilines(pts1, 1, F)
    lines2 = lines2.reshape(-1, 3)
    imgRight = drawlines(img2_, lines2, pts2, pts1)

    imgLeftRectified = cv2.warpPerspective(imgLeft, h1,
                                           (img1.shape[1], img1.shape[0]))
    imgRightRectified = cv2.warpPerspective(imgRight, h2,
                                            (img2.shape[1], img2.shape[0]))

    imgLeftRectifiedNoEpi = cv2.warpPerspective(img1, h1,
                                                (img1.shape[1], img1.shape[0]))
    imgRightRectifiedNoEpi = cv2.warpPerspective(
        img2, h2, (img2.shape[1], img2.shape[0]))

    return imgLeft, imgRight, imgLeftRectified, imgRightRectified, imgLeftRectifiedNoEpi, imgRightRectifiedNoEpi
Example #26
def drawEpilines():
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    matches_random = np.random.permutation(matches)[:10]

    good_random = []
    pts1_rand = []
    pts2_rand = []
    for m, n in matches_random:
        good_random.append(m)
        pts1_rand.append(kp1[m.queryIdx].pt)
        pts2_rand.append(kp2[m.trainIdx].pt)

    pts1_rand = np.int32(pts1_rand)
    pts2_rand = np.int32(pts2_rand)
    F1, mask1 = cv2.findFundamentalMat(pts1_rand, pts2_rand, cv2.FM_LMEDS)

    linesLeft = cv2.computeCorrespondEpilines(pts2_rand.reshape(-1, 1, 2), 2,
                                              F1)
    linesLeft = linesLeft.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, linesLeft, pts1_rand, pts2_rand)

    linesRight = cv2.computeCorrespondEpilines(pts1_rand.reshape(-1, 1, 2), 1,
                                               F1)
    linesRight = linesRight.reshape(-1, 3)
    img7, img8 = drawlines(img2, img1, linesRight, pts2_rand, pts1_rand)

    cv2.imwrite('task2_epi_left.jpg', img5)
    cv2.imwrite('task2_epi_right.jpg', img7)
Example #27
def sift3dImageAlignment(img1, img2):
    _, kp1, des1 = sift_kp(img1)
    _, kp2, des2 = sift_kp(img2)
    goodMatch = get_good_match(des1, des2)
    pts1 = np.float32([kp1[m.queryIdx].pt
                       for m in goodMatch]).reshape(-1, 1, 2)
    pts2 = np.float32([kp2[m.trainIdx].pt
                       for m in goodMatch]).reshape(-1, 1, 2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
    if mask is not None:
        pts1 = pts1[mask.ravel() == 1]
        pts2 = pts2[mask.ravel() == 1]
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image

    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

    cv2.namedWindow('Resultx', cv2.WINDOW_NORMAL)
    cv2.imshow('Resultx', img3)
    cv2.namedWindow('Resulty', cv2.WINDOW_NORMAL)
    cv2.imshow('Resulty', img5)
Example #28
def draw_ep(M, img1, img2):
    # 2D correspondences: columns 0-1 are points in img1, columns 2-3 in img2
    fp1 = M[:, 0:2]
    fp2 = M[:, 2:4]

    # transpose to 2xN
    fp1 = fp1.T
    fp2 = fp2.T

    # convert the 2D coordinates to homogeneous coordinates
    fp1 = np.insert(fp1, 2, 1, axis=0)
    fp2 = np.insert(fp2, 2, 1, axis=0)

    F = compute_F_norm(M)

    # Draw the epipolar lines in the left image for the points in the right image
    # (computeCorrespondEpilines expects Nx1x2 inhomogeneous points, so drop the 1s)
    lines1 = cv2.computeCorrespondEpilines(fp2[:2].T.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, fp1[:2].T, fp2[:2].T)

    # Draw the epipolar lines in the right image for the points in the left image
    lines2 = cv2.computeCorrespondEpilines(fp1[:2].T.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, fp2[:2].T, fp1[:2].T)

    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img3)
    plt.show()
Example #29
    def disp_epipoles(self, img0, img1, F):
        img0 = img0.copy()
        img1 = img1.copy()

        def drawlines(img, lines, pts):
            r, c, _ = img.shape
            for r, pt in zip(lines, pts):
                color = tuple(np.random.randint(0, 255, 3).tolist())
                x0, y0 = map(int, [0, -r[2] / r[1]])
                x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
                cv2.line(img, (x0, y0), (x1, y1), color, 1)
                cv2.circle(img, tuple(pt), 5, color, -1)
            return img

        epilines1 = cv2.computeCorrespondEpilines(
            self.sorted_keypts_1.reshape(-1, 1, 2), 2, F)
        epilines1 = epilines1.reshape(-1, 3)
        img0_ = drawlines(img0, epilines1, self.sorted_keypts_0)

        epilines2 = cv2.computeCorrespondEpilines(
            self.sorted_keypts_0.reshape(-1, 1, 2), 1, F)
        epilines2 = epilines2.reshape(-1, 3)
        img1_ = drawlines(img1, epilines2, self.sorted_keypts_1)

        cv2.imshow(f"img{self.img_count}", img0_)
        self.img_count += 1
        cv2.imshow(f"img{self.img_count}", img1_)
        self.img_count += 1
        cv2.waitKey(0)
Example #30
    def computeEpipole(self,  clickLoc,  I,  d=[-100, 100], n=20):
        pt = np.array([[clickLoc.x()], [clickLoc.y()]])
        if self.mode == 'openCV':
            if I == 'L':
                lineParams = cv2.computeCorrespondEpilines(
                    pt, 1, self.calData['F'])
            elif I == 'R':
                lineParams = cv2.computeCorrespondEpilines(
                    pt, 2, self.calData['F'])
            print(lineParams)
        else:
            pass
        h = self.normalizePixel(
            pt, self.calData['fc_left'], self.calData['cc_left'], self.calData['kc_left'], self.calData['alpha_c_left'])
        uo = np.hstack((h[0], h[1], 1))
        T = self.calData['T']
        R = self.calData['R']
        S = np.array([[0, -T[2], T[1]], [T[2], 0, -T[0]], [-T[1], T[0], 0]])

        l_epipole = np.dot(np.dot(S, R), uo.transpose())

        KK_right = np.array([
            [self.calData['fc_right'][0][0],
             self.calData['alpha_c_right'][0][0] * self.calData['fc_right'][0][0],
             self.calData['cc_right'][0][0]],
            [0,
             self.calData['fc_right'][1][0],
             self.calData['cc_right'][1][0]],
            [0,  0,  1]])

        if np.sqrt(np.dot(l_epipole[1], l_epipole[1])) > np.sqrt(np.dot(l_epipole[0], l_epipole[0])):
            limit_x_pos = (
                (pt[1][0] + d[1]) - self.calData['cc_right'][0]) / self.calData['fc_right'][0]
            limit_x_neg = (
                (pt[1][0] - d[0]) - self.calData['cc_right'][0]) / self.calData['fc_right'][0]

            x_list = (limit_x_pos - limit_x_neg) * (np.array(range(n)).astype('float') / (float(n) - 1)) + limit_x_neg

            pt = np.cross(np.tile(np.array([l_epipole]).T, (1, n)).T, np.array(
                [np.ones((1, n))[0], np.zeros((1, n))[0], -x_list]).T).T

        else:

            limit_y_pos = (
                (pt[1][0] + d[1]) - self.calData['cc_right'][1]) / self.calData['fc_right'][1]
            limit_y_neg = (
                (pt[1][0] - d[0]) - self.calData['cc_right'][1]) / self.calData['fc_right'][1]

            y_list = (limit_y_pos - limit_y_neg) * \
                (np.arange(n, dtype=float) / (n - 1)) + limit_y_neg

            pt = np.cross(np.tile(np.array([l_epipole]).T, (1, n)).T, np.array(
                [np.zeros((1, n))[0], np.ones((1, n))[0], -y_list]).T).T

        pt = np.vstack((pt[0, :] / pt[2, :], pt[1, :] / pt[2, :]))
        ptd = self.applyDistortion(pt, self.calData['kc_right'])
        epipole = np.dot(KK_right,  np.vstack((ptd, np.ones((1, n)))))

        return epipole[0:2, :]
Example #31
def epipolarlines(index):
    img1 = cv.imread('newimageG' + str(index) + '.jpg', 0)
    img2 = cv.imread('newimageD' + str(index) + '.jpg', 0)

    sift = cv.xfeatures2d.SIFT_create()

    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    def drawlines(img1, img2, lines, pts1, pts2):
        ''' img1 - image on which we draw the epilines for the points in img2
            lines - corresponding epilines '''
        r, c = img1.shape
        img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
        img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
        for r, pt1, pt2 in zip(lines, pts1, pts2):
            color = tuple(np.random.randint(0, 255, 3).tolist())
            x0, y0 = map(int, [0, -r[2] / r[1]])
            x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
            img1 = cv.line(img1, (x0, y0), (x1, y1), color, 1)
            img1 = cv.circle(img1, tuple(pt1), 5, color, -1)
            img2 = cv.circle(img2, tuple(pt2), 5, color, -1)
        return img1, img2

    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img3)
    plt.show()
    return
Example #32
File: Blatt_08.py Project: MarkusClln/CVIS
def matches_2():
    descriptors_cv2_1 = to_cv2_di(descriptors3)
    descriptors_cv2_2 = to_cv2_di(descriptors4)

    keypoints_cv2_1 = to_cv2_kplist(detected_keypoints3)
    keypoints_cv2_2 = to_cv2_kplist(detected_keypoints4)

    bf = cv2.BFMatcher()

    img1 = cv2.imread(image_pathes[2])
    img2 = cv2.imread(image_pathes[3])

    matches = bf.knnMatch(descriptors_cv2_1, descriptors_cv2_2, k=2)
    good = []
    pts1 = []
    pts2 = []
    threshold_matching = 0.7
    for m, n in matches:
        if m.distance < threshold_matching * n.distance:
            good.append([m])
            pts1.append(keypoints_cv2_1[m.queryIdx].pt)
            pts2.append(keypoints_cv2_2[m.trainIdx].pt)

    print("matches 2 with 0.8: " + str(len(good)))
    img_out = cv2.drawMatchesKnn(img1, keypoints_cv2_1, img2, keypoints_cv2_2,
                                 good, None)
    cv2.imwrite("out\\.2_0,8png", img_out)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    #create FundamentalMatrix
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    #fx = fy = 721.5
    #cx = 690.5
    #cy = 172.8
    #F[0][0] = fx
    #F[0][2] = cx
    #F[1][1] = fy
    #F[1][2] = cy
    # F = np.matrix([[fx, 0, cx], [0, fy, cy], [0, 0, 0]])

    # select only the inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    lines = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines = lines.reshape(-1, 3)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    img1, img2 = drawlines(gray1, gray2, lines, pts1, pts2)

    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(gray2, gray1, lines2, pts2, pts1)
    #cv2.imshow('img', img1)
    cv2.imwrite("out\\3_08.png", img1)
    #cv2.waitKey(8000)
    #cv2.imshow('img', img3)
    cv2.imwrite("out\\4_08.png", img3)
Example #33
def EpipolarGeometry(pts1, pts2, F, maskF, FT, maskE):
    ############ to adapt ##########################
    img1 = cv.pyrDown(cv.imread('Images/leftT2.jpg', 0))
    img2 = cv.pyrDown(cv.imread('Images/rightT2.jpg', 0))
    #################################################
    r, c = img1.shape

    # Select the inlier points using the fundamental-matrix mask
    pts1F = pts1[maskF.ravel() == 1]
    pts2F = pts2[maskF.ravel() == 1]

    # find the epipolar lines in the right image using the fundamental matrix
    lines1 = cv.computeCorrespondEpilines(pts2F.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1F, pts2F)
    # find the epipolar lines in the left image using the fundamental matrix
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
    plt.figure('Fright')
    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img6)
    plt.figure('Fleft')
    plt.subplot(121), plt.imshow(img4)
    plt.subplot(122), plt.imshow(img3)

    # Select the inlier points using the essential-matrix mask
    pts1 = pts1[maskE.ravel() == 1]
    pts2 = pts2[maskE.ravel() == 1]
    # find the epipolar lines in the right image using the essential matrix
    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, FT)
    lines1 = lines1.reshape(-1, 3)
    img5T, img6T = drawlines(img1, img2, lines1, pts1, pts2)
    plt.figure('FTright')
    plt.subplot(121), plt.imshow(img5T)
    plt.subplot(122), plt.imshow(img6T)
    # find the epipolar lines in the left image using the essential matrix
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, FT)
    lines2 = lines2.reshape(-1, 3)
    img3T, img4T = drawlines(img2, img1, lines2, pts2, pts1)
    plt.figure('FTleft')
    plt.subplot(121), plt.imshow(img4T)
    plt.subplot(122), plt.imshow(img3T)
    plt.show()

    # compute the homographies that map corresponding epipolar lines
    # onto matching image rows
    retval, H1, H2 = cv.stereoRectifyUncalibrated(pts1, pts2, F, (c, r))
    print('H1\n', H1)
    print('H2\n', H2)
    # Apply a perspective transform using the homography matrices
    im_dst1 = cv.warpPerspective(img1, H1, (c, r))
    im_dst2 = cv.warpPerspective(img2, H2, (c, r))
    cv.namedWindow('left', 0)
    cv.imshow('left', im_dst1)
    cv.namedWindow('right', 0)
    cv.imshow('right', im_dst2)
    cv.waitKey(1)
Example #34
def calculate_cost(cam1, person1, cam2, person2):
    """ calculate the epipolar distance between two humans
    :param cam1:
    :param person1:
    :param cam2:
    :param person2:
    :return:
    """
    F = get_fundamental_matrix(cam1.P, cam2.P)
    J = len(person1)
    assert J == len(person2)

    # drop all points that are -1 -1 (not visible)
    pts1 = []
    pts2 = []
    weights1 = []
    weights2 = []
    for jid in range(J):
        x1, y1, w1 = person1[jid]
        x2, y2, w2 = person2[jid]
        if x1 >= 0 and x2 >= 0:
            pts1.append((x1, y1))
            weights1.append(w1)
            pts2.append((x2, y2))
            weights2.append(w2)
    weights1 = np.clip(weights1, a_min=0, a_max=1)
    weights2 = np.clip(weights2, a_min=0, a_max=1)

    if len(pts1) == 0:
        return np.finfo(np.float32).max

    pts1 = np.array(pts1)
    pts2 = np.array(pts2)

    epilines_1to2 = np.squeeze(
        cv2.computeCorrespondEpilines(pts1, 1, F))

    epilines_2to1 = np.squeeze(
        cv2.computeCorrespondEpilines(pts2, 2, F))

    total = 0

    n_pairs = len(pts1)
    assert n_pairs == len(pts2)

    if n_pairs == 1:
        epilines_1to2 = np.expand_dims(epilines_1to2, axis=0)
        epilines_2to1 = np.expand_dims(epilines_2to1, axis=0)

    for p1, l1to2, w1, p2, l2to1, w2 in zip(
            pts1, epilines_1to2, weights1,
            pts2, epilines_2to1, weights2):
        d1 = gm.line_to_point_distance(*l1to2, *p2)
        d2 = gm.line_to_point_distance(*l2to1, *p1)
        total += d1 + d2
    return total / n_pairs  # normalize
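Example #34 depends on an external gm.line_to_point_distance; judging from the unpacked call gm.line_to_point_distance(*l1to2, *p2), a plausible minimal implementation (an assumption about that helper, matching the distance formula spelled out in Example #5) would be:

import numpy as np

def line_to_point_distance(a, b, c, x, y):
    # distance from the point (x, y) to the line a*x + b*y + c = 0
    return abs(a * x + b * y + c) / np.sqrt(a * a + b * b)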
Example #35
File: Util.py Project: zo7/comet-maps
def draw_epilines(img1, kp1, img2, kp2, matches, fund):
    '''
    Draws epilines between two images.

    Args:
        ...
    '''

    _, cols, _ = img1.shape

    # Place images side by side
    img = combine_images(img1, img2)

    colors = random_colors()

    pts1 = np.float32(
        [kp1[m.queryIdx].pt for m in matches]
    ).reshape(-1, 1, 2)
    pts2 = np.float32(
        [kp2[m.trainIdx].pt for m in matches]
    ).reshape(-1, 1, 2)

    lines2 = cv2.computeCorrespondEpilines(pts1, 1, fund)
    lines1 = cv2.computeCorrespondEpilines(pts2, 2, fund)

    try:
        # Draw matches
        for i in range(0, len(pts1)):

            color = colors[i%len(colors)]

            x1, y1 = int(pts1[i][0][0]), int(pts1[i][0][1])
            x2, y2 = int(pts2[i][0][0]), int(pts2[i][0][1])

            x2 += cols

            a1, b1, c1 = lines1[i][0]
            a2, b2, c2 = lines2[i][0]

            cv2.circle(img, (x1,y1), 8, color, 2)
            cv2.circle(img, (x2,y2), 8, color, 2)

            y11 =          -c1  / b1 # At x = 0
            y12 = (-a1*cols-c1) / b1 # At x = col

            y21 =          -c2  / b2 # At x = 0
            y22 = (-a2*cols-c2) / b2 # At x = col

            cv2.line(img, (0,    int(y11)), (cols,   int(y12)), color, 1)
            cv2.line(img, (cols, int(y21)), (2*cols, int(y22)), color, 1)

        return img

    except Exception:
        print('Warning: Could not create epilines')
        return None
Example #36
def shi_tomasi(gray):
    # image: source image
    # maxCorners: maximum number of corners to return
    # qualityLevel: corners with a quality below this level are rejected
    # minDistance: minimum allowed distance between returned corners
    corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
    # computeCorrespondEpilines would need (points, whichImage, F) arguments here,
    # e.g. cv2.computeCorrespondEpilines(corners.reshape(-1, 1, 2), 1, F)
    # the result is a doubly bracketed array such as [[311., 250.]]
    corners = np.int0(corners)
    return corners
Example #37
def epipolar_geometry(frame1, frame2):
    #sift = cv2.SIFT()

    # Find the keypoints and descriptors with SIFT
    #kp1, des1 = sift.detectAndCompute(frame1, None)
    #kp2, des2 = sift.detectAndCompute(frame2, None)

    # Trying ORB instead of SIFT
    orb = cv2.ORB_create()

    kp1, des1 = orb.detectAndCompute(frame1, None)
    kp2, des2 = orb.detectAndCompute(frame2, None)

    des1, des2 = map(numpy.float32, (des1, des2))

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0 
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good, pts1, pts2 = [], [], []

    # Ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)

    pts1 = numpy.float32(pts1)
    pts2 = numpy.float32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img1, _ = drawlines(frame1, frame2, lines1, pts1, pts2)
    
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img2, _ = drawlines(frame2, frame1, lines2, pts2, pts1)

    matplotlib.pyplot.subplot(121)
    matplotlib.pyplot.imshow(img1)
    matplotlib.pyplot.subplot(122)
    matplotlib.pyplot.imshow(img2)
    matplotlib.pyplot.show()

    return F, mask
Example #38
	def drawEpipolarLines(Xp_1,Xp_2,F,im_1,im_2):
		F = np.mat(F)
		#get epipolar lines
		lines1 = cv2.computeCorrespondEpilines(Xp_2.reshape(-1,1,2), 2,F)
		lines1 = lines1.reshape(-1,3)
		img3,img4 = clsReconstruction.drawlines(im_1,im_2,lines1,Xp_1,Xp_2)

		lines2 = cv2.computeCorrespondEpilines(Xp_1.reshape(-1,1,2), 1,F)
		lines2 = lines2.reshape(-1,3)
		img5, img6 = clsReconstruction.drawlines(im_2, im_1, lines2,Xp_2, Xp_1)

		plt.subplot(121),plt.imshow(img3)
		plt.subplot(122),plt.imshow(img5)
		plt.show()
Example #39
    def check_calibration(self, calibration):
        """
        Check calibration quality by computing average reprojection error.

        First, undistort detected points and compute epilines for each side.
        Then compute the error between the computed epipolar lines and the
        position of the points detected on the other side for each point and
        return the average error.
        """
        sides = "left", "right"
        which_image = {sides[0]: 1, sides[1]: 2}
        undistorted, lines = {}, {}
        for side in sides:
            undistorted[side] = cv2.undistortPoints(
                         np.concatenate(self.image_points[side]).reshape(-1,
                                                                         1, 2),
                         calibration.cam_mats[side],
                         calibration.dist_coefs[side],
                         P=calibration.cam_mats[side])
            lines[side] = cv2.computeCorrespondEpilines(undistorted[side],
                                              which_image[side],
                                              calibration.f_mat)
        total_error = 0
        this_side, other_side = sides
        for side in sides:
            for i in range(len(undistorted[side])):
                total_error += abs(undistorted[this_side][i][0][0] *
                                   lines[other_side][i][0][0] +
                                   undistorted[this_side][i][0][1] *
                                   lines[other_side][i][0][1] +
                                   lines[other_side][i][0][2])
            other_side, this_side = sides
        total_points = self.image_count * len(self.object_points)
        return total_error / total_points
Example #40
File: project.py Project: basveeling/cv2
 def print_epilines(self, Fcv, i_next, i_prev, points1, points2):
     lines1 = cv2.computeCorrespondEpilines(np.float32(points2).reshape(-1, 1, 2), 2, Fcv)
     lines1 = lines1.reshape(-1, 3)
     img5, img6 = drawlines(self.images[i_prev], self.images[i_next], lines1, np.int32(points1),
                            np.int32(points2))
     plt.subplot(121), plt.imshow(img5)
     plt.subplot(122), plt.imshow(img6)
     plt.show()
Example #41
File: util.py Project: minhnh/hbrs_courses
def draw_epilines(img1, img2, points_img1, points_img2, fundamental_matrix):
    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(points_img2.reshape(-1, 1, 2), 2, fundamental_matrix)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, points_img1, points_img2)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(points_img1.reshape(-1, 1, 2), 1, fundamental_matrix)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, points_img2, points_img1)

    plt.figure(figsize=(20, 20))
    plt.subplot(211), plt.imshow(img5)
    plt.subplot(212), plt.imshow(img3)
    plt.show()
    return lines1, lines2
Example #42
File: draw.py Project: caomw/RECONSTRUCT
def draw_epilines(src_pts, dst_pts, img1, img2, F, mask):
	# select only inlier points
	img1_pts = src_pts[mask.ravel()==1]
	img2_pts = dst_pts[mask.ravel()==1]

	# find epilines corresponding to points in the 2nd image and draw them on the 1st image
	lines1 = cv2.computeCorrespondEpilines(img2_pts.reshape(-1,1,2), 2, F)
	lines1 = lines1.reshape(-1,3)
	img5, img6 = draw_lines(img1, img2, lines1, img1_pts, img2_pts)

	# find epilines corresponding to points in the 1st image and draw them on the 2nd image
	lines2 = cv2.computeCorrespondEpilines(img1_pts.reshape(-1,1,2), 1, F)
	lines2 = lines2.reshape(-1,3)
	img3, img4 = draw_lines(img2, img1, lines2, img2_pts, img1_pts)

	plt.subplot(121), plt.imshow(img5)
	plt.subplot(122), plt.imshow(img3)
	plt.show()
Example #43
 def get_movement_fundamental(self, pts_new, pts_old):
     '''get movement between images based on matched points'''
     pts_old = np.float32(pts_old) #convert to floats
     pts_new = np.float32(pts_new)
     F = cv2.findFundamentalMat(pts_old, pts_new, method=cv2.FM_LMEDS) #find fundamental matrix :(
     #print F[0]
     #get epipolar lines through all of the key points
     lines = cv2.computeCorrespondEpilines(pts_old.reshape(-1, 1, 2), 1, F[0])
     return lines.reshape(-1, 3) #return the lines
Example #44
def match(img1, img2, K, distort):
	#plotter.plot2(img1)
	timeStart = time.time()
	img1 = cv2.undistort(img1,K,distort)
	img2 = cv2.undistort(img2,K,distort)
	pts1, pts2, des1, des2 = correspondences.getCorrespondences(img1,img2)
	print("Time for correspondences: "+str(time.time()-timeStart))
	if(len(pts1)<8):
		print("ERROR: fewer than 8 correspondences")
		return
	timeStart = time.time()
	F, mask = computervision.findFundamentalMatrix(K,pts1,pts2)
	print("Time for Fundamental: "+str(time.time()-timeStart))
	#F = F/np.linalg.norm(F)
	pts1 = pts1[mask.ravel()==1]
	pts2 = pts2[mask.ravel()==1]
	des1 = [des1[ind] for ind, x in enumerate(mask) if x==1]
	des2 = [des2[ind] for ind, x in enumerate(mask) if x==1]
	timeStart = time.time()
	if(pts1.shape[0]>8):
		F = computervision.nonlinearOptimizationFundamental(F,K,pts1,pts2)
	print("Time for nonlinearOptimizationFundamental: "+str(time.time()-timeStart))

	testFundamentalMatrix(F,pts1,pts2)
	lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
	lines1 = lines1.reshape(-1,3)
	img5 = drawlines(img1,img2,lines1,pts1,pts2,K)
	lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
	lines2 = lines2.reshape(-1,3)
	img3 = drawlines(img2,img1,lines2,pts2,pts1,K)
	
	p1, p2, X, rot, trans = computervision.getCameraMatrix(F,K,pts1,pts2)
	print("Translation: "+str(trans))
	plotter.plot(rot,trans,X,img1,pts1)
	reprojectionError(X,pts1,pts2,p1,p2)
	cubePosition = X[0]  # alternatively a fixed test point, e.g. np.array([0,0,50,1])
	projectPoint(img5,X,p1)
	projectPoint(img3,X,p2)
	projectCube(img5,p1,cubePosition)
	projectCube(img3,p2,cubePosition)
	vis = showCombinedImgs(img5,img3)
	drawCorrespondences(vis,pts1,pts2)
	cv2.imshow("test", vis)
	return patch.makePatches(pts1,pts2,X,des1,des2)
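testFundamentalMatrix is called above but not shown. A plausible minimal stand-in checks the algebraic epipolar constraint x2ᵀ·F·x1 ≈ 0 over the inlier pairs; this is a sketch under that assumption, not the project's actual helper:

import numpy as np

def testFundamentalMatrix(F, pts1, pts2):
    # residual of the epipolar constraint; should be near zero for inliers
    ones = np.ones((len(pts1), 1))
    x1 = np.hstack([np.float64(pts1).reshape(-1, 2), ones])
    x2 = np.hstack([np.float64(pts2).reshape(-1, 2), ones])
    residuals = np.abs(np.sum(x2 * x1.dot(F.T), axis=1))
    print("mean |x2^T F x1| =", residuals.mean())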
Example #45
0
def getEpilines(pts1,pts2,width,height):
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)

    pts = np.array([(i,j) for i in range(width) for j in range(height)])

    epilines = cv2.computeCorrespondEpilines(pts.reshape(-1,1,2), 2,F)
    epilines = epilines.reshape(-1,3)
    return epilines
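Note the flattening order of the pixel grid above: x varies in the outer loop, so the epiline for pixel (x, y) of the second image ends up in row x * height + y of the result. A usage sketch, assuming pts1 and pts2 are matched (N, 2) point arrays as in the other examples; the dimensions are placeholders:

width, height = 640, 480
epilines = getEpilines(pts1, pts2, width, height)
a, b, c = epilines[100 * height + 200]  # epiline in image 1 for pixel (100, 200) of image 2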
Example #46
0
def drawEpiline(img1, img2, pts1, pts2, F):

    pts1,pts2 = pts1[:30,:], pts2[:30,:]    # keep only the first 30 matches to reduce clutter

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
    lines1 = lines1.reshape(-1,3)
    img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)   

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
    lines2 = lines2.reshape(-1,3)
    img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)

    plt.subplot(121),plt.imshow(img5)
    plt.subplot(122),plt.imshow(img3)
    plt.show()
Example #47
0
def visualize(I1, I2, F, pt1, pt2):

    # Find epilines corresponding to points in the right image (second image) and
    # drawing its lines on the left image (disabled; note whichImage should be 2 here)
    #lines1 = cv2.computeCorrespondEpilines(pt2.reshape(-1, 1, 2), 2, F)
    #lines1 = lines1.reshape(-1, 3)
    #img1, img2 = drawlines(I1, I2, lines1, pt1, pt2)
    #cv2.imshow("Like In Assignment PDF", img1)
    #cv2.imshow("Like In Assignment PDF_2", img2)
    #cv2.waitKey(0)

    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pt1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(I2, I1, lines2, pt2, pt1)
    cv2.imshow("Like In Assignment PDF", img3)
    cv2.imshow("Like In Assignment PDF_2", img4)
    cv2.waitKey(0)
Example #48
0
def find_fund_mat(src_pts, dst_pts, threshold=1.5): 
    n_pts = src_pts.shape[0]
    src_pts_ext = np.concatenate((src_pts, np.ones((src_pts.shape[0], 1))), axis=1)
    dst_pts_ext = np.concatenate((dst_pts, np.ones((dst_pts.shape[0], 1))), axis=1)
    F = np.zeros((3, 3))
    matchesMask = np.zeros(n_pts)
    
    max_vote = -1
    population = range(n_pts)
    for k in range(2000):
        eight_point_filter = tuple(sorted(random.sample(population, 8)))
        temp_F = find_fund_mat_eight_pts(src_pts_ext[eight_point_filter, :],
                                         dst_pts_ext[eight_point_filter, :])
        if temp_F is not None:
            epilines = cv2.computeCorrespondEpilines(src_pts.reshape(-1, 1, 2), 1, temp_F).reshape(-1, 3)
            dists = np.abs(np.sum(epilines * dst_pts_ext, axis=1))
            temp_mask = dists <= threshold
            if np.sum(temp_mask) > max_vote:
                max_vote = np.sum(temp_mask)
                F = temp_F
                matchesMask = temp_mask
            
    return F, matchesMask
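find_fund_mat_eight_pts is referenced but not shown. Below is a minimal sketch of the plain (unnormalized) eight-point solve it could correspond to, returning None for degenerate samples as the RANSAC loop above expects; a real implementation would likely also apply Hartley's coordinate normalization:

import numpy as np

def find_fund_mat_eight_pts(src_pts_ext, dst_pts_ext):
    # one row of the constraint matrix per correspondence, from dst^T F src = 0
    A = np.stack([np.outer(d, s).ravel()
                  for s, d in zip(src_pts_ext, dst_pts_ext)])
    _, sv, vt = np.linalg.svd(A)
    if sv[-2] < 1e-10:  # (near-)degenerate sample: the solution is not unique
        return None
    F = vt[-1].reshape(3, 3)
    # enforce the rank-2 constraint by zeroing the smallest singular value
    u, s, vt = np.linalg.svd(F)
    s[2] = 0.0
    return u.dot(np.diag(s)).dot(vt)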
Example #49
0
def compute_F(im1, im2):
    if show_rt:
        t = time.time()
    #get points in frame A
    #get sift points to match (kp1)
    kp1, des1 = sift.detectAndCompute(im1, None)

    #get points in frame B
    #get sift points to match (kp2)
    kp2, des2 = sift.detectAndCompute(im2, None)

    #only take points that are in both images to compute fundamental matrix
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)   # or pass empty dictionary

    flann = cv2.FlannBasedMatcher(index_params,search_params)

    matches = flann.knnMatch(des1,des2,k=2)

    #cleaner, from http://docs.opencv.org/master/da/de9/tutorial_py_epipolar_geometry.html#gsc.tab=0
    #filter matches with Lowe's ratio test and collect the matched point coordinates
    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.6*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    # convert the matched coordinates to float arrays for findFundamentalMat
    pts1 = np.float32(pts1)
    pts2 = np.float32(pts2)

    #compute fundamental matrix based on given keypoints (F)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    # keep only the inlier correspondences
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]


    if UNIT_TESTING:
        # We select only inlier points

        # Find epilines corresponding to points in right image (second image) and
        # drawing its lines on left image
        lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
        lines1 = lines1.reshape(-1,3)
        img5,img6 = drawlines(im1,im2,lines1,pts1,pts2)

        # Find epilines corresponding to points in left image (first image) and
        # drawing its lines on right image
        lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
        lines2 = lines2.reshape(-1,3)
        img3,img4 = drawlines(im2,im1,lines2,pts2,pts1)

        #print '\n', 'img5:', '\n', img5, '\n'
        #cv2.imshow('img5', img5)
        #cv2.imshow('img3', img3)
        #cv2.waitKey(0)

        plt.subplot(121),plt.imshow(img5)
        plt.subplot(122),plt.imshow(img3)
        plt.show()

    if DEBUGGING:
        print('\nFundamental Matrix:\n', F, '\n')

    if show_rt:
        t = time.time() - t
        print('\ncompute_F() runtime:', t, 'secs\n')

    return F, pts1, pts2
Example #50
0
def StereoCameraCalibration() :
	# Calibrate the left camera
	cam1 = CameraCalibration( sorted( glob.glob( '{}/left*.png'.format(calibration_directory) ) ) )
	# Calibrate the right camera
	cam2 = CameraCalibration( sorted( glob.glob( '{}/right*.png'.format(calibration_directory) ) ) )
	# Stereo calibration termination criteria
	criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
	# Stereo calibration flags
	flags  = 0
	flags |= cv2.CALIB_USE_INTRINSIC_GUESS
#	flags |= cv2.CALIB_FIX_INTRINSIC
#	flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
#	flags |= cv2.CALIB_FIX_FOCAL_LENGTH
	flags |= cv2.CALIB_FIX_ASPECT_RATIO
#	flags |= cv2.CALIB_SAME_FOCAL_LENGTH
#	flags |= cv2.CALIB_ZERO_TANGENT_DIST
	flags |= cv2.CALIB_RATIONAL_MODEL
	flags |= cv2.CALIB_FIX_K3
	flags |= cv2.CALIB_FIX_K4
	flags |= cv2.CALIB_FIX_K5
	# Stereo calibration
	calibration = cv2.stereoCalibrate( cam1['obj_points'], cam1['img_points'], cam2['img_points'],
		cam1['camera_matrix'], cam1['dist_coefs'], cam2['camera_matrix'], cam2['dist_coefs'], cam1['img_size'],
		flags=flags, criteria=criteria )
	# Store the stereo calibration results in a dictionary
	parameter_names = ( 'calib_error', 'camera_matrix_l', 'dist_coefs_l', 'camera_matrix_r', 'dist_coefs_r', 'R', 'T', 'E', 'F' )
	calibration = dict( zip( parameter_names, calibration ) )
	# Stereo rectification
	rectification = cv2.stereoRectify(
		calibration['camera_matrix_l'], calibration['dist_coefs_l'],
		calibration['camera_matrix_r'], calibration['dist_coefs_r'],
		cam1['img_size'], calibration['R'], calibration['T'], flags=0 )
	# Store the stereo rectification results in the dictionary
	parameter_names = ( 'R1', 'R2', 'P1', 'P2', 'Q', 'ROI1', 'ROI2' )
	calibration.update( zip( parameter_names, rectification ) )
	# Undistortion maps
	calibration['left_map'] = cv2.initUndistortRectifyMap(
		calibration['camera_matrix_l'], calibration['dist_coefs_l'],
		calibration['R1'], calibration['P1'], cam1['img_size'], cv2.CV_32FC1 )
	calibration['right_map'] = cv2.initUndistortRectifyMap(
		calibration['camera_matrix_r'], calibration['dist_coefs_r'],
		calibration['R2'], calibration['P2'], cam2['img_size'], cv2.CV_32FC1 )
	# Compute reprojection error
	undistorted_l = cv2.undistortPoints( np.concatenate( cam1['img_points'] ).reshape(-1, 1, 2),
		calibration['camera_matrix_l'], calibration['dist_coefs_l'], P=calibration['camera_matrix_l'] )
	undistorted_r = cv2.undistortPoints( np.concatenate( cam2['img_points'] ).reshape(-1, 1, 2),
		calibration['camera_matrix_r'], calibration['dist_coefs_r'], P=calibration['camera_matrix_r'] )
	lines_l = cv2.computeCorrespondEpilines( undistorted_l, 1, calibration['F'] )
	lines_r = cv2.computeCorrespondEpilines( undistorted_r, 2, calibration['F'] )
	calibration['reproject_error'] = 0
	for i in range( len( undistorted_l ) ) :
		calibration['reproject_error'] += abs( undistorted_l[i][0][0] * lines_r[i][0][0] +
			undistorted_l[i][0][1] * lines_r[i][0][1] + lines_r[i][0][2] ) + abs( undistorted_r[i][0][0] * lines_l[i][0][0] +
			undistorted_r[i][0][1] * lines_l[i][0][1] + lines_l[i][0][2] )
	calibration['reproject_error'] /= len( undistorted_r )
	# Write calibration results
	with open( '{}/calibration.log'.format(calibration_directory) , 'w') as output_file :
		output_file.write( '\n~~~ Left camera calibration ~~~\n\n' )
		output_file.write( 'Calibration error : {}\n'.format( cam1['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( cam1['reproject_error'] ) )
		output_file.write( 'Camera matrix :\n{}\n'.format( cam1['camera_matrix'] ) )
		output_file.write( 'Distortion coefficients :\n{}\n'.format( cam1['dist_coefs'].ravel() ) )
		output_file.write( '\n~~~ Right camera calibration ~~~\n\n' )
		output_file.write( 'Calibration error : {}\n'.format( cam2['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( cam2['reproject_error'] ) )
		output_file.write( 'Camera matrix :\n{}\n'.format( cam2['camera_matrix'] ) )
		output_file.write( 'Distortion coefficients :\n{}\n'.format( cam2['dist_coefs'].ravel() ) )
		output_file.write( '\n~~~ Stereo camera calibration ~~~\n\n' )
		output_file.write( 'Stereo calibration error : {}\n'.format( calibration['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( calibration['reproject_error'] ) )
		output_file.write( 'Left camera matrix :\n{}\n'.format( calibration['camera_matrix_l'] ) )
		output_file.write( 'Left distortion coefficients :\n{}\n'.format( calibration['dist_coefs_l'].ravel() ) )
		output_file.write( 'Right camera matrix :\n{}\n'.format( calibration['camera_matrix_r'] ) )
		output_file.write( 'Right distortion coefficients :\n{}\n'.format( calibration['dist_coefs_r'].ravel() ) )
		output_file.write( 'Rotation matrix :\n{}\n'.format( calibration['R'] ) )
		output_file.write( 'Translation vector :\n{}\n'.format( calibration['T'].ravel() ) )
		output_file.write( 'Essential matrix :\n{}\n'.format( calibration['E'] ) )
		output_file.write( 'Fundamental matrix :\n{}\n'.format( calibration['F'] ) )
		output_file.write( 'Rotation matrix for the first camera :\n{}\n'.format( calibration['R1'] ) )
		output_file.write( 'Rotation matrix for the second camera :\n{}\n'.format( calibration['R2'] ) )
		output_file.write( 'Projection matrix for the first camera :\n{}\n'.format( calibration['P1'] ) )
		output_file.write( 'Projection matrix for the second camera :\n{}\n'.format( calibration['P2'] ) )
		output_file.write( 'Disparity-to-depth mapping matrix :\n{}\n'.format( calibration['Q'] ) )
		output_file.write( 'ROI for the left camera :  {}\n'.format( calibration['ROI1'] ) )
		output_file.write( 'ROI for the right camera : {}\n'.format( calibration['ROI2'] ) )
	# Write the calibration object with all the parameters
	with open( '{}/calibration.pkl'.format(calibration_directory) , 'wb') as output_file :
		pickle.dump( calibration, output_file, pickle.HIGHEST_PROTOCOL )
	# Return the calibration
	return calibration
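A sketch of how the pickled calibration produced above might be consumed later; the file paths are placeholders:

import pickle
import cv2

with open('calibration/calibration.pkl', 'rb') as calibration_file:
    calibration = pickle.load(calibration_file)

# rectify a stereo pair with the precomputed undistortion maps
left_map1, left_map2 = calibration['left_map']
right_map1, right_map2 = calibration['right_map']
left_rectified = cv2.remap(cv2.imread('left01.png'), left_map1, left_map2, cv2.INTER_LINEAR)
right_rectified = cv2.remap(cv2.imread('right01.png'), right_map1, right_map2, cv2.INTER_LINEAR)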
Example #51
0
def stereoCalibrate():
    board_n = board_w * board_h
    board_sz = (board_w, board_h)
    # cv2.namedWindow('Stereo Calibration', cv2.WINDOW_AUTOSIZE)

    board_pts = inverted_seq(board_sz, square_sz)

    image1, image2, image1_pts, image2_pts, object_pts, imgSize = getMatchedPtsByChessboard(
        [3], board_sz, board_pts)
    #image1, image2, image1_pts, image2_pts, object_pts, imgSize = getMatchedPts()
    # print image1_pts, image2_pts, object_pts, imgSize

    cameraMatrix1 = np.identity(3, np.float32); cameraMatrix2 = np.identity(3, np.float32)
    distCoeffs1 = np.zeros((5), np.float32); distCoeffs2 = np.zeros((5), np.float32)
    #CALIBRATE THE CAMERA
    flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        object_pts, image1_pts, image2_pts, cameraMatrix1, distCoeffs1,
        cameraMatrix2, distCoeffs2, imgSize, flags = flags, criteria = criteria)
    print('Re-projection error', retval)
    print('Intrinsic matrix 1\n', cameraMatrix1)
    print('Intrinsic matrix 2\n', cameraMatrix2)
    print('Distortion coeffs 1\n', distCoeffs1)
    print('Distortion coeffs 2\n', distCoeffs2)
    print('Rotation matrix\n', R)
    print('Translation vector\n', T)
    print('Essential matrix\n', E)
    print('Fundamental matrix\n', F)
    cameraMatrix = cameraMatrix1
    cameraMatrix[:2,2] = (imgSize[0] / 2, imgSize[1] / 2)
    distCoeffs = np.zeros((1, 5), np.float32)
    if True:
        distCoeffs1 = distCoeffs2 = distCoeffs
        cameraMatrix1 = cameraMatrix2 = cameraMatrix
    image1 = cv2.undistort(image1, cameraMatrix1, distCoeffs1)
    image2 = cv2.undistort(image2, cameraMatrix2, distCoeffs2)
    
    #CALIBRATION QUALITY CHECK
    #Using the epipolar geometry constraint: m2' * F * m1 = 0
    assert image1_pts[0].shape == image2_pts[0].shape
    newshape = image1_pts[0].shape
    newshape = (newshape[0], 1, newshape[1])
    #Work in undistorted space (An undocumented signature is used here. @Dstray)
    image1_pts = cv2.undistortPoints(image1_pts[0].reshape(newshape, order = 'F'),
        cameraMatrix1, distCoeffs1, P = cameraMatrix1)
    image2_pts = cv2.undistortPoints(image2_pts[0].reshape(newshape, order = 'F'),
        cameraMatrix2, distCoeffs2, P = cameraMatrix2)
    points1, points2 = image1_pts.reshape(-1, 2), image2_pts.reshape(-1, 2)
    #implemented in opencv 3.0.0
    if False:
        lines1 = cv2.computeCorrespondEpilines(points2, 2, F).reshape(-1, 3)
        lines2 = cv2.computeCorrespondEpilines(points1, 1, F).reshape(-1, 3)
        image1, image2 = drawlines(image1, image2, lines1, points1, points2)
        image2, image1 = drawlines(image2, image1, lines2, points2, points1)
    cv2.imshow('Image 1', image1); cv2.imshow('Image 2', image2)
    cv2.waitKey(0)

    #COMPUTE AND DISPLAY RECTIFICATION
    urmaps, Q = bouguetRectify(image1_pts, image2_pts, imgSize,
        cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, R, T)
    '''urmaps, F = hartleyRectify(image1_pts, image2_pts, imgSize,
        cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, F)'''
    print('Q\n', Q)
    Q = np.float32([[1, 0, 0,  -0.5*imgSize[0]],
                    [0, 1, 0,  -0.5*imgSize[1]],
                    [0, 0, 0, cameraMatrix[0][0]],
                    [0, 0, 0.07,      0]])
    #Save the results
    np.save('rectmap.npy', urmaps)
    np.save('Q.npy', Q)

    #RECTIFY THE IMAGES AND FIND DISPARITY MAPS
    #Setup for finding correspondences
    img1r = cv2.remap(image1, urmaps[0], urmaps[1], cv2.INTER_LINEAR)
    img2r = cv2.remap(image2, urmaps[2], urmaps[3], cv2.INTER_LINEAR)
    cv2.imshow('Image 1', img1r); cv2.imshow('Image 2', img2r)
    cv2.waitKey(0)
    
    '''disp, points = stereoBMMatch(img1r, img2r, Q)
    colors = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    points = points[mask]; colors = colors[mask]
    write_ply('_3DPoints.ply', points, colors)'''
    #stereosgbm_match(img1r, img2r, '_3DPoints.ply', Q)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #52
0
def main():
	# Now ask for input
	print('\nThis program takes an image of a room and lets you know which room you are most likely in.\n')
	print('It then computes how closely your image matches an image in the database.')
	print('If it is a close enough match, it will tell you your current camera position in relation to the camera position of the photo it matched.\n')
	image = input("Please enter the path of the image containing a room: ")

	
	#get current working directory
	mypath = os.getcwd()
	#append /rooms
	mypath = mypath + '/Rooms'

	#get array of directory paths aka rooms
	directories = [x[0] for x in os.walk(mypath)]
	#remove rooms directory
	del directories[0]
	rooms = len(directories)

	#set up array for greatest match 
	for i in range(rooms):
		roomMatched.insert(i, 1000000)
		photoMatched.insert(i, 100)

	#get images in each room and put into images array 
	for directory in range(len(directories)):
		files = [x[2] for x in os.walk(directories[directory])]
		images.insert(directory, files)

	
	#blur original image
	blurredOriginal = blurImages(image)

	# Initiate ORB detector
	orb = cv2.ORB_create()
	#determine keypoints and their descriptors from the current image
	kp1, des1 = orb.detectAndCompute(blurredOriginal,None)


	#Loop through each image in database
	for i in range(len(directories)):
		for j in range(len(images[i][0])):
			#get database image
			img2location = directories[i] + '/' + images[i][0][j]
			img2 = blurImages(img2location)
			
			# find the keypoints and descriptors with ORB
			kp2, des2 = orb.detectAndCompute(img2, None)
			#kp2, des2 = orb.detectAndCompute(img2,None)
			bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
			# Match descriptors.
			matches = bf.match(des1,des2)
			# Sort them in the order of their distance.
			matches = sorted(matches, key = lambda x:x.distance)
			#get average distance away from 10 best matching points
			dist = 0
			diffSize = []
			diffAngle = []

			for x in range(10):
				dist += matches[x].distance
				#put ten angle differences into diffAngle[]
				ang = kp1[matches[x].queryIdx].angle
				ang2 = kp2[matches[x].trainIdx].angle
				angDif = ang - ang2
				diffAngle.insert(x, angDif)
				#put ten size differences into diffSize[]
				size1 = kp1[matches[x].queryIdx].size
				size2 = kp2[matches[x].trainIdx].size
				sizeDiff = size1 - size2
				diffSize.insert(x, sizeDiff)

			dist = dist/10
			stdDevAng = np.std(diffAngle)/10
			stdDevSize = np.std(diffSize)/10
			#alter dist to be the dist*stdDev*stdDev
			dist = dist*stdDevSize*stdDevAng
			#update least distance if likely
			if roomMatched[i] > dist:
				roomMatched[i] = dist
				photoMatched[i] = j
	
	#get the index of the most likely room by lowest distance
	mostLikely = roomMatched.index(min(roomMatched))
	print('\n\n******OUTPUT*********')
	if (roomMatched[mostLikely] > 30):
		print('You are most likely in room ', mostLikely + 1, ' but we cannot say this with confidence as the current photo\'s score is: ', roomMatched[mostLikely], ', which is above our cutoff point of 30.')

	else:
		closestImageLocation = directories[mostLikely] + '/' + images[mostLikely][0][photoMatched[mostLikely]]
		
		print('You are most likely in room: ', mostLikely + 1)
		print('Closest Image: ', closestImageLocation)
		print('Distance: ', roomMatched[mostLikely], ' (the closer to zero the higher the probability you are in given room)')


		#intrinsic parameters of camera -- found using matlab
		K = np.float32([3514.89135426281, 0, 1514.08070548149, 0, 3528.06290049699, 2019.41405283900, 0, 0, 1]).reshape(3,3)
		K_inv = np.linalg.inv(K)


		img2 = cv2.imread(closestImageLocation, cv2.IMREAD_GRAYSCALE)

		###Plot best image with test image
		# find the keypoints and descriptors with ORB
		kp2, des2 = orb.detectAndCompute(img2,None) 
		bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
		# Match descriptors.
		matches = bf.match(des1,des2)
		# Sort them in the order of their distance.
		matches = sorted(matches, key = lambda x:x.distance)

		srcPts = []
		dstPts = []
		# get best 40 points from matched image and source image
		for f in range(40):
					srcPt = kp1[matches[f].queryIdx].pt
					dstPt = kp2[matches[f].trainIdx].pt
					srcPts.insert(f, srcPt)
					dstPts.insert(f, dstPt)
		
		#Fill arrays with 40 matching points 
		src_pts = np.float32([srcPts[m] for m in range(len(srcPts))])
		dst_pts = np.float32([dstPts[m] for m in range(len(dstPts))])

		#Compute the fundamental matrix using RANSAC
		F, mask = cv2.findFundamentalMat(src_pts,dst_pts,cv2.FM_RANSAC, 1)

 
		# Selecting only the inliers
		pts1 = src_pts[mask.ravel()==1]
		pts2 = dst_pts[mask.ravel()==1]
	
		F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC, 1)

		pts1 = pts1[mask.ravel() == 1]
		pts2 = pts2[mask.ravel() == 1]
		 
		print("Fundamental matrix: \n", F)
		
		# Find Error in matrix using x'^t(F)x = 0
		pt1 = np.array([[pts1[0][0]], [pts1[0][1]], [1]])
		pt2 = np.array([[pts2[0][0], pts2[0][1], 1]])
		fundMatrixError =  (pt2.dot(F)).dot(pt1)
		#based on a single point correspondence
		print("Error in fundamental matrix: ", fundMatrixError)
		 
		#Draw epilines and matching points on images
		lines = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
		lines = lines.reshape(-1,3)

		lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
		lines2 = lines2.reshape(-1,3)

		BMEpilines = blurredOriginal.copy()
		CurrEpilines = img2.copy()

		BMEpilines = drawEpilines(BMEpilines,lines,pts1,pts2)
		CurrEpilines = drawEpilines(CurrEpilines,lines2,pts2,pts1)	 

		#Calculate Essential Matrix 
		E = K.transpose().dot(F).dot(K)


		#Decomposing matrices of E
		U, diag110, Vt = np.linalg.svd(E)
		if np.linalg.det(np.dot(U,Vt))<0:
			Vt = -Vt
		E = np.dot(U,np.dot(np.diag([1.0,1.0,0.0]),Vt))
		U, diag110, Vt = np.linalg.svd(E)
		print("Essential matrix: \n", E)

		#W is the 90-degree rotation about the z-axis used to factor E into R and T
		W = np.array([0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]).reshape(3, 3)
		 

		#normalize/homogenize image coords by applying the inverse camera intrinsics (K_inv) to each point
		for i in range(len(pts1)):
		    normalizedSourcePoints.append(K_inv.dot([pts1[i][0], pts1[i][1], 1.0]))
		    normalizedCurrentPoints.append(K_inv.dot([pts2[i][0], pts2[i][1], 1.0]))
		 

		#determine which of the 4 possible relative camera positions is the correct one

		# First possible rotational matrix value
		R = np.dot(U, W)
		R = np.dot(R, Vt)
		# First possible translation vector value
		T = U[:, 2]
		if (not checkInFront(R, T)):
		    # Second choice: T = -u3
		    T = - U[:, 2]
		    if (not checkInFront(R, T)):
		        #R = U * Wt * Vt, T = u3
		        R = U.dot(W.T).dot(Vt)
		        T = U[:, 2]
		        if not checkInFront(R, T):
		        	#Only possibility
		            T = - U[:, 2]
		 

		yR1 = -math.asin(R[2][0])
		xR1 = math.atan2(R[2][1]/math.cos(yR1), R[2][2]/math.cos(yR1))
		zR1 = math.atan2(R[1][0]/math.cos(yR1), R[0][0]/math.cos(yR1))

		# Decompose rotational matrix
		rotationX = math.atan2(R[2][1], R[2][2])
		rotationY = -math.asin((R[2][0]))
		rotationZ = math.atan2(R[1][0], R[0][0])

		print('xrotation1: ', xR1*180/np.pi, ' yrotation1: ', yR1*180/np.pi, ' zrotation: ', zR1*180/np.pi)

		#Output
		print("Rotation matrix: ", R)	
		print("Translation vector: ", T)
		
		cv2.imshow("Current Image", BMEpilines)
		cv2.imshow("Best Matched Image", CurrEpilines)
		k = cv2.waitKey(0)
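Since OpenCV 3, the manual enumeration of the four (R, T) candidates above can be replaced by cv2.findEssentialMat plus cv2.recoverPose, which performs the SVD decomposition and the in-front-of-both-cameras (cheirality) check internally. A sketch, assuming pts1, pts2 and K as in this example:

E, e_mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC, threshold=1.0)
retval, R, T, pose_mask = cv2.recoverPose(E, pts1, pts2, K)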
Example #53
0
def find_correspondences(img1, img2, fundamental, xform):
    '''
    Finds correspondences for each point between images.

    Args:
        img1, img2 (ndarray): The two images to match between.
        fundamental (ndarray): Fundamental matrix relating img1 to img2.
        xform (ndarray): Homography used to pre-warp img2 before correlation.
    Returns:
        dict<tuple, tuple>, a map of correspondences between (x,y) points
            in this image and (x,y) points in the other
    '''

    rows, cols, _ = img1.shape

    # Transform to get better correspondences using cross-correlation.
    #
    # This isn't *really* rectifying the image to line up correspondences,
    # but we were unable to get it to work the proper way so here's the next
    # best thing.

    xform = np.linalg.inv(xform) # Reverse transform
    timg = cv2.warpPerspective(img2, xform, (1024, 1024))

    # Find mask of the comet's silhouette

    def mask_for_img(img):
        _, mask = cv2.threshold(img, 24, 255, cv2.THRESH_BINARY)
        kernel = np.ones((4,4))
        mask = cv2.erode(mask, kernel)
        mask = np.sum(mask, axis=2)
        return mask

    mask1 = mask_for_img(img1)
    mask2 = mask_for_img(img2)

    # Get points in image that lie on mask

    pts = [(x,y) for y in range(0, rows, sfm.CORRESPONDENCE_SKIP)
                 for x in range(0, cols, sfm.CORRESPONDENCE_SKIP)
                     if mask1[y,x] > 0]
    pts = np.float32(pts)

    # Find epilines

    lines = cv2.computeCorrespondEpilines(pts, 1, fundamental)

    # Scan along epilines and find correspondences

    correspondences = dict()

    def normalize(x):
        return (x-np.mean(x))/np.std(x)

    imgpad1 = np.pad(img1, sfm.WINDOW_RADIUS, 'edge')
    imgpad2 = np.pad(timg, sfm.WINDOW_RADIUS, 'edge')

    win_size = 2*sfm.WINDOW_RADIUS + 1

    for i in range(0, len(pts)):
        a,b,c = lines[i][0]
        x,y   = int(pts[i][0]), int(pts[i][1])

        win1 = normalize(imgpad1[y:y+win_size,x:x+win_size])

        best_pos = None
        best_corr = 0

        for xx in range(0, cols):
            yy = int(np.round((-a*xx - c)/b))

            if 0 <= yy < mask2.shape[0] and mask2[yy,xx] > 0:

                # Find corresponding points in transformed image
                t_x = int( (xform[0,0]*xx + xform[0,1]*yy + xform[0,2]) /
                           (xform[2,0]*xx + xform[2,1]*yy + xform[2,2]) )
                t_y = int( (xform[1,0]*xx + xform[1,1]*yy + xform[1,2]) /
                           (xform[2,0]*xx + xform[2,1]*yy + xform[2,2]) )

                win2 = normalize(imgpad2[t_y:t_y+win_size,t_x:t_x+win_size])
                corr = np.sum(win1*win2) / win1.size

                if corr > best_corr:
                    best_pos = (xx,yy)
                    best_corr = corr

        if best_pos is not None:

            if sfm.SHOW_CORRESPONDENCE:

                img = Util.combine_images(img1, img2)
                cv2.circle(img, (x,y), 8, Util.GREEN, 2)
                cv2.circle(img, (best_pos[0]+1024, best_pos[1]), 8, Util.GREEN, 2)
                y1 = int(-c/b)
                y2 = int((-a*cols-c)/b)
                cv2.line(img, (cols, y1), (2*cols, y2), Util.BLUE, 1)
                img = scipy.misc.imresize(img, 0.5)
                cv2.imshow('debug', img)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            correspondences[(x,y)] = best_pos

    return correspondences
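The normalize-then-multiply pattern inside the scan above is normalized cross-correlation. Factored out as a standalone sketch (assumes two equally sized patches with non-zero variance):

import numpy as np

def ncc(win1, win2):
    # normalized cross-correlation: 1.0 is a perfect match
    w1 = (win1 - win1.mean()) / win1.std()
    w2 = (win2 - win2.mean()) / win2.std()
    return float(np.mean(w1 * w2))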
Example #54
0
	# ratio test to retain only the good matches
	for i,(m,n) in enumerate(matches):
		if m.distance < 0.7*n.distance:
			pts_left_image.append(kps_left[m.queryIdx].pt)
			pts_right_image.append(kps_right[m.trainIdx].pt)

	pts_left_image = np.float32(pts_left_image)
	pts_right_image = np.float32(pts_right_image)
	F, mask = cv2.findFundamentalMat(pts_left_image, pts_right_image, cv2.FM_LMEDS)

	# Selecting only the inliers
	pts_left_image = pts_left_image[mask.ravel()==1]
	pts_right_image = pts_right_image[mask.ravel()==1]

	# Drawing the lines on left image and the corresponding feature points on the right image
	lines1 = cv2.computeCorrespondEpilines(pts_right_image.reshape(-1,1,2), 2, F)
	lines1 = lines1.reshape(-1,3)
	img_left_lines, img_right_pts = draw_lines(img_left, img_right, lines1, pts_left_image, pts_right_image)

	# Drawing the lines on right image and the corresponding feature points on the left image
	lines2 = cv2.computeCorrespondEpilines(pts_left_image.reshape(-1,1,2), 1, F)
	lines2 = lines2.reshape(-1,3)
	img_right_lines, img_left_pts = draw_lines(img_right, img_left, lines2, pts_right_image, pts_left_image)

	cv2.imshow('Epi lines on left image', img_left_lines)
	cv2.imshow('Feature points on right image', img_right_pts)
	cv2.imshow('Epi lines on right image', img_right_lines)
	cv2.imshow('Feature points on left image', img_left_pts)
	cv2.waitKey()
	cv2.destroyAllWindows()
Example #55
0
    def __FundamentalMatrix(self, point):
        # Check if the image is frozen.
        # SIGB: The user can freeze the input image by pressing the "f" key.
        if self.IsFrozen:

            # Insert the new selected point in the queue.
            if self.__UpdateQueue(point):

                # Get all points selected by the user.
                points = np.asarray(self.PointsQueue, dtype=np.float32)

                # <000> Get the selected points from the left and right images.
                pointsLeft,pointsRight=[],[]
                
                for i in range(0,16):
                    if i % 2 ==0:
                        pointLeft=points[i]
                        pointsLeft.append(pointLeft)
                
                    elif i % 2 ==1:
                        pointRight=points[i]
                        pointsRight.append(pointRight) 
                        
                pointsLeft=np.array(pointsLeft, dtype=np.float32) 
                pointsRight=np.array(pointsRight, dtype=np.float32)       

                # <001> Estimate the Fundamental Matrix.
                FundMat,mask1=cv2.findFundamentalMat(pointsLeft,pointsRight)

                # <002> Save the Fundamental Matrix in the F attribute of the CamerasParameters class.
                StereoCameras.Instance.Parameters.F=FundMat

                # Get each point from left image.
                for pt in pointsLeft:
                    
                    # <003> Estimate the epipolar line.
                   
                    lineEpi=cv2.computeCorrespondEpilines(np.array([pt]), 1, StereoCameras.Instance.Parameters.F)

                    # <004> Define the initial and final points of the line.  
                    initialP=(1280+(int(-lineEpi[0,0,2]/lineEpi[0,0,0])),0)
                    FinalP=(1280,int(-lineEpi[0,0,2]/lineEpi[0,0,1]))                    
                    
                    # <005> Draws the epipolar line in the input image.
                    cv2.line(self.Image, initialP, FinalP, (255,0,0))


                # Get each point from right image.
                for pt in pointsRight:

                    # <006> Estimate the epipolar line.
                    lineEpi=cv2.computeCorrespondEpilines(np.array([pt]), 2, StereoCameras.Instance.Parameters.F)
                    initialP=(int(-lineEpi[0,0,2]/lineEpi[0,0,0]),0)
                    FinalP=(0,int(-lineEpi[0,0,2]/lineEpi[0,0,1]))
                    cv2.line(self.Image, initialP, FinalP, (0,0,255))
                    

                # Show the final result of this process to the user.
                cv2.imshow("Original", self.Image)

                # Update the fundamental matrix flag and release the system
                self.hasFundamentalMatrix = True
Example #56
0
    def build_epipolar_lines(self, points, fundamental_matrix, is_right, show_lines=True):
        lines = cv2.computeCorrespondEpilines(points, 2 if is_right else 1, fundamental_matrix)
        lines = lines.reshape(-1, 3)

        if show_lines:
            self.draw_lines(self.Image, lines, points, is_right)
Example #57
0
    def externalCall(self):

        sift = cv2.xfeatures2d.SIFT_create()

        img1 = self.inputImageLeft.data
        img2 = self.inputImageRight.data

        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)


        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)


        good = []
        pts1 = []
        pts2 = []

        # ratio test as per Lowe's paper
        for i,(m,n) in enumerate(matches):
            if m.distance < 0.8*n.distance:
                good.append(m)
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)




        # Now we have the list of best matches from both the images. Let’s find the Fundamental Matrix.

        pts1 = np.int32(pts1)
        pts2 = np.int32(pts2)
        pts1 = np.float32(pts1)
        pts2 = np.float32(pts2)
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        if F is None:
            print('F IS NONE')
            return
        print(F)

        # We select only inlier points
        pts1 = pts1[mask.ravel()==1]
        pts2 = pts2[mask.ravel()==1]


        # Find epilines corresponding to points in right image (second image) and
        # drawing its lines on left image
        lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
        lines1 = lines1.reshape(-1, 3)
        img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

        # Find epilines corresponding to points in left image (first image) and
        # drawing its lines on right image
        lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
        lines2 = lines2.reshape(-1, 3)
        img3,img4 = drawlines(img2, img1, lines2, pts2, pts1)

        cv2.imshow("img5", img5)
        cv2.imshow("img3", img3)
Example #58
0
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
    if m.distance < 0.8*n.distance:
        good.append(m)
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)

pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)
 
# We select only inlier points
pts1 = pts1[mask.ravel()==1]
pts2 = pts2[mask.ravel()==1]

# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
lines1 = lines1.reshape(-1,3)
img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)

# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)

plt.subplot(121),plt.imshow(img5)
plt.subplot(122),plt.imshow(img3)
plt.show()
Example #59
0
	for r,pt1,pt2 in zip(lines,pts1,pts2):
		color = tuple(np.random.randint(0,255,3).tolist())
		x0,y0 = map(int, [0, -r[2]/r[1] ])
		
		x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])

		# cv2.line draws in place, so no reassignment of img1 is needed
		cv2.line(img1, (x0,y0), (x1,y1), color,1)
		
		cv2.circle(img1,tuple(pt1),5,color,-1)
		#print(img1)
		cv2.circle(img2,tuple(pt2),5,color,-1)
	return img1,img2

# Find epilines through the points in the right (second) image and draw the lines on the left image
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F) # each epiline is (a, b, c) with ax + by + c = 0
lines1 = lines1.reshape(-1,3)
img5,img6 = drawLines(img1,img2,lines1,pts1,pts2)



lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2),1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = drawLines(img2,img1,lines2,pts2,pts1)

cv2.imshow('epilines on left image',img5)   # distinct window names so both images stay visible
cv2.imshow('epilines on right image',img3)
cv2.waitKey(0)
#plt.subplot(121),plt.imshow(img5)
#plt.subplot(122),plt.imshow(img3)
#plt.show()
Example #60
0
for i in range (0,len(leftImagePoints)):
	sampleLeftImagePoints = leftImagePoints[i]
	sampleRightImagePoints = rightImagePoints[i]

	pointsShape = sampleLeftImagePoints.shape

	sampleLeftImagePoints = sampleLeftImagePoints.reshape( (1,)+pointsShape )
	sampleRightImagePoints = sampleRightImagePoints.reshape( (1,)+pointsShape )



	pointsLeftUndistorted = cv2.undistortPoints(sampleLeftImagePoints, cameraMatrixLeft, distortionCoeffsLeft , P=cameraMatrixLeft)
	pointsRightUndistorted = cv2.undistortPoints(sampleRightImagePoints, cameraMatrixRight, distortionCoeffsRight , P=cameraMatrixRight)

	epiLinesLeft = cv2.computeCorrespondEpilines(pointsLeftUndistorted, 1, calib_F)
	epiLinesRight = cv2.computeCorrespondEpilines(pointsRightUndistorted, 2, calib_F)

	for j in range (0,epiLinesLeft.shape[0]):
		totalPoints += 1
		totalError += math.fabs( \
			sampleLeftImagePoints[0][j][0]*epiLinesRight[j][0][0] + \
			sampleLeftImagePoints[0][j][1]*epiLinesRight[j][0][1] + \
			epiLinesRight[j][0][2] \
			) + math.fabs( \
			sampleRightImagePoints[0][j][0]*epiLinesLeft[j][0][0] + \
			sampleRightImagePoints[0][j][1]*epiLinesLeft[j][0][1] + \
			epiLinesLeft[j][0][2] \
			)

averageEpipolarError = totalError / totalPoints