def get_cld(im1, im2): #im1 and im2 must be grayscale
    if show_rt:
        t = time.time()

    #compute Fundamental matrix
    F, pts1, pts2 = compute_F(im1, im2)

    #compute Essential matrix
    E = compute_E(F, im1, im2)

    #recover rotation R and translation T from the Essential matrix
    R, T = compute_R_T(E)

    #get K
    K = compute_K(im1)

    #get projection from K, R, T
    P = compute_P(K, R, T)

    #quit()
    Tx = T[2]

    #reshape the points to the (m*2, 1) layout stereoRectifyUncalibrated expects
    pts1_n = pts1.reshape((pts1.shape[0] * 2, 1))
    pts2_n = pts2.reshape((pts2.shape[0] * 2, 1))

    #compute rectification transforms; imgSize is (width, height)
    retval, rec1, rec2 = cv2.stereoRectifyUncalibrated(pts1_n, pts2_n, F, (im1.shape[1], im1.shape[0]))

    if DEBUGGING:
        print 'rec1:\n', rec1
        print 'rec2:\n', rec2

    #apply rectification homographies; dsize is (width, height)
    rec_im1 = cv2.warpPerspective(im1, rec1, (im1.shape[1], im1.shape[0]))

    rec_im2 = cv2.warpPerspective(im2, rec2, (im2.shape[1], im2.shape[0]))

    if UNIT_TESTING:
        cv2.imshow('im1', rec_im1)
        cv2.waitKey(0)

        cv2.imshow('im2', rec_im2)
        cv2.waitKey(0)

        '''
        h = rec_im1.shape[0]
        w = rec_im1.shape[1]
        dispim = np.zeros((h,2*w), dtype=np.uint8)
        dispim[:,0:w] = rec_im1
        dispim[:,w:] = rec_im2
        for jj in range(0,h,15):
            cv2.line(dispim, (0,jj), (2*w,jj), (255,255,255), 1)
        cv2.imshow('Sbs', dispim)
        cv2.waitKey(0)
        '''

    #get disparity map
    stereo = cv2.StereoBM(cv2.STEREO_BM_BASIC_PRESET,ndisparities=16, SADWindowSize=15)

    #rec_im1 = cv2.cvtColor(rec_im1, cv2.COLOR_BGR2GRAY)
    #rec_im2 = cv2.cvtColor(rec_im2, cv2.COLOR_BGR2GRAY)

    disparity = stereo.compute(rec_im1, rec_im2) #/ 16

    disparityF = disparity.astype(float)
    maxv = np.max(disparityF.flatten())
    minv = np.min(disparityF.flatten())
    disparityF = 255.0*(disparityF-minv)/(maxv-minv)
    disparityU = disparityF.astype(np.uint8)

    print 'disparity.dtype:', disparity.dtype

    cv2.imwrite('disparity.jpg', disparityU)

    if UNIT_TESTING:
        cv2.imshow('disparity', disparityU)
        cv2.waitKey(0)

        plt.subplot(122), plt.imshow(disparityF)
        plt.show()


    #get perspective transform (Q)
    cx = len(im1[0]) / 2
    cy = len(im1) / 2
    cxp = cx

    Q = np.asarray([[1, 0, 0, -cx],
                    [0, 1, 0, -cy],
                    [0, 0, 0, f_pix],
                    [0, 0, -1/Tx, (cx-cxp)/Tx]])

    #reproject to 3d
    im_3D = cv2.reprojectImageTo3D(disparityU, Q)

    if show_rt:
        print '\nget_cld() runtime:', time.time() - t, 'secs\n'

    return im_3D
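A minimal driver for the example above (a sketch only: compute_F, compute_E, compute_R_T, compute_K, compute_P, f_pix and the show_rt/DEBUGGING/UNIT_TESTING flags are assumed to be defined at module level, and the file names are hypothetical):

import cv2

im1 = cv2.imread('left.jpg', cv2.IMREAD_GRAYSCALE)   # get_cld() expects grayscale input
im2 = cv2.imread('right.jpg', cv2.IMREAD_GRAYSCALE)
cloud = get_cld(im1, im2)  # (rows, cols, 3) array of reprojected 3-D points
print 'point cloud shape:', cloud.shape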
Example #2
def returnH1_H2(points1, points2, F, size):
    # per a Stack Overflow note, the (m, 2) points must be reshaped to
    # (m*2, 1); passing them unchanged can cause a memory error in the
    # underlying C++ code
    p1 = points1.reshape(len(points1) * 2, 1)
    p2 = points2.reshape(len(points2) * 2, 1)
    _, H1, H2 = cv2.stereoRectifyUncalibrated(p1, p2, F, size)  # size is (width, height)
    return H1, H2
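A quick shape check of the reshape workaround described above (hypothetical data; only the array layout matters):

import numpy as np

points = np.zeros((25, 2), dtype=np.float32)  # (m, 2) matched points
flat = points.reshape(len(points) * 2, 1)     # (m*2, 1) layout for stereoRectifyUncalibrated
assert flat.shape == (50, 1)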
Example #3
def drawEpilines(img1, img2, pts1, pts2, F):
    # Rectify images
    ret, h1, h2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F,
                                                (img1.shape[1], img1.shape[0]))

    img1_ = img1.copy()
    img2_ = img2.copy()

    # Calculate and draw the epilines in img1
    lines1 = cv2.computeCorrespondEpilines(pts2, 2, F)
    lines1 = lines1.reshape(-1, 3)
    imgLeft = drawlines(img1_, lines1, pts1, pts2)

    # Calculate and draw the epilines in img2
    lines2 = cv2.computeCorrespondEpilines(pts1, 1, F)
    lines2 = lines2.reshape(-1, 3)
    imgRight = drawlines(img2_, lines2, pts2, pts1)

    imgLeftRectified = cv2.warpPerspective(imgLeft, h1,
                                           (img1.shape[1], img1.shape[0]))
    imgRightRectified = cv2.warpPerspective(imgRight, h2,
                                            (img2.shape[1], img2.shape[0]))

    imgLeftRectifiedNoEpi = cv2.warpPerspective(img1, h1,
                                                (img1.shape[1], img1.shape[0]))
    imgRightRectifiedNoEpi = cv2.warpPerspective(
        img2, h2, (img2.shape[1], img2.shape[0]))

    return imgLeft, imgRight, imgLeftRectified, imgRightRectified, imgLeftRectifiedNoEpi, imgRightRectifiedNoEpi
Example #4
    def rectify_images(self, img_info):
        R, t, F = img_info
        _, H0, H1 = cv2.stereoRectifyUncalibrated(self.sorted_keypts_0,
                                                  self.sorted_keypts_1, F,
                                                  self.img0.shape[:2][::-1])
        print(10 * "-", "H0", 10 * "-")
        print(H0)
        print(24 * "-")

        print(10 * "-", "H1", 10 * "-")
        print(H1)
        print(24 * "-")

        if self.debug:
            self.disp_epipoles(self.img0, self.img1, F)

        # Recalculate F and keypoint locations while accounting for rectification
        F = (np.linalg.inv(H1)).T @ F @ np.linalg.inv(H0)
        keypts_0 = np.array([self.sorted_keypts_0], dtype=np.float32)
        keypts_1 = np.array([self.sorted_keypts_1], dtype=np.float32)
        self.sorted_keypts_0 = cv2.perspectiveTransform(
            keypts_0, H0)[0].astype(np.int32, copy=False)
        self.sorted_keypts_1 = cv2.perspectiveTransform(
            keypts_1, H1)[0].astype(np.int32, copy=False)

        # Rectify images
        img0 = cv2.warpPerspective(self.img0, H0, self.img0.shape[:2][::-1])
        img1 = cv2.warpPerspective(self.img1, H1, self.img1.shape[:2][::-1])

        # Display epipolar lines
        if self.debug:
            self.disp_epipoles(img0, img1, F)

        return img0, img1, F
Example #5
def stereo_rectification(img1,img2,sift_params,match_th =0.8):
    kp1,des1,kp2,des2,_,_ = sift_params
    
    index_params = dict(algorithm = 1, trees = 5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)
    
    # Store best matches (Lowe's ratio test)
    pts1 = []
    pts2 = []
    for i, (m, n) in enumerate(matches):
        if m.distance < match_th * n.distance:
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.float32(pts1)
    pts2 = np.float32(pts2)
    
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.RANSAC)

    # We select only inlier points
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    img_size = (img1.shape[1], img1.shape[0])  # (width, height)
    p, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, img_size)

    img1_corrected = cv2.warpPerspective(img1, H1, img_size)
    img2_corrected = cv2.warpPerspective(img2, H2, img_size)
    
    return img1_corrected, img2_corrected
Example #6
def solve(images):

    on = images[0]
    off = images[1]
    dist = distance(on, off)

    print 'decoding x'
    xcoords, width = decode_axis(on, off, images[2::2], 'x')
    print 'decoding y'
    ycoords, height = decode_axis(on, off, images[3::2], 'y')

    cam_points = []
    proj_points = []

    for i in xrange(on.shape[0]):
        print float(i) / on.shape[0]  # progress fraction
        for j in xrange(on.shape[1]):
            x = xcoords[i,j]
            y = ycoords[i,j]
            if dist[i,j] > 0.25:
                cam_points.append((i, j))
                proj_points.append((height - y - 1, x))

    print len(cam_points), 'points'

    print 'finding fundamental matrix'
    cam_array = np.array(cam_points) / 1024.0 - 1
    proj_array = np.array(proj_points) / 1024.0 - 1
    F, status = cv2.findFundamentalMat(cam_array, proj_array, cv.CV_FM_RANSAC)

    print 'stereo rectify'
    res, H1, H2 = cv2.stereoRectifyUncalibrated(cam_array, proj_array, F, (on.shape[1], on.shape[0]))
Example #7
def rectification(left, right, correct_kpts1, correct_kpts2, F):
    """Returns rectified images using the OpenCV rectification algorithm.

    We originally implemented our own rectification based on the paper
    "Physically-Valid View Synthesis by Image Interpolation" by Steven M.
    Seitz and Charles R. Dyer. That approach yields the rotation, translation
    and scale needed to de-rectify, but it performs far worse than OpenCV's
    built-in rectification and is not good enough for computing disparity,
    so those lines are commented out and we use OpenCV instead.
    """
    correct_kpts1 = np.array(correct_kpts1)
    correct_kpts1 = correct_kpts1.reshape((correct_kpts1.shape[0] * 2, 1))
    correct_kpts2 = np.array(correct_kpts2)
    correct_kpts2 = correct_kpts2.reshape((correct_kpts2.shape[0] * 2, 1))
    shape = (left.shape[1], left.shape[0])

    rectBool, H1, H2 = cv2.stereoRectifyUncalibrated(correct_kpts1,
                                                     correct_kpts2,
                                                     F,
                                                     shape,
                                                     threshold=1)
    R1 = cv2.warpPerspective(left, H1, shape)
    R2 = cv2.warpPerspective(right, H2, shape)

    return R1, R2
Example #8
def epipolar_rectify(imL,imR,show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)
    
    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors    
    
    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors        
    
    matches12 = match_descriptors(descriptors1, descriptors2,metric='hamming', cross_check=True)
    
    # skimage keypoints are (row, col); flip columns to (x, y) for OpenCV
    pts1 = keypoints1[matches12[:, 0], :][:, ::-1]
    pts2 = keypoints2[matches12[:, 1], :][:, ::-1]
    
    
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    
    res, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, (imL.shape[1], imL.shape[0]), 10)
    
    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)    
    
    return H1,H2
Example #9
def rectify_corresp_imgs(
    img_base: np.ndarray, img_corresp: np.ndarray,
    key_pts_base: np.array, key_pts_corresp: np.array
) -> tuple:
    """Rectify correspondent images considering theirs key points

    Args:
    - `img_base:np.ndarray`: Base image
    - `img_corresp:np.ndarray`: Correspondent image
    - `key_pts_base:np.array`: Base image key points
    - `key_pts_corresp:np.array`: Corresoondent image key points

    Return:
    - Both rectified image
    """
    fund_matrix, mask = cv.findFundamentalMat(key_pts_base, key_pts_corresp, cv.FM_LMEDS)

    # We select only inlier points
    key_pts_base = key_pts_base[mask.ravel() == 1]
    key_pts_corresp = key_pts_corresp[mask.ravel() == 1]

    # imgSize is (width, height)
    _, h1, h2 = cv.stereoRectifyUncalibrated(key_pts_base, key_pts_corresp, fund_matrix, (img_base.shape[1], img_base.shape[0]))

    ret_img_base = cv.warpPerspective(img_base, h1, (img_base.shape[1], img_base.shape[0]))
    ret_img_corresp = cv.warpPerspective(img_corresp, h2, (img_corresp.shape[1], img_corresp.shape[0]))

    return ret_img_base, ret_img_corresp
Example #11
def rectify_pair(image_left, image_right, viz=False):
    """Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """

    image_a_points, image_b_points = find_feature_points(image_left,
                                                         image_right)

    f_mat, mask = cv2.findFundamentalMat(image_a_points,
                                         image_b_points,
                                         cv2.RANSAC)
    imsize = (image_right.shape[1], image_right.shape[0])
    image_a_points = image_a_points[mask.ravel() == 1]
    image_b_points = image_b_points[mask.ravel() == 1]

    _, H1, H2 = cv2.stereoRectifyUncalibrated(image_a_points,
                                              image_b_points,
                                              f_mat, imsize)

    return f_mat, H1, H2
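A usage sketch for the contract described in the docstring (image variables are hypothetical): warping each image with its homography makes corresponding epipolar lines land on the same rows.

F, H_left, H_right = rectify_pair(image_left, image_right)
size = (image_left.shape[1], image_left.shape[0])  # (width, height)
rectified_left = cv2.warpPerspective(image_left, H_left, size)
rectified_right = cv2.warpPerspective(image_right, H_right, size)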
Example #12
def RectifyImage(img1, img2, left_points, right_points, F_matrix):
  # Estimate the rectifying homographies (imgSize is (width, height))
  (ex, h1, h2) = cv2.stereoRectifyUncalibrated(left_points, right_points, F_matrix, (img1.shape[1], img1.shape[0]))
  # Get combination transform and warp the image
  comb_trans = np.linalg.inv(h1).dot(h2)
  im_warp = cv2.warpPerspective(img1, comb_trans, (img2.shape[1], img1.shape[0]))
  return im_warp
Example #13
def EpipolarGeometry(pts1, pts2, F, maskF, FT, maskE):
    ############ to adapt ##########################
    img1 = cv.pyrDown(cv.imread('Images/leftT2.jpg', 0))
    img2 = cv.pyrDown(cv.imread('Images/rightT2.jpg', 0))
    #################################################
    r, c = img1.shape

    # Find the inlier points using the fundamental-matrix mask
    pts1F = pts1[maskF.ravel() == 1]
    pts2F = pts2[maskF.ravel() == 1]

    # Find the epipolar lines in the right image using the fundamental matrix
    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1F, pts2F)
    # Find the epipolar lines in the left image using the fundamental matrix
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
    plt.figure('Fright')
    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img6)
    plt.figure('Fleft')
    plt.subplot(121), plt.imshow(img4)
    plt.subplot(122), plt.imshow(img3)

    # Find the inlier points using the essential matrix
    pts1 = pts1[maskE.ravel() == 1]
    pts2 = pts2[maskE.ravel() == 1]
    # Find the epipolar lines in the right image using the essential matrix
    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, FT)
    lines1 = lines1.reshape(-1, 3)
    img5T, img6T = drawlines(img1, img2, lines1, pts1, pts2)
    plt.figure('FTright')
    plt.subplot(121), plt.imshow(img5T)
    plt.subplot(122), plt.imshow(img6T)
    # Find the epipolar lines in the left image using the essential matrix
    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, FT)
    lines2 = lines2.reshape(-1, 3)
    img3T, img4T = drawlines(img2, img1, lines2, pts2, pts1)
    plt.figure('FTleft')
    plt.subplot(121), plt.imshow(img4T)
    plt.subplot(122), plt.imshow(img3T)
    plt.show()

    # Compute the homographies that map corresponding epipolar lines
    # to the same image rows
    retval, H1, H2 = cv.stereoRectifyUncalibrated(pts1, pts2, F, (c, r))
    print('H1\n', H1)
    print('H2\n', H2)
    # Warp both images using the homography matrices
    im_dst1 = cv.warpPerspective(img1, H1, (c, r))
    im_dst2 = cv.warpPerspective(img2, H2, (c, r))
    cv.namedWindow('left', 0)
    cv.imshow('left', im_dst1)
    cv.namedWindow('right', 0)
    cv.imshow('right', im_dst2)
    cv.waitKey(1)
Example #14
def hartleyRectify(points1, points2, imgSize, M1, M2, D1, D2, F = None):
    F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 3, 0.99)
    #print 'mask\n', mask
    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        points1, points2, F, imgSize)
    retval, M1i = cv2.invert(M1)
    retval, M2i = cv2.invert(M2)
    R1, R2 = np.dot(np.dot(M1i, H1), M1), np.dot(np.dot(M2i, H2), M2)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, M1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, M2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), F
Example #15
def do_rectification(intrinsic_matrix,
                     distortion_coeffs,
                     refined_mtx,
                     rectify_F,
                     size,
                     left_img,
                     left_idx,
                     left_pts,
                     right_img,
                     right_idx,
                     right_pts,
                     prefix="remap",
                     p=dflt_params):
    img_w, img_h = size
    # No threshold because we selected only inliers, above
    cal_success, H1, H2 = cv.stereoRectifyUncalibrated(left_pts,
                                                       right_pts,
                                                       rectify_F,
                                                       (img_w, img_h),
                                                       threshold=3.0)
    if not cal_success:
        raise ValueError("Failed to rectify images.")
    print_message(f"Calculated Homography Matrices:\nH1: {H1}\nH2: {H2}",
                  params=p)

    # See last comment in https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#initundistortrectifymap
    # Also slide 25: http://ece631web.groups.et.byu.net/Lectures/ECEn631%2014%20-%20Calibration%20and%20Rectification.pdf
    R1 = np.linalg.inv(intrinsic_matrix).dot(H1).dot(intrinsic_matrix)
    R2 = np.linalg.inv(intrinsic_matrix).dot(H2).dot(intrinsic_matrix)
    print_message(f"R Matrices for undistort mappings\nR1: {R1}\nR2: {R2}")

    map1_1, map1_2 = cv.initUndistortRectifyMap(intrinsic_matrix,
                                                distortion_coeffs, R1,
                                                refined_mtx, (img_w, img_h),
                                                cv.CV_32FC1)
    map2_1, map2_2 = cv.initUndistortRectifyMap(intrinsic_matrix,
                                                distortion_coeffs, R2,
                                                refined_mtx, (img_w, img_h),
                                                cv.CV_32FC1)

    print_message(f"Rectify maps:\nmap1_1: {map1_1}\nmat1_2: {map1_2} ",
                  params=p)
    print_message(f"Rectify maps:\nmap2_1: {map2_1}\nmat2_2: {map2_2} ",
                  params=p)

    rectified_images = []
    left_remap = cv.remap(left_img, map1_1, map1_2, cv.INTER_LANCZOS4)
    rectified_images.append(
        (left_remap, f"{prefix}_{left_idx}-{right_idx}.bmp"))
    right_remap = cv.remap(right_img, map2_1, map2_2, cv.INTER_LANCZOS4)
    rectified_images.append(
        (right_remap, f"{prefix}_{right_idx}-{left_idx}.bmp"))

    output = [cv_save(f, i, params=p) for i, f in rectified_images]
    return output
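The R1/R2 construction above relies on the identity referenced in the linked comments: a pixel-space homography H behaves like the camera-space rotation R = inv(K) . H . K, which is why H can be handed to initUndistortRectifyMap as a rectifying rotation. A hypothetical round-trip check:

import numpy as np

K = np.array([[700., 0., 320.],
              [0., 700., 240.],
              [0., 0., 1.]])        # hypothetical intrinsics
H = np.eye(3)                       # stand-in rectifying homography
R = np.linalg.inv(K).dot(H).dot(K)  # homography -> rotation
assert np.allclose(K.dot(R).dot(np.linalg.inv(K)), H)  # mapping back recovers H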
Example #16
def rectify_pair(image_left, image_right, viz=False):
    """Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """
    sift = cv2.SIFT()
    height, width, depth = image_left.shape
    # find features and descriptors
    kp1, des1 = sift.detectAndCompute(image_left, None)
    kp2, des2 = sift.detectAndCompute(image_right, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    pts1 = []
    pts2 = []
    # ratio scientifically chosen to be best
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.65*n.distance:
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.float32(pts1)
    pts2 = np.float32(pts2)
    fMat, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC, 3, 0.99)
    # cv2.FM_LMEDS) # this appears to work the same
    h1 = np.empty((3, 3))
    h2 = np.empty((3, 3))
    cv2.stereoRectifyUncalibrated(
        pts1.flatten(), pts2.flatten(), fMat,
        (width, height), h1, h2, threshold=3)
    return fMat, h1, h2
Example #17
def hartleyRectify(points1, points2, imgSize, M1, M2, D1, D2, F):
    # F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 3, 0.99)
    # print 'mask\n', mask
    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        points1, points2, F, imgSize)
    retval, M1i = cv2.invert(M1)
    retval, M2i = cv2.invert(M2)
    R1, R2 = np.dot(np.dot(M1i, H1), M1), np.dot(np.dot(M2i, H2), M2)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, M1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, M2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), F
Example #18
def rectify_stereo_pair_uncalibrated(imgL, imgR, threshold):
    height, width = imgL.shape[:2]

    F, mask, left_points, right_points = findFundementalMatrix(imgL, imgR, threshold)

    # linesL, linesR = calcualteEpilines(left_points, right_points, F)
    # img5, img6 = drawlines(imgL.copy(), imgR.copy(), linesL, left_points, right_points)
    # img3, img4 = drawlines(imgR.copy(), imgL.copy(), linesR, right_points, left_points)
    # pyplot.subplot(121), pyplot.imshow(img5)
    # pyplot.subplot(122), pyplot.imshow(img3)
    # pyplot.show()

    # Rectify the images
    ret, h_left, h_right = cv2.stereoRectifyUncalibrated(left_points, right_points, F,
                                                         (imgL.shape[1], imgL.shape[0]))

    # S = rectify_shearing(h_left, h_right, (imgL.shape[1], imgL.shape[0]))
    # h_left = S.dot(h_left)

    # Apply the rectification transforms to the images
    # camera_matrix = calibrator.camera_matrix
    # distortion = calibrator.distortion_coeff
    # imgsize = (imgL.shape[1], imgL.shape[0])
    # map1x, map1y, map2x, map2y = remap(camera_matrix, distortion, h_left, h_right, imgsize)
    #
    # rectified_left = cv2.remap(imgL, map1x, map1y,
    #                            interpolation=cv2.INTER_LINEAR)
    #
    # rectified_right = cv2.remap(imgR, map2x, map2y,
    #                             interpolation=cv2.INTER_LINEAR)

    rectified_left = cv2.warpPerspective(imgL, h_left, (width, height), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    rectified_right = cv2.warpPerspective(imgR, h_right, (width, height), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

    # ## DRAW RECALCULATED EPILINES ##
    F, mask, left_points, right_points = findFundementalMatrix(rectified_left, rectified_right, 0.8)
    #
    # linesL, linesR = calcualteEpilines(left_points, right_points, F)
    # rectified_left, img6 = drawlines(rectified_left.copy(), rectified_right.copy(), linesL, left_points, right_points)
    # rectified_right, img4 = drawlines(rectified_right.copy(), rectified_left.copy(), linesR, right_points, left_points)
    # pyplot.subplot(121), pyplot.imshow(rectified_left)
    # pyplot.subplot(122), pyplot.imshow(rectified_right)
    # pyplot.show()

    ## Display rectified images ##
    cv2.imshow('Left RECTIFIED', rectified_left)
    cv2.imshow('Right RECTIFIED', rectified_right)
    pyplot.show()
    cv2.waitKey(0)

    return rectified_left, rectified_right
Example #19
def rectify_pair(image_left, image_right, viz=False):
    """Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """

    img1 = image_left  # queryimage # left image
    img2 = image_right  # trainimage # right image

    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    # ratio test as per Lowe's paper
    for m, n in matches:
        if m.distance / n.distance < .75:
            good.append(m)

    pts1 = np.float32([kp1[m.queryIdx].pt for m in good])
    pts2 = np.float32([kp2[m.trainIdx].pt for m in good])

    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    pts1 = pts1.reshape(-1, 1, 2)
    pts2 = pts2.reshape(-1, 1, 2)
    height, width = img1.shape[:2]
    r, H_left, H_right = cv2.stereoRectifyUncalibrated(pts1, pts2, F,
                                                       (width, height))

    return F, H_left, H_right
Example #20
    def stereo_calibrate_two_homography_uncalib(self):
        """Calibrate camera and construct Homography."""
        # init camera calibrations
        rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_l, self.img_shape, None, None)
        rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_r, self.img_shape, None, None)

        # config
        flags = 0
        #flags |= cv2.CALIB_FIX_ASPECT_RATIO
        flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        #flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        #flags |= cv2.CALIB_ZERO_TANGENT_DIST
        flags |= cv2.CALIB_RATIONAL_MODEL
        #flags |= cv2.CALIB_FIX_K1
        #flags |= cv2.CALIB_FIX_K2
        #flags |= cv2.CALIB_FIX_K3
        #flags |= cv2.CALIB_FIX_K4
        #flags |= cv2.CALIB_FIX_K5
        #flags |= cv2.CALIB_FIX_K6
        stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT +
                                cv2.TERM_CRITERIA_EPS, 100, 1e-5)

        # stereo calibration procedure
        ret, self.M1, self.d1, self.M2, self.d2, R, T, E, F = cv2.stereoCalibrate(
            self.objpoints,
            self.imgpoints_l,
            self.imgpoints_r,
            self.M1,
            self.d1,
            self.M2,
            self.d2,
            self.img_shape,
            criteria=stereocalib_criteria,
            flags=flags)

        assert ret < 1.0, "[ERROR] Calibration RMS error < 1.0 (%i). Re-try image capture." % (
            ret)
        print("[OK] Calibration successful w/ RMS error=" + str(ret))

        F_test, mask = cv2.findFundamentalMat(
            np.array(self.imgpoints_l).reshape(-1, 2),
            np.array(self.imgpoints_r).reshape(-1, 2), cv2.FM_RANSAC,
            2)  # try ransac and other methods too.

        res, self.H1, self.H2 = cv2.stereoRectifyUncalibrated(
            np.array(self.imgpoints_l).reshape(-1, 2),
            np.array(self.imgpoints_r).reshape(-1, 2), F_test, self.img_shape,
            2)
Example #21
def calculateDisparity(pointsImage_L, pointsImage_R, focal_l, imageLeft,
                       imageRight):
    """
    Retifica as imagens imageLeft e imageRight utilizandos o método stereoRectifyUncalibrated da openCV
    :param pointsImage_L: Pontos em coordenadas em pixels da imagem da esquerda
    :param pointsImage_R: Pontos em coordenadas em pixels da imagem da direita
    :param focal_l: Distância focal da câmera da esquerda
    :param imageLeft: Caminho da imagem da esquerda
    :param imageRight: Caminho da imagem da esquerda
    :return disparity, depth, depthplot: Mapa de disparidade, mapa de profundidade, mapa de profundidade normalizado
    """

    imSL = cv2.resize(imageLeft, (640, 480))

    imSR = cv2.resize(imageRight, (640, 480))

    # The matched points are assumed to be in the 640x480 resized coordinates
    fundamental_matrix, inliers = cv2.findFundamentalMat(pointsImage_L,
                                                         pointsImage_R,
                                                         method=cv2.FM_LMEDS)

    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        pointsImage_L,
        pointsImage_R,
        fundamental_matrix,
        imgSize=(640, 480),
        threshold=0,
    )

    imgL_undistorted = cv2.warpPerspective(imSL, H1, (640, 480))
    #plt.imshow(imgL_undistorted,'gray')
    #plt.show()
    imgR_undistorted = cv2.warpPerspective(imSR, H2, (640, 480))
    #plt.imshow(imgR_undistorted,'gray')
    #plt.show()

    #  perspective transformation matrix
    #  https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0
    Q = np.float64([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, focal_l * 0.05, 0],
                    [0, 0, 0, 1]])

    stereo = cv2.StereoBM_create(numDisparities=48, blockSize=5)
    disparity = stereo.compute(imgL_undistorted, imgR_undistorted)

    depth = cv2.reprojectImageTo3D(disparity, Q)
    depthplot = cv2.normalize(depth, depth, 0, 255, cv2.NORM_MINMAX,
                              cv2.CV_8UC1)

    return disparity, depth, depthplot
Example #22
def rectify_pair(image_left, image_right, viz=False):
    """
    Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """
    # Extract features
    sift = cv2.SIFT()
    kp_left, desc_left = sift.detectAndCompute(image_left, None)
    kp_right, desc_right = cv2.SIFT().detectAndCompute(image_right, None)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=TREES)
    search_params = dict(checks=CHECKS)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(desc_left, desc_right, k=KNN_ITERS)

    # Store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < LOWE_RATIO * n.distance:
            good.append(m)

    # Pick out the left and right points from the good matches
    pts_left = np.float32([kp_left[m.queryIdx].pt
                           for m in good]).reshape(-1, 1, 2)
    pts_right = np.float32([kp_right[m.trainIdx].pt
                            for m in good]).reshape(-1, 1, 2)

    # Compute the fundamental matrix
    F, mask = cv2.findFundamentalMat(pts_left, pts_right, cv2.FM_RANSAC)
    pts_left = pts_left[mask.ravel() == 1]
    pts_right = pts_right[mask.ravel() == 1]

    # Rectify the images
    height, width, _ = image_left.shape
    _, h1, h2 = cv2.stereoRectifyUncalibrated(pts_left, pts_right, F,
                                              (width, height))

    # Return the fundamental matrix,
    # the homography for warping the left image,
    # and the homography for warping the right image
    return F, h1, h2
Example #23
def rectify_pair(image_left, image_right, viz=False):
    """Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(image_left, None)
    kp2, des2 = sift.detectAndCompute(image_right, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.68 * n.distance:
            good.append(m)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good])
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good])

    # find the fundamental matrix
    F, mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.RANSAC, 3, 0.99)
    src_pts = src_pts.flatten()
    dst_pts = dst_pts.flatten()

    # rectify the images, produce the homographies: H_left and H_right
    retval, H_left, H_right = cv2.stereoRectifyUncalibrated(
        src_pts, dst_pts, F, (image_left.shape[1], image_left.shape[0]))

    return F, H_left, H_right
Example #24
def get_rectified_stereo(im_left, im_right):
    imsize = im_left.shape[1], im_left.shape[0]  # (width, height)

    pts1, pts2, _ = get_sift_matches(im_left, im_right)
    F, pts1_inliers, pts2_inliers = get_fundamental_mat(pts1, pts2)

    retval, H1, H2 = cv2.stereoRectifyUncalibrated(pts1_inliers, pts2_inliers,
                                                   F, imsize)

    #     retval, H1, H2 = cv2.stereoRectifyUncalibrated(pts1.astype(int32),
    #                                                    pts2.astype(int32),
    #                                                    F, imsize)

    assert retval, 'failed to estimate homographies for stereo rectification'
    im_left_rect = cv2.warpPerspective(im_left, H1, imsize)
    im_right_rect = cv2.warpPerspective(im_right, H2, imsize)

    return im_left_rect, im_right_rect
Example #25
def rectify_images(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    pts1 = []
    pts2 = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.array(pts1)
    pts2 = np.array(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
    pts1 = pts1[:, :][mask.ravel() == 1]
    pts2 = pts2[:, :][mask.ravel() == 1]

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)

    p1fNew = pts1.reshape((pts1.shape[0] * 2, 1))
    p2fNew = pts2.reshape((pts2.shape[0] * 2, 1))

    retBool, rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(
        p1fNew, p2fNew, F, (img1.shape[1], img1.shape[0]))

    dst11 = cv2.warpPerspective(img1, rectmat1, (img1.shape[1], img1.shape[0]))
    dst22 = cv2.warpPerspective(img2, rectmat2, (img2.shape[1], img2.shape[0]))
    #plt.imshow(dst22)
    return dst11, dst22
Example #26
def rectify_images_sift(image_A, image_B, window_size=16, stride=8, method="greedy", name="p1"):
    """Rectify two stereo images."""
    print("Finding matching points")
    match_A, match_B = compute_match_sift(image_A, image_B, method=method)

    print("Finding Fundamantel Matrix")
    F, mask = cv2.findFundamentalMat(match_A, match_B)

    print("Computing homography")
    ret, H1, H2 = cv2.stereoRectifyUncalibrated(match_A, match_B, F, image_A.shape[0:2])

    print("Rectifying images")
    new_img_A = cv2.warpPerspective(image_A, H1, image_A.shape[0:2])
    new_img_B = cv2.warpPerspective(image_B, H2, image_A.shape[0:2])

    cv2.imwrite("output/rect_sift_" + method + "_" + name + "_a" + ".png", new_img_A)
    cv2.imwrite("output/rect_sift_" + method + "_" + name + "_b" + ".png", new_img_B)

    return new_img_A, new_img_B
Example #27
def rectifyAndPlot(img1_path, img2_path, F, pts1, pts2, openCV_F=False):
    img1_gray = cv.imread(img1_path, cv.IMREAD_GRAYSCALE)
    img2_gray = cv.imread(img2_path, cv.IMREAD_GRAYSCALE)

    ret_bool, rectmat1, rectmat2 = cv.stereoRectifyUncalibrated(pts1, pts2, F, (img1_gray.shape[1], img1_gray.shape[0]))
    print(rectmat1)
    rectmat1_inv = np.linalg.inv(rectmat1)
    rectmat2 = rectmat1_inv.dot(rectmat2)
    dst2 = cv.warpPerspective(img2_gray, rectmat2, (img1_gray.shape[1], img1_gray.shape[0]))
    out = np.concatenate([img1_gray, dst2], axis=1)
    plt.imshow(out)
    plt.show()
    if (img2_path == '/content/drive/My Drive/Colab Notebooks/A4/I2.jpg'):
        if openCV_F:
            cv.imwrite('/content/drive/My Drive/Colab Notebooks/A4/I1-I2-openCV-rectify.png', out)
        else:
            cv.imwrite('/content/drive/My Drive/Colab Notebooks/A4/I1-I2-rectify.png', out)
    else:
        if openCV_F:
            cv.imwrite('/content/drive/My Drive/Colab Notebooks/A4/I1-I3-openCV-rectify.png', out)
        else:
            cv.imwrite('/content/drive/My Drive/Colab Notebooks/A4/I1-I3-rectify.png', out)
Example #28
def rectifyWrapper(imgL, imgR, lines1, lines2, pts1, pts2, F):
    
    # We need to apply the perspective transformation
    # e.g. http://www.pyimagesearch.com/2014/05/05/building-pokedex-python-opencv-perspective-warping-step-5-6/
        
    # Transform the images so the matching horizontal lines will be horizontal 
    # with each other between images, http://scientiatertiidimension.blogspot.ca/2013/11/playing-with-disparity.html
    # (hint: cv2.stereoRectifyUncalibrated, cv2.warpPerspective).
    imgsize = (imgL.shape[1], imgL.shape[0])
    
    # transpose
    pts1 = pts1.T
    pts2 = pts2.T
    
    try:
        retval, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, imgsize)
    except cv2.error:
        print "cv2.stereoRectifyUncalibrated failed; falling back to the custom rectify_uncalibrated()"
        H1, H2 = rectify_uncalibrated(lines1, lines2, pts1, pts2, F, imgsize)
        
    # http://stackoverflow.com/questions/19704369/stereorectifyuncalibrated-not-accepting-same-array-as-findfundamentalmat
    # OpenCV Error: Assertion failed (CV_IS_MAT(_points1) && CV_IS_MAT(_points2) && ... 
    
    # correct for shearing
    # http://scicomp.stackexchange.com/questions/2844/shearing-and-hartleys-rectification
    S = rectify_shearing(H1, H2, imgsize)
    H1 = S.dot(H1)

    
    # Init Undistort, Map (mapx / mapy)
    img1, map1x, map1y, img2, map2x, map2y = undistortMap(H1, H2, imgL, imgR)
   
    # Remap
    rimg1, rimg2 = remap(img1, map1x, map1y, img2, map2x, map2y)
    
    return rimg1, rimg2
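The rectify_shearing helper is not shown in this listing; the sketch below is one common implementation of the shearing correction from the scicomp.stackexchange link above (an assumption about what the helper does, not the snippet author's actual code, and it takes a single homography rather than the pair). It chooses an affine shear that keeps the warped image's horizontal and vertical mid-lines perpendicular and equally scaled:

import numpy as np

def rectify_shearing_sketch(H, imgsize):
    # Hypothetical stand-in for rectify_shearing(); operates on one homography
    w, h = imgsize
    # Midpoints of the four image edges, as homogeneous points
    a = np.array([(w - 1) / 2.0, 0.0, 1.0])
    b = np.array([w - 1.0, (h - 1) / 2.0, 1.0])
    c = np.array([(w - 1) / 2.0, h - 1.0, 1.0])
    d = np.array([0.0, (h - 1) / 2.0, 1.0])
    # Warp the midpoints with H and dehomogenize
    a, b, c, d = [H.dot(p) for p in (a, b, c, d)]
    a, b, c, d = [p[:2] / p[2] for p in (a, b, c, d)]
    x = b - d  # warped horizontal mid-line
    y = c - a  # warped vertical mid-line
    # Shear coefficients that restore perpendicularity and aspect ratio
    k1 = (h * h * x[1] * x[1] + w * w * y[1] * y[1]) / (h * w * (x[1] * y[0] - x[0] * y[1]))
    k2 = (h * h * x[0] * x[1] + w * w * y[0] * y[1]) / (h * w * (x[0] * y[1] - x[1] * y[0]))
    if k1 < 0:
        k1, k2 = -k1, -k2
    return np.array([[k1, k2, 0.0],
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]])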
Example #30
F, inlier_mask = cv2.findFundamentalMat(best_kp1, best_kp2, cv2.FM_7POINT)
inlier_mask = inlier_mask.flatten()

#points within epipolar lines
inlier_kp1 = best_kp1[inlier_mask == 1]
inlier_kp2 = best_kp2[inlier_mask == 1]

inlier_matches = best_matches[inlier_mask == 1]

img3 = cv2.drawMatches(I1, kp1, I2, kp2, inlier_matches, flags=2, outImg=None)
plt.imshow(img3), plt.show()

thresh = 1

_, H1, H2 = cv2.stereoRectifyUncalibrated(np.float32(inlier_kp1),
                                          np.float32(inlier_kp2), F,
                                          I1gray.shape[::-1], thresh)

I1_rect = np.float32([[[0, 0], [I1.shape[1], 0], [I1.shape[1], I1.shape[0]],
                       [0, I1.shape[0]]]])
warped_I1_rect = cv2.perspectiveTransform(I1_rect, H1)

I2_rect = np.float32([[[0, 0], [I2.shape[1], 0], [I2.shape[1], I2.shape[0]],
                       [0, I2.shape[0]]]])
warped_I2_rect = cv2.perspectiveTransform(I2_rect, H2)

min_x_I1 = min(warped_I1_rect[0][0][0], warped_I1_rect[0][1][0],
               warped_I1_rect[0][2][0], warped_I1_rect[0][3][0])
min_x_I2 = min(warped_I2_rect[0][0][0], warped_I2_rect[0][1][0],
               warped_I2_rect[0][2][0], warped_I2_rect[0][3][0])
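A hypothetical continuation of the snippet above: with the warped corner extents known, both homographies can be translated so the rectified images start at x = 0 before warping (the names below are illustrative, not from the original):

offset = min(min_x_I1, min_x_I2)
T = np.array([[1.0, 0.0, -offset],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
I1_warped = cv2.warpPerspective(I1, T.dot(H1), (I1.shape[1], I1.shape[0]))
I2_warped = cv2.warpPerspective(I2, T.dot(H2), (I2.shape[1], I2.shape[0]))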
Example #31
def match(detector_name, descriptor_name, matcher_name, image1_file, image2_file):
    
    print "\n###############################\n"
    print detector_name+"\t"+descriptor_name+"\t"+matcher_name
#     Read images
    image1                     = cv2.imread(image1_file, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    image2                     = cv2.imread(image2_file, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    
    #      -- Step 1: Compute keypoints      
    if (detector_name == "SIFT"):
        #configurable sift constructor -> http://docs.opencv.org/modules/nonfree/doc/feature_detection.html
        BEST_FEATURES       = 100  #default 0 meaning get all
        OCTAVE_LAYERS       = 5    #default 3
        CONTRAST_THRESHOLD  = 0.04 #default 0.04, larger -> less points
        EDGE_THRESHOLD      = 10   #default 10,   smaller -> less points
        SIGMA               = 1.6  #default 1.6.  meaning the level of gaussian blur
        detector            = cv2.SIFT(BEST_FEATURES,OCTAVE_LAYERS,CONTRAST_THRESHOLD,EDGE_THRESHOLD,SIGMA)
    elif (detector_name == "SURF"):
        HESSIAN_THRESHOLD   = 500  #larger -> less points
        OCTAVES             = 4    #default 4
        OCTAVE_LAYERS       = 2    #default 2
        EXTENDED            = True #default true, ie use 128 descriptor otherwise 64
        UPRIGHT             = False#default false, ie compute orientation
        detector            = cv2.SURF(HESSIAN_THRESHOLD,OCTAVES,OCTAVE_LAYERS,EXTENDED,UPRIGHT)        
    else:
        detector                  = cv2.FeatureDetector_create(detector_name)
    t1                         = time.time()
    
    keypoints1                 = detector.detect(image1)
    
    t2                         = time.time()
    print "Time to get keypoints for query image: ", str(t2 - t1)
    keypoints2                 = detector.detect(image2)

#   draw sift key points with sizes and their oritentations
    img1 = cv2.drawKeypoints(image1,keypoints1,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite('sift_keypoints1.jpg',img1)
    img2 = cv2.drawKeypoints(image2,keypoints2,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imwrite('sift_keypoints2.jpg',img2)
    
    #      -- Step 2: Compute descriptors
    descriptor                 = cv2.DescriptorExtractor_create(descriptor_name)
    
    t1                         = time.time()
    (keypoints1, descriptors1) = descriptor.compute(image1, keypoints1)
    t2                         = time.time()
    print "Time to get descriptors for query image: ", str(t2 - t1)
    (keypoints2, descriptors2) = descriptor.compute(image2, keypoints2)
    
#===============================================================================
# #     -- Step 3: Matching descriptor 
    t1 = time.time()
    matcher = cv2.DescriptorMatcher_create(matcher_name)
    bimatches = matcher.knnMatch(descriptors1, descriptors2,2) #for each feature, find the 2 best matches when possible

    #Filter out a match if the two best candidates are too close; we'd rather not keep those points.
    RATIO_THRESHOLD = 0.9 # bigger -> more points remain, max = 1 ie don't discard any.
    matches = []
    for m in bimatches:
        if(len(m)==2):
            if(m[0].distance <= RATIO_THRESHOLD*m[1].distance):
                matches.append(m[0]) #knnMatch sorts candidates so the first always has the smaller distance
        elif(len(m)==1):
            matches.append(m[0])

            
#     
#     t2 = time.time()
#     
# #     print number of matches
#     print "time: ", t2-t1
#     print "detector: ", detector_name, " descriptor extractor: ", descriptor_name, " matcher: ", matcher_name
#     print '#matches:', len(matches)
#===============================================================================
    t3 = time.time()
    print "Time to match: ", str(t3  - t1)
#     -- Step 4: Draw matches on image

    print image1.shape
    h1, w1          = image1.shape[:2]
    print w1, h1
    h2, w2                   = image2.shape[:2]
    view1                    = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    view1[:h1, :w1]          = image1
    view1[:h2, w1:]          = image2
    view                     = copy.copy(view1)
    view_trans_FM            = copy.copy(view)
    view_final_match_FM      = copy.copy(view)
    view_final_match_orig_FM = copy.copy(view)
    view_trans_HM            = copy.copy(view)
    view_final_match_HM      = copy.copy(view)
    view_final_match_orig_HM = copy.copy(view)
 

    
#     Draw all matches between two images
    for m in matches:
        color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        Qpt   = (int(keypoints1[m.queryIdx].pt[0]), int(keypoints1[m.queryIdx].pt[1]))
        Tpt   = (int(keypoints2[m.trainIdx].pt[0])+w1, int(keypoints2[m.trainIdx].pt[1]))
        
        cv2.line(view1, Qpt, Tpt, color, 3)
        cv2.circle(view1, Qpt, 10,color, 3)
        cv2.circle(view1, Tpt, 10,color, 3)   
        
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_all_matches.jpg", view1)     

    dist     = [m.distance for m in matches]
    
    min_dist = min(dist)
    avg_dist = (sum(dist) / len(dist))
    print 'distance: min: %.3f' % min_dist
    print 'distance: mean: %.3f' % avg_dist
    print 'distance: max: %.3f' % max(dist)
    
    # keep only the reasonable matches
    # good_matches = heapq.nsmallest(20, matches, key=lambda match: match.distance)
    good_matches = [m for m in matches if m.distance < avg_dist*0.8]
    # good_matches.sort(cmp=None, key=distance, reverse=False);
    # sorted(good_matches, key=lambda match: match.distance)
    
    print "Number of match is: "+str(len(matches))
    print "Number of good match is: "+str(len(good_matches))
    
    src_points  = []
    dest_points = []
    for m in good_matches:
        color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        Qpt   = (int(keypoints1[m.queryIdx].pt[0]), int(keypoints1[m.queryIdx].pt[1]))
        Tpt   = (int(keypoints2[m.trainIdx].pt[0]+w1), int(keypoints2[m.trainIdx].pt[1]))

        src_points.append(keypoints1[m.queryIdx].pt)
        dest_points.append(keypoints2[m.trainIdx].pt)        

        cv2.line(view, Qpt, Tpt, color, 3)
        cv2.circle(view, Qpt, 10,color, 3)
        cv2.circle(view, Tpt, 10,color, 3)

    # Draw out good matches
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_good_matches.jpg", view)

    if len(src_points) > 8:
        #Compute the homography with RANSAC
        
        F, M  = cv2.findFundamentalMat(np.array(src_points, dtype='float32'), np.array(dest_points, dtype='float32'), cv.CV_FM_RANSAC, 3, 0.99)
        #stereoRectifyUncalibrated uses a different format for points.
        src_pts = []
        des_pts = []
        for i in range(len(src_points)):
            src_pts.append(src_points[i][0])
            src_pts.append(src_points[i][1])
            des_pts.append(dest_points[i][0])
            des_pts.append(dest_points[i][1])
    
        r, H1, H2 = cv2.stereoRectifyUncalibrated(np.array(src_pts, dtype='float32'), np.array(des_pts, dtype='float32'), F, (w1, h1), threshold=5)
    
        srcTri        = np.array([(0,0), (w1,0), (w1,h1), (0,h1)],dtype='float32')
        srcTri        = np.array([srcTri])
        
        height, width = view.shape[:2] 
        desTri_FM     = cv2.perspectiveTransform(srcTri, H2) #Result from stereoRectifyUncalibrated
    
        if isGoodQuad(desTri_FM[0]):
        # if True:
    
            # //-- Draw lines between the corners (the mapped object in the scene - image_2 )
            cv2.line(view_trans_FM, (int(desTri_FM[0][0][0]) + w1, int(desTri_FM[0][0][1])), (int(desTri_FM[0][1][0]) + w1, int(desTri_FM[0][1][1])), (255,255,255), 4)
            cv2.line(view_trans_FM, (int(desTri_FM[0][1][0]) + w1, int(desTri_FM[0][1][1])), (int(desTri_FM[0][2][0]) + w1, int(desTri_FM[0][2][1])), (255,255,255), 4)
            cv2.line(view_trans_FM, (int(desTri_FM[0][2][0]) + w1, int(desTri_FM[0][2][1])), (int(desTri_FM[0][3][0]) + w1, int(desTri_FM[0][3][1])), (255,255,255), 4)
            cv2.line(view_trans_FM, (int(desTri_FM[0][3][0]) + w1, int(desTri_FM[0][3][1])), (int(desTri_FM[0][0][0]) + w1, int(desTri_FM[0][0][1])), (255,255,255), 4)
        
            cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_perspectiveTrans_FM.jpg", view_trans_FM)
        
            # Perform perspectiveTransform on all source points;
            dest_trans_points_FM = cv2.perspectiveTransform(np.array([src_points], dtype='float32'), H2)
            final_src_points  = []
            final_des_points  = []
            #filter out mismatched points
            for i in range(len(src_points)):
                des_pt   = dest_points[i]
                trans_pt_FM = dest_trans_points_FM[0][i]
                if math.hypot(des_pt[0] - trans_pt_FM[0], des_pt[1] - trans_pt_FM[1]) < 200:
                    final_src_points.append(src_points[i])
                    final_des_points.append(((int(trans_pt_FM[0])), int(trans_pt_FM[1])))
        
                    color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        
                    cv2.line(view_final_match_FM, (int(src_points[i][0]), int(src_points[i][1])), ((int(trans_pt_FM[0]))+w1, int(trans_pt_FM[1])), color, 3)
                    cv2.circle(view_final_match_FM, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
                    cv2.circle(view_final_match_FM, ((int(trans_pt_FM[0]))+w1, int(trans_pt_FM[1])), 10,color, 3)
        
                    #Draw original points on dest image
                    cv2.line(view_final_match_orig_FM, (int(src_points[i][0]), int(src_points[i][1])), ((int(des_pt[0]))+w1, int(des_pt[1])), color, 3)
        
                    cv2.circle(view_final_match_orig_FM, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
                    cv2.circle(view_final_match_orig_FM, ((int(des_pt[0]))+w1, int(des_pt[1])), 10,color, 3)
        
        
        
            print "Fundamental Metrix Final number of matches %d"% (len(final_src_points))
            if len(final_src_points) > 0:
                cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_FM_FinalMatches.jpg", view_final_match_FM)
                cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_FM_FinalMatches(orig).jpg", view_final_match_orig_FM)
    
    
        H,mask        = cv2.findHomography(np.array(src_points, dtype='float32'), np.array(dest_points, dtype='float32'), cv.CV_RANSAC)
        H             = np.array(H, dtype='float32')
        dest_trans_points_HM = cv2.perspectiveTransform(np.array([src_points], dtype='float32'), H)
        desTri_HM     = cv2.perspectiveTransform(srcTri, H) #result from homography
        if isGoodQuad(desTri_HM[0]):
        # if True:
    
            # //-- Draw lines between the corners (the mapped object in the scene - image_2 )    
            cv2.line(view_trans_HM, (int(desTri_HM[0][0][0]) + w1, int(desTri_HM[0][0][1])), (int(desTri_HM[0][1][0]) + w1, int(desTri_HM[0][1][1])), (0,204,0), 4)
            cv2.line(view_trans_HM, (int(desTri_HM[0][1][0]) + w1, int(desTri_HM[0][1][1])), (int(desTri_HM[0][2][0]) + w1, int(desTri_HM[0][2][1])), (0,204,0), 4)
            cv2.line(view_trans_HM, (int(desTri_HM[0][2][0]) + w1, int(desTri_HM[0][2][1])), (int(desTri_HM[0][3][0]) + w1, int(desTri_HM[0][3][1])), (0,204,0), 4)
            cv2.line(view_trans_HM, (int(desTri_HM[0][3][0]) + w1, int(desTri_HM[0][3][1])), (int(desTri_HM[0][0][0]) + w1, int(desTri_HM[0][0][1])), (0,204,0), 4)
        
            cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_perspectiveTrans_HM.jpg", view_trans_HM)
        
            # Perform perspectiveTransform on all source points;
            dest_trans_points_HM = cv2.perspectiveTransform(np.array([src_points], dtype='float32'), H)
            final_src_points  = []
            final_des_points  = []
            #filter out mismatched points
            for i in range(len(src_points)):
                des_pt   = dest_points[i]
                trans_pt_HM = dest_trans_points_HM[0][i]
                if math.hypot(des_pt[0] - trans_pt_HM[0], des_pt[1] - trans_pt_HM[1]) < 200:
                    final_src_points.append(src_points[i])
                    final_des_points.append(((int(trans_pt_HM[0])), int(trans_pt_HM[1])))
        
                    color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        
                    cv2.line(view_final_match_HM, (int(src_points[i][0]), int(src_points[i][1])), ((int(trans_pt_HM[0]))+w1, int(trans_pt_HM[1])), color, 3)
                    cv2.circle(view_final_match_HM, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
                    cv2.circle(view_final_match_HM, ((int(trans_pt_HM[0]))+w1, int(trans_pt_HM[1])), 10,color, 3)
        
                    #Draw original points on dest image
                    cv2.line(view_final_match_orig_HM, (int(src_points[i][0]), int(src_points[i][1])), ((int(des_pt[0]))+w1, int(des_pt[1])), color, 3)
                    cv2.circle(view_final_match_orig_HM, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
                    cv2.circle(view_final_match_orig_HM, ((int(des_pt[0]))+w1, int(des_pt[1])), 10,color, 3)
        
        
            print "Homography found Final number of matches %d"% (len(final_src_points))
            if len(final_src_points) > 0:
                cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_HM_FinalMatches.jpg", view_final_match_HM)
                cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_HM_FinalMatches(orig).jpg", view_final_match_orig_HM)


    print "\n###############################\n"
Example #32
def rectify (left, right, rmin=None):
  # Check image dimensions
  if left.shape != right.shape:
    raise ValueError ("left/right images must have the same dimensions")

  h,w,d = left.shape
  mask = ones((h,w), dtype=uint8)

  # Run a SURF detector on both images
  detector = cv2.SURF(0.)
  kp1, desc1 = detector.detect(uint8(left.mean(axis=2)).copy(), mask, False)
  kp2, desc2 = detector.detect(uint8(right.mean(axis=2)).copy(), mask, False)
  desc1 = desc1.reshape ((len(kp1), -1))
  desc2 = desc2.reshape ((len(kp2), -1))

  # Put descriptor responses in arrays
  resp1 = array([kp.response for kp in kp1])
  resp2 = array([kp.response for kp in kp2])

  #print ("Left : Found {0} descr. MIN/AVG/MAX:STD is {1} / {2} / {3} : {4}".format (len(kp1), resp1.min(), resp1.mean(), resp1.max(), resp1.std()))
  #print ("Right: Found {0} descr. MIN/AVG/MAX:STD is {1} / {2} / {3} : {4}".format (len(kp2), resp2.min(), resp2.mean(), resp2.max(), resp2.std()))
  #drawkp (left, kp1)

  # We want to keep only descriptors whose response is above the average (or above rmin when given)
  rmin1 = rmin
  rmin2 = rmin
  if rmin is None:
    rmin1 = resp1.mean()
    rmin2 = resp2.mean()

  iok1 = np.flatnonzero(resp1 > rmin1)
  iok2 = np.flatnonzero(resp2 > rmin2)
  kp1 = array(kp1)[iok1]
  kp2 = array(kp2)[iok2]
  desc1 = desc1[iok1, :].copy()
  desc2 = desc2[iok2, :].copy()

  # Match descriptors, see http://www.maths.lth.se/matematiklth/personal/solem/book.html for more info
  matchidx = -1 * ones ((len(desc1)), 'int')
  desc2t = desc2.transpose()
  dist_ratio = 0.6
  for i in range(len(desc1)):
    dotprods = dot (desc1[i,:], desc2t) * 0.9999  # scale slightly below 1 so arccos stays defined
    acdp = arccos (dotprods)
    index = argsort (acdp)

    if acdp[index[0]] < dist_ratio * acdp[index[1]]:
      matchidx[i] = index[0]

  # Only keep matched descriptors
  kp1 = [kp1[i] for i in range(len(matchidx)) if matchidx[i] >= 0]
  kp2 = [kp2[matchidx[i]] for i in range(len(matchidx)) if matchidx[i] >= 0]
  kp1a = array ([kp.pt for kp in kp1])
  kp2a = array ([kp.pt for kp in kp2])

  # Rectify images: stereoRectifyUncalibrated expects interleaved (x1, y1, x2, y2, ...)
  # point arrays and a (width, height) image size
  ff = cv2.findFundamentalMat (kp1a, kp2a)[0]
  rv, h1, h2 = cv2.stereoRectifyUncalibrated (kp1a.reshape ((-1)), kp2a.reshape((-1)), ff, (w, h))

  left_r = left.copy()
  # warp the right image into the left image's rectified frame so the left view stays fixed
  right_r = cv2.warpPerspective (right, np.dot(np.linalg.inv(h1), h2), (w,h))
  return left_r, right_r
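# Hypothetical usage of rectify() (the frame names are placeholders, not from
# the original snippet); both inputs must be color images of identical shape:
#   left_r, right_r = rectify(left_frame, right_frame)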
Example #33
matches = sorted(matches, key=lambda x: x.distance)

pts1 = []
pts2 = []
for mat in matches:
    idx1 = mat.queryIdx
    idx2 = mat.trainIdx
    pts1.append(kp1[idx1].pt)
    pts2.append(kp2[idx2].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)

# compute the fundamental matrix
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
# robust estimation of the rectification homographies
ret, HL, HR = cv2.stereoRectifyUncalibrated(pts1, pts2, F, (width, height))
dstL = cv2.warpPerspective(imgL, HL, (width, height))
dstR = cv2.warpPerspective(imgR, HR, (width, height))

#stereoBM
stereo = cv2.StereoBM_create(16)
disp = stereo.compute(dstL, dstR)

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(dstL, None)
kp2, des2 = orb.detectAndCompute(dstR, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
def match(detector_name, descriptor_name, matcher_name, image1_file, image2_file):
    
    print "\n###############################\n"
    print detector_name
#     Read images
    image1                     = cv2.imread(image1_file, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    image2                     = cv2.imread(image2_file, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    
    #      -- Step 1: Compute keypoints
    detector                   = cv2.FeatureDetector_create(detector_name)
    
    t1                         = time.time()
    
    keypoints1                 = detector.detect(image1)
    
    t2                         = time.time()
    print "Time to get keypoints for query image: ", str(t2 - t1)
    keypoints2                 = detector.detect(image2)
    
    #      -- Step 2: Compute descriptors
    descriptor                 = cv2.DescriptorExtractor_create(descriptor_name)
    
    t1                         = time.time()
    (keypoints1, descriptors1) = descriptor.compute(image1, keypoints1)
    t2                         = time.time()
    print "Time to get descriptors for query image: ", str(t2 - t1)
    (keypoints2, descriptors2) = descriptor.compute(image2, keypoints2)
    
#===============================================================================
# #     -- Step 3: Matching descriptor 
    t1 = time.time()
    matcher = cv2.DescriptorMatcher_create(matcher_name)
    matches = matcher.match(descriptors1, descriptors2)
#     
#     t2 = time.time()
#     
# #     print number of matches
#     print "time: ", t2-t1
#     print "detector: ", detector_name, " descriptor extractor: ", descriptor_name, " matcher: ", matcher_name
#     print '#matches:', len(matches)
#===============================================================================
    t3 = time.time()
    print "Time to match: ", str(t3  - t1)
#     -- Step 4: Draw matches on image

    print image1.shape
    h1, w1          = image1.shape[:2]
    print w1, h1
    h2, w2          = image2.shape[:2]
    view1           = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    view1[:h1, :w1] = image1
    view1[:h2, w1:] = image2
    
    
    view            = copy.copy(view1)
    
    view2           = copy.copy(view)
    view3           = copy.copy(view)
    view4           = copy.copy(view)

    
#     Draw all matches between two images
    for m in matches:
        color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        Qpt   = (int(keypoints1[m.queryIdx].pt[0]), int(keypoints1[m.queryIdx].pt[1]))
        Tpt   = (int(keypoints2[m.trainIdx].pt[0])+w1, int(keypoints2[m.trainIdx].pt[1]))
        
        cv2.line(view1, Qpt, Tpt, color, 3)
        cv2.circle(view1, Qpt, 10,color, 3)
        cv2.circle(view1, Tpt, 10,color, 3)   
        
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_all_matches.jpg", view1)     

    dist     = [m.distance for m in matches]
    
    min_dist = min(dist)
    avg_dist = (sum(dist) / len(dist))
    print 'distance: min: %.3f' % min_dist
    print 'distance: mean: %.3f' % avg_dist
    print 'distance: max: %.3f' % max(dist)
    
    # alternative threshold: half the mean distance
    # thres_dist = (sum(dist) / len(dist)) * 0.5
    
    # keep only the reasonable matches
    # good_matches = heapq.nsmallest(20, matches, key=lambda match: match.distance)
    good_matches = [m for m in matches if m.distance < avg_dist*0.8]
    # good_matches.sort(cmp=None, key=distance, reverse=False);
    # sorted(good_matches, key=lambda match: match.distance)
    
    print "Number of match is: "+str(len(matches))
    print "Number of good match is: "+str(len(good_matches))
    
    src_points  = []
    dest_points = []
    for m in good_matches:
        color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
        Qpt   = (int(keypoints1[m.queryIdx].pt[0]), int(keypoints1[m.queryIdx].pt[1]))
        Tpt   = (int(keypoints2[m.trainIdx].pt[0]), int(keypoints2[m.trainIdx].pt[1]))

        src_points.append(keypoints1[m.queryIdx].pt)
        dest_points.append(keypoints2[m.trainIdx].pt)        

        cv2.line(view, Qpt, (Tpt[0]+w1, Tpt[1]), color, 3)
        cv2.circle(view, Qpt, 10,color, 3)
        cv2.circle(view, (Tpt[0]+w1, Tpt[1]), 10,color, 3)

    # Draw out good matches
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_good_matches.jpg", view)

    #Compute the homography with RANSAC
    t1            = time.time()
    H,mask        = cv2.findHomography(np.array(src_points, dtype='float32'), np.array(dest_points, dtype='float32'), cv.CV_RANSAC)
    t3            = time.time()
    print "Time to perform geoverification: ", str(t3  - t1)
    
    F, M  = cv2.findFundamentalMat(np.array(src_points, dtype='float32'), np.array(dest_points, dtype='float32'), cv.CV_FM_RANSAC, 3, 0.99)

    src_pts = []
    des_pts = []
    for i in range(len(src_points)):
        src_pts.append(src_points[i][0])
        src_pts.append(src_points[i][1])
        des_pts.append(dest_points[i][0])
        des_pts.append(dest_points[i][1])

    r, H1, H2 = cv2.stereoRectifyUncalibrated(np.array(src_pts, dtype='float32'), np.array(des_pts, dtype='float32'), F, (image1.shape[1], image1.shape[0]), threshold=5)
    # r, H1, H2 = cv2.stereoRectifyUncalibrated(np.array((1,2,2,3,3,4,4,5), dtype='float32'), np.array((1,2,2,3,3,4,4,5), dtype='float32'), F, (2,2))

    H             = np.array(H, dtype='float32')
    srcTri        = np.array([(0,0), (w1,0), (w1,h1), (0,h1)],dtype='float32')
    srcTri        = np.array([srcTri])
    
    height, width = view.shape[:2] 
    desTri        = cv2.perspectiveTransform(srcTri, H2)

    # Draw lines between the corners (the mapped object in the scene, image_2)
    cv2.line(view2, (int(desTri[0][0][0]) + w1, int(desTri[0][0][1])), (int(desTri[0][1][0]) + w1, int(desTri[0][1][1])), (255,255,255), 4)
    cv2.line(view2, (int(desTri[0][1][0]) + w1, int(desTri[0][1][1])), (int(desTri[0][2][0]) + w1, int(desTri[0][2][1])), (255,255,255), 4)
    cv2.line(view2, (int(desTri[0][2][0]) + w1, int(desTri[0][2][1])), (int(desTri[0][3][0]) + w1, int(desTri[0][3][1])), (255,255,255), 4)
    cv2.line(view2, (int(desTri[0][3][0]) + w1, int(desTri[0][3][1])), (int(desTri[0][0][0]) + w1, int(desTri[0][0][1])), (255,255,255), 4)

    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_perspectiveTrans.jpg", view2)

    # Perform perspectiveTransform on all source points
    dest_trans_points = cv2.perspectiveTransform(np.array([src_points], dtype='float32'), H2)
    final_src_points  = []
    final_des_points  = []
    # filter out mismatched points
    for i in range(len(src_points)):
        des_pt   = dest_points[i]
        trans_pt = dest_trans_points[0][i]
        if math.hypot(des_pt[0] - trans_pt[0], des_pt[1] - trans_pt[1]) < 100:
            final_src_points.append(src_points[i])
            final_des_points.append(((int(trans_pt[0])), int(trans_pt[1])))

            color = tuple([np.random.randint(0, 255) for _ in xrange(3)])

            cv2.line(view3, (int(src_points[i][0]), int(src_points[i][1])), ((int(trans_pt[0]))+w1, int(trans_pt[1])), color, 3)
            cv2.circle(view3, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
            cv2.circle(view3, ((int(trans_pt[0]))+w1, int(trans_pt[1])), 10,color, 3)

            #Draw original points on dest image
            cv2.line(view4, (int(src_points[i][0]), int(src_points[i][1])), ((int(des_pt[0]))+w1, int(des_pt[1])), color, 3)
            cv2.circle(view4, (int(src_points[i][0]), int(src_points[i][1])), 10,color, 3)
            cv2.circle(view4, ((int(des_pt[0]))+w1, int(des_pt[1])), 10,color, 3)
 
    print "Final number of matches %d"% (len(final_src_points))
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_FinalMatches.jpg", view3)
    cv2.imwrite(detector_name+"_"+descriptor_name+"_"+matcher_name+"_FinalMatches(orig).jpg", view4)

    print "\n###############################\n"
Example #35
                    distCoeffs1 = distcoeffs1,
                    cameraMatrix2 = cameramatrix2,
                    distCoeffs2 = distcoeffs2,
                    R = R, T = T, E = E, F = F,
                    flags = sum(allflags))
    rms.append(out[0])

# <codecell>

plt.plot((rms))
# plt.ylim(ymax = 4)
plt.show()

# <codecell>

# cv2.stereoRectifyUncalibrated()  # placeholder cell: the call needs matched point arrays, F, and an image size

# <codecell>

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrix1 = cameramatrix1, 
                  distCoeffs1 = distcoeffs1, 
                  cameraMatrix2 = cameramatrix2, 
                  distCoeffs2 = distcoeffs2, 
                  imageSize = (image_width, image_height), 
                  R = R, T = T,
                  flags = 0 * cv2.CALIB_ZERO_DISPARITY,  # multiplied by 0, so the flag stays disabled
                  alpha = 1,
                  newImageSize = (1 * image_width, 1 * image_height)
                  )

# <codecell>
Example #36
def main():
    img1 = cv2.imread('./Dataset 2/im0.png')
    img2 = cv2.imread('./Dataset 2/im1.png')

    # # Scale the image
    # scale_percent = 10 # percent of original size
    # width = int(img1.shape[1] * scale_percent / 100)
    # height = int(img1.shape[0] * scale_percent / 100)
    # dim = (width, height)
    # img1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)
    # img2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)

    h1, w1, ch1 = img1.shape
    h2, w2, ch2 = img2.shape

    kp1, kp2, good = detect_feature(img1, img2)

    feature_1 = []
    feature_2 = []

    for i, match in enumerate(good):
        feature_1.append(kp1[match.queryIdx].pt)
        feature_2.append(kp2[match.trainIdx].pt)

    # Only for testing the result
    pts1 = np.int32(feature_1)
    pts2 = np.int32(feature_2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    Best_F_matrix, new_feature_1, new_feature_2 = estimate_fundamental_matrix(
        feature_1, feature_2)
    # print(Best_F_matrix)

    new_feature_1 = np.int32(new_feature_1)
    new_feature_2 = np.int32(new_feature_2)

    E_matrix = essential_matrix(Best_F_matrix, K1)
    R, T = extract_camera_pose(E_matrix, K1)
    # print(R)
    # print(T)
    H = []
    I = np.array([0, 0, 0, 1])
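    # stack each [R | T] over [0, 0, 0, 1] to form a 4x4 homogeneous camera pose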
    for i, j in zip(R, T):
        h = np.hstack((i, j))
        h = np.vstack((h, I))
        H.append(h)
    # print('H:\n', H)
    _, H1, H2 = cv2.stereoRectifyUncalibrated(np.float32(new_feature_1),
                                              np.float32(new_feature_2),
                                              Best_F_matrix,
                                              imgSize=(w1, h1))
    # print("H1:\n", H1)
    # print("H2:\n",H2)

    # Store the output data in the text file
    file = open('./output/dataset2_output_data.txt', 'w')
    file.write('*' * 50 + '\n' +
               '(Acquired by the inbuilt function) Fundamental matrix' +
               '\n')
    file.write(str(F) + '\n')
    file.write('*' * 50 + '\n' + 'Estimated fundamental matrix' + '\n')
    file.write(str(Best_F_matrix) + '\n')
    file.write('*' * 50 + '\n' + 'Essential matrix' + '\n')
    file.write(str(E_matrix) + '\n')
    file.write('*' * 50 + '\n' + 'Rotation vector' + '\n')
    file.write(str(R) + '\n')
    file.write('*' * 50 + '\n' + 'Translation vector' + '\n')
    file.write(str(T) + '\n')
    file.write('*' * 50 + '\n' + 'Camera pose matrices (H)' + '\n')
    file.write(str(H) + '\n')
    file.write('*' * 50 + '\n' + 'Homography matrix img1' + '\n')
    file.write(str(H1) + '\n')
    file.write('*' * 50 + '\n' + 'Homography matrix img2' + '\n')
    file.write(str(H2) + '\n')
    file.write('*' * 50 + '\n')
    file.close()

    img1_rectified = cv2.warpPerspective(img1, H1, (w1, h1))
    img2_rectified = cv2.warpPerspective(img2, H2, (w2, h2))

    img1_res, img2_res = rectification(img1_rectified, img2_rectified)

    res = np.concatenate((img1_res, img2_res), axis=1)
    cv2.imshow("Rectification", res)


    disparity_map = get_disparity_map(img1_rectified, img2_rectified)

    disparity_map_gray = None
    disparity_map_gray = cv2.normalize(disparity_map,
                                       disparity_map_gray,
                                       alpha=0,
                                       beta=255,
                                       norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_8U)
    cv2.imshow('disparity_gray', disparity_map_gray)

    depth_map = get_depth_map(disparity_map_gray)

    disparity_map_heat = None
    disparity_map_heat = cv2.normalize(disparity_map,
                                       disparity_map_heat,
                                       alpha=vmin,
                                       beta=vmax,
                                       norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_8U)
    disparity_map_heat = cv2.applyColorMap(disparity_map_heat,
                                           cv2.COLORMAP_JET)
    cv2.imshow("disparity_heat", disparity_map_heat)

    depth_map_gray = None
    depth_map_gray = cv2.normalize(depth_map,
                                   depth_map_gray,
                                   alpha=0,
                                   beta=255,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_8U)
    cv2.imshow('depth_gray', depth_map_gray)

    depth_map_heat = cv2.applyColorMap(depth_map_gray, cv2.COLORMAP_JET)
    cv2.imshow("depth_heat", depth_map_heat)

    cv2.imwrite('./output/data_2_Rectification.jpg', res)
    cv2.imwrite('./output/data_2_disparity_gray.jpg', disparity_map_gray)
    cv2.imwrite('./output/data_2_disparity_heat.jpg', disparity_map_heat)
    cv2.imwrite('./output/data_2_depth_gray.jpg', depth_map_gray)
    cv2.imwrite('./output/data_2_depth_heat.jpg', depth_map_heat)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #37
    return img1color, img2color

lines1 = cv.computeCorrespondEpilines(
    pts2.reshape(-1, 1, 2), 2, fundamental_matrix)
lines1 = lines1.reshape(-1, 3)
img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

lines2 = cv.computeCorrespondEpilines(
    pts1.reshape(-1, 1, 2), 1, fundamental_matrix)
lines2 = lines2.reshape(-1, 3)
img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

h1, w1 = img1.shape
h2, w2 = img2.shape
_, H1, H2 = cv.stereoRectifyUncalibrated(
    np.float32(pts1), np.float32(pts2), fundamental_matrix, imgSize=(w1, h1)
)

img1_rectified = cv.warpPerspective(img1, H1, (w1, h1))
img2_rectified = cv.warpPerspective(img2, H2, (w2, h2))

block_size = 15
min_disp = -96
max_disp = 112

num_disp = max_disp - min_disp

uniquenessRatio = 5

speckleWindowSize = 0
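# The snippet breaks off here; a plausible continuation (an assumption, not
# part of the original) would feed the parameters above into StereoSGBM and
# compute the disparity on the rectified pair:
stereo = cv.StereoSGBM_create(
    minDisparity=min_disp,
    numDisparities=num_disp,
    blockSize=block_size,
    uniquenessRatio=uniquenessRatio,
    speckleWindowSize=speckleWindowSize,
)
disparity_SGBM = stereo.compute(img1_rectified, img2_rectified)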
#  The function computes the rectification transformations without knowing the
#  intrinsic parameters of the cameras and their relative position in space,
#  which explains the suffix "uncalibrated". Another related difference
#  from cv.stereoRectify is that the function outputs not the rectification
#  transformations in the object (3D) space, but the planar perspective
#  transformations encoded by the homography matrices `H1` and `H2`. The
#  function implements the algorithm [Hartley99].
#
#  ### Note
#  While the algorithm does not need to know the intrinsic parameters of the
#  cameras, it heavily depends on the epipolar geometry. Therefore, if the
#  camera lenses have a significant distortion, it would be better to correct
#  it before computing the fundamental matrix and calling this function. For
#  example, distortion coefficients can be estimated for each head of stereo
#  camera separately by using cv.calibrateCamera. Then, the images can be
#  corrected using cv.undistort, or just the point coordinates can be corrected
#  with cv.undistortPoints.
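#
#  A minimal sketch of that correction (an illustration, not part of the
#  original snippet): undistort the matched pixel coordinates with each
#  camera's intrinsics before estimating F. Here K1, K2, dist1 and dist2 are
#  assumed to come from cv.calibrateCamera, pts1/pts2 are Nx2 point arrays,
#  and passing P=K keeps the output in pixel rather than normalized
#  coordinates.
#
#      u1 = cv.undistortPoints(np.float32(pts1).reshape(-1, 1, 2), K1, dist1, P=K1)
#      u2 = cv.undistortPoints(np.float32(pts2).reshape(-1, 1, 2), K2, dist2, P=K2)
#      F, mask = cv.findFundamentalMat(u1, u2, cv.FM_RANSAC)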
ret, Hl, Hr = cv.stereoRectifyUncalibrated(new_left[:, :, 0:2], new_right[:, :, 0:2], F, (C, R))

Hl/=Hl[2,2]
Hr/=Hr[2,2]
Hl[0,2]+=250  # shift the rectified left image 250 px along x

# Printing the Homographies
print('Left Homography: \n', Hl)
print('Right Homography: \n', Hr)

# Rectifying the Images:
# WARPPERSPECTIVE  Applies a perspective transformation to an image
#
#      dst = cv.warpPerspective(src, M)
#      dst = cv.warpPerspective(src, M, 'OptionName',optionValue, ...)
#
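# A minimal sketch of that warp for this snippet (img_left and img_right are
# hypothetical names for the original image pair; C and R are the column and
# row counts used for the rectification above):
#
#      left_rect  = cv.warpPerspective(img_left,  Hl, (C, R))
#      right_rect = cv.warpPerspective(img_right, Hr, (C, R))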
def main():
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--dataset',
                        default='1',
                        help='Dataset type , Default: 1')

    Args = Parser.parse_args()
    dataset = int(Args.dataset)

    if dataset == 1:
        SavePath = './Output/Dataset1/'
        d_thresh = 100000
    if dataset == 2:
        SavePath = './Output/Dataset2/'
        d_thresh = 90000
    if dataset == 3:
        SavePath = './Output/Dataset3/'
        d_thresh = 200000

    images, K1, K2, params = readData(dataset, BasePath="../Data/Project 3/")
    foldercheck(SavePath)
    im1, im2 = images
    im1, im2 = rgb(rescale(im1, 30)), rgb(rescale(im2, 30))
    h1, w1 = im1.shape[:2]
    h2, w2 = im2.shape[:2]
    pts1, pts2, im_matches = SIFTpoints(im1, im2)
    print('SIFT points Detected : ', len(pts1))

    data = (pts1, pts2)
    F, inlier_mask = FundamentalMatrix(data,
                                       s=8,
                                       thresh=0.001,
                                       n_iterations=2000)
    pts1_ = pts1[inlier_mask == 1]
    pts2_ = pts2[inlier_mask == 1]

    E = EssentialMatrix(K1, K2, F)
    print('Inliers SIFT points : ', len(pts1_))

    R, C, x3D = recoverPose(E, pts1_, pts2_, K1, K2)

    l1 = cv2.computeCorrespondEpilines(pts2_.reshape(-1, 1, 2), 2, F)
    im2_epilines, _ = drawEpilines(im1, im2, l1[:10], pts1_, pts2_)
    l2 = cv2.computeCorrespondEpilines(pts1_.reshape(-1, 1, 2), 1, F)
    im1_epilines, _ = drawEpilines(im2, im1, l2[:10], pts2_, pts1_)
    out = np.hstack((im1_epilines, im2_epilines))
    cv2.imwrite(SavePath + 'epilinesImage1.png', out)

    ret, H1, H2 = cv2.stereoRectifyUncalibrated(np.float32(pts1_),
                                                np.float32(pts2_),
                                                F,
                                                imgSize=(w1, h1))
    im1_rectified = cv2.warpPerspective(im1, H1, (w1, h1))
    im2_rectified = cv2.warpPerspective(im2, H2, (w2, h2))
    out = np.hstack((im1_rectified, im2_rectified))
    cv2.imwrite(SavePath + 'rectifiedImage.png', out)

    dst1 = cv2.perspectiveTransform(pts1_.reshape(-1, 1, 2), H1).squeeze()
    dst2 = cv2.perspectiveTransform(pts2_.reshape(-1, 1, 2), H2).squeeze()
    lines1_ = epiLines(pts2_, 2, F, w2)
    warpedlines1 = warpEpilines(lines1_, H1)
    lines2_ = epiLines(pts1_, 1, F, w2)
    warpedlines2 = warpEpilines(lines2_, H2)
    im1_print = drawLines(im1_rectified, warpedlines1[:10], dst1[:10])
    im2_print = drawLines(im2_rectified, warpedlines2[:10], dst2[:10])
    out = np.hstack((im1_print, im2_print))
    cv2.imwrite(SavePath + 'epilines_rectifiedImage.png', out)

    imL, imR = gray(im1_rectified), gray(im2_rectified)
    disparityMap = DisparityMap(imL,
                                imR,
                                warpedlines1,
                                warpedlines2,
                                win_size=10,
                                searchRange=100)
    np.save(SavePath + 'disparityMap.npy', disparityMap)
    #     disparity_map_print = cv2.normalize(disparityMap, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F).astype(np.uint8)
    plt.figure(figsize=(10, 10))
    plt.imshow(disparityMap, cmap=plt.cm.RdBu, interpolation='bilinear')
    plt.savefig(SavePath + ' disparityMap.png')
    plt.imshow(disparityMap, cmap='gray', interpolation='bilinear')
    plt.savefig(SavePath + ' disparityMap_gray.png')

    baseline = params[1]
    f = K1[0, 0]
    depthMap = (baseline * f) / (disparityMap + 1e-15)
    depthMap[depthMap > d_thresh] = d_thresh
    depthMap = np.uint8(depthMap * 255 / np.max(depthMap))
    plt.figure(figsize=(10, 10))
    plt.imshow(depthMap, cmap=plt.cm.RdBu, interpolation='bilinear')
    plt.savefig(SavePath + ' depthMap.png')
    plt.imshow(depthMap, cmap='gray', interpolation='bilinear')
    plt.savefig(SavePath + ' depthMap_gray.png')
Example #40
def main():
    # The whole SIFT implementation part was taken from user Bilou563 on the OpenCV forums
    # https://answers.opencv.org/question/90742/opencv-depth-map-from-uncalibrated-stereo-system/
    # Accessed on 2019-05-06

    print("Carregando as imagens")

    img1 = cv2.imread('data/FurukawaPonce/MorpheusL.jpg',
                      cv2.CV_8UC1)  #queryimage # left image
    img2 = cv2.imread('data/FurukawaPonce/MorpheusR.jpg',
                      cv2.CV_8UC1)  #trainimage # right image

    # Resize img1 so that both images end up the same size (required)
    img1 = img1[:, 0:1300]  # crop a region with nothing in it (checked in the image beforehand)
    # and match the aspect ratio of img2
    img1 = cv2.resize(img1, img2.shape[::-1])  # dsize is (width, height), so the (rows, cols) shape is reversed

    print("Encontrando pontos correspondentes com SIFT")
    #Obtainment of the correspondent point with SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    ###find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    ###FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    ###ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.array(pts1)
    pts2 = np.array(pts2)

    print(
        "Computing the fundamental matrix from the corresponding points")
    #Computation of the fundamental matrix
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    # Obtain the rectification homographies and use warpPerspective to transform the images...
    pts1 = pts1[:, :][mask.ravel() == 1]
    pts2 = pts2[:, :][mask.ravel() == 1]

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)

    p1fNew = pts1.reshape((pts1.shape[0] * 2, 1))
    p2fNew = pts2.reshape((pts2.shape[0] * 2, 1))

    print("Retificando as imagens com base na matriz fundamental")
    retBool, rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(
        p1fNew, p2fNew, F, img1.shape)

    dst11 = cv2.warpPerspective(img1, rectmat1, img1.shape,
                                cv2.BORDER_ISOLATED)
    dst22 = cv2.warpPerspective(img2, rectmat2, img2.shape,
                                cv2.BORDER_ISOLATED)

    print("Gravando as imagens retificadas")
    # Gravando as imagens retificadas
    cv2.imwrite('data/output/rectifiedMorpheusL.png', dst11)
    cv2.imwrite('data/output/rectifiedMorpheusR.png', dst22)

    print("Calculando a disparidade")
    disp = stereo.compute(dst11.astype(np.uint8), dst22.astype(
        np.uint8)).astype(np.float32) / 16

    plt_show(disp, "data/output/dispMorpheus.png")

    # Histogram I used to find the 60 and -60 values applied
    # when building disp_filtered just below
    # It is an attempt to drop the points whose disparity was
    # computed wrong / found no match
    # plt.hist(disp.ravel(), 40)
    # plt.title("Histogram with 'auto' bins")
    # hist = plt.gcf()
    # hist.savefig("histMorpheus.png")
    # plt.show()

    disp_filtered = np.where(disp > 60, np.amin(disp), disp)
    disp_filtered = np.where(disp_filtered < -60, -60, disp_filtered)

    plt_show(disp_filtered, "data/output/dispMorpheusFiltered.png")

    #########################################
    ######## Depth computation ########
    #########################################

    print("Carregando os parâmetros de calibração")
    # Carregando os parâmetros de calibração
    # MorpheusL
    imL_calib = open('data/FurukawaPonce/MorpheusL.txt', 'r')
    texto = imL_calib.read()

    start_f1 = texto.find('fc = ')
    end_f1 = texto.find(']', start_f1)
    f1 = np.fromstring(texto[start_f1 + 6:end_f1].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_c1 = texto.find('cc = ')
    end_c1 = texto.find(']', start_c1)
    c1 = np.fromstring(texto[start_c1 + 6:end_c1].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_alpha = texto.find('alpha_c = ')
    end_alpha = texto.find(';', start_alpha)
    alpha = float(texto[start_alpha + 10:end_alpha])

    start_R = texto.find('R = ')
    end_R = texto.find(']', start_R)
    R1 = np.fromstring(texto[start_R + 5:end_R].replace(',',
                                                        ' ').replace(';', ' '),
                       dtype=float,
                       sep=' ')
    R1.shape = (3, 3)

    start_Tc = texto.find('Tc = ')
    end_Tc = texto.find(']', start_Tc)
    Tc1 = np.fromstring(texto[start_Tc + 6:end_Tc].replace(';', ' '),
                        dtype=float,
                        sep=' ')
    Tc1.shape = (3, 1)

    matrix1 = np.array([[f1[0], alpha, c1[0]], [0, f1[1], c1[1]], [0, 0, 1]],
                       dtype=float)

    # Compensating the intrinsics matrix for the MorpheusL resize, following:
    # https://dsp.stackexchange.com/a/6098
    # Accessed on 2019-05-06
    scaling_compensation = np.array([[12 / 13, 0, 1 / 26],
                                     [0, 12 / 13, 1 / 26], [0, 0, 1]])
    matrix1 = scaling_compensation @ matrix1

    # MorpheusR
    imR_calib = open('data/FurukawaPonce/MorpheusR.txt', 'r')
    texto = imR_calib.read()

    start_f2 = texto.find('fc = ')
    end_f2 = texto.find(']', start_f2)
    f2 = np.fromstring(texto[start_f2 + 6:end_f2].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_c2 = texto.find('cc = ')
    end_c2 = texto.find(']', start_c2)
    c2 = np.fromstring(texto[start_c2 + 6:end_c2].replace(';', ' '),
                       dtype=float,
                       sep=' ')

    start_alpha = texto.find('alpha_c = ')
    end_alpha = texto.find(';', start_alpha)
    alpha = float(texto[start_alpha + 10:end_alpha])

    start_R = texto.find('R = ')
    end_R = texto.find(']', start_R)
    R2 = np.fromstring(texto[start_R + 5:end_R].replace(',',
                                                        ' ').replace(';', ' '),
                       dtype=float,
                       sep=' ')
    R2.shape = (3, 3)

    start_Tc = texto.find(
        'Tc_8 = ')  # for some reason it is stored as 'Tc_8' for this image
    end_Tc = texto.find(']', start_Tc)
    Tc2 = np.fromstring(texto[start_Tc + 8:end_Tc].replace(';', ' '),
                        dtype=float,
                        sep=' ')
    Tc2.shape = (3, 1)

    matrix2 = np.array([[f2[0], alpha, c2[0]], [0, f2[1], c2[1]], [0, 0, 1]],
                       dtype=float)

    # Adjusting the parameters
    rvec1, _ = cv2.Rodrigues(R1)
    rvec2, _ = cv2.Rodrigues(R2)

    Tc1.shape = (1, 3)
    Tc2.shape = (1, 3)

    # Compute the relative translation and rotation between the cameras
    rvec3, tvec3, _, _, _, _, _, _, _, _, = cv2.composeRT(
        rvec1.ravel(), Tc1.ravel(), rvec2.ravel(), Tc2.ravel())

    print("Calculando profundidade")
    focal_length = np.mean([np.mean(f1), np.mean(f2)])
    doffs = c2[0] - c1[0]  # difference along x between the principal points
    baseline = np.linalg.norm(tvec3)  # distance between the cameras
    disp2depth_factor = baseline * focal_length

    # Disparity in mm(?)
    # Some factor is off here because the values come out far too large
    depth = disp2depth_factor / (disp_filtered + doffs)
    # plt_show(depth, "depthMorpheus_mm.png")

    furthest = disp2depth_factor / (
        np.amin(disp_filtered[disp_filtered > -60]) + doffs)

    # Normalize the depth as specified in the assignment
    depth[disp_filtered > -60] = np.floor(
        (disp2depth_factor /
         (disp_filtered[disp_filtered > -60] + doffs)) * 254 / furthest)
    depth[disp_filtered <= -60] = 255

    plt_show(depth, "data/output/depthMorpheus.png")
Example #41
img5,img6 = drawlines(img1,img2,lines1,pts1,pts2)

     # Find epilines corresponding to points in left image (first image) and
     # drawing its lines on right image
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
lines2 = lines2.reshape(-1,3)
img3,img4 = drawlines(img2,img1,lines2,pts2,pts1)

   

pp1 = pts1.reshape((pts1.shape[0] * 2, 1))
pp2 = pts2.reshape((pts2.shape[0] * 2, 1))

size =  img1.shape[1], img1.shape[0]

suc, h1, h2  = cv2.stereoRectifyUncalibrated(pp1, pp2, F, size)

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(M, dis, M, dis, size, R, T) 
print validPixROI1, validPixROI2

cv2.imwrite("img2p.png", img3)
cv2.imwrite("img1p.png", img5) 


img5Un = cv2.warpPerspective(img5, h1, size)
img3Un = cv2.warpPerspective(img3, h2, size)
cv2.imwrite("img2r Un.png", img3Un)
cv2.imwrite("img1r Un.png", img5Un) 

img1 = cv2.imread('l.jpg',-1)
img2 = cv2.imread('r.jpg',-1)
Example #42
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = hc.drawlines(img2, img1, lines2, pts2, pts1)

    fig = plt.figure(3)
    axarr = fig.subplots(1, 2)
    axarr[0].imshow(img5, cmap='gray')
    axarr[1].imshow(img3, cmap='gray')
    plt.show()

    # rectification
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    p1fNew = pts1.reshape((pts1.shape[0] * 2, 1))
    p2fNew = pts2.reshape((pts2.shape[0] * 2, 1))

    retBool, rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(
        p1fNew, p2fNew, F, img1.shape[::-1])
    dst11 = cv2.warpPerspective(img1, rectmat1, img1.shape[::-1])
    dst22 = cv2.warpPerspective(img2, rectmat2, img1.shape[::-1])

    fig = plt.figure(4)
    axarr = fig.subplots(1, 2)
    axarr[0].imshow(dst11, cmap='gray')
    axarr[1].imshow(dst22, cmap='gray')
    plt.show()

    #calculation of the disparity
    stereoMatcher = cv2.StereoBM_create()
    stereoMatcher.setMinDisparity(0)
    stereoMatcher.setNumDisparities(128)
    stereoMatcher.setBlockSize(5)
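    # The snippet ends before the matcher runs; the natural next step (an
    # assumption, not part of the original) would be to compute the disparity
    # on the rectified pair:
    disparity = stereoMatcher.compute(dst11, dst22)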
Example #43
    def match_stereo(self, img1, img2, img1_view, img2_view, thrs = 500, ratio_thrs = 0.5):
        gray1 = cv2.cvtColor(img1, cv.CV_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv.CV_BGR2GRAY)

        surf = cv2.SURF(thrs, 1, 4)
        kp1, d1 = surf.detect(gray1, None, False)
        kp2, d2 = surf.detect(gray2, None, False)
        d1.shape = (-1, surf.descriptorSize())
        d2.shape = (-1, surf.descriptorSize())
        print len(kp1), len(kp2)
        m = match(d1, d2, ratio_thrs)
        #p1 = [p.pt for p in kp1]
        #p2 = [p.pt for p in kp2]
        #m = local_match(p1, p2, d1, d2, max_dist = 5.0 / pixel_extent, min_neigh = 5, r_threshold = ratio_thrs)
        
        pairs = np.float32( [(kp1[i].pt, kp2[j].pt) for i, j in m] )
        mp1, mp2 = pairs[:,0].copy(), pairs[:,1].copy()

        '''
        for (x1, y1), (x2, y2) in np.int32(zip(mp1, mp1)):
            cv.circle(img1, (x1, y1), 2, (0, 255, 0))
            cv.circle(img2, (x2, y2), 2, (0, 255, 0))
            cv.line(img1, (x1, y1), (x2, y2), (0, 255, 0))
            cv.line(img2, (x1, y1), (x2, y2), (0, 255, 0))
        self.img_flip[]
        '''
        
        F, status = cv2.findFundamentalMat(mp1, mp2, cv2.FM_RANSAC, 5.0)
        status = status.ravel() != 0
        print '%d / %d' % (sum(status), len(status))
        mp1, mp2 = mp1[status], mp2[status]

        
        rectified_size = (800, 800)
        retval, H1, H2 = cv2.stereoRectifyUncalibrated(mp1.reshape(1, -1, 2), mp2.reshape(1, -1, 2), F, rectified_size)
        gH1 = np.dot(H1, img1_view)
        gH2 = np.dot(H2, img2_view)

        mp1 = cv2.perspectiveTransform(mp1.reshape(1, -1, 2), H1)
        mp2 = cv2.perspectiveTransform(mp2.reshape(1, -1, 2), H2)
        d = mp1[0,:,0]-mp2[0,:,0]

        def draw_vis(img, H, size):
            return cv2.warpPerspective(img, H, size)
        vis1 = draw_vis(self.frames[0].lods[0], gH1, rectified_size)
        vis2 = draw_vis(self.frames[1].lods[0], gH2, rectified_size)

        anaglyph = vis2.copy()
        anaglyph[..., 2] = vis1[..., 2]

        #e1 = cv2.canny(cv2.cvtColor(vis1, cv.CV_BGR2GRAY), 100, 200)
        #e2 = cv2.canny(cv2.cvtColor(vis2, cv.CV_BGR2GRAY), 100, 200)
        #edge_anaglyph = np.dstack([e2, e2, e1])
        #anaglyph = np.maximum(anaglyph, edge_anaglyph)
        
        print 'stereo matching...'
        disp = stereo.calc_disparity(vis1, vis2, d.min(), d.max())
        
        fnbase = '%02d_' % self.shot_idx

        print 'saving ply...'
        verts = np.zeros(disp.shape + (3,), np.float32)
        verts[...,1], verts[...,0] = np.ogrid[ :rectified_size[1], :rectified_size[0] ]
        verts[...,2] = disp*4
        verts *= 0.1
        write_ply_bin(fnbase+'cloud.ply', verts, cv2.cvtColor(vis1, cv.CV_BGR2RGB))
        
        
        vis_disp = disp.copy()
        vis_disp -= d.min()
        vis_disp /= np.percentile(vis_disp, 99)
        vis_disp = np.uint8(np.clip(vis_disp, 0, 1)*255)
        
        cv2.imshow('disp', vis_disp)
        cv2.imshow('anaglyph', anaglyph)

        cv2.imwrite(fnbase+'l.png', vis1)
        cv2.imwrite(fnbase+'r.png', vis2)
        #cv2.imwrite(fnbase+'anaglyph.bmp', anaglyph)
        #cv2.imwrite(fnbase+'disp.bmp', vis_disp)
        #cv2.imwrite(fnbase+'small.bmp', self.cur_preview)
        
        self.shot_idx += 1