Example #1
def calculateExtrinsic(img1, img2, cameraIntrinsic):
    """
    Takes 2 images, plus intrinsic parameters and returns extrinsic parameters between the two photos
    Assumes same camera
    """
    sift = cv2.SIFT()
    
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
 
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)   # or pass empty dictionary
 
    flann = cv2.FlannBasedMatcher(index_params,search_params)
 
    matches = flann.knnMatch(des1,des2,k=2)
 
    good = []
    pts1 = []
    pts2 = []
 
    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.7*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts2 = np.float32(pts2)
    pts1 = np.float32(pts1)       
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)
    return F
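Despite its name, the example above returns only the fundamental matrix F. A minimal follow-up sketch, assuming the inlier points pts1/pts2 are also available and cameraIntrinsic is the 3x3 matrix K, mirrors Example #12 below to recover the actual extrinsics:

import cv2

def extrinsicFromF(F, pts1, pts2, cameraIntrinsic):
    # essential matrix from the fundamental matrix and the intrinsics
    E = cameraIntrinsic.T @ F @ cameraIntrinsic
    # decompose E into the relative rotation and unit-scale translation
    retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, cameraIntrinsic)
    return R, t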
Example #2
def epipolar_rectify(imL,imR,show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)
    
    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors    
    
    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors        
    
    matches12 = match_descriptors(descriptors1, descriptors2,metric='hamming', cross_check=True)
    
    # skimage keypoints are (row, col); flip to (x, y) for OpenCV
    pts1 = keypoints1[matches12[:, 0], ::-1]
    pts2 = keypoints2[matches12[:, 1], ::-1]
    
    
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    
    # stereoRectifyUncalibrated expects imgSize as (width, height)
    res, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, imL.shape[1::-1], 10)
    
    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)    
    
    return H1,H2
Example #3
def findMatchesKnn(image1, image2, filter=True, ratio=True):
    '''
    Takes two images
    Returns unsorted matches between keypoints in both images using the kNN match
    '''
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(image1.descs, image2.descs, k=2)
    points1 = []
    points2 = []
    new_matches = []
    for m, n in matches:
        if m.distance < .75 * n.distance and ratio:
            new_matches.append(m)
            points1.append(image1.kps[m.queryIdx].pt)
            points2.append(image2.kps[m.trainIdx].pt)
        elif not ratio:
            points1.append(image1.kps[m.queryIdx].pt)
            points2.append(image2.kps[m.trainIdx].pt)
            new_matches.append(m)

    if filter:
        F, mask = cv2.findFundamentalMat(np.array(points1), np.array(points2), method=cv2.FM_RANSAC)

        new_points1, new_points2, newer_matches = [], [], []
        for i in range(len(new_matches)):
            if mask[i] == 1:
                new_points1.append(image1.kps[new_matches[i].queryIdx].pt)
                new_points2.append(image2.kps[new_matches[i].trainIdx].pt)
                newer_matches.append(new_matches[i])

        return new_points1, new_points2, newer_matches
    else:
        return points1, points2, new_matches
Example #4
def stereoMatching(img1, img2):

    sift = cv2.SIFT_create()  # cv2.SIFT() in OpenCV 2.x

    k1, d1 = sift.detectAndCompute(img1, None)
    k2, d2 = sift.detectAndCompute(img2, None)

    # matches
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(d1, d2, k=2)

    good = []
    pts1 = []
    pts2 = []

    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
            pts2.append(k2[m.trainIdx].pt)
            pts1.append(k1[m.queryIdx].pt)

    good = sorted(good, key=lambda x: x.distance)

    pts1 = np.float32(pts1)
    pts2 = np.float32(pts2)

    # Testing a hacky outlier detection with findFundamentalMat.
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.RANSAC, 2)
    print(len(pts1))

    # We select only inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    print(len(pts1))
    return pts1, pts2
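A quick sanity check that applies to any of these estimates: every inlier pair should come close to satisfying the epipolar constraint x2^T F x1 = 0. A short sketch, assuming F and the inlier arrays pts1/pts2 from an example such as stereoMatching above:

import numpy as np

def epipolar_residuals(F, pts1, pts2):
    # lift the (x, y) points to homogeneous coordinates
    h1 = np.column_stack([pts1, np.ones(len(pts1))])
    h2 = np.column_stack([pts2, np.ones(len(pts2))])
    # |x2^T F x1| per correspondence; near zero for true inliers
    return np.abs(np.sum((h2 @ F) * h1, axis=1))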
Example #5
def get_fundamental_matrix(left_image, right_image):
    img1 = cv2.imread(left_image, 0)
    img2 = cv2.imread(right_image, 0)

    kp1, des1 = SIFT.detectAndCompute(img1,None)
    kp2, des2 = SIFT.detectAndCompute(img2,None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)

    good = []
    pts1 = []
    pts2 = []

    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.array(pts1, dtype='float32')
    pts2 = np.array(pts2, dtype='float32')

    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)
    return F
Example #6
def get_H_list(Apts, Bpts, Nsamples=24):
    Npts = len(Apts[0])
    if Npts < Nsamples:
        Nsamples = Npts
    r = random.sample(range(Npts), Nsamples)
    # default method; the result acts as an essential matrix only if the
    # input points are already in normalized image coordinates
    E_mat, gdm = cv2.findFundamentalMat(Apts[:2, r].T, Bpts[:2, r].T)
    return H_from_E( E_mat )
Example #7
 def set_pair_by_match_points(self,pair_a_b,points_a,points_b):
     print "!",
     f, mask=cv2.findFundamentalMat(points_a, points_b )
     positive_count=sum(mask)
     ij,f=normalize(pair_a_b,f)
     self.F[ij]=f
     self.N[ij]=positive_count
Example #8
def generate_moving_mask_v3(frame_lst,
                            flows_lst,
                            var_thres=1,
                            area_min=100,
                            area_max=np.inf):
    h, w = flows_lst[0].shape[:2]
    points1 = np.mgrid[:h, :w].reshape(2, -1).astype(np.float32).T[:, (1, 0)]
    bgdModel = np.empty((1, 65), dtype=np.float64)
    fgdModel = np.empty((1, 65), dtype=np.float64)
    mask = np.empty((h, w), dtype=np.uint8)
    Fs = []
    masks = []
    for frame, flow in zip(frame_lst, flows_lst):
        points2 = points1 + flow.reshape(-1, 2)
        F, bg = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC,
                                       var_thres)
        bg = bg.reshape(h, w).astype(bool)  # np.bool is removed in modern NumPy
        fg = ~bg
        if np.sum(fg) > 10:
            mask[fg] = cv2.GC_PR_FGD
            mask[bg] = cv2.GC_PR_BGD
            bgdModel[...] = 0
            fgdModel[...] = 0
            mask, _, _ = cv2.grabCut(frame, mask, None, bgdModel, fgdModel, 1,
                                     cv2.GC_INIT_WITH_MASK)
            components = cv2.connectedComponents((mask & 1).astype(np.uint8),
                                                 ltype=cv2.CV_16U)
            masks.append(filter_components(components, area_min, area_max))
        else:
            masks.append((0, np.zeros_like(fg, dtype=np.uint16)))
        Fs.append(F)
    return Fs, masks
Example #9
def rectify_pair(image_left, image_right, viz=False):
    """Computes the pair's fundamental matrix and rectifying homographies.

    Arguments:
      image_left, image_right: 3-channel images making up a stereo pair.

    Returns:
      F: the fundamental matrix relating epipolar geometry between the pair.
      H_left, H_right: homographies that warp the left and right image so
        their epipolar lines are corresponding rows.
    """

    image_a_points, image_b_points = find_feature_points(image_left,
                                                         image_right)

    f_mat, mask = cv2.findFundamentalMat(image_a_points,
                                         image_b_points,
                                         cv2.RANSAC)
    imsize = (image_right.shape[1], image_right.shape[0])
    image_a_points = image_a_points[mask.ravel() == 1]
    image_b_points = image_b_points[mask.ravel() == 1]

    _, H1, H2 = cv2.stereoRectifyUncalibrated(image_a_points,
                                              image_b_points,
                                              f_mat, imsize)

    return f_mat, H1, H2
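A short usage sketch for rectify_pair (assuming left and right are the loaded 3-channel images): the returned homographies are applied with cv2.warpPerspective to get a row-aligned pair.

import cv2

F, H_left, H_right = rectify_pair(left, right)
h, w = right.shape[:2]
rectified_left = cv2.warpPerspective(left, H_left, (w, h))    # dsize is (width, height)
rectified_right = cv2.warpPerspective(right, H_right, (w, h))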
Example #10
    def FfromPoints(cls,
                    P1,
                    P2,
                    method,
                    ransacThresh,
                    confidence,
                    maxiters):
        """
        Compute fundamental matrix from two sets of corresponding image points
        see https://docs.opencv.org/master/d9/d0c/
        group__calib3d.html#gae850fad056e407befb9e2db04dd9e509
        """
        # TODO check valid input
        # need at least 7 pairs of points
        # TODO sort options in a user-friendly manner
        fopt = {'7p': cv.FM_7POINT,
                '8p': cv.FM_8POINT,
                'ransac': cv.FM_RANSAC,
                'lmeds': cv.FM_LMEDS}

        F, mask = cv.findFundamentalMat(P1, P2,
                                        method=fopt[method],
                                        ransacReprojThreshold=ransacThresh,
                                        confidence=confidence,
                                        maxIters=maxiters)
        # print('Fund mat = ', F)

        return F
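A hypothetical call for the classmethod above (the enclosing class is not shown in this snippet; 'CentralCamera' below is a stand-in name and the argument values are placeholders):

F = CentralCamera.FfromPoints(P1, P2,
                              method='ransac',    # one of '7p', '8p', 'ransac', 'lmeds'
                              ransacThresh=1.0,   # pixels
                              confidence=0.99,
                              maxiters=1000)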
Example #11
def filterFeatures(p1, p2, K, method):
    inliers = 0
    total = len(p1)
    space = ""
    status = []
    M = None
    if len(p1) < 7:
        # not enough points
        return None, np.zeros(total), [], []
    # 'tol' is assumed to be a module-level reprojection threshold in pixels
    if method == 'homography':
        M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
    elif method == 'fundamental':
        M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
    elif method == 'essential':
        M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
    elif method == 'none':
        M = None
        status = np.ones(total)
    newp1 = []
    newp2 = []
    for i, flag in enumerate(status):
        if flag:
            newp1.append(p1[i])
            newp2.append(p2[i])
    p1 = np.float32(newp1)
    p2 = np.float32(newp2)
    inliers = np.sum(status)
    total = len(status)
    #print '%s%d / %d  inliers/matched' % (space, np.sum(status), len(status))
    return M, status, np.float32(newp1), np.float32(newp2)
Example #12
def returnPose(und1, und2, k):
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(und1, None)
    kp2, des2 = sift.detectAndCompute(und2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 1 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    E = k.T @ F @ k
    retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, k)
    return R, t
Example #13
def findMatches(image1, image2, filter=False):
    '''
    Takes two images
    Returns list of matches between the keypoints in both images
    '''
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
    matches = bf.match(image1.descs, image2.descs)

    points1 = []
    points2 = []

    for match in matches:
        points1.append(image1.kps[match.queryIdx].pt)
        points2.append(image2.kps[match.trainIdx].pt)

    if filter:
        F, mask = cv2.findFundamentalMat(np.array(points1), np.array(points2), method=cv2.FM_RANSAC)

        new_points1, new_points2, new_matches = [], [], []
        for i in range(len(matches)):
            if mask[i] == 1:
                new_points1.append(image1.kps[matches[i].queryIdx].pt)
                new_points2.append(image2.kps[matches[i].trainIdx].pt)
                new_matches.append(matches[i])

        return new_points1, new_points2, new_matches

    else:
        return points1, points2, matches
Example #14
def solve(images):

    on = images[0]
    off = images[1]
    dist = distance(on, off)

    print('decoding x')
    xcoords, width = decode_axis(on, off, images[2::2], 'x')
    print('decoding y')
    ycoords, height = decode_axis(on, off, images[3::2], 'y')

    cam_points = []
    proj_points = []

    for i in range(on.shape[0]):
        print(i / on.shape[0])
        for j in range(on.shape[1]):
            x = xcoords[i,j]
            y = ycoords[i,j]
            if dist[i,j] > 0.25:
                cam_points.append((i, j))
                proj_points.append((height - y - 1, x))

    print(len(cam_points), 'points')

    print('finding fundamental matrix')
    cam_array = np.array(cam_points) / 1024 - 1
    proj_array = np.array(proj_points) / 1024 - 1
    F, status = cv2.findFundamentalMat(cam_array, proj_array, cv2.FM_RANSAC)

    print('stereo rectify')
    res, H1, H2 = cv2.stereoRectifyUncalibrated(cam_array, proj_array, F, on.shape[:2])
Example #15
def MatchFunM(ID):
    print(ID)
    info = Info.GetVideoInfo(ID)
    frame_sift_lst = [x for x in sorted(os.listdir(info['frame_sift_path'])) if x.endswith('.sift')]
    pano_sift_lst = [x for x in sorted(os.listdir(info['pano_sift_path'])) if x.endswith('.sift')]

    results = np.load('%s/fisher_results.npy'%info['pano_path'])
    MM = []
    for index, name in enumerate(frame_sift_lst):
        Mi = []
        frame_short_name = name.split('.')[0]
        for i in range(0,results.shape[1]):
            pano_name = pano_sift_lst[results[index, i]]
            pano_short_name = pano_name.split('.')[0]
            kp_pairs = lib_SIFTmatch.flann_match('%s/%s'%(info['frame_sift_path'],frame_short_name),
                                                 '%s/%s'%(info['pano_sift_path'],pano_short_name))
            #print kp_pairs
            try:
                (mkp1, mkp2) = zip(*kp_pairs)
                mkp1_pts = [ (x[0],x[1]) for x in mkp1 ]
                mkp2_pts = [ (x[0],x[1]) for x in mkp2 ]
                mkp1_pts = np.float32(mkp1_pts)
                mkp2_pts = np.float32(mkp2_pts)
                F, mask = cv2.findFundamentalMat(mkp1_pts,mkp2_pts,cv2.FM_RANSAC,20)
                q_pts = mkp1_pts[mask.ravel()==1]
                t_pts = mkp2_pts[mask.ravel()==1]
                Mi.append(len(q_pts))
            except:
                Mi.append(0)
                continue
        MM.append(Mi)
    np.save('%s/results_fundM'%info['pano_path'],MM)    
Example #16
def sift_get_fmat(img1,
                  img2,
                  total=100,
                  ratio=0.8,
                  algo=cv2.FM_LMEDS,
                  randomize=False,  # renamed: 'random' shadowed the random module used below
                  display=False):
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < ratio * n.distance:
            good.append(m)

    sorted_good_mat = sorted(good, key=lambda m: m.distance)
    for m in sorted_good_mat:
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.float32(pts1)
    pts2 = np.float32(pts2)
    print('pts size: ', pts1.size)
    assert pts1.size > 2 and pts2.size > 2
    F, mask = cv2.findFundamentalMat(pts1, pts2, algo)
    if mask is None or np.linalg.matrix_rank(F) != 2:
        return None, None, None
    # assert np.linalg.matrix_rank(F) == 2

    # We select only inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    if randomize:
        # Randomly sample up to [total] of the inlier correspondences
        pts = random.sample(list(zip(pts1, pts2)), min(len(pts1), total))
        pts1, pts2 = np.array([ p for p, _ in pts ]), \
                     np.array([ p for _, p in pts ])
    else:
        pts1 = pts1[:min(len(pts1), total)]
        pts2 = pts2[:min(len(pts2), total)]

    if display:
        draw_matches(img1, pts1, img2, pts2, good)

    return F, pts1, pts2
Example #17
 def feature_matching(self, logging=False, view_match=False):
     for i, im_i in enumerate(self.images[:-1]):
         for j, im_j in enumerate(self.images[i + 1:i + 2], i + 1):
             i_kp, j_kp = [], []
             src, dst = [], []
             # do knnMatch first and ransac later
             matches = self.matcher.knnMatch(im_i.desc, im_j.desc, 2)
             for match in matches:
                 if match[0].distance >= 0.75 * match[1].distance:
                     continue
                 kp_i_idx, kp_j_idx = match[0].queryIdx, match[0].trainIdx
                 src.append(im_i.kp[kp_i_idx].pt)
                 dst.append(im_j.kp[kp_j_idx].pt)
                 i_kp.append(kp_i_idx)
                 j_kp.append(kp_j_idx)
             # do fundamentalMat RANSAC
             src = np.float32([pt for pt in src])
             dst = np.float32([pt for pt in dst])
             F, masks = cv2.findFundamentalMat(src,
                                               dst,
                                               cv2.FM_RANSAC,
                                               ransacReprojThreshold=3,
                                               confidence=0.99)
             self.F_mats[(i, j)] = F
             if masks is None:
                 print("Cannot Find Enough Feature Matching Between " +
                       str(i) + " and " + str(j))
                 continue
             if logging:
                 print("Feature Matching", i, 'vs.', j, '=', np.sum(masks),
                       '/', src.shape[0])
             for k, mask_v in enumerate(masks):
                 if not mask_v: continue
                 im_i.set_kp_kp(i_kp[k], j, j_kp[k])
                 im_j.set_kp_kp(j_kp[k], i, i_kp[k])
             if view_match:
                 win_name = str(i) + " VS. " + str(j)
                 cv2.namedWindow(win_name)
                 im1 = copy.deepcopy(im_i.im)
                 im2 = copy.deepcopy(im_j.im)
                 canvas = np.column_stack([im1, im2])
                 offset = im2.shape[1]
                 for k, mask_v in enumerate(masks):
                     if not mask_v: continue
                     kp_i_loc = tuple(np.int32(src[k]))
                     kp_j_loc = list(np.int32(dst[k]))
                     kp_j_loc[0] += offset
                     color = [0] * 3
                     color[k % 3] = 255
                     cv2.line(canvas, kp_i_loc, tuple(kp_j_loc),
                              tuple(color), 1)
                 #new_size = tuple([int(v / self.downsample) for v in canvas.shape])
                 new_size = tuple([int(v / 4) for v in canvas.shape])
                 canvas = cv2.resize(canvas, (new_size[1], new_size[0]))
                 cv2.imshow(win_name, canvas)
                 cv2.waitKey(0)
                 cv2.destroyAllWindows()
Example #18
def find_homography(img1, img2, kp1, kp2, matches, im_list1, im_list2,
                    out_dir):
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches[:]])
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches[:]])
    F, F_mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.FM_RANSAC)
    matchesMask = F_mask.ravel().tolist()
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=2)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, **draw_params)
    filename = (im_list1.split('.')[0] + '_' + im_list2.split('.')[0] + '_' +
                'Fmatching' + '.' + im_list1.split('.')[1])
    cv2.imwrite('./' + out_dir + '/' + filename, img3)
    H, H_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = H_mask.ravel().tolist()
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=2)
    img4 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, **draw_params)
    filename = (im_list1.split('.')[0] + '_' + im_list2.split('.')[0] + '_' +
                'Hmatching' + '.' + im_list1.split('.')[1])
    cv2.imwrite('./' + out_dir + '/' + filename, img4)
    if np.sum(F_mask) > 0.7 * len(matches):
        print('More than 70% of inliers survive in Fundamental Matrix')
        print('We calculate the Homography matrix')
        if np.sum(H_mask) > 0.7 * len(matches):
            print('Still more than 70% of inliers survive')
            print('Most of the points match')
        elif np.sum(H_mask) > 0.2 * len(matches) and np.sum(
                H_mask) < 0.7 * len(matches):
            print('About 20%-70% of keypoints survive')
            print('A small part of the points match')
        else:
            print('Below 20% of keypoints survive')
            print('Most keypoints cannot match')
            H = np.zeros((3, 3))
    elif np.sum(F_mask) > 0.2 * len(matches) and np.sum(
            F_mask) < 0.7 * len(matches):
        print('20%-70% of inliers survive in Fundamental Matrix')
        print('We calculate the Homography matrix')
        if np.sum(H_mask) > 0.2 * len(matches) and np.sum(
                H_mask) < 0.7 * len(matches):
            print('About 20%-70% of keypoints survive')
            print('A small part of the points match')
        else:
            print('Below 20% of keypoints survive')
            print('Most keypoints cannot match')
            H = np.zeros((3, 3))
    else:
        print('Below 20% of inliers survive in Fundamental Matrix')
        print('Most keypoints cannot match')
        H = np.zeros((3, 3))
    return H
Example #19
def epipolarlines(index):
    img1 = cv.imread('newimageG' + str(index) + '.jpg', 0)
    img2 = cv.imread('newimageD' + str(index) + '.jpg', 0)

    sift = cv.xfeatures2d.SIFT_create()

    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []
    pts1 = []
    pts2 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    def drawlines(img1, img2, lines, pts1, pts2):
        ''' img1 - image on which we draw the epilines for the points in img2
            lines - corresponding epilines '''
        r, c = img1.shape
        img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
        img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
        for r, pt1, pt2 in zip(lines, pts1, pts2):
            color = tuple(np.random.randint(0, 255, 3).tolist())
            x0, y0 = map(int, [0, -r[2] / r[1]])
            x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
            img1 = cv.line(img1, (x0, y0), (x1, y1), color, 1)
            img1 = cv.circle(img1, tuple(pt1), 5, color, -1)
            img2 = cv.circle(img2, tuple(pt2), 5, color, -1)
        return img1, img2

    lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

    lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img3)
    plt.show()
    return
Example #20
def matches_2():
    descriptors_cv2_1 = to_cv2_di(descriptors3)
    descriptors_cv2_2 = to_cv2_di(descriptors4)

    keypoints_cv2_1 = to_cv2_kplist(detected_keypoints3)
    keypoints_cv2_2 = to_cv2_kplist(detected_keypoints4)

    bf = cv2.BFMatcher()

    img1 = cv2.imread(image_pathes[2])
    img2 = cv2.imread(image_pathes[3])

    matches = bf.knnMatch(descriptors_cv2_1, descriptors_cv2_2, k=2)
    good = []
    pts1 = []
    pts2 = []
    threshold_matching = 0.7
    for m, n in matches:
        if m.distance < threshold_matching * n.distance:
            good.append([m])
            pts1.append(keypoints_cv2_1[m.queryIdx].pt)
            pts2.append(keypoints_cv2_2[m.trainIdx].pt)

    print("matches 2 with {}: {}".format(threshold_matching, len(good)))
    img_out = cv2.drawMatchesKnn(img1, keypoints_cv2_1, img2, keypoints_cv2_2,
                                 good, None)
    cv2.imwrite("out\\2_08.png", img_out)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    #create FundamentalMatrix
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    #fx = fy = 721.5
    #cx = 690.5
    #cy = 172.8
    #F[0][0] = fx
    #F[0][2] = cx
    #F[1][1] = fy
    #F[1][2] = cy
    # F = np.matrix([[fx, 0, cx], [0, fy, cy], [0, 0, 0]])

    # select only inlier points
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    lines = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines = lines.reshape(-1, 3)
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    img1, img2 = drawlines(gray1, gray2, lines, pts1, pts2)

    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(gray2, gray1, lines2, pts2, pts1)
    #cv2.imshow('img', img1)
    cv2.imwrite("out\\3_08.png", img1)
    #cv2.waitKey(8000)
    #cv2.imshow('img', img3)
    cv2.imwrite("out\\4_08.png", img3)
Example #21
 def fundamental_matrix_estimate(self, pts1, pts2):
     F, mask = cv2.findFundamentalMat(
         pts1,
         pts2,
         cv2.FM_RANSAC,
         ransacReprojThreshold=self.configs['thresh'],
         confidence=self.configs['confidence'])
     return mask[:, 0].astype(bool), F  # np.bool is removed in modern NumPy
Example #22
 def compute_test_fund(self):
     im1_pts = self.matches[:,:2]
     n = im1_pts.shape[0]
     im2_pts = self.matches[:,2:]        
     F,mask = cv2.findFundamentalMat(np.float32(im1_pts),np.float32(im2_pts),cv2.FM_8POINT)
     self.F = F
     res = self.compute_residual(im1_pts, im2_pts, n)
     return F,res
Example #23
def step2():
    def homography_cv2(src, dst):
        src_pts = src.reshape(-1,1,2)
        dst_pts = dst.reshape(-1,1,2)
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        return H, mask.ravel().tolist()

    def plot_matching(imA,kpsA,imB,kpsB,matches,H,inliers):
        imB = imB.copy()
        h,w = imA.shape[:2]
        transformed_box = cv2.perspectiveTransform(np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2),H)
        cv2.polylines(imB,[np.int32(transformed_box)],True,(255,255,255),3, cv2.LINE_AA)
        im = cv2.drawMatches(imA,kpsA,imB,kpsB,matches,None,
            matchColor = (255,255,0),
            singlePointColor = None,
            matchesMask = inliers,
            flags = 2)
        cv2.imshow('feature-matching', im)

        def more_points(event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONUP:
                if x<=imA.shape[1]:
                    print('points1.append(({}, {}))'.format(x, y))
                else:
                    print('points2.append(({}, {}))'.format(x-imA.shape[1], y))
        cv2.setMouseCallback('feature-matching', more_points)

    imA = sresize(cv2.imread("Deer/20171128_IMG_7754.JPG"), 0.2)
    imB = sresize(cv2.imread("Deer/20171128_IMG_7755.JPG"), 0.2)
    # SIFT+FLANN matching
    sift = cv2.xfeatures2d.SIFT_create()
    (kpsA, desA) = sift.detectAndCompute(imA, None)
    (kpsB, desB) = sift.detectAndCompute(imB, None)
    flann = cv2.FlannBasedMatcher(dict(algorithm = 0, trees = 5), dict(checks = 50))
    # Lowe's ratio test
    matches = [m for m,n in flann.knnMatch(desA,desB,k=2) if m.distance < 0.7*n.distance] 
    if len(matches)>10:
        while 1: # Just try again when getting singular matrix
            try:
                H, inliers = homography_cv2(
                    np.float32([ kpsA[m.queryIdx].pt for m in matches ]),
                    np.float32([ kpsB[m.trainIdx].pt for m in matches ]))
                break
            except np.linalg.LinAlgError:
                continue
    else:
        print("Not enough matches are found")
        sys.exit(1)
    print('Homography:')
    print(H)
    plot_matching(imA,kpsA,imB,kpsB,matches,H,inliers)
    print('H inliers:', np.sum(inliers))
    inliers = np.flatnonzero(np.array(inliers))
    points1 = np.float32([ kpsA[m.queryIdx].pt for m in matches ])[inliers]
    points2 = np.float32([ kpsB[m.trainIdx].pt for m in matches ])[inliers]
    F, F_inliers = cv2.findFundamentalMat(points1, points2)
    F_inliers = np.flatnonzero(F_inliers)
    return F, np.dot(np.dot(K.T, F), K), points1[F_inliers].tolist(), points2[F_inliers].tolist()
Example #24
def fundamental_matrix(points1, points2):
    F_b, _ = cv2.findFundamentalMat(points1, points2, cv2.FM_8POINT)

    mat = []
    mass_cent = [0., 0.]
    mass_cent_p = [0., 0.]
    for i in range(len(points1)):
        mass_cent[0] += points1[i, 0]
        mass_cent[1] += points1[i, 1]
        mass_cent_p[0] += points2[i, 0]
        mass_cent_p[1] += points2[i, 1]
    mass_cent = np.divide(mass_cent, float(len(points1)))
    mass_cent_p = np.divide(mass_cent_p, float(len(points1)))

    scale1 = 0.
    scale2 = 0.
    for i in range(len(points1)):
        scale1 += np.sqrt((points1[i][0] - mass_cent[0])**2 +
                          (points1[i][1] - mass_cent[1])**2)
        scale2 += np.sqrt((points2[i][0] - mass_cent_p[0])**2 +
                          (points2[i][1] - mass_cent_p[1])**2)

    scale1 = scale1 / len(points1)
    scale2 = scale2 / len(points1)

    scale1 = np.sqrt(2.) / scale1
    scale2 = np.sqrt(2.) / scale2
    A = np.zeros((8, 9))
    for i in range(8):
        x1 = (points1[i][0] - mass_cent[0]) * scale1
        y1 = (points1[i][1] - mass_cent[1]) * scale1
        x2 = (points2[i][0] - mass_cent_p[0]) * scale2
        y2 = (points2[i][1] - mass_cent_p[1]) * scale2

        row = np.array([x2 * x1, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1])
        A[i] = row

    U, S, V = np.linalg.svd(A)

    F = V[-1]
    F = np.reshape(F, (3, 3))
    U, S, V = np.linalg.svd(F)
    S[2] = 0
    F = U @ np.diag(S) @ V

    T1 = np.array([
        scale1, 0, -scale1 * mass_cent[0], 0, scale1, -scale1 * mass_cent[1],
        0, 0, 1
    ])
    T1 = T1.reshape((3, 3))
    T2 = np.array([
        scale2, 0, -scale2 * mass_cent_p[0], 0, scale2,
        -scale2 * mass_cent_p[1], 0, 0, 1
    ])
    T2 = T2.reshape((3, 3))
    F = np.transpose(T2) @ F @ T1
    F = F / F[-1, -1]
    return F, F_b
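Since a fundamental matrix is defined only up to scale (and sign), a minimal sketch for comparing the hand-rolled estimate above with OpenCV's F_b, assuming points1/points2 come from one of the matchers in the other examples:

import numpy as np

F, F_b = fundamental_matrix(points1, points2)
Fn, Fbn = F / np.linalg.norm(F), F_b / np.linalg.norm(F_b)
# compare up to the arbitrary overall sign
diff = min(np.linalg.norm(Fn - Fbn), np.linalg.norm(Fn + Fbn))
print('difference up to scale and sign:', diff)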
Example #25
def GetFundamentalMatrix(img1pts, img2pts, outlierThres=.1, prob=.99):
    # param1/param2 are the old binding names; newer OpenCV calls them
    # ransacReprojThreshold and confidence
    F, mask = cv2.findFundamentalMat(img1pts,
                                     img2pts,
                                     method=cv2.FM_RANSAC,
                                     param1=outlierThres,
                                     param2=prob)
    mask = mask.astype(bool).flatten()

    return F, mask
Example #26
 def F_ES(self):
     '''
     Use RANSAC to estimate the fundamental matrix.
     '''
     pts1, pts2 = self.match_pts1, self.match_pts2
     F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
     F = F / F[2, 2]
     self.F = F
     return F
Example #27
def get_fundamental_matrix_from_csv(csv1, csv2):
    x1 = np.loadtxt(csv1, delimiter=',')
    x2 = np.loadtxt(csv2, delimiter=',')
    F_im, ransac_mask = cv2.findFundamentalMat(x1,
                                               x2,
                                               method=cv2.FM_RANSAC,
                                               param1=3,
                                               param2=0.995)
    return F_im
Example #28
def main():
    files_directory = r'C:\Scratch\IPA_Data\Sampled\15_Ambient_Combined\06_red_green_blue'

    image_names = [r'a0_amb.tif', r'b0_amb.tif']

    image_paths = []

    for filename in image_names:
        image_paths.append(os.path.join(files_directory, filename))

    img1 = cv.imread(image_paths[0], 1)
    img2 = cv.imread(image_paths[1], 1)

    surf = cv.xfeatures2d.SURF_create()

    surf.setHessianThreshold(400)

    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    print(len(kp1))
    print(len(kp2))

    img3 = cv.drawKeypoints(img1, kp1, None, (255, 0, 0), 4)
    img4 = cv.drawKeypoints(img2, kp2, None, (255, 0, 0), 4)

    bf = cv.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    pts2 = []
    pts1 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    f_matrix, mask = cv.findFundamentalMat(pts1, pts2, cv.FM_LMEDS)
    homog = cv.findHomography(pts1, pts2)

    aff = cv.estimateAffine2D(pts1, pts2, np.ones([len(pts1)]), cv.LMEDS, 3,
                              2000, 0.99, 10)

    warp_dst = cv.warpAffine(img1, aff[0], (img2.shape[1], img2.shape[0]))

    center = (warp_dst.shape[1] // 2, warp_dst.shape[0] // 2)

    cv.imshow('Source image', img2)
    cv.imshow('Warp', warp_dst)

    cv.waitKey()
Example #29
 def _f_inlier(self, width=256, height=256):
     grid = np.indices((height, width)).transpose((1, 2, 0))
     grid = np.flip(grid, 2)
     grid = grid[self.y_inds, self.x_inds]
     flow = self.flow * [width, height]
     flow = flow[self.y_inds, self.x_inds]
     F, mask = cv2.findFundamentalMat(grid, flow, cv2.FM_RANSAC, 8, 0.9)
     num_inlier = np.sum(mask)
     return num_inlier / self.tg_rgbvec.shape[0]
Example #30
 def compute_test_fund(self):
     im1_pts = self.matches[:, :2]
     n = im1_pts.shape[0]
     im2_pts = self.matches[:, 2:]
     F, mask = cv2.findFundamentalMat(np.float32(im1_pts),
                                      np.float32(im2_pts), cv2.FM_8POINT)
     self.F = F
     res = self.compute_residual(im1_pts, im2_pts, n)
     return F, res
Example #31
    def FindFundamentalRansac(self, kpts1, kpts2):
        # Compute Fundamental matrix from a set of corresponding keypoints,
        # within a RANSAC scheme
        # @param kpts1: list of keypoints of the previous frame
        # @param kpts2: list of keypoints of the current frame

        kpts1 = np.float32(kpts1)
        kpts2 = np.float32(kpts2)
        self.F, self.mask = cv2.findFundamentalMat(kpts1, kpts2, cv2.FM_RANSAC)
Example #32
def get_fundamental_matrix(kpt_locs1, kpt_locs2):
    # 3x3
    kpt_locs1 = np.asarray(kpt_locs1)
    kpt_locs2 = np.asarray(kpt_locs2)

    F, mask = cv2.findFundamentalMat(kpt_locs1, kpt_locs2, cv2.FM_RANSAC, 0.1,
                                     0.99)

    return F
Example #33
 def computeFundamentalMatrix(self, kps_ref, kps_cur):
     # kRansacThresholdPixels / kRansacProb are assumed module-level constants;
     # param1/param2 are the older OpenCV binding names
     F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC, param1=kRansacThresholdPixels, param2=kRansacProb)
     if F is None or F.shape == (1, 1):
         # no fundamental matrix found
         raise Exception('No fundamental matrix found')
     elif F.shape[0] > 3:
         # more than one matrix found, just pick the first
         F = F[0:3, 0:3]
     return np.matrix(F), mask
Example #34
 def get_movement_fundamental(self, pts_new, pts_old):
     '''get movement between images based on matched points'''
     pts_old = np.float32(pts_old) #convert to floats
     pts_new = np.float32(pts_new)
     F = cv2.findFundamentalMat(pts_old, pts_new, method=cv2.FM_LMEDS)  # cv2.cv.CV_FM_LMEDS in OpenCV 2.x
     #print F[0]
     #get epipolar lines through all of the key points
     lines = cv2.computeCorrespondEpilines(pts_old.reshape(-1, 1, 2), 1, F[0])
     return lines.reshape(-1, 3) #return the lines
Example #35
def FundamentalRANSACCompute(kp1,kp2,desc1,desc2,hamming=False,flann=False):
    MIN_MATCH_COUNT = 10
    if not flann:
        if not hamming:
            bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        else:
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(desc1, desc2)
        # matches = sorted(matches, key=lambda x: x.distance)

        if len(matches) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in matches
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches
                                  ]).reshape(-1, 1, 2)

            M, inlierMask = cv2.findFundamentalMat(src_pts, dst_pts,method=cv2.FM_RANSAC)
            return True, M, inlierMask, matches
        else:
            print('Too few matched points')
            return False,None,None,None
    else:
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(desc1, desc2, k=2)#k=2 means (m,n) bestMatch vs. betterMatch

        good = []
        for m, n in matches:
            if m.distance < 0.8 * n.distance:
                good.append(m)

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, inlierMask = cv2.findFundamentalMat(src_pts, dst_pts, method=cv2.FM_RANSAC)
            return True,M,inlierMask,good
        else:
            print('Too few matched points')
            return False,None,None,None
Example #36
def main():
    img1 = cv2.imread(leftname, 0)  #queryimage # left image
    img2 = cv2.imread(rightname, 0)  #trainimage # right image
    sift = cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # now do best matchings
    pts1 = []
    pts2 = []
    # FLANN parameters
    correspondences = find_correspondence(des1, des2, 50)

    for lidx, ridx, distance in correspondences:
        pts2.append(kp2[ridx].pt)
        pts1.append(kp1[lidx].pt)

    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    #print(pts1, pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    #F = run8point(pts1, pts2)
    #F = run7point(pts1, pts2)[:3]
    # We select only inlier points
    #pts1 = pts1[mask.ravel()==1]
    #pts2 = pts2[mask.ravel()==1]

    def drawlines(img1, img2, lines, pts1, pts2):
        ''' img1 - image on which we draw the epilines for the points in img2
            lines - corresponding epilines '''
        r, c = img1.shape
        img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
        img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
        for r, pt1, pt2 in zip(lines, pts1, pts2):
            color = tuple(np.random.randint(0, 255, 3).tolist())
            x0, y0 = map(int, [0, -r[2] / r[1]])
            x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
            img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
            img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
            img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
        return img1, img2

    # Find epilines corresponding to points in right image (second image) and
    # drawing its lines on left image
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)
    # Find epilines corresponding to points in left image (first image) and
    # drawing its lines on right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
    plt.subplot(121), plt.imshow(img5)
    plt.subplot(122), plt.imshow(img3)
    plt.show()
Example #37
def recover_pose(config, kp1, desc1, kp2, desc2):
    flann_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  # 12
        key_size=12,  # 20
        multi_probe_level=1)  #2
    matcher = cv2.FlannBasedMatcher(
        flann_params, {})  # bug : need to pass empty dict (#1329)

    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  #2
    logging.info('raw match count: %s', len(raw_matches))

    mi, p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    # p1 = cv2.KeyPoint_convert(kp_pairs[0])
    # p1 = cv2.KeyPoint_convert(kp_pairs[1])

    if len(p1) < 4:
        logging.info('only %s matches after filtering (< 4), cannot localize', len(p1))
        return

    logging.info('matches after filtering: %s', len(kp_pairs))

    # H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    # F, status = cv2.findFundamentalMat(p1, p2)
    # H, status = cv2.findHomography(p1, p2, 0)
    H, status = None, None
    if config.homography:
        logging.info("filtering matches with findHomography")
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 3.0)
    elif config.fundamental:
        logging.info("filtering matches with findFundamentalMat")
        H, status = cv2.findFundamentalMat(p1, p2)

    if config.show:
        explore_match(config, kp_pairs, status, H)

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)

    pts1 = np.float64(
        [kpp[0].pt for kpp, flag in zip(kp_pairs, status) if flag])
    pts2 = np.float64(
        [kpp[1].pt for kpp, flag in zip(kp_pairs, status) if flag])

    n = len(np.unique(pts2, axis=0))
    if n < 9:
        logging.info('only %s unique matches (< 9), cannot localize', n)
        return

    K = camera_matrix(config)
    E, mask = cv2.findEssentialMat(pts1, pts2, K)
    retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, K)

    nv = np.matrix(R) * np.float64([0, 0, 1]).reshape(3, 1)
    yaw = np.arctan2(nv[0], nv[2])
    return yaw, t
Example #38
    def filter_by_homography(self, K, i1, i2, j, filter):
        clean = True

        tol = float(i1.width) / 200.0  # rejection range in pixels
        if tol < 1.0:
            tol = 1.0
        # print "tol = %.4f" % tol
        matches = i1.match_list[j]
        if len(matches) < self.min_pairs:
            i1.match_list[j] = []
            return True
        p1 = []
        p2 = []
        for k, pair in enumerate(matches):
            use_raw_uv = False
            if use_raw_uv:
                p1.append(i1.kp_list[pair[0]].pt)
                p2.append(i2.kp_list[pair[1]].pt)
            else:
                # undistorted uv points should be better, right?
                p1.append(i1.uv_list[pair[0]])
                p2.append(i2.uv_list[pair[1]])

        p1 = np.float32(p1)
        p2 = np.float32(p2)
        #print "p1 = %s" % str(p1)
        #print "p2 = %s" % str(p2)
        if filter == "homography":
            #method = cv2.RANSAC
            method = cv2.LMEDS
            M, status = cv2.findHomography(p1, p2, method, tol)
        elif filter == "fundamental":
            method = cv2.FM_RANSAC  # more stable
            #method = cv2.FM_LMEDS # keeps dropping more points
            M, status = cv2.findFundamentalMat(p1, p2, method, tol)
        elif filter == "essential":
            # method = cv2.FM_RANSAC
            method = cv2.FM_LMEDS
            M, status = cv2.findEssentialMat(p1, p2, K, method, threshold=tol)
        elif filter == "none":
            status = np.ones(len(matches))
        else:
            # fail
            M, status = None, None
        print('  %s vs %s: %d / %d  inliers/matched' \
            % (i1.name, i2.name, np.sum(status), len(status)))
        # remove outliers
        for k, flag in enumerate(status):
            if not flag:
                print("    deleting: " + str(matches[k]))
                clean = False
                matches[k] = (-1, -1)
        for pair in reversed(matches):
            if pair == (-1, -1):
                matches.remove(pair)
        return clean
Example #39
def load(path):
    img1 = cv2.imread(path + 'im0.png', 0)
    img2 = cv2.imread(path + 'im1.png', 0)

    kp1, desc1 = keypoints_and_descriptors(img1)
    kp2, desc2 = keypoints_and_descriptors(img2)

    flag = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS

    img_plot = cv2.drawKeypoints(img1,
                                 kp1,
                                 None,
                                 color=(0, 255, 0),
                                 flags=flag)
    cv2.imwrite(path + '_kp1.png', img_plot)
    img_plot = cv2.drawKeypoints(img2,
                                 kp2,
                                 None,
                                 color=(0, 255, 0),
                                 flags=flag)
    cv2.imwrite(path + '_kp2.png', img_plot)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(desc1, desc2)

    img_plot = cv2.drawMatches(img1,
                               kp1,
                               img2,
                               kp2,
                               matches,
                               None,
                               matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0))
    cv2.imwrite(path + '_matches.png', img_plot)

    pts1 = to_array(lambda e: e.pt, kp1)
    pts2 = to_array(lambda e: e.pt, kp2)
    match_arr = to_array(lambda e: (e.queryIdx, e.trainIdx), matches)

    pts1 = pts1[match_arr[:, 0], :] - np.array(img1.shape, dtype=float) / 2
    pts2 = pts2[match_arr[:, 1], :] - np.array(img2.shape, dtype=float) / 2
    all_ones = np.ones(shape=(match_arr.shape[0], 1))
    x = np.hstack((pts1, all_ones, pts2, all_ones))

    F, mask = cv2.findFundamentalMat(pts1, pts2, method=cv2.FM_8POINT)
    dists = []
    for i in range(match_arr.shape[0]):
        epi_lines1 = np.dot(F, x[i, :3])
        epi_lines1 /= np.linalg.norm(epi_lines1[:2])
        dists.append(np.abs(np.sum(x[i, 3:] * epi_lines1)))

    plt.figure()
    plt.plot(dists)

    data = dict(data=x, img1=img1, img2=img2)
    return data
Example #40
def Motion_detect(Image1, Image2, Height, K):
    '''
    Image1: image 1 
    Image2: image 2
    Height: Height of ego car in metres
    K: 3x3 camera intrinsic matrix

    '''

    orb = cv2.ORB_create()
    Keypoints1, Descriptors1 = orb.detectAndCompute(Image1, None)
    Keypoints2, Descriptors2 = orb.detectAndCompute(Image2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    
    # Match descriptors.
    matches = bf.match(Descriptors1,Descriptors2)
    
    # Sort them in the order of their distance.
    # matches = sorted(matches, key = lambda x:x.distance)
    kp1_list = np.mat([])
    kp2_list = np.mat([])
    k = 0

    number_of_matches = 3000

    for m in matches:
        img1Idx = m.queryIdx
        img2Idx = m.trainIdx

        (img1x, img1y) = Keypoints1[img1Idx].pt
        (img2x, img2y) = Keypoints2[img2Idx].pt

        if k == 0:
            kp1_list = [[img1x,img1y,1]]
            kp2_list = [[img2x,img2y,1]]
            k = 1
        else:
            kp1_list = np.append(kp1_list,[[img1x,img1y,1]],axis = 0)
            kp2_list = np.append(kp2_list,[[img2x,img2y,1]],axis = 0)
            k+=1
        if k == number_of_matches:
            break

    F = cv2.findFundamentalMat(kp1_list[:,0:2], kp2_list[:,0:2], method = cv2.FM_RANSAC)
    E = K.T @ F[0] @ K 
    
    # drawMatches needs the original KeyPoint lists, not the coordinate arrays
    Image3 = cv2.drawMatches(Image1, Keypoints1, Image2, Keypoints2,
                             matches[:10], None)
    plt.imshow(Image3)
    plt.show()
    Transformation_info = cv2.recoverPose(E, (kp1_list[:,0:2]), (kp2_list[:,0:2]), K)
    Rotation = Transformation_info[1]
    Translation = Transformation_info[2]
    Translation = Translation# / Translation[2]

    return Rotation, Translation
Example #41
def compute_fundamental_matrix(kp1, kp2, method='ransac', reproj_threshold=5.0, confidence=0.99):
    """
    Given two arrays of keypoints compute the fundamental matrix

    Parameters
    ----------
    kp1 : ndarray
          (n, 2) of coordinates from the source image

    kp2 : ndarray
          (n, 2) of coordinates from the destination image

    method : {'ransac', 'lmeds', 'normal'}
             The OpenCV algorithm to use for outlier detection

    reproj_threshold : float
                       The maximum distances in pixels a reprojected points
                       can be from the epipolar line to be considered an inlier

    confidence : float
                 [0, 1] that the estimated matrix is correct

    Returns
    -------
    transformation_matrix : ndarray
                            The 3x3 transformation matrix

    mask : ndarray
           Boolean array of the outliers

    Notes
    -----
    While the method is user definable, if the number of input points
    is < 7, normal outlier detection is automatically used; if 7 <= n < 15,
    least medians is used; and if n >= 15, RANSAC can be used.
    """
    if method == 'ransac':
        method_ = cv2.FM_RANSAC
    elif method == 'lmeds':
        method_ = cv2.FM_LMEDS
    elif method == 'normal':
        method_ = cv2.FM_7POINT
    else:
        raise ValueError("Unknown outlier detection method.  Choices are: 'ransac', 'lmeds', or 'normal'.")

    transformation_matrix, mask = cv2.findFundamentalMat(kp1,
                                                         kp2,
                                                         method_,
                                                         reproj_threshold,
                                                         confidence)
    try:
        mask = mask.astype(bool)
    except:
        pass  # pragma: no cover

    return transformation_matrix, mask
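A brief usage sketch, assuming kp1 and kp2 are the (n, 2) coordinate arrays the docstring describes (e.g. collected from one of the matchers in the earlier examples):

F, inlier_mask = compute_fundamental_matrix(kp1, kp2,
                                            method='ransac',
                                            reproj_threshold=3.0,
                                            confidence=0.99)
# keep only the correspondences RANSAC marked as inliers
inliers1, inliers2 = kp1[inlier_mask.ravel()], kp2[inlier_mask.ravel()]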
Example #42
def compute_F_RANSAC(img1, img2):
    x1, x2 = get_pixel_points(img1, img2)
    if len(x1) < 8:
        raise RuntimeError('Fundamental matrix requires N >= 8 pts')
    else:
        F, mask = cv2.findFundamentalMat(x1, x2, cv2.FM_RANSAC)
    # U,S,V = np.linalg.svd(F)
    # S[2] = 0
    # F = np.dot(U,np.dot(np.diag(S),V))
    return F, mask
Example #43
def getFundMat(good, kp1, kp2):
    '''
        good: DMatch object list
        kp1,kp2: query key points, train key points
    '''
    print('The number of matches used in fundamental matrix is %d' % len(good))
    pts1,pts2 = getMatchPts(good, kp1, kp2)
    # F, mask = find_fund_mat(pts1,pts2)            #custom version of find fundmat
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
    return F, mask
Example #44
def main(argv):
    r1, r2 = [], []
    with open('points2') as f:
        for s in f:
            x1, y1, x2, y2 = map(float, s.split())
            r1.append([x1, y1])
            r2.append([x2, y2])
    p1, p2 = np.array(r1), np.array(r2)
    ret, mask = cv2.findFundamentalMat(p1, p2)
    print(ret)
Example #45
def hartleyRectify(points1, points2, imgSize, M1, M2, D1, D2, F = None):
    F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 3, 0.99)
    #print 'mask\n', mask
    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        points1, points2, F, imgSize)
    retval, M1i = cv2.invert(M1); retval, M2i = cv2.invert(M2)
    R1, R2 = np.dot(np.dot(M1i, H1), M1), np.dot(np.dot(M2i, H2), M2)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, M1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, M2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), F
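A short follow-up sketch (assuming grayscale inputs imgL/imgR matching the calibration passed in): the returned maps feed straight into cv2.remap to produce the rectified pair.

import cv2

(map1x, map1y, map2x, map2y), F = hartleyRectify(points1, points2, imgSize,
                                                 M1, M2, D1, D2)
rectL = cv2.remap(imgL, map1x, map1y, cv2.INTER_LINEAR)
rectR = cv2.remap(imgR, map2x, map2y, cv2.INTER_LINEAR)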
Example #46
 def _find_fundamental_matrix(self, method='RANSAC'):
     """Estimates fundamental matrix
         :para method: which way to use for fundamental matrix estimation
             RANSAC or DL(deep learning)
     """
     if method == 'RANSAC':
         self.F, self.Fmask = cv2.findFundamentalMat(
             self.match_pts1, self.match_pts2, cv2.FM_RANSAC, 0.1, 0.99)
     elif method == 'DL':
         print('Need to complete')
Example #47
def computeFundamentalMatrix(kps_ref, kps_cur):             
    F, mask = cv2.findFundamentalMat(kps_ref, kps_cur, cv2.FM_RANSAC, 2, 0.99)
    if F is None or F.shape == (1, 1):
        # no fundamental matrix found
        raise Exception('No fundamental matrix found')
    elif F.shape[0] > 3:
        # more than one matrix found, just pick the first
        print('more than one matrix found, just pick the first')
        F = F[0:3, 0:3]
    return np.matrix(F), mask
Example #48
def verify_cv2_fundam(kps1, kps2, tentatives, th=1.0, n_iter=10000):
    src_pts = np.float32([kps1[m.queryIdx].pt
                          for m in tentatives]).reshape(-1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt
                          for m in tentatives]).reshape(-1, 2)
    F, mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.RANSAC, th, 0.999,
                                     n_iter)
    print('cv2 found {} inliers'.format(
        int(deepcopy(mask).astype(np.float32).sum())))
    return F, mask
Example #49
def getEpilines(pts1,pts2,width,height):
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)

    pts = np.array([(i,j) for i in range(width) for j in range(height)])

    epilines = cv2.computeCorrespondEpilines(pts.reshape(-1,1,2), 2,F)
    epilines = epilines.reshape(-1,3)
    return epilines
Example #50
def epipolar_geometry(frame1, frame2):
    #sift = cv2.SIFT()

    # Find the keypoints and descriptors with SIFT
    #kp1, des1 = sift.detectAndCompute(frame1, None)
    #kp2, des2 = sift.detectAndCompute(frame2, None)

    # Trying ORB instead of SIFT
    orb = cv2.ORB_create()  # cv2.ORB() in OpenCV 2.x

    kp1, des1 = orb.detectAndCompute(frame1, None)
    kp2, des2 = orb.detectAndCompute(frame2, None)

    des1, des2 = map(numpy.float32, (des1, des2))

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0 
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good, pts1, pts2 = [], [], []

    # Ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)

    pts1 = numpy.float32(pts1)
    pts2 = numpy.float32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img1, _ = drawlines(frame1, frame2, lines1, pts1, pts2)
    
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)  # points come from image 1
    lines2 = lines2.reshape(-1, 3)
    img2, _ = drawlines(frame2, frame1, lines2, pts2, pts1)

    matplotlib.pyplot.subplot(121)
    matplotlib.pyplot.imshow(img1)
    matplotlib.pyplot.subplot(122)
    matplotlib.pyplot.imshow(img2)
    matplotlib.pyplot.show()

    return F, mask
Example #51
    def __FundamentalMatrix(self, point):
        # Check if the image is frozen.
        # SIGB: The user can freeze the input image by pressing the "f" key.
        if self.__isFrozen:

            # Insert the new selected point in the queue.
            if self.__UpdateQueue(point):

                # Get all points selected by the user.
                points = np.asarray(self.PointsQueue, dtype=np.float32)
                
                # <000> Get the selected points from the left and right images.
                leftPoints = points[1::2]
                rightPoints = points[::2]
                
                #print leftPoints
                #print rightPoints

                # <001> Estimate the Fundamental Matrix.
                F, mask = cv2.findFundamentalMat(leftPoints, rightPoints)

                # Get each point from the left image.
                for point in leftPoints:
                    # <002> Estimate the epipolar line in the right image
                    # (the point is lifted to homogeneous coordinates).
                    rightEpiLine = np.dot(F, np.append(point, 1.0))

                    # <003> Define the initial and final points of the line.
                    a, b, c = rightEpiLine
                    x0 = 640
                    x1 = 1280
                    y0 = int(-(c+a*x0)/b)
                    y1 = int(-(c+a*x1)/b)
                    
                    # <004> Draws the epipolar line in the input image.
                    cv2.line(self.__Image, (x0, y0), (x1, y1), (255, 0, 0), 1)

                # Get each point from the right image.
                for point in rightPoints:
                    # <005> Estimate the epipolar line in the left image
                    # (the point is lifted to homogeneous coordinates).
                    leftEpiLine = np.dot(np.append(point, 1.0), F)
                    
                    a, b, c = leftEpiLine
                    x0 = 0
                    x1 = 640
                    y0 = int(-(c+a*x0)/b)
                    y1 = int(-(c+a*x1)/b)
                    
                    cv2.line(self.__Image, (x0, y0), (x1, y1), (0, 0, 255), 1)
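# NOTE: a quick sanity check one might add (an assumption, not part of the
# original class): for a well-estimated F, a left point x and its right match
# x', both in homogeneous coordinates, should satisfy x'^T F x ~= 0.
def epipolar_residual(F, left_pt, right_pt):
    x = np.append(np.float32(left_pt[:2]), 1.0)    # homogeneous left point
    xp = np.append(np.float32(right_pt[:2]), 1.0)  # homogeneous right match
    return float(np.dot(xp, np.dot(np.asarray(F), x)))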
Example #52
def find_essential_matrix(K_matrices, norm_pts1, norm_pts2):
    '''Estimate an essential matrix that satisfies the epipolar constraint for all the corresponding points.'''
    # K = K1, the calibration matrix of the first camera of the current image pair 
    K = K_matrices[-2]
    # convert to Nx2 arrays for findFundamentalMat
    norm_pts1 = np.array([ pt[0] for pt in norm_pts1 ])
    norm_pts2 = np.array([ pt[0] for pt in norm_pts2 ])
    # inliers (1 in mask) are features that satisfy the epipolar constraint
    F, mask = cv2.findFundamentalMat(norm_pts1, norm_pts2, cv2.RANSAC)
    E = np.dot(K.T, np.dot(F, K))

    return E, mask
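# NOTE: given E, the relative pose (R, t) between the two cameras can be
# recovered; a sketch using cv2.recoverPose with *pixel* coordinates and the
# intrinsics K (the names pts1, pts2 and K here are assumptions):
def pose_from_essential(E, pts1, pts2, K):
    # recoverPose cheirality-tests the four decompositions of E and returns
    # the (R, t) that places the triangulated points in front of both cameras
    n_inliers, R, t, pose_mask = cv2.recoverPose(E, pts1, pts2, K)
    return R, t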
def getfundamentals(matches,img1,img2):
    # each match row is (x1, y1, x2, y2); integer division keeps the shape integral
    pt1 = np.zeros((matches.size // 4, 2), 'float32')
    pt2 = np.zeros((matches.size // 4, 2), 'float32')
    i = 0
    for m in matches:
        pt1[i] = (float(m[0]), float(m[1]))
        pt2[i] = (float(m[2]), float(m[3]))
        i += 1
    F, mask = cv2.findFundamentalMat(pt1, pt2, cv2.FM_LMEDS)

    return F, pt1, pt2
Example #54
def robust_match(p1, p2, matches, config):
    '''Computes robust matches by estimating the Fundamental matrix via RANSAC.
    '''
    if len(matches) < 8:
        return np.array([])

    p1 = p1[matches[:, 0]][:, :2].copy()
    p2 = p2[matches[:, 1]][:, :2].copy()

    F, mask = cv2.findFundamentalMat(p1, p2, cv2.FM_RANSAC, config['robust_matching_threshold'], 0.99)
    inliers = mask.ravel().nonzero()

    return matches[inliers]
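# NOTE: a usage sketch for robust_match; the config key is the one read above,
# but the threshold value and the p1/p2/matches inputs are placeholders:
def demo_robust_match(p1, p2, matches):
    config = {'robust_matching_threshold': 8}  # RANSAC threshold in pixels (a guess)
    return robust_match(p1, p2, matches, config)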
    def FindFundamentalRansacPro(self, queue):
        # Compute Fundamental matrix from a set of corresponding keypoints,
        # within a RANSAC scheme
        # @param kpts1: list of keypoints of the previous frame
        # @param kpts2: list of keypoints of the current frame

        temp = queue.get()
        kpts1 = temp[0]
        kpts2 = temp[1]
        # temp[2] carries the previous F estimate; it is recomputed below

        F, mask = cv2.findFundamentalMat(kpts1, kpts2, cv2.FM_RANSAC)
        res = [F, mask]
        queue.put(res)
Example #56
def calibrate_stereo_pair(sensorpair):
    images = glob.glob('%s/*.jpg' % root_dir)
    img = cv2.imread(images[int(random.random() * len(images))], 0)
    height = 485 // 2   # integer division: these are pixel indices
    width = 1900 // 5
    # The Occam is upright so we transpose here to orient things left-right
    # Not sure if necessary
    img1 = img[0:height, width*sensorpair:width*(sensorpair + 1)].T
    img2 = img[height:485, width*sensorpair:width*(sensorpair + 1)].T

    #img1 = cv2.imread('%s/%s/left.jpg',0)  #queryimage # left image
    #img2 = cv2.imread('%s/%s/right.jpg',0) #trainimage # right image
    #cv2.imshow('left image', img1)
    #cv2.imshow('right image', img2)
    #cv2.waitKey(0)

    sift = cv2.SIFT()  # cv2.SIFT_create() on modern OpenCV

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)

    good = []
    pts1 = []
    pts2 = []

    # ratio test as per Lowe's paper
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    pts1 = np.array(pts1)
    pts2 = np.array(pts2)
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS)

    output = '%s/sensorpair%s' % (root_dir, sensorpair)
    if 'sensorpair%s' % sensorpair not in os.listdir(root_dir):
        os.mkdir(output)
    # np.save writes binary data, so the files must be opened in binary mode
    np.save(open('%s/fundamental.numpy' % output, 'wb'), F)
    np.save(open('%s/mask.numpy' % output, 'wb'), mask)
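# NOTE: a small sketch (an addition, not in the original) for reading the
# saved arrays back in; np.load accepts the paths calibrate_stereo_pair writes:
def load_stereo_pair(sensorpair):
    output = '%s/sensorpair%s' % (root_dir, sensorpair)
    F = np.load('%s/fundamental.numpy' % output)
    mask = np.load('%s/mask.numpy' % output)
    return F, mask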
Example #57
    def reviewFundamentalErrors(self, fuzz_factor=1.0, interactive=True):
        total_removed = 0

        # Test the fundamental matrix constraint
        for i, i1 in enumerate(self.image_list):
            # rejection range in pixels
            tol = float(i1.width) / 800.0 + fuzz_factor
            print "tol = %.4f" % tol
            if tol < 0.0:
                tol = 0.0
            for j, matches in enumerate(i1.match_list):
                if i == j:
                    continue
                if len(matches) < self.min_pairs:
                    i1.match_list[j] = []
                    continue
                i2 = self.image_list[j]
                p1 = []
                p2 = []
                for k, pair in enumerate(matches):
                    p1.append( i1.kp_list[pair[0]].pt )
                    p2.append( i2.kp_list[pair[1]].pt )

                p1 = np.float32(p1)
                p2 = np.float32(p2)
                #print "p1 = %s" % str(p1)
                #print "p2 = %s" % str(p2)
                M, status = cv2.findFundamentalMat(p1, p2, cv2.RANSAC, tol)

                size = len(status)
                inliers = np.sum(status)
                print('%s vs %s: %d / %d  inliers/matched'
                      % (i1.name, i2.name, inliers, size))

                if inliers < size:
                    total_removed += (size - inliers)
                    if interactive:
                        status = self.showMatch(i1, i2, matches, status)

                    delete_list = []
                    for k, flag in enumerate(status):
                        if not flag:
                            print "    deleting: " + str(matches[k])
                            #match[i] = (-1, -1)
                            delete_list.append(matches[k])

                    for pair in delete_list:
                        self.deletePair(i, j, pair)
        return total_removed
    def _ransac(self, method, matches, tl, tr):
        if len(matches) < 4:
            return matches

        pl, pr = self._extract_points(matches, tl, tr)

        _, inliers = cv2.findFundamentalMat(pl, pr, method,
            Detector.RANSAC_D, Detector.RANSAC_C)

        good_matches = []
        for m, is_inlier in zip(matches, inliers):
            if is_inlier:
                good_matches.append(m)

        return good_matches
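# NOTE: Detector.RANSAC_D and Detector.RANSAC_C above are class constants not
# shown in this snippet; plausible values (assumptions, matching the
# ransacReprojThreshold and confidence parameters of findFundamentalMat) are:
#     RANSAC_D = 3.0   # max reprojection distance in pixels
#     RANSAC_C = 0.99  # RANSAC confidence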
Example #59
def filter_matches_by_fundamental_matrix(key_pts1, key_pts2, matches, threshold):

    I = [x.queryIdx for x in matches]
    src_pts = key_pts1[I,:].copy()

    I = [x.trainIdx for x in matches]
    dst_pts = key_pts2[I,:].copy()

    F, mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.FM_RANSAC, threshold)

    mask = np.nonzero(np.reshape(mask, (-1)))[0]
    return [matches[i] for i in mask]
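# NOTE: a usage sketch (the ORB/BFMatcher setup is an assumption); note that
# key_pts1 and key_pts2 must be Nx2 coordinate arrays, not lists of cv2.KeyPoint:
def demo_filter_by_fundamental(img1, img2):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
    key_pts1 = np.float32([kp.pt for kp in kp1])
    key_pts2 = np.float32([kp.pt for kp in kp2])
    return filter_matches_by_fundamental_matrix(key_pts1, key_pts2, matches, 3.0)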
def polynomial_triangulation(u1, P1, u2, P2):
    """
    Polynomial (Optimal) triangulation.
    Uses Linear-Eigen for final triangulation.
    Relative speed: 0.1
    
    (u1, P1) is the reference pair containing normalized image coordinates (x, y) and the corresponding camera matrix.
    (u2, P2) is the second pair.
    
    u1 and u2 are matrices: the number of points equals the number of rows, which must be the same for u1 and u2.
    
    The status-vector is based on the assumption that all 3D points have finite coordinates.
    """
    P1_full = np.eye(4); P1_full[0:3, :] = P1[0:3, :]    # convert to 4x4
    P2_full = np.eye(4); P2_full[0:3, :] = P2[0:3, :]    # convert to 4x4
    P_canon = P2_full.dot(cv2.invert(P1_full)[1])    # find canonical P which satisfies P2 = P_canon * P1
    
    # "F = [t]_cross * R" [HZ 9.2.4]; transpose is needed for numpy
    F = np.cross(P_canon[0:3, 3], P_canon[0:3, 0:3], axisb=0).T
    
    # Other way of calculating "F" [HZ (9.2)]
    #op1 = (P2[0:3, 3:4] - P2[0:3, 0:3] .dot (cv2.invert(P1[0:3, 0:3])[1]) .dot (P1[0:3, 3:4]))
    #op2 = P2[0:3, 0:4] .dot (cv2.invert(P1_full)[1][0:4, 0:3])
    #F = np.cross(op1.reshape(-1), op2, axisb=0).T
    
    # Project 2D matches to closest pair of epipolar lines
    u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # For a purely sideways trajectory of 2nd cam, correctMatches() returns NaN for all possible points!
    if np.isnan(u1_new).all() or np.isnan(u2_new).all():
        F = cv2.findFundamentalMat(u1, u2, cv2.FM_8POINT)[0]    # fall back to a noisy estimate of the fundamental matrix
        u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # Triangulate using the refined image points
    return linear_eigen_triangulation(u1_new[0], P1, u2_new[0], P2)    # TODO: replace with linear_LS: better results for points not at Inf
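# NOTE: linear_eigen_triangulation() is called above but not shown; the sketch
# below builds on cv2.triangulatePoints (a homogeneous DLT) and may differ from
# the author's version. u1, u2 are Nx2 float arrays; P1, P2 are 3x4 camera matrices.
def linear_eigen_triangulation(u1, P1, u2, P2):
    x = cv2.triangulatePoints(P1[0:3, :], P2[0:3, :], u1.T, u2.T)  # 4xN homogeneous
    x = x / x[3]                                  # dehomogenize
    points_3D = x[0:3, :].T                       # Nx3 Euclidean coordinates
    status = np.isfinite(points_3D).all(axis=1)   # finite-coordinate status vector
    return points_3D, status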