Code example #1
    def __init__(self,
                 image_size,
                 margin,
                 num_vertical_points,
                 num_horizontal_points,
                 nonlinear_pert_range=[-2, 2],
                 rot_range=[-np.pi / 8, np.pi / 8],
                 scale_range=[1.05, 1.15],
                 trans_range=[-10, 10],
                 append_offset_channels=False):

        self.nonlinear_pert_range = nonlinear_pert_range
        self.rot_range = rot_range
        self.scale_range = scale_range
        self.trans_range = trans_range
        self.num_points = num_horizontal_points * num_vertical_points
        self.append_offset_channels = append_offset_channels
        horizontal_points = np.linspace(margin, image_size[0] - margin,
                                        num_horizontal_points)
        vertical_points = np.linspace(margin, image_size[1] - margin,
                                      num_vertical_points)
        xv, yv = np.meshgrid(horizontal_points, vertical_points, indexing='xy')
        xv = xv.reshape(1, -1, 1)
        yv = yv.reshape(1, -1, 1)
        self.grid = np.concatenate((xv, yv), axis=2)
        self.matches = list()

        # TPS defines the alignment between source and target grid points;
        # here we simply assume the nth source keypoint aligns with the nth target keypoint
        for i in range(self.num_points):
            self.matches.append(cv2.DMatch(i, i, 0))
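
The grid and identity match list built above are exactly what OpenCV's thin-plate-spline transformer consumes. A minimal, hedged sketch of that hand-off follows; it assumes `aug` is an instance of the class above and `image` is a NumPy image of the configured size, and the argument order follows the TPS examples later in this listing:

tps = cv2.createThinPlateSplineShapeTransformer()
source = aug.grid.astype(np.float32)                  # source control points, shape (1, N, 2)
pert = np.random.uniform(*aug.nonlinear_pert_range, size=source.shape)
target = (source + pert).astype(np.float32)           # randomly perturbed target points
tps.estimateTransformation(target, source, aug.matches)
warped = tps.warpImage(image)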
Code example #2
def SIFT_matching(img1, img2, threshold):
    if img1[2] is None or len(img1[2]) == 0 or img2[2] is None or len(img2[2]) == 0:
        return cv2.drawMatches(img1[0], img1[1], img2[0], img2[1], [], img2[0], flags=2)
    euclidean = scipy.spatial.distance.cdist(img1[2], img2[2], metric='euclidean')
    # note: building the full pairwise distance matrix can run out of memory for large descriptor sets
    sorted1 = np.argsort(euclidean, axis=1)
    closest, closest1 = sorted1[:, 0], sorted1[:, 1]
    left_id = np.arange(img1[2].shape[0])
    dist_ratios = euclidean[left_id, closest] / euclidean[left_id, closest1]
    suppressed = dist_ratios * (dist_ratios < threshold)
    left_id = np.nonzero(suppressed)[0]
    right_id = closest[left_id]
    pairs = np.stack((left_id, right_id)).transpose()
    pair_dists = euclidean[pairs[:, 0], pairs[:, 1]]
    sorted_dist_id = np.argsort(pair_dists)
    sorted_pairs = pairs[sorted_dist_id]
    sorted_dists = pair_dists[sorted_dist_id].reshape((sorted_pairs.shape[0], 1))

    matches = []
    print("Best 10 values: ")
    for i in range(10):
        print("Pairs: {}, dist: {}".format(sorted_pairs[-i], sorted_dists[-i]))
        matches.append(cv2.DMatch(sorted_pairs[-i][0], sorted_pairs[-i][1], sorted_dists[-i]))
    result = cv2.drawMatches(img1[0], img1[1], img2[0], img2[1], matches, img2[0], flags=2)
    return result
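
For reference, a hedged sketch of how the `img1` / `img2` tuples above might be assembled; the function indexes them as `[image, keypoints, descriptors]`, `image1` / `image2` stand for already loaded BGR images, and `cv2.SIFT_create` is assumed to be available in the installed OpenCV build:

sift = cv2.SIFT_create()   # or cv2.xfeatures2d.SIFT_create() on older builds
kp1, des1 = sift.detectAndCompute(cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY), None)
kp2, des2 = sift.detectAndCompute(cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY), None)
result = SIFT_matching((image1, kp1, des1), (image2, kp2, des2), threshold=0.8)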
Code example #3
File: extractMatchRot.py  Project: kinalmehta/d2-net
def drawMatches(image1, image2, feat1, feat2):
	image1 = np.array(image1)
	image2 = np.array(image2)

	matches = match_descriptors(feat1['descriptors'], feat2['descriptors'], cross_check=True)
	print('Number of raw matches: %d.' % matches.shape[0])

	keypoints_left = feat1['keypoints'][matches[:, 0], : 2]
	keypoints_right = feat2['keypoints'][matches[:, 1], : 2]
	np.random.seed(0)
	model, inliers = ransac(
		(keypoints_left, keypoints_right),
		ProjectiveTransform, min_samples=4,
		residual_threshold=8, max_trials=10000
	)
	n_inliers = np.sum(inliers)
	print('Number of inliers: %d.' % n_inliers)

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_left[inliers]]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_right[inliers]]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
	image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2, inlier_keypoints_right, placeholder_matches, None)

	plt.figure(figsize=(20, 20))
	plt.imshow(image3)
	plt.axis('off')
	plt.show()
Code example #4
def getAkazeCorrespondingPoints(kpl, kpr,  desl, desr):
    p1, p2 = getCPoints(kpl, kpr,  desl, desr, 0.2)
    #p1 = p1.reshape(-1, p1.shape[0], 2)
    #p2 = p2.reshape(-1, p2.shape[0], 2)
    H, _ = cv2.findHomography(p1, p2, cv2.RANSAC, 0.7)
    homography = np.array(H).reshape(3,3)
    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
    nn_matches = matcher.knnMatch(desl, desr, 2)
    
    matched1 = []
    matched2 = []
    nn_match_ratio = 0.8 # Nearest neighbor matching ratio
    for m, n in nn_matches:
        if m.distance < nn_match_ratio * n.distance:
            matched1.append(kpl[m.queryIdx])
            matched2.append(kpr[m.trainIdx])
    
    inliers1 = []
    inliers2 = []
    good_matches = []
    inlier_threshold = 2.5 # Distance threshold to identify inliers with homography check
    for i, m in enumerate(matched1):
        col = np.ones((3,1), dtype=np.float64)
        col[0:2,0] = m.pt
        col = np.dot(homography, col)
        col /= col[2,0]
        dist = sqrt(pow(col[0,0] - matched2[i].pt[0], 2) +\
                    pow(col[1,0] - matched2[i].pt[1], 2))
        if dist < inlier_threshold:
            good_matches.append(cv2.DMatch(len(inliers1), len(inliers2), 0))
            inliers1.append(matched1[i])
            inliers2.append(matched2[i])
    pts1 = [list(inliers1[idx].pt) for idx in range(0, len(inliers1))]
    pts2 = [list(inliers2[idx].pt) for idx in range(0, len(inliers2))]
    return pts1, pts2      
Code example #5
File: variant2.py  Project: Dudoserg/python-img
def createImages(img1, img2, coordinates, resultPathName):
    key_points_1 = []
    key_points_2 = []
    allignment_points = []

    i = 0
    for coor in coordinates:
        x1 = coor[0][0]
        y1 = coor[0][1]
        x2 = coor[1][0]
        y2 = coor[1][1]
        key_points_1.append(cv2.KeyPoint(x1, y1, 0))
        key_points_2.append(cv2.KeyPoint(x2, y2, 0))
        allignment_points.append([cv2.DMatch(i, i, 0, 57)])
        i += 1

    # draw the result, aligning the points of the first and second images
    img_result = cv2.drawMatchesKnn(
        img1,
        key_points_1,
        img2,
        key_points_2,
        allignment_points,
        None,
        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    # compute the offset
    bias_x_middle, bias_y_middle = calculate_bias(coordinates)

    # title for the resulting image
    title = "X: " + str(round(bias_x_middle, 5)) + \
            " Y: " + str(round(bias_y_middle, 5))

    # draw and save the result
    plot_create_image(img_result, title, resultPathName)
Code example #6
def match_piece(piece, solved_kp, solved_desc, finished_gray):
    # piece is grayscale sub_image of piece
    # solved_kp are the keypoints in the grayscale solved image
    # solved_desc are the descriptors for the keypoints in the solved image
    sift = cv2.xfeatures2d.SIFT_create()
    kp, des = sift.detectAndCompute(piece, None)

    # Match features.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des, solved_desc, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)

    points1 = np.zeros((len(good), 2), dtype=np.float32)
    points2 = np.zeros((len(good), 2), dtype=np.float32)

    verified_matches = []

    for i, match in enumerate(good):
        points1[i, :] = kp[match.queryIdx].pt
        points2[i, :] = solved_kp[match.trainIdx].pt
        verified_matches.append((match.queryIdx, match.trainIdx))

    # Find homography
    H, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # If no homography is found, raise an exception
    if H is None or not H.any():
        print(len(good))
        raise Exception("ERROR")

    # Check for inlier matches
    inlier_matches = []
    num_inliers = 0
    for match in verified_matches:
        p = pointPairToPoint(kp[match[0]].pt, solved_kp[match[1]].pt)
        dist = getDistForHP(p, H)
        if dist < 1:
            inlier_matches.append(cv2.DMatch(match[0], match[1], 0))
            num_inliers += 1

    img3 = cv2.drawMatches(piece,
                           kp,
                           finished_gray,
                           solved_kp,
                           inlier_matches,
                           None,
                           flags=2)

    # Find coordinates on finished puzzle image
    input_cord = np.array([piece.shape[0] / 2, piece.shape[1] / 2, 1])
    output_cord = np.matmul(H, input_cord.transpose())

    output_xy = (int(output_cord[0] / output_cord[2]),
                 int(output_cord[1] / output_cord[2]))
    return output_xy
Code example #7
def thin_plate_spline(img, init_points):
    tps = cv2.createThinPlateSplineShapeTransformer()

    sshape = init_points.astype(np.float32)
    l = sshape.shape[0]
    print(l)

    delta = np.empty((0, 2))
    for n in range(l):
        randx = random.randint(-50, 50) * 1
        randy = random.randint(-100, 100) * 1
        delta = np.concatenate([delta, np.array([[randx, randy]])])

    # delta = np.array([[-10,100],[10,100],[-10,-100],[10,-100]],np.float32)
    tshape = (sshape + delta).astype(np.float32)  # keep float32 for OpenCV's TPS estimate
    sshape = sshape.reshape(1, -1, 2)
    tshape = tshape.reshape(1, -1, 2)

    matches = list()
    for n in range(l):
        matches.append(cv2.DMatch(n, n, 0))

    tps.estimateTransformation(tshape, sshape, matches)

    out_img = tps.warpImage(img)

    return out_img
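
A short usage sketch for the function above (assuming `cv2`, `numpy as np`, and `random` are imported, and that 'input.jpg' stands in for a real image path); four control points are enough for the TPS estimate:

img = cv2.imread('input.jpg')
init_points = np.array([[50, 50], [250, 50], [50, 250], [250, 250]], dtype=np.float32)
warped = thin_plate_spline(img, init_points)
cv2.imwrite('warped.jpg', warped)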
Code example #8
def filter_ratio_matches(matches, kp1, kp2, ratio=0.7):
    """ Returns Filtered Image Matches based on ratio matches

        :param matches: List of Correspondence
        :param kp1: List of keypoints in Image 1
        :param kp2: List of keypoints in Image 2
        :param ratio: ratio between best / 2nd-best distance, in [0, 1]. Default = 0.7, based on Lowe's paper

        :type matches: List [cv2.DMatch]
        :type kp1: List cv2.KeyPoint
        :type kp2: List cv2.KeyPoint
        :type ratio: float

        :returns filtered matches, filtered keypoints in image 1, filtered keypoints in image 2
        :rtype List [cv2.DMatch],  List cv2.KeyPoint,  List cv2.KeyPoint
    """
    new_kp1, new_kp2, new_matches = [], [], []
    ctr = 0
    for m, n in matches:
        if m.distance < ratio * n.distance:
            new_kp1.append(kp1[m.queryIdx])
            new_kp2.append(kp2[m.trainIdx])
            new_matches.append([cv.DMatch(ctr, ctr, m.distance)])
            ctr += 1
    return new_matches, new_kp1, new_kp2
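
A hedged usage sketch: the `matches` argument is the k=2 output of `knnMatch`, and because the filtered matches are renumbered they can be drawn directly with the filtered keypoint lists (variable names here are illustrative):

bf = cv.BFMatcher()
knn_matches = bf.knnMatch(des1, des2, k=2)
good, good_kp1, good_kp2 = filter_ratio_matches(knn_matches, kp1, kp2, ratio=0.7)
vis = cv.drawMatchesKnn(img1, good_kp1, img2, good_kp2, good, None,
                        flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)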
Code example #9
    def GeometricFilterFromMatcher(self, im1, im2, Filer='ORSA_H', verb=False):
        filercode = 0
        if Filer == 'ORSA_F':
            filercode = 1
        T = np.zeros(9, dtype=ctypes.c_float)
        floatp = ctypes.POINTER(ctypes.c_float)
        intp = ctypes.POINTER(ctypes.c_int)
        # boolp = ctypes.POINTER(ctypes.c_bool)
        h1, w1 = im1.shape[:2]
        h2, w2 = im2.shape[:2]
        self.libDA.GeometricFilterFromNodes(self.MatcherPtr,
                                            T.ctypes.data_as(floatp), w1, h1,
                                            w2, h2, filercode, verb)

        NFM = self.libDA.NumberOfFilteredMatches(self.MatcherPtr)
        FM = np.zeros(3 * NFM, dtype=ctypes.c_int)
        self.libDA.ArrayOfFilteredMatches(self.MatcherPtr,
                                          FM.ctypes.data_as(intp))
        # print(NFM,FM)
        Matches = [
            cv2.DMatch(FM[3 * i], FM[3 * i + 1], FM[3 * i + 2])
            for i in range(0, NFM)
        ]

        return Matches, T.astype(np.float64).reshape(3, 3)
Code example #10
def featureMatching(interestPoints1, interestPoints2):
    a = len(interestPoints1)
    b = len(interestPoints2)
    totalMatches = []
    ssdRatio = []
    for first in range(0, a):
        value1 = interestPoints1[first][2]
        bestIndex = 0
        bestDistance = 10
        secondBestDistance = 10

        for second in range(0, b):
            value2 = interestPoints2[second][2]
            d = value1 - value2
            d = d ** 2
            tempSum = d.sum()

            if tempSum < bestDistance:
                secondBestDistance = bestDistance
                bestDistance = tempSum
                bestIndex = second
            elif tempSum < secondBestDistance:
                # keep the second-best distance for the ratio computed below
                secondBestDistance = tempSum

        if bestDistance < thresholdMatching:
            ssdRatio.append((bestDistance, secondBestDistance, bestDistance / secondBestDistance))
            match = cv.DMatch(first, bestIndex, bestDistance)
            totalMatches.append(match)

    return totalMatches, ssdRatio
Code example #11
def featureMatcher(descriptor1, descriptor2):
    matchvector = []
    bestdistance = 100000
    secondbestmatch = 100000
    bestindexes = (0,0)
    secondbestindexes = (0,0)

    for x in range(descriptor1.shape[0]):
        for y in range(descriptor2.shape[0]):
            distance = SSD(descriptor1[x], descriptor2[y])
           
            #if the distance is greater than the current best, ignore it and continue
            if distance > bestdistance: 
                continue
            
            ratio = bestdistance / secondbestmatch
           
            if distance < bestdistance and ratio <= 1:
                secondbestmatch = bestdistance
                secondbestindexes = bestindexes
                bestdistance = distance
                bestindexes = (x,y)
        #resetting the best values for the next iteration
        bestx, besty = bestindexes
        newmatch = cv2.DMatch(bestx, besty, bestdistance)
        matchvector.append(newmatch)     
        bestdistance = 100000
        secondbestmatch = 100000
        bestindexes = (0,0)
        secondbestindexes = (0,0)
        
    matchvector = sorted(matchvector, key = lambda x:x.distance)       
    return matchvector[:200]
Code example #12
def test_symmetric_matches_knn():
    matches1 = [
        [cv2.DMatch(_queryIdx=i, _trainIdx=j, _distance=10) for i, j in product(range(k, k + 3), range(k, k + 3))]
        for k in range(10)
    ]
    matches2 = [
        [cv2.DMatch(_queryIdx=i, _trainIdx=j, _distance=15) for i, j in product(range(k, k + 3), range(k, k + 3))]
        for k in range(5, 15)
    ]
    assert len(utils.symmetric_matches_k_matches_per_point(matches1, matches2, 1)) == 0
    symmetric = utils.symmetric_matches_k_matches_per_point(matches1, matches2, 10)
    assert len(symmetric) == 7
    assert len(symmetric[0]) == 1
    assert len(symmetric[1]) == 4
    assert len(symmetric[2]) == 9
    assert len(symmetric[-1]) == 9
Code example #13
    def matchFeatures(self, desc1, desc2):
        '''
        Input:
            desc1 -- the feature descriptors of image 1 stored in a numpy array,
                dimensions: rows (number of key points) x
                columns (dimension of the feature descriptor)
            desc2 -- the feature descriptors of image 2 stored in a numpy array,
                dimensions: rows (number of key points) x
                columns (dimension of the feature descriptor)
        Output:
            features matches: a list of cv2.DMatch objects
                How to set attributes:
                    queryIdx: The index of the feature in the first image
                    trainIdx: The index of the feature in the second image
                    distance: The ratio test score
        '''
        matches = []
        # feature count = n
        assert desc1.ndim == 2
        # feature count = m
        assert desc2.ndim == 2
        # the two descriptor sets must have the same feature dimension
        assert desc1.shape[1] == desc2.shape[1]

        if desc1.shape[0] == 0 or desc2.shape[0] == 0:
            return []

        dists = scipy.spatial.distance.cdist(desc1, desc2)
        for i in range(dists.shape[0]):
            dist = np.partition(dists[i], 1)[:2]
            match = cv2.DMatch(i, np.argmin(dists[i]), dist[0] / dist[1])
            matches.append(match)

        return matches
Code example #14
File: match.py  Project: zihsh/CV
def matchFeatures(desc1, desc2):
    '''
    params:
        desc1: descriptors of img1
        desc2: descriptors of img2
    return:
        feature matches: a list of cv2.DMatch
    '''
    matches = []
    # feature count = n
    assert desc1.ndim == 2
    # feature count = m
    assert desc2.ndim == 2
    # the two descriptor sets must have the same feature dimension
    assert desc1.shape[1] == desc2.shape[1]

    if desc1.shape[0] == 0 or desc2.shape[0] == 0:
        return []

    for i in range(desc1.shape[0]):
        u = desc1[i]
        diff = desc2 - u
        diff = diff ** 2
        sum_diff = diff.sum(axis = 1)
        dis = sum_diff ** 0.5
        j = np.argmin(dis)
        match = cv2.DMatch()
        match.queryIdx = i
        match.trainIdx = int(j)
        match.distance = dis[j]
        matches.append(match)

    return matches
Code example #15
 def getTrueMatch(self, thre=1):
     Truelistindex = self.TrueMatches >= thre
     # get the list of feature points in img1
     leftkeypointarr = np.where(self.TrueMatches >= thre)
     # (x, y) corresponds to [col, row], so the coordinates need to be swapped
     self.leftkeypoint = [
         cv2.KeyPoint(y, x, 1)
         for (x, y) in zip(leftkeypointarr[0], leftkeypointarr[1])
     ]
     # get the corresponding img2 coordinates
     rightkeypointarr = [
         self.leftmatchc[Truelistindex], self.leftmatchr[Truelistindex]
     ]
     self.rightkeypoint = [
         cv2.KeyPoint(x, y, 1)
         for (x, y) in zip(rightkeypointarr[0], rightkeypointarr[1])
     ]
     # build the matches
     lens = len(self.leftkeypoint)
     self.truematch = [
         cv2.DMatch(x, y, 1)
         for (x, y) in zip(np.arange(0, lens), np.arange(0, lens))
     ]
     # return the results
     return self.leftkeypoint, self.rightkeypoint, self.truematch
Code example #16
def return_Dmatch(cover_des, desk_des):
    c_len = len(cover_des)
    d_len = len(desk_des)
    dist = np.zeros((c_len, d_len))
    result = []
    for j in range(d_len):
        min = 100000
        mini = -1
        for i in range(c_len):
            if min > cv2.norm(cover_des[i], desk_des[j], cv2.NORM_HAMMING):
                min = cv2.norm(cover_des[i], desk_des[j], cv2.NORM_HAMMING)
                mini = i
        temp = cv2.DMatch()
        temp.imgIdx = 0
        temp.queryIdx = j
        temp.trainIdx = mini
        temp.distance = min
        result.append(temp)
    result = sorted(result, key=lambda x: x.distance)
    '''th = 1
    print(len(result))
    for i in range(len(result)-1):
        if abs(result[i].distance - result[i+1].distance) < th:
           result.remove(result[i])'''
    return result
Code example #17
def SSDFeatureMatcher(desc1, desc2):
    matches = []
    # feature count = n
    assert desc1.ndim == 2
    # feature count = m
    assert desc2.ndim == 2
    # the two descriptor sets must have the same feature dimension
    assert desc1.shape[1] == desc2.shape[1]

    if desc1.shape[0] == 0 or desc2.shape[0] == 0:
        return []

    # TODO 7: Perform simple feature matching.  This uses the SSD
    # distance between two feature vectors, and matches a feature in
    # the first image with the closest feature in the second image.
    # Note: multiple features from the first image may match the same
    # feature in the second image.
    # TODO-BLOCK-BEGIN
    num_features, num_feature_dims = desc1.shape
    dists = spatial.distance.cdist(desc1, desc2)
    mins = np.argmin(dists, axis=1)
    for i in range(len(mins)):
        m = cv2.DMatch(_queryIdx=i,
                       _trainIdx=mins[i],
                       _distance=np.amin(dists[i]))
        matches.append(m)
    # TODO-BLOCK-END

    return matches
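
For comparison, a sketch of the same nearest-neighbour matching done with OpenCV's built-in brute-force matcher; it reports L2 rather than SSD distances, but since SSD is monotonic in L2 the selected pairs are the same (`desc1` / `desc2` are float32 descriptor arrays):

bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
matches = bf.match(desc1, desc2)               # one best DMatch per query descriptor
matches = sorted(matches, key=lambda m: m.distance)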
Code example #18
def match(binarray, binarray2):
    templis = [0.0, 0, 0]
    matchess = []
    matche = []
    for i in range(len(binarray)):
        temp = 100

        for j in range(len(binarray2)):
            k = binarray[i] - binarray2[j]
            k = (k * k).sum()
            if k < temp:
                temp = k
                templis = [k, i, j]
        t = templis.copy()
        matche.append(t)

    sort = sorted(matche, key=lambda tup: tup[2])
    last_used = sort[0][2]
    xyz = []
    mainmatche = []

    #Custom function to remove bad matches
    for num in sort:
        if num[2] == last_used:
            xyz.append(num)
        elif num[2] > last_used:
            ans = sorted(xyz, key=lambda tup: tup[0])
            mainmatche.append(ans[0])
            xyz = []
            last_used = num[2]
            xyz.append(num)

    # flush the final group of candidate matches
    if xyz:
        ans = sorted(xyz, key=lambda tup: tup[0])
        mainmatche.append(ans[0])

    for t in mainmatche:
        matchess.append(cv2.DMatch(t[1], t[2], t[0]))
    return matchess
Code example #19
File: image.py  Project: zhuyoucai168/figocr
def draw_match_2_side(img1, kp1, img2, kp2, N):
    """Draw matches on 2 sides
    Args:
        img1 (HxW(xC) array): image 1
        kp1 (Nx2 array): keypoint for image 1
        img2 (HxW(xC) array): image 2
        kp2 (Nx2 array): keypoint for image 2
        N (int): number of matches to draw
    Returns:
        out_img (Hx2W(xC) array): output image with drawn matches
    """
    kp_list = np.linspace(0,
                          min(kp1.shape[0], kp2.shape[0]) - 1,
                          N,
                          dtype=int)

    # Convert keypoints to cv2.Keypoint object
    cv_kp1 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp1[kp_list]]
    cv_kp2 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp2[kp_list]]

    out_img = np.array([])
    good_matches = [
        cv2.DMatch(_imgIdx=0, _queryIdx=idx, _trainIdx=idx, _distance=0)
        for idx in range(N)
    ]
    out_img = cv2.drawMatches(img1,
                              cv_kp1,
                              img2,
                              cv_kp2,
                              matches1to2=good_matches,
                              outImg=out_img)

    return out_img
Code example #20
File: demo.py  Project: xing-x2/OANet
def draw_match(img1_path, img2_path, corr1, corr2):
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)

    corr1 = [
        cv2.KeyPoint(corr1[i, 0], corr1[i, 1], 1)
        for i in range(corr1.shape[0])
    ]
    corr2 = [
        cv2.KeyPoint(corr2[i, 0], corr2[i, 1], 1)
        for i in range(corr2.shape[0])
    ]

    assert len(corr1) == len(corr2)

    draw_matches = [cv2.DMatch(i, i, 0) for i in range(len(corr1))]

    display = cv2.drawMatches(img1,
                              corr1,
                              img2,
                              corr2,
                              draw_matches,
                              None,
                              matchColor=(0, 255, 0),
                              singlePointColor=(0, 0, 255),
                              flags=4)
    return display
Code example #21
def kNN_matcher(des1, des2, k=2):
    """
    Input - 
            des1 & des2 - Descriptor matrices for 2 images
            k - Number of nearest neighbors to consider
    Returns - A vector of nearest neighbors of des1 & their indices for keypoints
    
    Mnemonic - des1 is like xtest, des2 is like xtrain
    """

    # Compute pairwise L2 distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    distances = np.sum(des1**2, axis=1, keepdims=True) + np.sum(
        des2**2, axis=1) - 2 * des1.dot(des2.T)
    distances = np.sqrt(np.maximum(distances, 0.0))  # clamp round-off negatives

    # Get smallest indices
    min_indices = np.argsort(distances, axis=1)

    # Collect the k nearest matches per query descriptor
    nearest_neighbors = []

    # Iter for nearest neighbors
    for i in range(min_indices.shape[0]):
        neighbors = min_indices[i][:k]
        curr_matches = []
        for j in range(len(neighbors)):
            match = cv2.DMatch(i, neighbors[j], 0,
                               distances[i][neighbors[j]] * 1.)
            curr_matches.append(match)
        nearest_neighbors.append(curr_matches)

    return nearest_neighbors
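
A hedged sketch of applying Lowe's ratio test on top of the k=2 output above, mirroring how other examples in this listing filter `knnMatch` results (`des1` / `des2` are illustrative descriptor arrays):

good = []
for m, n in kNN_matcher(des1, des2, k=2):
    if m.distance < 0.8 * n.distance:   # best neighbour clearly closer than the 2nd best
        good.append(m)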
Code example #22
File: getMatch.py  Project: singnet/semantic-vision
def matchesFromResponse(response):
    matches = []
    for oneMatch in response.all_matches:
        matches.append(
            cv2.DMatch(oneMatch.queryIdx, oneMatch.trainIdx, oneMatch.imgIdx,
                       oneMatch.distance))
    return matches
Code example #23
def keypoint_guided_tps():

    num_sample = 64
    pair_list = io.load_json(
        'datasets/DF_Pose/Label/pair_split.json')['test'][0:num_sample]
    pose_label = io.load_data('datasets/DF_Pose/Label/pose_label.pkl')
    image_dir = 'datasets/DF_Pose/Img/img_df/'
    seg_dir = 'datasets/DF_Pose/Img/seg-lip_df_revised/'
    output_dir = 'temp/patch_matching/output/tps_keypoint/'
    io.mkdir_if_missing(output_dir)
    tps = cv2.createThinPlateSplineShapeTransformer()

    for i, (id_1, id_2) in enumerate(tqdm.tqdm(pair_list)):
        kp_1 = np.array(pose_label[id_1][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        kp_2 = np.array(pose_label[id_2][1:14],
                        dtype=np.float64).reshape(1, -1, 2)
        kp_matches = []
        for j in range(kp_1.shape[1]):
            if (kp_1[0, j] >= 0).all() and (kp_2[0, j] >= 0).all():
                kp_matches.append(cv2.DMatch(j, j, 0))
        if len(kp_matches) == 0:
            continue

        tps.estimateTransformation(kp_2, kp_1, kp_matches)
        img_1 = cv2.imread(image_dir + id_1 + '.jpg')
        img_2 = cv2.imread(image_dir + id_2 + '.jpg')

        img_w = tps.warpImage(img_1)
        seg = cv2.imread(seg_dir + id_2 + '.bmp', cv2.IMREAD_GRAYSCALE)
        mask = ((seg == 3) | (seg == 7)).astype(img_w.dtype)[:, :, np.newaxis]
        img_out = img_w * mask + img_2 * (1 - mask)

        cv2.imwrite(output_dir + '%d_%s_%s.jpg' % (i, id_1, id_2), img_out)
        cv2.imwrite(output_dir + 'w%d_%s_%s.jpg' % (i, id_1, id_2), img_w)
Code example #24
File: evaluator.py  Project: jsBrique/pyslam-1
 def feature_matcher(self, sess, ref_feat, test_feat):
     matches = self.bf_matcher(sess, ref_feat, test_feat)
     matches = [
         cv2.DMatch(matches[i][0], matches[i][1], 0)
         for i in range(matches.shape[0])
     ]
     return matches
Code example #25
    def match(self, des1, des2, thresh=150.0):
        # (inefficient & slow right now)
        # hungarian algorithm based matching
        nax = np.newaxis
        c = np.linalg.norm(des1[:,nax] - des2[nax,:], axis=-1)

        # cost mask with outliers removed
        i_good = np.where(np.min(c, axis=1) < thresh)[0]
        j_good = np.where(np.min(c, axis=0) < thresh)[0]

        c = c[i_good[:,nax], j_good]

        mi, mj = linear_sum_assignment(c)

        # matching cost
        c      = c[mi,mj]

        # real match index
        mi, mj = i_good[mi], j_good[mj]

        matches = [cv2.DMatch(
            _queryIdx = e1,
            _trainIdx = e2,
            _distance = e3)
            for (e1,e2,e3) in zip(mi, mj, c)]
        return matches
Code example #26
    def match(self, desc1, desc2):
        desc1 = desc1.transpose()
        desc2 = desc2.transpose()

        assert desc1.shape[0] == desc2.shape[0]
        if desc1.shape[1] == 0 or desc2.shape[1] == 0:
            return []

        # Compute L2 distance. Easy since vectors are unit normalized.
        dmat = np.dot(desc1.T, desc2)
        dmat = np.sqrt(2 - 2 * np.clip(dmat, -1, 1))
        # Get NN indices and scores.
        idx = np.argmin(dmat, axis=1)
        scores = dmat[np.arange(dmat.shape[0]), idx]
        # Threshold the NN matches.
        keep = scores < self.nn_thresh
        # Check if nearest neighbor goes both directions and keep those.
        idx2 = np.argmin(dmat, axis=0)
        keep_bi = np.arange(len(idx)) == idx2[idx]
        keep = np.logical_and(keep, keep_bi)
        idx = idx[keep]
        scores = scores[keep]
        # Get the surviving point indices.
        m_idx1 = np.arange(desc1.shape[1])[keep]
        m_idx2 = idx

        # Populate the final list of cv2.DMatch objects.
        matches = []
        for i1, i2, d in zip(m_idx1, m_idx2, scores):
            matches.append(cv2.DMatch(i1, i2, d))

        return matches
Code example #27
def dictToMatch(match):
    return cv.DMatch(
        match['queryIdx'],
        match['trainIdx'],
        match['imgIdx'],
        match['distance'],
    )
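
A possible complement for serialization, shown here as a hypothetical helper rather than part of the original project: convert a `cv2.DMatch` back into the dictionary shape that `dictToMatch` consumes.

def matchToDict(match):
    # hypothetical inverse of dictToMatch: pull out the four DMatch attributes
    return {
        'queryIdx': match.queryIdx,
        'trainIdx': match.trainIdx,
        'imgIdx': match.imgIdx,
        'distance': match.distance,
    }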
Code example #28
def findMatches(descriptor1, descriptor2):

    index1 = 0
    matches = []
    ssd = []
    for point1 in descriptor1:

        sum_of_square = (descriptor2 - point1)**2
        sum_of_square = np.sum(sum_of_square, axis=1)
        min_index = np.argmin(sum_of_square)

        temp_sum = sum_of_square.copy()
        temp_sum = np.delete(temp_sum, min_index)

        min_index_second = np.argmin(temp_sum)
        min_value_second = temp_sum[min_index_second]

        min_value = sum_of_square[min_index]

        ratio = float(min_value / min_value_second)
        #        vector.append(min_index_second)

        index2 = min_index

        if (ratio < 0.8):
            #            print(min_value_second, min_value_second)
            #            print(ratio)
            match = cv2.DMatch(index1, index2, ratio)
            matches.append(match)
            ssd.append(ratio)

        index1 = index1 + 1
    # sort once, after all descriptors have been processed
    matches = sorted(matches, key=lambda x: x.distance)
    return matches, ssd
Code example #29
def customLoader(d):
    '''This function supports the deserialization of the custom types defined
       above.'''
    if '__type__' in d:
        if d['__type__'] == 'cv2.KeyPoint':
            k = cv2.KeyPoint()
            k.pt = (float(d['point'][0]), float(d['point'][1]))
            k.size = float(d['size'])
            k.angle = float(d['angle'])
            k.response = float(d['response'])
            k.octave = int(d['octave'])
            k.class_id = int(d['class_id'])
            return k
        elif d['__type__'] == 'cv2.DMatch':
            dm = cv2.DMatch()
            dm.distance = float(d['distance'])
            dm.trainIdx = int(d['trainIdx'])
            dm.queryIdx = int(d['queryIdx'])
            dm.imgIdx = int(d['imgIdx'])
            return dm
        elif d['__type__'] == 'numpy.ndarray':
            arr = np.array([float(x) for x in d['__array__']])
            arr = arr.reshape(tuple([int(x) for x in d['__shape__']]))
            return arr
        else:
            return d
    else:
        return d
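
A short usage sketch, assuming the JSON was written by a matching custom encoder and that 'features.json' is a placeholder file name: pass `customLoader` as the `object_hook` when decoding, so tagged KeyPoint / DMatch / ndarray entries come back as the original types.

import json

with open('features.json') as fh:
    data = json.load(fh, object_hook=customLoader)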
Code example #30
File: main.py  Project: tatyana2501/Recognition
def AKAZE(im1,im2):
    start = time.time()
    detector = cv.AKAZE_create()
    (kps1,descs1)=detector.detectAndCompute(im1,None)
    (kps2,descs2)=detector.detectAndCompute(im2,None)
    finish = time.time()
    bf=cv.BFMatcher(cv.NORM_HAMMING)
    matches = bf.knnMatch(descs1,descs2,k=2)
    matched1 = []
    matched2 = []
    match,mm=[],[]
    t.append(finish-start)
    nn_match_ratio = 0.8 # Nearest neighbor matching ratio
    for m, n in matches:
        mm.append((m.distance + n.distance) / 2.0)
        if m.distance < nn_match_ratio * n.distance:
            match.append(cv.DMatch(len(matched1), len(matched2), 0))
            matched1.append(kps1[m.queryIdx])
            matched2.append(kps2[m.trainIdx])
    m2=np.mean(mm)
    res = np.empty((max(im1.shape[0], im2.shape[0]), im1.shape[1]+im2.shape[1], 3), dtype=np.uint8)
    im3 = cv.drawMatches(im1,matched1,im2,matched2,match,res)
    good.append(len(match)/len(kps1))
    loc.append(m2)
    f.write("%9f%12f%10f\n"%(len(match)/len(kps1),m2,finish-start))
    print("keypoints: {}, descriptors: {}".format(len(kps1), descs1.shape))
    print("keypoints: {}, descriptors: {}".format(len(kps2), descs2.shape))
    print("Matches: {}".format(len(matched1)))
    print("Matches: {}".format(len(matched2)))
    print("{}".format(matches))
    print("{}".format(matched1))
    print("{}".format(matched2))