Example #1
    def initialize(self, image_data):
        """
        Initializes the bag of words descriptor and returns the mapped results of image_data

        :param image_data: ndarray [n, 3D image]
        :return: ndarray [n, num_clusters]; one quantized bag-of-words descriptor (histogram row) per image
        """
        termination_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        bow_model = cv2.BOWKMeansTrainer(self._num_clusters, termination_criteria)

        key_point_tensor = {}
        for i in range(image_data.shape[0]):
            cv_image = image_data[i]
            descriptors, key_points = BagOfFeaturesTransform.extract_features_descriptors(cv_image, self._patch_size)

            key_point_tensor[i] = key_points
            bow_model.add(descriptors[1])

        self._clusters = bow_model.cluster()

        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(non_free.SURF_create(extended=True),
                                                                    cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)

        training_x = []
        for img_idx, img_descriptors in key_point_tensor.items():
            image_quantized_descriptor = self._img_descriptor_mapper.compute(image_data[img_idx], img_descriptors)
            training_x.append(image_quantized_descriptor)

        return np.vstack(training_x)
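For orientation, a minimal usage sketch follows; the BagOfFeaturesTransform constructor and its arguments are assumptions here, since the class definition is not part of this example:

# Hypothetical usage of initialize(); constructor arguments are assumed.
import cv2
import numpy as np

images = np.stack([cv2.imread(p) for p in ['a.jpg', 'b.jpg']])  # (n, H, W, 3)
transform = BagOfFeaturesTransform(num_clusters=500, patch_size=16)  # assumed ctor
features = transform.initialize(images)  # one BoW histogram row per image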
Example #2
    def _match_features(self, img1_kp_des, img2_kp_des):
        kp1, des1 = img1_kp_des
        kp2, des2 = img2_kp_des

        matcher = cv.FlannBasedMatcher_create()
        matches = matcher.knnMatch(des2, des1, k=2)

        # Filter out unreliable points
        good = []
        for m, n in matches:
            if m.distance < 0.5 * n.distance:
                good.append(m)

        print('good matches', len(good), '/', len(matches))
        if len(good) < 3:
            return None
        # Convert keypoints to the format expected by the estimator
        src_pts = np.float32([kp1[m.trainIdx][0]
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.queryIdx][0]
                              for m in good]).reshape(-1, 1, 2)

        # find out how images shifted (compute affine transformation)
        affine_transform_matrix, mask = cv.estimateAffinePartial2D(
            dst_pts, src_pts, method=cv.RANSAC, confidence=0.99)
        return affine_transform_matrix
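For reference, a self-contained sketch of the same ratio-test-plus-RANSAC flow, assuming an OpenCV build that provides cv.SIFT_create and using plain cv.KeyPoint objects instead of the (pt, ...) tuples this class stores:

import cv2 as cv
import numpy as np

def estimate_shift(img1, img2):
    # Detect keypoints and compute descriptors in both grayscale images
    sift = cv.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Two nearest neighbors per descriptor, then Lowe's ratio test
    matches = cv.FlannBasedMatcher_create().knnMatch(des2, des1, k=2)
    good = [m for m, n in matches if m.distance < 0.5 * n.distance]
    if len(good) < 3:
        return None
    src = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    # Partial affine: rotation, uniform scale, and translation only
    matrix, _ = cv.estimateAffinePartial2D(dst, src,
                                           method=cv.RANSAC, confidence=0.99)
    return matrix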
Example #3
def main():
    left = cv2.imread('img/i.jpg')
    #left=cv2.resize(left,dsize=(512,512))
    left_gray = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
    right = cv2.imread('img/j.jpg')
    #right=cv2.resize(right,dsize=(512,512))
    right_gray = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)

    # Extract SURF keypoints and descriptors from the left and right images
    detector = cv2.xfeatures2d_SURF.create(hessianThreshold=400)
    left_kps, left_dess = detector.detectAndCompute(left_gray, None)
    right_kps, right_dess = detector.detectAndCompute(right_gray, None)

    # Match the keypoints of the two images with kNN
    matcher = cv2.FlannBasedMatcher_create()
    knn_matchers = matcher.knnMatch(left_dess, right_dess, 2)
    good_keypoints = []

    # Keep only the good matches
    for m, n in knn_matchers:
        if m.distance < 0.5 * n.distance:
            good_keypoints.append(m)
    left_points = np.zeros(shape=(len(good_keypoints), 2), dtype=np.float32)
    right_points = np.zeros(shape=(len(good_keypoints), 2), dtype=np.float32)
    # drawMatches canvas: image height by combined width (shape[1], not shape[0])
    outimg = np.zeros(shape=(right.shape[0], right.shape[1] + left.shape[1],
                             3),
                      dtype=np.uint8)
    cv2.drawMatches(left, left_kps, right, right_kps, good_keypoints, outimg)
    # cv2.imshow('hks',outimg)
    # cv2.waitKey(0)
    for i in range(len(good_keypoints)):
        left_points[i][0] = left_kps[good_keypoints[i].queryIdx].pt[0]
        left_points[i][1] = left_kps[good_keypoints[i].queryIdx].pt[1]
        right_points[i][0] = right_kps[good_keypoints[i].trainIdx].pt[0]
        right_points[i][1] = right_kps[good_keypoints[i].trainIdx].pt[1]

    # Compute the homography matrix
    H, _ = cv2.findHomography(right_points, left_points)

    # Compute the perspective-transformed corners of the right image
    warp_point = warp_corner(H, right)
    # Warp the right image with the perspective transform
    imagewarp = cv2.warpPerspective(
        right, H, (left.shape[1] + right.shape[1], left.shape[0]))

    # Stitch the left and right images and return the final panorama
    image_seam_optim = Seam_Left_Right(left,
                                       imagewarp,
                                       H,
                                       warp_point,
                                       with_optim_mask=True)
    cv2.namedWindow('image_seam_optim', cv2.WINDOW_NORMAL)
    cv2.imshow('image_seam_optim', image_seam_optim)
    cv2.waitKey(0)
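warp_corner and Seam_Left_Right are helper functions defined elsewhere; a plausible sketch of what warp_corner computes (the four corners of the right image mapped through H), offered as an assumption rather than the author's actual code:

def warp_corner(H, img):
    # Map the four image corners through the homography
    h, w = img.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, H)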
Example #4
def matchFeatures(masterDescriptors, slaveDescriptors, matcherType=FLANN, matchPercent=0.15):
    if matcherType == BF:
        match_method = cv.DescriptorMatcher_create(
            cv.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    elif matcherType == FLANN:
        match_method = cv.FlannBasedMatcher_create()

    matches = match_method.match(masterDescriptors, slaveDescriptors)
    matches = sorted(matches, key=lambda x: x.distance)
    numGoodMatches = int(len(matches) * matchPercent)
    matches = matches[:numGoodMatches]
    return matches
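One pairing pitfall: FLANN's default KD-tree index expects float32 descriptors, while DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING expects binary descriptors such as ORB's. A hedged usage sketch, with the BF and FLANN constants assumed to be module-level flags defined elsewhere:

import cv2 as cv

BF, FLANN = 0, 1  # assumed module-level constants
orb = cv.ORB_create()
_, masterDes = orb.detectAndCompute(cv.imread('master.jpg'), None)
_, slaveDes = orb.detectAndCompute(cv.imread('slave.jpg'), None)
# Binary ORB descriptors pair with the Hamming brute-force matcher;
# float SIFT/SURF descriptors would pair with FLANN instead.
best = matchFeatures(masterDes, slaveDes, matcherType=BF, matchPercent=0.15)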
Example #5
    def __setstate__(self, state):
        """
        Restores the state of the object; sets the image descriptor mapper. Note that the training set is not restored

        :param state: saved state dictionary
        """
        self.__dict__.update(state)
        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(
            non_free.SURF_create(extended=True),
            cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)
        self._X = None

        return
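The matcher is rebuilt here because OpenCV objects cannot be pickled; a matching __getstate__, hypothetical and not shown in this example, would drop those fields before serialization:

    def __getstate__(self):
        # Hypothetical counterpart to __setstate__ above: cv2 objects are not
        # picklable, so drop the mapper and training set before pickling.
        state = self.__dict__.copy()
        state.pop('_img_descriptor_mapper', None)
        state.pop('_X', None)
        return state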
Example #6
def nn_match(descs1, descs2):
    """
    Perform nearest neighbor match, using descriptors.

    This function uses OpenCV FlannBasedMatcher

    :param descs1: descriptors from image 1, (N1, D)
    :param descs2: descriptors from image 2, (N2, D)
    :return indices: a list of tuples, each is (queryIdx, trainIdx)
    """

    flann = cv2.FlannBasedMatcher_create()
    # match() takes no crossCheck argument (cross-checking is a BFMatcher
    # constructor option), so plain nearest-neighbor matching is used here
    matches = flann.match(descs1.astype(np.float32),
                          descs2.astype(np.float32))
    indices = [(x.queryIdx, x.trainIdx) for x in matches]

    return indices
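Since FlannBasedMatcher offers no cross-checking, a brute-force matcher can supply mutual nearest-neighbor matches when that guarantee matters; a minimal sketch:

bf = cv2.BFMatcher_create(cv2.NORM_L2, crossCheck=True)
mutual = bf.match(descs1.astype(np.float32), descs2.astype(np.float32))
indices = [(m.queryIdx, m.trainIdx) for m in mutual]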
Example #7
def homo(path, imgA, imgB, save_path):
    p_imA = os.path.join(path, imgA)
    s_imA = os.path.join(save_path, 'imgA.png')
    p_imB = os.path.join(path, imgB)
    s_imB = os.path.join(save_path, 'imgB.png')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
        print(f'Create {save_path}')
    imA = cv2.imread(p_imA)
    imB = cv2.imread(p_imB)

    surf = cv2.xfeatures2d.SURF_create(200)
    keypointA, descriptorA = surf.detectAndCompute(imA, None)
    keypointB, descriptorB = surf.detectAndCompute(imB, None)
    if descriptorA is None or descriptorB is None:
        print('No descriptors found; skipping')
        return
    bastMacher = cv2.FlannBasedMatcher_create()
    matches = bastMacher.match(descriptorA, descriptorB)
    matches.sort(key=lambda m: m.distance)
    if len(matches) > 10:
        best = 10
    elif len(matches) >= 4:
        best = len(matches)
    else:
        print('Too few matches for a homography; skipping')
        return
    print(f'len(goodmatch):{best}')
    goodmatch = matches[0:best]
    imagePointsA = np.array([keypointA[m.queryIdx].pt for m in goodmatch])
    imagePointsB = np.array([keypointB[m.trainIdx].pt for m in goodmatch])
    homo, mask = cv2.findHomography(imagePointsA, imagePointsB)
    matchesMask = mask.ravel().tolist()
    h, w = imA.shape[0:2]
    imAwB = cv2.warpPerspective(imA, homo, (w, h))  # (512, 384))
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=2)
    img3 = cv2.drawMatches(imA, keypointA, imB, keypointB, goodmatch, None,
                           **draw_params)
    cv2.imwrite(s_imA, imA)
    cv2.imwrite(s_imB, imB)
    cv2.imwrite(os.path.join(save_path, 'imAwB.png'), imAwB)
    cv2.imwrite(os.path.join(save_path, 'match.jpg'), img3)
Example #8
def nn_match(descs1, descs2):
    """
    Perform nearest neighbor match, using descriptors.

    This function uses OpenCV FlannBasedMatcher

    :param descs1: descriptors from image 1, (N1, D)
    :param descs2: descriptors from image 2, (N2, D)
    :return indices: for each descriptor in image 1, the index of its nearest neighbor in image 2; length N1
    """
    # diff = descs1[:, None, :] - descs2[None, :, :]
    # diff = np.linalg.norm(diff, ord=2, axis=2)
    # indices = np.argmin(diff, axis=1)

    flann = cv2.FlannBasedMatcher_create()
    matches = flann.match(descs1.astype(np.float32), descs2.astype(np.float32))
    indices = [x.trainIdx for x in matches]

    return indices
Example #9
def create_descriptor_features(image_files):
    """Create features for images with SIFT descriptor

    :param image_files: list of images to be processed
    :type image_files: list(str)
    :return: numpy array of the created features
    :rtype: np.array
    """
    trainer = cv2.BOWKMeansTrainer(clusterCount=100)
    sift = cv2.xfeatures2d.SIFT_create()
    matcher = cv2.FlannBasedMatcher_create()
    bow_extractor = cv2.BOWImgDescriptorExtractor(sift, matcher)

    print('Creating dictionary')
    if os.path.exists('data/dictionary.npy'):
        dictionary = np.load('data/dictionary.npy')
    else:
        for filename in image_files:
            file = f'data/images/{filename.lower()}'
            img = cv2.imread(file)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            key_points, desc_obj = sift.detectAndCompute(img, mask=None)
            trainer.add(desc_obj)

        dictionary = trainer.cluster()
        np.save('data/dictionary.npy', dictionary)

    bow_extractor.setVocabulary(dictionary)

    feature_data = np.zeros(shape=(len(image_files), dictionary.shape[0]),
                            dtype=np.float32)

    print('Extract features')
    for i, filename in enumerate(image_files):
        file = f'data/images/{filename.lower()}'
        img = cv2.imread(file)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        points = sift.detect(img)
        feature_data[i] = bow_extractor.compute(img, points)

    return feature_data
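A short usage sketch; the directory layout and file list are assumptions:

import os

image_files = sorted(os.listdir('data/images'))  # assumed layout
features = create_descriptor_features(image_files)
# features has shape (len(image_files), 100): one normalized 100-bin
# visual-word histogram per image, ready to feed a classifier.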
Example #10
def ap2A_LA2NN(d_im1, d_im2, ratio = 0.8):
  """Obtiene correspondencias entre descriptores.
  Usa el método 'Lowe-Average-2NN'.
  Argumentos posicionales:
  - d_im1, d_im2: Descriptores de cada imagen
  Argumentos opcionales:
  - ratio: El ratio usado para descartar correspondencias ambiguas. Por defecto 0.8
  Devuelve: Correspondencias"""

  # Declare the matcher
  matcher = cv.FlannBasedMatcher_create()

  # Get the two best matches for each point
  matches = matcher.knnMatch(d_im1, d_im2, k = 2)

  # Keep the matches that are unambiguous according to Lowe's ratio test
  clear_matches = []
  for best, second in matches:
    if best.distance/second.distance < ratio:
      clear_matches.append(best)

  return clear_matches
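A hedged usage sketch, assuming im1 and im2 are already-loaded grayscale images and an OpenCV build that provides cv.SIFT_create:

sift = cv.SIFT_create()
_, d_im1 = sift.detectAndCompute(im1, None)
_, d_im2 = sift.detectAndCompute(im2, None)
matches = ap2A_LA2NN(d_im1, d_im2, ratio = 0.7)  # a stricter ratio keeps fewer, safer matches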
Example #11
    def __init__(self, training_images_dir, **kwargs):
        """
        This data provider utilizes the visual bag of words algorithm to map image files
        to feature vectors for the one-class SVM classifier.
        Process:
            - Partition each image into a grid and generate a SURF descriptor from each patch
            - Compute K clusters from all of the features from all of the images (visual bag of words)
            - Construct a normalized histogram for each image
            - The feature vector is then the values of the normalized histogram (vector quantization)

        :param training_images_dir: (string)
        :param kwargs:
            - num_clusters: (Integer) Size of the visual bag of words
            - resize_image: (tuple(x, y)) resize input image
            - patch_size: (Integer) size of patch to compute a descriptor
        """

        # note~ not much arg validation here...

        self._resize_image = kwargs.pop("resize_image", ())
        self._patch_size = kwargs.pop("patch_size", 16)

        termination_criteria = (cv2.TERM_CRITERIA_EPS +
                                cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        bow_model = cv2.BOWKMeansTrainer(kwargs.pop("num_clusters", 500),
                                         termination_criteria)

        key_point_tensor = {}
        training_counter = 0

        for root, sub_dirs, files in os.walk(training_images_dir):
            for image_file in files:
                if not image_file.endswith(".jpg"):
                    continue

                training_counter += 1
                if training_counter % 1000 == 0:
                    print(f"{training_counter} images completed")

                image_path = os.path.join(root, image_file)

                cv_image = DataProviderSURF.read_image(image_path,
                                                       self._resize_image)
                descriptors, key_points = DataProviderSURF.extract_features_descriptors(
                    cv_image, self._patch_size)

                key_point_tensor[image_file] = [cv_image, key_points]
                bow_model.add(descriptors[1])

        print(f"{training_counter} total number of images in training.")

        self._clusters = bow_model.cluster()

        self._img_descriptor_mapper = cv2.BOWImgDescriptorExtractor(
            non_free.SURF_create(extended=True),
            cv2.FlannBasedMatcher_create())
        self._img_descriptor_mapper.setVocabulary(self._clusters)

        training_x_list = []
        for img, img_data in key_point_tensor.items():
            image_descriptor = self._img_descriptor_mapper.compute(
                img_data[0], img_data[1])
            training_x_list.append(image_descriptor)

        self._X = np.vstack(training_x_list)

        return
Example #12
s_imA = os.path.join(save_path, 'imgA.png')
p_imB = os.path.join(path, imgB)
s_imB = os.path.join(save_path, 'imgB.png')
p_gt = os.path.join(path, gt)
s_gt = os.path.join(save_path, 'gt.jpg')
if not os.path.exists(save_path):
    os.makedirs(save_path)
    print(f'Create {save_path}')
imA = cv2.imread(p_imA)
imB = cv2.imread(p_imB)
flow = read_gen(p_gt)

surf = cv2.xfeatures2d.SURF_create(800)
keypointA, descriptorA = surf.detectAndCompute(imA, None)
keypointB, descriptorB = surf.detectAndCompute(imB, None)
bastMacher = cv2.FlannBasedMatcher_create()
matches = bastMacher.match(descriptorA, descriptorB)
matches.sort(key=lambda m: m.distance)
goodmatch = matches[0:10]
imagePointsA = np.array([keypointA[m.queryIdx].pt for m in goodmatch])
imagePointsB = np.array([keypointB[m.trainIdx].pt for m in goodmatch])
homo, mask = cv2.findHomography(imagePointsA, imagePointsB)
matchesMask = mask.ravel().tolist()
h, w = imA.shape[0:2]
imAwB = cv2.warpPerspective(imA, homo, (w, h))  # (512, 384))
homoflow = GenFlowAB(homo, (w, h))
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
img3 = cv2.drawMatches(imA, keypointA, imB, keypointB, goodmatch, None,
                       **draw_params)
Example #13
 def __init__(self):
     self.db = []
     self.flann = cv2.FlannBasedMatcher_create()
     self.sift = cv2.xfeatures2d.SIFT_create(600)
Example #14
def flann_surf():
    # Read the image
    trainImg = cv.imread(img_path + 'book1.jpg')
    # Downsample
    trainImg = cv.pyrDown(trainImg)
    # Convert to grayscale
    grayImg = cv.cvtColor(trainImg, cv.COLOR_BGR2GRAY)
    # Extract SURF keypoints
    surf_Detector = cv.xfeatures2d.SURF_create(80)  # SURF instance
    trainpt = surf_Detector.detect(grayImg)  # keypoint detection
    surf_desc = cv.xfeatures2d.SURF_create()  # SURF instance
    keypt, descpt = surf_desc.compute(grayImg, trainpt)  # compute descriptors

    # ORB feature extraction
    orb_fdet = cv.ORB_create()  # ORB instance
    orb_pt = orb_fdet.detect(grayImg)  # keypoint detection
    orb_ext = cv.ORB.create()  # ORB instance
    orb_pt, orb_desc = orb_ext.compute(grayImg, orb_pt)  # compute descriptors
    # Build a FLANN index over the binary ORB descriptors (LSH parameters)
    distParam = {
        "algorithm": 6,
        "table_number": 12,
        "key_size": 20,
        "multi_probe_level": 2
    }
    flidx = cv.flann_Index(orb_desc, distParam)

    # Match with FLANN
    fbMatcher = cv.FlannBasedMatcher_create()  # FLANN instance
    fbMatcher.add([descpt])  # add training descriptors
    fbMatcher.train()  # train the index
    # Video capture instance
    cap = cv.VideoCapture(0)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, trainImg.shape[0])

    def detectTmpimg(tmpImg):
        # Convert to grayscale
        tmpImg = cv.cvtColor(tmpImg, cv.COLOR_BGR2GRAY)
        # Detect keypoints
        tmppt1 = surf_Detector.detect(tmpImg)
        # Compute descriptors
        tmppt1, tmpdesc = surf_desc.compute(tmpImg, tmppt1)
        # kNN match against the trained descriptors
        matches = fbMatcher.knnMatch(tmpdesc, 2)
        # Keep the good matches
        goodMatches = []
        for i in range(len(matches)):
            if (matches[i][0].distance < 0.6 * matches[i][1].distance):
                goodMatches.append(matches[i][0])

        dstImg = cv.drawMatches(tmpImg, tmppt1, trainImg, trainpt, goodMatches,
                                None)
        cv.imshow('dstImg', dstImg)

    def orb_detectImg(tmpImg):
        # Convert to grayscale
        tmpImg = cv.cvtColor(tmpImg, cv.COLOR_BGR2GRAY)
        # Detect keypoints
        tmppt1 = orb_fdet.detect(tmpImg)
        # Compute descriptors
        tmppt1, tmpdesc = orb_ext.compute(tmpImg, tmppt1)
        # kNN search on the FLANN index: returns (train indices, distances)
        indMat, matches = flidx.knnSearch(tmpdesc, 2)

        goodMatches = []
        for i in range(len(matches)):
            if (matches[i][0] < 0.6 * matches[i][1]):
                # DMatch takes (queryIdx, trainIdx, distance); the train index
                # comes from indMat, the distance from the search result
                goodMatches.append(
                    cv.DMatch(i, int(indMat[i][0]), float(matches[i][0])))

        dstImg = cv.drawMatches(tmpImg, tmppt1, trainImg, trainpt, goodMatches,
                                None)
        cv.imshow('dstImg', dstImg)

    while True:
        c = cv.waitKey(1)
        if (c == ord('q')):
            break
        retval, testimg = cap.read()
        if (testimg is None):
            continue
        dtime = cv.getTickCount()
        if (c != ord('1')):
            orb_detectImg(testimg)
        else:
            pass  # detectTmpimg(testimg)

        dtime = cv.getTickCount() - dtime
        print('fps : %d' % (cv.getTickFrequency() / dtime))

    cv.destroyAllWindows()
Example #15
def findStuff():
    srcImg1 = cv.imread(img_path + 'test1-93.jpg')
    srcImg2 = cv.imread(img_path + 'test2-93.jpg')
    cv.imshow('srcImg1', srcImg1)
    cv.imshow('srcImg2', srcImg2)
    # Detect keypoints
    detector = cv.xfeatures2d.SURF_create(400)
    kp_object = detector.detect(srcImg1, None)
    kp_scene = detector.detect(srcImg2, None)
    # Compute descriptors
    extractor = cv.xfeatures2d.SURF_create()
    des_object = extractor.compute(srcImg1, kp_object)
    des_scene = extractor.compute(srcImg2, kp_scene)
    # FLANN matching
    matcher = cv.FlannBasedMatcher_create()
    matches = matcher.match(des_object[1], des_scene[1])
    matches = sorted(matches, key=lambda x: x.distance)
    # Find the maximum and minimum match distances
    min_dst = 100
    max_dst = 0
    for i in range(len(matches)):
        dst = matches[i].distance
        min_dst = min(min_dst, dst)
        max_dst = max(max_dst, dst)
    print('max distance: %.2f, min distance: %.2f' % (max_dst, min_dst))
    good_matches = []
    for i in range(len(matches)):
        if (matches[i].distance < 3 * min_dst):
            good_matches.append(matches[i])
    # Draw the matched keypoints
    matImg = cv.drawMatches(srcImg1, kp_object, srcImg2, kp_scene,
                            good_matches, None)
    obj = np.float32([kp_object[m.queryIdx].pt
                      for m in good_matches]).reshape(-1, 1, 2)
    scene = np.float32([kp_scene[m.trainIdx].pt
                        for m in good_matches]).reshape(-1, 1, 2)
    H, mask = cv.findHomography(obj, scene, cv.RANSAC)
    # Corner points of the object image
    obj_corners = [[[0, 0], [srcImg1.shape[1], 0],
                    [srcImg1.shape[1], srcImg1.shape[0]],
                    [0, srcImg1.shape[0]]]]
    # Perspective-transform the corners into the scene image
    sce_corners = cv.perspectiveTransform(
        np.array(obj_corners, dtype=np.float32).reshape(-1, 1, 2), H)
    # astype() converts the values; assigning to .dtype would reinterpret raw bytes
    sce_corners = sce_corners.astype(int)
    # Draw the bounding lines
    cv.line(
        matImg,
        (sce_corners[0][0][0] + srcImg1.shape[1], sce_corners[0][0][1] + 0),
        (sce_corners[1][0][0] + srcImg1.shape[1], sce_corners[1][0][1] + 0),
        (255, 0, 123), 4)
    cv.line(
        matImg,
        (sce_corners[1][0][0] + srcImg1.shape[1], sce_corners[1][0][1] + 0),
        (sce_corners[2][0][0] + srcImg1.shape[1], sce_corners[2][0][1] + 0),
        (255, 0, 123), 4)
    cv.line(
        matImg,
        (sce_corners[2][0][0] + srcImg1.shape[1], sce_corners[2][0][1] + 0),
        (sce_corners[3][0][0] + srcImg1.shape[1], sce_corners[3][0][1] + 0),
        (255, 0, 123), 4)
    cv.line(
        matImg,
        (sce_corners[3][0][0] + srcImg1.shape[1], sce_corners[3][0][1] + 0),
        (sce_corners[0][0][0] + srcImg1.shape[1], sce_corners[0][0][1] + 0),
        (255, 0, 123), 4)
    cv.imshow('dstImg', matImg)
    cv.waitKey(0)
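The four cv.line calls above trace the projected quadrilateral edge by edge; an equivalent, shorter form using cv.polylines with the same x-offset into the right half of matImg would be:

    shifted = sce_corners.reshape(-1, 2) + np.array([srcImg1.shape[1], 0])
    cv.polylines(matImg, [shifted.astype(np.int32)], True, (255, 0, 123), 4)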
Example #16
def main():
    print("Loading images...")
    queryImagePaths = getImagePaths("query")
    databaseImagePaths = getImagePaths("database")
    queryImages = getImages(queryImagePaths)
    databaseImages = getImages(databaseImagePaths, True)
    querySamples = []
    databaseSamples = []

    sift = cv2.xfeatures2d.SIFT_create()
    flann = cv2.FlannBasedMatcher_create()

    print("Generating keypoints...")
    for image in queryImages:
        keypoints, descriptors = sift.detectAndCompute(image[1], None)
        querySamples.append(Sample(image[0], image[1], keypoints, descriptors))

    for image in databaseImages:
        keypoints, descriptors = sift.detectAndCompute(image[1], None)
        databaseSamples.append(
            Sample(image[0], image[1], keypoints, descriptors))

    print("Detecting best match...")
    results = []
    for i, querySample in enumerate(querySamples):
        perSampleResults = []
        for j, databaseSample in enumerate(databaseSamples):
            matches = flann.knnMatch(querySample.descriptors,
                                     databaseSample.descriptors,
                                     k=2)

            good = []
            for m, n in matches:
                if m.distance < 0.7 * n.distance:
                    good.append(m)

            if len(good) >= 4:  # findHomography needs at least 4 point pairs
                src_pts = np.float32([
                    querySample.keypoints[m.queryIdx].pt for m in good
                ]).reshape(-1, 1, 2)
                dst_pts = np.float32([
                    databaseSample.keypoints[m.trainIdx].pt for m in good
                ]).reshape(-1, 1, 2)

                mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)[1]
                perSampleResults.append((i, j, good, mask))

        # Rank candidates by RANSAC inlier count rather than raw match count
        perSampleResults.sort(key=lambda x: int(x[3].sum()), reverse=True)
        results.append(perSampleResults)

    if not os.path.exists("results"):
        os.mkdir("results")

    for i, result in enumerate(results):
        figure = plt.figure(querySamples[result[0][0]].path)

        img1 = querySamples[result[0][0]].image
        img2 = databaseSamples[result[0][1]].image
        kp1 = querySamples[result[0][0]].keypoints
        kp2 = databaseSamples[result[0][1]].keypoints
        good = result[0][2]
        mask = result[0][3].ravel().tolist()

        result1 = cv2.drawMatches(img1, [], img2, [], [], None, (0, 255, 255),
                                  (0, 255, 255), None, 0)
        result2 = cv2.drawMatches(img1, kp1, img2, kp2, [], None,
                                  (0, 255, 255), (0, 255, 255), None, 0)
        result3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None,
                                  (0, 255, 255), (0, 255, 255), None, 2)
        result4 = cv2.drawMatches(img1, kp1, img2, kp2, good, None,
                                  (0, 255, 255), (0, 255, 255), mask, 2)

        plt.subplot(2, 2, 1)
        plt.imshow(cv2.cvtColor(result1, cv2.COLOR_BGR2RGB))
        plt.xticks([]), plt.yticks([])
        plt.title("Query Image + Best Match")

        plt.subplot(2, 2, 2)
        plt.imshow(cv2.cvtColor(result2, cv2.COLOR_BGR2RGB))
        plt.xticks([]), plt.yticks([])
        plt.title("SIFT Keypoints")

        plt.subplot(2, 2, 3)
        plt.imshow(cv2.cvtColor(result3, cv2.COLOR_BGR2RGB))
        plt.xticks([]), plt.yticks([])
        plt.title("Matches")

        plt.subplot(2, 2, 4)
        plt.imshow(cv2.cvtColor(result4, cv2.COLOR_BGR2RGB))
        plt.xticks([]), plt.yticks([])
        plt.title("Matches + RANSAC")

        cv2.imwrite("results/img{}_1.jpg".format(i + 1), result1)
        cv2.imwrite("results/img{}_2.jpg".format(i + 1), result2)
        cv2.imwrite("results/img{}_3.jpg".format(i + 1), result3)
        cv2.imwrite("results/img{}_4.jpg".format(i + 1), result4)

        plt.show()
        plt.close(figure)
Example #17
 def run(
     self,
     images: List[Url],
     nfeatures: int = 0,
     nOctaveLayers: int = 3,
     contrastThreshold: float = 0.04,
     edgeThreshold: float = 10,
     sigma: float = 1.6,
     ratio: float = 0.8,
      similarity_metric: SimilarityMetric = SimilarityMetric.INVERSE_DISTANCE,
     damping: float = 0.5,
     max_iter: int = 200,
     convergence_iter: int = 15,
      affinity: AffinityPropagationAffinity = AffinityPropagationAffinity.EUCLIDEAN,
     descriptor_matcher: DescriptorMatcher = DescriptorMatcher.FLANNBASED,
 ) -> ClusterResults:
     if not isinstance(similarity_metric, SimilarityMetric):
         similarity_metric = SimilarityMetric(similarity_metric)
     if not isinstance(affinity, AffinityPropagationAffinity):
         affinity = AffinityPropagationAffinity(affinity)
     if not isinstance(descriptor_matcher, DescriptorMatcher):
         descriptor_matcher = DescriptorMatcher(descriptor_matcher)
     list_of_images = list()
     matrix = SimilarityMatrix.empty_matrix(len(images))
     for url in images:
         print("SIFT DESCRIPTORS: %s" % url)
         keypoints, descriptors = cv2.xfeatures2d.SIFT_create(
             nfeatures,
             nOctaveLayers,
             contrastThreshold,
             edgeThreshold,
             sigma,
         ).detectAndCompute(image=read_image(url), mask=None)
         list_of_images.append(descriptors)
     combo = list(
         itertools.combinations_with_replacement(range(len(list_of_images)),
                                                 2))
     if descriptor_matcher == DescriptorMatcher.FLANNBASED:
         matcher = cv2.FlannBasedMatcher_create()
     else:
         matcher = cv2.BFMatcher_create()
     for idx, (i, j) in enumerate(combo):
         print("SIFT SIMILARITY: ( %i , %i ) %i / %i" %
               (i, j, idx, len(combo)))
         if i != j:
             matches = matcher.knnMatch(queryDescriptors=list_of_images[i],
                                        trainDescriptors=list_of_images[j],
                                        k=2)
             good = []
             for m, n in matches:
                 if m.distance < ratio * n.distance:
                     good.append(m)
             if similarity_metric == SimilarityMetric.INVERSE_DISTANCE:
                 inverse_distance = 0
                 for k in good:
                     inverse_distance += 1 - k.distance
                 if len(good) > 0:
                     matrix[i][j] = inverse_distance / len(good)
                     matrix[j][i] = inverse_distance / len(good)
             elif similarity_metric == SimilarityMetric.COUNT:
                 matrix[i][j] = len(good)
                 matrix[j][i] = len(good)
         else:
             if similarity_metric == SimilarityMetric.INVERSE_DISTANCE:
                 matrix[i][i] = 1
             elif similarity_metric == SimilarityMetric.COUNT:
                 matrix[i][i] = nfeatures
     print('CLUSTER: AffinityPropagation')
     cluster = AffinityPropagation(
         damping=damping,
         max_iter=max_iter,
         convergence_iter=convergence_iter,
         affinity=affinity.value,
         random_state=0,
     ).fit_predict(matrix).tolist()
     return ClusterResults(images, cluster)
Example #18
train = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)  # trainImage

# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()

# find the keypoints and descriptors with SIFT
query_kp, query_des = sift.detectAndCompute(query, None)
train_kp, train_des = sift.detectAndCompute(train, None)

# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # or pass empty dictionary

flann = cv.FlannBasedMatcher(index_params, search_params)
# cv.FlannBasedMatcher_create() would build an equivalent matcher with default parameters

matches = flann.knnMatch(query_des, train_des, k=2)

# Need to draw only good matches, so create a mask
matchesMask = [[0, 0] for i in range(len(matches))]

# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=cv.DrawMatchesFlags_DEFAULT)
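The snippet stops before rendering; a one-line completion with cv.drawMatchesKnn, reusing the draw_params built above, might be:

img3 = cv.drawMatchesKnn(query, query_kp, train, train_kp, matches, None, **draw_params)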
Example #19
}

descriptors = {
    'brief': cv2.xfeatures2d.BriefDescriptorExtractor_create(),
    'orb': cv2.ORB_create(),
    'daisy': cv2.xfeatures2d.DAISY_create(),
    'boost': cv2.xfeatures2d.BoostDesc_create(),
    'freak': cv2.xfeatures2d.FREAK_create(),
    'latch': cv2.xfeatures2d.LATCH_create(),
    'lucid': cv2.xfeatures2d.LUCID_create(),
    'vgg': cv2.xfeatures2d.VGG_create()
}

matchers = {
    'bruteForce': cv2.BFMatcher(cv2.NORM_HAMMING),
    'flann': cv2.FlannBasedMatcher_create()
}

cameraMat = np.array([[317.73273, 0, 319.9013], [0, 317.73273, 177.84988],
                      [0, 0, 1]])


class Detector:
    def __init__(self,
                 im1,
                 im2,
                 depth1=None,
                 depth2=None,
                 cameraMatrix=cameraMat,
                 detector='fast',
                 descriptor='brief',
Example #20
    kMatches[:10],
    imgCp2,
    flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# Keep only the 2nd of each group of 3 matches
# firstMatch = []
# for m in kMatches:
#     firstMatch.append(m[1])
# firstMatch = sorted(firstMatch, key=lambda x: x.distance)
# imgCp2 = cv2.drawMatches(img, kpts1, imgRot, kpts2, firstMatch[:10],
#                         imgCp2, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)

# Match the two images using the FLANN matcher
indPars = dict(algorithm=0, trees=5)
# searchPars = dict(checks=50)
searchPars = {}
flannMat = cv2.FlannBasedMatcher_create()
# flannMat = cv2.FlannBasedMatcher(indPars, searchPars)
des1 = np.float32(des1)
des2 = np.float32(des2)
matches2 = flannMat.match(des1, des2)
matches2 = sorted(matches2, key=lambda x: x.distance)
imgCp3 = cv2.drawMatches(img,
                         kpts1,
                         imgRot,
                         kpts2,
                         matches2[:10],
                         imgCp3,
                         flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# Take the 2 best matches for each feature
kMatches = flannMat.knnMatch(des1, des2, k=2)
matMask = [[0, 0] for i in range(len(kMatches))]
Example #21
    return good


if __name__ == "__main__":
    query_img = cv2.imread(
        "/Users/prituldave/Downloads/code/book_covers/queries/query06.png")
    pathlist = Path(
        "/Users/prituldave/Downloads/code/book_covers/covers").glob('*.png')
    query_img_gray = cvtGray(query_img)

    cv2.imshow("query image", query_img)
    cv2.waitKey(0)
    sift = cv2.xfeatures2d_SIFT.create()
    kp1, dst1 = findKeypoints(sift, query_img_gray)

    feature = cv2.FlannBasedMatcher_create()
    flag = 0
    print("processing\n.....................")
    for path in pathlist:
        testing_img = cv2.imread(str(path))
        #testing_img = cv2.imread("/Users/prituldave/Downloads/code/book_covers/covers/cover016.png")
        testing_gray = cvtGray(testing_img)

        kp2, dst2 = findKeypoints(sift, testing_gray)

        matches = feature.knnMatch(dst1, dst2, k=2)

        good = findMatches(matches)

        if len(good) > 130:
            print("completed")