Code example #1
def draw_matches(imgL, imgR, mode='SIFT'):
    MIN_MATCH_COUNT = 100  # minimum number of matched points required to draw the result

    if mode == 'ORB':
        algorithm = cv2.ORB_create(nfeatures=10000,
                                   scoreType=cv2.ORB_FAST_SCORE)

    if mode == 'SIFT':
        algorithm = cv2.SIFT_create()

    if mode == 'KAZE':
        algorithm = cv2.KAZE_create()

    matcher = Matcher(algorithm, imgL, imgR)
    bf_matches = matcher.BF_matcher(mode)

    print(f'number of matched points after filtering ({mode}):',
          len(bf_matches))

    if len(bf_matches) > MIN_MATCH_COUNT:
        src_pts = np.float32([matcher.kp2[m.queryIdx].pt
                              for m in bf_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([matcher.kp1[m.trainIdx].pt
                              for m in bf_matches]).reshape(-1, 1, 2)

        matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        draw_params = dict(
            outImg=None,
            # matchColor=(0, 255, 0),
            # matchesMask=matchesMask,  # draw only inliers
            flags=2)

        result_matching_bf = cv2.drawMatches(imgR, matcher.kp2, imgL,
                                             matcher.kp1, bf_matches,
                                             **draw_params)

        cv2.namedWindow('draw_matches', cv2.WINDOW_NORMAL)
        cv2.imshow('draw_matches', result_matching_bf)
        cv2.waitKey(0)

        if mode == 'KAZE':
            cv2.imwrite('res_kaze.png', result_matching_bf)
        if mode == 'SIFT':
            cv2.imwrite('res_sift.png', result_matching_bf)
        if mode == 'ORB':
            cv2.imwrite('res_orb.png', result_matching_bf)
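Note: draw_matches above relies on a Matcher helper that the snippet does not include. Below is a minimal, hypothetical sketch of such a helper, assuming it runs detectAndCompute on both images and applies Lowe's ratio test; the attribute names mirror the usage above, but the 0.75 ratio and the internals are assumptions, not the original project's code.

import cv2

class Matcher:
    # Hypothetical reconstruction: kp1/des1 come from imgL, kp2/des2 from
    # imgR, matching the indexing used by draw_matches above.
    def __init__(self, algorithm, imgL, imgR):
        self.kp1, self.des1 = algorithm.detectAndCompute(imgL, None)
        self.kp2, self.des2 = algorithm.detectAndCompute(imgR, None)

    def BF_matcher(self, mode):
        # ORB descriptors are binary and need the Hamming norm;
        # SIFT and KAZE descriptors are floats and use L2.
        norm = cv2.NORM_HAMMING if mode == 'ORB' else cv2.NORM_L2
        matches = cv2.BFMatcher(norm).knnMatch(self.des2, self.des1, k=2)
        # Lowe's ratio test filters ambiguous matches
        return [m for m, n in matches if m.distance < 0.75 * n.distance]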
Code example #2
    def extract_diffusion_features(self, image, vector_size=10):
        alg = cv.KAZE_create()
        # image keypoints
        kps = alg.detect(image)

        # The number of keypoints varies with image size and color palette.
        # Sort them by keypoint response value (bigger is better).
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        # computing descriptors vector
        kps, dsc = alg.compute(image, kps)
        needed_size = (vector_size * 64)  # KAZE descriptors are 64-dimensional
        if dsc is None:  # no keypoints were detected
            return np.zeros(needed_size)
        dsc = dsc.flatten()
        if dsc.size < needed_size:
            dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])

        return dsc
Code example #3
    def train(self, listOfImages):
        #detector = cv.xfeatures2d.SIFT_create()
        detector = cv.KAZE_create()
        allDescriptors = []
        for name in listOfImages:
            img = openImage(name)
            if img is None:
                continue
            keypoints, descriptors = detector.detectAndCompute(img, None)
            if descriptors is None:  # skip images that yield no keypoints
                continue
            allDescriptors.extend(descriptors)

        criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        compactness, labels, centers = cv.kmeans(np.float32(allDescriptors),
                                                 self.nWords, None, criteria,
                                                 100, cv.KMEANS_PP_CENTERS)
        self.vocabulary = centers
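Once self.vocabulary is trained, each image can be encoded as a bag-of-visual-words histogram. A hedged sketch of that step follows, assuming the same cv/np imports as the snippet above; the method name describe and the nearest-center assignment are assumptions about how this class would be used, not code from the original project.

    def describe(self, img):
        detector = cv.KAZE_create()
        _, descriptors = detector.detectAndCompute(img, None)
        hist = np.zeros(self.nWords, dtype=np.float32)
        if descriptors is None:
            return hist
        # distance from every descriptor to every cluster center
        dists = np.linalg.norm(
            descriptors[:, None, :] - self.vocabulary[None, :, :], axis=2)
        for word in dists.argmin(axis=1):  # nearest visual word per descriptor
            hist[word] += 1
        return hist / max(hist.sum(), 1)  # normalize to word frequencies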
Code example #4
def match_pair(a, b):
    """
    Find SIFT matching points in two images represented as numpy arrays.

    Args:
        a, b (arrays): two numpy arrays containing the input images to match

    Return:
        pts1, pts2: two lists of pairs of coordinates of matching points
    """
    a = utils.simple_equalization_8bit(a)
    b = utils.simple_equalization_8bit(b)

    # keypoints: KAZE stands in for SIFT (SIFT lived in cv2.xfeatures2d then)
    sift = cv2.KAZE_create()  # cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(a, None)
    kp2, des2 = sift.detectAndCompute(b, None)

    # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for m, n in matches:
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)

    # cv2.drawMatchesKnn expects list of lists as matches.
    #img3 = cv2.drawMatchesKnn(a,kp1,b,kp2,good,a,flags=2)
    #display_image(img3)

    pts1 = np.asarray(pts1)
    pts2 = np.asarray(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    # select only inlier points
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]

    return pts1, pts2
Code example #5
File: dcm_feature.py Project: fukuit/dcm_features
def select_detector(method):
    ''' select CV2 detector '''
    if method == 'FAST':
        detector = cv2.FastFeatureDetector_create()
    elif method == 'MSER':
        detector = cv2.MSER_create()
    elif method == 'ORB':
        detector = cv2.ORB_create()
    elif method == 'BRISK':
        detector = cv2.BRISK_create()
    elif method == 'KAZE':
        detector = cv2.KAZE_create()
    elif method == 'AKAZE':
        detector = cv2.AKAZE_create(descriptor_type=5)
    elif method == 'SIFT':
        detector = cv2.xfeatures2d.SIFT_create()
    else:
        raise ValueError('unknown method: %s' % method)
    return detector
Code example #6
def get_features(f):
    # Read image and extract label from filename
    img = cv2.imread(f)
    label = f.split('/')[2]

    # We use KAZE since SIFT has moved to an external module
    # that is a pain to install
    # We could also use something like HOG descriptor
    alg = cv2.KAZE_create()
    alg.setThreshold(1e-3)

    # Detect keypoints
    kps = alg.detect(img)
    if len(kps) > 0:
        # Extract descriptors at each keypoint
        _, dsc = alg.compute(img, kps)
        return label, dsc
Code example #7
def extract_features(image, vector_size=32):
    try:
        alg = cv2.KAZE_create()
        kps = alg.detect(image)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        kps, dsc = alg.compute(image, kps)
        if dsc is None:
            dsc = np.asarray([])
        dsc = dsc.flatten()
        needed_size = (vector_size * 64)
        if dsc.size < needed_size:
            dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
    except cv2.error as e:
        print('Error: ', e)
        return None

    return dsc
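A quick usage sketch for extract_features; the file name below is a placeholder, and any BGR or grayscale image array loaded with cv2 works:

import cv2

img = cv2.imread('query.jpg')  # hypothetical input image
vec = extract_features(img, vector_size=32)
if vec is not None:
    # (2048,): 32 keypoints x 64-dim KAZE descriptors, zero-padded if fewer
    print(vec.shape)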
Code example #8
def kaze(selectors: List[ImageSectionSelectorWithLoaderButton]):
    method = cv2.KAZE_create()
    # getImage returns a PIL Image, not an ImageWrapper

    threshold = my_config.MainWindowSelf.askForFloat(
        "Enter distance threshold", 500)
    howManyToMatch = my_config.MainWindowSelf.askForInt(
        "Enter minimum number of valid matches to consider equality", 50)

    open_cv_image_1 = numpy.array(selectors[0].getImage())[:, :, ::-1].copy()
    open_cv_image_2 = numpy.array(selectors[1].getImage())[:, :, ::-1].copy()

    img1 = cv2.cvtColor(open_cv_image_1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(open_cv_image_2, cv2.COLOR_BGR2GRAY)

    keypoints_1, descriptors_1 = method.detectAndCompute(img1, None)
    keypoints_2, descriptors_2 = method.detectAndCompute(img2, None)

    # feature matching
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)

    matches = bf.match(descriptors_1, descriptors_2)
    matches = sorted(matches, key=lambda x: x.distance)

    passing_matches = []
    for match in matches:
        if match.distance <= threshold:
            passing_matches.append(match)

    if len(passing_matches) >= howManyToMatch:
        my_config.MainWindowSelf.showMessage(
            f"Image matches ({len(passing_matches)} passing matches / {len(matches)} total)",
            "KAZE result")
    else:
        my_config.MainWindowSelf.showMessage("Image does not match",
                                             "KAZE result")

    img3 = cv2.drawMatches(img1,
                           keypoints_1,
                           img2,
                           keypoints_2,
                           passing_matches[:50],
                           img2,
                           flags=2)

    Image.fromarray(img3).show()
Code example #9
File: matcher.py Project: williammfu/algeo-facerecog
def extract_features(image_path, vector_size=24):
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    # crop img 300x300
    image = image[0:300, 0:300]

    alg = cv2.KAZE_create()
    # keypoints
    keypoints = alg.detect(image)
    keypoints = sorted(keypoints, key=lambda x: -x.response)[:vector_size]
    keypoints, dsc = alg.compute(image, keypoints)
    if dsc is None:  # no keypoints detected in the crop
        dsc = np.zeros(vector_size * 64)
    dsc = dsc.flatten()

    needed_size = (vector_size * 64)
    if dsc.size < needed_size:
        # if the vector is too small (few keypoints detected), pad with zeros
        dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])

    return dsc
Code example #10
def train_vocabulary(path):
    k_means_trainer = cv2.BOWKMeansTrainer(N)

    image_paths = sorted(list(paths.list_images(path)))

    detector = cv2.KAZE_create()  # create the detector once and reuse it
    for path_to_image in image_paths:
        img = cv2.imread(path_to_image, cv2.IMREAD_GRAYSCALE)
        key_points, descriptors = detector.detectAndCompute(img, None)

        if len(key_points) <= 0:
            continue

        descriptors = np.float32(descriptors)
        k_means_trainer.add(descriptors)
    vocabulary = k_means_trainer.cluster()

    return vocabulary
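OpenCV can also handle the encoding side with cv2.BOWImgDescriptorExtractor. A short sketch of how the vocabulary returned by train_vocabulary might be used; the function name and the glue code are assumptions:

import cv2
import numpy as np

def describe_image(path_to_image, vocabulary):
    detector = cv2.KAZE_create()
    extractor = cv2.BOWImgDescriptorExtractor(detector,
                                              cv2.BFMatcher(cv2.NORM_L2))
    extractor.setVocabulary(np.float32(vocabulary))
    img = cv2.imread(path_to_image, cv2.IMREAD_GRAYSCALE)
    # compute() returns a 1 x N normalized histogram of visual-word counts
    return extractor.compute(img, detector.detect(img))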
Code example #11
 def get_feature_vector(self, image_path, vector_size=32):
     image = cv2.imread(image_path, cv2.IMREAD_COLOR)
     try:
         alg = cv2.KAZE_create()
         key_points = alg.detect(image)  # All key points
         # Keep the top vector_size keypoints by response (bigger is better)
         key_points = sorted(key_points,
                             key=lambda x: -x.response)[:vector_size]
         key_points, descriptor = alg.compute(image, key_points)
         descriptor = descriptor.flatten()
         expected_size = (vector_size * 64)
         if descriptor.size < expected_size:
             descriptor = np.concatenate(
                 [descriptor,
                  np.zeros(expected_size - descriptor.size)])
     except cv2.error as e:
         print('Error: ', e)
         return None
     return descriptor
Code example #12
    def get_image_kaze(image, vector_size=32):
        alg = cv2.KAZE_create()
        kps = alg.detect(image)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]

        # Making descriptor of same size
        # Descriptor vector size is 64
        needed_size = (vector_size * 64)
        if len(kps) == 0:
            return np.zeros(needed_size)

        kps, dsc = alg.compute(image, kps)
        dsc = dsc.flatten()

        if dsc.size < needed_size:
            # if we detected fewer than vector_size keypoints, pad the
            # feature vector with zeros at the end
            dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
        return dsc
Code example #13
def kaze(detector, emparejador, opcion, nombre1, nombre2, norma):

    # Read the images to analyze
    if opcion == 'd':
        img1 = cv.imread('tpic5.png', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('tpic5_flipped.png', cv.IMREAD_GRAYSCALE)

    if opcion == 'db':
        img1 = cv.imread('thome.jpg', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('thome_escale.jpg', cv.IMREAD_GRAYSCALE)

    if opcion == 'dc':
        img1 = cv.imread('tgrafizq.png', cv.IMREAD_GRAYSCALE)
        img2 = cv.imread('tgrafder.png', cv.IMREAD_GRAYSCALE)

    if opcion == 'n':
        img1 = cv.imread(nombre1, cv.IMREAD_GRAYSCALE)
        img2 = cv.imread(nombre2, cv.IMREAD_GRAYSCALE)

    # Initialize the detector and descriptor
    kaze = cv.KAZE_create()

    # Detect features and compute the descriptor
    kp1, des1 = kaze.detectAndCompute(img1, None)
    kp2, des2 = kaze.detectAndCompute(img2, None)

    # Show the features and save the corresponding image
    imgx = cv.drawKeypoints(img1, kp1, None, color=(0, 255, 255))
    imgy = cv.drawKeypoints(img2, kp2, None, color=(0, 255, 255))
    window_namex = "Rasgos Caracteristicos imagen 1"
    window_namey = "Rasgos Caracteristicos imagen 2"
    cv.namedWindow(window_namex, cv.WINDOW_NORMAL)  # needed for resizeWindow
    cv.namedWindow(window_namey, cv.WINDOW_NORMAL)
    cv.resizeWindow(window_namex, 500, 400)
    cv.resizeWindow(window_namey, 500, 400)
    cv.imshow(window_namex, imgx)
    cv.imshow(window_namey, imgy)
    save(imgx, emparejador, detector, norma, tag1)
    save(imgy, emparejador, detector, norma, tag2)

    # Send the data on to the matching stage
    menu_emparejamiento(emparejador, kp1, des1, kp2, des2, img1, img2, norma,
                        detector)
Code example #14
 def getKAZE(self, image, vector_size=8):
     alg = cv2.KAZE_create()
     # Finding image keypoints
     kps = alg.detect(image)
     # Getting first vector_size of them
      # Sorting them based on keypoint response value (bigger is better)
     kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
     # computing descriptors vector
     kps, dsc = alg.compute(image, kps)
     # Flatten all of them in one big vector - our feature vector
     fd = dsc.flatten()
     # Making descriptor of same size  (descriptor vector size is 64)
     needed_size = (vector_size * 64)
     if fd.size < needed_size:
         # if we detected fewer than vector_size keypoints, pad the
         # feature vector with zeros at the end
         fd = np.concatenate([fd, np.zeros(needed_size - fd.size)])
     f_names = ['KAZE' + str(i).zfill(2) for i in range(len(fd))]
     return fd, f_names
Code example #15
def extract(image_path, vsize=8):
    # load as a 3-channel color image (OpenCV reads in BGR order)
    img = cv2.imread(image_path, 1)
    kaze = cv2.KAZE_create()
    kps = kaze.detect(img)
    kps_temp = sorted(kps, key=lambda x: abs(x.response))[:vsize // 2]
    dsc = kaze.compute(img, kps_temp)[1]
    kps_temp = sorted(kps, key=lambda x: x.size)[:vsize // 2]
    dsc2 = kaze.compute(img, kps_temp)[1]
    kps_temp = sorted(kps, key=lambda x: x.angle)[:vsize // 4]
    dsc3 = kaze.compute(img, kps_temp)[1]
    dsc = np.concatenate(
        [dsc.flatten('C'),
         dsc2.flatten('C'),
         dsc3.flatten('C')], axis=None)
    needed_size = (vsize * 2 * 64)
    if dsc.size < needed_size:
        dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
    return dsc
Code example #16
def extract_features(image_path, vector_size=32):
    image = imread(image_path, pilmode="RGB")
    try:
        alg = cv2.KAZE_create()
        kps = alg.detect(image)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        kps, dsc = alg.compute(image, kps)
        if (len(kps) < 1 or dsc is None):
            print('\nFailed to read from %s.' % image_path)
            dsc = np.zeros(vector_size * 64)
        else:
            dsc = dsc.flatten()
            needed_size = (vector_size * 64)
            if dsc.size < needed_size:
                dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
    except cv2.error as e:
        print("Error", e)
        return None
    return dsc
Code example #17
def extract_features(image, vector_size=32):

    try:
        # Using KAZE, because SIFT, ORB and the others were moved to an
        # additional module, which adds extra pain during install
        alg = cv.KAZE_create()
        # Finding image keypoints
        kps = alg.detect(image)
        print("KPS: ", kps)
        # Keep the strongest vector_size keypoints below; the number of
        # detected keypoints varies with image size and color palette
        DEBUG_MODE = 1

        if DEBUG_MODE > 0:
            debug_image = image
            cv.imshow("Image", image)
            cv.waitKey(0)
            debug_image = cv.drawKeypoints(image,
                                           kps,
                                           numpy.array([]),
                                           color=(0, 255, 0),
                                           flags=0)
            cv.imshow('Image Keypoints', debug_image)
            cv.waitKey(0)

        # Sorting them based on keypoint response value (bigger is better)
        kps = sorted(kps, key=lambda x: -x.response)[:vector_size]
        # computing descriptors vector
        kps, dsc = alg.compute(image, kps)
        # Flatten all of them in one big vector - our feature vector
        dsc = dsc.flatten()
        # Making descriptor of same size
        # Descriptor vector size is 64
        needed_size = (vector_size * 64)
        if dsc.size < needed_size:
            # if we detected fewer than vector_size keypoints, pad the
            # feature vector with zeros at the end
            dsc = numpy.concatenate([dsc, numpy.zeros(needed_size - dsc.size)])
    except Exception as e:
        print('Error: ', e)
        return None

    return dsc
Code example #18
File: UIsync.py Project: yesmider/stone_bot
 def check_bonus_ruby(self):
     KAZE = cv2.KAZE_create()
     BF = cv2.BFMatcher()
     ruby_area = self.img[370:650, 0:80]
     bonus_ruby = self.pics['bonus_ruby.png']
     kp1, des1 = KAZE.detectAndCompute(bonus_ruby, None)
     kp2, des2 = KAZE.detectAndCompute(ruby_area, None)
     if kp2:
         matches = BF.knnMatch(des1, des2, k=2)
         good = []
         append = good.append
         for m, n in matches:
             if m.distance < n.distance * 0.7:
                 append([m])
         if len(good) >= 2:
             good_trainIdx = [value.trainIdx for [value] in good]
             x = sum([kp2[idx].pt[0] for idx in good_trainIdx]) / len(good)
             y = sum([kp2[idx].pt[1] for idx in good_trainIdx]) / len(good)
             return x, y + 370
Code example #19
File: UIsync.py Project: yesmider/stone_bot
        def calc_kp_des_hist_by_table(table, masks, checked):
            KAZE = cv2.KAZE_create()

            def calc_kp_des_hist(img, mask):
                kp, des = KAZE.detectAndCompute(img, mask)
                hist = [
                    cv2.calcHist([img], [0], mask, [256], [0, 256]),
                    cv2.calcHist([img], [1], mask, [256], [0, 256]),
                    cv2.calcHist([img], [2], mask, [256], [0, 256])
                ]
                return kp, des, hist

            with ThreadPool() as pool:
                results = []
                for y, data in enumerate(table):
                    temp = []
                    for x, pic in enumerate(data):
                        if (x, y) not in checked:
                            mask = masks[y][x]
                            res = pool.apply_async(calc_kp_des_hist,
                                                   (pic, mask))
                            temp.append(res)
                        else:
                            res = None
                            temp.append(res)
                    results.append(temp)
                pool.close()
                pool.join()
                final = []
                for y in results:
                    temp = []
                    for kp_res in y:
                        if kp_res is not None:
                            kp, des, hist = kp_res.get()
                            if len(kp) > 0:
                                temp.append((kp, des, hist))
                            else:
                                temp.append(None)
                        else:
                            temp.append(None)
                    final.append(temp)
                return final
Code example #20
    def create_descriptor(self, descriptor, detector):
        """ Create descriptor object.

        Parameters
        ----------
        descriptor : str
            An optional descriptor type to create.
        detector: str
            Detector name, to check if valid combination.
        """
        if descriptor == 'AKAZE':  # AKAZE only allows AKAZE or KAZE detectors
            if detector == 'AKAZE' or detector == 'KAZE':
                desc = cv2.AKAZE_create()
            else:
                return None
        elif descriptor == 'BRISK':
            desc = cv2.BRISK_create()
        elif descriptor == 'FREAK':
            desc = xfeatures2d.FREAK_create()
        elif descriptor == 'KAZE':  # KAZE only allows KAZE or AKAZE detectors
            if detector == 'AKAZE' or detector == 'KAZE':
                desc = cv2.KAZE_create()
            else:
                return None
        elif descriptor == 'ORB':
            desc = cv2.ORB_create()
        elif descriptor == 'BRIEF':
            desc = xfeatures2d.BriefDescriptorExtractor_create()
        elif descriptor == 'DAISY':
            desc = xfeatures2d.DAISY_create()
        elif descriptor == 'LATCH':
            desc = xfeatures2d.LATCH_create()
        elif descriptor == 'SIFT':
            desc = xfeatures2d.SIFT_create()
        elif descriptor == 'SURF':
            desc = xfeatures2d.SURF_create()
        else:
            raise ValueError("Unsupported descriptor")

        return desc
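A brief usage sketch for create_descriptor; the detect-then-compute split is standard OpenCV usage, but obj, the detector choice, and the file name below are placeholders:

import cv2

detector = cv2.KAZE_create()  # plays the 'KAZE' detector role
desc = obj.create_descriptor('KAZE', 'KAZE')  # obj is a hypothetical instance
img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)
keypoints = detector.detect(img, None)
keypoints, descriptors = desc.compute(img, keypoints)  # 64-dim float vectors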
Code example #21
File: test_sift.py Project: ishan98/DIP_ShadowDraw
def make_bag_of_words():
    #resizing the images
    imdataset = []
    for root, dirnames, filenames in os.walk("./dataset/image_dataset"):
        for filename in filenames:
            filepath = os.path.join(root, filename)
            imdata = ndimage.imread(filepath, mode="L")
            imdata_resized = misc.imresize(imdata, (300, 300))
            imdataset.append(imdata_resized)
    sift = cv2.xfeatures2d.SIFT_create()
    alg = cv2.KAZE_create()

    feature_dataset = []
    for x in range(len(imdataset)):
        train_image = imdataset[x]
        getfeatures = extract_features(train_image, alg)
        #getfeatures = getFeatureMap(train_image,sift)
        feature_dataset.append(getfeatures)

    return (feature_dataset, alg, imdataset)
Code example #22
    def _initialize_feature_extractor(self, feature_type: str):
        """Initializes the feature extractor.

        Args:
            feature_type: Feature extractor { SIFT, SURF, KAZE }.

        Raises:
            ValueError: If the feature type is not known.

        """
        if feature_type == 'SIFT':
            self._feature_extractor = cv2.xfeatures2d.SIFT_create()
        elif feature_type == 'SURF':
            self._feature_extractor = cv2.xfeatures2d.SURF_create()
        elif feature_type == 'KAZE':
            self._feature_extractor = cv2.KAZE_create()
        else:
            raise ValueError("Feature type not supported. Possible values are 'SIFT', 'SURF', and 'KAZE'.")

        self._feature_type = feature_type
Code example #23
def create_panorama(descriptor_name, detector_name, normalized_imgs,
                    output_path, img_name):
    # (2) detect keypoints and extract local invariant descriptors
    detector = {
        "MSER": lambda: cv.MSER_create(),
        "FAST": lambda: cv.FastFeatureDetector_create(),
        "AGAST": lambda: cv.AgastFeatureDetector_create(),
        "GFFT": lambda: cv.GFTTDetector_create(),
        "STAR": lambda: cv.xfeatures2d.StarDetector_create()
    }[detector_name]()
    descriptor = {
        "sift": lambda: cv.xfeatures2d.SIFT_create(),
        "surf": lambda: cv.xfeatures2d.SURF_create(),
        "brief": lambda: cv.xfeatures2d.BriefDescriptorExtractor_create(),
        "orb": lambda: cv.ORB_create(nfeatures=1500),
        "kaze": lambda: cv.KAZE_create(),
        "akaze": lambda: cv.AKAZE_create(),
    }[descriptor_name]()

    def default_describe(img):
        return descriptor.detectAndCompute(img.astype('uint8'), None)

    descriptor_apply_function = {
        "brief":
        lambda img: extract_and_describe_with_brief(img, detector, descriptor)
    }.get(descriptor_name, lambda img: default_describe(img))
    matcher = cv.BFMatcher()
    ratio = 0.75
    reproj_threshold = 4.
    alg = descriptor_name if descriptor_name in [
        "sift", "surf", "orb", "kaze", "akaze"
    ] else "{}_{}".format(descriptor_name, detector_name)

    def r_stitch(a, b):
        return stitch(a[1], b[1], ratio, descriptor_apply_function, matcher,
                      reproj_threshold, True, a[0] + 1, b[0] + 1, img_name,
                      alg)

    result = reduce(r_stitch, enumerate(normalized_imgs))
    save_img(result[1], "{}/{}_result_{}.jpg".format(output_path, img_name,
                                                     alg))
Code example #24
def AKAZE_descriptor_matcher(img1,
                             img2,
                             use_KAZE_detector=False,
                             points_mask=None,
                             show_matches=True,
                             sort_points_by_distance_parameter=True):
    # choose the detector: AKAZE by default, KAZE on request
    if not use_KAZE_detector:
        detector = cv2.AKAZE_create()
    else:
        detector = cv2.KAZE_create()
    # convert the input images to grayscale
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # detect and compute the keypoints and descriptors and store them
    # (these are AKAZE or KAZE descriptors, depending on the detector)
    keypoints1, descriptors1 = detector.detectAndCompute(image=img1_gray,
                                                         mask=points_mask)
    keypoints2, descriptors2 = detector.detectAndCompute(image=img2_gray,
                                                         mask=points_mask)
    # Create the brute-force matcher with cross-checking. AKAZE descriptors
    # are binary and need the Hamming norm; KAZE descriptors are float (L2).
    norm = cv2.NORM_L2 if use_KAZE_detector else cv2.NORM_HAMMING
    bf = cv2.BFMatcher(normType=norm, crossCheck=True)
    # Find the correspondences (matches)
    matches = bf.match(descriptors1, descriptors2)
    # and sort them by distance
    if sort_points_by_distance_parameter:
        matches = sorted(matches, key=lambda x: x.distance)

    if show_matches:
        match_img = cv2.drawMatches(img1=img1,
                                    keypoints1=keypoints1,
                                    img2=img2,
                                    keypoints2=keypoints2,
                                    matches1to2=matches[:50],
                                    outImg=None,
                                    flags=4)

        show_img(match_img, "Correspondencias")

    return [keypoints1, descriptors1], [keypoints2, descriptors2], matches
Code example #25
def main():
    # paths to the thumbnails and the test images
    thumbnail_path = './Pictures'
    test_path = './TestImages'

    # pickled feature vectors are written per algorithm below, following
    # the naming convention {pictures,test}<ALG>.pck

    # all the keypoint descriptors
    kaze = cv2.KAZE_create()
    orb = cv2.ORB_create()
    akaze = cv2.AKAZE_create()
    brisk = cv2.BRISK_create()

    batch_extractor(images_path=thumbnail_path,
                    alg=kaze,
                    pickled_db_path='picturesKAZE.pck')
    batch_extractor(images_path=test_path,
                    alg=kaze,
                    pickled_db_path='testKAZE.pck')

    batch_extractor(images_path=thumbnail_path,
                    alg=akaze,
                    pickled_db_path='picturesAKAZE.pck')
    batch_extractor(images_path=test_path,
                    alg=akaze,
                    pickled_db_path='testAKAZE.pck')

    batch_extractor(images_path=thumbnail_path,
                    alg=orb,
                    pickled_db_path='picturesORB.pck')
    batch_extractor(images_path=test_path,
                    alg=orb,
                    pickled_db_path='testORB.pck')

    batch_extractor(images_path=thumbnail_path,
                    alg=brisk,
                    pickled_db_path='picturesBRISK.pck')
    batch_extractor(images_path=test_path,
                    alg=brisk,
                    pickled_db_path='testBRISK.pck')
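main above depends on a batch_extractor helper that is not shown. Below is a hedged reconstruction consistent with the KAZE feature-vector pattern used throughout these examples; the directory walk, the 32-keypoint cap, and the dict-of-vectors pickle format are assumptions, not the original project's code.

import os
import pickle

import cv2

def batch_extractor(images_path, alg, pickled_db_path):
    result = {}
    for name in sorted(os.listdir(images_path)):
        img = cv2.imread(os.path.join(images_path, name))
        if img is None:  # skip files that are not readable images
            continue
        kps = sorted(alg.detect(img), key=lambda x: -x.response)[:32]
        kps, dsc = alg.compute(img, kps)
        if dsc is not None:
            result[name] = dsc.flatten()
    with open(pickled_db_path, 'wb') as fp:  # {filename: feature vector}
        pickle.dump(result, fp)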
Code example #26
File: featureextractor.py Project: foxis/EasyVision
 def setup(self):
     super(FeatureExtraction, self).setup()
     if self._feature_type == 'ORB':
         defaults = dict(nfeatures=10000)
         defaults.update(self._kwargs)
         self._descriptor = cv2.ORB_create(**defaults)
     elif self._feature_type == 'BRISK':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.BRISK_create(**defaults)
     elif self._feature_type == 'SURF':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.xfeatures2d.SURF_create(**defaults)
     elif self._feature_type == 'SIFT':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.xfeatures2d.SIFT_create(**defaults)
     elif self._feature_type == 'KAZE':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.KAZE_create(**defaults)
     elif self._feature_type == 'AKAZE':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.AKAZE_create(**defaults)
     elif self._feature_type == 'FREAK':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.xfeatures2d.FREAK_create(**defaults)
         self._detector = cv2.xfeatures2d.SURF_create()
     elif self._feature_type == 'FAST':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.FastFeatureDetector_create(**defaults)
     elif self._feature_type == 'GFTT':
         defaults = dict()
         defaults.update(self._kwargs)
         self._descriptor = cv2.GFTTDetector_create(**defaults)
     else:
         raise ValueError("Invalid feature type")
Code example #27
File: retrieval.py Project: luftj/MaRE
def feature_matching_kaze(query_image_small, reference_image_small):
    detector = cv2.KAZE_create(upright=True)
    # kp_query = detector.detect(query_image_small)
    # kp_reference = detector.detect(reference_image_small)
    # kp_query = sorted(kp_query, key=lambda x: -x.response)[:800]
    # kp_reference = sorted(kp_reference, key=lambda x: -x.response)[:800]
    # kps, dsc_q = detector.compute(query_image_small, kp_query)  # todo: use cv2.detectAndCompute instead, is faster
    # kps, dsc_r = detector.compute(reference_image_small, kp_reference)
    kp_query, dsc_q = detector.detectAndCompute(query_image_small, None)
    kp_reference, dsc_r = detector.detectAndCompute(reference_image_small,
                                                    None)
    logging.info("#kps query %d" % len(kp_query))
    logging.info("#kps reference %d" % len(kp_reference))

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    # Match descriptors.
    matches = bf.match(dsc_q, dsc_r)
    # Sort them in the order of their distance.
    # matches = sorted(matches, key = lambda x:x.distance)

    # FLANN_INDEX_KDTREE = 1
    # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    # search_params = dict(checks=50)   # or pass empty dictionary
    # flann = cv2.FlannBasedMatcher(index_params,search_params)
    # knn_matches = flann.knnMatch(dsc_q, dsc_r, 2)
    # logging.info("#matches raw  %d" % len(knn_matches))
    # # Filter matches using Lowe's ratio test
    # ratio_thresh = 0.7
    # matches = []
    # for m,n in knn_matches:
    #     if m.distance < ratio_thresh * n.distance:
    #         matches.append(m)
    # logging.info("#matches refined  %d" % len(matches))

    keypoints_q = [kp_query[x.queryIdx].pt for x in matches]
    keypoints_r = [kp_reference[x.trainIdx].pt for x in matches]
    keypoints_q = np.array(keypoints_q)
    keypoints_r = np.array(keypoints_r)

    return keypoints_q, keypoints_r
Code example #28
def SIFT_detector(gray_path):
    images_sift = os.listdir(gray_path)
    for i, image_sift in enumerate(images_sift):
        print(i)
        print(image_sift)
        img = cv2.imread(os.path.join(gray_path, image_sift), 0)
        '''
        # SIFT detection
        sift = cv2.xfeatures2d.SIFT_create()
        kp = sift.detect(img,None)
        img_sift=cv2.drawKeypoints(img,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        '''
        '''
        # SURF detection
        surf = cv2.xfeatures2d.SURF_create()
        kp = surf.detect(img,None)
        img_surf=cv2.drawKeypoints(img,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        '''
        '''
        # ORB detection: finds almost no keypoints
        orb = cv2.ORB_create()
        kp = orb.detect(img,None)
        img_orb=cv2.drawKeypoints(img,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        '''

        # KAZE detection
        kaze = cv2.KAZE_create()
        kp = kaze.detect(img, None)
        img_kaze = cv2.drawKeypoints(
            img, kp, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        #cv2.imwrite('sift_keypoints.jpg',img)

        plt.figure(figsize=(30, 30))
        plt.subplot(1, 2, 1), plt.title('img')
        plt.imshow(img, cmap=plt.cm.gray)
        plt.subplot(1, 2, 2), plt.title('img_kaze')
        plt.imshow(img_kaze, cmap=plt.cm.gray)
        #        plt.subplot(1, 3, 3), plt.title('lbp_hist')
        #        plt.imshow(lbp_hist)
        plt.show()
Code example #29
def _get_kaze_vector(image_file, roi=None):
    """Generates a feature vector for the image using the KAZE keypoint detection method.
     INPUT: Image file, [Coordinates of ROI in a list [x,y,w,h]]
     OUTPUT: Descriptor vector"""
    # Check the extension of the image file
    extension = image_file.rstrip().split('.')[-1]
    if extension == 'dcm':
        dicom_image = pydicom.dcmread(image_file)
        arr = dicom_image.pixel_array
        rgb_image = apply_color_lut(arr, dicom_image, palette='PET')
        gray = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2GRAY)
        scale_percent = 60  # percent of original size
        width = int(gray.shape[1] * scale_percent / 100)
        height = int(gray.shape[0] * scale_percent / 100)
        dim = (width, height)
        gray = cv2.resize(gray, dim, interpolation=cv2.INTER_AREA)

    else:
        image = imread(image_file)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if roi:
        gray = gray[int(roi[1]):int(roi[1] + roi[3]), int(roi[0]):int(roi[0] + roi[2])]
    alg = cv2.KAZE_create()
    kps = alg.detect(gray)
    kps = sorted(kps, key=lambda x: -x.response)[:128]
    kps, dsc = alg.compute(gray, kps)
    if dsc is None:  # no keypoints detected: return an all-zero vector
        return np.zeros(512)
    dsc = dsc.flatten()
    if dsc.size < 512:
        # fewer than 8 keypoints detected (8 x 64 = 512): pad the feature
        # vector with zeros at the end
        dsc = np.concatenate([dsc, np.zeros(512 - dsc.size)])
    elif dsc.size > 512:
        n = 64
        while dsc.size > 512:
            n = int(n / 2)
            kps = sorted(kps, key=lambda x: -x.response)[:n]
            kps, dsc = alg.compute(gray, kps)
            dsc = dsc.flatten()
    return dsc
Code example #30
def kaze_func(image):
    alg = cv2.KAZE_create()
    # Finding image keypoints
    kps = alg.detect(image)
    # Keep the strongest 32 keypoints; the number of detected keypoints
    # varies with image size and color palette.
    # Sort them by keypoint response value (bigger is better).
    kps = sorted(kps, key=lambda x: -x.response)[:32]
    # computing descriptors vector
    kps, dsc = alg.compute(image, kps)
    cv2.normalize(dsc, dsc)
    # Flatten all of them in one big vector - our feature vector
    return dsc.flatten()
    # Alternative Sobel-based feature vector, kept (unreachable) from the
    # original source:
    # sobelx8u = cv2.Sobel(image, cv2.CV_8U, 1, 0, ksize=5)
    #
    # # Output dtype = cv2.CV_64F. Then take its absolute and convert to cv2.CV_8U
    # sobelx64f = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
    # cv2.normalize(sobelx8u, sobelx8u)
    # return sobelx8u.flatten()