import cv2
from matplotlib import pyplot as plt


def SIFT_feature_matching(img1, img2, n=1000):
    # Load both images as grayscale
    t1 = cv2.imread(img1, 0)
    t2 = cv2.imread(img2, 0)

    sift = cv2.SIFT_create()

    kp1, des1 = sift.detectAndCompute(t1, None)
    kp2, des2 = sift.detectAndCompute(t2, None)

    f = cv2.drawKeypoints(t1,
                          kp1,
                          None, [0, 0, 255],
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    nf = cv2.drawKeypoints(t2,
                           kp2,
                           None, [255, 0, 0],
                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    bf = cv2.BFMatcher()
    matches = bf.match(des1, des2)

    matches = sorted(matches, key=lambda x: x.distance)

    result = cv2.drawMatches(t1,
                             kp1,
                             t2,
                             kp2,
                             matches[:min(n, len(matches))],
                             None, [0, 0, 255],
                             flags=2)

    plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB), interpolation='bicubic')  # OpenCV returns BGR; matplotlib expects RGB
    plt.axis('off')
    plt.show()
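
# A minimal usage sketch, assuming 'left.png' and 'right.png' exist on disk
# (file names are placeholders, not from the original snippet): the call reads
# both images, matches SIFT descriptors, and shows the top-200 matches.
SIFT_feature_matching('left.png', 'right.png', n=200)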
def featurematch(frame1, frame2):
    # Use SIFT to find keypoints and descriptors
    sift = xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(frame1, None)
    kp2, des2 = sift.detectAndCompute(frame2, None)
    # FLANN with a KD-tree index (algorithm=1) suits SIFT's float descriptors
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
    matches = flann.knnMatch(des1, des2, k=2)
    left_pts = []
    right_pts = []

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    # Ratio criteria according to Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.75 * n.distance:
            left_pts.append(kp2[m.trainIdx].pt)
            right_pts.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)

    img3 = cv2.drawMatchesKnn(frame1, kp1, frame2, kp2, matches, None,
                              **draw_params)

    #plt.imshow(img3,),plt.show()
    left_pts = np.array(left_pts)
    right_pts = np.array(right_pts)
    return left_pts, right_pts
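
# A hedged follow-up sketch (not in the original): the returned point arrays
# are shaped (N, 2), which is what cv2.findFundamentalMat expects. frame_a and
# frame_b stand for two already-loaded grayscale images.
left_pts, right_pts = featurematch(frame_a, frame_b)
if len(left_pts) >= 8:  # the 8-point algorithm needs at least 8 pairs
    # right_pts came from frame_a's keypoints, left_pts from frame_b's
    F, inlier_mask = cv2.findFundamentalMat(right_pts, left_pts, cv2.FM_RANSAC)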
Example #3
def compute_sift(img):
    sift = xfeatures2d.SIFT_create()
    keypoints, descriptors = sift.detectAndCompute(img, None)

    # Flatten cv2.KeyPoint objects into an (N, 4) array: x, y, size, angle
    keypoints = np.array([[k.pt[0], k.pt[1], k.size, k.angle] for k in keypoints])

    return keypoints, descriptors
def sift(img):
    from cv2 import xfeatures2d
    from cv2 import imread

    sift = xfeatures2d.SIFT_create()  # SIFT extractor
    image = imread(img, 0)
    kp, des = sift.detectAndCompute(image, None)

    return kp, des
Example #5
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self._algos = [cv2.AKAZE_create()]
        self._algos.append(cv2.BRISK_create())
        self._algos.append(cv2.KAZE_create())
        self._algos.append(cv2.ORB_create())
        self._algos.append(xfeatures2d.SIFT_create())
        self._algos.append(xfeatures2d.SURF_create())

        self._times = OrderedDict()
        self._nkps = OrderedDict()

        self._images = [cv2.imread(image) for image in self.files]
def compute_sift_mapping(path_a, path_b):
    print('Computing key point pairs...')
    img_a = cv2.imread(path_a, 0)
    img_b = cv2.imread(path_b, 0)
    sift = xfeatures2d.SIFT_create()
    kp_a, des_a = sift.detectAndCompute(img_a, None)
    kp_b, des_b = sift.detectAndCompute(img_b, None)
    bf = cv2.BFMatcher()
    # bf.knnMatch(query_des_set, train_des_set)
    matches = bf.knnMatch(des_a, des_b, k=2)
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append(m)
    kp_pairs = [[kp_a[match.queryIdx].pt, kp_b[match.trainIdx].pt] for match in good]
    return kp_pairs
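
# A hedged usage sketch (paths are placeholders; np and cv2 are assumed
# imported as elsewhere in this collection): split the key-point pairs back
# into source/destination arrays and estimate a homography.
kp_pairs = compute_sift_mapping('image_a.png', 'image_b.png')
if len(kp_pairs) >= 4:  # a homography needs at least 4 correspondences
    src = np.float32([p[0] for p in kp_pairs])
    dst = np.float32([p[1] for p in kp_pairs])
    H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)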
Example #7
def featurePointDetectAndMatcher(img0,
                                 img1,
                                 ratio=0.75,
                                 reprojThresh=4.0,
                                 saveName='Sample'):
    from cv2 import xfeatures2d, DescriptorMatcher_create, findHomography, line, RANSAC, warpPerspective
    import numpy as np
    from numpy import float32
    from matplotlib.pyplot import imsave, imshow
    sift = xfeatures2d.SIFT_create()
    kps0, des0 = sift.detectAndCompute(img0, None)
    kps1, des1 = sift.detectAndCompute(img1, None)
    kp0 = float32([kp.pt for kp in kps0])
    kp1 = float32([kp.pt for kp in kps1])
    matcher = DescriptorMatcher_create('BruteForce')
    matches = matcher.knnMatch(des0, des1, 2)
    good = []
    for m in matches:
        if len(m) == 2 and m[0].distance < ratio * m[1].distance:
            good.append((m[0].queryIdx, m[0].trainIdx))
    if len(good) <= 4:
        raise ValueError('Not enough good matches to estimate a homography')
    src_pts = np.float32([kp0[i] for i, _ in good])
    dst_pts = np.float32([kp1[i] for _, i in good])
    (M, mask) = findHomography(src_pts, dst_pts, RANSAC, reprojThresh)
    result = warpPerspective(img0, M,
                             (img0.shape[1] + img1.shape[1], img0.shape[0]))
    result[0:img1.shape[0], 0:img1.shape[1]] = img1
    imshow(result)
    imsave(saveName + '_Stitcher.jpg', result)
    (hA, wA) = img0.shape[:2]
    (hB, wB) = img1.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype='uint8')
    vis[0:hA, 0:wA] = img0
    vis[0:hB, wA:] = img1
    imshow(vis)
    imsave(saveName + '_TwoPic.jpg', vis)
    for ((queryIdx, trainIdx), s) in zip(good, mask):
        if s == 1:
            ptA = (int(kp0[queryIdx][0]), int(kp0[queryIdx][1]))
            ptB = (int(kp1[trainIdx][0]) + wA, int(kp1[trainIdx][1]))
            line(vis, ptA, ptB, (0, 255, 255), 1)
    imshow(vis)
    imsave(saveName + '_TwoPicFeaturePointMatcher.jpg', vis)
    return None
Example #8
    def create_descriptor(self, descriptor, detector):
        """ Create descriptor object.

        Parameters
        ----------
        descriptor : str
            An optional descriptor type to create.
        detector: str
            Detector name, to check if valid combination.
        """
        if descriptor == 'AKAZE':  # AKAZE descriptors require AKAZE or KAZE detectors
            if detector in ('AKAZE', 'KAZE'):
                desc = cv2.AKAZE_create()
            else:
                return None
        elif descriptor == 'BRISK':
            desc = cv2.BRISK_create()
        elif descriptor == 'FREAK':
            desc = xfeatures2d.FREAK_create()
        elif descriptor == 'KAZE':  # KAZE descriptors require KAZE or AKAZE detectors
            if detector in ('AKAZE', 'KAZE'):
                desc = cv2.KAZE_create()
            else:
                return None
        elif descriptor == 'ORB':
            desc = cv2.ORB_create()
        elif descriptor == 'BRIEF':
            desc = xfeatures2d.BriefDescriptorExtractor_create()
        elif descriptor == 'DAISY':
            desc = xfeatures2d.DAISY_create()
        elif descriptor == 'LATCH':
            desc = xfeatures2d.LATCH_create()
        elif descriptor == 'SIFT':
            desc = xfeatures2d.SIFT_create()
        elif descriptor == 'SURF':
            desc = xfeatures2d.SURF_create()
        else:
            raise ValueError("Unsupported descriptor")

        return desc
def rootsift(img, eps=1e-7):
    from cv2 import xfeatures2d
    from cv2 import imread
    import numpy as np
    from scipy.cluster.vq import whiten

    image = imread(img, 0)
    sift = xfeatures2d.SIFT_create()  # SIFT extractor

    kp, des = sift.detectAndCompute(image, None)

    if des is not None:
        # RootSIFT: L1-normalise each descriptor, then take the element-wise
        # square root (Hellinger kernel)
        des /= (des.sum(axis=1, keepdims=True) + eps)
        des = np.sqrt(des)

        # Scale each feature dimension to unit variance
        des = whiten(des)

        return kp, des
    else:
        return [], None
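
# A quick usage sketch, assuming a grayscale image 'scene.png' exists on disk
# (the file name is a placeholder, not from the original snippet).
kp, des = rootsift('scene.png')
if des is not None:
    print(des.shape)  # (number of keypoints, 128)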
Example #10
    def create_detector(self, detector):
        """ Create detector object.

        Parameters
        ----------
        detector : str
            The detector type to create.
        """
        if detector == 'Agast':
            det = cv2.AgastFeatureDetector_create()
        elif detector == 'AKAZE':
            det = cv2.AKAZE_create()
        elif detector == 'BRISK':
            det = cv2.BRISK_create()
        elif detector == 'Fast':
            det = cv2.FastFeatureDetector_create()
        elif detector == 'GFTT':
            det = cv2.GFTTDetector_create()
        elif detector == 'KAZE':
            det = cv2.KAZE_create()
        elif detector == 'MSER':
            det = cv2.MSER_create()
        elif detector == 'ORB':
            det = cv2.ORB_create()
        elif detector == 'MSD':
            det = xfeatures2d.MSDDetector_create()
        elif detector == 'SIFT':
            det = xfeatures2d.SIFT_create()
        elif detector == 'SURF':
            det = xfeatures2d.SURF_create()
        elif detector == 'Star':
            det = xfeatures2d.StarDetector_create()
        else:
            raise ValueError("Unsupported detector")

        return det
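
# A hedged usage sketch: 'bench' stands for an instance of the class that
# defines create_detector/create_descriptor, and 'img' for a loaded image
# (both names are placeholders). Detect with one algorithm, describe with
# another, which is the mix-and-match these factories exist to support.
det = bench.create_detector('Fast')
desc = bench.create_descriptor('BRIEF', 'Fast')
if desc is not None:
    kps = det.detect(img, None)
    kps, des = desc.compute(img, kps)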
Example #11
    # attack_image = perturb_image(x, img)[0]

    attack_image = get_file_content('use_img.png')
    obj_type, predictions = ir_api(attack_image)
    confidence = predictions
    predicted_class = obj_type
    # If the prediction is what we want (misclassification or
    # targeted classification), return True
    if verbose:
        print('Confidence:', confidence)
    if ((targeted_attack and predicted_class == target_class) or
            (not targeted_attack and predicted_class != target_class)):
        return True


sift = xf.SIFT_create()


def getFeatureOperation(image, size):
    keypoints, descriptors = sift.detectAndCompute(image, None)
    featurePoints = [k.pt for k in keypoints]
    operation = []
    for f in featurePoints:
        for _ in range(size):
            # jitter the keypoint position by up to +/-0.5 px, then rescale
            op = [(f[0] + (random() - 0.5)) / 32, (f[1] + (random() - 0.5)) / 32]
            # append three random colour-change components
            for _ in range(3):
                colorChange = random()
                op.append(colorChange)
            operation.append(op)

    if not len(operation):
Example #12
import argparse as ap
import os.path as osp

import cv2
import cv2.xfeatures2d as x2d

conf_parser = ap.ArgumentParser(
    description='Detect & match 2d features in 2 images.')
conf_parser.add_argument("-f",
                         "--folder",
                         help="Folder to work in",
                         required=False,
                         default="./")
conf_parser.add_argument("-fn",
                         "--filenames",
                         help="Names of the left and right images",
                         required=False,
                         nargs=2,
                         default=["left.png", "right.png"])

if __name__ == "__main__":
    args = conf_parser.parse_args()
    im_l = cv2.imread(osp.join(args.folder, args.filenames[0]))
    im_r = cv2.imread(osp.join(args.folder, args.filenames[1]))

    sift = x2d.SIFT_create(1000)
    features_l, des_l = sift.detectAndCompute(im_l, None)
    features_r, des_r = sift.detectAndCompute(im_r, None)

    #flann currently seems broken due to bug in opencv
#     FLANN_INDEX_KDTREE = 1
#     index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
#     search_params = dict(checks=50)
#     flann = cv2.FlannBasedMatcher(index_params,search_params)
#
#     matches = flann.knnMatch(des_l, des_r, k=2)
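
    # A hedged workaround sketch (not in the original snippet): brute-force
    # matching with the L2 norm is a drop-in replacement for FLANN on SIFT's
    # float descriptors, just slower on large keypoint sets.
    bf = cv2.BFMatcher(cv2.NORM_L2)
    matches = bf.knnMatch(des_l, des_r, k=2)
    good = [m for m, n in matches if m.distance < 0.75 * n.distance]
    print('good matches:', len(good))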
Example #13
def filter_matches(kp1, kp2, matches, ratio=0.75):
    # Header reconstructed from context (the snippet began mid-function):
    # keep knn matches that pass Lowe's ratio test
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append(kp1[m.queryIdx])
            mkp2.append(kp2[m.trainIdx])
    p1 = numpy.float32([kp.pt for kp in mkp1])
    p2 = numpy.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)


# Read the image files (img1, img2)
t1 = cv2.imread(root.filename[0], 0)
t2 = cv2.imread(root.filename[1], 0)

# Take descriptors from SIFT features
sift = cv2.SIFT_create()

# detectAndCompute: detect each image's keypoints and compute their descriptors
# Returns: keypoints, descriptors
kp1, des1 = sift.detectAndCompute(t1, None)
kp2, des2 = sift.detectAndCompute(t2, None)

# Keypoints found in img1 are drawn in blue, those found in img2 in red
# drawKeypoints flag options:
# cv2.DRAW_MATCHES_FLAGS_DEFAULT
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS -> used in the reference code, but
#   it looked cluttered, so switched back to the default
# cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG
# cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS

f = cv2.drawKeypoints(t1,
                      kp1,
                      None, (255, 0, 0),  # call completed; the snippet was truncated here
                      flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
Example #14
def sift_sim(l_crop, r_crop):
  ''' Use SIFT features to measure image similarity between the two
  detection regions l_crop and r_crop (inputs should be grayscale) '''
  # An earlier ORB variant (cv2.ORB_create() with
  # cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True), scoring matches with
  # distance < 70) was tried here before being replaced by the SIFT + ratio
  # test pipeline below.

  # initialize the detector
  sift = cv2.SIFT_create()
  # find the keypoints and descriptors with SIFT
  kp1, desc_a = sift.detectAndCompute(l_crop, None)
  kp2, desc_b = sift.detectAndCompute(r_crop, None)
  

  # initialize the bruteforce matcher
  bf = cv2.BFMatcher()

  # match
  matches = bf.knnMatch(desc_a,desc_b, k=2)
  good1 = []
  for m, n in matches:
    if m.distance < 0.65 * n.distance:  # Lowe ratio threshold
      good1.append([m])

  matches = bf.knnMatch(desc_b, desc_a, k=2)
  good2 = []
  for m, n in matches:
    if m.distance < 0.65 * n.distance:
      good2.append([m])
  
  good=[]
  for i in good1:
    img1_id1=i[0].queryIdx
    img2_id1=i[0].trainIdx

    (x1,y1)=kp1[img1_id1].pt
    (x2,y2)=kp2[img2_id1].pt

    for j in good2:
        img1_id2=j[0].queryIdx
        img2_id2=j[0].trainIdx

        (a1,b1)=kp2[img1_id2].pt
        (a2,b2)=kp1[img2_id2].pt

        if (a1 == x2 and b1 == y2) and (a2 == x1 and b2 == y1):
            good.append(i)

  
  # score: fraction of mutually-consistent matches among the knn matches from
  # the second (desc_b -> desc_a) pass
  if len(matches) == 0:
    return 0
  return len(good) / len(matches)
Example #15
#!/usr/bin/env python3

import cv2
from cv2 import xfeatures2d

file = '../performance/boat/img1.pgm'
img = cv2.imread(file)

akaze = cv2.AKAZE_create()
points = akaze.detect(img)

descriptors = [akaze]
descriptors.append(cv2.BRISK_create())
descriptors.append(cv2.KAZE_create())
descriptors.append(cv2.ORB_create())
descriptors.append(xfeatures2d.BriefDescriptorExtractor_create())
descriptors.append(xfeatures2d.DAISY_create())
descriptors.append(xfeatures2d.FREAK_create())
descriptors.append(xfeatures2d.LATCH_create())
descriptors.append(xfeatures2d.LUCID_create(1, 1))
descriptors.append(xfeatures2d.SIFT_create())
descriptors.append(xfeatures2d.SURF_create())

for descriptor in descriptors:
    kps, des = descriptor.compute(img, points)
    print("Algorithm: {}, size: {}, type: {}".format(descriptor, des[0].size,
                                                     des[0].dtype))
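
# A hedged extension sketch (not in the original): time each descriptor's
# compute() call over the same AKAZE keypoints with time.perf_counter.
import time

for descriptor in descriptors:
    t0 = time.perf_counter()
    descriptor.compute(img, points)
    print("{}: {:.1f} ms".format(type(descriptor).__name__,
                                 (time.perf_counter() - t0) * 1e3))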
    def main(self):
        # create SIFT
        sift = cv2.SIFT_create()

        # set window size
        cv2.namedWindow("window", cv2.WINDOW_NORMAL)
        cv2.resizeWindow('window', int(640 * 2 / 3), 640)

        # create log file
        writer = csv.writer(self.fh)
        writer.writerow([
            "File name", "number of matches", "area of sign", "Blank type",
            "Blank status", "OK?"
        ])

        # main variables
        def_h = 750  # image resolution (y axis) - dependent (TODO make independent) - don't set less than 700px
        min_match = 50  # min number of matches (correct = 70-364, wrong = 0-33) TODO derive from def_h without user correction
        min_sign_area = [
            500, 750, 550
        ]  # min area of sign, correct = 599-899/1057-1142/752-897, wrong = 0-392/0-491/0-366
        # TODO minimize the sign frame to minimize the contour area of an empty blank -> sp
        # min_sign_area should stay the same
        # min sign area should be about 30% - make it a function of def_h

        # load default clear blanks
        ref = {}
        kp2 = {}
        des2 = {}
        for ref_name in self.ref_names:
            ref[ref_name] = cv2.imread(self.ref_folder + ref_name, 0)
            ref[ref_name] = cv2.resize(ref[ref_name],
                                       (int(def_h * 2 / 3), def_h))
            kp2[ref_name], des2[ref_name] = sift.detectAndCompute(
                ref[ref_name], None)

        # text bounds
        h, w = ref[self.ref_names[0]].shape
        bounds = [-20, -30, w + 20, h + 50]  # format [x_l, y_up, x_r, y_down]

        # sign position, format [y1, y2, x1, x2]
        sp = [[650, 690, 350, 400], [615, 645, 135, 185], [660, 690, 190, 250]]
        # TODO sp = [int(i * def_h/750) for i in sp]

        # user uploaded images
        file_names = glob.glob(self.data_folder + '*.jpg')
        self.blank_count = 0

        for file_name in file_names:
            self.blank_count += 1
            img = cv2.imread(file_name, 0)
            img = cv2.resize(img, (int(def_h * 2 / 3), def_h))
            kp1, des1 = sift.detectAndCompute(img, None)

            match_count = 0
            matches_list = []
            for ref_name in self.ref_names:
                matches = self.match_features(des1, des2[ref_name], kp1,
                                              kp2[ref_name])
                if matches is None:
                    continue
                matches_list.append(matches)
                if len(matches) > match_count:
                    match_count = len(matches)
                    ind = self.ref_names.index(ref_name)

            sign_area = 0
            if len(matches_list) > 0:
                blank_type = self.blank_types[ind]
                matches = matches_list[ind]
                ref_name = self.ref_names[ind]

                blank_status = self.verify_blank(matches, min_match)
                if blank_status == BlankStatus.CORRECT:
                    cropped, corners = self.transform(img, matches, kp1,
                                                      kp2[ref_name])
                    sign = cropped[sp[ind][0]:sp[ind][1], sp[ind][2]:sp[ind]
                                   [3]]  # attention: format [y1:y2, x1:x2]
                    blank_status = self.check_shot(cropped, bounds)
                    #print(blank_status)
                    if blank_status != BlankStatus.OUTOFSHOT:
                        blank_status, sign_area = self.confirm_sign(
                            sign, min_sign_area[ind])
                        cv2.rectangle(cropped, (sp[ind][2], sp[ind][0]),
                                      (sp[ind][3], sp[ind][1]), (0, 0, 255), 2)
                    cv2.rectangle(cropped, (bounds[0], bounds[1]),
                                  (bounds[2], bounds[3]), (0, 0, 255), 3)
                    cv2.imshow('window', cropped)
            else:
                matches = []
                blank_type = BlankType.UNDEFINED
                blank_status = None  # no reference matched at all
            ok = blank_status == BlankStatus.CONFIRMED
            writer.writerow([
                file_name.split("\\")[1],
                str(len(matches)),
                str(sign_area),
                str(blank_type.value),
                str(blank_status.value) if blank_status else '',
                str(ok)
            ])
            k = cv2.waitKey(1) & 0xff
            if k == ord('q'):
                exit()
Example #17
import cv2
import numpy as np
from cv2 import imread, xfeatures2d, FlannBasedMatcher


def mean_epipolar_error(pts1, pts2, fundMat):
    # Header and imports reconstructed from context: mean algebraic epipolar
    # error |x1^T F x2| over all correspondences
    assert len(pts1) == len(pts2)
    errors = []
    N = len(pts1)
    for n in range(N):
        v1 = np.array([[pts1[n][0], pts1[n][1], 1]])#size(1,3)
        v2 = np.array([[pts2[n][0]], [pts2[n][1]], [1]])#size(3,1)
        error = np.abs((v1@fundMat@v2)[0][0])
        errors.append(error)
    error = sum(errors)/len(errors)
    return error
    
if __name__ == "__main__":
    img1 = imread('rect_left.jpeg') 
    img2 = imread('rect_right.jpeg')

    # find the keypoints and descriptors with SIFT
    sift = xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)

    # FLANN parameters for points match
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)
    flann = FlannBasedMatcher(index_params,search_params)
    matches = flann.knnMatch(des1,des2,k=2)
    good = []
    pts1 = []
    pts2 = []
    dis_ratio = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.3 * n.distance:
            # body reconstructed; the original snippet was truncated here
            good.append(m)
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)
            dis_ratio.append(m.distance / n.distance)
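
    # A hedged continuation sketch (the snippet was truncated above): estimate
    # the fundamental matrix from the filtered correspondences and report the
    # mean algebraic epipolar error using the helper defined at the top.
    if len(pts1) >= 8:  # the 8-point algorithm needs at least 8 pairs
        pts1 = np.int32(pts1)
        pts2 = np.int32(pts2)
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
        print('mean epipolar error:', mean_epipolar_error(pts1, pts2, F))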
Example #18
import cv2
import cv2.xfeatures2d as cv  # only for the new version
from matplotlib import pyplot as plt
import numpy as np

MIN_MATCH_COUNT = 75

img1 = cv2.imread('default.jpg', 0)  # queryImage
img2 = cv2.imread('photos/IMG_2074_wrong.jpg', 0)  # trainImage

# resize images
def_h = 720  # TODO find the best value. Maximize the match-count delta between correct and wrong images
img1 = cv2.resize(img1, (int(def_h * 2 / 3), def_h))
img2 = cv2.resize(img2, (int(def_h * 2 / 3), def_h))

# Initiate SIFT detector
sift = cv.SIFT_create()  # cv2.SIFT() - old version

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img2, None)
kp2, des2 = sift.detectAndCompute(img1, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)

matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:  # loop body reconstructed; snippet was truncated here
        good.append(m)
Example #19
def solution(left_img, right_img):
    """
    :param left_img:
    :param right_img:
    :return: you need to return the result image which is stitched by left_img and right_img
    """
    # step 1: find keypoints with the SIFT detector
    from cv2 import xfeatures2d, cvtColor, COLOR_BGR2GRAY
    sift = xfeatures2d.SIFT_create()
    gray_left_img = cvtColor(left_img, COLOR_BGR2GRAY)
    gray_right_img = cvtColor(right_img, COLOR_BGR2GRAY)
    kp_left, des_left = sift.detectAndCompute(gray_left_img, None)
    kp_right, des_right = sift.detectAndCompute(gray_right_img, None)

    #kp_img_left = cv2.drawKeypoints(left_img, kp_left, None)
    #kp_img_right = cv2.drawKeypoints(right_img, kp_right, None)

    def compute_ED(A, B):
        """
        calculate euclidean distance matrix 'dist',
        for example, dist[i,j] refers to the distance between A[i,] and B[j,]
        Some codes in function "compute_ED" are from "https://medium.com/swlh/euclidean-distance-matrix-4c3e1378d87f" 
        """
        dist_ = np.sum(A**2, axis=1)[:, np.newaxis] + np.sum(
            B**2, axis=1) - 2 * np.dot(A, B.T)
        # clip tiny negative values caused by floating-point error before sqrt
        dist = np.sqrt(np.maximum(dist_, 0))
        return dist

    #step2: match the keypoints
    def match_kps(kp1, kp2, des1, des2):
        """
        find the matched pairs of keypoints'matched_kps',
        for example, in one row of matched_kps, it will contain the coordinates of one good pair of keypoints [x1, y1, x2, y2]
        """

        des1_idx = []
        des2_idx = []
        all_dist = compute_ED(des1, des2)
        n = all_dist.shape[0]

        for i in range(0, n):
            tmp = np.argsort(all_dist[i])
            dis1 = all_dist[i, tmp[0]]
            dis2 = all_dist[i, tmp[1]]
            if (dis1 / dis2 < 0.8):
                des1_idx.append(i)
                des2_idx.append(tmp[0])

        # Find the corresponding keypoint coordinates.
        coord1 = np.array([kp1[idx].pt for idx in des1_idx])
        coord2 = np.array([kp2[idx].pt for idx in des2_idx])

        return coord1, coord2

    src_pts, dst_pts = match_kps(kp_left, kp_right, des_left, des_right)

    #step3:calculate homography
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    #step4: stitch images
    def stitch(img1, img2, H):
        # warp img1 to img2 with homography H and stitch them

        #get the corners of img1 and img2
        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]
        corners1 = np.float32([[0, 0], [0, h1], [w1, h1],
                               [w1, 0]]).reshape(-1, 1, 2)
        corners2 = np.float32([[0, 0], [0, h2], [w2, h2],
                               [w2, 0]]).reshape(-1, 1, 2)

        # make sure all parts of img1 will be visible
        corners1_ = cv2.perspectiveTransform(corners1, H)

        #combine all corners and get the new image's corners
        corners = np.concatenate((corners1_, corners2), axis=0)
        [xmin, ymin] = np.int32(corners.min(axis=0).ravel() - 0.5)
        [xmax, ymax] = np.int32(corners.max(axis=0).ravel() + 0.5)

        # get the translation matrix and calculate the new homography
        translation_mat = np.array([[1, 0, -xmin], [0, 1, -ymin], [0, 0, 1]])
        H_ = np.dot(translation_mat, H)

        warped_img = cv2.warpPerspective(img1, H_, (xmax - xmin, ymax - ymin))
        warped_img[-ymin:h1 - ymin, -xmin:w1 - xmin] = img2

        return warped_img

    res = stitch(left_img, right_img, H)

    return res
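
# A hedged usage sketch (file names are placeholders): stitch two overlapping
# color images and write the panorama to disk.
left = cv2.imread('left.jpg')
right = cv2.imread('right.jpg')
cv2.imwrite('panorama.jpg', solution(left, right))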
Example #20
def SIFT(img1, img2):
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    return des1, des2, kp1, kp2
def main():
    # create SIFT
    sift = cv2.SIFT_create()

    # set window size
    cv2.namedWindow("window", cv2.WINDOW_NORMAL)
    cv2.resizeWindow('window', int(640 * 2 / 3), 640)

    # create log file
    os.chdir("../")
    fh = open('results1.csv', 'w')
    writer = csv.writer(fh)
    writer.writerow(
        ["File name", "number of matches", "area of sign", "Blank type"])

    # main variables
    def_h = 750  # image resolution (y axis) - dependent (TODO make independent) - don't set less than 700px
    min_match = 50  # = func(def_h) # min number of matches, correct = 70-364, wrong = 0-33
    min_sign_area = [
        500, 750, 550
    ]  # min area of sign, correct = 599-899/1057-1142/752-897, wrong = 0-392/0-491/0-366
    # TODO minimize the sign frame to minimize the contour area of an empty blank -> sp
    # min_sign_area should stay the same
    # min sign area about 30%

    # load default clear blanks
    ref_folder = 'reference/'
    ref_names = ['default.jpg', 'default2.jpg', 'default3.jpg']
    blank_types = [BlankType.GENERAL, BlankType.CHILD18, BlankType.CHILD14]
    ref = {}
    kp2 = {}
    des2 = {}
    for ref_name in ref_names:
        ref[ref_name] = cv2.imread(ref_folder + ref_name, 0)
        ref[ref_name] = cv2.resize(ref[ref_name], (int(def_h * 2 / 3), def_h))
        kp2[ref_name], des2[ref_name] = sift.detectAndCompute(
            ref[ref_name], None)

    # sign position, format [y1, y2, x1, x2]
    sp = [[650, 690, 350, 400], [615, 645, 135, 185], [660, 690, 190, 250]]
    #sp = [int(i * def_h/750) for i in sp] TODO
    #TODO sp*def_h/750, also for min match and sign area

    # user uploaded images
    data_folder = "photos/"
    file_names = glob.glob(data_folder + '*.jpg')

    for file_name in file_names:
        img = cv2.imread(file_name, 0)
        img = cv2.resize(img, (int(def_h * 2 / 3), def_h))
        kp1, des1 = sift.detectAndCompute(img, None)

        match_count = 0
        matches_list = []
        for ref_name in ref_names:
            matches = Match(des1, des2[ref_name], kp1, kp2[ref_name])
            if matches is None:
                continue
            matches_list.append(matches)
            if len(matches) > match_count:
                match_count = len(matches)
                ind = ref_names.index(ref_name)

        sign_area = 0
        if len(matches_list) > 0:
            blank_type = blank_types[ind]
            matches = matches_list[ind]
            ref_name = ref_names[ind]

            blank_status = verify_blank(matches, min_match)
            if blank_status == BlankStatus.CORRECT:
                cropped = transform(img, matches, kp1, kp2[ref_name])
                sign = cropped[sp[ind][0]:sp[ind][1], sp[ind][2]:sp[ind]
                               [3]]  # attention: format [y1:y2, x1:x2]
                # TODO inshot = check_shot(cropped) - in func: threshold and calculate area of blank (should be more than 90%)
                blank_status, sign_area = confirm_sign(sign,
                                                       min_sign_area[ind])
                cv2.rectangle(cropped, (sp[ind][2], sp[ind][0]),
                              (sp[ind][3], sp[ind][1]), (0, 0, 255), 2)
                cv2.imshow('window', cropped)
        else:
            matches = []
            blank_type = BlankType.UNDEFINED
        writer.writerow([
            file_name.split("\\")[1],
            str(len(matches)),
            str(sign_area),
            str(blank_type)
        ])
        k = cv2.waitKey(1) & 0xff
        if k == ord('q'):
            exit()
def SIFT_KNN_BBS(img1, img2):
    # inputs are already-loaded images (the earlier cv2.imread calls were dropped)
    t1 = img1
    t2 = img2

    sift = cv2.SIFT_create()

    kp1, des1 = sift.detectAndCompute(t1, None)
    kp2, des2 = sift.detectAndCompute(t2, None)

    f = cv2.drawKeypoints(t1,
                          kp1,
                          None, [0, 0, 255],
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    nf = cv2.drawKeypoints(t2,
                           kp2,
                           None, [255, 0, 0],
                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    good1 = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good1.append([m])

    matches = bf.knnMatch(des2, des1, k=2)

    good2 = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good2.append([m])

    good = []

    for i in good1:
        img1_id1 = i[0].queryIdx
        img2_id1 = i[0].trainIdx

        (x1, y1) = kp1[img1_id1].pt
        (x2, y2) = kp2[img2_id1].pt

        for j in good2:
            img1_id2 = j[0].queryIdx
            img2_id2 = j[0].trainIdx

            (a1, b1) = kp2[img1_id2].pt
            (a2, b2) = kp1[img2_id2].pt

            if (a1 == x2 and b1 == y2) and (a2 == x1 and b2 == y1):
                good.append(i)

    #print (kp1, kp2, good)
    result = cv2.drawMatchesKnn(t1,
                                kp1,
                                t2,
                                kp2,
                                good,
                                None, [0, 0, 255],
                                flags=2)

    #plt.imshow(result, interpolation = 'bicubic')
    #plt.axis('off')
    #plt.show()

    return kp1, kp2, good
Example #23
def MatchFeatures(img1, img2):
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    f = cv2.drawKeypoints(img1,
                          kp1,
                          None, [0, 0, 255],
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    nf = cv2.drawKeypoints(img2,
                           kp2,
                           None, [255, 0, 0],
                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    good1 = []
    try:
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good1.append([m])
    except ValueError:
        print("not enough matches")
        return False, [], []

    matches = bf.knnMatch(des2, des1, k=2)

    good2 = []
    try:
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good2.append([m])
    except ValueError:
        print("not enough matches")
        return False, [], []

    good = []
    A = []
    B = []
    for i in good1:
        img1_id1 = i[0].queryIdx
        img2_id1 = i[0].trainIdx
        (x1, y1) = kp1[img1_id1].pt
        (x2, y2) = kp2[img2_id1].pt

        for j in good2:
            img1_id2 = j[0].queryIdx
            img2_id2 = j[0].trainIdx

            (a1, b1) = kp2[img1_id2].pt
            (a2, b2) = kp1[img2_id2].pt

            if (a1 == x2 and b1 == y2) and (a2 == x1 and b2 == y1):
                good.append(i)
                A.append([a2, b2])
                B.append([a1, b1])

    result = cv2.drawMatchesKnn(img1,
                                kp1,
                                img2,
                                kp2,
                                good,
                                None, [0, 0, 255],
                                flags=2)
    # require a handful of mutually-consistent matches before accepting
    ok = (len(A) > 5) and (len(B) > 5)
    return ok, np.transpose(A), np.transpose(B)