def featureAlign(im1, im2):

    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(max_features)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * feature_retention)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                None)
    #cv2.imwrite("matches.jpg", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h
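
A minimal usage sketch for featureAlign. It assumes the module-level constants max_features and feature_retention that the snippet reads (values here are illustrative), plus hypothetical input file names.

import cv2
import numpy as np

# Assumed module-level tuning constants read by featureAlign (hypothetical values)
max_features = 500        # number of ORB keypoints to detect
feature_retention = 0.15  # fraction of best matches kept for the homography

if __name__ == "__main__":
    # Hypothetical inputs: the image to warp and the reference to align it to
    im = cv2.imread("scanned.jpg")
    ref = cv2.imread("reference.jpg")
    aligned, h = featureAlign(im, ref)
    cv2.imwrite("aligned.jpg", aligned)
    print("Estimated homography:\n", h)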
Example #2
def reg_sift(mover, target):
  MAX_FEATURES = 500
  GOOD_MATCH_PERCENT = 0.15
  # print(target)
  orb = cv2.ORB_create(nfeatures=1500)
  keypoints1, descriptors1 = orb.detectAndCompute(target, None)
  keypoints2, descriptors2 = orb.detectAndCompute(mover, None)

  img = cv2.drawKeypoints(target, keypoints1, None)
  cv2.imshow("keypoints", img)
  cv2.waitKey(0)
  cv2.destroyAllWindows()


  # Match features.
  matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
  matches = matcher.match(descriptors1, descriptors2, None)
   
  # Sort matches by score
  matches.sort(key=lambda x: x.distance, reverse=False)
 
  # Remove not so good matches
  numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
  matches = matches[:numGoodMatches]
 
  # Draw top matches
  imMatches = cv2.drawMatches(target, keypoints1, mover, keypoints2, matches, None)
  # cv2.imwrite("matches.jpg", imMatches)
   
  # Extract location of good matches
  points1 = np.zeros((len(matches), 2), dtype=np.float32)
  points2 = np.zeros((len(matches), 2), dtype=np.float32)
 
  for i, match in enumerate(matches):
    points1[i, :] = keypoints1[match.queryIdx].pt
    points2[i, :] = keypoints2[match.trainIdx].pt
   
  # Find homography
  h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
 
  # Use homography
  height, width = mover.shape
  targetReg = cv2.warpPerspective(target, h, (width, height))
   
  return targetReg, h
Example #3
def alignImages(im1, im2):
    max_features = 500
    good_match_percent = 0.15

    # Making gray images
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(max_features)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * good_match_percent)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                None)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    homog, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, homog, (width, height))

    return im1Reg, homog, imMatches
Example #4
def align_images(src, template):

    MAX_FEATURES = 500
    GOOD_MATCH_PERCENT = 0.15

    # Convert to grayscale.
    src_gs = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    template_gs = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(src_gs, None)
    keypoints2, descriptors2 = orb.detectAndCompute(template_gs, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score.
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove the worst matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches (disabled)
    #im_matches = cv2.drawMatches(src, keypoints1, template, keypoints2, matches, None)
    #cv2.imwrite("matches.jpg", imMatches)

    # Extract location of the best matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Compute homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    height, width, channels = template.shape
    src_reg = cv2.warpPerspective(src, h, (width, height))

    return src_reg, h
def align_images(image,
                 template,
                 maxFeatures=500,
                 keepPercent=0.2,
                 debug=False):
    # Convert both the input image and template to grayscale
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    templateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    # Use ORB to detect keypoints and extract (binary) local invariant features
    orb = cv2.ORB_create(maxFeatures)
    (kpsA, descsA) = orb.detectAndCompute(imageGray, None)
    (kpsB, descsB) = orb.detectAndCompute(templateGray, None)
    # Match the features
    method = cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING
    matcher = cv2.DescriptorMatcher_create(method)
    matches = matcher.match(descsA, descsB, None)
    # Sort the matches by their distance (the smaller the distance, the "more similar" the features are)
    matches = sorted(matches, key=lambda x: x.distance)
    # Keep only the top matches
    keep = int(len(matches) * keepPercent)
    matches = matches[:keep]
    # Check to see if we should visualize the matched keypoints
    if debug:
        matchedVis = cv2.drawMatches(image, kpsA, template, kpsB, matches,
                                     None)
        matchedVis = imutils.resize(matchedVis, width=1000)
        cv2.imshow("Matched Keypoints", matchedVis)
        cv2.waitKey(0)
    # Allocate memory for the keypoints (x,y-coordinates) from the top matches
    # -- These coordinates are going to be used to compute our homography matrix
    ptsA = np.zeros((len(matches), 2), dtype="float")
    ptsB = np.zeros((len(matches), 2), dtype="float")
    # Loop over the top matches
    for (i, m) in enumerate(matches):
        # Indicate that the two keypoints in the respective images map to each other
        ptsA[i] = kpsA[m.queryIdx].pt
        ptsB[i] = kpsB[m.trainIdx].pt
    # Compute the homography matrix between the two sets of matched points
    (H, mask) = cv2.findHomography(ptsA, ptsB, method=cv2.RANSAC)
    # Use the homography matrix to align the images
    (h, w) = template.shape[:2]
    aligned = cv2.warpPerspective(image, H, (w, h))
    # Return the aligned image
    return aligned
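
A brief usage sketch for the align_images variant above (the one with the debug flag). The file names are hypothetical, and imutils is only needed for the debug visualization path.

import cv2
import imutils  # only needed when debug=True
import numpy as np

# Hypothetical inputs: a photographed form and the blank template to align it to
image = cv2.imread("scan.jpg")
template = cv2.imread("form_template.jpg")

# Visualize the kept matches while tuning maxFeatures / keepPercent
aligned = align_images(image, template, maxFeatures=500, keepPercent=0.2,
                       debug=True)
cv2.imwrite("scan_aligned.jpg", aligned)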
def translateImage(im1, im2):
    try:
        # Convert images to grayscale
        im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
        # Detect ORB features and compute descriptors.
        orb = cv2.ORB_create(MAX_FEATURES)
        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
        # Match features.
        matcher = cv2.DescriptorMatcher_create(
            cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        matches = matcher.match(descriptors1, descriptors2, None)
        # Sort matches by score
        matches.sort(key=lambda x: x.distance, reverse=False)
        # Remove not so good matches
        numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
        matches = matches[:numGoodMatches]
        # Draw top matches
        imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                    None)
        #cv2.imwrite("matches.jpg", imMatches)
        # Extract location of good matches
        points1 = np.zeros((len(matches), 2), dtype=np.float32)
        points2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points1[i, :] = keypoints1[match.queryIdx].pt
            points2[i, :] = keypoints2[match.trainIdx].pt
        # Estimate an affine transform; only its translation component is kept
        h, mask = cv2.estimateAffine2D(points1, points2)
        h[0][0] = 1
        h[0][1] = 0
        h[1][0] = 0
        h[1][1] = 1
        #print(h)
        # Apply the translation-only warp
        height, width, channels = im2.shape
        im1Reg = cv2.warpAffine(im1,
                                h, (width, height),
                                borderValue=(255, 255, 255))
        return im1Reg, h
    except Exception:
        h = "error"
        return im1, h
Example #7
def main():
    imgs = []
    for i in range(3):
        img = cv2.imread('frame{}.png'.format(str(i).zfill(4)))
        dsize = (int(img.shape[1] * 0.4), int(img.shape[0] * 0.4))
        img = cv2.resize(img, dsize)
        imgray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        imgs.append(imgray)
        del img

    sift = cv2.SIFT_create()
    matcher = cv2.DescriptorMatcher_create('FlannBased')
    for img1, img2 in itertools.combinations(imgs, 2):
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)
        matches = matcher.match(des1, des2)
        yield matches
        good_matches = [dmatch for dmatch in matches if dmatch.distance < 100]
        drawMatches(img1, kp1, img2, kp2, good_matches)
Example #8
def featurecompare(des1, des2):
    # FLANN parameters
    # FLANN_INDEX_KDTREE = 1
    # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    # search_params = dict(checks=50)   # or pass empty dictionary

    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    matches = matcher.knnMatch(np.asarray(des1, np.float32),
                               np.asarray(des2, np.float32), 2)  #2

    # ratio_thresh = 0.7
    # good_matches = []
    # for m,n in matches:
    #     if m.distance < ratio_thresh * n.distance:
    #         good_matches.append(m)

    # flann = cv2.FlannBasedMatcher(index_params,search_params)
    # matches = flann.knnMatch(des1,des2,k=2)
    return matches
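
A short sketch showing how the commented-out ratio test could be applied to the k=2 pairs returned by featurecompare; the random arrays below stand in for real SIFT-style float descriptors.

import numpy as np

# Stand-in float descriptors (e.g. 128-d SIFT vectors)
des1 = np.random.rand(150, 128).astype(np.float32)
des2 = np.random.rand(180, 128).astype(np.float32)

matches = featurecompare(des1, des2)

# Lowe's ratio test over the two nearest neighbours of each query descriptor
ratio_thresh = 0.7
good_matches = [m for m, n in matches if m.distance < ratio_thresh * n.distance]
print("kept", len(good_matches), "of", len(matches), "matches")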
def match_descriptors(desc_a,
                      desc_b,
                      m_type="Brute",
                      norm_type=cv2.NORM_L2,
                      knn=1,
                      matcher=None):
    """ Matches the given descriptors using OpenCV matchers. KNN matching.

    Two M dimensional sets of N descriptors are given as input, as NxM numpy arrays. Type of matching can be
    specified as either Brute Force or Flann. Distance used is also specified using OpenCV's norm types.
    An input matcher can be given optionally.

    :param desc_a: first set of descriptors;
    :param desc_b: second set of descriptors;
    :param m_type: type of matcher. Either 'Brute' or 'Flann';
    :param norm_type: type of distance. For available types, check cv2.NORM_*. Default is cv2.NORM_L2;
    :param knn: number of neighbors;
    :param matcher: pre-allocated matcher.

    :return: list of lists, each containing K DMatch structures. The list at index i contains the K
             nearest neighbors in desc_b of the i-th descriptor of desc_a; matching time.

    """

    if not matcher:
        if m_type == "Brute":
            matcher = cv2.BFMatcher(normType=norm_type, crossCheck=False)
        elif m_type == "Flann":
            matcher = cv2.DescriptorMatcher_create("FlannBased")
        else:
            raise Exception(
                "Unsupported matcher type! {0:s} not in (Brute, Flann)".format(
                    m_type))

        ts = time.time()
        matched_desc = matcher.knnMatch(desc_a, desc_b, k=knn)
        te = time.time()
    else:
        ts = time.time()
        matched_desc = matcher.knnMatch(desc_a, desc_b, k=knn)
        te = time.time()

    return matched_desc, te - ts
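
A usage sketch for match_descriptors, assuming cv2, numpy and time are imported at module level as the snippet requires; the descriptor arrays below are illustrative.

import cv2
import numpy as np

# Two sets of 128-d float descriptors (e.g. from SIFT), as NxM numpy arrays
desc_a = np.random.rand(200, 128).astype(np.float32)
desc_b = np.random.rand(300, 128).astype(np.float32)

# Brute-force L2 matching, 2 nearest neighbours per descriptor of desc_a
nn, elapsed = match_descriptors(desc_a, desc_b, m_type="Brute",
                                norm_type=cv2.NORM_L2, knn=2)
print(len(nn), "query descriptors matched in %.4f s" % elapsed)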
Example #10
def alignImagesORB(im1, im2):
    orb1 = cv2.ORB_create(FEATURES_DENSITY)
    orb2 = cv2.ORB_create(FEATURES_DENSITY * int(round((im1.size / im2.size))))
    keypoints1, descriptors1 = orb1.detectAndCompute(im1, None)
    keypoints2, descriptors2 = orb2.detectAndCompute(im2, None)

    print("keypoints1 len : ", len(keypoints1))
    print("keypoints2 len : ", len(keypoints2))
    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score
    print("matches len : ", len(matches))
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    print("numGoodMatches : ", numGoodMatches)
    matches = matches[:numGoodMatches]
Example #11
def alignImages(im1, im2):

    MAX_FEATURES = 400
    GOOD_MATCH_PERCENT = 0.05

    # Detect ORB features and compute descriptors.

    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches,
                                None)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width = im1.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h
Example #12
def alignment(x_target, x_ref):
    MAX_FEATURES = 500
    GOOD_MATCH_PERCENT = 0.15

    # Convert images to grayscale
    im1 = (x_ref.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
    im2 = (x_target.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return torch.from_numpy(im1Reg.astype(np.float64) / 255).permute(2, 0, 1)
Example #13
def align_images(img, ref, max_matches, good_match_percent):
    # Convert images to grayscale
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ref_gray = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(max_matches)
    keypoints_img, descriptors_img = orb.detectAndCompute(img_gray, None)
    keypoints_ref, descriptors_ref = orb.detectAndCompute(ref_gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors_img, descriptors_ref, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    num_good_matches = int(len(matches) * good_match_percent)
    matches = matches[:num_good_matches]

    # Draw top matches
    img_matches = cv2.drawMatches(img, keypoints_img, ref, keypoints_ref,
                                  matches, None)
    cv2.imwrite('matches.jpg', img_matches)

    # Extract location of good matches
    points_img = np.zeros((len(matches), 2), dtype=np.float32)
    points_ref = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points_img[i, :] = keypoints_img[match.queryIdx].pt
        points_ref[i, :] = keypoints_ref[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points_img, points_ref, cv2.RANSAC)

    # Use homography
    height, width, channels = ref.shape
    img_reg = cv2.warpPerspective(img, h, (width, height))

    return img_reg, h
def alignImages(img1, img2):
	gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
	gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

	# Detecting ORB (Oriented Fast and Rotated BRIEF) features and descriptors
	orb = cv2.ORB_create(MAX_FEATURES)
	keypoints1, descriptors1 = orb.detectAndCompute(gray1, None)
	keypoints2, descriptors2 = orb.detectAndCompute(gray2, None)

	# Matching the features
	matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
	matches = matcher.match(descriptors1, descriptors2, None)
	# print("Matches before sorting", matches)

	# Sort matches by score
	matches.sort(key = lambda x:x.distance, reverse=False)
	# print("After sorting ", matches)

	# Remove not so good matches
	num_goodMatches = round(len(matches) * MATCH_PERCENT)
	print(num_goodMatches)
	matches = matches[:num_goodMatches]

	# Draw the top matches
	matched_img = cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches, None)
	cv2.imwrite("matched.png", matched_img)

	# Extract location of good matches
	points1 = np.zeros((len(matches), 2), dtype=np.float32)
	points2 = np.zeros((len(matches), 2), dtype=np.float32)

	for i, match in enumerate(matches):
		points1[i, :] = keypoints1[match.queryIdx].pt
		points2[i, :] = keypoints2[match.trainIdx].pt

	# Find Homography
	homography, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
	print(homography)
	height, width, channels = img2.shape
	print(height, width)
	warped = cv2.warpPerspective(img1, homography, (width, height))

	return warped, homography
Example #15
    def __match_keypoints(kps_a, kps_b, features_a, features_b, ratio,
                          reproj_tresh):
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        raw_matches = matcher.knnMatch(features_a, features_b, 2)
        matches = []

        for m in raw_matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if len(matches) > 4:
            pts_a = np.float32([kps_a[i] for (_, i) in matches])
            pts_b = np.float32([kps_b[i] for (i, _) in matches])

            H, status = cv2.findHomography(pts_a, pts_b, cv2.RANSAC,
                                           reproj_tresh)
            return matches, H, status

        return None
Example #16
def alignImages(image, reference):
	#get grayscale
	imgG = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	refG = cv2.cvtColor(reference, cv2.COLOR_BGR2GRAY)

	#apply ORB for features and descriptors
	orb = cv2.ORB_create(MAX_FEATURES)
	imgKeyPoints, imgDescriptors = orb.detectAndCompute(imgG, None)
	refKeyPoints, refDescriptors = orb.detectAndCompute(refG, None)

	#match features from image to reference
	matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
	matches = matcher.match(imgDescriptors, refDescriptors, None)

	#sort matches by score for filtering
	matches.sort(key=lambda x: x.distance, reverse=False)
	goodMatcheTotal = int(len(matches) * GOOD_MATCH_PERCENTAGE)
	matches = matches[:goodMatcheTotal]

	#draw top matches of reference points
	topMatches = cv2.drawMatches(image, imgKeyPoints, reference, refKeyPoints, matches, None)

	#Gather coords for best matches
	imgPoints = np.zeros((len(matches), 2), dtype = np.float32)
	refPoints = np.zeros((len(matches), 2), dtype = np.float32)

	for i, match in enumerate(matches):
		imgPoints[i, :] = imgKeyPoints[match.queryIdx].pt
		refPoints[i, :] = refKeyPoints[match.trainIdx].pt

	#calculate homography
	homography, mask = cv2.findHomography(imgPoints, refPoints, cv2.RANSAC)

	#apply homography
	height, width, channels = image.shape

	#frame = np.float32([[0,0], [0,height], [width,height], [width, 0]]).reshape(-1,1,2)
	#result = cv2.perspectiveTransform(frame, homography)

	result = cv2.warpPerspective(image, homography, (width, height))


	return result, homography
Example #17
def match(file_reference, file_input):
    """Match characteristics features between the two input images"""
    try:
        MAX_FEATURES = 500
        GOOD_MATCH_PERCENT = 0.15

        imReference = cv2.imread(file_reference, 0)  # 0 = cv2.IMREAD_GRAYSCALE
        imInput = cv2.imread(file_input, 0)

        # Convert images to gray scale
        im1Gray = imReference  # cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)
        im2Gray = imInput  # cv2.cvtColor(imInput, cv2.COLOR_BGR2GRAY)

        # Detect ORB features and compute descriptors.
        orb = cv2.ORB_create(MAX_FEATURES)
        keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
        keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

        # Match features.
        matcher = cv2.DescriptorMatcher_create(
            cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
        # matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = matcher.match(descriptors1, descriptors2, None)

        # Sort matches by score
        matches.sort(key=lambda x: x.distance, reverse=False)

        # Remove not so good matches
        numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
        matches = matches[:numGoodMatches]

        # Draw top matches
        imMatches = cv2.drawMatches(imReference,
                                    keypoints1,
                                    imInput,
                                    keypoints2,
                                    matches,
                                    None,
                                    flags=2)

        return imMatches
    except Exception:
        return None
def align_image(image1, image2):
    """Takes two images, returns homography and corrected image"""
    # convert images to grayscale
    gray_image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray_image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

    # detect ORB features and compute descriptors
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(gray_image1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(gray_image2, None)

    # match features
    matcher = cv2.DescriptorMatcher_create('BruteForce-Hamming')
    matches = matcher.match(descriptors1, descriptors2, None)

    # sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # remove poor matches
    num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:num_good_matches]

    # draw top matches
    image_matches = cv2.drawMatches(image1, keypoints1, image2, keypoints2,
                                    matches, None)
    cv2.imwrite('matches.jpg', image_matches)

    # extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # use homography
    height, width, channels = image2.shape
    registered_image = cv2.warpPerspective(image1, h, (width, height))

    return registered_image, h
Example #19
    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio,
                       reprojThresh):
        # compute the raw matches and build list of actual matches that pass check

        # FLANN_INDEX_KDTREE = 0
        # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        # search_params = dict(checks = 50)   # or pass empty dictionary
        # flann = cv2.FlannBasedMatcher(index_params,search_params)
        # rawMatches = flann.knnMatch(featuresA,featuresB,k=2)

        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)

        self.logger.info("Found {} raw matches".format(len(rawMatches)))

        matches = []
        # loop over the raw matches and remove outliers
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        self.logger.info("Found {} matches after Lowe's test".format(
            len(matches)))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        self.logger.warning("Homography could not be computed!")
        return None
def match_keys(kpsA, kpsB, featuresA, featuresB, ratio, thresh):
    """
    DESCRIPTION
    This function compares the features for the two images and creates a 
    matching keys set.
    
    INPUT
    kpsA = 
    kpsB =
    featuresA = 
    featuresB = 
    ratio = 
    threshold =
    
    OUTPUT
    matches = the matching key points between images
    h_matrix = the homography matrix to merge both images
    status = the status of each key point??? check this
    """

    # compute the raw matches and initialize the list of actual matches
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))

    # computing a homography requires at least 4 matches
    if len(matches) > 4:
        # construct the two sets of points
        ptsA = numpy.float32([kpsA[i] for (_, i) in matches])
        ptsB = numpy.float32([kpsB[i] for (i, _) in matches])
        # compute the homography between the two sets of points
        h_matrix, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, thresh)
        #return
        return matches, h_matrix, status
    # otherwise, no homograpy could be computed
    return None, None, None
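
A usage sketch for match_keys, assuming an OpenCV build with cv2.SIFT_create; the image paths and parameter values are illustrative. The helper indexes keypoints by position, so the keypoints are first converted to plain coordinate arrays.

import cv2
import numpy

imgA = cv2.imread("left.jpg", cv2.IMREAD_GRAYSCALE)
imgB = cv2.imread("right.jpg", cv2.IMREAD_GRAYSCALE)

sift = cv2.SIFT_create()
kpsA, featuresA = sift.detectAndCompute(imgA, None)
kpsB, featuresB = sift.detectAndCompute(imgB, None)

# match_keys indexes kpsA/kpsB by position, so pass coordinate arrays
kpsA = numpy.float32([kp.pt for kp in kpsA])
kpsB = numpy.float32([kp.pt for kp in kpsB])

matches, h_matrix, status = match_keys(kpsA, kpsB, featuresA, featuresB,
                                       ratio=0.75, thresh=4.0)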
Example #21
def match_key_points(right_key_points, left_key_points, right_features,
                     left_features, ratio, reproj_thresh):
    # A cv2 class that matches keypoint descriptors
    # FLANN is a much faster method for large datasets, so it may be a good
    # idea to switch to that. However it is a very different code set up
    # that uses a couple dictionaries, so there's a bit that'll have to
    # change
    matcher = cv2.DescriptorMatcher_create("BruteForce")

    # knnMatch returns, for each query descriptor, the k nearest matches
    # (as DMatch objects); k=2 so the two nearest neighbours can be
    # compared in Lowe's ratio test below
    raw_matches = matcher.knnMatch(right_features, left_features, 2)

    # Turns the raw_matches into tuples we can work with, while also
    # filtering out ambiguous matches with Lowe's ratio test
    # It is equivalent to the following for loop
    #        matches = []
    #        for m in raw_matches:
    #            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
    #                matches.append((m[0].trainIdx, m[0].queryIdx))
    matches = [(m[0].trainIdx, m[0].queryIdx) for m in raw_matches
               if len(m) == 2 and m[0].distance < m[1].distance * ratio]

    # Converts the tuples into a numpy array (for computing the
    # homography), while also splitting up the right and left points
    # The ratio test above keeps only high-quality matches; typical ratio
    # values are between 0.7 and 0.8
    # Computing a homography requires at least 4 matches
    if len(matches) > 4:
        # Split right and left into numpy arrays
        right_points = np.float32([right_key_points[i] for (_, i) in matches])
        left_points = np.float32([left_key_points[i] for (i, _) in matches])

        # Use the cv2 to actually connect the dots between the two pictures
        (H, status) = cv2.findHomography(right_points, left_points, cv2.RANSAC,
                                         reproj_thresh)
        return (matches, H, status)
    else:
        return None
def getGoodMatchPoint(kp1, kp2, des1, des2, ratio, reprojThresh):
    print('C')
    # Initialize a brute-force matcher; SIFT descriptors are used, so the default (L2) parameters apply
    matcher = cv2.DescriptorMatcher_create('BruteForce')
    # bf = cv2.BFMatcher()
    # matches = bf.knnMatch(des1, des2, k=2)
    matches = matcher.knnMatch(des1, des2, 2)

    # Keep only the good matches (Lowe's ratio test)
    good = []
    for m in matches:
        if len(m) == 2 and m[0].distance < ratio * m[1].distance:
            good.append((m[0].trainIdx, m[0].queryIdx))

    src_pts = np.float32([kp1[i] for (_, i) in good])
    dst_pts = np.float32([kp2[i] for (i, _) in good])

    return [src_pts, dst_pts]
def solution(left_img, right_img):
    """
    :param left_img:
    :param right_img:
    :return: you need to return the result image which is stitched by left_img and right_img
    """

    ratio = 0.8
    ransac_threshold = 4.0

    descriptor = cv2.xfeatures2d.SIFT_create()

    (keypoints_right_img, features_right_img) = descriptor.detectAndCompute(right_img, None)
    keypoints_right_img = np.float32([keypoint_right_img.pt for keypoint_right_img in keypoints_right_img])

    (keypoints_left_img, features_left_img) = descriptor.detectAndCompute(left_img, None)
    keypoints_left_img = np.float32([keypoint_left_img.pt for keypoint_left_img in keypoints_left_img])

    matcher = cv2.DescriptorMatcher_create("BruteForce")
    raw_matches = matcher.knnMatch(features_right_img, features_left_img, 2)
    matches = list()
    for match in raw_matches:
        if len(match) == 2 and match[0].distance < match[1].distance * ratio:
            matches.append((match[0].trainIdx, match[0].queryIdx))

    homography = None
    if len(matches) > 4:
        points_right_img = np.float32([keypoints_right_img[i] for (_, i) in matches])
        points_left_img = np.float32([keypoints_left_img[i] for (i, _) in matches])

        (homography, mask) = cv2.findHomography(points_right_img, points_left_img, cv2.RANSAC, ransac_threshold)

    # M = (matches, H, status)

    if homography is None:
        return None
    # (matches, H, status) = M

    result = cv2.warpPerspective(right_img, homography, (left_img.shape[1] + right_img.shape[1], right_img.shape[0]))
    result[0:left_img.shape[0], 0:left_img.shape[1]] = left_img

    return result

Example #24
		def match(self, im1,im2, direction=None, method='homography'):
			# compute the raw matches and initialize the list of actual
			# matches
			print("Direction : ", direction)
			kpsA, featuresA = self.detectAndDescribe(im1)
			kpsB, featuresB = self.detectAndDescribe(im2)
			matcher = cv2.DescriptorMatcher_create("BruteForce")
			rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
			matches = []

			# loop over the raw matches
			for m in rawMatches:
				# ensure the distance is within a certain ratio of each
				# other (i.e. Lowe's ratio test)
				if len(m) == 2 and m[0].distance < m[1].distance * 0.65:
					matches.append((m[0].trainIdx, m[0].queryIdx))

			print("matches selected: ",len(matches))
			# computing a homography requires at least 4 matches
			if len(matches) > 4:
				# construct the two sets of points
				ptsA = np.float32([kpsA[i] for (_, i) in matches])
				ptsB = np.float32([kpsB[i] for (i, _) in matches])

				# compute the homography between the two sets of points

				if method == 'affine':
					partial_homo, ma = cv2.estimateAffine2D(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold=5.0)
					H = np.array([[0.0, 0, 0], [0.0, 0, 0], [0.0, 0, 1]])
					H[0:partial_homo.shape[0],0:partial_homo.shape[1]] = partial_homo
					print(H)
					print(H.shape)
        			#M = np.append(M, [[0,0,1]], axis=0)

				if method == 'homography':
					H, mask = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,5.0)

				# return the homography matrix
				return H

			# otherwise, no homography could be computed
			return None
Example #25
    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio,
                       reProjThresh):
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rowMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        for m in rowMatches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if len(matches) > 4:
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reProjThresh)

            return (H, status)
        return None
    def __call__(self, images1, image2, warp_images1, warp_image2):

        h_seq = []
        for image1, warp_image1 in zip(images1, warp_images1):

            self.warp_image2 = warp_image2
            self.warp_image1 = warp_image1

            descriptor = cv2.xfeatures2d.SIFT_create()
            (kpsA, featuresA) = descriptor.detectAndCompute(warp_image2, None)

            self.kpsA = numpy.float32([kp.pt for kp in kpsA])

            descriptor = cv2.xfeatures2d.SIFT_create()
            (kpsB, featuresB) = descriptor.detectAndCompute(warp_image1, None)

            self.kpsB = numpy.float32([kp.pt for kp in kpsB])

            reprojThresh = 4.0
            ratio = 0.75

            matcher = cv2.DescriptorMatcher_create("BruteForce")
            self.matches = []
            if featuresA is not None and featuresB is not None:
                rawMatches = matcher.knnMatch(featuresA, featuresB, 2)

                for m in rawMatches:
                    if (len(m) == 2) and m[0].distance < m[1].distance * ratio:
                        self.matches.append((m[0].trainIdx, m[0].queryIdx))

            if len(self.matches) > 4:

                ptsA = numpy.float32([self.kpsA[i] for (_, i) in self.matches])
                ptsB = numpy.float32([self.kpsB[i] for (i, _) in self.matches])

                (H, self.status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                                      reprojThresh)

                h_seq.append(H)
            else:
                self.status = [False for i in self.matches]

        return h_seq
Example #27
def getTranslationModel(images):
    '''
    Calculates the motion vector for each image
    with respect to the first image
    '''

    F = [np.array([[1, 0, 0], [0, 1, 0]])]
    # Create a sift detector/computer
    sift = cv2.xfeatures2d.SIFT_create()
    # Create a matcher to find matching points
    matcher = cv2.DescriptorMatcher_create("BruteForce")

    # For each images in the image set, register it to the first image.
    kp1, features1 = sift.detectAndCompute(images[0, :, :], None)
    kp1 = np.float32([kp.pt for kp in kp1])

    for i in range(1, images.shape[0]):
        # Get the feature points
        kp2, features2 = sift.detectAndCompute(images[i, :, :], None)
        # Convert the points to indexable points
        kp2 = np.float32([kp.pt for kp in kp2])
        # Get the matches
        rawMatches = matcher.knnMatch(features1, features2, k=2)
        matches = []

        r = 0.8
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2:
                temp_dist = np.array([m[0].distance, m[1].distance])
                ratio = np.min(temp_dist) / np.max(temp_dist)
                if ratio < r:
                    matches.append((m[0].trainIdx, m[0].queryIdx))

        # Convert the matches to a format used by cv2.estimateAffinePartial2D
        ptsA = np.float32([kp1[i] for (_, i) in matches])
        ptsB = np.float32([kp2[i] for (i, _) in matches])
        # Calculate the translational model.
        M, w = cv2.estimateAffinePartial2D(ptsA, ptsB)
        M[0:2, 0:2] = np.array([[1, 0], [0, 1]])
        F.append(M)
    return np.array(F)
Example #28
def panorama(img1, img2):
    '''
    img1 left image (RGB)
    img2 right image (RGB)
    result (output) panorama image
    '''

    gray_image1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    gray_image2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)

#   detector = cv2.FeatureDetector_create("SURF")
    detector = cv2.xfeatures2d.SURF_create(400)
    kp1 = detector.detect(gray_image1)
    kp2 = detector.detect(gray_image2)
    print('keypoints in image1: %d, image2: %d' % (len(kp1), len(kp2)))

    k1, d1 = detector.compute(gray_image1, kp1)  # keypoints object
    k2, d2 = detector.compute(gray_image2, kp2)  # keypoints scene

    # match the keypoints
    matcher = cv2.DescriptorMatcher_create("FlannBased")
    matches = matcher.knnMatch(d1, d2, k=2)
    print('number of matches: %d' % len(matches))

    # dist = [m.distance for m in matches]
    # min_dist = np.min(dist)
    # max_dist = np.max(dist)
    # good_matches = [m for m in matches if m.distance < (3 * min_dist)]
    good_matches = filter_matches(k1, k2, matches, ratio=0.75)
    print('number of matches higher than threshold: %d' % len(good_matches))

    obj = np.array([k1[m.queryIdx].pt for m in good_matches])
    scn = np.array([k2[m.trainIdx].pt for m in good_matches])

    homography_matrix = cv2.findHomography(obj, scn, cv2.RANSAC)
    r1, c1 = gray_image1.shape
    r2, c2 = gray_image2.shape
    result = cv2.warpPerspective(img1, homography_matrix[0], (c1 + c2, r1))
    result[:r2, :c2, :] = img2

    return result
Example #29
def align_images(image, goldimg, max_feats=300, max_percent=0.5):
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_goldimg = cv2.cvtColor(goldimg, cv2.COLOR_BGR2GRAY)

    # Use ORB to detect features
    orb = cv2.ORB_create(max_feats)
    keypts_image, desc_image = orb.detectAndCompute(gray_image, None)
    keypts_goldimg, desc_goldimg = orb.detectAndCompute(gray_goldimg, None)

    # Match features
    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(desc_image, desc_goldimg, None)

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not so good matches
    nb_matches = int(len(matches) * max_percent)
    matches = matches[:nb_matches]

    # Draw top matches
    img_matches = cv2.drawMatches(image, keypts_image, goldimg, keypts_goldimg,
                                  matches, None)

    # Extract location of good matches
    points_image = np.zeros((len(matches), 2), dtype=np.float32)
    points_goldimg = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        points_image[i, :] = keypts_image[match.queryIdx].pt
        points_goldimg[i, :] = keypts_goldimg[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points_image, points_goldimg, cv2.RANSAC)

    # Use homography
    height, width, channels = goldimg.shape
    aligned = cv2.warpPerspective(image,
                                  h, (width, height),
                                  borderValue=(205, 205, 205))
    return aligned, h, img_matches
    def get_circular_features_akaze(self, cl_img, cr_img, nl_img, nr_img, cl_feats=None, y_err=1):
        img_sz = cl_img.shape[:2]
        ftr_det = cv2.AKAZE_create()
        # ftr_det = cv2.BRISK_create()
        matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)

        if len(cl_img.shape) == 3 and cl_img.shape[2] > 1:
            cl_img = cv2.cvtColor(cl_img, cv2.COLOR_RGB2GRAY)
            cr_img = cv2.cvtColor(cr_img, cv2.COLOR_RGB2GRAY)
            nl_img = cv2.cvtColor(nl_img, cv2.COLOR_RGB2GRAY)
            nr_img = cv2.cvtColor(nr_img, cv2.COLOR_RGB2GRAY)

        cl_det = ftr_det.detectAndCompute(cl_img, None)
        nl_det = ftr_det.detectAndCompute(nl_img, None)
        nr_det = ftr_det.detectAndCompute(nr_img, None)

        nl_kp, nl_des, nr_kp, nr_des = self._disparity_feature_matching(nl_det, nr_det, y_err=y_err)
        cl_kp, cl_des = cl_det

        nl_new_kp = []
        nr_new_kp = []
        cl_new_kp = []
        matches = matcher.knnMatch(cl_des, nl_des, 2)
        for m, n in matches:
            if m.distance < 0.8 * n.distance:
                pt1 = cl_kp[m.queryIdx].pt
                pt2 = nl_kp[m.trainIdx].pt

                y_err = abs(pt1[1]-pt2[1])
                x_err = abs(pt1[0]-pt2[0])
                err = np.array([x_err, y_err])

                if np.linalg.norm(err) < 150:
                    cl_new_kp.append(cl_kp[m.queryIdx])
                    nl_new_kp.append(nl_kp[m.trainIdx])
                    nr_new_kp.append(nr_kp[m.trainIdx])

        cl_feats = [kp.pt for kp in cl_new_kp]
        nl_feats = [kp.pt for kp in nl_new_kp]
        nr_feats = [kp.pt for kp in nr_new_kp]

        return np.array(cl_feats), None, np.array(nl_feats), np.array(nr_feats)