Example no. 1
def q1(image):
    sd = SiftDetector()
    # a) Extract SIFT features with default parameters and show the keypoints on the image
    keyPoints = sd.detector.detect(image, None)

    #print(len(keyPoints))
    # b) To achieve better visualization of the keypoints, reduce their number.
    # Hint: vary nfeatures so that the number of keypoints becomes about 10% of
    # all default keypoints, i.e. 10% of 6233 ≈ 623
    params = {}
    params["n_features"] = 623
    params["n_octave_layers"] = 3
    #params["contrast_threshold"] = 0.03
    params["contrast_threshold"] = 0.1
    params["edge_threshold"] = 10
    # sigma of the Gaussian applied at octave 0
    #params["sigma"] = 1.5
    params["sigma"] = 1.6
    sift = SiftDetector(params=params)
    keyPoints1 = sift.detector.detect(image, None)
    
    # Draw into fresh copies so the two outputs don't overwrite each other
    img = cv2.drawKeypoints(image, keyPoints, None)
    img2 = cv2.drawKeypoints(image, keyPoints1, None)
    
    cv2.imwrite('a.jpg', img)
    cv2.imwrite('b.jpg', img2)
    return sift
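q1 (and q2/q3 below) relies on a SiftDetector wrapper class that is not shown. A minimal sketch of what it presumably looks like, assuming the params keys map one-to-one onto the cv2.SIFT_create arguments:

import cv2

class SiftDetector:
    """Hypothetical wrapper assumed by q1/q2/q3: exposes the OpenCV SIFT
    detector as .detector and maps the params dict onto SIFT_create."""
    def __init__(self, params=None):
        params = params or {}
        self.detector = cv2.SIFT_create(
            nfeatures=params.get("n_features", 0),
            nOctaveLayers=params.get("n_octave_layers", 3),
            contrastThreshold=params.get("contrast_threshold", 0.04),
            edgeThreshold=params.get("edge_threshold", 10),
            sigma=params.get("sigma", 1.6))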
Example no. 2
def sift_feature_descript(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    sift = cv.xfeatures2d.SIFT_create()
    # Find keypoints in the image; pass a mask instead of None to search only part of it
    kp = sift.detect(gray, None)
    # DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS draws a circle matching each keypoint's size and shows its orientation
    cv.drawKeypoints(gray, kp, image, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv.imshow('sift point', image)
Example no. 3
def q3(image, sift1):
    center = get_img_center(image)
    # a) Rotate the given image clockwise by 60 degrees.
    rot = imutils.rotate_bound(image, 60)
    #rot = rotate(image, center[0], center[1], 60)
    plt.imshow(rot), plt.show()
    # b) Extract the SIFT features and show the keypoints on the rotated image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    keyPoints1, des1 = sift1.detector.detectAndCompute(image, None)
    keyPoints2, des2 = sift1.detector.detectAndCompute(rot, None)

    bf = cv2.BFMatcher()
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the rotated image with those of the
    # original image. Note: plain BFMatcher.match() is used here; the nearest-neighbour
    # distance ratio method the task asks for is sketched after this example.
    matches = bf.match(des1, des2)
    # Sort in ascending order of distance so the best matches (lowest distance) come first
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the best-matching descriptors on both the original and the
    # rotated image (top 7 drawn here; the task asked for 5)
    img_q3 = cv2.drawMatches(
        image, keyPoints1, rot, keyPoints2, matches[:7], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q3), plt.show()
    img3 = cv2.drawKeypoints(image, keyPoints1, None)
    cv2.imwrite('b3.jpg', img3)
    cv2.imwrite('d3.jpg', img_q3)
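The task in part d) names the nearest-neighbour distance ratio method, while the code above uses plain BFMatcher.match(). A minimal sketch of the ratio test itself, usable with the des1/des2 computed above; the 0.75 threshold is the conventional choice, not taken from the original:

import cv2

def nndr_match(des1, des2, ratio=0.75):
    # k=2 returns the two nearest neighbours needed for the ratio test
    bf = cv2.BFMatcher()
    pairs = bf.knnMatch(des1, des2, k=2)
    # Lowe's criterion: keep a match only if it is clearly better than
    # the second-best candidate
    good = [m for m, n in pairs if m.distance < ratio * n.distance]
    return sorted(good, key=lambda m: m.distance)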
Example no. 4
def fast_corner_detect(image):
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, (9, 9), 25)  # 9x9 kernel, sigmaX = 25
    fast = cv.FastFeatureDetector_create()
    kp = fast.detect(gray, None)
    image = cv.drawKeypoints(image, kp, None, color=(255, 0, 0))
    cv.imshow('image for fast', image)
Example no. 5
    def draw_keypoints(self, img, kps):

        img = cv2.drawKeypoints(img, kps, None, (255, 0, 0), 4)  # flags=4 is DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS

        plt.figure('Keypoints')
        plt.title('Image')
        plt.imshow(img)
        plt.show()
Example no. 6
def fast_detection(image):
    fast = cv2.FastFeatureDetector_create()
    # Disable non-max suppression before detecting; setting it after detect() has no effect on kp
    fast.setNonmaxSuppression(0)
    kp = fast.detect(image, None)
    image_with_keypoints = cv2.drawKeypoints(image,
                                             kp,
                                             None,
                                             color=(255, 0, 0))
    return image_with_keypoints
Example no. 7
 def SURF(self):
     img = cv2.imread(os.path.join(root, '..', 'static', 'photos', session['org_img']))
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     surf = cv2.xfeatures2d.SURF_create()
     kp = surf.detect(gray, None)
     img = cv2.drawKeypoints(gray, kp, img)
     filename = str(randint(1000000000, 9999999999)) + session['org_img']
     cv2.imwrite(os.path.join(root, '..', 'static', 'photos', filename), img)
     session['corner_img'] = filename
Example no. 8
 def ORB(self):
     img = cv2.imread(os.path.join(root, '..', 'static', 'photos', session['org_img']))
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     orb = cv2.ORB_create(nfeatures=1500)
     kp = orb.detect(gray, None)
     img = cv2.drawKeypoints(gray, kp, img)
     filename = str(randint(1000000000, 9999999999)) + session['org_img']
     cv2.imwrite(os.path.join(root, '..', 'static', 'photos', filename), img)
     session['corner_img'] = filename
Example no. 9
 def FAST(self):
     img = cv2.imread(os.path.join(root, '..', 'static', 'photos', session['org_img']))
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     fast = cv2.FastFeatureDetector_create(threshold=25)
     # find and draw the keypoints
     kp = fast.detect(gray, None)
     gray = cv2.drawKeypoints(gray, kp, None, color=(255, 0, 0))
     filename = str(randint(1000000000, 9999999999)) + session['org_img']
     cv2.imwrite(os.path.join(root, '..', 'static', 'photos', filename), gray)
     session['corner_img'] = filename
Example no. 10
 def _test_draw_features(self, img):
     '''
     Built-in test function for feature extraction.\n
     img: np.array
     '''
     gimg = self._gaussian_filter(img)
     kp, _ = self._get_feature(gimg, 0)
     out = cv.drawKeypoints(img, kp, None, color=(0, 0, 255))
     cv.imshow('test_draw_features', out)
     cv.waitKey()
     cv.destroyAllWindows()
Example no. 11
 def draw(self, source, keypoints, dest=None):
     try:
         if dest is None:
             dest = source
         return cv2.drawKeypoints(
             source,
             keypoints,
             dest,
             (0, 0, 255),
             cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
         )
     except cv2.error as e:
         raise CV2Error(str(e))
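CV2Error is not defined in this snippet; presumably it is a thin application-level wrapper re-raised from cv2.error, e.g.:

class CV2Error(Exception):
    """Hypothetical application-level error wrapping cv2.error."""
    pass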
Example no. 12
 def feature_extraction(image):
     """
     特征点检测,没用到
     :param image:
     :return:
     """
     if len(image.shape) == 3:
         image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     akaze = cv2.AKAZE_create()
     kps = akaze.detect(image, None)
     draw = image.copy()
     draw = cv2.drawKeypoints(image, kps, draw)
     return draw, kps
Example no. 13
def draw_blob_circles(image, blobs):
    """draws the blobs from the detection on to the image for visualisation

    https://www.learnopencv.com/blob-detection-using-opencv-python-c/

    :param blobs: the blobs from cv2's blob detector
    :type blobs: cv2 blobs
    :param image: the image to draw the blobs onto
    :type image: cv2 image
    :return: the image with the blobs as circles
    :rtype: cv2 image
    """
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    img = cv2.drawKeypoints(image, blobs, np.array(
        []), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    return img
Example no. 14
def getSymmetry(img):

    # Initiate ORB detector
    orb = cv.ORB_create()
    # find the keypoints with ORB
    kp = orb.detect(img, None)
    # compute the descriptors with ORB
    kp, des = orb.compute(img, kp)
    # DRAW_RICH_KEYPOINTS draws each keypoint with its size and orientation
    img2 = cv.drawKeypoints(img,
                            kp,
                            None,
                            color=(0, 255, 0),
                            flags=(cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    plt.imshow(img2), plt.show()

    # For each keypoint, compare it to the others and call it symmetric if there is a
    # corresponding point, e.g. one whose angle mirrors it (opposite direction, parallel)
    numKeyPoints = len(kp)
    matched = []
    index = 0

    # Repeatedly try to pair kp[0] with a partner whose angle mirrors it across the
    # Y axis and whose size is similar; matched pairs are removed from kp
    while index < len(kp):
        for i in range(1, len(kp)):
            # simple check for whether size is close enough and angle reflected along Y is close enough
            if anglesClose(kp[0].angle, 360 - kp[i].angle) and sizeClose(
                    kp[0].size, kp[i].size):
                matched.append(kp[0])
                matched.append(kp[i])
                del kp[i]
                del kp[0]
                break
        index += 1

    return len(matched) / numKeyPoints
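anglesClose and sizeClose are not shown. Minimal sketches under assumed tolerances (the thresholds below are illustrative, not from the original):

def anglesClose(a, b, tol=10.0):
    # Angles are close if they differ by less than tol degrees,
    # accounting for wrap-around at 360
    diff = abs(a - b) % 360
    return min(diff, 360 - diff) < tol

def sizeClose(s1, s2, tol=0.2):
    # Sizes are close if their relative difference is below tol (20%)
    return abs(s1 - s2) <= tol * max(s1, s2)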
Example no. 15
def q2(image, sift):
    # a) Enlarge the given image by a scale percentage of 115.
    scale = 115 / 100

    width = int(image.shape[1] * scale)
    height = int(image.shape[0] * scale)
    new_dim = (width, height)
    resized = cv2.resize(image, new_dim)
    

    # b) Extract the SIFT features and show the keypoints on the scaled image using the same
    # parameter setting as for Task 1 (for the reduced number of keypoints).
    ## find the keypoints and descriptors using sift detector
    keyPoints1, des1 = sift.detector.detectAndCompute(image, None)
    # Detect keypoints and compute descriptors on the scaled image with the same detector
    keyPoints2, des2 = sift.detector.detectAndCompute(resized, None)
    img2 = cv2.drawKeypoints(image, keyPoints1, None)
    # Hint: brute-force matching is available in OpenCV for feature matching.
    bf_matcher = cv2.BFMatcher()
    # Use BFMatcher.match() to get the best matches between the two images
    matches = bf_matcher.match(des1, des2)
    #matches = bf_matcher.knnMatch(des1, des2, k=2)
    # c) The keypoints in both images are similar, which shows that they share the same common features.

    # d) Match the SIFT descriptors of the keypoints of the scaled image with those of the
    # original image. As in Example no. 3, plain matching is used here; see the
    # nearest-neighbour distance ratio sketch after that example.
    # Sort in ascending order of distance so the best matches (lowest distance) come first
    matches = sorted(matches, key=lambda x: x.distance)

    # Show the keypoints of the best-matching descriptors on both the original and the
    # scaled image (top 6 drawn here; the task asked for 5)
    img_q2 = cv2.drawMatches(image, keyPoints1, resized, keyPoints2,
                             matches[:6], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    plt.imshow(img_q2), plt.show()
    cv2.imwrite('d2.jpg', img_q2)
    cv2.imwrite('b2.jpg', img2)
Example no. 16
def rectify_pair(image_left, image_right, viz=False):
    # Feature point matching
    # 1. Detect keypoints with SURF
    grayL = cv2.cvtColor(image_left, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(image_right, cv2.COLOR_BGR2GRAY)
    surf = cv2.xfeatures2d.SURF_create()

    # find the keypoints and descriptors with SURF
    kp1, des1 = surf.detectAndCompute(grayL, None)
    kp2, des2 = surf.detectAndCompute(grayR, None)
    img = cv2.drawKeypoints(grayL, kp1, image_left)
    cv2.imshow("keyPointsOfLeft", img)

    # 2. Match keypoints with BFMatcher
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)
    # match the feature descriptors
    matches = bf.match(des1, des2)
    points1 = []
    points2 = []
    for match in matches:
        points1.append(kp1[match.queryIdx].pt)
        points2.append(kp2[match.trainIdx].pt)
    #matches = sorted(matches, key=lambda x: x.distance)
    # print(len(matches))
    # Note: matches are unsorted here, so matches[:20] below are arbitrary;
    # uncomment the sort above to draw the 20 best
    img3 = cv2.drawMatches(grayL, kp1, grayR, kp2, matches[:20], None, flags=2)
    cv2.imshow('matches', img3)

    # find the fundamental matrix
    F, mask = cv2.findFundamentalMat(np.array(points1), np.array(points2),
                                     cv2.RANSAC, 3, 0.99)

    # rectify the images, produce the homographies: H_left and H_right
    # (stereoRectifyUncalibrated expects the image size as (width, height))
    retval, H_left, H_right = cv2.stereoRectifyUncalibrated(
        np.array(points1), np.array(points2), F, image_left.shape[1::-1])

    return F, H_left, H_right
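A usage sketch for the returned homographies, assuming the caller wants the rectified images themselves (note that cv2.warpPerspective takes the destination size as (width, height)):

F, H_left, H_right = rectify_pair(image_left, image_right)
h, w = image_left.shape[:2]
# warp each view into its rectified frame using the homography
rect_left = cv2.warpPerspective(image_left, H_left, (w, h))
rect_right = cv2.warpPerspective(image_right, H_right, (w, h))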
Example no. 17
import numpy as np
from cv2 import cv2

img = cv2.imread("street.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# requires the opencv_contrib_python package
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, None)
img = cv2.drawKeypoints(gray, kp, img)

# cv2.imwrite('sift_keypoints.jpg',img)
img = cv2.drawKeypoints(gray,
                        kp,
                        img,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite("sift_keypoints.jpg", img)
Example no. 18
def get_orb(image):
    orb = cv2.ORB_create(nfeatures=2000)
    keypoints_orb, descriptors = orb.detectAndCompute(image, None)
    image_with_keypoints = cv2.drawKeypoints(image, keypoints_orb, None)
    return image_with_keypoints
Example no. 19
def main(args=None):
    """
    Main entry point.

    Args:
        args : list
            A list of arguments, as if they were input on the command line.
    """

    parser = get_parser()
    args = parser.parse_args(args)

    if not args.input:
        print(
            "There was no input file set! Please use --input path_to_file or use --help for more information."
        )
    else:
        if not os.path.isfile(args.input):
            print("%s is no file!" % args.input)
        else:
            # start program

            # Read image
            im = cv2.imread(args.input, cv2.IMREAD_GRAYSCALE)

            # Apply threshold
            ret, im = cv2.threshold(im, float(args.threshold), 255,
                                    cv2.THRESH_BINARY)

            kernel = np.ones((6, 6), np.uint8)
            erosion = cv2.erode(im, kernel, iterations=1)
            opening = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
            im = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)

            # Setup SimpleBlobDetector parameters.
            params = cv2.SimpleBlobDetector_Params()

            # filter by color
            if args.color:
                params.filterByColor = True
                params.blobColor = int(args.color)
            else:
                params.filterByColor = False

            # Filter by Circularity
            if args.circularity:
                params.filterByCircularity = True
                params.minCircularity = float(args.circularity)
            else:
                params.filterByCircularity = False

            # Filter by Convexity
            if args.convexity:
                params.filterByConvexity = True
                params.minConvexity = float(args.convexity)
            else:
                params.filterByConvexity = False

            # Filter by Inertia
            if args.inertia:
                params.filterByInertia = True
                params.minInertiaRatio = float(args.inertia)
            else:
                params.filterByInertia = False

            # Filter by Size
            if args.min or args.max:
                params.filterByArea = True
                # fall back to wide-open bounds when only one limit is given
                params.minArea = int(args.min) if args.min else 0
                params.maxArea = int(args.max) if args.max else int(1e9)
            else:
                params.filterByArea = False

            # Create a detector with the parameters
            detector = cv2.SimpleBlobDetector_create(params)

            # Detect blobs.
            keypoints = detector.detect(im)

            # Draw detected blobs as red circles.
            # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle
            # corresponds to the size of blob
            im_with_keypoints = cv2.drawKeypoints(
                im, keypoints, np.array([]), (0, 0, 255),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

            # Show keypoints
            print("%s: %i" % (args.input, len(keypoints)))

            if args.output:
                cv2.imwrite(args.output, im_with_keypoints)

            if args.plot:
                plt.imshow(im_with_keypoints)
                plt.show()
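get_parser is not shown; a minimal sketch reconstructed from the attributes main() reads (flag names are inferred from the code above, defaults are illustrative):

import argparse

def get_parser():
    p = argparse.ArgumentParser(description="SimpleBlobDetector on a thresholded image")
    p.add_argument("--input", help="path to the input image")
    p.add_argument("--output", help="optional path for the annotated output image")
    p.add_argument("--threshold", default=127, help="binary threshold value")
    p.add_argument("--color", help="blob color to keep (0 dark, 255 light)")
    p.add_argument("--circularity", help="minimum circularity, 0..1")
    p.add_argument("--convexity", help="minimum convexity, 0..1")
    p.add_argument("--inertia", help="minimum inertia ratio, 0..1")
    p.add_argument("--min", help="minimum blob area in pixels")
    p.add_argument("--max", help="maximum blob area in pixels")
    p.add_argument("--plot", action="store_true", help="show the result with matplotlib")
    return p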
Example no. 20
import numpy as np
from cv2 import cv2

img = cv2.imread('resource/block_test.png', 0)

# Initiate FAST object with default values
fast = cv2.FastFeatureDetector_create()

# find and draw the keypoints
kp = fast.detect(img, None)
img2 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))

# Print all default params
# print("Threshold:", fast.getThreshold())
# print("nonmaxSuppression:", fast.getNonmaxSuppression())
# print("neighborhood:", fast.getType())
# print("Total Keypoints with nonmaxSuppression:", len(kp))

# Disable nonmaxSuppression
# fast.setBool('nonmaxSuppression',0)
fast = cv2.FastFeatureDetector_create(nonmaxSuppression=0)
kp = fast.detect(img)

print "Total Keypoints without nonmaxSuppression: ", len(kp)

img3 = cv2.drawKeypoints(img, kp, None, color=(255, 0, 0))

cv2.imshow('fast', img2)
cv2.imshow('fast nonmaxSuppression', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example no. 21
def process_image(msg):
    try:
        # convert sensor_msgs/Image to OpenCV Image
        bridge = CvBridge()

        # original Image
        orig = bridge.imgmsg_to_cv2(msg, "bgr8")
        # drawImg = orig

        # Resized image (half size; resizing happens before the grayscale conversion)
        resized = cv2.resize(src=orig, dsize=None, fx=0.5, fy=0.5)
        drawImg = resized

        # Convert to Single Channel
        gray = cv2.cvtColor(src=resized, code=cv2.COLOR_BGR2GRAY)
        drawImg = cv2.cvtColor(src=gray, code=cv2.COLOR_GRAY2BGR)

        # Applying a thresholding to the image
        threshVal = 150
        ret, thresh = cv2.threshold(src=gray,
                                    thresh=threshVal,
                                    maxval=255,
                                    type=cv2.THRESH_BINARY)
        drawImg = cv2.cvtColor(src=thresh, code=cv2.COLOR_GRAY2BGR)

        # detect the outer pump circle
        # HoughCircles expects integer radii
        pumpRadiusRange = (int(PUMP_DIAMETER / 2 - 2), int(PUMP_DIAMETER / 2 + 2))
        pumpCircles = cv2.HoughCircles(image=thresh,
                                       method=cv2.HOUGH_GRADIENT,
                                       dp=1,
                                       minDist=PUMP_DIAMETER,
                                       param2=2,
                                       minRadius=pumpRadiusRange[0],
                                       maxRadius=pumpRadiusRange[1])

        # Plotting the circles with Error Checking
        plotCircles(img=drawImg, circles=pumpCircles, color=(255, 0, 0))
        if (pumpCircles is None):
            raise Exception("No pump circles found")
        elif len(pumpCircles[0]) != 1:
            raise Exception(
                "Wrong # of pump circles found: {} expected {} ".format(
                    len(pumpCircles[0]), 1))
        else:
            pumpCircle = pumpCircles[0][0]

        # detect the blobs inside the pump body
        pistonArea = 3.14159 * PISTON_DIAMETER**2 / 4
        blobParams = cv2.SimpleBlobDetector_Params()
        blobParams.filterByArea = True
        blobParams.minArea = 0.8 * pistonArea
        blobParams.maxArea = 1.2 * pistonArea
        blobDetector = cv2.SimpleBlobDetector_create(blobParams)
        blobs = blobDetector.detect(thresh)

        # OpenCV check
        drawImg = cv2.drawKeypoints(
            image=drawImg,
            keypoints=blobs,
            outImage=None,
            color=(0, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        if len(blobs) != PISTON_COUNT:
            raise Exception("Wrong # of pistons: found {} expected {}".format(
                len(blobs), PISTON_COUNT))
        # pistonCenters = [(int(b.pt[0], int(b.pt[1]))) for b in blobs]

        # Finally showing the size
        ShowImage(drawImg)
    except Exception as err:
        print(err)
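plotCircles, ShowImage and the PUMP_/PISTON_ constants come from the surrounding ROS node and are not shown. Minimal sketches, with illustrative constant values, assuming circles arrive in cv2.HoughCircles output format:

import cv2

PUMP_DIAMETER = 360    # illustrative values, in pixels
PISTON_DIAMETER = 20
PISTON_COUNT = 7

def plotCircles(img, circles, color):
    # cv2.HoughCircles returns None or an array of shape (1, N, 3) of (x, y, r)
    if circles is None:
        return
    for (x, y, r) in circles[0]:
        cv2.circle(img, (int(x), int(y)), int(r), color, 2)

def ShowImage(img):
    cv2.imshow("process_image", img)
    cv2.waitKey(1)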
Example no. 22
from cv2 import cv2
import numpy as np

img = cv2.imread('resource/building.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# SIFT was patented, so older OpenCV builds ship it in the xfeatures2d contrib module
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, None)

# drawKeypoints requires an output-image argument; None allocates a new one
img = cv2.drawKeypoints(gray, kp, None)

cv2.imshow('sift', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example no. 23
from cv2 import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread("F:\Percobaan\1.png", cv2.IMREAD_GRAYSCALE)

sift = cv2.xfeatures2d.SIFT_create()
# detectAndCompute returns both the keypoints and their descriptors in one call
keypoints_sift, descriptors = sift.detectAndCompute(img, None)
img = cv2.drawKeypoints(img, keypoints_sift, None)

#cv2.imwrite('kawung 1 rotate.png', img)
plt.imshow(img), plt.title('image')
plt.xticks([]), plt.yticks([])
plt.show()
Example no. 24
# ORB discretizes the angle in increments of 2*pi/30 (12 degrees) and constructs a lookup table of precomputed BRIEF patterns. As long as the keypoint orientation theta is consistent across views, the correct set of points S_theta will be used to compute the descriptor.

# BRIEF has an important property: each bit feature has a large variance and a mean near 0.5. But once it is oriented along the keypoint direction, it loses this property and becomes more distributed. High variance makes a feature more discriminative, since it responds differentially to inputs. Another desirable property is to have the tests uncorrelated, since then each test contributes to the result. To achieve all this, ORB runs a greedy search among all possible binary tests to find the ones that have both high variance and means close to 0.5, as well as being uncorrelated. The result is called rBRIEF.

# For descriptor matching, multi-probe LSH, which improves on traditional LSH, is used. The paper says ORB is much faster than SURF and SIFT, and the ORB descriptor works better than SURF's. ORB is a good choice on low-power devices for panorama stitching etc.

# ORB in OpenCV

# As usual, we create an ORB object with cv2.ORB_create() or via the feature2d common interface. It has a number of optional parameters. The most useful are nfeatures, the maximum number of features to retain (500 by default), and scoreType, which selects the Harris score or the FAST score to rank features (Harris by default). Another parameter, WTA_K, decides the number of points that produce each element of the oriented BRIEF descriptor. By default it is two, i.e. two points are selected at a time; in that case NORM_HAMMING distance is used for matching. If WTA_K is 3 or 4, taking 3 or 4 points to produce each descriptor element, the matching distance is NORM_HAMMING2.

# Below is a simple code sample which shows the use of ORB.

import numpy as np
from cv2 import cv2
from matplotlib import pyplot as plt

img = cv2.imread('resource/block_test.png',0)

orb = cv2.ORB_create()

# find the keypoints with ORB
kp = orb.detect(img,None)

# compute the descriptors with ORB
kp, des = orb.compute(img, kp)

# draw only keypoints location,not size and orientation

img2 = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)
plt.imshow(img2), plt.show()
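The notes above say ORB descriptors are matched with NORM_HAMMING (NORM_HAMMING2 when WTA_K is 3 or 4). A minimal matching sketch on that basis, assuming two input images:

def match_orb(img_a, img_b, n_draw=10):
    orb = cv2.ORB_create()  # WTA_K=2 by default, so NORM_HAMMING applies
    kp_a, des_a = orb.detectAndCompute(img_a, None)
    kp_b, des_b = orb.detectAndCompute(img_b, None)
    # Hamming distance is the right metric for binary BRIEF descriptors
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des_a, des_b), key=lambda m: m.distance)
    return cv2.drawMatches(img_a, kp_a, img_b, kp_b, matches[:n_draw], None, flags=2)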
Example no. 25
    # (fragment: runs inside a video-capture loop; `frame`, `ret`, `video` and `vid`
    # come from the surrounding code)
    params.filterByConvexity = False
    params.minConvexity = 0.87

    # Inertia
    params.filterByInertia = False
    params.minInertiaRatio = 0.01

    # Create the blob detector with the params (constructor name differs between OpenCV 2.x and 3.x+)
    ver = (cv2.__version__).split('.')

    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(frame)
    with_kp = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    # If a frame was read, write it to the output video and display it with keypoints
    if ret:
        video.write(with_kp)
        cv2.imshow('frame', with_kp)
    else:
        vid.release()
        break
    # Close out / pause video
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
    if key == ord('p'):
        cv2.waitKey(-1)
Example no. 26
# Display ORB features and keypoints
import cv2.cv2 as cv

# Load the image file
img = cv.imread('./imagedata/image6.jpg')
img = cv.resize(img, (600, 400))
# ORB feature detector
detector = cv.ORB_create()
# Detect keypoints and compute descriptors
kp, des = detector.detectAndCompute(img, None)
print("Descriptors:")
print(des)
print("Descriptor shape:")
print(des.shape)
print("Descriptor dtype:")
print(des.dtype)
# Draw the keypoints on the image
imgkp = cv.drawKeypoints(img, kp, None)
# Show the image
cv.imshow("keypoints", imgkp)

cv.waitKey(0)
cv.destroyAllWindows()
Example no. 27
# plt.title("Transformed")
# plt.imshow(img2), plt.show()

# SIFT  ################################################################
print("[INFO] Starting SIFT!")

# Create an instance of the SIFT class
sift = cv2.SIFT_create()

print("[INFO] stage 1: extracting features!")
kp1 = sift.detect(img1, None)
kp2 = sift.detect(img2, None)

# showing keypoints in images
img11 = cv2.drawKeypoints(img1,
                          kp1,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
img22 = cv2.drawKeypoints(img2,
                          kp2,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

# storing the image with its keypoints
cv2.imwrite("images/outputs/sift_keypoints_img1.jpg", img11)
cv2.imwrite("images/outputs/sift_keypoints_img2.jpg", img22)

# plt.imshow(img1), plt.show()
# plt.imshow(img2), plt.show()

print("[INFO] stage 2: computing description!")
kp1, des1 = sift.compute(img1, kp1)