Code example #1
File: cv2_helpers.py Project: syedomair0/CV-learning
def create_flann_picture(self, left_image, right_image):
    orb = cv2.ORB_create(nfeatures=2000)
    left_keypoints, left_descriptors = orb.detectAndCompute(left_image, None)
    right_keypoints, right_descriptors = orb.detectAndCompute(
        right_image, None)

    FLANN_INDEX_LSH = 6
    index_params = dict(
        algorithm=FLANN_INDEX_LSH,
        table_number=6,  # 12 is the value recommended in the docs
        key_size=12,  # 20 recommended
        multi_probe_level=1)  # 2 recommended

    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(left_descriptors, right_descriptors, k=2)

    # mask dimensions must match the (possibly ragged) knnMatch result
    matchesMask = [[0] * len(pair) for pair in matches]

    # LSH can return fewer than k neighbours for some queries, so guard the unpack
    for i, pair in enumerate(matches):
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.7 * n.distance:
            matchesMask[i] = [1, 0]
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=cv2.DrawMatchesFlags_DEFAULT)
    final_image = cv2.drawMatchesKnn(left_image, left_keypoints, right_image,
                                     right_keypoints, matches, None,
                                     **draw_params)
    cv2.imshow("something", final_image)
    # hold the window until a key is pressed; 'q' closes it
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
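A minimal way to exercise the method above (the image file names are hypothetical, and since `self` is never used in the body, `None` can stand in for it):

import cv2

left = cv2.imread('left.jpg', cv2.IMREAD_GRAYSCALE)
right = cv2.imread('right.jpg', cv2.IMREAD_GRAYSCALE)
# self is unused inside create_flann_picture, so pass None for illustration
create_flann_picture(None, left, right)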
Code example #2
 def _get_feature(self, img, feature_type=0):
     '''
     Get features from a given image.
     img: np.array
     feature_type: int, 0: ORB, 1: SIFT, 2: FAST keypoints + SIFT descriptors
     return: keypoints, descriptor
     '''
     img = self._gaussian_filter(img)
     if feature_type == 0:
         # ORB
         orb = cv.ORB_create()
         kp, des = orb.detectAndCompute(img, None)
         return kp, des
     elif feature_type == 1:
         # SIFT
         sift = cv.xfeatures2d.SIFT_create()
         kp, des = sift.detectAndCompute(img, None)
         return kp, des
     elif feature_type == 2:
         # FAST corners + SIFT descriptors
         fast = cv.FastFeatureDetector_create()
         sift = cv.xfeatures2d.SIFT_create()
         kp = fast.detect(img)
         kp, des = sift.compute(img, kp)
         return kp, des
     else:
         print('Wrong feature type code!')
         # keep the return arity consistent with the other branches
         return None, None
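Note that `cv.xfeatures2d.SIFT_create` only exists in contrib builds of OpenCV older than 4.4; since 4.4 SIFT lives in the main module. A small compatibility shim (the helper name `make_sift` is ours), in case this snippet needs to run on either version:

import cv2 as cv

def make_sift():
    # SIFT moved from xfeatures2d into the main module in OpenCV 4.4
    if hasattr(cv, 'SIFT_create'):
        return cv.SIFT_create()
    return cv.xfeatures2d.SIFT_create()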
Code example #3
File: func.py Project: december403/Image-Stitching
def findMatchPair_ORB(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='ORB_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    mask = None
    H = None
    # Initiate ORB detector

    orb = cv2.ORB_create(ptsNum)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(ref_des, tar_des)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches)
            if mask[idx] == 1
        ])

    # honour the save flag (mirrors findMatchPair_GMS in the same file)
    if save:
        with open(fileName, 'wb') as f:
            np.save(f, src_pts)
            np.save(f, dst_pts)
        print('ORB matching pairs saved')

    return src_pts, dst_pts, tar_kp, ref_kp, matches, mask
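A sketch of calling the function above on two overlapping photos (file names and parameter values here are hypothetical):

import cv2

tar = cv2.imread('target.jpg')
ref = cv2.imread('reference.jpg')
src_pts, dst_pts, tar_kp, ref_kp, matches, mask = findMatchPair_ORB(
    tar, ref, ptsNum=5000, projError=5)
print(f'{int(mask.sum())} of {len(matches)} matches survive RANSAC')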
Code example #4
def ORB_matching(img1, img2):
    # Initiate ORB detector
    orb = cv.ORB_create(nfeatures=120000)  # maximum number of keypoints to locate

    # convert both images straight to grayscale for feature detection
    image1_gray = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
    image2_gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(image1_gray, None)
    kp2, des2 = orb.detectAndCompute(image2_gray, None)

    # create BFMatcher object
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(des1, des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    # Keep only the 4500 best (shortest-distance) matches
    p1 = np.array([kp1[m.queryIdx].pt for m in matches[:4500]])
    p2 = np.array([kp2[m.trainIdx].pt for m in matches[:4500]])

    des = des1[[m.queryIdx for m in matches[:4500]], :]

    print(f"Found {len(matches)} matches. Using {len(p1)} matches with shortest distance.")

    return p1, p2, des
Code example #5
File: ORBmin.py Project: chenzhike110/Fast-tracking
 def __init__(self, video_name, modelpath=None):
     self.maskradio = 2
     edgeThreshold = 2
     patchSize = 2
     self.orb = cv.ORB_create(edgeThreshold=edgeThreshold, patchSize=patchSize)
     self.video_name = video_name
     self.KNNModel = self.getKNNmodel(modelpath)
Code example #6
File: corners.py Project: ntajxyooj/computer-gv
 def ORB(self):
     img = cv2.imread(os.path.join(root, '..', 'static', 'photos', session['org_img']))
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     orb = cv2.ORB_create(nfeatures=1500)
     kp = orb.detect(gray, None)
     img = cv2.drawKeypoints(gray, kp, img)
     filename = str(randint(1000000000, 9999999999)) + session['org_img']
     cv2.imwrite(os.path.join(root, '..', 'static', 'photos', filename), img)
     session['corner_img'] = filename
Code example #7
def orb_stitcher(imgs):
    # find the keypoints with ORB
    orb1 = cv2.ORB_create(1000, 1.1, 13)  # nfeatures, scaleFactor, nlevels
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

    kp_master, des_master = orb1.detectAndCompute(imgs[0], None)
    kp_secondary, des_secondary = orb1.detectAndCompute(imgs[1], None)

    matches = bf.match(des_secondary, des_master)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    selected = []
    for m in matches:
        if m.distance < 40:
            selected.append(m)

    out_img = cv2.drawMatches(imgs[1], kp_secondary, imgs[0], kp_master,
                              selected, None)
    cv2.namedWindow('www', cv2.WINDOW_NORMAL)
    cv2.imshow('www', out_img)
    # cv2.imwrite('matches.jpg',out_img)
    cv2.waitKey(0)

    warped = None
    if len(selected) > 10:

        dst_pts = np.float32([kp_master[m.trainIdx].pt
                              for m in selected]).reshape(-1, 1, 2)
        src_pts = np.float32([kp_secondary[m.queryIdx].pt
                              for m in selected]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        h, w = imgs[0].shape[0:2]
        pts = np.float32([[0, 0], [w, 0], [w, h], [0, h],
                          [0, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        max_extent = np.max(dst, axis=0)[0].astype(int)[::-1]
        sz_out = (max(max_extent[1],
                      imgs[0].shape[1]), max(max_extent[0], imgs[0].shape[0]))

        # img2 = cv2.polylines(imgs[0], [np.int32(dst)], True, [0,255,0], 3, cv2.LINE_AA)

        cv2.namedWindow('w', cv2.WINDOW_NORMAL)

        warped = cv2.warpPerspective(imgs[1], M, dsize=sz_out)
        img_for_show = warped.copy()
        img_for_show[0:imgs[0].shape[0], 0:imgs[0].shape[1], 1] = imgs[0][:, :,
                                                                          1]
        cv2.imshow('w', img_for_show)
        cv2.waitKey(0)
    return warped
Code example #8
def getKeyPoints(image):
    """[takes in an image and detects the key points using cv2's ORB classifier]
    
    Arguments:
        image {[Object]} -- [the input image]
    
    Returns:
        [tuple] -- [tuple of the key points list and descriptor list]
    """
    orb = cv2.ORB_create(nfeatures=50, WTA_K=4)
    kp = orb.detect(image, None)
    kp, des = orb.compute(image, kp)
    return kp, des
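One caveat with the `WTA_K=4` setting above: descriptors produced this way must be compared with `cv2.NORM_HAMMING2` (which counts two bits at a time) rather than the usual `cv2.NORM_HAMMING`. A matching sketch under that assumption (`img_a` and `img_b` are hypothetical inputs):

import cv2

kp1, des1 = getKeyPoints(img_a)
kp2, des2 = getKeyPoints(img_b)

# NORM_HAMMING2 is required because the ORB detector used WTA_K=4
bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)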
Code example #9
File: orb.py Project: kevinglasson/mp_utils
def match(test, exemplar_descs):
    """using ORB decriptors compute match between test and exemplar_descs.
    
    :param test: image to test
    :type test: cv2 image
    :param exemplar: exemplar descriptions
    :type exemplar: list of orb descriptors
    :return: distance sorted matches
    :rtype: list
    """
    # create an ORB object
    orb = cv2.ORB_create()

    # convert to GRAYSCALE
    test_g = convert.bgr_to_gray(test)

    # Find the keypoints and descriptors with ORB
    _, descs = orb.detectAndCompute(test_g, None)

    # create a brute force matcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # init to hold the best prediction
    prediction = ('(none)', np.inf)

    # match against each exemplar
    for d in exemplar_descs:
        # match the descriptors
        matches = bf.match(d[1], descs)

        # sort in order of distance, lowest first
        sorted_matches = sorted(matches, key=lambda x: x.distance)

        # extract just the distances
        distances = [m.distance for m in sorted_matches]

        # calculate a score
        score = sum(distances[:4])

        # no matches
        if score == 0:
            score = np.inf
        # update prediction because this match is closer
        if score < prediction[1]:
            prediction = (d[0], score)

    return prediction[0]
Code example #10
def detectAndComputeAllImages(listOfImages):
    descriptors = []
    # Initiate the ORB detector
    orb = cv2.ORB_create()
    for imageURL in listOfImages:
        try:
            URL_to_PNG(imageURL, "tempImage")
            image = cv2.imread(
                'ImageSimilartion\\Products image after convert\\tempImage.png',
                0)
            # Find the keypoints and descriptors with ORB
            params = orb.detectAndCompute(image, None)
            descriptors.append(params[1])  # the descriptor array
            deleteImage("tempImage")
        except Exception:
            descriptors.append(None)
    return descriptors
Code example #11
File: orb.py Project: kevinglasson/mp_utils
def create_descriptions(dirname):
    """pre-compute a list of descriptors for ORB matching.
    
    :param dirname: directory to find the images to use
    :type dirname: str
    :return: descriptors and filenames
    :rtype: list of tuples
    """
    descs = []
    symbols = load_symbols(dirname)

    orb = cv2.ORB_create()

    for s in symbols:
        gray = convert.bgr_to_gray(s[1])
        _, desc = orb.detectAndCompute(gray, None)
        descs.append((s[0], desc))

    return descs
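`create_descriptions` pairs naturally with the `match` function from the same project in code example #9: precompute descriptors for a directory of exemplar symbols once, then classify incoming images against them. A sketch (the directory name and `test_image` are hypothetical):

exemplar_descs = create_descriptions('symbols/')
label = match(test_image, exemplar_descs)
print('best match:', label)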
Code example #12
File: func.py Project: december403/Image-Stitching
def findMatchPair_GMS(tar_img,
                      ref_img,
                      ptsNum=10000,
                      save=False,
                      fileName='GMS_matching_pair.npy',
                      RANSAC=True,
                      projError=50):
    # Initiate ORB detector
    mask = None  # stays None when RANSAC is disabled, but is still returned
    orb = cv2.ORB_create(ptsNum, fastThreshold=0)
    # find the keypoints and descriptors with ORB
    ref_kp, ref_des = orb.detectAndCompute(ref_img, None)
    tar_kp, tar_des = orb.detectAndCompute(tar_img, None)

    # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.match(ref_des, tar_des)
    matches_GMS = cv2.xfeatures2d.matchGMS(ref_img.shape[0:2],
                                           tar_img.shape[0:2],
                                           ref_kp,
                                           tar_kp,
                                           matches,
                                           withRotation=True)
    src_pts = np.array([tar_kp[match.trainIdx].pt for match in matches_GMS])
    dst_pts = np.array([ref_kp[match.queryIdx].pt for match in matches_GMS])
    if RANSAC:
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, projError)
        src_pts = np.array([
            tar_kp[match.trainIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])
        dst_pts = np.array([
            ref_kp[match.queryIdx].pt for idx, match in enumerate(matches_GMS)
            if mask[idx] == 1
        ])

    if save:
        with open(fileName, 'wb') as f:
            np.save(f, src_pts)
            np.save(f, dst_pts)

        print('GMS matching pairs saved')
    return src_pts, dst_pts, tar_kp, ref_kp, matches_GMS, mask
Code example #13
def align_image(feature_image, target_image):
    # MAX_FEATURE and GOOD_MATCH_PERCENT are module-level constants in the
    # original file; values such as 500 and 0.15 are typical for this pattern
    ORB = cv2.ORB_create(MAX_FEATURE)
    feature_keypoints, feature_descriptors = ORB.detectAndCompute(
        feature_image, None)
    target_keypoints, target_descriptors = ORB.detectAndCompute(
        target_image, None)

    matcher = cv2.DescriptorMatcher_create(
        cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(feature_descriptors, target_descriptors, None)

    matches.sort(key=lambda x: x.distance, reverse=False)

    num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:num_good_matches]

    img_matches = cv2.drawMatches(feature_image, feature_keypoints,
                                  target_image, target_keypoints, matches,
                                  None)
    cv2.imwrite("image-matches.jpg", img_matches)

    feature_points = np.zeros((len(matches), 2), dtype=np.float32)
    target_points = np.zeros((len(matches), 2), dtype=np.float32)

    for i, match in enumerate(matches):
        feature_points[i, :] = feature_keypoints[match.queryIdx].pt
        target_points[i, :] = target_keypoints[match.trainIdx].pt

    homography_matrix, mask = cv2.findHomography(target_points, feature_points,
                                                 cv2.RANSAC)

    height, width, channels = feature_image.shape

    perspective_corrected_image = cv2.warpPerspective(target_image,
                                                      homography_matrix,
                                                      (width, height))

    return perspective_corrected_image, homography_matrix
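A sketch of using the aligner above, with assumed values for the module constants (file names are hypothetical):

import cv2

MAX_FEATURE = 500          # assumed value
GOOD_MATCH_PERCENT = 0.15  # assumed value

reference = cv2.imread('form_template.jpg')
scanned = cv2.imread('scanned_form.jpg')
aligned, H = align_image(reference, scanned)
cv2.imwrite('aligned.jpg', aligned)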
Code example #14
def match_image_to_model(X, model_des, query_img, threshold=0.75):
    # note: the threshold parameter is accepted but unused in this snippet

    orb = cv.ORB_create(nfeatures=120000)  # maximum number of keypoints to locate

    img_gray = cv.cvtColor(query_img, cv.COLOR_BGR2GRAY)

    # find the keypoints and descriptors with ORB
    query_kp, query_des = orb.detectAndCompute(img_gray, None)

    # create BFMatcher object
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

    # Match descriptors.
    matches = bf.match(model_des, query_des)

    # Keep only the closest matches
    matches = sorted(matches, key=lambda x: x.distance)
    
    matched_2D_points = np.array([query_kp[m.trainIdx].pt for m in matches[:4000]])
    matched_3D_points = X[:,[m.queryIdx for m in matches[:4000]]]

    return matched_2D_points, matched_3D_points
Code example #15
def getSymmetry(img):

    # Initiate ORB detector
    orb = cv.ORB_create()
    # find the keypoints with ORB
    kp = orb.detect(img, None)
    # compute the descriptors with ORB
    kp, des = orb.compute(img, kp)
    # draw only keypoints location,not size and orientation
    img2 = cv.drawKeypoints(img,
                            kp,
                            None,
                            color=(0, 255, 0),
                            flags=(cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS))
    plt.imshow(img2), plt.show()

    # for each keypoint, look for a counterpart of similar size whose angle is
    # (approximately) the reflection across the vertical axis
    numKeyPoints = len(kp)
    matched = []
    index = 0

    while index < len(kp):
        for i in range(1, len(kp)):
            # simple check for whether size is close enough and angle reflected along Y is close enough
            if anglesClose(kp[0].angle, 360 - kp[i].angle) and sizeClose(
                    kp[0].size, kp[i].size):
                matched.append(kp[0])
                matched.append(kp[i])
                del kp[i]
                del kp[0]
                break
        index += 1

    return len(matched) / numKeyPoints
Code example #16
def init_feature(name):
    chunks = name.split('-')

    if chunks[0] == 'sift':
        detector = cv2.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None  # Return None if unknown detector name

    if 'flann' in chunks:
        # FLANN_INDEX_KDTREE = 1 and FLANN_INDEX_LSH = 6 are module-level
        # constants in the original sample
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(
                algorithm=FLANN_INDEX_LSH,
                table_number=6,  # 12 is the value recommended in the docs
                key_size=12,  # 20 recommended
                multi_probe_level=1)  # 2 recommended

        matcher = cv2.FlannBasedMatcher(flann_params)
    else:
        matcher = cv2.BFMatcher(norm)

    return detector, matcher
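This helper mirrors OpenCV's `find_obj.py` sample: the name encodes the detector plus an optional `-flann` suffix for the matcher. A usage sketch (`img1`/`img2` are assumed to be loaded grayscale images):

detector, matcher = init_feature('orb-flann')
if detector is None:
    print('unknown feature type')
else:
    kp1, des1 = detector.detectAndCompute(img1, None)
    kp2, des2 = detector.detectAndCompute(img2, None)
    raw_matches = matcher.knnMatch(des1, des2, k=2)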
Code example #17
def ORB_descriptor(img, num_features=1000):
    orb = cv2.ORB_create(nfeatures=num_features, scoreType=cv2.ORB_FAST_SCORE)
    points, desc = orb.detectAndCompute(img, None)

    return points, desc
Code example #18
def tracking_lucas_kanade():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    img_chocolate = cv2.imread('marker.jpg')
    gray_chocolate = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    # dedicated canvas for drawing the optical-flow tracks
    canvas = np.zeros_like(old_frame)

    orb = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kpts1, descs1 = orb.detectAndCompute(gray_chocolate, None)

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (old_frame.shape[1], old_frame.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_lucas_kanade.avi', fourcc, 30.0,
                          image_size)

    frno = 0
    restart = False
    while True:
        frno += 1
        ret, frame = cap.read()
        if ret:

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                orb = cv2.ORB_create(1000, 1.1, 13)
                kpts2, descs2 = orb.detectAndCompute(frame_gray, None)
                restart = False

            kpts2, descs2 = orb.detectAndCompute(frame_gray, None)

            matches = bf.match(descs1, descs2)
            # Sort them in the order of their distance.
            dmatches = sorted(matches, key=lambda x: x.distance)

            ## extract the matched keypoints
            src_pts = np.float32([kpts1[m.queryIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kpts2[m.trainIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)

            ## find homography matrix and do perspective transform
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            h, w = img_chocolate.shape[:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            ## draw found regions
            frm = cv2.polylines(frame, [np.int32(dst)], True, (0, 0, 255), 1,
                                cv2.LINE_AA)

            # ## draw match lines
            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches[:8], None, flags=2)

            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                   dst_pts, None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = dst_pts[successful]

            # draw the tracks on the dedicated canvas (not on the RANSAC mask);
            # point coordinates must be ints for cv2.line/cv2.circle
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    canvas = cv2.line(canvas, (int(a), int(b)),
                                      (int(c), int(d)), color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4,
                                       color[i].tolist(), -1)
                    count_of_moved += 1

            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches, None, flags=2) #[:8]
            out.write(frame)

            cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)

            cv2.imshow('orb_match', frame)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
Code example #19
# Feature extraction and matching between images
# Feature type: ORB
import cv2 as cv

# load the image files
img1 = cv.imread('./imagedata/image6.jpg')
img1 = cv.resize(img1, (600, 400))
img2 = cv.imread('./imagedata/image2.jpg')
img2 = cv.resize(img2, (600, 400))
# ORB feature detector
detector = cv.ORB_create()
# extract keypoints and descriptors
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
# ORB descriptors are binary, so match with the Hamming norm
bf = cv.BFMatcher(cv.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)
# build the list of good matches with a ratio test
matched = []
for match1, match2 in matches:
    ratio = match1.distance / match2.distance
    if ratio < 0.8:
        matched.append([match1])
# display the result
imgmatches = cv.drawMatchesKnn(img1, kp1, img2, kp2, matched, None, flags=2)
cv.imshow("image matching", imgmatches)

cv.waitKey(0)
cv.destroyAllWindows()
Code example #20
# opencv -- feature matching -- BFMatcher
import cv2
from matplotlib import pyplot as plt

# read the two photos to be matched, as grayscale images
template = cv2.imread("template_adjust.jpg", 0)
target = cv2.imread("target1.jpg", 0)
orb = cv2.ORB_create()  # create the ORB feature detector
kp1, des1 = orb.detectAndCompute(template, None)  # keypoints and descriptors of template
kp2, des2 = orb.detectAndCompute(target, None)  # same for target
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # create the matcher
matches = bf.match(des1, des2)  # match the descriptors
matches = sorted(matches, key=lambda x: x.distance)  # sort by distance
result = cv2.drawMatches(template,
                         kp1,
                         target,
                         kp2,
                         matches[:40],
                         None,
                         flags=2)  # draw the matches
plt.imshow(result), plt.show()  # display with matplotlib
Code example #21
def main():
    homography = None
    # matrix of camera parameters (made up but works quite well for me)
    camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])

    # create ORB - Oriented FAST and Rotated BRIEF - keypoint detector
    orb = cv2.ORB_create(nfeatures=1000)  # retain max 1000 features

    # create BFMatcher object (Hamming norm, since ORB descriptors are binary)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)

    # image target
    model = cv2.imread('target_img.jpg')

    # calculate key point and description
    kp_model, des_model = orb.detectAndCompute(
        model, None)  # kp: key point, des: description

    # obj file
    obj = OBJ('wolf.obj', swapyz=True)

    # Webcam
    webcam = cv2.VideoCapture(0)

    while True:
        success, imgwebcam = webcam.read()
        # find and draw the keypoints of the frame
        kp_webcam, des_webcam = orb.detectAndCompute(imgwebcam, None)

        # finding match between 2 img
        matches = bf.knnMatch(des_model, des_webcam, k=2)
        # Taking good keypoints
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)

        # compute Homography if enough matches are found
        if len(good) > 15:
            # separate the source points from the destination points
            srcpts = np.float32([kp_model[m.queryIdx].pt
                                 for m in good]).reshape(-1, 1, 2)
            dstpts = np.float32([kp_webcam[m.trainIdx].pt
                                 for m in good]).reshape(-1, 1, 2)

            # compute Homography
            homography, mask = cv2.findHomography(srcpts, dstpts, cv2.RANSAC,
                                                  5)

            #find boundary around model
            h, w, channel = model.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            # project corners into frame
            dst = cv2.perspectiveTransform(pts, homography)
            # connect them with lines
            #imgwebcam = cv2.polylines(imgwebcam,[np.int32(dst)], True, 255, 3, cv2.LINE_AA)

            # if a valid homography matrix was found render object on model plane
            if homography is not None:
                # obtain 3D projection matrix from homography matrix and camera parameters
                projection = projection_matrix(camera_parameters, homography)
                # render object
                imgwebcam = render(imgwebcam, obj, projection, model)
                #imgwebcam = render(imgwebcam, model, projection)

        cv2.imshow('result', imgwebcam)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    webcam.release()
    cv2.destroyAllWindows()
    return 0
Code example #22
File: find_feature.py Project: Xenos24R/vscode
import cv2 as cv
import numpy as np

cap = cv.imread("C:/Users/32936/Desktop/2/cap.jpg")
model = cv.imread("C:/Users/32936/Desktop/2/book.jpg")

cap_slave = cap
MIN_MATCHES = 15

# start the ORB detector
orb = cv.ORB_create()

# matcher object
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)

# compute model keypoints and descriptors
kp_model, des_model = orb.detectAndCompute(model, None)

# compute scene keypoints and descriptors
kp_frame, des_frame = orb.detectAndCompute(cap, None)

# match frame descriptors against model descriptors
matches = bf.match(des_model, des_frame)

# sort by distance
matches = sorted(matches, key=lambda x: x.distance)

if len(matches) > MIN_MATCHES:
    #cap = cv.drawMatches(model,kp_model,cap,kp_frame,matches[:MIN_MATCHES],0,flags=2)

    cv.imshow('frame', cap)
    cv.waitKey(0)  # imshow needs a waitKey call to actually display
Code example #23
import cv2
import numpy as np

img1 = cv2.imread(r'Image Test/tiger.jpg', 0)
img2 = cv2.imread(r'Image Train/tiger.jpg', 0)

orb = cv2.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # Hamming norm for ORB's binary descriptors
matches = bf.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])
print(len(good))
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow('img1', img1)
cv2.imshow('img2', img2)
cv2.imshow('img3', img3)
cv2.waitKey(0)
Code example #24
def find_3d_points(image1_path, image2_path):
    img1 = cv2.imread(image1_path, cv2.IMREAD_GRAYSCALE)  # queryImage
    img2 = cv2.imread(image2_path, cv2.IMREAD_GRAYSCALE)  # trainImage

    # Initial calibration matrix from camera
    init_calibration_matrix = np.array(
        [
            [2.78228443e03, 0.00000000e00, 1.65670819e03],
            [0.00000000e00, 2.77797243e03, 1.19855894e03],
            [0.00000000e00, 0.00000000e00, 1.00000000e00],
        ]
    )
    distortion_coefficients = np.array(
        [0.07874525, -0.07184864, -0.00619498, 0.00252332, -0.09900985]
    )

    # Undistort images. getOptimalNewCameraMatrix: 1 tells us that we want to see the "black hills" after undistorting. Exchanging for 0 removes them.
    height, width = img1.shape[:2]
    calibration_matrix, roi = cv2.getOptimalNewCameraMatrix(
        init_calibration_matrix,
        distortion_coefficients,
        (width, height),
        1,
        (width, height),
    )
    # note: despite the "_distorted" names, these hold the undistorted images
    img1_distorted = cv2.undistort(
        img1, init_calibration_matrix, distortion_coefficients, None, calibration_matrix
    )
    img2_distorted = cv2.undistort(
        img2, init_calibration_matrix, distortion_coefficients, None, calibration_matrix
    )

    # Crop images
    x, y, w, h = roi
    img1_distorted = img1_distorted[y : y + h, x : x + w]
    img2_distorted = img2_distorted[y : y + h, x : x + w]

    # To display the undistorted images:
    # plt.imshow(img1_distorted), plt.show()
    # plt.imshow(img2_distorted), plt.show()

    # Create an ORB object
    orb = cv2.ORB_create()

    # Detect keypoints
    kp1 = orb.detect(img1_distorted, None)
    kp2 = orb.detect(img2_distorted, None)

    # Find descriptors
    kp1, des1 = orb.compute(img1_distorted, kp1)
    kp2, des2 = orb.compute(img2_distorted, kp2)

    # To draw the keypoints:
    #img1kp = cv2.drawKeypoints(img1, kp1, None, color=(0, 255, 0), flags=0) #flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    # img2kp = cv2.drawKeypoints(img2, kp2, None, color=(0, 255, 0), flags=0)
    #plt.imshow(img1kp), plt.show()
    # plt.imshow(img2kp), plt.show()

    # Brute-force matcher object. crossCheck=True means that it has to match both ways
    brute_force = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Matching descriptors
    matches = brute_force.match(des1, des2)

    # Clean the matches by distance
    matches = clean_matches(matches)

    # Sort matches in order of distance
    matches = sorted(matches, key=lambda x: x.distance)

    # To draw the first 20 matches:
    #img_matches = cv2.drawMatches(img1_distorted, kp1, img2_distorted, kp2, matches[:], None, flags = 2)
    #plt.imshow(img_matches), plt.show()

    # Extract coordinates
    points1 = extract_coordinates(matches, kp1, "queryIdx")
    points2 = extract_coordinates(matches, kp2, "trainIdx")

    # Find essential Matrix
    essential_matrix, _ = cv2.findEssentialMat(
        points1, points2, calibration_matrix, method=cv2.RANSAC, prob=0.999, threshold=3
    )
    # a valid essential matrix has determinant (close to) zero; use abs()
    # since numerical noise can make it slightly negative
    determinant = np.linalg.det(essential_matrix)
    eps = 1e-10
    if abs(determinant) > eps:
        raise Exception(
            "expected determinant to be close to zero, but is {}".format(determinant)
        )

    # Find camera2 position relative to camera1 (t is only in unit)
    _, R, t, _ = cv2.recoverPose(essential_matrix, points1, points2, calibration_matrix)

    # Create camera matrices
    M1 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))
    M2 = np.hstack((R, t))
    camera_matrix1 = np.dot(calibration_matrix, M1)
    camera_matrix2 = np.dot(calibration_matrix, M2)

    # Compute 3D points
    points_3d = []
    for c1, c2 in zip(points1, points2):
        point = cv2.triangulatePoints(camera_matrix1, camera_matrix2, c1, c2)
        points_3d.append(point)
    points_3d = cv2.convertPointsFromHomogeneous(np.array(points_3d))

    return points_3d, t
Code example #25
import numpy as np
from cv2 import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 10

img1 = cv2.imread('resource/box.jpg', 0)  # queryImage
img2 = cv2.imread('resource/box_in_scene.jpg', 0)  # trainImage

# Initiate ORB detector
orb = cv2.ORB_create()

# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)  # Hamming norm for ORB's binary descriptors
matches = bf.knnMatch(des1, des2, 2)
good = []
for m, n in matches:
    if m.distance < 0.9 * n.distance:
        good.append([m])

# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags = 2)
matchesMask = None
if len(good) > MIN_MATCH_COUNT:
    # collect the matched keypoint coordinates on each side
    src_pts = np.float32([kp1[m[0].queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)
Code example #26
def tracking_orb():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    ret, frm = cap.read()
    img_chocolate = cv2.imread('marker.jpg')

    frm_count = 0
    key = None

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (frm.shape[1], frm.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_orb.mp4', fourcc, 30.0, image_size)

    while ret:

        ## Create ORB object and BF object(using HAMMING)
        orb = cv2.ORB_create()

        gray2 = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        gray1 = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

        # gray2 = cv2.equalizeHist(gray2)
        # gray1 = cv2.equalizeHist(gray1)

        ## Find the keypoints and descriptors with ORB
        kpts1, descs1 = orb.detectAndCompute(gray1, None)
        kpts2, descs2 = orb.detectAndCompute(gray2, None)

        # create BFMatcher object (NORM_HAMMING for ORB's default WTA_K=2;
        # NORM_HAMMING2 is only correct when ORB is created with WTA_K=3 or 4)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Match descriptors.
        matches = bf.match(descs1, descs2)

        # Sort them in the order of their distance.
        dmatches = sorted(matches, key=lambda x: x.distance)

        ## extract the matched keypoints
        src_pts = np.float32([kpts1[m.queryIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kpts2[m.trainIdx].pt
                              for m in dmatches]).reshape(-1, 1, 2)

        ## find homography matrix and do perspective transform
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        h, w = img_chocolate.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        ## draw found regions
        frm = cv2.polylines(frm, [np.int32(dst)], True, (0, 0, 255), 1,
                            cv2.LINE_AA)

        ## draw match lines
        res = cv2.drawMatches(img_chocolate,
                              kpts1,
                              frm,
                              kpts2,
                              dmatches[:8],
                              None,
                              flags=2)

        # writer.write(res)
        cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)
        # cv2.imshow("orb_match", frm)
        out.write(frm)
        cv2.imshow("orb_match", res)

        # Pause on pressing of space.
        if key == ord(' '):
            wait_period = 0
        else:
            wait_period = 30

        key = cv2.waitKey(wait_period)
        ret, frm = cap.read()
        frm_count += 1

    cv2.destroyAllWindows()
    cap.release()
    out.release()

    return 0
Code example #27
print("[INFO] computing Precision!")
correct_matches = int(
    input("Please enter the number of correct matches:        "))
incorrect_matches = 10 - correct_matches

corresponding_matches = len(matches[:10])

thePrecision[0, 3] = correct_matches / (correct_matches + incorrect_matches)
print("[INFO] FREAK was done...!")

# ORB ################################################################
print("[INFO] Starting ORB!")

# Making an instance of the ORB class
ORB = cv2.ORB_create()

print("[INFO] stage 1: extracting features!")
kp1 = ORB.detect(img1)
kp2 = ORB.detect(img2)

print("[INFO] flag done!")

# showing keypoints in images
img11 = cv2.drawKeypoints(img1,
                          kp1,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
img22 = cv2.drawKeypoints(img2,
                          kp2,
                          None,
                          flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
Code example #28
 def detectORB(self, ptsNum=10000):
     orb = cv2.ORB_create(ptsNum)
     self.ref_kps, self.ref_des = orb.detectAndCompute(self.__ref_img, None)
     self.tar_kps, self.tar_des = orb.detectAndCompute(self.__tar_img, None)
Code example #29
File: cv2_helpers.py Project: syedomair0/CV-learning
def get_orb(image):
    orb = cv2.ORB_create(nfeatures=2000)
    keypoints_orb, descriptors = orb.detectAndCompute(image, None)
    image_with_keypoints = cv2.drawKeypoints(image, keypoints_orb, None)
    return image_with_keypoints
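A quick way to try `get_orb` (the file name is hypothetical):

import cv2

img = cv2.imread('scene.jpg')
vis = get_orb(img)
cv2.imshow('ORB keypoints', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()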