Code Example #1
matcher = cv2.BFMatcher(norm)
points, descriptor = detector.detectAndCompute(image, None)

im_points = image.copy()
cv2.drawKeypoints(image, points, im_points)
#pl.imshow(im_points[1480:1840,1200:1450])
"""

im1 = cv2.imread("12a.jpg")
im2 = cv2.imread("12b.jpg")

im1 = cv2.pyrDown(im1)
im2 = cv2.pyrDown(im2)

#detector = cv2.xfeatures2d.SIFT_create()
detector = cv2.AKAZE_create()  # Alternative if xfeatures2d is not available

p1, d1 = detector.detectAndCompute(im1, None)
p2, d2 = detector.detectAndCompute(im2, None)

norm = cv2.NORM_L2
matcher = cv2.BFMatcher(norm)
raw_matches = matcher.knnMatch(d1, trainDescriptors=d2, k=2)

#match_pairs=filter_matches(p1, p2, raw_matches, ratio = 0.65)
#draw_matches(im1, im2, ms,imname, lwidth=5,r=8)
ratio = .75
mp1, mp2 = [], []
for m in raw_matches:
    if len(m) == 2 and m[0].distance < m[1].distance * ratio:
        mp1.append(p1[m[0].queryIdx])
        mp2.append(p2[m[0].trainIdx])
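
A minimal sketch of the step that usually follows the ratio test above, assuming numpy is imported as np (this continuation is an editorial illustration, not part of the original snippet):

if len(mp1) >= 4:  # cv2.findHomography needs at least 4 point pairs
    src_pts = np.float32([kp.pt for kp in mp1]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp.pt for kp in mp2]).reshape(-1, 1, 2)
    H, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)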
Code Example #2
File: TestText.py  Project: kjw227/2016spring_CITE201
import cv2
import time
import random

cap = cv2.VideoCapture(0)

if not cap.isOpened():
    cap.open(0)

while (True):
    ret, printimg = cap.read()

    akaze_detector = cv2.AKAZE_create()
    akaze_kp, akaze_des = akaze_detector.detectAndCompute(printimg, None)

    printimg = cv2.drawKeypoints(printimg, akaze_kp, None, (255, 0, 0), 4)
    dis = 0.0

    if (len(akaze_kp) > 0):
        key = []
        d = akaze_kp[0].pt[1]
        for it in akaze_kp:
            if it.pt[1] > d:
                d = it.pt[1]
                key = [it]
        dis = 0.0006 * d * d - 0.5969 * d + 239.83

        printimg = cv2.drawKeypoints(printimg, key, None, (0, 255, 0), 4)

    font = cv2.FONT_HERSHEY_SIMPLEX
Code Example #3
File: kp.py  Project: TristHas/sem3d
import numpy as np
import cv2

detectors = {
    "akaze": cv2.AKAZE_create(),
    "sift": cv2.xfeatures2d.SIFT_create()
}

matchers = {
    "akaze": cv2.BFMatcher(cv2.NORM_HAMMING),
    "sift": cv2.FlannBasedMatcher({
        "algorithm": 0,
        "trees": 5
    }, {"checks": 50})
}


def match_keypoints(img1,
                    img2,
                    feat="sift",
                    filter_coef=.7,
                    filter_dist=50,
                    filter_intensity=20):
    """
    """
    kp1, des1 = detectors[feat].detectAndCompute(img1, None)
    kp2, des2 = detectors[feat].detectAndCompute(img2, None)
    matches = matchers[feat].knnMatch(des1, des2, k=2)
    nkp = len(matches)

    print(f"{nkp} keypoints matched")
Code Example #4
    def extract_all(self, image):
        akaze = cv2.AKAZE_create()
        features, descriptors = akaze.detectAndCompute(image, None)
        pts, descriptors = fu.filter_by_kpt_response(MAX_CV_KPTS, features,
                                                     descriptors)
        return (pts, descriptors)
Code Example #5
def main():
    """Main."""

    parser = argparse.ArgumentParser()
    parser.description = "Convert the TARTANAIR dataset -> EuRoC-like format"
    parser.add_argument(
        "-i",
        "--input",
        required=True,
        help=("Path to the top-level input directory for conversion"),
    )
    parser.add_argument(
        "-o",
        "--output",
        default="output",
        required=False,
        help=("Path to the top-level input directory for conversion"),
    )
    parser.add_argument(
        "-e",
        "--error",
        default=1,
        required=False,
        help=("Measurement error in pixels (default is 1px)"),
    )

    # parse cmdline args
    args = parser.parse_args()

    # Initiate AKAZE detector
    akaze = cv2.AKAZE_create()

    for split in ['train', 'val', 'test']:
        for env in ['hospital']:  #os.listdir(os.path.join(args.input, split)):
            for seqpath in os.listdir(join(args.input, split, env, 'Easy')):
                if not os.path.isdir(
                        join(args.input, split, env, 'Easy', seqpath)):
                    continue
                if seqpath == '.ipynb_checkpoints': continue

                dirs = create_euroc_filestruct(
                    join(args.output, split, env, 'Easy', seqpath))

                for im in tqdm(sorted(
                        os.listdir(
                            join(args.input, split, env, 'Easy', seqpath,
                                 'image_right'))),
                               desc="Converting {}".format(seqpath)):
                    im = im.replace('_right.png', '')

                    # load stereo data (RGB, depth, pose)
                    imR = np.asarray(
                        Image.open(
                            join(args.input, split, env, 'Easy', seqpath,
                                 'image_right', im + '_right.png')))
                    imL = np.asarray(
                        Image.open(
                            join(args.input, split, env, 'Easy', seqpath,
                                 'image_left', im + '_left.png')))
                    """
                    # estimate feature based sparse depth
                    est_features_and_uncertainties = computeDepthAndUncertaintyFromFeatures(imR, imL, akaze, args.error) 

                    # save estimated features and uncertainties
                    fname = join(dirs[2], im + '.csv')
                    np.savetxt(fname, est_features_and_uncertainties, delimiter = ',')
                    """
                    # estimate SGBM based semi-dense depth
                    est_depth_SGBM, est_uncertainties_SGBM = computeDepthSGBM(
                        imR, imL)

                    # save estimated depth map and uncertainties based on SGBM
                    np.save(join(dirs[3], im + '_depth.npy'), est_depth_SGBM)
                    np.save(join(dirs[3], im + '_uncertainty.npy'),
                            est_uncertainties_SGBM)
                    """
                    # copy images
                    for i, cam in enumerate(['right', 'left']):
                        src = join(args.input, split, env, 'Easy', seqpath, 'image_' + cam, im + '_' + cam + '.png')
                        dst = join(dirs[0][i], im + '.png')
                        shutil.copy(src, dst)

                    # copy depth images
                    for i, cam in enumerate(['right', 'left']):
                        src = join(args.input, split, env, 'Easy', seqpath, 'depth_' + cam, im + '_' + cam + '_depth.npy')
                        dst = join(dirs[1][i], im + '.npy')
                        shutil.copy(src, dst)


                    # copy segmentations images
                    for i, cam in enumerate(['right', 'left']):
                        src = join(args.input, split, env, 'Easy', seqpath, 'seg_' + cam, im + '_' + cam + '_seg.npy')
                        dst = join(dirs[5][i], im + '.npy')
                        shutil.copy(src, dst)

                    """
                # copy pose
                """
Code Example #6
        image_test = np.array(before_0)
        if image_test[90:190, 400].mean() > 250:
            W, H = image_test.shape[:2]
            image_test = image_test[80:W - 80, 80:H - 80]
            image_test = cv2.resize(image_test, (H, W))
        img1 = image_standard.copy()
        # Remove information not needed for matching from the original answer sheet -> make it mostly blank
        img1[300:1500, 110:1600] = 255
        img2 = image_test.copy()
        # Remove extra information at the edges of the answer sheet being graded
        img2[:, 2000:] = 255
        img2[:, :100] = 255

        # Keypoint detection
        #detecter = cv2.ORB_create()  # ORB
        detecter = cv2.AKAZE_create()  # AKAZE

        kp1, des1, scale1, imgSz1 = DetectKeyPoint(detecter, img1, 1200)
        kp2, des2, scale2, imgSz2 = DetectKeyPoint(detecter, img2, 1200)

        # Matching
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)

        # Filter the matches
        goodmatch, badmatch1 = MatchFilterDist(matches, None,
                                               50)  # matches with distance <= 50 go into goodmatch
        goodmatch, badmatch2 = MatchFilterHelmart(kp1, scale1, kp2, scale2,
                                                  goodmatch, None, 3)

        img1_pt = [list(map(int, kp1[m.queryIdx].pt)) for m in goodmatch]
Code Example #7
    def detect_feature(self, image):
        akaze = cv2.AKAZE_create()
        features = akaze.detect(image, None)
        pts = fu.filter_by_kpt_response(MAX_CV_KPTS, features)

        return pts
Code Example #8
def compute_rasac_homology(img_query_orig,
                           img_train_orig,
                           MIN_MATCH_COUNT=10,
                           show_detail=False,
                           save_result=False):
    """
    query画像とtrain画像についてakazeでマッチングし、
    ransacによって外れ値を除去してHomology行列を算出する。

    Args:
        MIN_MATCH_COUNT (int):mathesの数の最小値。これ以下だとHomologyを計算しない
    """
    img_query = img_query_orig.copy()
    img_train = img_train_orig.copy()

    # Initiate AKAZE detector
    akaze = cv2.AKAZE_create()

    # Compute keypoints and descriptors
    kp1, des1 = akaze.detectAndCompute(img_query, None)
    kp2, des2 = akaze.detectAndCompute(img_train, None)

    # Use FLANN as the matcher.
    # FLANN parameters
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=6,
                        key_size=12,
                        multi_probe_level=1)
    search_params = dict(checks=50)

    # Use the ANN search to return the 2 nearest neighbors
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Store all the good matches as per Lowe's ratio test:
    # keep matches that are clearly closer than the second-nearest neighbor.
    good_matches = []
    for i in range(len(matches)):
        if (len(matches[i]) < 2):
            continue
        m, n = matches[i]
        if m.distance < 0.7 * n.distance:
            good_matches.append(m)

    # Sort by descriptor distance, closest first
    good_matches = sorted(good_matches, key=lambda x: x.distance)

    if (show_detail):
        # Draw the result
        img_result = cv2.drawMatches(img_query,
                                     kp1,
                                     img_train,
                                     kp2,
                                     good_matches[:10],
                                     None,
                                     flags=2)
        ip.show_img(img_result, figsize=(20, 30))
        print('query kp: {}, train kp: {}, good matches: {}'.format(
            len(kp1), len(kp2), len(good_matches)))

    # Remove outliers with RANSAC and compute the homography matrix.
    # Note that OpenCV expects the point coordinates as a 3-dimensional array.

    if len(good_matches) > MIN_MATCH_COUNT:
        # Extract the coordinates of the matched points
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good_matches]).reshape(-1, 1, 2)

        # Remove outliers with RANSAC
        homology_matrix, mask = cv2.findHomography(src_pts, dst_pts,
                                                   cv2.RANSAC, 5.0)

    else:
        print("Not enough matches are found - %d/%d" %
              (len(good_matches), MIN_MATCH_COUNT))
        matchesMask = None
        return None, None

    if (show_detail or save_result):
        # Draw the result
        matchesMask = mask.ravel().tolist()

        # Get the height and width of the query image, take the rectangle that
        # encloses it, transform that rectangle with the computed homology_matrix,
        # and draw the transformed rectangle on the train image.
        h, w = img_query.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, homology_matrix)
        cv2.polylines(img_train, [np.int32(dst)], True, (255, 100, 0), 3,
                      cv2.LINE_AA)

        num_draw = 50

        draw_params = dict(
            #     matchColor = (0,255,0), # draw matches in green color
            singlePointColor=None,
            matchesMask=matchesMask[:num_draw],  # draw only inliers
            flags=2)

        img_result_2 = cv2.drawMatches(img_query, kp1, img_train, kp2,
                                       good_matches[:num_draw], None,
                                       **draw_params)

    if (show_detail):
        ip.show_img(img_result_2, figsize=(20, 30))
        num_inlier = (mask == 1).sum()
        print('inliers: %d' % num_inlier)
    if (save_result):
        ip.imwrite('ransac_match.jpg', img_result_2)

    return homology_matrix, mask
Code Example #9
    def __init__(self):
        self.detector = cv.AKAZE_create(threshold=0.003)
        self.matcher = cv.FlannBasedMatcher(
            flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = []
        self.frame_points = []
Code Example #10
    def __init__(self):
        self.next_state = 'None'
        self.number_of_egg = 11
        self.hatched_egg = 0
        self.shiny_number = 0
        self._detect_frame_count = 0
        self._good = []
        self._good_without_list = []
        self._bf = cv2.BFMatcher()
        self._breeder_house_flg = 0
        self.send_command = 'None'
        self._send_command_enb = 0
        self._control_frame_count = 0
        self._temp_y = 408
        self.run_control_state = 'None'
        self.a_breeder_count = 0
        self.run_l_count = 0
        self._sel_poke_flg = 0
        self.check_egg_frames = 0
        self._breeder_comment = cv2.imread("data\\breeder_comment.png")
        self._sora_sel_img = cv2.imread("data\\sora_sel.png")
        # self.last_save_time = time.time()

        self.cut_frame_h = 237
        self.cut_frame_w = 1280
        self.cross_arm_breeder1 = cv2.imread("data\\a_breeder1.png")
        self.cross_arm_breeder2 = cv2.imread("data\\a_breeder2.png")
        self.cross_arm_breeder3 = cv2.imread("data\\a_breeder3.png")
        self.breeder1 = cv2.imread("data\\breeder1.png")
        self.breeder2 = cv2.imread("data\\breeder2.png")
        self.breeder3 = cv2.imread("data\\breeder3.png")
        self.hand_breeder_with_egg = cv2.imread("data\\f_a_breeder_hand.png")
        self.f_breeder3 = cv2.imread("data\\f_breeder_hand.png")
        self.menu_icon_pokemon = cv2.imread("data\\menu_sel_poke.png")
        self.gray_cross_arm_breeder1 = cv2.cvtColor(self.cross_arm_breeder1,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_cross_arm_breeder2 = cv2.cvtColor(self.cross_arm_breeder2,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_cross_arm_breeder3 = cv2.cvtColor(self.cross_arm_breeder3,
                                                    cv2.COLOR_RGB2GRAY)
        self.gray_breeder1 = cv2.cvtColor(self.breeder1, cv2.COLOR_RGB2GRAY)
        self.gray_breeder2 = cv2.cvtColor(self.breeder2, cv2.COLOR_RGB2GRAY)
        self.gray_breeder3 = cv2.cvtColor(self.breeder3, cv2.COLOR_RGB2GRAY)
        self.gray_f_a_breeder = cv2.cvtColor(self.hand_breeder_with_egg,
                                             cv2.COLOR_RGB2GRAY)
        self.gray_f_breeder = cv2.cvtColor(self.f_breeder3, cv2.COLOR_RGB2GRAY)
        self.ah1, self.aw1 = self.gray_cross_arm_breeder1.shape
        self.ah2, self.aw2 = self.gray_cross_arm_breeder2.shape
        self.ah3, self.aw3 = self.gray_cross_arm_breeder3.shape
        self.h1, self.w1 = self.gray_breeder1.shape
        self.h2, self.w2 = self.gray_breeder2.shape
        self.h3, self.w3 = self.gray_breeder3.shape
        self.hf, self.wf = self.gray_f_breeder.shape
        #self.gray_cross_arm_breeder1 = np.array(self.gray_cross_arm_breeder1, dtype="float")
        #self.gray_cross_arm_breeder2 = np.array(self.gray_cross_arm_breeder2, dtype="float")
        #self.gray_cross_arm_breeder3 = np.array(self.gray_cross_arm_breeder3, dtype="float")
        self.mu_t1 = np.mean(self.gray_cross_arm_breeder1)
        self.mu_t2 = np.mean(self.gray_cross_arm_breeder2)
        self.mu_t3 = np.mean(self.gray_cross_arm_breeder3)
        self.temp1 = self.gray_cross_arm_breeder1 - self.mu_t1
        self.temp2 = self.gray_cross_arm_breeder2 - self.mu_t2
        self.temp3 = self.gray_cross_arm_breeder3 - self.mu_t3
        #self.dst = 0
        if VolatileClassRun.ALGOLISM == 'AKAZE':
            VolatileClassRun.MIN_MATCH_COUNT = 20  # if this is too small, findHomography returns None
            VolatileClassRun.ratio = 0.7
            self._akaze = cv2.AKAZE_create()
            self._kp1, self._des1 = self._akaze.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'SURF':
            VolatileClassRun.MIN_MATCH_COUNT = 30  # if this is too small, findHomography returns None
            VolatileClassRun.ratio = 0.5
            self._surf = cv2.xfeatures2d.SURF_create(400)
            self._kp1, self._des1 = self._surf.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'USURF':
            VolatileClassRun.MIN_MATCH_COUNT = 18  # if this is too small, findHomography returns None
            VolatileClassRun.ratio = 0.6
            self._surf = cv2.xfeatures2d.SURF_create(400)
            self._surf.setUpright(True)
            self._kp1, self._des1 = self._surf.detectAndCompute(
                VolatileClassRun.breeder_house_img, None)
        elif VolatileClassRun.ALGOLISM == 'FAST':
            VolatileClassRun.MIN_MATCH_COUNT = 30  # if this is too small, findHomography returns None
            VolatileClassRun.ratio = 0.5
            self._fast = cv2.FastFeatureDetector_create()
            self._brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
            self._kp1 = self._fast.detect(VolatileClassRun.breeder_house_img,
                                          None)
            self._kp1, self._des1 = self._brief.compute(
                VolatileClassRun.breeder_house_img, self._kp1)
        self.matches = self._bf.knnMatch(self._des1, self._des1, k=2)
Code Example #11
def homography_lensdist(source_path, destination_path, result_path):
    # Read images
    im1 = cv2.imread(source_path)
    im2 = cv2.imread(destination_path)
    # AKAZE descriptor
    akaze = cv2.AKAZE_create()
    kp1, des1 = akaze.detectAndCompute(im1, None)
    kp2, des2 = akaze.detectAndCompute(im2, None)
    # Flann matching
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=6,
                        key_size=12,
                        multi_probe_level=1)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    ratio = 0.8
    good = []
    for m, n in matches:
        if m.distance < ratio * n.distance:
            good.append(m)

    pts1 = np.float32([kp1[match.queryIdx].pt for match in good])
    pts2 = np.float32([kp2[match.trainIdx].pt for match in good])
    pts1 = pts1.reshape(-1, 1, 2)
    pts2 = pts2.reshape(-1, 1, 2)
    hm, mask = cv2.findHomography(pts1, pts2, cv2.RANSAC, 100)

    pts1 = pts1[mask.astype('bool')]
    pts2 = pts2[mask.astype('bool')]

    def distort(pts, params):
        k1 = params[0]
        k2 = params[1]
        k3 = params[2]
        p1 = params[3]
        p2 = params[4]
        k4 = params[5]
        k5 = params[6]
        k6 = params[7]
        s1 = 0  #params[8]
        s2 = 0  #params[9]
        s3 = 0  #params[10]
        s4 = 0  #params[11]
        w = im1.shape[1]
        h = im1.shape[0]
        centre = np.array([(w - 1) / 2, (h - 1) / 2], dtype='float32')
        x1 = (pts[:, 0] - centre[0]) / centre[0]
        y1 = (h / w) * (pts[:, 1] - centre[1]) / centre[1]
        r = (x1**2 + y1**2)**0.5
        r2 = r**2
        r4 = r**4
        r6 = r**6

        x1_d = x1 * (1 + k1 * r2 + k2 * r4 + k3 * r6) / (
            1 + k4 * r2 + k5 * r4 + k6 * r6) + 2 * p1 * x1 * y1 + p2 * (
                r2 + 2 * x1**2) + s1 * r2 + s2 * r4
        y1_d = y1 * (1 + k1 * r2 + k2 * r4 + k3 * r6) / (
            1 + k4 * r2 + k5 * r4 + k6 * r6) + 2 * p2 * x1 * y1 + p1 * (
                r2 + 2 * y1**2) + s3 * r2 + s4 * r4
        x1_d = x1_d * centre[0] + centre[0]
        y1_d = (w / h) * y1_d * centre[1] + centre[1]
        pts_d = np.stack([x1_d, y1_d], axis=0).T
        return pts_d

    def homography(pts1, pts2):
        pts1 = pts1.reshape(-1, 1, 2)
        pts2 = pts2.reshape(-1, 1, 2)
        hmat, mask = cv2.findHomography(pts1, pts2)
        pts1 = pts1.reshape(-1, 2)
        pts2 = pts2.reshape(-1, 2)

        pts1 = np.insert(pts1, 2, 1, axis=1)
        pts1 = np.dot(hmat, pts1.T).T
        pts1[:, 0] = pts1[:, 0] / pts1[:, 2]
        pts1[:, 1] = pts1[:, 1] / pts1[:, 2]
        pts1 = pts1[:, 0:2]
        rmse = np.mean(
            ((pts1[:, 0] - pts2[:, 0])**2 + (pts1[:, 1] - pts2[:, 1])**2)**0.5)

        return pts1, rmse, hmat

    def distort_rmse(params):
        pts1_d = distort(pts1, params)
        pts1_dh = homography(pts1_d, pts2)
        return pts1_dh[1]

    res = opt.minimize(distort_rmse, x0=[0] * 8, method='Nelder-Mead')

    height, width, channels = im1.shape
    map_x, map_y = np.meshgrid(np.arange(width), np.arange(height))
    grid = np.stack([map_x.flatten(), map_y.flatten()]).T
    p = copy.copy(res.x)
    p = -p
    grid_d = distort(grid, p)
    map_d = grid_d.T.reshape([2, height, width]).astype('float32')
    im1_d = cv2.remap(im1,
                      map_d[0, :, :],
                      map_d[1, :, :],
                      interpolation=cv2.INTER_LINEAR)
    pts1_d = distort(pts1, res.x)
    pts1_dh, rmse, hmat = homography(pts1_d, pts2)

    im1_dh = cv2.warpPerspective(im1_d, hmat, (im2.shape[1], im2.shape[0]))
    cv2.imwrite(result_path, im1_dh)
    return im1_dh
Code Example #12
class MatchMoveWidget(Widget):
    min_match_count = NumericProperty(10)
    flann_index_kdtree = NumericProperty(0)
    video_width = NumericProperty(1024)
    is_optical = BooleanProperty(False)

    fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    algorithms = {
        "AKAZE": cv2.AKAZE_create(),
        "SIFT": cv2.xfeatures2d.SIFT_create()
    }

    gamma = 1 / 1.8
    gamma_cvt = np.uint8(255 * (np.linspace(0, 1, 256)**gamma))

    def correct(self, img):
        return cv2.LUT(img, self.gamma_cvt)

    def save_to(self, to):
        return get_save_path("result", "matchmove", "eval", to)

    def set_reference(self, reference, points):
        self.reference = reference
        self.points = np.array(points)

    def set_destination(self, dest):
        async def task():
            # h = np.sqrt(np.sum((self.points[1] - self.points[0])**2)).astype(np.int16)
            # w = np.sqrt(np.sum((self.points[2] - self.points[1])**2)).astype(np.int16)
            h, w, *_ = dest.shape
            self.destination = cv2.resize(self.correct(tex2cv_format(dest)),
                                          (w, h))
            self.reference = await popup_task("Calculating...", warp,
                                              self.reference, self.points[0],
                                              self.points[1], self.points[2],
                                              self.points[3], np.array([0, 0]),
                                              np.array([h,
                                                        0]), np.array([h, w]),
                                              np.array([0, w]), h, w)
            self.reference = tex2cv_format(self.reference)
            cv2.imwrite(self.save_to("destination.png"), self.destination)
            cv2.imwrite(self.save_to("reference.png"), self.reference)
            self.reference = self.correct(self.reference)
            await sleep(0.333)

        forget(task())

    def set_target(self, source, key):
        self.source = source
        ext = "*" + get_file_ext(self.source)
        if ext in VIDEO_EXT:
            self.set_video_target(key)
        else:
            self.set_image_target(key)

    def set_image_target(self, key):
        async def task():
            try:
                folder, file = os.path.split(self.source)
                import re
                *_, typ, _ = re.split(r"[\._]", file)
                corners = np.load(os.path.join(folder, f"points_{typ}.npy"))
            except Exception as e:
                corners = None
                print("no corners file:", e)

            await popup_task("Calculating...", self.execute_image, key)

        forget(task())

    def set_video_target(self, key):
        async def task():
            await popup_task("Calculating...", self.execute_video, key)

        forget(task())

    def execute_image(self, algorithm, corners=None, typ=""):
        frame = cv2.imread(self.source)
        size_h, size_w, *_ = frame.shape

        frame = cv2.resize(frame, (size_w, size_h))
        frame = self.correct(frame)

        ref_kp, ref_des = detect_keypoint(self.reference,
                                          self.algorithms[algorithm])
        tar_kp, tar_des = detect_keypoint(frame, self.algorithms[algorithm])

        src_pts, dst_pts, good = match_points(ref_kp, ref_des, tar_kp, tar_des,
                                              self.min_match_count,
                                              self.flann_index_kdtree)

        cv2.imwrite(
            self.save_to(f"keypoints_frame_image_{algorithm}_{typ}.png"),
            cv2.drawKeypoints(frame, tar_kp, None, flags=4))
        cv2.imwrite(
            self.save_to(f"matches_image_{algorithm}_{len(good)}_{typ}.png"),
            cv2.drawMatchesKnn(frame,
                               tar_kp,
                               self.reference,
                               ref_kp,
                               good,
                               None,
                               matchColor=(0, 255, 0),
                               matchesMask=None,
                               singlePointColor=(255, 0, 0),
                               flags=0))

        if src_pts is not None or dst_pts is not None:
            # Get the transform from frame to reference
            H = get_homography(src_pts, dst_pts)
            frame = replace_image(self.destination, frame, H).astype(np.uint8)
            cv2.imwrite(self.save_to(f"result_{algorithm}_{typ}.png"), frame)

    def execute_video(self, algorithm, max_speed=1):
        cap = cv2.VideoCapture(self.source)
        if not cap.isOpened():
            return

        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)

        size_w = self.video_width
        size_h = self.video_width * h // w

        writer = cv2.VideoWriter(self.save_to(f"result_{algorithm}.mp4"),
                                 self.fmt, fps, (size_w, size_h))

        ref_kp, ref_des = detect_keypoint(self.reference,
                                          self.algorithms[algorithm])
        cv2.imwrite(self.save_to(f"keypoints_reference_{algorithm}.png"),
                    cv2.drawKeypoints(self.reference, ref_kp, None, flags=4))

        i = 0
        minh = 0
        minw = 0
        maxh = size_h
        maxw = size_w
        start = time.time()
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                t = time.time()
                print("\nend process:", (t - start) / max(i, 1))
                break

            frame = cv2.resize(frame, (size_w, size_h))
            frame = self.correct(frame)

            print(f"\rdesctipt frame: {i}\t\t\t\t", end="")
            tar_kp, tar_des = detect_keypoint(frame[minh:maxh, minw:maxw],
                                              self.algorithms[algorithm])

            h_c, w_c, *_ = frame[minh:maxh, minw:maxw].shape
            print(f"\rmatch frame: {i}\t\t\t\t", end="")
            src_pts, dst_pts, good = match_points(ref_kp, ref_des, tar_kp,
                                                  tar_des,
                                                  self.min_match_count,
                                                  self.flann_index_kdtree)

            if i == 0:
                print(f"\save frame: {i}\t\t\t\t", end="")
                cv2.imwrite(self.save_to(f"keypoints_frame_{algorithm}.png"),
                            cv2.drawKeypoints(frame, tar_kp, None, flags=4))
                cv2.imwrite(
                    self.save_to(f"matches_{algorithm}.png"),
                    cv2.drawMatchesKnn(frame,
                                       tar_kp,
                                       self.reference,
                                       ref_kp,
                                       good,
                                       None,
                                       matchColor=(0, 255, 0),
                                       matchesMask=None,
                                       singlePointColor=(255, 0, 0),
                                       flags=0))
                start = time.time()

            if src_pts is not None or dst_pts is not None:
                # Get the transform from frame to reference
                H = get_homography(src_pts, dst_pts)
                if self.is_optical:
                    replaced = warp_only(self.destination, frame, H, minh,
                                         minw)
                    mask = np.sum(replaced > 0, axis=2, dtype=bool)

                    print(f"\rreplace frame: {i}\t\t\t\t", end="")
                    frame = np.where(mask[:, :, None], replaced,
                                     frame).astype(np.uint8)

                    mask_id = np.array(np.where(mask))
                    minh = max(np.min(mask_id[0]) - max_speed, 0)
                    minw = max(np.min(mask_id[1]) - max_speed, 0)
                    maxh = min(np.max(mask_id[0]) + max_speed, size_h)
                    maxw = min(np.max(mask_id[1]) + max_speed, size_w)
                else:
                    frame = replace_image(self.destination, frame,
                                          H).astype(np.uint8)

            writer.write(frame)
            i += 1

        writer.release()
        cap.release()
Code Example #13
FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
FLANN_INDEX_LSH = 6


def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv.xfeatures2d.SIFT_create()
        norm = cv.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv.xfeatures2d.SURF_create(200)
        norm = cv.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv.ORB_create(1400)
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv.AKAZE_create()
        norm = cv.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv.BRISK_create()
        norm = cv.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv.NORM_L2:
            flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        else:
            flann_params = dict(algorithm=FLANN_INDEX_LSH,
                                table_number=6,  # 12
                                key_size=12,  # 20
                                multi_probe_level=1)  # 2
        matcher = cv.FlannBasedMatcher(flann_params, {})  # bug: need to pass empty dict (#1329)
Code Example #14
import sys

import cv2

# Load the images
src1 = cv2.imread(
    '/Users/hyunsul/Desktop/ai-room/OpenCV2_python/ch09/graf1.png',
    cv2.IMREAD_GRAYSCALE)
src2 = cv2.imread(
    '/Users/hyunsul/Desktop/ai-room/OpenCV2_python/ch09/graf3.png',
    cv2.IMREAD_GRAYSCALE)

if src1 is None or src2 is None:
    print('Image load failed!')
    sys.exit()

# Create the feature detector object (KAZE, AKAZE, ORB, etc.)
feature = cv2.AKAZE_create()  # uses Hamming distance

# Detect keypoints and compute descriptors
kp1, desc1 = feature.detectAndCompute(src1, None)
kp2, desc2 = feature.detectAndCompute(src2, None)

# Match the keypoints
matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
matches = matcher.match(desc1, desc2)

print('# of kp1:', len(kp1))
print('# of kp2:', len(kp2))
print('# of matches:', len(matches))

# Draw the matching result image
dst = cv2.drawMatches(src1, kp1, src2, kp2, matches, None)
Code Example #15
def run_for_images(dataset, image1, image2):

    runs = [
        ("SIFT", cv2.xfeatures2d.SIFT_create(), cv2.xfeatures2d.SIFT_create(),
         cv2.NORM_L2, flann_params1),
        ("SURF", cv2.xfeatures2d.SURF_create(), cv2.xfeatures2d.SURF_create(),
         cv2.NORM_L2, flann_params1),
        ("BRISK", cv2.BRISK_create(), cv2.BRISK_create(), cv2.NORM_HAMMING,
         flann_params2),
        ("ORB", cv2.ORB_create(nfeatures=3000), cv2.ORB_create(),
         cv2.NORM_HAMMING, flann_params2),
        #("FAST+ORB",    cv2.FastFeatureDetector_create(),       cv2.ORB_create(),                                   cv2.NORM_HAMMING,   flann_params2),
        ("AKAZE", cv2.AKAZE_create(), cv2.AKAZE_create(), cv2.NORM_HAMMING,
         flann_params2),
        ("CenSurE+BRIEF", cv2.xfeatures2d.StarDetector_create(),
         cv2.xfeatures2d.BriefDescriptorExtractor_create(), cv2.NORM_HAMMING,
         flann_params2)
    ]

    plt_legends_500 = []
    plt_legends_20 = []

    for (name, detector, extractor, norm, flann_params) in runs:

        img1_src = cv2.imread(image1)
        img2_src = cv2.imread(image2)

        img1 = cv2.resize(img1_src,
                          (640 * img1_src.shape[1] / img1_src.shape[0], 640))
        img2 = cv2.resize(img2_src,
                          (640 * img2_src.shape[1] / img2_src.shape[0], 640))
        #print "IMAGE SIZES: {}  {}".format(img1.shape[:2], img2.shape[:2])

        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        start_detection = time.time()
        kp1 = detector.detect(gray1)
        kp2 = detector.detect(gray2)
        detection_time = (time.time() - start_detection)

        print " {}  {} {} {:.4f} {}".format(
            name, len(kp1), len(kp2), detection_time,
            detection_time / (len(kp1) + len(kp2)))

        start_computation = time.time()
        kp1, descr1 = extractor.compute(gray1, kp1)
        kp2, descr2 = extractor.compute(gray2, kp2)
        computation_time = (time.time() - start_computation)

        print "    computing:  {:.4f}  {}    {} {}".format(
            computation_time, computation_time / (len(kp1) + len(kp2)),
            len(descr1), len(descr2))

        print "    for TABLE 1:"
        print "{} & {} & {:.4f} & {:.7f} & {} & {:.4f} & {:.7f}".format(
            name, (len(kp1) + len(kp2)) / 2, detection_time,
            detection_time / (len(kp1) + len(kp2)), name, computation_time,
            computation_time / (len(kp1) + len(kp2)))

        cv2.drawKeypoints(gray1, kp1, img1)
        cv2.imwrite("out/" + dataset + "_" + name + "_keypoints_1.jpg", img1)
        cv2.drawKeypoints(gray2, kp2, img2)
        cv2.imwrite("out/" + dataset + "_" + name + "_keypoints_2.jpg", img2)

        # ok let's try different matchers

        # BruteForce with crossCheck
        matcher = cv2.BFMatcher(norm, crossCheck=True)
        start_matching = time.time()
        matches = matcher.match(descr1, descr2)
        bf1_num_matches = len(matches)
        bf1_matching_time = time.time() - start_matching
        matches = sorted(matches, key=lambda x: x.distance)
        print "    BF crossCheck matching:  {}  {:.4f}".format(
            len(matches), bf1_matching_time)
        best_matches = matches[:num_best_matches]

        plt.figure(1)
        line, = plt.plot(
            [m.distance / best_matches[-1].distance for m in best_matches],
            label=name)
        plt_legends_20.append(line)

        plt.figure(2)
        matches_500 = matches[:250]
        line, = plt.plot(
            [m.distance / matches_500[-1].distance for m in matches_500],
            label=name)
        plt_legends_500.append(line)

        img3 = cv2.drawMatches(img1,
                               kp1,
                               img2,
                               kp2,
                               best_matches,
                               None,
                               flags=2)
        cv2.imwrite(
            "out/" + dataset + "_" + name + "_bf_crossCheck_matches.jpg", img3)

        # BruteForce without crossCheck, with Lowe's ratio test
        matcher = cv2.BFMatcher(norm, crossCheck=False)
        start_matching = time.time()
        matches = matcher.knnMatch(descr1, descr2, k=2)
        bf2_matching_time = time.time() - start_matching
        print "    BF knn        matching:  {}  {:.4f}".format(
            len(matches), bf2_matching_time)

        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append([m])
        print "              good matches:   {}".format(len(good))
        bf2_num_matches = len(good)

        best_matches = good[:num_best_matches]

        img3 = cv2.drawMatchesKnn(img1,
                                  kp1,
                                  img2,
                                  kp2,
                                  best_matches,
                                  None,
                                  flags=2)
        cv2.imwrite("out/" + dataset + "_" + name + "_bf_knn_matches.jpg",
                    img3)

        # FLANN-based
        matcher = cv2.FlannBasedMatcher(flann_params, dict())
        if norm == cv2.NORM_L2:
            descr1 = np.asarray(descr1, np.float32)
            descr2 = np.asarray(descr2, np.float32)
        elif norm == cv2.NORM_HAMMING:
            descr1 = np.asarray(descr1, np.uint8)
            descr2 = np.asarray(descr2, np.uint8)
        start_matching = time.time()
        matches = matcher.knnMatch(descr1, descr2, k=2)
        flann_matching_time = time.time() - start_matching
        print "    FLANN knn     matching:  {}  {:.4f}".format(
            len(matches), flann_matching_time)
        matchesMask = [[0, 0] for i in xrange(len(matches))]
        # ratio test as per Lowe's paper
        best_matches = []
        for i, m_n in enumerate(matches):
            if len(m_n) != 2:
                continue
            (m, n) = m_n
            if m.distance < 0.7 * n.distance:
                best_matches.append([m])
        flann_num_matches = len(best_matches)
        draw_params = dict(
            matchColor=(0, 255, 0),
            singlePointColor=(255, 0, 0),
            #matchesMask = matchesMask,
            flags=0)
        img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2,
                                  best_matches[:num_best_matches], None,
                                  **draw_params)
        cv2.imwrite("out/" + dataset + "_" + name + "_flann_knn_matches.jpg",
                    img3)

        print "    for TABLE 2:"
        print "{} & {} & {:.4f} & {} & {:.4f} & {} & {:.4f}".format(
            name, bf1_num_matches, bf1_matching_time, bf2_num_matches,
            bf2_matching_time, flann_num_matches, flann_matching_time)

    global current_dataset
    if current_dataset == "swiss-church":
        current_dataset = "church"

    plt.figure(1)
    plt.title(current_dataset + " best 20")
    plt.legend(handles=plt_legends_20, loc=4)
    plt.savefig("graphs/" + current_dataset + "_top20.jpg")
    plt.clf()

    plt.figure(2)
    plt.title(current_dataset + " best 250")
    plt.legend(handles=plt_legends_500, loc=4)
    plt.savefig("graphs/" + current_dataset + "_top250.jpg")
    plt.clf()
Code Example #16
def open_file():
    from tkinter.filedialog import askopenfilename
    file_path = askopenfilename(title=u'select file')
    name = file_path.split("/")[-1]
    img_to_match = cv2.imread(file_path, 0)
    img_to_match_pillow = ImageTk.PhotoImage(
        Image.fromarray(img_to_match).resize((320, 240), Image.ANTIALIAS))

    if v.get() == 1:
        ALG = cv2.xfeatures2d.SURF_create()
    elif v.get() == 2:
        ALG = cv2.xfeatures2d.SURF_create()
    elif v.get() == 3:
        ALG = cv2.BRISK_create()
    elif v.get() == 4:
        ALG = cv2.AKAZE_create()
    elif v.get() == 5:
        ALG = cv2.KAZE_create()
    img_database_fts = [
        ALG.detectAndCompute(img, None) for img in img_database
    ]
    draw_database = [
        ImageTk.PhotoImage(
            Image.fromarray(
                cv2.drawKeypoints(img_from_database, img_database_fts[nr][0],
                                  None)).resize((320, 240), Image.ANTIALIAS))
        for nr, img_from_database in enumerate(img_database)
    ]

    (kps1, descs1) = ALG.detectAndCompute(img_to_match, None)

    layout2 = tk.Label(root)
    layout2.place(relx=0.5, rely=0, relwidth=1, relheight=1, anchor='n')

    label_img_to_match = tk.Label(layout2, image=img_to_match_pillow)
    label_img_to_match.image = img_to_match_pillow
    label_img_to_match.place(relx=0.3,
                             rely=0.1,
                             width=320,
                             height=240,
                             anchor='n')

    label_img_database = tk.Label(layout2, image=draw_database[0])
    label_img_database.image = draw_database[0]
    label_img_database.place(relx=0.7,
                             rely=0.1,
                             width=320,
                             height=240,
                             anchor='n')

    label_img_matched = tk.Label(layout2, image=draw_database[0])
    label_img_matched.image = draw_database[0]
    label_img_matched.place(relx=0.5,
                            rely=0.5,
                            width=640,
                            height=240,
                            anchor='n')

    nrOfGoodPerImage = np.zeros([nrOfFiles, 1])

    image_list_matched = []

    def calc(j):
        if j < nrOfFiles - 1:
            bf = cv2.BFMatcher()
            kps2 = img_database_fts[j][0]
            descs2 = img_database_fts[j][1]
            matches = bf.knnMatch(descs1, descs2, k=2)
            matchesMask = [[0, 0] for i in range(len(matches))]

            for i, (m, n) in enumerate(matches):
                if m.distance < 0.75 * n.distance:
                    matchesMask[i] = [1, 0]
            good = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:
                    good.append([m])

            nrOfGoodPerImage[j] = np.sum(matchesMask[:])

            img3 = cv2.drawMatchesKnn(
                img_to_match,
                kps1,
                img_database[j],
                kps2,
                good,
                None,
                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            img3_pillow = ImageTk.PhotoImage(
                Image.fromarray(img3).resize((640, 240), Image.ANTIALIAS))
            image_list_matched.append(img3_pillow)
            root.after(0, lambda: calc(j + 1))

    calc(0)
    idx = (-np.squeeze(nrOfGoodPerImage)).argsort()[:3]

    def matching(i):
        if i < nrOfFiles - 1:

            label_img_database.config(image=draw_database[i])
            label_img_matched.config(image=image_list_matched[i])
            root.after(DELAY, lambda: matching(i + 1))
        elif i == nrOfFiles - 1:
            # my_label4=tk.Label(root,bg='#80c1ff')
            my_label4 = tk.Label(root, bg="LightSteelBlue1")
            my_label4.place(relx=0.5,
                            rely=0,
                            relwidth=1,
                            relheight=1,
                            anchor='n')

            org_label = tk.Label(my_label4, text="Image to match:\n" + name)
            org_label.place(relx=0.15, rely=0.5, anchor='e')
            org = tk.Label(my_label4, image=img_to_match_pillow)
            org.place(relx=0.15, rely=0.5, width=320, height=240, anchor='w')

            best_match_label = tk.Label(my_label4,
                                        text="Best Match:\n" + files[idx[0]])
            best_match_label.place(relx=0.8, rely=0.2, anchor='w')
            best_match = tk.Label(my_label4, image=img_database_pillow[idx[0]])
            best_match.place(relx=0.8,
                             rely=0.2,
                             width=320,
                             height=240,
                             anchor='e')

            best_match2_label = tk.Label(my_label4,
                                         text="Second Best Match:\n" +
                                         files[idx[1]])
            best_match2_label.place(relx=0.8, rely=0.5, anchor='w')
            best_match2 = tk.Label(my_label4,
                                   image=img_database_pillow[idx[1]])
            best_match2.place(relx=0.8,
                              rely=0.5,
                              width=320,
                              height=240,
                              anchor='e')

            best_match3_label = tk.Label(my_label4,
                                         text="Third Best Match:\n" +
                                         files[idx[2]])
            best_match3_label.place(relx=0.8, rely=0.8, anchor='w')
            best_match3 = tk.Label(my_label4,
                                   image=img_database_pillow[idx[2]])
            best_match3.place(relx=0.8,
                              rely=0.8,
                              width=320,
                              height=240,
                              anchor='e')

            # my_title2=tk.Label(my_label4,text="Matching finished! Displaying results...",font=("Helvetica",20), bg='#80c1ff')
            my_title2 = tk.Label(
                my_label4,
                text="Matching finished! Displaying results...",
                font=("Helvetica", 20),
                bg="LightSteelBlue1")
            my_title2.place(relx=0.5,
                            rely=0.0,
                            relwidth=0.4,
                            relheight=0.05,
                            anchor='n')

    matching(0)
Code Example #17
def main(imgLpath, imgRpath, nb_matches, i, not_rectified):
    print("Reading images...")
    left = cv2.imread(imgLpath, 0)
    cv2.imshow("left", left)
    right = cv2.imread(imgRpath, 0)
    cv2.imshow("right", right)
    cv2.waitKey(0)

    if not_rectified:
        print("Compute keypoints matching...")
        akaze = cv2.AKAZE_create()
        # akaze = cv2.xfeatures2d.SIFT_create()
        kpts1, desc1 = akaze.detectAndCompute(left, None)
        kpts2, desc2 = akaze.detectAndCompute(right, None)
        matcher = cv2.BFMatcher(cv2.NORM_L2, True)
        matches = matcher.match(desc1, desc2)
        sortedmatches = sorted(matches, key=lambda x: x.distance)
        good_matches = sortedmatches[:nb_matches]

        obj = []
        scene = []

        for i in range(len(good_matches)):
            # -- Get the keypoints from the good matches
            obj.append(kpts1[good_matches[i].queryIdx].pt)
            scene.append(kpts2[good_matches[i].trainIdx].pt)

        F, mask = cv2.findFundamentalMat(np.array(obj), np.array(scene),
                                         cv2.FM_RANSAC)
        correct_kpts1 = []
        correct_kpts2 = []
        correct_matches = []
        for i in range(len(mask)):
            if mask[i, 0] > 0:
                correct_kpts1.append(obj[i])
                correct_kpts2.append(scene[i])
                correct_matches.append(good_matches[i])

        res = np.empty((max(
            left.shape[0], right.shape[0]), left.shape[1] + right.shape[1], 3),
                       dtype=np.uint8)
        cv2.drawMatches(left, kpts1, right, kpts2, correct_matches, res)
        cv2.imwrite("./results/python/keypoints.jpg", res)
        cv2.imshow("keypoints", res)
        cv2.drawMatches(left, kpts1, right, kpts2, good_matches, res)
        cv2.imwrite("./results/python/keypoints_without_RANSAC.jpg", res)
        cv2.waitKey(0)

        print("Computing rectification...")
        # grey1 = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
        # grey2 = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
        dst1, dst2, theta1, theta2, s, T1, T2 = rectify(
            np.array(obj), np.array(scene), left, right)
    else:
        dst1 = left
        dst2 = right
    cv2.imwrite("./results/python/rectify1.png", dst1)
    cv2.imwrite("./results/python/rectify2.png", dst2)
    cv2.imshow("left_rectified", dst1)
    cv2.imshow("right_rectified", dst2)
    cv2.waitKey(0)

    print("Computing disparity...")
    disparity = disparity_map(dst1, dst2)
    disparity_img = grayImage(disparity)
    cv2.imwrite("./results/python/disparity.png", disparity_img)
    print(disparity)
    cv2.imshow("disparity", disparity_img)
    cv2.waitKey(0)

    print("Computing interpolation...")
    ir = interpolate(i, dst1, dst2, disparity)
    cv2.imwrite("./results/python/interpolated.png", ir)
    cv2.imshow("interpolated_rectified", ir)
    print(ir)
    cv2.waitKey(0)

    if not_rectified:
        print("Computing derectification...")
        de_rect = deRectify(ir, theta1, theta2, T1, T2, s, i)
        cv2.imwrite("./results/python/derectified.png", de_rect)
        cv2.imshow("interpolated", de_rect)
        cv2.waitKey(0)
Code Example #18
    F, mask = pydegensac.findFundamentalMatrix(src_pts,
                                               dst_pts,
                                               th,
                                               0.999,
                                               n_iter,
                                               enable_degeneracy_check=True)
    print('pydegensac found {} inliers'.format(
        int(deepcopy(mask).astype(np.float32).sum())))
    return F, mask


if __name__ == '__main__':
    img1 = cv2.cvtColor(cv2.imread('img/v_dogman/1.ppm'), cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(cv2.imread('img/v_dogman/6.ppm'), cv2.COLOR_BGR2RGB)
    # SIFT is not available by pip install, so let's use AKAZE features
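    # descriptor_type=3 corresponds to cv2.AKAZE_DESCRIPTOR_KAZE, a float descriptor,
    # so the default (L2-norm) BFMatcher below is a reasonable pairing.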
    det = cv2.AKAZE_create(descriptor_type=3, threshold=0.00001)
    kps1, descs1 = det.detectAndCompute(img1, None)
    kps2, descs2 = det.detectAndCompute(img2, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(descs1, descs2, k=2)
    matchesMask = [False for i in range(len(matches))]
    # SNN ratio test
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.9 * n.distance:
            matchesMask[i] = True
    tentatives = [m[0] for i, m in enumerate(matches) if matchesMask[i]]

    th = 4.0
    n_iter = 2000
    t = time()
    print("Running homography estimation")
Code Example #19
def feature_match():
    # load the image and convert it to grayscale
    im1 = cv2.imread(path_to_perfect_image)
    im2 = cv2.imread(path_to_plot)

    gray1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # initialize the AKAZE,BRISK descriptor, then detect keypoints and extract
    # local invariant descriptors from the image
    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    akaze = cv2.AKAZE_create()
    brisk = cv2.BRISK_create()
    orb = cv2.ORB_create()

    #compute the descriptors and keypoints using AKAZE BRISK ORB SIFT and SURF
    (akazekps1, akazedescs1) = akaze.detectAndCompute(gray1, None)
    (akazekps2, akazedescs2) = akaze.detectAndCompute(gray2, None)
    (siftkps1, siftdescs1) = sift.detectAndCompute(gray1, None)
    (siftkps2, siftdescs2) = sift.detectAndCompute(gray2, None)
    (surfkps1, surfdescs1) = surf.detectAndCompute(gray1, None)
    (surfkps2, surfdescs2) = surf.detectAndCompute(gray2, None)
    (briskkps1, briskdescs1) = brisk.detectAndCompute(gray1, None)
    (briskkps2, briskdescs2) = brisk.detectAndCompute(gray2, None)
    (orbkps1, orbdescs1) = orb.detectAndCompute(gray1, None)
    (orbkps2, orbdescs2) = orb.detectAndCompute(gray2, None)

    # Match the features using the Brute-Force Matcher
    bfakaze = cv2.BFMatcher(cv2.NORM_HAMMING)
    bf = cv2.BFMatcher(cv2.NORM_L2)

    # Refine the brute-force matches using the k-NN matcher (k=2)
    akazematches = bfakaze.knnMatch(akazedescs1, akazedescs2, k=2)
    siftmatches = bf.knnMatch(siftdescs1, siftdescs2, k=2)
    surfmatches = bf.knnMatch(surfdescs1, surfdescs2, k=2)
    briskmatches = bf.knnMatch(briskdescs1, briskdescs2, k=2)
    orbmatches = bf.knnMatch(orbdescs1, orbdescs2, k=2)

    # Apply ratio test on AKAZE matches
    goodakaze = []
    for m, n in akazematches:
        if m.distance < 0.9 * n.distance:
            goodakaze.append([m])
    goodakaze = np.asarray(goodakaze)
    #print(feature_list)
    #print(goodakaze.shape[0])
    # calculate the AKAZE score using the number of good matches
    #print(goodakaze.shape[0])
    #print(feature_list[0])
    similarity_akaze = (goodakaze.shape[0] / feature_list[0]) * 100

    # Apply ratio test on SIFT matches
    goodsift = []
    for m, n in siftmatches:
        if m.distance < 0.9 * n.distance:
            goodsift.append([m])
    #im3sift = cv2.drawMatchesKnn(img_perfect, siftkps1, img_akaze, siftkps2, goodsift[:], None, flags=2)
    goodsift = np.asarray(goodsift)
    similarity_sift = (goodsift.shape[0] / feature_list[1]) * 100

    # Apply ratio test on SURF matches
    goodsurf = []
    for m, n in surfmatches:
        if m.distance < 0.9 * n.distance:
            goodsurf.append([m])
    goodsurf = np.asarray(goodsurf)
    similarity_surf = (goodsurf.shape[0] / feature_list[2]) * 100

    # Apply ratio test on ORB matches
    goodorb = []
    for m, n in orbmatches:
        if m.distance < 0.9 * n.distance:
            goodorb.append([m])
    goodorb = np.asarray(goodorb)
    similarity_orb = (goodorb.shape[0] / feature_list[3]) * 100

    # Apply ratio test on BRISK matches
    goodbrisk = []
    for m, n in briskmatches:
        if m.distance < 0.9 * n.distance:
            goodbrisk.append([m])
    goodbrisk = np.asarray(goodbrisk)

    #Calculating the Similarity using the BRISK algorithm
    similarity_brisk = (goodbrisk.shape[0] / feature_list[4]) * 100
    features_result = (similarity_akaze + similarity_brisk + similarity_orb +
                       similarity_sift + similarity_surf) / 5

    # calculating overall similarity by aggregating the results of the various feature matching algorithms
    #print("Overall similarity using features: ")
    #print(features_result)
    return features_result
Code Example #20
import cv2
import numpy as np

AKAZE = cv2.AKAZE_create()
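# Note: cv2.BFMatcher() defaults to NORM_L2; AKAZE's default MLDB descriptors are
# binary, so cv2.BFMatcher(cv2.NORM_HAMMING), as used in the other examples above,
# is usually the more appropriate choice.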
BFMatcher = cv2.BFMatcher()
image = cv2.imread('media/lena.jpg', 0)
templ = cv2.flip(image[200:400, 200:400], -1)
kp_desc1 = (AKAZE.detectAndCompute(templ, None))
kp_desc2 = (AKAZE.detectAndCompute(image, None))
matches = BFMatcher.match(kp_desc1[1], kp_desc2[1])
print((str(len(matches))) + ' matches.')
cv2.imshow(
    'akaze match',
    cv2.drawMatches(templ, kp_desc1[0], image, kp_desc2[0], matches, image))
if cv2.waitKey(0) & 0xff == 27:
    pass
cv2.destroyAllWindows()
Code Example #21
    def extract_descriptor(self, image, feature):
        akaze = cv2.AKAZE_create()
        _, descriptors = akaze.compute(image, feature)
        return descriptors
Code Example #22
def upload():
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    # conn = psycopg2.connect(
    # host = "0.0.0.0",
    # port = 5432,
    # database=POSTG_DB,
    # user=POSTG_ID,
    # password=POSTG_PW)
    shutil.rmtree(SAVE_DIR)
    os.mkdir(SAVE_DIR)
    s3 = boto3.client('s3', region_name='ap-northeast-1',config=Config(signature_version='s3v4'))
    # Handle the case where no file was uploaded
    if 'image' not in request.files:
        flash('ファイルがありません','failed')
        return redirect(request.url)
    img1 = request.files['image']
    # Check the uploaded file
    if img1 and allowed_file(img1.filename):
        img1_secure = secure_filename(img1.filename)
    else:
        flash('画像ファイルを入れてください','failed')
        sys.exit(1)
    stream = img1.stream
    img_array = np.asarray(bytearray(stream.read()), dtype=np.uint8)
    img = cv2.imdecode(img_array, 1)
    img_size = (200, 200)
    ret = {}
    Img =  Image.open(img1)
    dt_now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
    save_path = os.path.join(SAVE_DIR, dt_now + "." + img1_secure)
    Img.save(save_path)

    img1 = glob.glob(save_path)
    img_url = img1[0]
    
    #####################################

    target_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    target_img = cv2.resize(target_img, img_size)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    # detector = cv2.ORB_create()
    detector = cv2.AKAZE_create()
    (_, target_des) = detector.detectAndCompute(target_img, None)
    # conn = psycopg2.connect(
    # host = "0.0.0.0",
    # port = 5432,
    # database=POSTG_DB,
    # user=POSTG_ID,
    # password=POSTG_PW)
    c = conn.cursor()
    c.execute('SELECT * FROM flask_similar')
    rows = c.fetchall()
    for row in rows:
        if not row[1].endswith(('.png', '.jpg', '.jpeg')):
            continue

        try:
            numpy_img_data = np.array(row[2][row[1]]).astype(np.uint8)
            matches = bf.match(target_des, numpy_img_data)
            dist = [m.distance for m in matches]
            score = sum(dist) / len(dist)
            if score <= 100:
                score = 100
            score = 100.0 / score
        except cv2.error:
            score = 100000
        ret[row[1]] = score
    conn.close()

    ############################################################
    
    dic_sorted = sorted(ret.items(), reverse=True,key=lambda x:x[1])[:3]
    # dic_sorted = sorted(ret.items(), reverse=True,key=lambda x:x[1])[:10]
    # dic_sorted = random.sample(dic_sorted,2)
    # dic_sorted = sorted(dic_sorted, reverse=True,key=lambda x:x[1])
    estimated_d =[]
    exists_img =[]
    for file in dic_sorted:
        img_path = s3.generate_presigned_url(
        ClientMethod = 'get_object',
        Params = {'Bucket' : AWS_STORAGE_BUCKET_NAME, 'Key' : "actress/"+ file[0]},
        ExpiresIn = 600,
        HttpMethod = 'GET')
        
        if file[1] >= 0.85:
            estimated_d.append("類似度 高")
        elif file[1] >= 0.8:
            estimated_d.append("類似度 中")
        else:
            estimated_d.append("類似度 低")
        exists_img.append(img_path)
        
    return render_template('index.html',img_url=img_url, data= zip(exists_img,estimated_d))
コード例 #23
0
import cv2
import numpy as np
import matplotlib.pyplot as plt
from utils import *
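# Markerless AR sketch: AKAZE features from a reference image ('model.jpg') are matched
# against each camera frame with a ratio test; once enough good matches are found, the
# matched model points are collected (the example is truncated after building src_pts).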

model_img = cv2.imread('model.jpg', 0)
model_img = cv2.pyrDown(model_img)
model_img = cv2.pyrDown(model_img)

akaze = cv2.AKAZE_create()
matcher = cv2.DescriptorMatcher_create(
    cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
kp_model, des_model = akaze.detectAndCompute(model_img, None)
obj = OBJ('key.obj', swapyz=True)

cam = cv2.VideoCapture(0)
while True:
    ret, frame = cam.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    kp_frame, des_frame = akaze.detectAndCompute(gray, None)
    matches = matcher.knnMatch(des_model, des_frame, 2)
    good = []
    nn_match_ratio = 0.9
    for m, n in matches:
        if m.distance < nn_match_ratio * n.distance:
            good.append(m)

    if len(good) > 21:
        src_pts = np.float32([kp_model[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
コード例 #24
0
def handle_image_message(event):
    s3 = boto3.client('s3', region_name='ap-northeast-1',config=Config(signature_version='s3v4'))
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    message_content = line_bot_api.get_message_content(event.message.id)
    shutil.rmtree(SAVE_DIR)
    os.mkdir(SAVE_DIR)
    i = Image.open(BytesIO(message_content.content))
    save_path = SAVE_DIR +"/" + event.message.id + '.jpg'
    i.save(save_path)
    filename = os.listdir(SAVE_DIR +"/")
    img_size = (200, 200)
    ret = {}

#     #####################################

    filename = SAVE_DIR +"/" + filename[0]
    target_img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    target_img = cv2.resize(target_img, img_size)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    # detector = cv2.ORB_create()
    detector = cv2.AKAZE_create()
    (_, target_des) = detector.detectAndCompute(target_img, None)
    # conn = psycopg2.connect(
    # host = "0.0.0.0",
    # port = 5432,
    # database=POSTG_DB,
    # user=POSTG_ID,
    # password=POSTG_PW)
    c = conn.cursor()
    c.execute('SELECT * FROM flask_similar')
    rows = c.fetchall()
    for row in rows:
        if not row[1].endswith(('.png', '.jpg', '.jpeg')):
            continue

        try:
            numpy_img_data = np.array(row[2][row[1]]).astype(np.uint8)
            matches = bf.match(target_des, numpy_img_data)
            dist = [m.distance for m in matches]
            score = sum(dist) / len(dist)
            if score <= 100:
                score = 100
            score = 100.0 / score
        except cv2.error:
            score = 100000
        ret[row[1]] = score
    conn.close()
#     ############################################################
    
    
    
    dic_sorted = sorted(ret.items(), reverse=True,key=lambda x:x[1])[:3]
    estimated_d =[]
    exists_img =[]
    for file in dic_sorted:
        img_path = s3.generate_presigned_url(
        ClientMethod = 'get_object',
        Params = {'Bucket' : AWS_STORAGE_BUCKET_NAME, 'Key' : "actress/"+ file[0]},
        ExpiresIn = 600,
        HttpMethod = 'GET')
        if file[1] >= 0.85:
            estimated_d.append("類似度 高")
        elif file[1] >= 0.8:
            estimated_d.append("類似度 中")
        else:
            estimated_d.append("類似度 低")

        exists_img.append(img_path)
    
    line_bot_api.reply_message(
        event.reply_token,
        [
        ImageSendMessage(original_content_url = exists_img[0],
        preview_image_url=exists_img[0]),
        TextSendMessage(text=estimated_d[0]),
        ImageSendMessage(original_content_url = exists_img[1],
        preview_image_url=exists_img[1]),
        TextSendMessage(text=estimated_d[1])]
    )
コード例 #25
0
ファイル: cloning.py プロジェクト: AlexRiina/sherloq
    def process(self):
        start = time()
        self.status_label.setText(self.tr('Processing, please wait...'))
        algorithm = self.detector_combo.currentIndex()
        response = 100 - self.response_spin.value()
        matching = self.matching_spin.value() / 100 * 255
        distance = self.distance_spin.value() / 100
        cluster = self.cluster_spin.value()
        modify_font(self.status_label, bold=False, italic=True)
        QCoreApplication.processEvents()

        if self.kpts is None:
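            # Detect keypoints with the selected detector (BRISK / ORB / AKAZE) and keep
            # only the strongest ones, thresholding on the normalized keypoint response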
            if algorithm == 0:
                detector = cv.BRISK_create()
            elif algorithm == 1:
                detector = cv.ORB_create()
            elif algorithm == 2:
                detector = cv.AKAZE_create()
            else:
                return
            mask = self.mask if self.onoff_button.isChecked() else None
            self.kpts, self.desc = detector.detectAndCompute(self.gray, mask)
            self.total = len(self.kpts)
            responses = np.array([k.response for k in self.kpts])
            strongest = (cv.normalize(responses, None, 0, 100, cv.NORM_MINMAX)
                         >= response).flatten()
            self.kpts = list(compress(self.kpts, strongest))
            self.desc = self.desc[strongest]

        if self.matches is None:
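            # Match every descriptor against all others within the chosen Hamming radius,
            # then flatten the result and drop trivial self-matches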
            matcher = cv.BFMatcher_create(cv.NORM_HAMMING, True)
            self.matches = matcher.radiusMatch(self.desc, self.desc, matching)
            if self.matches is None:
                self.status_label.setText(
                    self.tr('No keypoint match found with current settings'))
                modify_font(self.status_label, italic=False, bold=True)
                return
            self.matches = [
                item for sublist in self.matches for item in sublist
            ]
            self.matches = [
                m for m in self.matches if m.queryIdx != m.trainIdx
            ]

        if self.clusters is None:
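            # Discard matches between nearby keypoints, then group the remaining matches into
            # clusters whose endpoints lie close to each other; only groups with at least
            # `cluster` matches are kept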
            self.clusters = []
            total = len(self.matches)
            min_dist = distance * np.min(self.gray.shape) / 2
            kpts_a = np.array([p.pt for p in self.kpts])
            ds = np.linalg.norm([
                kpts_a[m.queryIdx] - kpts_a[m.trainIdx] for m in self.matches
            ],
                                axis=1)

            self.matches = [
                m for i, m in enumerate(self.matches) if ds[i] > min_dist
            ]

            total = len(self.matches)
            progress = QProgressDialog(self.tr('Clustering matches...'),
                                       self.tr('Cancel'), 0, total, self)
            progress.canceled.connect(self.cancel)
            progress.setWindowModality(Qt.WindowModal)

            for i in range(total):
                match0 = self.matches[i]
                d0 = ds[i]

                query0 = match0.queryIdx
                train0 = match0.trainIdx
                group = [match0]

                for j in range(i + 1, total):
                    match1 = self.matches[j]
                    query1 = match1.queryIdx
                    train1 = match1.trainIdx
                    if query1 == train0 and train1 == query0:
                        continue
                    d1 = ds[j]
                    if np.abs(d0 - d1) > min_dist:
                        continue

                    a0 = np.array(self.kpts[query0].pt)
                    b0 = np.array(self.kpts[train0].pt)

                    a1 = np.array(self.kpts[query1].pt)
                    b1 = np.array(self.kpts[train1].pt)

                    aa = np.linalg.norm(a0 - a1)
                    bb = np.linalg.norm(b0 - b1)
                    ab = np.linalg.norm(a0 - b1)
                    ba = np.linalg.norm(b0 - a1)

                    if 0 < aa < min_dist and 0 < bb < min_dist:
                        pass
                    elif 0 < ab < min_dist and 0 < ba < min_dist:
                        pass
                    else:
                        continue

                    for g in group:
                        if g.queryIdx == train1 and g.trainIdx == query1:
                            break
                    else:
                        group.append(match1)

                if len(group) >= cluster:
                    self.clusters.append(group)
                progress.setValue(i)
                if self.canceled:
                    self.canceled = False
                    return
            progress.setValue(total)

        output = np.copy(self.image)
        hsv = np.zeros((1, 1, 3))
        nolines = self.nolines_check.isChecked()
        angles = []
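        # For every match in every cluster, draw circles at both keypoints (and optionally a line),
        # colored by the match orientation (hue) and descriptor distance (value)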
        for c in self.clusters:
            for m in c:
                ka = self.kpts[m.queryIdx]
                pa = tuple(map(int, ka.pt))
                sa = int(np.round(ka.size))
                kb = self.kpts[m.trainIdx]
                pb = tuple(map(int, kb.pt))
                sb = int(np.round(kb.size))
                angle = np.arctan2(pb[1] - pa[1], pb[0] - pa[0])
                if angle < 0:
                    angle += np.pi
                angles.append(angle)
                hsv[0, 0, 0] = angle / np.pi * 180
                hsv[0, 0, 1] = 255
                hsv[0, 0, 2] = m.distance / matching * 255
                rgb = cv.cvtColor(hsv.astype(np.uint8), cv.COLOR_HSV2BGR)
                rgb = tuple([int(x) for x in rgb[0, 0]])
                cv.circle(output, pa, sa, rgb, 1, cv.LINE_AA)
                cv.circle(output, pb, sb, rgb, 1, cv.LINE_AA)
                if not nolines:
                    cv.line(output, pa, pb, rgb, 1, cv.LINE_AA)

        regions = 0
        if angles:
            angles = np.reshape(np.array(angles, dtype=np.float32),
                                (len(angles), 1))
            if np.std(angles) < 0.1:
                regions = 1
            else:
                criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER,
                            10, 1.0)
                attempts = 10
                flags = cv.KMEANS_PP_CENTERS
                compact = [
                    cv.kmeans(angles, k, None, criteria, attempts, flags)[0]
                    for k in range(1, 11)
                ]
                compact = cv.normalize(np.array(compact), None, 0, 1,
                                       cv.NORM_MINMAX)
                regions = np.argmax(compact < 0.005) + 1
        self.viewer.update_processed(output)
        self.process_button.setEnabled(False)
        modify_font(self.status_label, italic=False, bold=True)
        self.status_label.setText(
            self.tr('Keypoints: {} --> Filtered: {} --> Matches: {} --> Clusters: {} --> Regions: {}'.format(
                self.total, len(self.kpts), len(self.matches), len(self.clusters), regions)))
        self.info_message.emit(
            self.tr('Copy-Move Forgery = {}'.format(elapsed_time(start))))
コード例 #26
0
def admin_ins():
    # Connect to the database
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    # conn = psycopg2.connect(
    # host = "0.0.0.0",
    # port = 5432,
    # database=POSTG_DB,
    # user=POSTG_ID,
    # password=POSTG_PW)
    
    cur = conn.cursor()
    ##############################################
    
    if 'insert_img1' not in request.files:
        flash('ファイルがありません','failed')
        return redirect(request.url)
    fileToUpload = request.files['insert_img1']
    # Check the uploaded file
    if fileToUpload and allowed_file(fileToUpload.filename):
        pass  # valid image file, continue below
    else:
        flash('画像ファイルを入れてください','failed')
        sys.exit(1)

    shutil.rmtree(SAVE_DIR)
    os.mkdir(SAVE_DIR)
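    # Extract the original filename from the FileStorage repr (str() gives "<FileStorage: 'name.jpg' ('image/jpeg')>");
    # fileToUpload.filename would be a more direct way to get the same value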
    file_url = str(fileToUpload)
    ins_str = file_url.split(" ")
    ins_img_url = ins_str[1][1:-1]
    i = Image.open(fileToUpload)
    save_path = SAVE_DIR +"/" + ins_img_url
    i.save(save_path)
    filename = os.listdir(SAVE_DIR +"/")
    img_size = (200, 200)
    filename1 = SAVE_DIR +"/" + filename[0]
    
    target_img = cv2.imread(filename1, cv2.IMREAD_GRAYSCALE)
    target_img = cv2.resize(target_img, img_size)

    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    # detector = cv2.ORB_create()
    detector = cv2.AKAZE_create()
    (_, target_des) = detector.detectAndCompute(target_img, None)
    # Execute the SQL command (placeholders are used, so values are escaped automatically)

    cur.execute("INSERT INTO flask_similar (image_name, image_json_feature) VALUES (%s, %s)", (filename[0], json.dumps({filename[0]:target_des.tolist()})))
    # Commit the transaction
    conn.commit()
    # Close the cursor and the connection
    cur.close()
    conn.close()
    s3 = boto3.resource('s3')  # get the S3 resource object

    bucket = s3.Bucket(AWS_STORAGE_BUCKET_NAME)
    print("filename[0]",filename[0])
    bucket.upload_file(filename1, 'actress/' + filename[0])
###################################################

    return render_template("admin.html")
コード例 #27
0
 def __init__(self) :
     self.ratio=0.9
     self.min_match=10
     #self.sift=cv2.xfeatures2d.SIFT_create()
     self.akaze=cv2.AKAZE_create()
     self.smoothing_window_size=800
コード例 #28
0
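# Match AKAZE keypoints between two photos, keep the matches that pass Lowe's ratio test,
# and save a visualization of the strongest matches.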
import cv2
img1 = cv2.imread(r'C:\Users\mueda\Documents\S__41476104.jpg')
img2 = cv2.imread(r'C:\Users\mueda\Documents\S__41476106.jpg')
detector = cv2.AKAZE_create()
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # AKAZE descriptors are binary, so Hamming distance fits better than the default L2 norm
matches = bf.knnMatch(des1, des2, k=2)
ratio = 0.5
good = []
for m, n in matches:
    if m.distance < ratio * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[:100], None, flags=2)
cv2.imshow('img', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('akaze_matching.jpg', img3)
コード例 #29
0
ファイル: imageComp.py プロジェクト: MatchaSlap/mlPractice
 def calcAKAZE(self, img, IMG_SIZE=(200,200)):
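     # NOTE: IMG_SIZE is accepted but never used; resizing img to IMG_SIZE before detection
     # would make the descriptor sets more comparable across differently sized inputs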
     img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
     detector = cv2.AKAZE_create()
     (kp, des) = detector.detectAndCompute(img, None)
     print(type(des),des.shape)
     return des
コード例 #30
0
def main():
    print("----------matching start----------")
    time_start = time.time()
    akaze = cv2.AKAZE_create()
    #akaze = cv2.ORB_create()

    # query = camera image
    # map   = map image

    # Build a gamma-correction lookup table
    gamma = 1.8
    gamma_cvt = np.zeros((256, 1), dtype='uint8')
    for i in range(256):
        gamma_cvt[i][0] = 255 * (float(i) / 255)**(1.0 / gamma)

    # Scale factors for the query and map images (important parameters)
    expand_query = 0.5
    expand_map = 2

    # Load the query image and compute its features
    query_img = cv2.imread('./img/query/img_camera1.png', 0)
    query_img = cv2.LUT(query_img, gamma_cvt)
    # cv2.imwrite('./log/input_img.png', query_img)
    query_img = cv2.resize(query_img, (int(query_img.shape[1] * expand_query),
                                       int(query_img.shape[0] * expand_query)))
    height_query, width_query = query_img.shape[:2]
    kp_query, des_query = akaze.detectAndCompute(query_img, None)
    # print('[time] feature calculation query: ', time.time() - time_start)

    # Load the map image and compute its features
    map_img = cv2.imread('./img/map/field.png', 0)
    map_img = cv2.resize(map_img, (int(
        map_img.shape[1] * expand_map), int(map_img.shape[0] * expand_map)))
    height_map, width_map = map_img.shape[:2]
    # cv2.imwrite('./log/fig/sample_img.png', sample_img)
    kp_map, des_map = akaze.detectAndCompute(map_img, None)
    # print('[time] feature calculation map: ', time.time() - time_start)

    # Run feature matching (k-nearest neighbors)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des_query, des_map, k=2)
    print('[time] feature matching: ', time.time() - time_start)

    # Keep only high-quality matches (ratio test)
    ratio = 0.8  # important parameter
    good = []
    for m, n in matches:
        if m.distance < ratio * n.distance:
            good.append([m])

    # With one or fewer correspondences the relative pose cannot be computed, so return None
    if len(good) <= 1:
        print("[error] can't detect matching feature point")
        return None, None, None

    # From the surviving matches, take the ones with the smallest distances
    good = sorted(good, key=lambda x: x[0].distance)
    print("valid point number: ",
          len(good))  # これがあまりに多すぎたり少なすぎたりする場合はパラメータを変える
    point_num = 20  # 上位何個の点をマッチングに使うか(重要パラメータ)
    if len(good) < point_num:
        point_num = len(good)  # もし20個なかったら全て使う

    # Draw the matching result
    result_img = cv2.drawMatchesKnn(query_img,
                                    kp_query,
                                    map_img,
                                    kp_map,
                                    good[:point_num],
                                    None,
                                    flags=0)
    img_matching = cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
    plt.imshow(img_matching)
    plt.show()
    print('[time] draw mid result: ', time.time() - time_start)

    #------ position and orientation calculation from here on -------
    query_kp = []
    map_kp = []

    # Extract the corresponding keypoints from the two images
    for p in good[:point_num]:
        query_kp.append(kp_query[p[0].queryIdx])
        map_kp.append(kp_map[p[0].trainIdx])

    # Voting yields the relative angle, relative scale, and the best-matching pair of points between the two images
    deg_value, size_rate, m1, m2 = vote_point(query_kp, map_kp, point_num)
    if deg_value is None:
        return None, None, None

    # print(f"calcurated deg: {deg_value}, size_rate: {size_rate}")
    # print(f"two matching point index: {m1}, {m2}")
    # print('[time] point calculation: ', time.time() - time_start)

    # Relation between the first matched point and the center of the query image
    q_x1, q_y1 = query_kp[m1].pt
    m_x1, m_y1 = map_kp[m1].pt
    q_xcenter = int(width_query / 2)
    q_ycenter = int(height_query / 2)
    q_center_deg = math.atan2(q_ycenter - q_y1,
                              q_xcenter - q_x1) * 180 / math.pi
    q_center_len = math.sqrt((q_xcenter - q_x1)**2 + (q_ycenter - q_y1)**2)
    #print(q_xcenter, q_ycenter, q_center_deg, q_center_len)

    # Convert that relation into map-image parameters
    m_center_deg = q_center_deg - deg_value
    m_center_len = q_center_len / size_rate
    #print(t_center_deg, t_center_len)

    # Position of the center point on the map image
    m_center_rad = m_center_deg * math.pi / 180
    m_xcenter = m_x1 + m_center_len * math.cos(m_center_rad)
    m_ycenter = m_y1 + m_center_len * math.sin(m_center_rad)
    # print(m_center_rad, math.cos(m_center_rad), math.sin(m_center_rad), m_xcenter, m_ycenter)

    # Check whether the computed values fall within the valid coordinate range
    if (m_xcenter < 0) or (m_xcenter > width_map):
        print("[error] invalid x value")
        return None, None, None
    if (m_ycenter < 0) or (m_ycenter > height_map):
        print("[error] invalid y value")
        return None, None, None
    if (deg_value < 0) or (deg_value > 360):
        print("[error] invalid deg value")
        return None, None, None

    x_current = int(m_xcenter / expand_map)
    y_current = int(m_ycenter / expand_map)
    drc_current = deg_value

    print('*****detection succeeded!*****')
    print('[time] final time: {:.4f} (s)'.format(time.time() - time_start))
    print("final output score-> x: {}, y: {}, drc: {:.2f}°".format(
        x_current, y_current, drc_current))

    # Draw the center point
    draw_final(result_img, m_xcenter, m_ycenter, deg_value, width_query)
    return x_current, y_current, drc_current