def plot_matches(im1, im2, locs1, locs2, match_indices, show_below=True, figsize=(6, 4)):
    '''Display two images side by side with lines connecting the matched points.'''
    im3 = concatenate_img_horiz(im1, im2)
    if show_below:
        im3 = np.vstack((im3, im3))
    ip.show_img(im3, figsize=figsize)

    width1 = im1.shape[1]
    for i, m in enumerate(match_indices):
        if m >= 0:
            # locs are (row, col), so index 1 is x and index 0 is y
            plt.plot([locs1[i][1], locs2[m][1] + width1],
                     [locs1[i][0], locs2[m][0]], 'c')
    plt.axis('off')
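# A minimal usage sketch with hypothetical data (im1 and im2 are assumed to
# be already loaded): locs1/locs2 are (row, col) keypoint arrays, and
# match_indices[i] is the index in locs2 matched to locs1[i], or -1 if none.
locs1 = np.array([[100, 50], [200, 80]])
locs2 = np.array([[110, 40], [190, 90]])
match_indices = [0, 1]
plot_matches(im1, im2, locs1, locs2, match_indices)
plt.show()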
def load_merton_data(data_path='./MertonCollege', show_detail=False):
    """
    Load the "Merton College I" data from https://www.robots.ox.ac.uk/~vgg/data/mview/

    Args:
        data_path (str): path to the directory the data was extracted to.
            Assumes the subdirectories image, 2D and 3D exist.

    Returns:
        im1, im2: the first two images.
        points2D: list of 2D point arrays, one per view.
        points3D: array of the 3D points.
        corr: for each of the 717 3D points, the index of the corresponding
            2D point in each view's .corners file (e.g. 2D/001.corners).
        P: list of Camera objects holding the camera matrices.
    """
    # Load the images
    im1 = ip.imread(os.path.join(data_path, 'image/001.jpg'))
    im2 = ip.imread(os.path.join(data_path, 'image/002.jpg'))
    if show_detail:
        ip.show_img(im1)
        ip.show_img(im2)

    # Load the 2D points of each view into a list
    points2D = []
    for i in range(3):
        file_path = os.path.join(data_path, '2D/{:0=3}.corners'.format(i + 1))
        points2D.append(np.loadtxt(file_path).T)

    # Load the 3D points
    points3D = np.loadtxt(os.path.join(data_path, '3D/p3d')).T

    # Load the correspondences (missing entries marked '*' become -1)
    corr = np.genfromtxt(os.path.join(data_path, '2D/nview-corners'),
                         dtype='int', missing_values='*')

    # Load the camera parameters into Camera objects
    P = []
    for i in range(3):
        p_matrix = np.loadtxt(os.path.join(data_path, '2D/{:0=3}.P'.format(i + 1)))
        P.append(camera.Camera(p_matrix))

    return im1, im2, points2D, points3D, corr, P
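# A quick sketch of how corr can be used, assuming genfromtxt fills the '*'
# entries with -1: pull out the 2D points in view 1 that have a known 3D
# correspondence.
im1, im2, points2D, points3D, corr, P = load_merton_data()
ndx = corr[:, 0]                    # indices into points2D[0], -1 where missing
visible = ndx >= 0
x1 = points2D[0][:, ndx[visible]]   # 2D points in view 1 with a 3D match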
    p.append([c[0] - wid, c[1] + wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] - wid])
    return np.array(p).T


# +
img_query = ip.imread('CalibrationImage/query.JPG')
# Crop the image (the slicing below crops rather than resizes)
img_query = img_query[900:2400, 2200:3700, :]
ip.show_img(img_query, show_axis=True)

K = camera.calculate_camera_matrix_w_sz(sz=(6000, 4000), lens='PZ')
K[0, 2] = img_query.shape[1] / 2
K[1, 2] = img_query.shape[0] / 2

cap_file = cv2.VideoCapture('./CalibrationImage/material.MP4')
print(type(cap_file))
print("succeeded in opening video:", cap_file.isOpened())

width = int(cap_file.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap_file.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap_file.get(cv2.CAP_PROP_FPS)
total_frame = int(cap_file.get(cv2.CAP_PROP_FRAME_COUNT))
print(cap_file.set(cv2.CAP_PROP_POS_FRAMES, 0))

# Create a VideoWriter.
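# A minimal sketch of what the VideoWriter setup could look like, reusing the
# width/height/fps computed above; the fourcc and the output path are
# assumptions, not values from the original notebook.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter('./CalibrationImage/output.mp4', fourcc, fps,
                         (width, height))
ret, frame = cap_file.read()  # read one frame to confirm the capture works
if ret:
    writer.write(frame)
writer.release()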
import matplotlib.pyplot as plt
import image_processing as ip
import desc_val as dv
import homography
import sfm

im1, im2, points2D, points3D, corr, P = sfm.load_merton_data(show_detail=True)

# Convert the 3D points to homogeneous coordinates and project them
X = homography.make_homog(points3D)
x = P[0].project(X)

points2D[0]

# +
# Plot the points on top of image 1
ip.show_img(im1, figsize=(6, 4))
plt.plot(points2D[0][0], points2D[0][1], '*')
plt.show()

ip.show_img(im1, figsize=(6, 4))
plt.plot(x[0], x[1], '*')
plt.show()
# -

from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
import matplotlib
print(matplotlib.__version__)

# +
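# The body of the cell opened above is not shown; given the mplot3d imports,
# a likely continuation is a 3D scatter of the Merton points. A minimal
# sketch:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(points3D[0], points3D[1], points3D[2], 'k.')
plt.show()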
import sfm

# +
# %matplotlib inline
show_detail = True
ratio = 0.5

# Load the images and compute feature points
im1 = ip.imread('./CalibrationImage/sfm_005.JPG')
# Shrink the image
im1 = cv2.resize(im1, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
im2 = ip.imread('./CalibrationImage/sfm_006.JPG')
# Shrink the image
im2 = cv2.resize(im2, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)

ip.show_img(im1, show_axis=True)
ip.show_img(im2, show_axis=True)
plt.show()

# +
# K = np.array([[2394,0,932],[0,2398,628],[0,0,1]])
K = camera.calculate_camera_matrix_w_sz(im1.shape[1::-1], lens='PZ')

# Initiate AKAZE detector
akaze = cv2.AKAZE_create()
# Compute key points and descriptors
kp1, des1 = akaze.detectAndCompute(im1, None)
kp2, des2 = akaze.detectAndCompute(im2, None)

# Use FLANN as the matcher.
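# The matching step is cut off above; this is a sketch using the same
# FLANN-with-LSH setup as compute_ransac_homography in this repo, followed
# by an essential matrix estimate with the calibrated K. The 0.7 ratio and
# the RANSAC prob/threshold values are assumptions.
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6,
                    key_size=12, multi_probe_level=1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test
good = [m for m, n in (p for p in matches if len(p) == 2)
        if m.distance < 0.7 * n.distance]

pts1 = np.float32([kp1[m.queryIdx].pt for m in good])
pts2 = np.float32([kp2[m.trainIdx].pt for m in good])

# Estimate the essential matrix and recover the relative camera pose
E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC,
                               prob=0.999, threshold=1.0)
_, R, t, mask_pose = cv2.recoverPose(E, pts1, pts2, K)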
import matplotlib.pyplot as plt
# Local modules
import image_processing as ip
# import desc_val as dv
# import camera
# import feature_detection as fd
# import homography

# +
ratio = 0.3

img_query = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
# Shrink the image
img_query = cv2.resize(img_query, None, fx=ratio, fy=ratio,
                       interpolation=cv2.INTER_AREA)
ip.show_img(img_query, show_axis=True)

img_train = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
# Shrink the image
img_train = cv2.resize(img_train, None, fx=ratio, fy=ratio,
                       interpolation=cv2.INTER_AREA)
ip.show_img(img_train, show_axis=True)

# +
# Initiate AKAZE detector
akaze = cv2.AKAZE_create()
# Compute key points and descriptors
kp1, des1 = akaze.detectAndCompute(img_query, None)
kp2, des2 = akaze.detectAndCompute(img_train, None)

# Use FLANN as the matcher.
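# The FLANN matching itself is cut off here; a short sketch of matching and
# visualizing the knn matches. The LSH parameters mirror the ones in
# compute_ransac_homography; the 0.7 ratio and the count of 30 are assumptions.
index_params = dict(algorithm=6, table_number=6, key_size=12,
                    multi_probe_level=1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)

good = [[m] for m, n in (p for p in matches if len(p) == 2)
        if m.distance < 0.7 * n.distance]
img_matches = cv2.drawMatchesKnn(img_query, kp1, img_train, kp2,
                                 good[:30], None, flags=2)
ip.show_img(img_matches, figsize=(20, 30))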
def expand(image, ratio):
    """Scale an image by ratio with an affine warp. The interpolation
    method can also be specified."""
    h, w = image.shape[:2]
    src = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)
    dest = src * ratio
    affine = cv2.getAffineTransform(src, dest)
    # flags must be passed by keyword: the 4th positional argument of
    # warpAffine is dst, not flags.
    return cv2.warpAffine(image, affine, (2 * w, 2 * h),
                          flags=cv2.INTER_LANCZOS4)


# +
ratio = 0.3

img_for_camera_calibration = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
im1 = expand(img_for_camera_calibration, ratio)
im1 = im1[:int(4000 * ratio), :int(6000 * ratio)]
ip.show_img(im1, show_axis=True)

img_math_test = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
im2 = expand(img_math_test, ratio)
im2 = im2[:int(4000 * ratio), :int(6000 * ratio)]
ip.show_img(im2, show_axis=True)

# + {}
akaze = cv2.AKAZE_create()
kp1, des1 = akaze.detectAndCompute(im1, None)
kp2, des2 = akaze.detectAndCompute(im2, None)

# Create a Brute-Force Matcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
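# A short sketch of inspecting the BFMatcher result: sort by descriptor
# distance and draw the closest matches (the count of 30 is an assumption).
matches = sorted(matches, key=lambda x: x.distance)
img_result = cv2.drawMatches(im1, kp1, im2, kp2, matches[:30], None, flags=2)
ip.show_img(img_result, figsize=(20, 30))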
def compute_ransac_homography(img_query_orig, img_train_orig, MIN_MATCH_COUNT=10,
                              show_detail=False, save_result=False):
    """
    Match the query and train images with AKAZE, remove outliers with
    RANSAC, and compute the homography matrix.

    Args:
        MIN_MATCH_COUNT (int): minimum number of matches. With fewer
            matches the homography is not computed.
    """
    img_query = img_query_orig.copy()
    img_train = img_train_orig.copy()

    # Initiate AKAZE detector
    akaze = cv2.AKAZE_create()
    # Compute key points and descriptors
    kp1, des1 = akaze.detectAndCompute(img_query, None)
    kp2, des2 = akaze.detectAndCompute(img_train, None)

    # Use FLANN as the matcher.
    # FLANN parameters
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=6,
                        key_size=12,
                        multi_probe_level=1)
    search_params = dict(checks=50)

    # Approximate nearest neighbours, returning the 2 nearest
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Store all the good matches as per Lowe's ratio test: keep a key point
    # only if it is clearly closer than the second-nearest candidate.
    good_matches = []
    for i in range(len(matches)):
        if len(matches[i]) < 2:
            continue
        m, n = matches[i]
        if m.distance < 0.7 * n.distance:
            good_matches.append(m)

    # Sort by descriptor distance, closest first
    good_matches = sorted(good_matches, key=lambda x: x.distance)

    if show_detail:
        # Draw the result
        img_result = cv2.drawMatches(img_query, kp1, img_train, kp2,
                                     good_matches[:10], None, flags=2)
        ip.show_img(img_result, figsize=(20, 30))
        print('query kp: {}, train kp: {}, good matches: {}'.format(
            len(kp1), len(kp2), len(good_matches)))

    # Remove outliers with RANSAC and compute the homography matrix.
    # Note that OpenCV expects the coordinates as a 3-dimensional array.
    if len(good_matches) > MIN_MATCH_COUNT:
        # Extract the coordinates of the matching points
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        # Remove outliers with RANSAC
        homography_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    else:
        print("Not enough matches are found - %d/%d" % (len(good_matches), MIN_MATCH_COUNT))
        return None, None

    if show_detail or save_result:
        # Draw the result
        matchesMask = mask.ravel().tolist()

        # Take the height and width of the query image, build the rectangle
        # framing it, transform that rectangle with the computed
        # homography_matrix, and draw the transformed rectangle on the
        # train image.
        h, w = img_query.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, homography_matrix)
        cv2.polylines(img_train, [np.int32(dst)], True, (255, 100, 0), 3, cv2.LINE_AA)

        num_draw = 50
        draw_params = dict(
            # matchColor=(0, 255, 0),  # draw matches in green color
            singlePointColor=None,
            matchesMask=matchesMask[:num_draw],  # draw only inliers
            flags=2)
        img_result_2 = cv2.drawMatches(img_query, kp1, img_train, kp2,
                                       good_matches[:num_draw], None, **draw_params)
        if show_detail:
            ip.show_img(img_result_2, figsize=(20, 30))
            num_inlier = (mask == 1).sum()
            print('inliers: %d' % num_inlier)
        if save_result:
            ip.imwrite('ransac_match.jpg', img_result_2)

    return homography_matrix, mask
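# A minimal usage sketch (the image paths are taken from the cells above and
# may differ from the originally intended input): estimate the homography
# between two images and check the inlier fraction.
img_q = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
img_t = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
H, mask = compute_ransac_homography(img_q, img_t, show_detail=True)
if H is not None:
    print('homography:\n', H)
    print('inlier ratio: %.2f' % mask.mean())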
import camera
import feature_detection as fd
import homography

# +
ratio = 0.3
img_for_camera_calibration = ip.imread(
    'CalibrationImage/ImageforCameraCalibration.jpg')
# Shrink the image
im1 = cv2.resize(img_for_camera_calibration, None, fx=ratio, fy=ratio,
                 interpolation=cv2.INTER_AREA)
ip.show_img(im1, show_axis=True)
# -

# Plot a point at the top-left corner of the book
img_for_edit = img_for_camera_calibration.copy()
cv2.circle(img_for_edit, (2789, 1140), 50, (255, 255, 255), thickness=-1)
ip.show_img(img_for_edit, show_axis=True)

# Coordinates of the four corners in the photo
# (x, y measured from the top-left corner of the image)
# top left     (2789, 1140)
# top right    (3910, 1135)
# bottom left  (2782, 2685)
# bottom right (3902, 2693)

# top-left to bottom-left → vertical length
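# A sketch of the length computation the comment above sets up: the side
# lengths of the book in pixels, from the corner coordinates listed above.
corners = {'tl': np.array([2789, 1140]), 'tr': np.array([3910, 1135]),
           'bl': np.array([2782, 2685]), 'br': np.array([3902, 2693])}
vertical_len = np.linalg.norm(corners['tl'] - corners['bl'])
horizontal_len = np.linalg.norm(corners['tl'] - corners['tr'])
print('vertical: %.1f px, horizontal: %.1f px' % (vertical_len, horizontal_len))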