Example No. 1
import os

import numpy as np

# author's own modules
import camera
import image_processing as ip


def load_merton_data(data_path='./MertonCollege', show_detail=False):
    """
    Load the "Merton College I" data set from
    https://www.robots.ox.ac.uk/~vgg/data/mview/
    Args:
        data_path (str): directory the archive was extracted to; the image,
            2D and 3D subdirectories are assumed to exist there.
        show_detail (bool): if True, display the two loaded images.
    Returns:
        im1, im2: the first two images.
        points2D: list of 2 x N corner-point arrays, one per view.
        points3D: 3 x N array of 3D points.
        corr: for each of the 717 3D points, the index of the corresponding
            2D point in each view's .corners file (e.g. 2D/001.corners).
        P: list of camera.Camera objects, one per view.
    """
    # Load the images
    im1 = ip.imread(os.path.join(data_path, 'image/001.jpg'))
    im2 = ip.imread(os.path.join(data_path, 'image/002.jpg'))
    if show_detail:
        ip.show_img(im1)
        ip.show_img(im2)

    # Load the 2D image points into a list, one array per view
    points2D = []
    for i in range(3):
        file_path = os.path.join(data_path, '2D/{:0=3}.corners'.format(i + 1))
        points2D.append(np.loadtxt(file_path).T)

    # Load the 3D points
    points3D = np.loadtxt(os.path.join(data_path, '3D/p3d')).T

    # Load the point correspondences between the views
    corr = np.genfromtxt(os.path.join(data_path, '2D/nview-corners'),
                         dtype='int',
                         missing_values='*')

    # Load the camera parameters into Camera objects
    P = []
    for i in range(3):
        istrct_p = np.loadtxt(os.path.join(data_path, '2D/{:0=3}.P'.format(i + 1)))
        P.append(camera.Camera(istrct_p))

    return im1, im2, points2D, points3D, corr, P
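
# Usage sketch (assumed): load the data set and overlay the first view's
# corner points on the first image. Requires the Merton College data to be
# extracted under ./MertonCollege as described in the docstring.
import matplotlib.pyplot as plt

im1, im2, points2D, points3D, corr, P = load_merton_data()
plt.imshow(im1)
plt.plot(points2D[0][0], points2D[0][1], 'b.')  # points2D[0] is a 2 x N array (x; y)
plt.axis('off')
plt.show()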
Example No. 2
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])  # same as the first point, to close the drawing
    # vertical edges
    p.append([c[0] - wid, c[1] - wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] + wid])
    p.append([c[0] - wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] - wid])
    p.append([c[0] + wid, c[1] + wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] + wid])
    p.append([c[0] + wid, c[1] - wid, c[2] - wid])
    return np.array(p).T
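
# Usage sketch (assumed): cube_points (whose tail is shown above) is taken to
# return a 3 x N vertex array whose columns trace the bottom square, top
# square and vertical edges in drawing order, so one plot() call draws the
# whole wireframe.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection

box = cube_points([0, 0, 0.1], 0.1)
ax = plt.figure().add_subplot(projection='3d')
ax.plot(box[0], box[1], box[2])
plt.show()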



# +
img_query = ip.imread('CalibrationImage/query.JPG')
# Crop the image to the region of interest
img_query = img_query[900:2400, 2200:3700, :]
ip.show_img(img_query, show_axis=True)

K = camera.calculate_camera_matrix_w_sz(sz=(6000, 4000), lens='PZ')
# Re-centre the principal point on the cropped image
K[0, 2] = img_query.shape[1] / 2
K[1, 2] = img_query.shape[0] / 2

cap_file = cv2.VideoCapture('./CalibrationImage/material.MP4')
print(type(cap_file))
print("suceed open video:", cap_file.isOpened())
width = int(cap_file.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap_file.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap_file.get(cv2.CAP_PROP_FPS)
total_frame = int(cap_file.get(cv2.CAP_PROP_FRAME_COUNT))
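
# Sketch (assumed continuation): sample every n-th frame from the clip for
# later matching against img_query. The stride value is illustrative only.
stride = 30
frames = []
for idx in range(0, total_frame, stride):
    cap_file.set(cv2.CAP_PROP_POS_FRAMES, idx)
    ret, frame = cap_file.read()
    if not ret:
        break
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # OpenCV reads BGR
cap_file.release()
print(len(frames), 'frames sampled out of', total_frame)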
Example No. 3
print(im1.shape[0] / 2, im1.shape[1] / 2)

camera.calculate_camera_matrix_w_sz(im1.shape[1::-1],
                                    sz_orig=(932 * 2, 628 * 2),
                                    lens=None,
                                    f_orig=(2394, 2398))

# +
# %matplotlib inline
show_detail = True

K = np.array([[2394, 0, 932], [0, 2398, 628], [0, 0, 1]])

# Load the images and compute feature points
im1 = ip.imread('./Carl Olsson/Alcatraz_courtyard/San_Francisco_2313.jpg')
im2 = ip.imread('./Carl Olsson/Alcatraz_courtyard/San_Francisco_2314.jpg')

ip.show_img(im1)
ip.show_img(im2)
plt.show()

# K = camera.calculate_camera_matrix_w_sz(
#     im1.shape[1::-1], sz_orig=(932*2, 628*2), lens=None, f_orig=(2394, 2398))

# Initiate AKAZE detector
akaze = cv2.AKAZE_create()

# Compute key points and descriptors
kp1, des1 = akaze.detectAndCompute(im1, None)
kp2, des2 = akaze.detectAndCompute(im2, None)
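
# Sketch (assumed continuation): match the AKAZE descriptors with a ratio
# test, then estimate the essential matrix and relative pose from the
# calibrated correspondences. The 0.75 ratio and the RANSAC settings are
# illustrative values, not taken from the original notebook.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
good = [m for m, n in bf.knnMatch(des1, des2, k=2) if m.distance < 0.75 * n.distance]
pts1 = np.float32([kp1[m.queryIdx].pt for m in good])
pts2 = np.float32([kp2[m.trainIdx].pt for m in good])
E, inlier_mask = cv2.findEssentialMat(pts1, pts2, K.astype(np.float64),
                                      method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, pose_mask = cv2.recoverPose(E, pts1, pts2, K.astype(np.float64), mask=inlier_mask)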
Example No. 4
import numpy as np
import matplotlib.pyplot as plt
import cv2
# author's own modules
import camera
import image_processing as ip
import desc_val as dv
import homography
import sfm

# +
# %matplotlib inline
show_detail = True
ratio = 0.5
# Load the images and compute feature points
im1 = ip.imread('./CalibrationImage/sfm_005.JPG')
# Downscale the image
im1 = cv2.resize(im1, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)

im2 = ip.imread('./CalibrationImage/sfm_006.JPG')
# Downscale the image
im2 = cv2.resize(im2, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)

ip.show_img(im1, show_axis=True)
ip.show_img(im2, show_axis=True)
plt.show()

# +
# K = np.array([[2394,0,932],[0,2398,628],[0,0,1]])
K = camera.calculate_camera_matrix_w_sz(im1.shape[1::-1], lens='PZ')
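
# Sketch (assumed next step): given matched pixel coordinates and a relative
# pose recovered elsewhere (e.g. with cv2.recoverPose), triangulate the
# matches into 3D points. The function name and the 2 x N pts1/pts2 layout
# are assumptions, not part of the original notebook or the sfm module.
def triangulate_pair(K, R, t, pts1, pts2):
    P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])  # first camera at the world origin
    P2 = K @ np.hstack([R, t.reshape(3, 1)])           # second camera from the relative pose
    X_h = cv2.triangulatePoints(P1, P2, pts1, pts2)    # 4 x N homogeneous points
    return X_h[:3] / X_h[3]                            # 3 x N Euclidean points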
Example No. 5
import numpy as np
import cv2
import matplotlib.pyplot as plt

# author's own modules
import image_processing as ip
# import desc_val as dv
# import camera
# import feature_detection as fd
# import homography

# +
ratio = 0.3

img_query = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
# Downscale the image
img_query = cv2.resize(img_query, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
ip.show_img(img_query, show_axis=True)

img_train = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
# Downscale the image
img_train = cv2.resize(img_train, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
ip.show_img(img_train, show_axis=True)

# +
# Initiate AKAZE detector
akaze = cv2.AKAZE_create()

# Compute key points and descriptors
kp1, des1 = akaze.detectAndCompute(img_query, None)
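
# Sketch (assumed continuation): the snippet stops after the query image, so
# compute the train image's features as well, match them with a cross-checked
# Brute-Force matcher (Hamming norm for AKAZE's binary descriptors) and show
# the 50 best matches.
kp2, des2 = akaze.detectAndCompute(img_train, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
img_matches = cv2.drawMatches(img_query, kp1, img_train, kp2, matches[:50], None)
plt.imshow(img_matches)
plt.axis('off')
plt.show()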
Example No. 6
import numpy as np
import cv2

# author's own modules
import image_processing as ip
import feature_detection as fd
import homography


def expand(image, ratio):
    """Scale the image by ratio onto a canvas twice the original size."""
    h, w = image.shape[:2]
    src = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)
    dest = src * ratio
    affine = cv2.getAffineTransform(src, dest)
    # the interpolation method can also be specified
    return cv2.warpAffine(image, affine, (2 * w, 2 * h), flags=cv2.INTER_LANCZOS4)


# +
ratio = 0.3

img_for_camera_calibration = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
im1 = expand(img_for_camera_calibration, ratio)
im1 = im1[:int(4000 * ratio), :int(6000 * ratio)]
ip.show_img(im1, show_axis=True)

img_math_test = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
im2 = expand(img_math_test, ratio)
im2 = im2[:int(4000 * ratio), :int(6000 * ratio)]
ip.show_img(im2, show_axis=True)
# + {}
akaze = cv2.AKAZE_create()

kp1, des1 = akaze.detectAndCompute(im1, None)
kp2, des2 = akaze.detectAndCompute(im2, None)

# Create a Brute-Force Matcher
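# Assumed completion (the original snippet is cut off here): Hamming norm for
# the binary AKAZE descriptors, best matches first.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)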

import numpy as np
import cv2
import matplotlib.pyplot as plt

# author's own modules
import image_processing as ip
import camera
import homography

# +
ratio = 0.3

img_query = ip.imread('CalibrationImage/FeatureDetection00001.JPG')
# Downscale the image
img_query = cv2.resize(img_query, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
ip.show_img(img_query, show_axis=True)

img_train = ip.imread('CalibrationImage/FeatureDetection00003.JPG')
# Downscale the image
img_train = cv2.resize(img_train, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
ip.show_img(img_train, show_axis=True)
# -

homology_matrix, mask = homography.compute_rasac_homology(img_query, img_train, show_detail=True)
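
# Sketch (assumed follow-up): homology_matrix is taken to be the usual 3x3
# homography mapping query pixels to train pixels; warp the query image into
# the train image's frame to check the estimate visually.
h, w = img_train.shape[:2]
warped = cv2.warpPerspective(img_query, homology_matrix, (w, h))
ip.show_img(warped, show_axis=True)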


def cube_points(c, wid):
    """Return a 3 x N vertex array for plotting a cube centred at c with
    half-width wid (see the function body in Example No. 2)."""
import numpy as np
import cv2
import matplotlib.pyplot as plt

# author's own modules
import image_processing as ip
import desc_val as dv
import camera
import feature_detection as fd
import homography

# +
ratio = 0.3

img_for_camera_calibration = ip.imread(
    'CalibrationImage/ImageforCameraCalibration.jpg')
# Downscale the image
im1 = cv2.resize(img_for_camera_calibration,
                 None,
                 fx=ratio,
                 fy=ratio,
                 interpolation=cv2.INTER_AREA)
ip.show_img(im1, show_axis=True)
# -

# Plot a marker on the top-left corner of the book
img_for_edit = img_for_camera_calibration.copy()
cv2.circle(img_for_edit, (2789, 1140), 50, (255, 255, 255), thickness=-1)
ip.show_img(img_for_edit, show_axis=True)

# Coordinates of the four corners in the photo
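
# Sketch (assumed continuation): the snippet ends before the corner
# coordinates are listed, so this helper only shows how the four image
# corners of the book (ordered top-left, top-right, bottom-right,
# bottom-left) and the book's physical size would give an image-to-plane
# homography. Both arguments are placeholders, not values from the notebook.
def book_plane_homography(corners_img, book_size):
    w, h = book_size
    corners_obj = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    return cv2.getPerspectiveTransform(np.float32(corners_img), corners_obj)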