Example 1
def undistort_img(img, K, distortion, dist_std, random_dist=False):
    '''Undistorts a given image, optionally jittering the distortion coefficients, and returns the cropped result'''

    if random_dist:
        dist = np.random.normal(distortion, dist_std)
    else:
        dist = distortion

    h, w = img.shape[:2]
    newK, roi = cv.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))

    #Undistort
    undistorted = cv.undistort(img, K, dist, None, newK)

    #Crop
    x, y, w, h = roi
    undistorted = undistorted[y:y + h, x:x + w]
    # undist_resized = cv.resize(undistorted, (1440, 960))

    #Display undistorted image
    # cv.imshow('undistorted', undist_resized)
    # cv.waitKey(-1)

    # cv.destroyAllWindows()

    return undistorted
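
A minimal calling sketch for undistort_img, assuming cv is OpenCV (import cv2 as cv) and that K, distortion and dist_std came from an earlier calibration; the matrix values and the image path below are placeholders:

import cv2 as cv
import numpy as np

# Hypothetical intrinsics and distortion from a previous cv.calibrateCamera run
K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
distortion = np.array([0.10, -0.05, 0.0, 0.0, 0.0])
dist_std = np.full_like(distortion, 0.01)   # per-coefficient standard deviation

img = cv.imread('sample.jpg')               # placeholder path
out = undistort_img(img, K, distortion, dist_std, random_dist=True)
cv.imwrite('sample_undistorted.jpg', out)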
Example 2
def calibrateOne(img_path, cameraParam):
    criteria = (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 30, 0.001)
    # Sub-pixel corner search parameters: stop after at most 30 iterations or when the error tolerance reaches 0.001
    # Prepare the positions of the calibration-board corners
    objp = np.zeros((4 * 7, 3), np.float32)
    objp[:, :2] = np.mgrid[0:7, 0:4].T.reshape(-1, 2)  # put the world coordinate system on the calibration board

    obj_points = []  #3D points
    img_points = []  #2D points

    images = glob.glob(img_path)
    i = 0
    for fname in images:
        img = cv2.imread(fname)
        #print(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        size = gray.shape[::-1]
        h, w = gray.shape[:2]
        ret, corners = cv2.findChessboardCorners(gray, (7, 4), None)
        if ret:
            obj_points.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1),
                                        criteria)
            # Refine the detected corners to sub-pixel accuracy
            #print(corners2)
            if corners2 is not None:
                img_points.append(corners2)
            else:
                img_points.append(corners)
            cv2.drawChessboardCorners(img, (7, 4), corners, ret)
            i += 1

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points,
                                                       size, None, None,
                                                       criteria=criteria)
    # dist --> distortion coefficients (the k and p values)

    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
        mtx, dist, (w, h), 1, (w, h))  # keep the full image area (normal remapping would crop part of it)
    # Maps fx, fy to the new image coordinate system; used later for stereo rectification
    #dst = cv2.undistort(img,mtx,dist,None,newcameramtx)
    #x,y,w,h = roi

    cameraParam['ret'] = ret
    cameraParam['mtx'] = mtx
    cameraParam['dist'] = dist
    cameraParam['rvecs'] = rvecs
    cameraParam['tvecs'] = tvecs
    cameraParam['img_points'] = img_points
    cameraParam['obj_points'] = obj_points
    cameraParam['size'] = size
    cameraParam['newmtx'] = newcameramtx
    print("Calibrate One Camera Success")
Example 3
def calculate_camera_matrix_and_distortion_coefficients(camera_model_name):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(7,5,0)
    object_points = np.zeros((6 * 8, 3), np.float32)
    object_points[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    real_world_points = []  # 3d point in real world space
    image_points = []  # 2d points in image plane.

    image_paths = glob.glob("calibration_images/*.png")

    for path_to_image in image_paths:
        img = cv2.imread(path_to_image)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        chess_board_successfully_detected, corners = cv2.findChessboardCorners(
            gray, (8, 6), None)
        print("Found chess board in: " + path_to_image + " : " +
              str(chess_board_successfully_detected))

        # If found, add object points, image points (after refining them)
        if chess_board_successfully_detected:
            real_world_points.append(object_points)

            refined_corners = cv2.cornerSubPix(gray, corners, (11, 11),
                                               (-1, -1), criteria)
            image_points.append(refined_corners)

            # Draw and display the corners
            # img = cv2.drawChessboardCorners(img, (7, 6), refined_corners, chess_board_successfully_detected)
            # cv2.imshow('img', resize_image(img, (1920, 1080)))
            # cv2.waitKey()

    cv2.destroyAllWindows()

    ret, camera_matrix, distortion_coeffs, rvecs, tvecs = cv2.calibrateCamera(
        real_world_points, image_points, gray.shape[::-1], None, None)

    h, w = gray.shape[:2]
    camera_matrix_with_crop, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coeffs, (w, h), 1, (w, h))

    file_name = f"camera_calibration_{camera_model_name}.npz"
    np.savez(file_name,
             camera_matrix=camera_matrix,
             distortion_coeffs=distortion_coeffs,
             camera_matrix_with_crop=camera_matrix_with_crop)
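
A sketch of how the saved parameters might be reloaded and applied, assuming the calibration images already sit in calibration_images/ as the function expects; the model name and test image path are placeholders:

import cv2
import numpy as np

calculate_camera_matrix_and_distortion_coefficients("example_cam")   # hypothetical model name

data = np.load("camera_calibration_example_cam.npz")
img = cv2.imread("test_frame.png")                                   # placeholder path
undistorted = cv2.undistort(img, data["camera_matrix"],
                            data["distortion_coeffs"], None,
                            data["camera_matrix_with_crop"])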
Example 4
def undistortion(img, mtx, dist):
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                      (w, h))

    print('roi ', roi)

    dst = cv2.undistort(img, mtx, dist, None, newcameramtx)

    # crop the image
    x, y, w, h = roi
    if roi != (0, 0, 0, 0):
        dst = dst[y:y + h, x:x + w]

    return dst
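
A brief usage sketch for undistortion; the intrinsics and distortion values stand in for the output of cv2.calibrateCamera and the image path is a placeholder:

import cv2
import numpy as np

mtx = np.array([[1200.0, 0.0, 960.0],
                [0.0, 1200.0, 540.0],
                [0.0, 0.0, 1.0]])                     # hypothetical camera matrix
dist = np.array([0.12, -0.25, 0.001, 0.0005, 0.10])   # hypothetical coefficients

img = cv2.imread("frame.jpg")                         # placeholder path
corrected = undistortion(img, mtx, dist)
cv2.imwrite("frame_undistorted.jpg", corrected)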
Example 5
        cv2.imshow('findCorners',img)
        cv2.waitKey(200)
cv2.destroyAllWindows()
#%% Calibration
print('Computing...')
# Calibration
ret, mtx, dist, rvecs, tvecs = \
    cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)


print("ret:",ret  )
print("mtx:\n",mtx)      # 内参数矩阵
print("dist畸变值:\n",dist   )   # 畸变系数   distortion cofficients = (k_1,k_2,p_1,p_2,k_3)
print("rvecs旋转(向量)外参:\n",rvecs)   # 旋转向量  # 外参数
print("tvecs平移(向量)外参:\n",tvecs  )  # 平移向量  # 外参数
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (u, v), 0, (u, v))
print('newcameramtx外参',newcameramtx)
#打开摄像机
camera=cv2.VideoCapture(0)
while True:
    (grabbed,frame)=camera.read()
    h1, w1 = frame.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (u, v), 0, (u, v))
    # Correct the distortion
    dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    #dst2 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    mapx,mapy=cv2.initUndistortRectifyMap(mtx,dist,None,newcameramtx,(w1,h1),5)
    dst2=cv2.remap(frame,mapx,mapy,cv2.INTER_LINEAR)
    # Crop the image and output the undistorted result
    x, y, w1, h1 = roi
    dst1 = dst1[y:y + h1, x:x + w1]
Example 6
                    cv2.cornerSubPix(ChessImaR, cornersR, (11, 11), (-1, -1),
                                     criteria)
                    cv2.cornerSubPix(ChessImaL, cornersL, (11, 11), (-1, -1),
                                     criteria)
                    imgpointsR.append(cornersR)
                    imgpointsL.append(cornersL)
                break
            elif (cv2.waitKey(2) & 0xFF == ord('q')):
                break
    cv2.destroyAllWindows()
    # Determine the new values for different parameters
    #   Right Side
    retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
        objpoints, imgpointsR, ChessImaR.shape[::-1], None, None)
    hR, wR = ChessImaR.shape[:2]
    OmtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR, hR), 1,
                                                (wR, hR))

    #   Left Side
    retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
        objpoints, imgpointsL, ChessImaL.shape[::-1], None, None)
    hL, wL = ChessImaL.shape[:2]
    OmtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1,
                                                (wL, hL))

    print('Cameras Ready to use')
    print("Camera calibration value: " + str(retL) + " and " + str(retR))

    # ********************************************
    # ***** Calibrate the Cameras for Stereo *****
    # ********************************************
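    # --- Sketch of the step the banner announces (assumed; the original snippet is cut off here):
    # --- a typical cv2.stereoCalibrate call over the per-view point lists gathered above.
    flags = cv2.CALIB_FIX_INTRINSIC
    criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
    retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR,
        ChessImaR.shape[::-1], criteria=criteria_stereo, flags=flags)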
Example 7
                    objpoints.append(objp)
                    cv2.cornerSubPix(ChessImaL, cornersL, (11, 11), (-1, -1),
                                     criteria)
                    imgpointsL.append(cornersL)
                break
            elif (cv2.waitKey(2) & 0xFF == ord('q')):
                break
    cv2.destroyAllWindows()
    # Determine the new values for different parameters
    #   Right Side

    #   Left Side
    retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
        objpoints, imgpointsL, ChessImaL.shape[::-1], None, None)
    hL, wL = ChessImaL.shape[:2]
    OmtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1,
                                                (wL, hL))

    print('Cameras Ready to use')
    print("Camera calibration value: " + str(retL))

    # ********************************************
    # ***** Calibrate the Cameras for Stereo *****
    # ********************************************

    print("Stereo Camera calibration value: " + str(retL))
    log = SaveSingleCalibration(destination=DESTINATION_FILE,
                                mtxL=mtxL,
                                disL=distL)
    print(log)
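
SaveSingleCalibration and DESTINATION_FILE are not shown in this snippet; a hedged guess at a minimal helper of that shape, assuming it only persists the matrix and distortion coefficients to disk:

import numpy as np

def SaveSingleCalibration(destination, mtxL, disL):
    # Hypothetical reconstruction; the real helper from the original project is not included here.
    np.savez(destination, mtx=mtxL, dist=disL)
    return "Saved calibration to {}".format(destination)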
Example 8
def calibrate():
    # Number of inner chessboard corners: col * row
    col = 13
    row = 6

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((row * col, 3), np.float32)
    # Each square of the calibration chessboard is 22 mm on a side
    objp[:, :2] = 22*np.mgrid[0:col, 0:row].T.reshape(-1, 2)

    objpoints = []  # points in world coordinates
    imgpoints = []  # points in image (pixel) coordinates
    print("Please select the folder containing the calibration images", "\n")

    root = tkinter.Tk()
    root.withdraw()

    global path  # directory containing the calibration images
    path = tkinter.filedialog.askdirectory(
        title="Select the folder containing the calibration images")
    images = glob.glob(path+"/*.jpg")
    found = 0  # number of images used for calibration
    for k, fname in enumerate(images):
        img = cv2.imread(fname)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        ret, corners = cv2.findChessboardCorners(gray, (col, row), None)
        # Corner detection

        if ret:
            print("Reading", fname)
            objpoints.append(objp)

            
            # Corner-detection accuracy affects the calibration accuracy
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)  # sub-pixel corner positions
            # corners2=corners
            #_,corners2=cv2.find4QuadCornerSubpix(gray, corners, (11, 11))

            imgpoints.append(corners2)
            img = cv2.drawChessboardCorners(img, (col, row), corners2, ret)  # mark the corners
            found += 1
            if len(images) < 16:  # show previews only when there are few images, to avoid too many pop-up windows
                cv2.namedWindow('press any key to continue', cv2.WINDOW_NORMAL)
                cv2.imshow('press any key to continue', img)
                cv2.waitKey(0)

            #image_name = path2 + "//corner"+str(found) + '.png'
            #cv2.imwrite(image_name, img)
            # Save the images with the detected corners drawn
            
    global path2  # results directory (camera-parameter files, undistorted images, 3-D box images)
    path2 = tkinter.filedialog.askdirectory(
        title="Select the output folder (should differ from the calibration-image folder)")

    print("Number of images used for calibration: ", found)

    # Camera calibration
    ret2, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                        gray.shape[::-1], None, None)

    print("reprojection error:", ret2)
    print("内参矩阵:", mtx)
    print("畸变系数:", dist)
    print("旋转向量:", rvecs)
    print("平移向量:", tvecs)

    images = glob.glob(path+"//*.jpg")
    for i, fname in enumerate(images):
        img = cv2.imread(fname)
        if img is None:
            continue
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1,
                                                            (w, h))
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)  # correct distortion

        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]  # crop
        outpath = path2+"//tianyi_gao_undistorted" + str(i + 1) + ".jpg"
        cv2.imwrite(outpath, dst)
    print("新内参矩阵:", newcameramtx)
    
    data = {
        'camera_matrix': np.asarray(mtx).tolist(),
        'dist_coeff': np.asarray(dist).tolist(),
        'new_camera_matrix': np.asarray(newcameramtx).tolist(),
        'rvecs': np.asarray(rvecs).tolist(),
        'tvecs': np.asarray(tvecs).tolist(),
        'reprojection_error': np.asarray(ret2).tolist()
    }
    # Save the camera parameters (YAML)
    with open(path2+"//calibration_parameters.yaml", "w") as f:
        yaml.dump(data, f)
    # Save the camera parameters (plain text)
    with open(path2+"//tianyi_gao_cam.txt", "w") as f2:
        name = list(data.keys())
        value = list(data.values())
        for i in range(len(name)):
            f2.write(name[i] + ":" + "\n" + str(value[i]) + "\n")

    print('Calibrate Done')
    cv2.destroyAllWindows()
    return mtx, dist, rvecs, tvecs, ret2, path2
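
A minimal way to drive this interactive routine and reuse its output, assuming a desktop environment where the tkinter dialogs can open; the reload path mirrors the YAML file the function writes:

import yaml

mtx, dist, rvecs, tvecs, rms, out_dir = calibrate()

# Reload the stored parameters
with open(out_dir + "//calibration_parameters.yaml") as f:
    params = yaml.safe_load(f)
print(params["camera_matrix"], params["dist_coeff"])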
Example 9
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv.findChessboardCorners(gray, (11, 8), None)
    # If found, add object points, image points (after refining them)
    if ret:
        objpoints.append(objp)
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        cv.drawChessboardCorners(img, (11, 8), corners2, ret)

        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
            objpoints, imgpoints, gray.shape[::-1], None, None)

        h, w = img.shape[:2]
        newcameramtx, roi = cv.getOptimalNewCameraMatrix(
            mtx, dist, (w, h), 1, (w, h))
        # print(roi)
        mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx,
                                                (w, h), 5)
        dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)

        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        npz = np.load('./output.npz')
        print(npz.files)

        cv.imwrite('calibresult.png', dst)

        plt_img = img[:, :, ::-1]
        plt.figure(fname)
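        # --- Plausible continuation sketch (assumed; not part of the original snippet):
        # --- show the original frame next to the cropped undistortion result.
        plt.subplot(1, 2, 1), plt.imshow(plt_img), plt.title('original')
        plt.subplot(1, 2, 2), plt.imshow(dst[:, :, ::-1]), plt.title('undistorted')
        plt.show()
        # The parameters could also be saved for the './output.npz' load above, e.g.:
        # np.savez('./output.npz', mtx=mtx, dist=dist, rvecs=rvecs, tvecs=tvecs)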
Example 10
def find_3d_points(image1_path, image2_path):
    img1 = cv2.imread(image1_path, cv2.IMREAD_GRAYSCALE)  # queryImage
    img2 = cv2.imread(image2_path, cv2.IMREAD_GRAYSCALE)  # trainImage

    # Initial calibration matrix from camera
    init_calibration_matrix = np.array(
        [
            [2.78228443e03, 0.00000000e00, 1.65670819e03],
            [0.00000000e00, 2.77797243e03, 1.19855894e03],
            [0.00000000e00, 0.00000000e00, 1.00000000e00],
        ]
    )
    distortion_coefficients = np.array(
        [0.07874525, -0.07184864, -0.00619498, 0.00252332, -0.09900985]
    )

    # Undistort the images. The alpha=1 argument to getOptimalNewCameraMatrix keeps the black
    # border regions ("black hills") after undistorting; passing 0 instead crops them away.
    height, width = img1.shape[:2]
    calibration_matrix, roi = cv2.getOptimalNewCameraMatrix(
        init_calibration_matrix,
        distortion_coefficients,
        (width, height),
        1,
        (width, height),
    )
    img1_distorted = cv2.undistort(
        img1, init_calibration_matrix, distortion_coefficients, None, calibration_matrix
    )
    img2_distorted = cv2.undistort(
        img2, init_calibration_matrix, distortion_coefficients, None, calibration_matrix
    )

    # Crop images
    x, y, w, h = roi
    img1_distorted = img1_distorted[y : y + h, x : x + w]
    img2_distorted = img2_distorted[y : y + h, x : x + w]

    # To display the undistorted images:
    # plt.imshow(img1_distorted), plt.show()
    # plt.imshow(img2_distorted), plt.show()

    # Create an ORB object
    orb = cv2.ORB_create()

    # Detect keypoints
    kp1 = orb.detect(img1_distorted, None)
    kp2 = orb.detect(img2_distorted, None)

    # Find descriptors
    kp1, des1 = orb.compute(img1_distorted, kp1)
    kp2, des2 = orb.compute(img2_distorted, kp2)

    # To draw the keypoints:
    #img1kp = cv2.drawKeypoints(img1, kp1, None, color=(0, 255, 0), flags=0) #flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
    # img2kp = cv2.drawKeypoints(img2, kp2, None, color=(0, 255, 0), flags=0)
    #plt.imshow(img1kp), plt.show()
    # plt.imshow(img2kp), plt.show()

    # Brute-force matcher object. crossCheck=True means that it has to match both ways
    brute_force = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # Matching descriptors
    matches = brute_force.match(des1, des2)

    # Clean the matches by distance
    matches = clean_matches(matches)

    # Sort matches in order of distance
    matches = sorted(matches, key=lambda x: x.distance)

    # To draw the first 20 matches:
    #img_matches = cv2.drawMatches(img1_distorted, kp1, img2_distorted, kp2, matches[:], None, flags = 2)
    #plt.imshow(img_matches), plt.show()

    # Extract coordinates
    points1 = extract_coordinates(matches, kp1, "queryIdx")
    points2 = extract_coordinates(matches, kp2, "trainIdx")

    # Find essential Matrix
    essential_matrix, _ = cv2.findEssentialMat(
        points1, points2, calibration_matrix, method=cv2.RANSAC, prob=0.999, threshold=3
    )
    determinant = mlin.det(essential_matrix)  # mlin is an alias from the original imports (likely numpy.linalg)
    eps = 1e-10
    if abs(determinant) > eps:
        raise Exception(
            "expected determinant to be close to zero, but is {}".format(determinant)
        )

    # Find camera2 position relative to camera1 (t is only in unit)
    _, R, t, _ = cv2.recoverPose(essential_matrix, points1, points2, calibration_matrix)

    # Create camera matrices
    M1 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))
    M2 = np.hstack((R, t))
    camera_matrix1 = np.dot(calibration_matrix, M1)
    camera_matrix2 = np.dot(calibration_matrix, M2)

    # Compute 3D points
    points_3d = []
    for c1, c2 in zip(points1, points2):
        point = cv2.triangulatePoints(camera_matrix1, camera_matrix2, c1, c2)
        points_3d.append(point)
    points_3d = cv2.convertPointsFromHomogeneous(np.array(points_3d))

    return points_3d, t
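
A call-level sketch for find_3d_points; it assumes the helpers clean_matches and extract_coordinates plus the cv2/np/mlin imports from the original module are available, and the image paths are placeholders:

import numpy as np

points_3d, t = find_3d_points("view_left.jpg", "view_right.jpg")   # placeholder paths

print("Recovered", len(points_3d), "3-D points")
print("Unit-length translation between the cameras:", np.ravel(t))
xyz = np.asarray(points_3d).reshape(-1, 3)
print("Centroid of the sparse reconstruction:", xyz.mean(axis=0))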