Example #1
    def mono_cal_fromcorners(self, cam1_ipts, cam2_ipts, opts):
        #TODO get img size for both cams
        # Calib for cam1:
        if self.cam1_do_mono_calib:
            intrinsics = numpy.zeros((3, 3), numpy.float64)
            distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
            #TODO rational distortion always returns zero vector...
            """
            dist_model = "rational"
            if cv2.CALIB_RATIONAL_MODEL:
                distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
            else:
            """
            distortion = numpy.zeros((5, 1), numpy.float64) # plumb bob
            dist_model = "plumb_bob"
            # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
            intrinsics[0,0] = 1.0
            intrinsics[1,1] = 1.0
            # capture the outputs explicitly; calibrateCamera returns the estimated arrays
            retval, intrinsics, distortion, rvecs, tvecs = cv2.calibrateCamera(
                       opts, cam1_ipts,
                       self.cam1_size, intrinsics,
                       distortion)
            self.cam1_K, self.cam1_D = intrinsics, distortion
            print "======CAM1====="
            print "INTRINSICS:"
            print intrinsics
            print "DISTORTION:"
            print distortion
            if self.save_calib_results:
                self.save_mono_calib_results(self.cam1_K, self.cam1_D, 
                        dist_model, self.cam1_size, "cam1")

        if self.cam2_do_mono_calib:
            # Calib for cam2:
            intrinsics = numpy.zeros((3, 3), numpy.float64)
            distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
            """
            dist_model = "rational"
            if cv2.CALIB_RATIONAL_MODEL:
                distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
            else:
            """
            distortion = numpy.zeros((5, 1), numpy.float64) # plumb bob
            dist_model = "plumb_bob"
            # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
            intrinsics[0,0] = 1.0
            intrinsics[1,1] = 1.0
            # capture the outputs explicitly; calibrateCamera returns the estimated arrays
            retval, intrinsics, distortion, rvecs, tvecs = cv2.calibrateCamera(
                       opts, cam2_ipts,
                       self.cam2_size, intrinsics,
                       distortion)
            self.cam2_K, self.cam2_D = intrinsics, distortion
            print "======CAM2====="
            print "INTRINSICS:"
            print intrinsics
            print "DISTORTION:"
            print distortion
            if self.save_calib_results:
                self.save_mono_calib_results(self.cam2_K, self.cam2_D, 
                        dist_model, self.cam2_size, "cam2")
Example #2
    def cal_fromcorners(self, good):
        """
        :param good: Good corner positions and boards 
        :type good: [(corners, ChessboardInfo)]

        
        """
        boards = [ b for (_, b) in good ]

        ipts = [ points for (points, _) in good ]
        opts = self.mk_object_points(boards)

        self.intrinsics = numpy.zeros((3, 3), numpy.float64)
        if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
            self.distortion = numpy.zeros((8, 1), numpy.float64) # rational polynomial
        else:
            self.distortion = numpy.zeros((5, 1), numpy.float64) # plumb bob
        # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
        self.intrinsics[0,0] = 1.0
        self.intrinsics[1,1] = 1.0
        # capture the outputs; calibrateCamera returns the refined matrices
        reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.calibrateCamera(
                   opts, ipts,
                   self.size, self.intrinsics,
                   self.distortion,
                   flags = self.calib_flags)

        # R is identity matrix for monocular calibration
        self.R = numpy.eye(3, dtype=numpy.float64)
        self.P = numpy.zeros((3, 4), dtype=numpy.float64)

        self.set_alpha(0.0)
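
The snippet above sizes the distortion vector based on cv2.CALIB_RATIONAL_MODEL and notes the aspect-ratio constraint. A minimal sketch, under assumed placeholder inputs, of how such a calib_flags value might be composed and passed to cv2.calibrateCamera (objpoints, imgpoints and image_size are not taken from the example):

import cv2
import numpy

# Placeholder inputs: objpoints/imgpoints would come from chessboard detection.
objpoints = []           # list of (N, 3) float32 arrays
imgpoints = []           # list of (N, 1, 2) float32 arrays
image_size = (640, 480)  # (width, height)

# Combine flags: 8-coefficient rational model plus a fixed fx/fy ratio.
calib_flags = cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_ASPECT_RATIO

# Seed fx = fy = 1 so CALIB_FIX_ASPECT_RATIO enforces a 1/1 ratio, as above.
K0 = numpy.eye(3, dtype=numpy.float64)
D0 = numpy.zeros((8, 1), numpy.float64)  # rational model uses 8 coefficients

if objpoints and imgpoints:
    rms, K, D, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, image_size, K0, D0, flags=calib_flags)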
Example #3
	def calibrate_camera(img_pts, board, img_size, noDistortion=False):
		"""
		Given image coordinates of points and the actual 3D points, return a Camera
		holding the intrinsics and extrinsics estimated from the point correspondences.
		Args:
			img_pts: list of 2xN np array 
			board: a Board
			img_size: (img_width, img_height)
		"""
		# Save all seen images to file
		#vis.plot_all_chessboards_in_camera(img_pts, img_size, save_name='debug_calibrate_camera.pdf')

		board_list = []
		view_id = []
		b_pts = board.get_orig_points().astype(np.float32)
		for i in range(len(img_pts)):
			#pts_id = img_pts[i].keys()
			if img_pts[i].shape[1] < board.num_points():
				print 'Cannot see the whole board in image', i
				continue
			view_id.append(i)
			board_list.append(b_pts.T.copy())
			img_pts[i] = img_pts[i].T.astype(np.float32).reshape((-1, 1, 2))
			# print str(board_list[-1].shape) + " == " + str(img_pts[-1].shape)

		# Inputs format:
		# board_list list of np(N, 3) float32
		# img_pts_list list of np(N, 1, 2) float32
		# (1260, 1080) (x, y)
		if noDistortion:
			retval, cameraMatrix, distCoeffs, rvecs, tvecs  = cv2.calibrateCamera( board_list, img_pts, (img_size[0], img_size[1]), None, np.zeros((8,1)), None, None, 8|32|64|128)
		else:
			retval, cameraMatrix, distCoeffs, rvecs, tvecs  = cv2.calibrateCamera( board_list, img_pts, (img_size[0], img_size[1]), None, None)
		print 'Calibration RMS re-projection error', retval
		
		# put img_pts[i] back to 2xN format
		for i in range(len(img_pts)):
			img_pts[i] = img_pts[i].reshape(-1, 2).T

		# package the return value
		intrinsics_ = Intrinsics(cameraMatrix, \
			np.concatenate( (distCoeffs[0][0:2], distCoeffs[0][4:5]), axis=0 ), \
			distCoeffs[0][2:4])
		extrinsics_ = dict()
		for i in range(len(rvecs)):
			extrinsics_[view_id[i]] = Extrinsics.init_with_rotation_vec(tvecs[i][:,0], rvecs[i][:,0])
		
		size = img_size
		aov = None
		name = "calibrated cam"
		return Camera(intrinsics_, extrinsics_, size, aov, name)
    def read_images(self, cal_path):
        images_right = glob.glob(cal_path + 'RIGHT/*.JPG')
        images_left = glob.glob(cal_path + 'LEFT/*.JPG')
        images_left.sort()
        images_right.sort()

        for i, fname in enumerate(images_right):
            img_l = cv2.imread(images_left[i])
            img_r = cv2.imread(images_right[i])

            gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(gray_l, (9, 6), None)
            ret_r, corners_r = cv2.findChessboardCorners(gray_r, (9, 6), None)

            # If found, add object points, image points (after refining them)
            self.objpoints.append(self.objp)

            if ret_l is True:
                rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11),
                                      (-1, -1), self.criteria)
                self.imgpoints_l.append(corners_l)

                # Draw and display the corners
                ret_l = cv2.drawChessboardCorners(img_l, (9, 6),
                                                  corners_l, ret_l)
                cv2.imshow(images_left[i], img_l)
                cv2.waitKey(500)

            if ret_r is True:
                rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11),
                                      (-1, -1), self.criteria)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                ret_r = cv2.drawChessboardCorners(img_r, (9, 6),
                                                  corners_r, ret_r)
                cv2.imshow(images_right[i], img_r)
                cv2.waitKey(500)
            img_shape = gray_l.shape[::-1]

        rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_l, img_shape, None, None)
        rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_r, img_shape, None, None)

        self.camera_model = self.stereo_calibrate(img_shape)
Example #5
def calibrate_lens(image_list):
    img_points, obj_points = [], []
    h,w = 0, 0
    for img in image_list:
        h, w = img.shape[:2]
        found,corners = find_corners(img, pattern_size)
        if not found:
            raise Exception("chessboard calibrate_lens Failed to find corners in img")
        img_points.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
    camera_matrix = numpy.zeros((3,3))
    dist_coeffs = numpy.zeros(5)
    # capture the outputs; calibrateCamera returns the estimated parameters
    rms, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, (w, h), camera_matrix, dist_coeffs)
    return camera_matrix, dist_coeffs
Example #6
def calibrate(filenames):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*7,3), np.float32)
    objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.
    images = []
    for filename in filenames:
        # Find the chess board corners. If found, add object points, image points (after refining them)
        img = cv2.imread(filename)
        if img is not None:
            print "Loaded " + repr(filename)
        else:
            print "Unable to load image " + repr(filename)
            continue
        images.append(img)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (7,6), None)
        if ret == True:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners2)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
    print "Loaded all images and calbulated calibration"
    for i, img in enumerate(images):
        img = images[i]
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
        dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
        x, y, w, h = roi
        cv2.imwrite( 'calibrated/out_' + str(i) + '.png', dst[ y : y+h, x : x+w ])
        print "Outputted calibrated image: 'calibrated/out_" + str(i) + ".png'"
Example #7
def webcam_calibration(cap, limit=8):
    '''Calibrate webcam with chessboard method.'''

    # Arrays to store object points and image points of all the images.
    object_points = [] # 3d points in real world space.
    image_points = [] # 2d points in image plane.

    height, width = 0, 0

    while len(image_points) != limit:
        time.sleep(0.5)

        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = frame.shape[:2]
        cv2.imshow('Chessboard', frame)

        ret, chessboard, object_point, image_point = detect_chessboard(gray)

        if ret:
            object_points.append(object_point)
            image_points.append(image_point)
            print 'Found {0} chessboards.'.format(len(image_points))

    return cv2.calibrateCamera(object_points, image_points, (width, height), None, None)
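
A short usage sketch for webcam_calibration above: unpack the cv2.calibrateCamera tuple it returns and undistort one more frame. The capture index and window handling are assumptions, not part of the example:

import cv2

cap = cv2.VideoCapture(0)  # assumed default webcam
ret, mtx, dist, rvecs, tvecs = webcam_calibration(cap, limit=8)

ok, frame = cap.read()
if ok:
    # apply the estimated intrinsics to a live frame
    undistorted = cv2.undistort(frame, mtx, dist, None, mtx)
    cv2.imshow('Undistorted', undistorted)
    cv2.waitKey(0)
cap.release()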
Example #8
def calibrate_camera(img_pts, obj_pts, img_size):
    # generate pattern size
    camera_matrix = np.zeros((3,3))
    dist_coef = np.zeros(4)
    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_pts, img_pts,
                                                    img_size, camera_matrix, dist_coef)
    return camera_matrix, dist_coefs
def firstPart():
    ''' <002> Here Define the camera matrix of the first view image (01_daniel.png) recorded by the cameraCalibrate2'''
    firstFrame = cv2.imread(videoSequence)
    pattern_size = (9,6)
    obj_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    obj_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    
    camera = np.zeros((3, 3))
    h, w, _ = firstFrame.shape
    
    found,corners = cv2.findChessboardCorners(firstFrame, pattern_size)
    img_points = corners.reshape(-1, 2)
    
    
    if found != 0:
        _, camera, _, rotation, translation  = cv2.calibrateCamera([obj_points], [img_points], (w, h) ,camera, np.zeros(4) ,flags = 0)
    #constructing the projection matrix R|t
    global Kt
    Kt = np.zeros((3, 4))
    rotMatrix = cv2.Rodrigues(rotation[0])[0]
    Kt[:, :3] = rotMatrix
    Kt[:, 3:] = translation[0]
    ''' <003> Here Load the first view image (01_daniel.png) and find the chess pattern and store the 4 corners of the pattern needed for homography estimation''' 
    for p in obj_points:
        imgH = np.dot(camera, np.dot(Kt, [p[0], p[1], p[2], 1]))
        imgP = [imgH[0] / imgH[2], imgH[1] / imgH[2]]
        cv2.circle(firstFrame, (int(imgP[0]), int(imgP[1])), 3, (0, 0, 255), 4)
Example #10
def calibrate(imagedir, cbrow, cbcol):
    nimages = 0
    datapoints = []
    im_dims = (0,0)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = numpy.zeros((cbrow * cbcol, 3), numpy.float32)
    objp[:, :2] = numpy.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.

    files = file_list(imagedir, ['jpg', 'jpeg', 'png'])
    for f in files:
        colour = cv2.imread(f)
        grey = cv2.cvtColor(colour, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(grey, (cbcol, cbrow), flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
        
        if (ret):
            print('using ' + f)
            cv2.cornerSubPix(grey,corners,(11,11),(-1,-1),(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.01))
            objpoints.append(objp)
            imgpoints.append(corners)
            im_dims = grey.shape[:2]

    if len(imgpoints) == 0:
        print("Not enough good quality images. Aborting")
        return
    
    ret, K, D, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)

    # storing results using CameraParams
    C = CameraParams(lens=lens, sensorwidth=sensorwidth, xresolution=im_dims[1], yresolution=im_dims[0])
    C.setParams(K, D)
    C.save(os.path.join(imagedir, "paramsout.json"))
    print("Saved params in " + os.path.join(imagedir, "paramsout.json"))
Example #11
def calibrate(image_folder_path, nx=9, ny=6, demo=False):
    raw_imgs = [cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) for image_path in glob.iglob(os.path.join(image_folder_path, "*.jpg"))]
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    imgpoints = []
    objpoints = []
    for i, img in enumerate(raw_imgs):
        ret, corners = cv2.findChessboardCorners(img, (nx, ny), None)
        if ret:
            imgpoints.append(corners)
            objpoints.append(objp)
        # print(corners)
        # cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        # plt.imsave("results/%d.jpg" % i, img)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[::-1], None, None)
    with open("calibrate.pickle", mode="wb") as f:
        pickle.dump({"mtx": mtx, "dist": dist}, f)

    if demo:
        # img = random.choice(raw_imgs)
        img = raw_imgs[0]
        undistort_img = cv2.undistort(img, mtx, dist, dst=None, newCameraMatrix=mtx)
        cv2.imshow("undistort", undistort_img)
        cv2.waitKey()
Example #12
def computeCameraMatrix():
    counter = 1

    h, w = 0, 0
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = gray.shape[:2]

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, pattern_size, None)
        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

            imgpoints.append(corners)
            # Draw and display the corners
            cv2.drawChessboardCorners(img, pattern_size, corners, ret)
            cv2.imshow("img", img)

            rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (w, h), None, None)

            newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h))

            dst = cv2.undistort(gray, camera_matrix, dist_coefs, None, newcameramtx)
            cv2.imshow("undistort image", dst)
            cv2.waitKey(100)
            counter = counter + 1
        else:
            print ("No corners found on Picture " + str(counter))

    cv2.destroyAllWindows()
def get_camera_matrix():
    square_size = 0.3
    pattern_size = (8, 6)

    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = 0, 0

    img_names = glob.glob('*.png')

    for fn in img_names:
        img = cv2.imread(fn, 0)
        h, w = img.shape[:2]
        found, corners = cv2.findChessboardCorners(img, pattern_size)
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)

            img_points.append(corners.reshape(-1, 2))
            obj_points.append(pattern_points)

    _, camera_matrix, _, _, _ = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)

    return camera_matrix
Example #14
    def calibrate(self, print_raw_results = True):
        rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(self.obj_points, self.img_points, (self.w, self.h), None, None)
        calib_data = Calibration_Data(rms, camera_matrix, dist_coefs, self.w, self.h, rvecs, tvecs)
        if print_raw_results:
            print(calib_data)

        return calib_data
Example #15
def calibrate_camera(images, pattern_size, square_size, chessboard_corners_results):
    '''
    Conducts camera calibration using the photos of chessboard pattern

    Arguments:
    images -- a list of images to process
    pattern_size -- dimension of the chessboard pattern, e.g. (7, 8)
    square_size -- size of a square edge on the chessboard
    chessboard_corners_results -- a list of tuples got from the
                                  cv2.findChessboardCorners function call
                                  for each image
    
    IMPORTANT: the images passed to the function must already be filtered,
    so that there are no images for which cv2.findChessboardCorners failed;
    use the cvfunctions.chessboard.filter_chessboard_corners_results function
    to achieve this
    
    Returns a tuple as a result of the cv2.calibrateCamera function call,
    containing the following calibration results:
    rms, camera_matrix, dist_coefs, rvecs, tvecs
    '''
    
    image_size = get_image_size(images[0])
    
    object_points = get_object_points(len(images), pattern_size, square_size)
    image_points = get_image_points(chessboard_corners_results)
     
    res = cv2.calibrateCamera(object_points, image_points, image_size, None, None)
    return res
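
Per the docstring, the helper returns the raw cv2.calibrateCamera tuple; a minimal sketch of unpacking it, assuming the image list and corner results were already filtered as required (the pattern size and square size are placeholder values):

def run_calibration(images, chessboard_corners_results):
    # Unpack the tuple returned by calibrate_camera above;
    # (9, 6) and 0.025 are placeholder pattern and square sizes.
    rms, camera_matrix, dist_coefs, rvecs, tvecs = calibrate_camera(
        images, (9, 6), 0.025, chessboard_corners_results)
    print('RMS re-projection error:', rms)
    return camera_matrix, dist_coefs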
Example #16
def __calibrate_intrinsics(camera, image_points, object_points, flags, criteria):
    """
    Calibrate intrinsics of the provided camera using provided image & object points & calibration flags & criteria.
    @param camera: camera to calibrate
    @param image_points: points in images taken with the camera that correspond to the 3d object_points.
    @param object_points: 3d points on the object that appears in *each* of the images.
    Usually, inner corners of a calibration board. Note: assumes *the same* object appears in all of the images.
    @param flags: OpenCV camera calibration flags. For details, see OpenCV calib3d documentation, calibrate function.
    @param criteria: OpenCV criteria.
    @return: estimated object-space rotation & translation vectors of the camera (assuming object is static)
    """
    # OpenCV prefers [width x height] as "Size" to [height x width]
    frame_dims = (camera.intrinsics.resolution[1], camera.intrinsics.resolution[0])
    start = time.time()
    camera.intrinsics.error, camera.intrinsics.intrinsic_mat, camera.intrinsics.distortion_coeffs, \
    rotation_vectors, translation_vectors = \
        cv2.calibrateCamera(objectPoints=np.array([object_points]*len(image_points)), imagePoints=image_points,
                            imageSize=frame_dims, cameraMatrix=camera.intrinsics.intrinsic_mat,
                            distCoeffs=camera.intrinsics.distortion_coeffs,
                            flags=flags, criteria=criteria)
    end = time.time()
    camera.intrinsics.time = end - start
    camera.intrinsics.timestamp = end
    camera.intrinsics.calibration_image_count = len(image_points)
    return rotation_vectors, translation_vectors
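
The docstring above refers to OpenCV calibration flags and termination criteria; a sketch, under assumed settings, of how those two arguments might be built before calling __calibrate_intrinsics (the specific flag choice is not taken from the example):

import cv2

# Assumed setup: start from the camera's current intrinsics and keep the
# principal point fixed during optimisation.
flags = cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_PRINCIPAL_POINT
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6)
# rotation_vectors, translation_vectors = __calibrate_intrinsics(
#     camera, image_points, object_points, flags, criteria)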
    def calibration(self):

        for fname in self.img_list:
            img = cv2.imread(fname)
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            ret, corners = cv2.findChessboardCorners(grey, self.size, None)
            cv2.drawChessboardCorners(img, self.size, corners,ret)

            # if found, show imgs
            if ret:
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
                cv2.cornerSubPix(grey,corners,(11,11),(-1,-1),criteria)
                self.imgpoints.append(corners)
                self.objpoints.append(self.objp)
                self.img_list_detected.append(fname)
                print fname

            cv2.imshow('img',img)
            cv2.waitKey(500)

        cv2.destroyAllWindows()


        # Step 2: Calibration
        # shape[::-1]: (480,640) => (640,480)
        ret, cmx, dist, rvecs, tvecs = cv2.calibrateCamera(
            self.objpoints, self.imgpoints, grey.shape[::-1],None,None)
        print cmx
        print dist
        # save calibration result
        np.savez('./calibFile/calib.npz', cmx=cmx, dist=dist, rvecs=rvecs, tvecs=tvecs)
Example #18
def undistort_img(img, points3d, points2d):
    img_size = (img.shape[1], img.shape[0])

    # Do camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(points3d, points2d, img_size, None, None)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return dst
Example #19
 def calibrate(self,obj_points,img_points,size):
     rms,C,dist,r,t = cv2.calibrateCamera(obj_points,img_points,size, None, None)
     self.rms = rms
     self.C = np.matrix(C)
     self.dist = np.array(dist)
     self.r = np.matrix(r[0])
     self.t = np.matrix(t[0])
def cal_undistort(img, objpoints, imgpoints):
    img_size = (img.shape[1], img.shape[0])
    # Use cv2.calibrateCamera() and cv2.undistort()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    
    return dst
Example #21
def get_transform():
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    pattern_size = (9, 6)
    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    # pattern_points *= square_size
    obj_points = []
    img_points = []
    w,h = 0,0

    for fname in os.listdir('chessboard'):
        if fname[0] == '.': continue
        img = cv2.imread('chessboard/' + fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h, w = img.shape[:2]
        found, corners = cv2.findChessboardCorners(gray, pattern_size)
        print found
        if found:
            corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
        
            img_points.append(corners.reshape(-1, 2))
            obj_points.append(pattern_points)
        
            vis = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
        
            # cv2.imshow('img', vis)
            # cv2.waitKey(500)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w,h), None, None)
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
    return mtx, roi, newcameramtx, dist
def correctDistortion(image):
   
   size = image.shape[1],image.shape[0]
   
   corners = findCorners(image)

   patternPoints = np.zeros( (np.prod(boardSize), 3), np.float32 )
   patternPoints[:,:2] = np.indices(boardSize).T.reshape(-1, 2)
   
   imagePoints = np.array([corners.reshape(-1, 2)])
   objectPoints = np.array([patternPoints])
   cameraMatrix = np.zeros((3, 3))
   distCoefs = np.zeros(4)
   rc,cameraMatrix,distCoeffs,rvecs,tvecs = cv2.calibrateCamera(
      objectPoints,
      imagePoints,
      size,
      cameraMatrix,
      distCoefs
   )
   
   newCameraMatrix,newExtents = cv2.getOptimalNewCameraMatrix(cameraMatrix,distCoeffs,size,0.0)
   
   mapx, mapy = cv2.initUndistortRectifyMap(
      cameraMatrix,
      distCoeffs,
      None,
      cameraMatrix,
      size,
      cv2.CV_32FC1
   )
   newImage = cv2.remap( image, mapx, mapy, cv2.INTER_LANCZOS4 )
   return newImage
Example #23
def calibrate_camera_interactive(images, objp, boardSize):
    # Arrays to store object points and image points from all the images.
    objectPoints = []    # 3d point in real world space
    imagePoints = []    # 2d points in image plane

    test_image = cv2.imread(images[0])
    imageSize = (test_image.shape[1], test_image.shape[0])

    # Read images
    for fname in images:
        img = cv2.imread(fname)
        ret, corners = cvh.extractChessboardFeatures(img, boardSize)

        # If chessboard corners are found, add object points and image points
        if ret == True:
            objectPoints.append(objp)
            imagePoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(
                    img, boardSize, corners, ret )
            cv2.imshow("img", img)
            cv2.waitKey(100)

    # Calibration
    reproj_error, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
            objectPoints, imagePoints, imageSize, None, None )
    distCoeffs = distCoeffs.reshape((-1))    # convert to vector
    
    return reproj_error, cameraMatrix, distCoeffs, rvecs, tvecs, \
            objectPoints, imagePoints, imageSize
Example #24
    def set_camera_params(self):
        print 'start calibration'
        # specify the chessboard (X, Y, Z) coordinates (Z = 0)
        pattern_points = np.zeros((np.prod(self.pattern_size), 3), np.float32)
        pattern_points[:, :2] = np.indices(self.pattern_size).T.reshape(-1, 2)
        pattern_points *= self.square_size
        obj_points = []
        img_points = []

        for fn in glob(self.img_path+"*.png"):
            # load the image
            im = cv2.imread(fn, 0)
            # detect the chessboard corners
            found, corner = cv2.findChessboardCorners(im, self.pattern_size)
            # if corners were found, refine them to sub-pixel accuracy
            if found:
                term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
                cv2.cornerSubPix(im, corner, (5, 5), (-1, -1), term)
            # skip the image if no corners were found
            if not found:
                print 'chessboard not found'
                continue
            # append the detected corner image coordinates (x, y) and the matching object points
            img_points.append(corner.reshape(-1, 2))
            obj_points.append(pattern_points)

        # compute the intrinsic parameters
        self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(
            obj_points,
            img_points,
            (im.shape[1], im.shape[0]),
            None, None)
        print 'finish calibration'
def calibration_parameters(path, cshape):
    """Compute calibration parameters from a set of calibration images.

    Params:
      path: Directory of calibration images.
      cshape: Shape of the chessboard grid used in those images.
    Return:
      mtx, dist
    """
    # Object / image points collections.
    objpoints = []
    imgpoints = []

    # Calibration points from images.
    filenames = os.listdir(path)
    for fname in filenames:
        img = cv2.imread(path + fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Theoretical Grid.
        objp = np.zeros((cshape[0] * cshape[1], 3), np.float32)
        objp[:, :2] = np.mgrid[0:cshape[0], 0:cshape[1]].T.reshape(-1, 2)
        # Corners in the image.
        ret, corners = cv2.findChessboardCorners(gray, cshape, None)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
        else:
            print('Warning! No chessboard found in image', fname)
    # Calibration from image points.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,
                                                       imgpoints,
                                                       gray.shape[::-1],
                                                       None, None)
    return mtx, dist
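
A small usage sketch for calibration_parameters above; the folder, grid shape and image name are placeholders, and the undistortion step only illustrates how the returned mtx and dist are typically applied:

import cv2

mtx, dist = calibration_parameters('camera_cal/', cshape=(9, 6))  # placeholder path/grid
img = cv2.imread('camera_cal/calibration1.jpg')                   # placeholder image
if img is not None:
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    cv2.imwrite('undistorted.jpg', undistorted)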
Example #26
 def detect_squares(self, images):
     pattern_size = (7, 6)
     pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
     pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
     pattern_points *= self.square_size
 
     obj_points = []
     img_points = []
     h, w = 0, 0
     for fn in images:
         print 'processing %s...' % fn,
         img = cv2.imread(os.path.join(os.path.dirname(os.path.realpath(__file__)), "cal", fn), 0)
         h, w = img.shape[:2]
         found, corners = cv2.findChessboardCorners(img, pattern_size)
         if found:
             term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
             cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
         if self.debug_dir:
             vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
             cv2.drawChessboardCorners(vis, pattern_size, corners, found)
         if not found:
             print 'chessboard not found'
             continue
         img_points.append(corners.reshape(-1, 2))
         obj_points.append(pattern_points)
         
         print 'ok'
     print "Calibration is a go... please wait.."
     camera_matrix = np.zeros((3, 3))
     dist_coefs = np.zeros(4)
     img_n = len(img_points)
     rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), camera_matrix, dist_coefs)
     return (obj_points, img_points, (w, h), camera_matrix, dist_coefs, rvecs, tvecs)
     """print "RMS:", rms
Example #27
	def calibrate(self,images):
		# Arrays to store object points and image points from all the images.
		objpoints = [] # 3d point in real world space
		imgpoints = [] # 2d points in image plane.
		w,h=0,0

		for fname in images:
			gray = cv2.imread(fname,0)
			ret,objpoints,imgpoints = self.findMarkers(gray,objpoints,imgpoints)
			h,w=gray.shape[:2]

		#print len(objpoints),len(imgpoints),w,h

		rms, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (w,h), None, None)

		print 25*'-'
		print "RMS:", rms
		print "camera matrix:\n", mtx
		print "distortion coefficients: ", dist.ravel()
		print 25*'-'
		# not sure of the value of this
		alpha = 0.5
		newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),alpha)
		# newcameramtx = 0
		# roi = 0
		self.data = {'camera_matrix': mtx, 'dist_coeff': dist, 'newcameramtx': newcameramtx, 'rms': rms, 'rvecs': rvecs, 'tvecs': tvecs}
Example #28
def calibrate_camera(img):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.

    images = glob.glob('images/chess_calibration/*.jpg')

    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9,6),None)

        if ret:
            objpoints.append(objp)

            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return ret, mtx, dist
Example #29
def main():

    square_size = 1.0      # size of one square
    pattern_size = (9, 6)  # size of the pattern
    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size
    obj_points = []
    img_points = []

    for fn in glob("*.jpg"):
        # load the image
        im = cv2.imread(fn, 0)
        print "loading..." + fn
        # detect the chessboard corners
        found, corner = cv2.findChessboardCorners(im, pattern_size)
        # if corners were found, refine them to sub-pixel accuracy
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(im, corner, (5,5), (-1,-1), term)
        # skip the image if no corners were found
        if not found:
            print 'chessboard not found'
            continue
        img_points.append(corner.reshape(-1, 2))
        obj_points.append(pattern_points)

    # compute the intrinsic parameters
    rms, K, d, r, t = cv2.calibrateCamera(obj_points, img_points, (im.shape[1], im.shape[0]), None, None)
    # print the results
    print "RMS = ", rms
    print "K = \n", K
    print "d = ", d.ravel()
Example #30
        def __init__(self, LOG_LEVEL='NONE'):
            if LOG_LEVEL == 'INFO':
                self.LOG_LEVEL = 1
            elif LOG_LEVEL == 'DEBUG':
                self.LOG_LEVEL = 2
            else: # No logging
                self.LOG_LEVEL = 0

            self.objpoints = None
            self.imgpoints = None
            self.shape = None

            try:
                self.objpoints = np.load('data/objpoints.npy')
                self.imgpoints = np.load('data/imgpoints.npy')
                self.shape = tuple(np.load('data/shape.npy'))
            except:
                pass

            if self.objpoints is None or self.imgpoints is None:
                print 'No data files found, calibrating camera...'
                self.find_corners()

            #print self.shape
            ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, self.shape, None, None)
ny = 6  # the number of inside corners in y

# Find the chessboard corners
gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
#print(ret)
#print(corners)

# If found, add object points, image points
if ret == True:
    objpoints.append(objp)
    imgpoints.append(corners)

# calibrate camera
ret2, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   img.shape[1::-1], None,
                                                   None)

#undistort
undist = cv2.undistort(img, mtx, dist, None, mtx)

cv2.drawChessboardCorners(img_orig, (nx, ny), corners, ret)

src = np.float32([corners[0], corners[nx - 1], corners[-1], corners[-nx]])
offset = 100
img_size = (gray.shape[1], gray.shape[0])
dst = np.float32([[offset, offset], [img_size[0] - offset, offset],
                  [img_size[0] - offset, img_size[1] - offset],
                  [offset, img_size[1] - offset]])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(undist, M, img_size)
Example #32
        result, corners = cv2.findChessboardCorners(gray, pattern_size, None)

        if result:
            object_points.append(object_points_single_group)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                             termination_criteria)
            image_points.append(corners)

            # Draw and display the corners
            # cv2.drawChessboardCorners(img, patternSize, corners, ret)
            # cv2.imshow('Image', img)
            # cv2.waitKey(0)

    cv2.destroyAllWindows()

    result, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors = cv2.calibrateCamera(
        object_points, image_points, gray.shape[::-1], None, None)

    mean_error = calc_mean_error(object_points, image_points, rotation_vectors,
                                 translation_vectors, camera_matrix,
                                 distortion_coefficients)

    print("mean error: ", mean_error)
    save_calibration_coefficients(camera_matrix, distortion_coefficients)

    # create_window("Undistorted Image")
    # create_window("Original Image")
    #
    # for fileName in images:
    #     img = cv2.imread(fileName)
    #     h, w = img.shape[:2]
    #     newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
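
calc_mean_error is not shown in the snippet above; a sketch of what such a mean re-projection error helper typically looks like, using cv2.projectPoints. This is an assumption about that helper, not its actual implementation:

import cv2

def calc_mean_error(object_points, image_points, rotation_vectors,
                    translation_vectors, camera_matrix, distortion_coefficients):
    # Re-project each board and compare against the detected corners.
    total_error = 0
    for i in range(len(object_points)):
        projected, _ = cv2.projectPoints(object_points[i], rotation_vectors[i],
                                         translation_vectors[i], camera_matrix,
                                         distortion_coefficients)
        error = cv2.norm(image_points[i], projected, cv2.NORM_L2) / len(projected)
        total_error += error
    return total_error / len(object_points)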
Example #33
        # append the refined points
        cv2.drawChessboardCorners(img_L, (7, 6), corners2_L, ret_L)
        cv2.drawChessboardCorners(img_R, (7, 6), corners2_R, ret_R)
        # visualization of the detected corners
        # cv2.drawChessboardCorners(img_L, (7, 6), corners2_L, ret_L)
        # cv2.imshow("Corners", img_L)
    # if ret_R == True:
    #     # append the 3D coordinates
    #     objpoints_R.append(objp)
    #     # refine the point locations (sub-pixel)
    #
    #     # visualization of the detected corners
    #     # cv2.drawChessboardCorners(img_L, (7, 6), corners2_L, ret_L)
    #     # cv2.imshow("Corners", img_L)

ret_L, mtx_L, dist_L, rvecs_L, tvecs_L = cv2.calibrateCamera(
    objpoints, imgpoints_L, gray_L.shape[::-1], None, None)
ret_R, mtx_R, dist_R, rvecs_R, tvecs_R = cv2.calibrateCamera(
    objpoints, imgpoints_R, gray_R.shape[::-1], None, None)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpoints_L, imgpoints_R, mtx_L, dist_L, mtx_R, dist_R,
    gray_L.shape[::-1])

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
    cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, gray_R.shape[::-1],
    R, T)

map1_L, map2_L = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1,
                                             P1, gray_L.shape[::-1],
                                             cv2.CV_16SC2)
map1_R, map2_R = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2,
                                             P2, gray_L.shape[::-1],
Example #34
def calibrate_camera():
    """Find the camera matrix and distortion coefficients for the camera.
    The directory should contain approx 20 photos of the chessboard from different angles.
    Returns the camera matrix and distortion coefficients. Also saves a file containing the
    coefficients to the same directory as the program.
    """

    import pickle
    import os
    import glob
    import cv2
    from archive.create_chess_grid import create_chess_grid
    import numpy as np

    # Save working directory for later
    workdir = os.getcwd()

    # Directory in which the chessboard photos are stored
    os.chdir("/calibrationphotos")

    # Size of the chessboard in the photos.
    grid = (7, 8)
    # Dimensions of the chessboard. Possibly not required.
    dims = (20, 22)

    # Define the coordinates of the chessboard
    objp = create_chess_grid((grid, dims))

    # Initialise containers
    objpoints = []
    imgpoints = []

    # Make a list of calibration photos
    images = glob.glob("*.JPG")

    # Termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.uint8)
        ret, corners = cv2.findChessboardCorners(gray, grid, None,
                                                 cv2.CALIB_CB_FILTER_QUADS)
        if ret:
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners)

    # Check chessboard found in all images
    if len(images) != len(objpoints):
        print("Warning, chessboard not detected in all images")

    # Perform camera calibration
    ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                     gray.shape[::-1], None,
                                                     None)

    # Calculate the mean error of calibration
    mean_error = 0
    for i in range(len(objpoints)):
        imgpointsrec, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i],
                                            K, dist)
        error = cv2.norm(imgpoints[i], imgpointsrec,
                         cv2.NORM_L2) / len(imgpointsrec)
        mean_error += error
    mean_error /= len(objpoints)

    print("Mean error = {} pixels".format(mean_error))

    # Save the coefficients
    print("Saving camera information to file")
    os.chdir(workdir)
    pickle.dump((K, dist), open("camera_data.p", "wb"))

    return K, dist
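
Since the function above pickles (K, dist) to camera_data.p, a brief sketch of reading the saved coefficients back and undistorting a new image; the image filename is a placeholder:

import pickle
import cv2

# Load the intrinsics saved by calibrate_camera() above.
with open("camera_data.p", "rb") as f:
    K, dist = pickle.load(f)

img = cv2.imread("example.jpg")  # placeholder image
if img is not None:
    undistorted = cv2.undistort(img, K, dist, None, K)
    cv2.imwrite("example_undistorted.jpg", undistorted)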
 radius_l = ((1 + (2 * leftz[0] * 45 * xm_per_pix + leftz[1]) ** 2) ** 1.5) / np.absolute(2 * leftz[0])
 radius_r = ((1 + (2 * rightz[0] * 45 * xm_per_pix + rightz[1]) ** 2) ** 1.5) / np.absolute(2 * rightz[0])
 #print("left lane radius: ",radius_l)
 #print("right lane radius: ",radius_r)
 return radius_l, radius_r

###################################################################################################################################

model=load_model('/home/ghosh/Documents/full_CNN_model.h5')

xm_per_pix = 27/45  
ym_per_pix = 3.7/35

objpoints, imgpoints, sh=cameracalib()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, sh, None, None)

cap = cv2.VideoCapture(0)
while(True):
 ret, frame = cap.read()
 frame = cv2.undistort(frame, mtx, dist, None, mtx)

 small_img = imresize(frame, (80,160,3))
 small_img = np.array(small_img)
 small_img = small_img[None,:,:,:]

 pred=model.predict(small_img)[0] *255

 recent_fit.append(pred)
 if len(recent_fit)>10:
  recent_fit= recent_fit[1:]
Example #36
 def recalibrate_camera(self):
     if self.successful_cal:
         self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(
             self.objpoints, self.imgpoints, self.imgsize, None, None)
Example #37
def calibrate_camera(camera_folder='camera_cal',
                     output_folder='output_images',
                     test_folder='test_images'):
    '''
    Camera calibration
    '''
    if (os.path.isfile('calibration.p')):
        calibration = pickle.load(open('calibration.p', 'rb'))
        ret, mtx, dist, rvecs, tvecs = calibration
    else:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                    0.001)
        objpoints = []
        imgpoints = []
        objp = np.zeros((6 * 9, 3), np.float32)
        objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

        camera_images = glob.glob(camera_folder + '/*.jpg')
        test_images = glob.glob(test_folder + '/*.jpg')

        for imagefile in camera_images:
            img = cv2.imread(imagefile)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

            if ret == True:
                objpoints.append(objp)
                corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                            criteria)
                imgpoints.append(corners2)
                img_board = cv2.drawChessboardCorners(img, (9, 6), corners2,
                                                      ret)

                filename = os.path.basename(imagefile)
                cv2.imwrite('./' + output_folder + '/camera/calib/' + filename,
                            img_board)

        calibration = cv2.calibrateCamera(objpoints, imgpoints,
                                          gray.shape[::-1], None, None)
        ret, mtx, dist, rvecs, tvecs = calibration
        pickle.dump(calibration, open('calibration.p', 'wb'))

        for imagefile in camera_images:
            filename = os.path.basename(imagefile)
            img = cv2.imread(imagefile)
            h, w = img.shape[:2]
            newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
                mtx, dist, (w, h), 1, (w, h))
            dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
            x, y, w, h = roi
            dst = dst[y:y + h, x:x + w]
            cv2.imwrite('./' + output_folder + '/camera/undist/' + filename,
                        dst)

        for testfile in test_images:
            filename = os.path.basename(testfile)
            img = cv2.imread(testfile)
            h, w = img.shape[:2]
            dst = cv2.undistort(img, mtx, dist)

            # Draw perspective polygon
            pts = np.array([[516, 460], [756, 460], [1200, 720], [100, 720]],
                           np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(dst, [pts], True, (0, 0, 255))

            cv2.imwrite('./' + output_folder + '/test/' + filename, dst)

    return ret, mtx, dist, rvecs, tvecs
Example #38
def calibrate_stereo(img_folder, img_idx, write_folder, \
                     chessboard_size=(9, 6), square_size=0.02):
    cb_row, cb_col = chessboard_size
    num_camera = 2
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((cb_col * cb_row, 3), np.float32)
    objp[:, :2] = square_size * np.mgrid[0:cb_row, 0:cb_col].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints_cam0 = []  # 2d points in image plane.
    imgpoints_cam1 = []

    images = ["cam0_%06d.jpg" % i for i in img_idx]

    for img_i in img_idx:
        gray_cam = []
        ret_cam = []
        corners_cam = []
        for camera_idx in range(num_camera):
            fname = "cam%d_%06d.jpg" % (camera_idx, img_i)
            img_path = os.path.join(img_folder, fname)
            img = cv2.imread(img_path)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, chessboard_size,
                                                     None)
            gray_cam.append(gray)
            ret_cam.append(ret)
            corners_cam.append(corners)
        if (ret_cam[0] == True) and (ret_cam[1] == True):
            objpoints.append(objp)
            corners_refined_cam0 = cv2.cornerSubPix(gray_cam[0],
                                                    corners_cam[0], (11, 11),
                                                    (-1, -1), criteria)
            corners_refined_cam1 = cv2.cornerSubPix(gray_cam[1],
                                                    corners_cam[1], (11, 11),
                                                    (-1, -1), criteria)
            imgpoints_cam0.append(corners_refined_cam0)
            imgpoints_cam1.append(corners_refined_cam1)

    # calibrate camera intrinsics and distortions
    _, mtx0, dist0, _, _ = cv2.calibrateCamera(objpoints, imgpoints_cam0,
                                               gray.shape[::-1], None, None)
    _, mtx1, dist1, _, _ = cv2.calibrateCamera(objpoints, imgpoints_cam1,
                                               gray.shape[::-1], None, None)

    # calibrate relative pose (1st camera relative to 2nd camera)
    ret, mtx0, dist0, mtx1, dist1, R, T, _, _ = cv2.stereoCalibrate(objpoints, imgpoints_cam0, imgpoints_cam1, \
                                                                    mtx0, dist0, mtx1, dist1, \
                                                                    gray.shape[::-1])

    cam0 = {}
    cam0["int"] = mtx0.tolist()
    cam0["ext"] = np.hstack((R.T, -R.T @ T)).tolist()
    cam0["dist"] = dist0.tolist()
    with open(os.path.join(write_folder, "cam0.json"), 'w') as json_file:
        json.dump(cam0, json_file)

    cam1 = {}
    cam1["int"] = mtx1.tolist()
    cam1["ext"] = np.eye(4)[0:3, :].tolist()
    cam1["dist"] = dist1.tolist()
    with open(os.path.join(write_folder, "cam1.json"), 'w') as json_file:
        json.dump(cam1, json_file)
def __start_video_calibration(videoFN, cols, rows, skip, dim, objp, objpoints,
                              imgpoints, matrix_filename, distortion_filename,
                              start_video):
    """
    Calculate the calibration matrix and distortion array for a given video and write the results to the specified files.

    Parameters
    ----------
    - videoFN -- filename of the video.
    - cols -- number of columns on the calibration pattern.
    - rows -- number of rows on the calibration pattern.
    - skip -- number of frames to skip when searching for the chessboard corners (lower = longer calculation time).
    - dim -- the dimension of the black squares on the calibration pattern in mm.
    - objp -- array containing the object points for one view.
    - objpoints -- list collecting the object points.
    - imgpoints -- list collecting the image points.
    - matrix_filename -- filename for the camera matrix output.
    - distortion_filename -- filename for the distortion coefficients output.
    - start_video -- show a debug video while processing.

    Returns
    -------
    Nothing
    """

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, dim, 0.001)

    cap = cv2.VideoCapture(videoFN)
    if not cap.isOpened():
        logger.warning("Videofile not found")
        return

    count = 0

    logger.info("Start analysing video")
    while cap.isOpened():

        success, frame = cap.read()

        if success:
            gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray_frame, (cols, rows),
                                                     None)

            if ret:
                count += 1
                objpoints.append(objp)

                corners2 = cv2.cornerSubPix(gray_frame, corners, (11, 11),
                                            (-1, -1), criteria)

                imgpoints.append(corners2)
                if start_video:
                    cv2.drawChessboardCorners(frame, (cols, rows), corners2,
                                              ret)
                    cv2.imshow('frame', frame)

        else:
            break

        for i in range(0, skip):
            cap.read()

    logger.info("Done Analysing video, got {} usable images".format(count))

    cap.release()
    logger.debug("Releasing video")

    logger.debug("Starting calibration calculations")
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray_frame.shape[::-1],
                                                       None, None)
    logger.info("Got matrix")

    logger.info("Got dist")

    logger.info("Writing matrix to {}".format(matrix_filename))
    np.savetxt(matrix_filename, mtx, delimiter=',')
    logger.info("Writing dist to {}".format(distortion_filename))
    np.savetxt(distortion_filename, dist, delimiter=',')
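
The routine above writes the camera matrix and distortion vector with np.savetxt; a small sketch of loading them back and undistorting a frame from the same video (all filenames are placeholders):

import cv2
import numpy as np

# Read back the parameters written by __start_video_calibration above.
mtx = np.loadtxt('camera_matrix.csv', delimiter=',')
dist = np.loadtxt('distortion.csv', delimiter=',')

cap = cv2.VideoCapture('calibration_video.mp4')  # placeholder video
ok, frame = cap.read()
if ok:
    undistorted = cv2.undistort(frame, mtx, dist, None, mtx)
    cv2.imwrite('frame_undistorted.png', undistorted)
cap.release()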
Example #40
def cal_undistort(img, objpoints, imgpoints):
    val, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       img.shape[1::-1], None,
                                                       None)
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist
Example #41
                                    criteria)
        imgpoints_list.append(corners)

        # draw chessboard corners on image for visualisation
        cv2.drawChessboardCorners(img, (7, 6), corners2, success)
        cv2.imshow(fname, img)
        cv2.waitKey(500)

cv2.destroyAllWindows()

img = cv2.imread('data/left11.jpg')  # pick an image to demo calibration
height, width, channel = img.shape

# camera calibration using image points and object points
# yield: camera matrix, distortion coef, rotation & translation vectors
success, matrix, distortion, rvecs, tvecs = cv2.calibrateCamera(
    objpoints_list, imgpoints_list, (width, height), None, None)

# undistortion
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(matrix, distortion,
                                                  (width, height), 1,
                                                  (width, height))
dist = cv2.undistort(img, matrix, distortion, None, newcameramtx)

# crop the image
x, y, w, h = roi
dist = dist[y:y + h, x:x + w]
cv2.imwrite('artifacts/calibresult11.png', dist)

cv2.imshow('calibresult', dist)
cv2.waitKey(500)
                    if ord('s') == key:
                        # imgPoints.append(corners)
                        objPoints.append(objp)
                        imgPoints.append(corners2)
                        break
                    elif ord('d') == key:
                        break
            else:
                objPoints.append(objp)
                corners2 = cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1),
                                            criteria)
                imgPoints.append(corners2)

    if not auto_mode:
        cv2.destroyAllWindows()

    if len(objPoints) > 0:
        print("Running Calibration...")
        retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
            objPoints, imgPoints, chessboardDimension, None, None)

        print("Camera Matrix = |fx  0 cx|")
        print("                | 0 fy cy|")
        print("                | 0  0  1|")
        print(cameraMatrix)

        print("\nDistortion Coefficients = (k1, k2, p1, p2, k3)")
        print(distCoeffs)

        calcReprojectionError(objPoints, imgPoints, cameraMatrix, distCoeffs)
Example #43
 def calibrate(self):
     """Run calibration using points extracted by process_image."""
     rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
         self.object_points, self.image_points, self.image_size, None, None)
     return rms, camera_matrix, dist_coefs.ravel()
Example #44
        # Refine the pixel coordinates for the given 2D points
        corners_new = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                      criteria)
        img_points.append(corners_new)

        # Display the detected corners
        img = cv.drawChessboardCorners(img, CHECKERBOARD, corners_new, result)

        cv.imshow('imagem', img)
        cv.waitKey(500)

cv.destroyAllWindows()

# Calibrate the camera from the known 3D points and the corresponding image coordinates
# Returns the matrix K, the distortion parameters, the vector t, and the rotation vector R
result, matrix_k, distortion, r_vect, t_vect = cv.calibrateCamera(
    obj_points, img_points, gray.shape[::-1], None, None)

# Convert the rotation vector R into the rotation matrix R
# Uses the data from the first image
rotation_mat = np.zeros(shape=(3, 3))
matrix_r = cv.Rodrigues(r_vect[0], rotation_mat)[0]

# Build the projection matrix P from the first image's data
matrix_ext = np.column_stack((matrix_r, t_vect[0]))
matrix_p = np.matmul(matrix_k, matrix_ext)

# Create and write the file with the obtained parameters
arquivo = open('dados_câmera.txt', 'w')
escreve_arquivo(arquivo, matrix_p, matrix_k, matrix_r, t_vect[0], distortion)
arquivo.close()
                                                     goal_post_width)
    calibration_points['obj'].append(objpoints)
    calibration_points['img'].append(imgpoints)

camera_matrix = np.array([[1.11615226e+03, 0.00000000e+00, 6.42354107e+02],
                          [0.00000000e+00, 1.11453403e+03, 4.53277504e+02],
                          [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
dist_coeffs = np.array(
    [[-0.15204789, 0.14581479, -0.00107285, -0.00019929, 0.04981551]])

image_size = (1296, 864)
rms, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
    calibration_points['obj'],
    calibration_points['img'],
    image_size,
    camera_matrix,
    dist_coeffs,
    None,
    None,
    flags=cv2.CALIB_USE_INTRINSIC_GUESS)
print("rms", goal_post_width, rms)

camera_calibration = {
    'rms': rms,
    'camera_matrix': camera_matrix,
    'dist_coefs': dist_coeffs,
    'rvecs': rvecs,
    'tvecs': tvecs,
    'image_size': image_size
}
Example #46
##### coordinates of a chessboard in world coordinates ####

objectp3d = np.zeros((1, board[0] * board[1], 3), np.float32)
enum = 0
for j in range(board[1]):
    for i in range(board[0]):
        #print(row)
        objectp3d[0][enum] = (i, j, 0)
        enum += 1

##### find the coordinates of the corners in each image, in both world and image coordinates #####
pnt3D = []
pnt2D = []
for i, row in enumerate(images):
    res, corner = cv2.findChessboardCorners(
        row, board,
        flags=cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK +
        cv2.CALIB_CB_NORMALIZE_IMAGE)
    pnt3D.append(objectp3d)
    corner2 = cv2.cornerSubPix(row, corner, (11, 11), (-1, -1), criteria)
    pnt2D.append(corner2)

##### calibrate using the first 10 images
_, camera_matrix, distortion_coef, _, _ = cv2.calibrateCamera(
    pnt3D[0:10], pnt2D[0:10], images[19].shape[::-1], None, None)
file1 = open("result.txt", "w")
file1.write(" Camera matrix:\n")
file1.write(str(camera_matrix))
file1.write("\n\n\n Distortion coefficients:\n")
file1.write(str(distortion_coef))
file1.close()
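
# --- Hedged sketch (not part of the original script): checking how well the
# intrinsics estimated from the first 10 boards reproject the corners of the
# remaining, held-out views (assumes more than 10 images were processed). ---
held_out_error = 0.0
for objp, imgp in zip(pnt3D[10:], pnt2D[10:]):
    # Recover this view's pose, then reproject the 3D corners through K and D
    _, rvec, tvec = cv2.solvePnP(objp, imgp, camera_matrix, distortion_coef)
    reproj, _ = cv2.projectPoints(objp, rvec, tvec, camera_matrix, distortion_coef)
    held_out_error += cv2.norm(imgp, reproj, cv2.NORM_L2) / len(reproj)
print("mean held-out reprojection error:", held_out_error / len(pnt3D[10:]))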
Exemplo n.º 47
0
    def _read_images(self, cal_path):
        """
        Reads the calibration images and finds the checkerboard pattern in each of them.

        Returns:
        camera_model: dict
            A dictionary containing the stereo camera model (intrinsic and
            extrinsic parameters).
        """
        images_right = glob.glob(cal_path + 'RIGHT/*.jpg')
        images_left = glob.glob(cal_path + 'LEFT/*.jpg')
        if (not images_right):
            images_right = glob.glob(cal_path + 'RIGHT/*.JPG')
        if (not images_left):
            images_left = glob.glob(cal_path + 'LEFT/*.JPG')
        images_left.sort()
        images_right.sort()

        print('Starting find')
        for i, fname in enumerate(images_right):
            img_l = cv2.imread(images_left[i])
            img_r = cv2.imread(images_right[i])

            h_l, w_l = img_l.shape[:2]
            h_r, w_r = img_r.shape[:2]

            gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
            '''
            #flags for cv2.findChessboardCorners
            flag = 0
            flag |= cv2.CALIB_CB_ADAPTIVE_THRESH
            flag |= cv2.CALIB_CB_FILTER_QUADS
            '''

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(
                gray_l, (self.cdimsh, self.cdimsw), None)
            # either flag or None for last argument
            ret_r, corners_r = cv2.findChessboardCorners(
                gray_r, (self.cdimsh, self.cdimsw), None)

            print('Image: ', i + 1, '/', len(images_left))
            print(ret_l, ret_r)
            # If found, add object points, image points (after refining them)

            if ret_l is True and ret_r is True:
                self.objpoints.append(self.objp)
                rt = cv2.cornerSubPix(gray_l, corners_l, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_l.append(corners_l)

                # Draw and display the corners
                ret_l = cv2.drawChessboardCorners(img_l,
                                                  (self.cdimsh, self.cdimsw),
                                                  corners_l, ret_l)

                rt = cv2.cornerSubPix(gray_r, corners_r, (11, 11), (-1, -1),
                                      self.criteria)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                ret_r = cv2.drawChessboardCorners(img_r,
                                                  (self.cdimsh, self.cdimsw),
                                                  corners_r, ret_r)

            img_shape = gray_l.shape[::-1]

        rt, self.M1, self.d1, self.r1, self.t1 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_l, img_shape, None, None)
        rt, self.M2, self.d2, self.r2, self.t2 = cv2.calibrateCamera(
            self.objpoints, self.imgpoints_r, img_shape, None, None)

        return self._stereo_calibrate(img_shape)
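
    # --- Hedged sketch (assumption, the real helper is not shown here): one
    # way the _stereo_calibrate method called above could look. It reuses the
    # attributes filled in by _read_images and keeps the mono intrinsics fixed.
    def _stereo_calibrate_sketch(self, dims):
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100,
                    1e-5)
        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            self.objpoints, self.imgpoints_l, self.imgpoints_r, self.M1,
            self.d1, self.M2, self.d2, dims, criteria=criteria,
            flags=cv2.CALIB_FIX_INTRINSIC)
        return {'M1': M1, 'd1': d1, 'M2': M2, 'd2': d2, 'R': R, 'T': T,
                'E': E, 'F': F, 'rms': ret}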
Exemplo n.º 48
0
def calibrate(dirnames, gc_fname_lists, proj_shape, chess_shape,
              chess_block_size, gc_step, black_thr, white_thr):
    print(chess_shape)
    objps = np.zeros((chess_shape[0] * chess_shape[1], 3), np.float32)
    objps[:, :2] = chess_block_size * \
        np.mgrid[0:chess_shape[0], 0:chess_shape[1]].T.reshape(-1, 2)

    print('Calibrating ...')
    gc_height = int((proj_shape[0] - 1) / gc_step) + 1
    gc_width = int((proj_shape[1] - 1) / gc_step) + 1
    graycode = cv2.structured_light_GrayCodePattern.create(gc_width, gc_height)
    graycode.setBlackThreshold(black_thr)
    graycode.setWhiteThreshold(white_thr)

    cam_shape = cv2.imread(gc_fname_lists[0][0], cv2.IMREAD_GRAYSCALE).shape
    patch_size_half = int(np.ceil(cam_shape[1] / 180))
    print('  patch size :', patch_size_half * 2 + 1)

    cam_corners_list = []
    cam_objps_list = []
    cam_corners_list2 = []
    proj_objps_list = []
    proj_corners_list = []
    cnt = 0  # index used to name the per-pose visualization images
    for dname, gc_filenames in zip(dirnames, gc_fname_lists):
        print('  checking \'' + dname + '\'')
        if len(gc_filenames) != graycode.getNumberOfPatternImages() + 2:
            print('Error : invalid number of images in \'' + dname + '\'')
            return None

        imgs = []
        for fname in gc_filenames:
            print(fname)
            img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
            if cam_shape != img.shape:
                print('Error : image size of \'' + fname + '\' does not match')
                return None
            imgs.append(img)
        black_img = imgs.pop()
        white_img = imgs.pop()

        res, cam_corners = cv2.findChessboardCorners(white_img, chess_shape)
        if not res:
            print('Error : chessboard was not found in \'' + gc_filenames[-2] +
                  '\'')
            return None
        cam_objps_list.append(objps)
        cam_corners_list.append(cam_corners)

        proj_objps = []
        proj_corners = []
        cam_corners2 = []
        viz_proj_points = np.zeros(proj_shape, np.uint8)
        for corner, objp in zip(cam_corners, objps):
            c_x = int(round(corner[0][0]))
            c_y = int(round(corner[0][1]))
            src_points = []
            dst_points = []
            for dx in range(-patch_size_half, patch_size_half + 1):
                for dy in range(-patch_size_half, patch_size_half + 1):
                    x = c_x + dx
                    y = c_y + dy
                    if int(white_img[y, x]) - int(black_img[y,
                                                            x]) <= black_thr:
                        continue
                    #print(x,y,graycode.getProjPixel(imgs, x, y))
                    err, proj_pix = graycode.getProjPixel(imgs, x, y)
                    if not err:
                        src_points.append((x, y))
                        dst_points.append(gc_step * np.array(proj_pix))
            if len(src_points) < patch_size_half**2:
                print(
                    '    Warning : corner', c_x, c_y,
                    'was skipped because too few decoded pixels were found (check your images and thresholds)'
                )
                continue
            h_mat, inliers = cv2.findHomography(np.array(src_points),
                                                np.array(dst_points))
            point = h_mat @ np.array([corner[0][0], corner[0][1], 1
                                      ]).transpose()
            point_pix = point[0:2] / point[2]
            proj_objps.append(objp)
            proj_corners.append([point_pix])
            cam_corners2.append(corner)
            viz_proj_points[int(round(point_pix[1])),
                            int(round(point_pix[0]))] = 255
        if len(proj_corners) < 3:
            print('Error : too few corners were found in \'' + dname +
                  '\' (less than 3)')
            return None
        proj_objps_list.append(np.float32(proj_objps))
        proj_corners_list.append(np.float32(proj_corners))
        cam_corners_list2.append(np.float32(cam_corners2))
        cv2.imwrite('visualize_corners_projector_' + str(cnt) + '.png',
                    viz_proj_points)
        cnt += 1

    print('Initial solution of camera\'s intrinsic parameters')
    ret, cam_int, cam_dist, cam_rvecs, cam_tvecs = cv2.calibrateCamera(
        cam_objps_list, cam_corners_list, cam_shape, None, None, None, None)
    print('  RMS :', ret)
    print('  Intrinsic parameters :')
    printNumpyWithIndent(cam_int, '    ')
    print('  Distortion parameters :')
    printNumpyWithIndent(cam_dist, '    ')
    print()

    print('Initial solution of projector\'s parameters')
    ret, proj_int, proj_dist, proj_rvecs, proj_tvecs = cv2.calibrateCamera(
        proj_objps_list, proj_corners_list, proj_shape, None, None, None, None)
    print('  RMS :', ret)
    print('  Intrinsic parameters :')
    printNumpyWithIndent(proj_int, '    ')
    print('  Distortion parameters :')
    printNumpyWithIndent(proj_dist, '    ')
    print()

    print('=== Result ===')
    ret, cam_int, cam_dist, proj_int, proj_dist, cam_proj_rmat, cam_proj_tvec, E, F = cv2.stereoCalibrate(
        proj_objps_list, cam_corners_list2, proj_corners_list, cam_int,
        cam_dist, proj_int, proj_dist, None)
    print('  RMS :', ret)
    print('  Camera intrinsic parameters :')
    printNumpyWithIndent(cam_int, '    ')
    print('  Camera distortion parameters :')
    printNumpyWithIndent(cam_dist, '    ')
    print('  Projector intrinsic parameters :')
    printNumpyWithIndent(proj_int, '    ')
    print('  Projector distortion parameters :')
    printNumpyWithIndent(proj_dist, '    ')
    print('  Rotation matrix / translation vector from camera to projector')
    print('  (they translate points from camera coord to projector coord) :')
    printNumpyWithIndent(cam_proj_rmat, '    ')
    printNumpyWithIndent(cam_proj_tvec, '    ')
    print()

    fs = cv2.FileStorage('calibration_result.xml', cv2.FILE_STORAGE_WRITE)
    fs.write('img_shape', cam_shape)
    fs.write('rms', ret)
    fs.write('cam_int', cam_int)
    fs.write('cam_dist', cam_dist)
    fs.write('proj_int', proj_int)
    fs.write('proj_dist', proj_dist)
    fs.write('rotation', cam_proj_rmat)
    fs.write('translation', cam_proj_tvec)
    fs.release()
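
# --- Hedged usage sketch (not part of the original function): reading the
# calibration_result.xml written above back in with cv2.FileStorage. ---
def load_calibration_result(path='calibration_result.xml'):
    fs = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
    cam_int = fs.getNode('cam_int').mat()
    cam_dist = fs.getNode('cam_dist').mat()
    proj_int = fs.getNode('proj_int').mat()
    proj_dist = fs.getNode('proj_dist').mat()
    rotation = fs.getNode('rotation').mat()
    translation = fs.getNode('translation').mat()
    fs.release()
    return cam_int, cam_dist, proj_int, proj_dist, rotation, translation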
Exemplo n.º 49
0
        objpoints.append(objpoint)
        imagepoints.append(corners)
        cv2.drawChessboardCorners(img, (9, 6), corners, ret)

        cv2.imshow('img', img)
        cv2.waitKey(500)

cv2.destroyAllWindows()

# In[3]:

get_ipython().magic('matplotlib inline')
image = mping.imread('camera_cal/calibration2.jpg')
img_size = (image.shape[0], image.shape[1])

ret, mtx, dist, rvec, trvec = cv2.calibrateCamera(objpoints, imagepoints,
                                                  img_size, None, None)

undistort = cv2.undistort(image, mtx, dist, None, mtx)

cv2.imwrite('output_images/test_undist.jpg', undistort)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(undistort)
ax2.set_title('Undistorted Image', fontsize=30)

# In[4]:


def mag_thresh(img, sobel_kernel=3, mag_thresh=(80, 150)):
Exemplo n.º 50
0
all_object_points = []
all_image_points = []
all_images = []

for image_path in image_paths:
    image = cv2.imread(image_path)
    (foundCorners, corners) = cv2.findChessboardCorners(image, pattern_size)

    # cv2.drawChessboardCorners(image, pattern_size, corners, foundCorners)
    if foundCorners:
        all_object_points.append(image_objects)
        all_image_points.append(corners)
        all_images.append(image)

retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
    all_object_points, all_image_points, image_size, None, None)

print(cameraMatrix)


def projectPointsAndShowOnImage():
    for (image, object_points, rvec, tvec) in zip(all_images,
                                                  all_object_points, rvecs,
                                                  tvecs):
        image_points, jacobian = cv2.projectPoints(
            np.array(object_points, dtype="float32"), rvec, tvec, cameraMatrix,
            distCoeffs)
        cv2.drawChessboardCorners(image, pattern_size, image_points, True)
        cv2.imshow("Projected Points", image)
        cv2.waitKey(0)
Exemplo n.º 51
0
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #Chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (rows, cols), None)
    if ret:
        # Corner position refinement
        corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        objectPointsArray.append(objectPointsScaled)
        imgPointsArray.append(corners)
        cv2.drawChessboardCorners(img, (rows, cols), corners, ret)

# print('objpts',objectPointsArray)
# print('imgpts',imgPointsArray)

# Camera calibration (Intrinsic)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectPointsArray,
                                                   imgPointsArray,
                                                   gray.shape[::-1], None,
                                                   None)
np.savez('mobile_calibration/calib.npz',
         mtx=mtx,
         dist=dist,
         rvecs=rvecs,
         tvecs=tvecs)

#Finding the calibration error by reprojection
error = 0
for i in range(len(objectPointsArray)):
    imgPoints, _ = cv2.projectPoints(objectPointsArray[i], rvecs[i], tvecs[i],
                                     mtx, dist)
    error += cv2.norm(imgPointsArray[i], imgPoints,
                      cv2.NORM_L2) / len(imgPoints)
print("Total error: ", error / len(objectPointsArray))
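
# --- Hedged usage sketch (not part of the original script): loading the saved
# calibration back from calib.npz and undistorting the last processed image.
# 'img' is simply reused from the loop above. ---
calib = np.load('mobile_calibration/calib.npz')
h, w = img.shape[:2]
new_mtx, roi = cv2.getOptimalNewCameraMatrix(calib['mtx'], calib['dist'],
                                             (w, h), 1, (w, h))
undistorted = cv2.undistort(img, calib['mtx'], calib['dist'], None, new_mtx)
cv2.imwrite('undistorted_sample.png', undistorted)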
Exemplo n.º 52
0
    def calculate(self):
        self.calculated = True
        self.count = 10
        img_shape = self.g_pool.capture.frame_size

        # Compute calibration
        try:
            if self.dist_mode == "Fisheye":
                calibration_flags = (
                    cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC
                    + cv2.fisheye.CALIB_CHECK_COND
                    + cv2.fisheye.CALIB_FIX_SKEW
                )
                max_iter = 30
                eps = 1e-6
                camera_matrix = np.zeros((3, 3))
                dist_coefs = np.zeros((4, 1))
                rvecs = [
                    np.zeros((1, 1, 3), dtype=np.float64) for i in range(self.count)
                ]
                tvecs = [
                    np.zeros((1, 1, 3), dtype=np.float64) for i in range(self.count)
                ]
                objPoints = [x.reshape(1, -1, 3) for x in self.obj_points]
                imgPoints = self.img_points
                rms, _, _, _, _ = cv2.fisheye.calibrate(
                    objPoints,
                    imgPoints,
                    img_shape,
                    camera_matrix,
                    dist_coefs,
                    rvecs,
                    tvecs,
                    calibration_flags,
                    (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, max_iter, eps),
                )
                camera_model = Fisheye_Dist_Camera(
                    self.g_pool.capture.name, img_shape, camera_matrix, dist_coefs
                )
            elif self.dist_mode == "Radial":
                rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
                    np.array(self.obj_points),
                    np.array(self.img_points),
                    self.g_pool.capture.frame_size,
                    None,
                    None,
                )
                camera_model = Radial_Dist_Camera(
                    self.g_pool.capture.name, img_shape, camera_matrix, dist_coefs
                )
            else:
                raise ValueError("Unknown distortion model: {}".format(self.dist_mode))
        except ValueError as e:
            raise e
        except Exception as e:
            logger.warning("Camera calibration failed to converge!")
            logger.warning(
                "Please try again with a better coverage of the cameras FOV!"
            )
            return

        logger.info("Calibrated Camera, RMS:{}".format(rms))

        camera_model.save(self.g_pool.user_dir)
        self.g_pool.capture.intrinsics = camera_model

        self.show_undistortion_switch.read_only = False
Exemplo n.º 53
0

leftObjectPoints, leftImagePoints = getMatchingObjectAndImagePoints(
    filenames, leftFilenames, leftObjectPoints, leftImagePoints)
rightObjectPoints, rightImagePoints = getMatchingObjectAndImagePoints(
    filenames, rightFilenames, rightObjectPoints, rightImagePoints)

# TODO: Fix this validation
# Keep getting "Use a.any() or a.all()" even though it's already used?!
# if (leftObjectPoints != rightObjectPoints).all():
#     print("Object points do not match")
#     sys.exit(1)
objectPoints = leftObjectPoints

print("Calibrating left camera...")
_, leftCameraMatrix, leftDistortionCoefficients, _, _ = cv2.calibrateCamera(
    objectPoints, leftImagePoints, imageSize, None, None)
print("Calibrating right camera...")
_, rightCameraMatrix, rightDistortionCoefficients, _, _ = cv2.calibrateCamera(
    objectPoints, rightImagePoints, imageSize, None, None)

print("Calibrating cameras together...")
(_, _, _, _, _, rotationMatrix, translationVector, _,
 _) = cv2.stereoCalibrate(objectPoints, leftImagePoints, rightImagePoints,
                          leftCameraMatrix, leftDistortionCoefficients,
                          rightCameraMatrix, rightDistortionCoefficients,
                          imageSize, None, None, None, None,
                          cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)

print("Rectifying cameras...")
# TODO: Why do I care about the disparityToDepthMap?
(leftRectification, rightRectification, leftProjection, rightProjection,
Exemplo n.º 54
0
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print("Collected", no_of_collected_corners - 1, "/", no_of_corners_to_be_collected)
        # Get out of loop when we have enough points.
        if no_of_collected_corners > no_of_corners_to_be_collected:
            break
    else:
        cv2.imshow("Checkerboard not found. Press 'q' to quit.", image_data)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

camera.release()
cv2.destroyAllWindows()

# Run the calibration on the collected object/image point correspondences
_, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
    objpoints, imgpoints, image_data.shape[::-1], None, None)
distortion_coefficients = np.squeeze(distortion_coefficients)


print("Calibration done. Here is what you need in the .ini file:")
print("-------------------")
print("camera_focal_length_x =", camera_matrix[0, 0])
print("camera_center_x =", camera_matrix[0, 2])
print("camera_focal_length_y =", camera_matrix[1, 1])
print("camera_center_y =", camera_matrix[1, 2])
print("-------------------")
print("camera_dist_coeff_k1 =", distortion_coefficients[0])
print("camera_dist_coeff_k2 =", distortion_coefficients[1])
print("camera_dist_coeff_p1 =", distortion_coefficients[2])
print("camera_dist_coeff_p2 =", distortion_coefficients[3])
print("camera_dist_coeff_k3 =", distortion_coefficients[4])
Exemplo n.º 55
0
    def _process_frame(self, frame):
        """Processes each frame

            If recording mode is on (self.recording==True), this method will
            perform all the hard work of the camera calibration process:
            - for every frame, until enough frames have been processed:
                - find the chessboard corners
                - refine the coordinates of the detected corners
            - after enough frames have been processed:
                - estimate the intrinsic camera matrix and distortion
                  coefficients

            :param frame: current RGB video frame
            :returns: annotated video frame showing detected chessboard corners
        """
        # if we are not recording, just display the frame
        if not self.recording:
            return frame

        # else we're recording
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.uint8)

        if self.record_cnt < self.record_min_num_frames:
            # need at least some number of chessboard samples before we can
            # calculate the intrinsic matrix
            ret, corners = cv2.findChessboardCorners(img_gray,
                                                     self.chessboard_size,
                                                     None)

            if ret:
                cv2.drawChessboardCorners(frame, self.chessboard_size, corners,
                                          ret)

                # refine found corners
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                            30, 0.01)
                cv2.cornerSubPix(img_gray, corners, (9, 9), (-1, -1), criteria)

                self.obj_points.append(self.objp)
                self.img_points.append(corners)
                self.record_cnt += 1

        else:
            # we have already collected enough frames, so now we want to
            # calculate the intrinsic camera matrix (K) and the distortion
            # vector (dist)
            print("Calibrating...")
            ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
                self.obj_points, self.img_points,
                (self.imgHeight, self.imgWidth), None, None)
            print("K =", K)
            print("dist =", dist)

            # double-check the reprojection error (should be as close to zero
            # as possible)
            mean_error = 0
            for i in range(len(self.obj_points)):
                img_points2, _ = cv2.projectPoints(self.obj_points[i],
                                                   rvecs[i], tvecs[i], K, dist)
                error = cv2.norm(self.img_points[i], img_points2,
                                 cv2.NORM_L2) / len(img_points2)
                mean_error += error

            print("mean error =", mean_error)

            self.recording = False
            self._reset_recording()
            self.button_calibrate.Enable()

        return frame
Exemplo n.º 56
0
cap.release()
cv2.destroyAllWindows()

# calibrate projector
print("calibrate projector")
K_proj = np.zeros(shape=(3, 3))
K_proj[0][0] = 1
K_proj[1][1] = 1
K_proj[0][2] = 1
K_proj[1][2] = 1
K_proj[2][2] = 1

ret, K_proj, dist_coef_proj, rvecs, tvecs = cv2.calibrateCamera(
    objpoints,
    imgpointsProj,
    gray.shape[::-1],
    None,
    None,
    flags=cv2.CALIB_FIX_INTRINSIC)
print("proj calib mat after\n%s" % K_proj)
print("proj dist_coef %s" % dist_coef_proj.T)
print("calibration reproj err %s" % ret)

print("stereo calibration")
ret, K, dist_coef, K_proj, dist_coef_proj, proj_R, proj_T, _, _ = cv2.stereoCalibrate(
    objpoints,
    imgpoints,
    imgpointsProj,
    cameraMatrix,
    distCoeffs,
    K_proj,
Exemplo n.º 57
0
for frame in images:
    # Load the image and convert it to grayscale
    img = cv.imread(frame)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    # Find the chessboard and its corners in the image
    ret, corners = cv.findChessboardCorners(gray, (7, 7), None)

    # Store the image points and draw the inner chessboard corners
    if ret:
        objectPoints.append(objectP)
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imagePoints.append(corners)
        cv.drawChessboardCorners(img, (7, 7), corners2, ret)
        # cv.imshow('img', img)
        cv.imwrite('chess.jpg', img)
        cv.waitKey(500)

cv.destroyAllWindows()

# Obtain the camera calibration parameters
ret, cam_matrix, calib_coeffs, rvecs, tvecs = cv.calibrateCamera(
    objectPoints, imagePoints, gray.shape[::-1], None, None)

# Save the results to files
np.save('calib_coeffs.npy', calib_coeffs)
np.save('cam_matrix.npy', cam_matrix)

print(cam_matrix)
Exemplo n.º 58
0
        imgpoints.append(corners2)

        # Draw and display the corners.
        im_with_keypoints = cv2.drawChessboardCorners(img, (4, 11), corners2,
                                                      ret)
        found += 1

    cv2.imshow("img", im_with_keypoints)  # display
    cv2.waitKey(10)

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   gray.shape[::-1], None,
                                                   None)

# Convert the matrices to plain lists so they can be serialized to YAML.
data = {
    'camera_matrix': np.asarray(mtx).tolist(),
    'dist_coeff': np.asarray(dist).tolist()
}

with open("circlegrid_calibration.yaml", "w") as f:
    yaml.dump(data, f)

# You can use the following lines to load the data back from "circlegrid_calibration.yaml"
# with open('circlegrid_calibration.yaml') as f:
#     loadeddict = yaml.safe_load(f)
# mtxloaded = loadeddict.get('camera_matrix')
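
# --- Hedged sketch building on the loading hint above (assumption): turn the
# YAML lists back into numpy arrays so they can be fed to cv2.undistort. ---
with open("circlegrid_calibration.yaml") as f:
    loaded = yaml.safe_load(f)
camera_matrix = np.asarray(loaded['camera_matrix'])
dist_coeff = np.asarray(loaded['dist_coeff'])
print(camera_matrix, dist_coeff, sep="\n")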
Exemplo n.º 59
0
def calibrate_camera(path):
    # !/usr/bin/env python

    # *************************************************
    # ***** Parameters for Distortion Calibration *****
    # *************************************************

    # Termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30,
                       0.001)

    # Prepare object points
    objp = np.zeros((9 * 6, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all images
    objpoints = []  # 3d points in real world space
    imgpointsR = []  # 2d points in image plane
    imgpointsL = []

    # Start calibration from the camera
    print('Starting calibration for the 2 cameras... ')

    # Extracting path of individual image stored in a given directory
    path1 = os.path.join(path, 'frame1/*.png')
    path2 = os.path.join(path, 'frame2/*.png')

    images1 = glob.glob(path1)
    images2 = glob.glob(path2)

    for pic1, pic2 in zip(images1, images2):
        print(pic1)

        img1 = cv2.imread(pic1)
        img2 = cv2.imread(pic2)

        ChessImaR = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        ChessImaL = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        retR, cornersR = cv2.findChessboardCorners(
            ChessImaR, (9, 6),
            None)  # Define the number of chessboard corners we are looking for
        retL, cornersL = cv2.findChessboardCorners(ChessImaL, (9, 6),
                                                   None)  # Left side
        if retR and retL:
            objpoints.append(objp)
            cv2.cornerSubPix(ChessImaR, cornersR, (11, 11), (-1, -1), criteria)
            cv2.cornerSubPix(ChessImaL, cornersL, (11, 11), (-1, -1), criteria)
            imgpointsR.append(cornersR)
            imgpointsL.append(cornersL)

    # Determine the new values for different parameters
    #   Right Side
    retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
        objpoints, imgpointsR, ChessImaR.shape[::-1], None, None)
    hR, wR = ChessImaR.shape[:2]
    OmtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR, hR), 1,
                                                (wR, hR))

    #   Left Side
    retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
        objpoints, imgpointsL, ChessImaL.shape[::-1], None, None)
    hL, wL = ChessImaL.shape[:2]
    OmtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1,
                                                (wL, hL))

    print('Cameras Ready to use')

    # ********************************************
    # ***** Calibrate the Cameras for Stereo *****
    # ********************************************

    # StereoCalibrate function
    flags = 0
    flags |= cv2.CALIB_FIX_INTRINSIC
    # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    # flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    # flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    # flags |= cv2.CALIB_FIX_ASPECT_RATIO
    # flags |= cv2.CALIB_ZERO_TANGENT_DIST
    # flags |= cv2.CALIB_RATIONAL_MODEL
    # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    # flags |= cv2.CALIB_FIX_K3
    # flags |= cv2.CALIB_FIX_K4
    # flags |= cv2.CALIB_FIX_K5
    retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR,
        ChessImaR.shape[::-1], criteria=criteria_stereo, flags=flags)

    # StereoRectify function
    rectify_scale = 0  # if 0 the image is cropped, if 1 the image is not cropped
    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
        MLS, dLS, MRS, dRS, ChessImaR.shape[::-1], R, T, rectify_scale,
        (0, 0))  # last parameter is alpha: 0 = cropped, 1 = not cropped
    # initUndistortRectifyMap function
    Left_Stereo_Map = cv2.initUndistortRectifyMap(
        MLS, dLS, RL, PL, ChessImaR.shape[::-1], cv2.CV_16SC2
    )  # the cv2.CV_16SC2 map format makes the subsequent remap faster
    Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                                   ChessImaR.shape[::-1],
                                                   cv2.CV_16SC2)
    return Left_Stereo_Map, Right_Stereo_Map
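
# --- Hedged usage sketch (assumption): rectifying one stereo pair with the
# maps returned by calibrate_camera. The directory and file names below are
# placeholders. ---
left_map, right_map = calibrate_camera('calibration_images')
frame_left = cv2.imread('calibration_images/frame2/000.png')
frame_right = cv2.imread('calibration_images/frame1/000.png')
rect_left = cv2.remap(frame_left, left_map[0], left_map[1], cv2.INTER_LINEAR)
rect_right = cv2.remap(frame_right, right_map[0], right_map[1],
                       cv2.INTER_LINEAR)
cv2.imwrite('rectified_left.png', rect_left)
cv2.imwrite('rectified_right.png', rect_right)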
Exemplo n.º 60
0
    def play(self, calib_button):

        calib_button.config(state="disabled")
        self.bot[5].config(relief="sunken")
        self.bot[5].config(state="active")

        self.style_pg.configure('text.Horizontal.TProgressbar', text='0 %')
        self.progbar["value"] = 0

        # reset values status for clustering
        self.label_status[1][1].config(text='')
        self.label_status[1][2].config(text='')
        self.label_status[2][1].config(text='')
        self.label_status[2][2].config(text='')
        self.label_status[3][1].config(text='')
        self.label_status[3][2].config(text='')
        self.label_status[4][1].config(text='')
        self.label_status[4][2].config(text='')
        self.label_status[5][2].config(text='')

        self.popup.update()

        self.imgpoints = [[], []]
        self.objpoints = []

        for j in range(self.n_cameras):
            for feature in self.detected_features[j]:
                self.imgpoints[j].append(feature)
                if j == 0:
                    self.objpoints.append(self.object_pattern)

        flags_parameters = int(self.p_intrinsics_guess.get()) * cv2.CALIB_USE_INTRINSIC_GUESS + \
                           int(self.p_fix_point.get()) * cv2.CALIB_FIX_PRINCIPAL_POINT + \
                           int(self.p_fix_ratio.get()) * cv2.CALIB_FIX_ASPECT_RATIO + \
                           int(self.p_zero_tangent_distance.get()) * cv2.CALIB_ZERO_TANGENT_DIST

        logging.debug('%s', self.how_to_calibrate.get())

        if "Clustering" in self.how_to_calibrate.get():
            c_r = None
            c_k = None

            b_continue = True
            try:
                c_r = self.c_r.get()
                if c_r == 0:
                    self.label_msg[1].configure(
                        text='R parameter must be greater than zero')
                    b_continue = False
                elif c_r > self.n_total.get():
                    self.label_msg[1].configure(
                        text='R parameter must be smaller than or equal to n')
                    b_continue = False
                else:
                    self.label_msg[1].configure(text='')
            except ValueError:
                self.label_msg[1].configure(
                    text='R parameter cannot be empty')
                b_continue = False
            try:
                c_k = self.c_k.get()
                if c_k == 0:
                    self.label_msg[0].configure(
                        text='K parameter must be greater than zero')
                    b_continue = False
                else:
                    self.label_msg[0].configure(text='')
            except ValueError:
                self.label_msg[0].configure(
                    text='K parameter cannot be empty')
                b_continue = False
            if not b_continue:
                self.bot[5].config(relief="raised")
                self.bot[5].config(state="normal")
                calib_button.config(state="active")
                return

            # n, number of all images
            self.samples, k = combination(len(self.objpoints), c_r, c_k)

            if k != c_k:
                self.c_k.set(int(k))
                self.label_msg[1].config(
                    text=
                    'Number of groups changed from %d to %d (maximum possible)'
                    % (c_k, k))
                self.popup.update()  # for updating while running other process

            time_play = chronometer()
            counter = 0

            C_array = []
            D_array = []
            self.R_array = []
            self.T_array = []

            self.fx_array = [[], []]
            self.fy_array = [[], []]
            self.cx_array = [[], []]
            self.cy_array = [[], []]
            self.k1_array = [[], []]
            self.k2_array = [[], []]
            self.k3_array = [[], []]
            self.k4_array = [[], []]
            self.k5_array = [[], []]
            self.RMS_array = []

            for s in self.samples:
                op = list(self.objpoints[i] for i in s)
                ip, c, d = [], [], []
                for j in range(self.n_cameras):
                    ip.append(list(self.imgpoints[j][i] for i in s))
                    c.append(np.eye(3, dtype=np.float32))
                    d.append(np.zeros((5, 1), dtype=np.float32))

                R = None
                T = None

                if self.m_stereo:
                    # move coordinates when images size are different
                    if self.size[0] != self.size[1]:
                        logging.debug('Different camera resolution')
                        ip = np.array(ip)
                        index_min = self.size.index(min(self.size))
                        index_max = self.size.index(max(self.size))
                        w_adj, h_adj = self.size[index_max]
                        w, h = self.size[index_min]
                        n_poses, n_points, _, _ = ip[index_min].shape
                        logging.debug('Transforming coordinates for camera %s',
                                      index_min + 1)
                        for pose in range(n_poses):
                            for point in range(n_points):
                                ip[index_min][pose][point] = np.sum([
                                    ip[index_min][pose][point],
                                    [[(h_adj - h) / 2, (w_adj - w) / 2]]
                                ],
                                                                    axis=0)
                    width = max(self.size[0][1], self.size[1][1])
                    height = max(self.size[0][0], self.size[1][0])
                    rms, c[0], d[0], c[1], d[
                        1], R, T, E, F = cv2.stereoCalibrate(
                            op,
                            ip[0],
                            ip[1],
                            c[0],
                            d[0],
                            c[1],
                            d[1], (width, height),
                            flags=flags_parameters)
                else:
                    width = self.size[0][1]
                    height = self.size[0][0]
                    rms, c[0], d[0], r, t = cv2.calibrateCamera(
                        op,
                        ip[0], (width, height),
                        c[0],
                        d[0],
                        flags=flags_parameters)
                logging.info('calibration RMS error: %s', rms)

                if rms != 0:
                    counter += 1
                    # add to matrices
                    C_array.append(c)
                    D_array.append(d)
                    # add to iteration array
                    self.fx_array[0].append(c[0][0][0])
                    self.fy_array[0].append(c[0][1][1])
                    self.cx_array[0].append(c[0][0][2])
                    self.cy_array[0].append(c[0][1][2])
                    self.k1_array[0].append(d[0][0][0])
                    self.k2_array[0].append(d[0][1][0])
                    self.k3_array[0].append(d[0][2][0])
                    self.k4_array[0].append(d[0][3][0])
                    self.k5_array[0].append(d[0][4][0])
                    self.RMS_array.append(rms)
                    if self.m_stereo:
                        self.R_array.append(R)
                        self.T_array.append(T)
                        # add to iteration array
                        self.fx_array[1].append(c[1][0][0])
                        self.fy_array[1].append(c[1][1][1])
                        self.cx_array[1].append(c[1][0][2])
                        self.cy_array[1].append(c[1][1][2])
                        self.k1_array[1].append(d[1][0][0])
                        self.k2_array[1].append(d[1][1][0])
                        self.k3_array[1].append(d[1][2][0])
                        self.k4_array[1].append(d[1][3][0])
                        self.k5_array[1].append(d[1][4][0])

                    c_porcent = counter / float(len(
                        self.samples))  # percentage of completion of process
                    self.progbar["value"] = c_porcent * 10.0
                    elapsed_time_1 = time_play.gettime()
                    self.lb_time.config(
                        text='Estimated time left: %0.5f seconds' %
                        max(elapsed_time_1 * (1 / c_porcent - 1), 0))
                    self.style_pg.configure(
                        'text.Horizontal.TProgressbar',
                        text='{:g} %'.format(c_porcent *
                                             100.0))  # update label
                    self.popup.update(
                    )  # for updating while running other process

            self.label_status[1][1].config(text=u'\u2714')
            self.label_status[1][2].config(text='%0.5f' % elapsed_time_1)
            self.lb_time.config(text='')

            if len(C_array) > 0:
                self.camera_matrix = np.mean(np.array(C_array), axis=0)
                self.dist_coefs = np.mean(np.array(D_array), axis=0)
                self.dev_camera_matrix = np.std(np.array(C_array), axis=0)
                self.dev_dist_coefs = np.std(np.array(D_array), axis=0)
                if self.m_stereo:
                    self.R_stereo = averageMatrix(self.R_array)
                    self.T_stereo = np.mean(np.array(self.T_array), axis=0)
                    # Correction for cx and cy parameters
                    if self.size[0] != self.size[1]:
                        logging.debug(
                            'Correcting cx and cy for camera {0}'.format(
                                index_min + 1))
                        self.camera_matrix[index_min][0][2] -= (h_adj - h) / 2
                        self.camera_matrix[index_min][1][2] -= (w_adj - w) / 2
            else:
                self.reset_camera_parameters()

            elapsed_time_2 = time_play.gettime()
            self.label_status[2][1].config(text=u'\u2714')
            self.label_status[2][2].config(text='%0.5f' %
                                           (elapsed_time_2 - elapsed_time_1))

            if np.any(self.camera_matrix[:, 0, 0] == 1):
                self.reset_camera_parameters()
                self.reset_error()
            else:
                logging.debug('Correct!')
                # Camera projections
                self.calculate_projection()
                elapsed_time_3 = time_play.gettime()
                self.label_status[3][1].config(text=u'\u2714')
                self.label_status[3][2].config(
                    text='%0.5f' % (elapsed_time_3 - elapsed_time_2))
                # Calculate RMS error
                self.calculate_error()
                elapsed_time_4 = time_play.gettime()
                self.label_status[4][1].config(text=u'\u2714')
                self.label_status[4][2].config(
                    text='%0.5f' % (elapsed_time_4 - elapsed_time_3))
                self.label_status[5][2].config(text='%0.5f' % elapsed_time_4)
                self.bot[8].config(
                    state="normal")  # enable export parameters button
                self.bot[9].config(
                    state="normal")  # enable export parameters button
                for e in self.rms:
                    if e == float("inf") or e == float("-inf"):
                        logging.warning('Error is too high')
                        # mark X for step 3 and 4
                        self.label_status[3][1].config(text=u'\u2718')
                        self.label_status[4][1].config(text=u'\u2718')
                        self.reset_camera_parameters()
                        self.reset_error()
                        self.bot[8].config(
                            state="disable")  # disable export parameters button
                        self.bot[9].config(
                            state="disable")  # disable export parameters button
                        break

        elif "Load" in self.how_to_calibrate.get():
            b_continue = True
            for j in range(2 * (self.n_cameras - 1) + 1):
                if '.txt' not in self.l_load_files[j].cget('text'):
                    if j == 2:
                        self.l_load_files[j].config(text='Missing Extrinsics',
                                                    fg='green')
                        self.label_status_l[3][1].config(text=u'\u2718')
                        if b_continue:
                            # TODO: Adjust for different sizes in Load mode
                            width = max(self.size[0][1], self.size[1][1])
                            height = max(self.size[0][0], self.size[1][0])
                            rms, self.camera_matrix[0], self.dist_coefs[
                                0], self.camera_matrix[1], self.dist_coefs[
                                    1], R, T, E, F = cv2.stereoCalibrate(
                                        self.objpoints,
                                        self.imgpoints[0],
                                        self.imgpoints[1],
                                        self.camera_matrix[0],
                                        self.dist_coefs[0],
                                        self.camera_matrix[1],
                                        self.dist_coefs[1], (width, height),
                                        flags=cv2.CALIB_FIX_INTRINSIC +
                                        flags_parameters)
                            if rms != 0:
                                self.R_stereo = R
                                self.T_stereo = T
                                self.label_status_l[3][0].config(
                                    text='3. Calculating Extrinsics')
                                self.label_status_l[3][1].config(
                                    text=u'\u2714')
                            else:
                                logging.error('Calibration failed')
                                b_continue = False
                                self.label_status_l[j + 1][1].config(
                                    text=u'\u2718')
                                self.label_status_l[4][1].config(
                                    text=u'\u2718')
                    else:
                        self.l_load_files[j].config(
                            text='File missing, please add', fg='red')
                        b_continue = False
                        self.label_status_l[j + 1][1].config(text=u'\u2718')
                        self.label_status_l[4][1].config(text=u'\u2718')

            if b_continue:
                for i in range(self.n_cameras):
                    if self.camera_matrix[i][0][
                            0] == 0:  # Fx is zero only when reset
                        logging.debug('Data for camera %s not available',
                                      i + 1)
                        self.reset_camera_parameters()
                        self.reset_error()
                        break
                    if i == self.n_cameras - 1:
                        logging.debug('Correct!')
                        # Camera projections
                        self.calculate_projection()
                        # Calculate RMS error
                        self.calculate_error()
                        self.bot[8].config(
                            state="normal")  # enable export parameters button
                        self.bot[9].config(
                            state="normal")  # enable export parameters button

                for e in self.rms:
                    if e == float("inf") or e == float("-inf"):
                        logging.warning('Error is too high')
                        self.reset_camera_parameters()
                        self.reset_error()
                        self.label_status_l[4][1].config(text=u'\u2718')
                        self.bot[8].config(
                            state="disable")  # disable export parameters button
                        self.bot[9].config(
                            state="disable")  # disable export parameters button
                        break
                    else:
                        self.label_status_l[4][1].config(text=u'\u2714')

        self.update = True  # Update bool activated

        self.updateCameraParametersGUI()
        self.loadBarError([0, 1])
        calib_button.config(state="normal")
        self.bot[5].config(relief="raised")
        self.bot[5].config(state="normal")
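
# --- Hedged sketch (assumption, the real helpers are defined elsewhere): one
# plausible implementation of the combination(n, r, k) utility used by the
# clustering branch above. It returns up to k distinct r-element index groups
# drawn from range(n), capping k at the number of possible combinations. ---
import random
from math import comb

def combination_sketch(n, r, k):
    k = min(k, comb(n, r))        # cannot form more distinct groups than C(n, r)
    chosen = set()
    while len(chosen) < k:        # rejection-sample distinct index groups
        chosen.add(tuple(sorted(random.sample(range(n), r))))
    return [list(group) for group in chosen], k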