Example no. 1
    def cal_fromcorners(self, good):
        # Perform monocular calibrations
        lcorners = [(l, b) for (l, r, b) in good]
        rcorners = [(r, b) for (l, r, b) in good]
        self.l.cal_fromcorners(lcorners)
        self.r.cal_fromcorners(rcorners)

        lipts = [ l for (l, _, _) in good ]
        ripts = [ r for (_, r, _) in good ]
        boards = [ b for (_, _, b) in good ]
        
        opts = self.mk_object_points(boards, True)

        flags = cv2.CALIB_FIX_INTRINSIC

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        cv2.stereoCalibrate(opts, lipts, ripts, self.size,
                           self.l.intrinsics, self.l.distortion,
                           self.r.intrinsics, self.r.distortion,
                           self.R,                            # R
                           self.T,                            # T
                           criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                           flags = flags)

        self.set_alpha(0.0)
    def cal_fromcorners(self): 
        #                                , do_mono_calib = False):
        #if do_mono_calib:
        #    print
        #     TODO Perform monocular calibrations
        #    self.l.cal_fromcorners(cam1_corners)
        #    self.r.cal_fromcorners(cam2_corners)
        cam1_ipts, cam2_ipts, b = self.get_calib_data()
        print "Calibration data info:"
        print "  CAM1 data array shape:\t %s" % str(cam1_ipts.shape)
        print "  CAM2 data array shape:\t %s" % str(cam2_ipts.shape)
        print "  BOARD data array shape:\t %s" % str(b.shape)

        flags = cv2.CALIB_FIX_INTRINSIC

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        self.cam2_D = np.zeros((5,1), np.float64)
        if LooseVersion(cv2.__version__).version[0] == 2:
            cv2.stereoCalibrate(b, cam1_ipts, cam2_ipts, 
                               self.size,
                               self.cam1_K, self.cam1_D,
                               self.cam2_K, self.cam2_D,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)

        else:
            cv2.stereoCalibrate(self.CHESSBOARD, cam1_ipts, cam2_ipts,
                               self.cam1_K, self.cam1_D,
                               self.cam2_K, self.cam2_D,
                               self.size,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)
        print "RESULTS (R, T):"
        print self.R, "\n", self.T
        homog = np.hstack((self.R, np.array([[0],[0],[0]])))
        homog = np.vstack((homog, np.array([0,0,0,1])))
        print "R AS A QUATERNION:"
        q = tf.transformations.quaternion_from_matrix(homog)
        print q
        # NOTE quat_from_mat() doesn't return a quaternion object!
        #   But the Pose msg requires one!  
        #   Fun times if ya like debugging cryptic errors!
        q_msg = Quaternion(q[0], q[1], q[2], q[3])
        t_msg = Point(self.T[0], self.T[1], self.T[2])

        # Send the calculated tf to the tf handler node:
        posemsg = Pose(t_msg, q_msg)
        try:
            self.publish_stereo_tf(posemsg)
        except rospy.service.ServiceException:
            print "Service request was successful!"
        print "Shutting down calibrator node %s" % rospy.get_name()
        sys.exit(0)
Example no. 3
def calibrate_stereo(config):
    term_crit = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)

    flags = 0
    #flags |= cv2.CALIB_FIX_INTRINSIC
    #flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    #flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    #flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    flags |= cv2.CALIB_FIX_ASPECT_RATIO
    flags |= cv2.CALIB_ZERO_TANGENT_DIST
    flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    flags |= cv2.CALIB_RATIONAL_MODEL
    flags |= cv2.CALIB_FIX_K3
    flags |= cv2.CALIB_FIX_K4
    flags |= cv2.CALIB_FIX_K5

    (error, config.camera.left_intrinsics, config.camera.left_distortion,
            config.camera.right_intrinsics, config.camera.right_distortion,
            R, T, E, F) = cv2.stereoCalibrate(
                    config.calibration.object_points,
                    config.calibration.left_points,
                    config.calibration.right_points,
                    config.camera.size, criteria=term_crit, flags=flags)

    return error, R, T
    def StereoCalibrate(self, leftCorners, rightCorners, objectPoints):
        """Calibrates the stereo camera."""
        # <009> Prepares the external parameters.
        imagePointsLeft         = Configuration.Configuration.Instance.Pattern.LeftCorners
        imagePointsRight        = Configuration.Configuration.Instance.Pattern.RightCorners
        imageSize               = StereoCameras.Instance.Size

        cameraMatrixLeft        = StereoCameras.Instance.Parameters.CameraMatrix1
        cameraMatrixRight       = StereoCameras.Instance.Parameters.CameraMatrix2
        distCoeffLeft           = StereoCameras.Instance.Parameters.DistCoeffs1
        distCoeffRight          = StereoCameras.Instance.Parameters.DistCoeffs2


        # Defines the criterias used by stereoCalibrate() function.
        criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
        flags  = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_SAME_FOCAL_LENGTH
        flags |= cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5

        # <010> Calibrates a stereo camera setup.
        retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, imagePointsLeft, imagePointsRight, imageSize, cameraMatrixLeft, cameraMatrixRight, distCoeffLeft, distCoeffRight)

        E2 = self.EssentialMatrix(R, T)
        F2 = self.FundamentalMatrix(cameraMatrix1, cameraMatrix2, E2)


        # TODO: Store other values?

        # <010> Records the external parameters.
        StereoCameras.Instance.Parameters.CameraMatrix1 = cameraMatrix1
        StereoCameras.Instance.Parameters.CameraMatrix2 = cameraMatrix2
        StereoCameras.Instance.Parameters.DistCoeffs1 = distCoeffs1
        StereoCameras.Instance.Parameters.DistCoeffs2 = distCoeffs2

        # <014> Return the final result.
        return R, T
Example no. 5
def calibrate_rgb_and_tv(objpoints, img_points_rgb, img_points_tv, image_size,
                         rgb_camera_matrix, rgb_dist_coeffs, tv_camera_matrix, tv_dist_coeffs):
    # matrix will be FROM tv TO rgb
    object_points = [objpoints for i in range(len(img_points_rgb))]
    return cv2.stereoCalibrate(
        objectPoints=object_points, imagePoints2=img_points_rgb, imagePoints1=img_points_tv, imageSize=image_size,
        cameraMatrix2=rgb_camera_matrix, distCoeffs2=rgb_dist_coeffs, cameraMatrix1=tv_camera_matrix, distCoeffs1=tv_dist_coeffs)
Example no. 6
 def calibrate_cameras(self):
     """Calibrate cameras based on found chessboard corners."""
     criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
     flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH
     calib = StereoCalibration()
     (
         calib.cam_mats["left"],
         calib.dist_coefs["left"],
         calib.cam_mats["right"],
         calib.dist_coefs["right"],
         calib.rot_mat,
         calib.trans_vec,
         calib.e_mat,
         calib.f_mat,
     ) = cv2.stereoCalibrate(
         self.object_points,
         self.image_points["left"],
         self.image_points["right"],
         self.image_size,
         criteria=criteria,
         flags=flags,
     )[
         1:
     ]
     (
         calib.rect_trans["left"],
         calib.rect_trans["right"],
         calib.proj_mats["left"],
         calib.proj_mats["right"],
         calib.disp_to_depth_mat,
         calib.valid_boxes["left"],
         calib.valid_boxes["right"],
     ) = cv2.stereoRectify(
         calib.cam_mats["left"],
         calib.dist_coefs["left"],
         calib.cam_mats["right"],
         calib.dist_coefs["right"],
         self.image_size,
         calib.rot_mat,
         calib.trans_vec,
         flags=0,
     )
     for side in ("left", "right"):
         (calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
             calib.cam_mats[side],
             calib.dist_coefs[side],
             calib.rect_trans[side],
             calib.proj_mats[side],
             self.image_size,
             cv2.CV_32FC1,
         )
     # This is replaced because my results were always bad. Estimates are
     # taken from the OpenCV samples.
     width, height = self.image_size
     focal_length = 0.8 * width
     calib.disp_to_depth_mat = np.float32(
         [[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]]
     )
     return calib
def calibrate_pair(pattern_keys, data_1, data_2, min_matches):
    [cam1, keys1, matches1, frame_indices1, cam_ind1] = data_1
    [cam2, keys2, matches2, frame_indices2, cam_ind2] = data_2

    if frame_indices1 is None:
        assert frame_indices2 is None
    else:
        # filter frames where both views have data
        I = dict((x, i) for i, x in enumerate(frame_indices1))
        J = dict((x, i) for i, x in enumerate(frame_indices2))
        K = set(I.keys()).intersection(J.keys())

        matches1 = [matches1[I[k]] for k in K]
        matches2 = [matches2[J[k]] for k in K]

        keys1 = [keys1[I[k]] for k in K]
        keys2 = [keys2[J[k]] for k in K]
        if len(matches1) == 0:
            print "Error: No overlapping frames between views %d and %d -- double check 'order' parameter" % (
                cam_ind1 + 1,
                cam_ind2 + 1,
            )
            exit(1)

    # prune non-mutual matches
    tmp = [intersect_matches(m1, m2) for (m1, m2) in zip(matches1, matches2)]
    [matches1, matches2] = zip(*tmp)
    I = [i for i, x in enumerate(matches1) if len(x) >= min_matches]

    # prune frames with insufficient matches
    matches1 = [matches1[i] for i in I]
    matches2 = [matches2[i] for i in I]
    keys1 = [keys1[i] for i in I]
    keys2 = [keys2[i] for i in I]

    if len(matches1) == 0:
        print "Error: pair (%d, %d) have no shared keypoints" % (cam_ind1 + 1, cam_ind2 + 1)
        exit(1)
    print "pair (%d, %d):  %d matches" % (cam_ind1 + 1, cam_ind2 + 1, len(matches1))

    to_data = calib.keypoint_matches_to_calibration_data

    tmp = [to_data(m, pattern_keys, k) for (m, k) in zip(matches1, keys1)]
    [obj_pts, img_pts1] = zip(*tmp)

    tmp = [to_data(m, pattern_keys, k) for (m, k) in zip(matches2, keys2)]
    [obj_pts2, img_pts2] = zip(*tmp)

    assert all([(a == b).all() for (a, b) in zip(obj_pts, obj_pts2)])

    flags = cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_FIX_INTRINSIC

    error, _, _, _, _, R, T, _, _ = cv2.stereoCalibrate(
        obj_pts, img_pts1, img_pts2, (0, 0), cam1.K, cam1.distortion, cam2.K, cam2.distortion, flags=flags
    )

    return calib.Calibrated_camera(R=R, T=T, intr=cam2)
Example no. 8
def stereoCalibration(objpoints, imgpoints1, imgpoints2, camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2, shape):
    T = None
    R = None
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpoints1, imgpoints2,
        camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2,
        shape, R, T, E=None, F=None,
        flags=cv2.CALIB_FIX_INTRINSIC,
        criteria=(cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 30, 1e-6))
    R1 = None
    R2 = None
    P1 = None
    P2 = None
    Q = None
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
        camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2, shape, R, T,
        R1, R2, P1, P2, Q, flags=cv2.CALIB_ZERO_DISPARITY, alpha=1, newImageSize=None)

    return Q
Example no. 9
def process(controller):
    map_initialized = False
    num_images = 0
    num_samples = 0
    while True:
        frame = controller.frame()
        images = frame.images
        if images[0].is_valid and images[1].is_valid:
            if not map_initialized:
                left_x_map, left_y_map = initDistortionMap(frame.images[0])
                right_x_map, right_y_map = initDistortionMap(frame.images[1])
                map_initialized = True
            undistorted_left = interpolate(get_image_as_array(images[0]), left_x_map, left_y_map)
            undistorted_right = interpolate(get_image_as_array(images[1]), right_x_map, right_y_map)
            left_bw = cv2.adaptiveThreshold(undistorted_left, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 3)
            right_bw = cv2.adaptiveThreshold(undistorted_right, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 3)
            corners_left = cv2.findChessboardCorners(left_bw, board_size, flags=cv2.CALIB_CB_FILTER_QUADS)
            corners_right = cv2.findChessboardCorners(right_bw, board_size, flags=cv2.CALIB_CB_FILTER_QUADS)
            if corners_left[0] and corners_right[0]:
                cv2.cornerSubPix(left_bw, corners_left[1], (3, 3), (-1, -1), stereocalib_criteria)
                cv2.cornerSubPix(right_bw, corners_right[1], (3, 3), (-1, -1), stereocalib_criteria)
                cv2.drawChessboardCorners(undistorted_left, board_size, corners_left[1], corners_left[0])
                cv2.drawChessboardCorners(undistorted_right, board_size, corners_right[1], corners_right[0])
                image_points1.append(corners_left[1])
                image_points2.append(corners_right[1])
                object_points.append(obj)
                num_samples += 1
                print(num_samples)
                if num_samples > num_boards:
                    break
            cv2.imshow('left', undistorted_left)
            cv2.imshow('right', undistorted_right)
            cv2.imshow('left_bw', left_bw)
            cv2.imshow('right_bw', right_bw)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.imwrite(f'left{num_images}.jpg', left_bw)
                cv2.imwrite(f'right{num_images}.jpg', right_bw)
                num_images += 1
                break

    retval,cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        object_points, image_points1, image_points2, C1, D1, C2, D2, imageSize=(640, 240), flags=stereocalib_flags, criteria=stereocalib_criteria
    )

    print("params")
    print(retval,cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F)

    print("reprojection matrix")
    a = cv2.stereoRectify(C1, D1, C2, D2, (640, 240), R, T)
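    # cv2.stereoRectify returns (R1, R2, P1, P2, Q, validPixROI1, validPixROI2),
    # so a[-3] below is the 4x4 disparity-to-depth (Q) matrix.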
    print(a[-3])
Example no. 10
def getCalibratefromStereoImage(objpoints, l_imgpoints, r_imgpoints):
    
    # from OpenCV docs: if any of CV_CALIB_FIX_ASPECT_RATIO... are specified, the matrix components must be initialized.
    cameraMatrix1 = cv2.initCameraMatrix2D(objpoints, l_imgpoints, (640, 480), 0);
    cameraMatrix2 = cv2.initCameraMatrix2D(objpoints, r_imgpoints, (640, 480), 0);
    distCoeffs1 = None
    distCoeffs2 = None

    # directly call stereoCalibrate from the OpenCV library
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
            cv2.stereoCalibrate(objpoints, l_imgpoints, r_imgpoints, \
            cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (640, 480), \
            criteria=stereocalib_criteria, flags=stereocalib_flags)

    return retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
    def stereo_calibrate(self, dims):
        flags = 0
        flags |= cv2.CALIB_FIX_INTRINSIC
        # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
        flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        # flags |= cv2.CALIB_FIX_ASPECT_RATIO
        flags |= cv2.CALIB_ZERO_TANGENT_DIST
        # flags |= cv2.CALIB_RATIONAL_MODEL
        # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        # flags |= cv2.CALIB_FIX_K3
        # flags |= cv2.CALIB_FIX_K4
        # flags |= cv2.CALIB_FIX_K5

        stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                                cv2.TERM_CRITERIA_EPS, 100, 1e-5)
        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            self.objpoints, self.imgpoints_l,
            self.imgpoints_r, self.M1, self.d1, self.M2,
            self.d2, dims,
            criteria=stereocalib_criteria, flags=flags)

        print('Intrinsic_mtx_1', M1)
        print('dist_1', d1)
        print('Intrinsic_mtx_2', M2)
        print('dist_2', d2)
        print('R', R)
        print('T', T)
        print('E', E)
        print('F', F)

        # for i in range(len(self.r1)):
        #     print("--- pose[", i+1, "] ---")
        #     self.ext1, _ = cv2.Rodrigues(self.r1[i])
        #     self.ext2, _ = cv2.Rodrigues(self.r2[i])
        #     print('Ext1', self.ext1)
        #     print('Ext2', self.ext2)

        print('')

        camera_model = dict([('M1', M1), ('M2', M2), ('dist1', d1),
                            ('dist2', d2), ('rvecs1', self.r1),
                            ('rvecs2', self.r2), ('R', R), ('T', T),
                            ('E', E), ('F', F)])

        cv2.destroyAllWindows()
        return camera_model
Example no. 12
def calibrate(folder, name1, name2):    
    im1 = cv2.imread(folder + '/' + name1 +'1.jpg')
    h1 = im1.shape[0]
    w1 = im1.shape[1]
    
    im2 = cv2.imread(folder + '/' + name2 + '1.jpg')
    if im2 is None:
        im2 = cv2.imread(folder + '/' + name2 + '1.JPG')
    h2 = im2.shape[0]
    w2 = im2.shape[1]
    
    img_points1, obj_points1, valid1 = detect_points_from_pattern(folder+'/'+name1+ '*.jpg')
    if name2 == 'thermal':
        img_points2, obj_points2, valid2 = detect_points_from_pattern(folder+'/'+name2+ '*.jpg', is_thermal=True)
    else:
        img_points2, obj_points2, valid2 = detect_points_from_pattern(folder+'/'+name2+ '*.jpg')
        
    print 'valid1',valid1
    print 'valid2',valid2
    rms1, K1, d1, rvecs1, tvecs1 = cv2.calibrateCamera(obj_points1, img_points1, (w1, h1),
                                                flags=cv2.CALIB_RATIONAL_MODEL)    
    rms2, K2, d2, rvecs2, tvecs2 = cv2.calibrateCamera(obj_points2, img_points2, (w2, h2),
                                                flags=cv2.CALIB_RATIONAL_MODEL)
        
    print 'rms1:',rms1, 'rms2:',rms2
    
    obj_points = [obj_points1[j] for j,i in enumerate(valid1) if i in valid2]
    img_points1 = [img_points1[j] for j,i in enumerate(valid1) if i in valid2]
    img_points2 = [img_points2[valid2.index(i)] for j,i in enumerate(valid1) if i in valid2]
    
    # imageSize - Size of the image used only to initialize intrinsic camera matrix. Since we fix intrinsic matrix, (w,h) below can be anything.
    rms, _, _, _, _, R,T,E,F= cv2.stereoCalibrate(obj_points, img_points1, img_points2, (w1,h1), K1,d1,K2,d2, 
                                                  flags=cv2.CALIB_FIX_INTRINSIC|cv2.CALIB_RATIONAL_MODEL)
    print 'rms:',rms
    print 'R:',R
    print 'T:',T
    
    out_file_name = '/home/yuncong/Documents/dataset/calib3/'+name1+'_'+name2+'_calib_info.txt'
    out_file = open(out_file_name, 'w')
    np.savez(out_file, K1=K1,d1=d1,K2=K2,d2=d2,R=R,T=T)
    
    return K1,d1,K2,d2,R,T
Example no. 13
File: mycv.py Project: yieku/Python
def stereoCalibrate(imageDir, K, D, width, height, debug=False, flags=cv2.CALIB_FIX_INTRINSIC):
    """
    width:棋牌的宽
    height:棋牌的高
    debug:输出调试信息
    flags:参考opencv的stereoCalibrate函数
    """
    imagePoints, nimages, imageSize = loadImagePoints(imageDir, width, height, debug)
    objectPoints = []
    patternPoints = cvtools.createPatternPoints(width, height)
    imagePoints1 = []
    imagePoints2 = []
    for i in range(0, len(imagePoints)):
        if i % 2 == 0:
            imagePoints1.append(imagePoints[i])
        else:
            imagePoints2.append(imagePoints[i])
    for i in range(0, nimages // 2):
        objectPoints.append(patternPoints)
    return cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize,K,D,K,D,criteria=(cv2.TERM_CRITERIA_COUNT+cv2.TERM_CRITERIA_EPS, 100, 1e-5),flags=flags)
Example no. 14
 def calibrate(self):
     left = self.detect_squares(self.left_images)
     right = self.detect_squares(self.right_images)
     print "Sweet camera calibration is done. \n now for the stereo data. please wait..."
     (retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
       R, T, E, F) = cv2.stereoCalibrate(left[0], left[1], right[1], left[2],
                                         left[3], left[4], right[3], right[4])
     self.retval = retval
     self.size = left[3]
     self.rvects = (left[5], right[5])
     self.tvects = (left[6], right[6])
     self.camera = (left[1], right[1])#(cameraMatrix1, cameraMatrix2)
     self.distortion = (left[2], right[2])#(distCoeffs1, distCoeffs2)
     self.r = R
     self.t = T
     self.e = E
     self.f = F
     self.save()
     print "running stereo rectify in case it is needed later"
     R1, R2, P1, P2, Q, valid1, valid2 = cv2.stereoRectify(self.camera[0], 
                     self.distortion[0], self.camera[1], self.distortion[1],
                     left[3], self.r, self.t)
        imagePoints1.append(corners1)
        imagePoints2.append(corners2)
        object_points.append(obj)
        print "Corners stored\n"
        success += 1

        if success >= numBoards:
            break

cv2.destroyAllWindows()
print "Starting Calibration\n"
cameraMatrix1 = np.zeros((3, 3), np.float32)
cameraMatrix2 = np.zeros((3, 3), np.float32)

retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    object_points, imagePoints1, imagePoints2, (width, height)
)
## , cv2.cvTermCriteria(cv2.CV_TERMCRIT_ITER+cv2.CV_TERMCRIT_EPS, 100, 1e-5),   cv2.CV_CALIB_SAME_FOCAL_LENGTH | cv2.CV_CALIB_ZERO_TANGENT_DIST)
# cv2.cv.StereoCalibrate(object_points, imagePoints1, imagePoints2, pointCounts, cv.fromarray(K1), cv.fromarray(distcoeffs1), cv.fromarray(K2), cv.fromarray(distcoeffs2), imageSize, cv.fromarray(R), cv.fromarray(T), cv.fromarray(E), cv.fromarray(F), flags = cv.CV_CALIB_FIX_INTRINSIC)
# FileStorage fs1("mystereocalib.yml", FileStorage::WRITE);
# fs1 << "CM1" << CM1;
# fs1 << "CM2" << CM2;
# #fs1 << "D1" << D1;
# fs1 << "D2" << D2;
# fs1 << "R" << R;
# fs1 << "T" << T;
# fs1 << "E" << E;
# fs1 << "F" << F;
print "Done Calibration\n"
print "Starting Rectification\n"
R1 = np.zeros(shape=(3, 3))
Example no. 16
    def calibrate(self, imgs_l, imgs_r, board, flags=None):
        """
        This will save the found markers for camera_2 (right) only in
        self.save_cal_imgs array
        """
        # so we know a little bit about the camera, so
        # start off the algorithm with a simple guess
        # h,w = imgs_l[0].shape[:2]
        # f = max(h,w)*0.8  # focal length is a function of image size in pixels
        # K = np.array([
        #     [f,0,w//2],
        #     [0,f,h//2],
        #     [0,0,1]
        # ])

        cc = CameraCalibration()
        # rms1, M1, d1, r1, t1, objpoints, imgpoints_l = cc.calibrate(imgs_l, board)
        # rms2, M2, d2, r2, t2, objpoints, imgpoints_r = cc.calibrate(imgs_r, board)

        data = cc.calibrate(imgs_l, board)
        K1 = data["cameraMatrix"]
        d1 = data["distCoeffs"]
        objpoints = data["objpoints"]
        imgpoints_l = data["imgpoints"]

        data = cc.calibrate(imgs_r, board)
        K2 = data["cameraMatrix"]
        d2 = data["distCoeffs"]
        imgpoints_r = data["imgpoints"]

        print(d1, d2)

        self.save_cal_imgs = cc.save_cal_imgs
        """
        CALIB_ZERO_DISPARITY: horizontal shift, cx1 == cx2
        """
        if flags is None:
            flags = 0
            # flags |= cv2.CALIB_FIX_INTRINSIC
            flags |= cv2.CALIB_ZERO_DISPARITY
            # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
            # flags |= cv2.CALIB_USE_INTRINSIC_GUESS
            # flags |= cv2.CALIB_FIX_FOCAL_LENGTH
            # flags |= cv2.CALIB_FIX_ASPECT_RATIO
            # flags |= cv2.CALIB_ZERO_TANGENT_DIST
            # flags |= cv2.CALIB_RATIONAL_MODEL
            # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
            # flags |= cv2.CALIB_FIX_K3
            # flags |= cv2.CALIB_FIX_K4
            # flags |= cv2.CALIB_FIX_K5

        stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                                cv2.TERM_CRITERIA_EPS, 100, 1e-5)

        h, w = imgs_l[0].shape[:2]
        ret, K1, d1, K2, d2, R, T, E, F = cv2.stereoCalibrate(
            objpoints,
            imgpoints_l,
            imgpoints_r,
            K1,
            d1,
            K2,
            d2,
            # (w,h),
            (h, w),
            # R=self.R,
            # T=self.t,
            criteria=stereocalib_criteria,
            flags=flags)

        # d1 = d1.T[0]
        # d2 = d2.T[0]

        # print('')
        camera_model = {
            'date': time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()),
            'markerType': board.type,
            'markerSize': board.marker_size,
            'imageSize': imgs_l[0].shape[:2],
            # 'cameraMatrix1': K1,
            # 'cameraMatrix2': K2,
            # 'distCoeffs1': d1,
            # 'distCoeffs2': d2,
            # 'rvecs1': r1,
            # 'rvecs2': r2,
            # 'R': R,
            # 'T': T,
            # 'E': E,
            # 'F': F,
            "objpoints": objpoints,
            "imgpointsL": imgpoints_l,
            "imgpointsR": imgpoints_r,
        }

        sc = StereoCamera()
        sc.R = R
        sc.E = E
        sc.F = F
        sc.T = T
        sc.K1 = K1
        sc.K2 = K2
        sc.d1 = d1
        sc.d2 = d2

        return ret, camera_model, sc
Example no. 17
    def calibrate(self, load_images=False, keep_chessboard_preview_imgs=False):
        """Perform a complete calibration process."""
        if load_images:
            self.load_images()

        if len(self.left_imgs) == 0:
            logging.warning("No calibration images loaded.")
            self.load_images()

        if len(self.left_imgs) != len(self.right_imgs):
            raise CalibrationImagesNotMatch()

        self.find_chessboards(keep_chessboard_preview_imgs)

        logging.info("Calibrating left camera.")
        _, left_matrix, left_dist_coeff, left_rot_vec, left_trans_vec = \
            cv.calibrateCamera(self.obj_pts, self.left_img_pts,
            (self.width, self.height), None, None
        )
        self.left_matrix = left_matrix
        self.left_dist_coeff = left_dist_coeff
        self.left_rot_vec = left_rot_vec
        self.left_trans_vec = left_trans_vec

        logging.info("Calibrating right camera.")
        _, right_matrix, right_dist_coeff, right_rot_vec, right_trans_vec = \
            cv.calibrateCamera(self.obj_pts, self.right_img_pts,
            (self.width, self.height), None, None
        )
        self.right_matrix = right_matrix
        self.right_dist_coeff = right_dist_coeff
        self.right_rot_vec = right_rot_vec
        self.right_trans_vec = right_trans_vec

        logging.info("Calibrating both cameras.")
        _, _, _, _, _, rot_matrix, trans_vec, _, _ = cv.stereoCalibrate(
            self.obj_pts, self.left_img_pts, self.right_img_pts,
            self.left_matrix, self.left_dist_coeff, self.right_matrix,
            self.right_dist_coeff, (self.width, self.height), None, None, None,
            None, cv.CALIB_FIX_INTRINSIC,
            (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001))
        self.both_rot_matrix = rot_matrix
        self.both_trans_vec = trans_vec

        logging.info("Rectifying cameras.")
        self.left_rectif, self.right_rectif, self.left_proj, self.right_proj, \
            _, self.left_roi, self.right_roi = cv.stereoRectify(
                self.left_matrix, self.left_dist_coeff,
                self.right_matrix, self.right_dist_coeff,
                (self.width, self.height),
                self.both_rot_matrix, self.both_trans_vec,
                None, None, None, None, None,
                cv.CALIB_ZERO_DISPARITY, 0.25
        )

        self.calibrated = True

        # Set re-projection error
        self.left_reprojection_error, self.right_reprojection_error = \
            self.calculate_reprojection_error()
        logging.info(
            "Calibration finished. Re-projection errors: {}, {}.".format(
                self.left_reprojection_error, self.right_reprojection_error))
Example no. 18
if (chessboard_pattern_detections_paired > 0):  # i.e. if we did not load a calibration
    print()
    print("START - extrinsic calibration ...")
    (rms_stereo,
     camera_matrix_l,
     dist_coeffs_l,
     camera_matrix_r,
     dist_coeffs_r,
     R,
     T,
     E,
     F) = cv2.stereoCalibrate(objpoints_pairs,
                              imgpoints_left_paired,
                              imgpoints_right_paired,
                              mtxL,
                              distL,
                              mtxR,
                              distR,
                              grayL.shape[::-1],
                              criteria=termination_criteria_extrinsics,
                              flags=0)

    print("FINISHED - extrinsic calibration")

    print()
    print("Intrinsic Camera Calibration:")
    print()
    print("Intrinsic Camera Calibration Matrix, K - from \
            intrinsic calibration:")
    print("(format as follows: fx, fy - focal lengths / cx, \
            cy - optical centers)")
    print("[fx, 0, cx]\n[0, fy, cy]\n[0,  0,  1]")
Example no. 19
print("Stereo calibration .....")
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence the intrinsic parameters stay the same.

criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)


# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retS, new_mtxL, distL, new_mtxR, distR, Rot, Trns, Emat, Fmat = cv2.stereoCalibrate(obj_pts,
                                                          img_ptsL,
                                                          img_ptsR,
                                                          new_mtxL,
                                                          distL,
                                                          new_mtxR,
                                                          distR,
                                                          imgL_gray.shape[::-1],
                                                          criteria=criteria_stereo,
                                                          flags=flags)

# Once we know the transformation between the two cameras we can perform stereo rectification
# StereoRectify function
rectify_scale = 1  # if 0 the image is cropped, if 1 the image is not cropped
rect_l, rect_r, proj_mat_l, proj_mat_r, Q, roiL, roiR= cv2.stereoRectify(new_mtxL, distL, new_mtxR, distR,
                                                 imgL_gray.shape[::-1], Rot, Trns,
                                                 alpha=rectify_scale, newImageSize=(0, 0))

# Use the rotation matrices for stereo rectification and the camera intrinsics for undistorting the image.
# Compute the rectification map (mapping between the original image pixels and
# their transformed values after applying rectification and undistortion) for the left and right camera frames.
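# The snippet ends at the comment above; a minimal sketch (not from the original source)
# of that map computation, reusing the names defined earlier and assuming both frames
# share imgL_gray's size:
Left_Stereo_Map = cv2.initUndistortRectifyMap(new_mtxL, distL, rect_l, proj_mat_l,
                                              imgL_gray.shape[::-1], cv2.CV_16SC2)
Right_Stereo_Map = cv2.initUndistortRectifyMap(new_mtxR, distR, rect_r, proj_mat_r,
                                               imgL_gray.shape[::-1], cv2.CV_16SC2)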
Example no. 20

    flags = 0
    if args.fixint:
      print "fix intrinsics"
      flags |= cv2.CALIB_FIX_INTRINSIC
      flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    elif cam1calib["camera_matrix"] is not None and cam2calib["camera_matrix"] is not None:
      flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    #flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    #flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    #flags |= cv2.CALIB_FIX_ASPECT_RATIO
    #flags |= cv2.CALIB_ZERO_TANGENT_DIST
    #flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    #flags |= cv2.CALIB_RATIONAL_MODEL
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F  = cv2.stereoCalibrate(obj_points,img_points1,img_points2,cam1size,cam1calib["camera_matrix"],cam1calib["dist"],cam2calib["camera_matrix"],cam2calib["dist"],criteria=term_crit,flags=flags)

    print "error is:",retval
    print "orig cameraMatrix1\n",cam1calib["camera_matrix"]
    print "cameraMatrix1\n",cameraMatrix1
    print "orig cameraMatrix2\n",cam2calib["camera_matrix"]
    print "cameraMatrix2\n",cameraMatrix2
    print "orig distCoeffs1",cam1calib["dist"]
    print "distCoeffs1",distCoeffs1.transpose()
    print "orig distCoeffs1",cam2calib["dist"]
    print "distCoeffs1",distCoeffs2.transpose()
    print "T\n",T
    print "R\n",R
    
    #cv2.destroyAllWindows()
Example no. 21
# read in intrinsic params
fs_read_l = cv.FileStorage('params/left_cam.yml',cv.FILE_STORAGE_READ)
mat_l = fs_read_l.getNode("intrinsic").mat()
dist_l = fs_read_l.getNode("distortion").mat()
fs_read_l.release()

fs_read_r = cv.FileStorage('params/right_cam.yml',cv.FILE_STORAGE_READ)
mat_r = fs_read_r.getNode("intrinsic").mat()
dist_r = fs_read_r.getNode("distortion").mat()
fs_read_r.release()

print('Calibrating stereo system ...')

retval, mat1, dist1, mat2, dist2, R, T, E, F = cv.stereoCalibrate(obj_pts, \
           img_pts_l,img_pts_r,mat_l,dist_l,mat_r, dist_r,gray_l.shape[::-1], \
           flags=cv.CALIB_FIX_INTRINSIC)

print('R: \n',R)
print('T: \n',T)
print('E: \n',E)
print('F: \n',F)

#store the files
store_location = 'params/stereo.yml'
fs = cv.FileStorage(store_location,cv.FILE_STORAGE_WRITE)
fs.write("R",R)
fs.write("T",T)
fs.write("E",E)
fs.write("F",F)
fs.release()
cameraMatrix2 = None
distCoeffs1 = None
distCoeffs2 = None
R = None
T = None
E = None
F = None
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                        100, 1e-5)
stereocalib_flags = cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_SAME_FOCAL_LENGTH
stereocalib_retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    obj_points,
    img_left_points,
    img_right_points,
    cameraMatrix1,
    distCoeffs1,
    cameraMatrix2,
    distCoeffs2,
    image_size,
    criteria=stereocalib_criteria,
    flags=stereocalib_flags)

np.savez('cameracalibmat_data/stereo_camera_mat_and_dist_coeff.npz',
         cameraMatrix1=cameraMatrix1,
         distCoeffs1=distCoeffs1,
         cameraMatrix2=cameraMatrix2,
         distCoeffs2=distCoeffs2,
         R=R,
         T=T,
         E=E,
         F=F)
        # Loading camera calibration data
        print("Loading camera calibration data...")
        fileName = cs.getFileName(cs.camera, prefix=cs.getCamera(1))
        file = cs.getCalibDataDir(cs.root) + fileName
        dataSet = msc.readData(file)
        mtx1, dist1, rvecs, tvecs = dataSet

        fileName = cs.getFileName(cs.camera, prefix=cs.getCamera(2))
        file = cs.getCalibDataDir(cs.root) + fileName
        dataSet = msc.readData(file)
        mtx2, dist2, rvecs, tvecs = dataSet

        # Performing stereo calibration
        result = cv2.stereoCalibrate(objectPoints=objpoints, imagePoints1=imgpoints1,
                                     imagePoints2=imgpoints2, cameraMatrix1=mtx1,
                                     distCoeffs1=dist1, cameraMatrix2=mtx2,
                                     distCoeffs2=dist2, imageSize=(w, h),
                                     flags=cv2.CALIB_FIX_INTRINSIC)

        # Final stereo calibration dataset
        dataSet = (result[5], result[6], result[7], result[8])

        while True:
            q = input("Would you like to test the stereo calibration " +
                      "parameters before proceeding? (y/n): ")
            if q.lower() == 'y':
                srcImage1 = ct.takePic()
                srcImage2 = ct.takeRemotePic()

                rectImage = sr.stereoRectify((mtx1, dist1, mtx2, dist2, result[5], result[6]),
                                             (srcImage1, srcImage2), cs.stream_mode)
heightR, widthR, channelsR = imgR.shape
newCameraMatrixR, roi_R = cv.getOptimalNewCameraMatrix(cameraMatrixR, distR, (widthR, heightR), 1, (widthR, heightR))



########## Stereo Vision Calibration #############################################

flags = 0
flags |= cv.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence the intrinsic parameters stay the same.

criteria_stereo= (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria=criteria_stereo, flags=flags)

#print(newCameraMatrixL)
#print(newCameraMatrixR)

########## Stereo Rectification #################################################

rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0, 0))

stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv.CV_16SC2)
stereoMapR = cv.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv.CV_16SC2)

print("Saving parameters!")
cv_file = cv.FileStorage('stereoMap.xml', cv.FILE_STORAGE_WRITE)
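# The listing is cut off after opening the FileStorage; a minimal sketch (not from the
# original source) of how the maps computed above could be written out and the file closed:
cv_file.write('stereoMapL_x', stereoMapL[0])
cv_file.write('stereoMapL_y', stereoMapL[1])
cv_file.write('stereoMapR_x', stereoMapR[0])
cv_file.write('stereoMapR_y', stereoMapR[1])
cv_file.release()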
Example no. 25
def getStereoCalibration(phonecamPara, webcamPara):

    phonecam = {}
    webcam = {}
    ret, phonecam['camMtx'], phonecam['distMtx'], phonecam['rotMtx'], phonecam[
        'trnsMrx'] = phonecamPara
    ret, webcam['camMtx'], webcam['distMtx'], webcam['rotMtx'], webcam[
        'trnsMrx'] = webcamPara

    phonecam['optCamMtx'], phonecam['roi'] = cv.getOptimalNewCameraMatrix(
        phonecam['camMtx'], phonecam['distMtx'], IMAGE_SIZE, alpha=ALPHA)

    webcam['optCamMtx'], webcam['roi'] = cv.getOptimalNewCameraMatrix(
        webcam['camMtx'], webcam['distMtx'], IMAGE_SIZE, alpha=ALPHA)

    recalc = str(input("Calculate New Stereo Calibration Points? Y/N: "))

    if ANS[recalc]:
        objpoints, phonecamImgPoints, webcamImgPoints = getChessPatternPoints(
            "stereoCamCalibPoints")

        ret, _, _, _, _, rotMtx, trnsMtx, _, _ = cv.stereoCalibrate(
            objpoints,
            webcamImgPoints,
            phonecamImgPoints,
            webcam['optCamMtx'],
            webcam['distMtx'],
            phonecam['optCamMtx'],
            phonecam['distMtx'],
            IMAGE_SIZE,
            flags=cv.CALIB_FIX_INTRINSIC + cv.CALIB_FIX_FOCAL_LENGTH +
            cv.CALIB_FIX_ASPECT_RATIO)

        webcam['stereoRectRotMtx'], phonecam['stereoRectRotMtx'], webcam[
            'projMtx'], phonecam['projMtx'], Q, webcam[
                'stereoROI'], _ = cv.stereoRectify(
                    webcam['camMtx'],
                    webcam['distMtx'],
                    phonecam['camMtx'],
                    phonecam['distMtx'],
                    IMAGE_SIZE,
                    rotMtx,
                    trnsMtx,
                    # flags= cv.CALIB_ZERO_DISPARITY,
                    alpha=ALPHA)

        phoneCamStereoMap = cv.initUndistortRectifyMap(
            phonecam['camMtx'], phonecam['distMtx'],
            phonecam['stereoRectRotMtx'], phonecam['projMtx'], IMAGE_SIZE,
            cv.CV_16SC2)

        webCamStereoMap = cv.initUndistortRectifyMap(
            webcam['camMtx'], webcam['distMtx'], webcam['stereoRectRotMtx'],
            webcam['projMtx'], IMAGE_SIZE, cv.CV_16SC2)

        saveCamPara = input("Save Stereo Camera Parameters? Y/N: ")

        if ANS[saveCamPara]:
            with open("./camera_params/phoneCamStereoMap", "wb") as file:
                pickle.dump(phoneCamStereoMap, file)
            file.close()
            with open("./camera_params/webCamStereoMap", "wb") as file:
                pickle.dump(webCamStereoMap, file)
            file.close()
        return (phoneCamStereoMap, webCamStereoMap)

    else:
        with open("./camera_params/phoneCamStereoMap", "rb") as file:
            phoneCamStereoMap = pickle.load(file)
        file.close()
        with open("./camera_params/webCamStereoMap", "rb") as file:
            webCamStereoMap = pickle.load(file)
        file.close()
        return (phoneCamStereoMap, webCamStereoMap)
Example no. 26
    def stereoCalibrate(self):
        print "If you calibrate from files, make sure stereo files are"
        print "in the left-right-left-right order"

        # Get settings & files
        self.stereo = True
        self.chooseCalibrationSettings()

        # Get patterns
        self.recordPatterns()

        # Compute the intrinsic parameters first:
        rvecs = [
            np.zeros(3, dtype=np.float32) for i in xrange(self.max_frames_i)
        ]
        tvecs = [
            np.zeros(3, dtype=np.float32) for i in xrange(self.max_frames_i)
        ]

        # Call OpenCV routines to do the dirty work
        print "Computing intrisic parameters for the first camera"
        res = cv2.calibrateCamera(self.obj_points_l, self.img_points_l,
                                  self.frame_size, self.intrinsics_l,
                                  self.distorsion_l, rvecs, tvecs)

        rms, self.intrinsics_l, self.distorsion_l, rvecs, tvecs = res

        print "Computing intrisic parameters for the second camera"
        res = cv2.calibrateCamera(self.obj_points_r, self.img_points_r,
                                  self.frame_size, self.intrinsics_r,
                                  self.distorsion_r, rvecs, tvecs)

        rms, self.intrinsics_r, self.distorsion_r, rvecs, tvecs = res

        # Allocate arrays for the rotation, translation, essential and fundamental matrices
        R = np.zeros((3, 3), dtype=np.float32)
        T = np.zeros((3, 1), dtype=np.float32)
        E = np.zeros((3, 3), dtype=np.float32)
        F = np.zeros((3, 3), dtype=np.float32)

        # Compute calibration parameters :
        print "Calibrating cameras.."
        print "Frame size : {}".format(self.frame_size)

        # set stereo flags
        stereo_flags = 0
        #        stereo_flags |= cv2.CALIB_FIX_INTRINSIC
        stereo_flags |= cv2.CALIB_USE_INTRINSIC_GUESS  # Refine intrinsic parameters
        #        stereo_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT     # Fix the principal points during the optimization.
        #        stereo_flags |= cv2.CALIB_FIX_FOCAL_LENGTH        # Fix focal length
        #        stereo_flags |= cv2.CALIB_FIX_ASPECT_RATIO        # fix aspect ratio
        stereo_flags |= cv2.CALIB_SAME_FOCAL_LENGTH  # Use same focal length
        #        stereo_flags |= cv2.CALIB_ZERO_TANGENT_DIST       # Set tangential distortion to zero
        #        stereo_flags |= cv2.CALIB_RATIONAL_MODEL          # Use 8 param rational distortion model instead of 5 param plumb bob model

        res = cv2.stereoCalibrate(self.obj_points_l, self.img_points_l,
                                  self.img_points_r, self.frame_size,
                                  self.intrinsics_l, self.distorsion_l,
                                  self.intrinsics_r, self.distorsion_r)

        # flags=stereo_flags

        (rms, int_left, dist_left, int_right, dist_right, R, T, E, F) = res

        # Output
        print "Calibration done"
        print "Residual RMS : {}".format(rms)

        if (rms > 1.0):
            print "Calibration looks faulty, please re-run"

        print "\nCalibration parameters : \n Intrinsics -left \n {} \n\n Distorsion -left\n {}".format(\
            int_left, dist_left)

        print "\nCalibration parameters : \n Intrinsics -right \n {} \n\n Distorsion -right\n {}".format(\
            int_right, dist_right)

        print "\nRotation : \n{}\n".format(R)

        print "\nTranslation : \n{}\n".format(T)

        print "\nEssential matrix : \n{}\n".format(E)  # Essential matrix

        print "\nFundamental matrix : \n{}\n".format(F)  # Fundamental matrix

        # TODO : Compute perspective matrix !

        # Save calibration parameters
        save_file = utils.getAnswer(
            "Would you like to save the results ? (y/n) ", 'yn')

        b_write_success = False

        if save_file == "y":
            while (b_write_success == False):
                save_XML = utils.getAnswer("Save in XML format ? (y/n) ", "yn")

                filepath = raw_input(
                    "Where do you want to save the file ? (enter file path) ")

                try:
                    if save_XML == "y":

                        file = utils.handlePath(filepath,
                                                "camera_calibration.xml")

                        utils.saveParametersXML(int_left, dist_left, int_right,
                                                dist_right, R, T,
                                                self.frame_size, file)

                    else:
                        file_left = utils.handlePath(filepath, "_left.txt")
                        utils.saveParameters(int_left, dist_left, R, T,
                                             file_left)

                        file_right = utils.handlePath(filepath, "_right.txt")
                        utils.saveParameters(int_right, dist_right, R, T,
                                             file_right)

                    print "Parameters file written"
                    b_write_success = True

                except:
                    print "Wrong path, please correct"

                time.sleep(2)

        return
#####################################################################

# STAGE 3: perform extrinsic calibration (recovery of relative camera positions)

# this takes the existing calibration parameters used to undistort the individual images as
# well as calculated the relative camera positions - represented via the fundamental matrix, F

# alter termination criteria to (perhaps) improve solution - ?

termination_criteria_extrinsics = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)

if (chessboard_pattern_detections > 0): # i.e. if we did not load a calibration
    print()
    print("START - extrinsic calibration ...")
    (rms_stereo, camera_matrix_l, dist_coeffs_l, camera_matrix_r, dist_coeffs_r, R, T, E, F) = \
    cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR,  grayL.shape[::-1], criteria=termination_criteria_extrinsics, flags=0);

    print("START - extrinsic calibration ...")

    print("STEREO: RMS left to  right re-projection error: ", rms_stereo)

#####################################################################

# STAGE 4: rectification of images (make scan lines align left <-> right)

# N.B.  "alpha=0 means that the rectified images are zoomed and shifted so that
# only valid pixels are visible (no black areas after rectification). alpha=1 means
# that the rectified image is decimated and shifted so that all the pixels from the original images
# from the cameras are retained in the rectified images (no source image pixels are lost)." - ?

if (chessboard_pattern_detections > 0): # i.e. if we did not load a calibration
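    # The original listing is truncated here; a minimal sketch (not from the source) of the
    # rectification call the comments above describe, reusing the names from STAGE 3 and an
    # illustrative alpha of 0 (keep only valid pixels):
    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
        camera_matrix_l, dist_coeffs_l, camera_matrix_r, dist_coeffs_r,
        grayL.shape[::-1], R, T, alpha=0)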
Example no. 28
    print("camera matrix:\n", camera_matrix)
    print("distortion coefficients: ", dist_coefs.ravel())

    return img_points, camera_matrix, dist_coefs


if __name__ == '__main__':
    pls, cml, dcl = preprocess('left')
    prs, cmr, dcr = preprocess('right')
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    obj_points = [pattern_points] * len(pls)
    print("\nstereo calibrate:")
    # cmr, roiR = cv2.getOptimalNewCameraMatrix(cmr, dcr,(w, h), 1, (w, h))
    # cml, roiR = cv2.getOptimalNewCameraMatrix(cml, dcl,(w, h), 1, (w, h))
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        obj_points, pls, prs, cml, dcl, cmr, dcr, (w, h))
    np.save('cameraMatrix1', cameraMatrix1)
    np.save('distCoeffs1', distCoeffs1)
    np.save('cameraMatrix2', cameraMatrix2)
    np.save('distCoeffs2', distCoeffs2)
    np.save('R', R)
    np.save('T', T)
    print('cameraMatrix1\n', cameraMatrix1)
    print('distCoeffs1\n', distCoeffs1)
    print('cameraMatrix2\n', cameraMatrix2)
    print('distCoeffs2\n', distCoeffs2)
    print('R\n', R)
    print('T\n', T)
    # print(help(cv2.stereoCalibrate))
    print("Dl")
    print(Dl)
    print("Dr")
    print(Dr)

    pickle.dump([err, Mr, Dr, rr, tr, roi_r], open(matrices_file % "der",
                                                   "wb"))
else:
    err, Ml, Dl, rl, tl, roi_l = pickle.load(open(matrices_file % "izq", "br"))
    errores.append(err)
    err, Mr, Dr, rr, tr, roi_r = pickle.load(open(matrices_file % "der", "br"))
    errores.append(err)

# Stereo calibration
print(Ml)
err, Ml2, Dl2, Mr2, Dr2, R, T, E, F = cv.stereoCalibrate(
    objpoints, puntosl, puntosr, Ml, Dl, Mr, Dr, img_shape)
errores.append(err)
print(Ml2)

print(err)
print("R")
print(R)
print("T")
print(T)

# Rectification and generation of the maps that undo the distortions (applied directly to the stereo pair)
rectification_l, rectification_r, projection_l, projection_r, disparityToDepthMap, ROI_l, ROI_r = cv.stereoRectify(
    Ml2,
    Dl2,
    Mr2,
    Dr2,
Example no. 30
distortionCoeffsRight = np.zeros((1,5))



pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
pattern_points *= squareSize

objPoints = []
for i in range (0,requiredSuccessfulDetections):
	objPoints.append(pattern_points)


returnCalibrate, cameraMatrixLeft, distortionCoeffsLeft, cameraMatrixRight, distortionCoeffsRight, calib_R, calib_T, calib_E, calib_F \
	= cv2.stereoCalibrate(objPoints, leftImagePoints, rightImagePoints, imageSize, cameraMatrixLeft, distortionCoeffsLeft, cameraMatrixRight, distortionCoeffsRight, \
	criteria=(cv2.TERM_CRITERIA_MAX_ITER|cv2.TERM_CRITERIA_EPS, 100, 1e-5), \
	flags=(cv2.CALIB_FIX_ASPECT_RATIO|cv2.CALIB_ZERO_TANGENT_DIST|cv2.CALIB_SAME_FOCAL_LENGTH) \
	)

print "Found intrinsinc calibration parameters, rmi {}".format(returnCalibrate)


print "Press 'q' to continue. Showing undistorted using instrinsinc params"
print cameraMatrixLeft
print distortionCoeffsLeft
print cameraMatrixRight
print distortionCoeffsRight
while True:
	#Grab frames
	ch.grabFrames()
	frameLeft = ch.imageLeft
	frameRight = ch.imageRight
        x0, y0 = map(int, [0, -l[2] / l[1]])
        x1, y1 = map(int, [c, -(l[2] + l[0] * c) / l[1]])
        cv2.line(img, (x0, y0), (x1, y1), (0, 255, 0), 1, cv2.LINE_AA)

    return img


img1, img2 = stereo_read(c1, c2)

chessboard_found1, corners1 = cv2.findChessboardCorners(img1, (9, 6), None)
chessboard_found2, corners2 = cv2.findChessboardCorners(img2, (9, 6), None)
object_points = chessboard_points_3d()
_, _, _, _, _, R, T, E, F = cv2.stereoCalibrate([object_points], [corners1],
                                                [corners2],
                                                K1,
                                                dist1,
                                                K2,
                                                dist2,
                                                None,
                                                flags=cv2.CALIB_FIX_INTRINSIC)
print(np.linalg.norm(T))

epipolar_loss = []
frames2 = []

for i in range(int(20 / 2 - subvideo_length / 2) * 30):
    stereo_read(c1, c2)

for i in range(subvideo_length * 30):
    i1, i2 = stereo_read(c1, c2)
    frames2.append(i2)
    if i == 2 * 30:  # the minimum of the epipolar loss should be after 2 seconds of playback (x=60 in the plot)
Example no. 32
def calibration(undistort=False, selfCoor=False):
    """
    主要完成以下功能:
        1. 分别计算单目的投影矩阵并持久化。为计算3d坐标做准备
        2. 计算双目的投影矩阵及R1 R2 Q 矩阵。为行对准做准备
    """
    global imgpoints_r_selfcoor, imgpoints_selfcoor, objpoints_selfcoor
    global cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((7 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:7].T.reshape(-1, 2) * 25
    #print(objp.shape)
    #objp[:, -1] = 0
    #print(objp)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    objpoints_r = []
    imgpoints_r = []

    images = glob.glob('stereo512/left.bmp')
    images_r = glob.glob('stereo512/right.bmp')
    images.sort()
    images_r.sort()

    for fname, fname_r in zip(images, images_r):
        img = cv2.imread(fname)
        img_r = cv2.imread(fname_r)

        if undistort:
            img = undistortImage(img, cameraMatrix1, distCoeffs1)
            img_r = undistortImage(img_r, cameraMatrix2, distCoeffs2)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 7), None)
        #print('corners',corners)
        ret_r, corners_r = cv2.findChessboardCorners(gray_r, (9, 7), None)

        # If found, add object points, image points (after refining them)
        if ret == True and ret_r == True:
            objpoints.append(objp)
            objpoints_r.append(objp)

            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1),
                                        criteria)
            corners2_r = cv2.cornerSubPix(gray_r, corners_r, (11, 11),
                                          (-1, -1), criteria)
            imgpoints.append(corners2)
            imgpoints_r.append(corners2_r)

    # Compute each camera's projection matrix separately and persist it.
    if not selfCoor:
        objpoints_selfcoor = objpoints[0]
        imgpoints_selfcoor = imgpoints[0]
        imgpoints_r_selfcoor = imgpoints_r[0]
    else:
        if objpoints_selfcoor is None or imgpoints_selfcoor is None or imgpoints_r_selfcoor is None:
            print("Initialize the self-defined coordinate system first")
            return
    ret, rotation, translation = cv2.solvePnP(objpoints_selfcoor,
                                              imgpoints_selfcoor,
                                              cameraMatrix1, distCoeffs1)

    ret, rotation_r, translation_r = cv2.solvePnP(objpoints_selfcoor,
                                                  imgpoints_r_selfcoor,
                                                  cameraMatrix2, distCoeffs2)

    rt1 = getrtMtx(rotation, translation)
    rt2 = getrtMtx(rotation_r, translation_r)

    P1_own = np.dot(cameraMatrix1, rt1)
    P2_own = np.dot(cameraMatrix2, rt2)
    save_camera_matrix_own_proj(P1_own)
    save_camera_matrix_own_proj_r(P2_own)

    # Stereo step: compute R1, R2, P1, P2, Q and persist them.
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(objpoints, imgpoints, imgpoints_r, cameraMatrix1,
                        distCoeffs1, cameraMatrix2, distCoeffs2, gray.shape[::-1], flags = cv2.CALIB_FIX_INTRINSIC )
    #print("OPENCV R-T\n",R, "\n", T)
    R1, R2, P1_stereo, P2_stereo, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
        cameraMatrix1,
        distCoeffs1,
        cameraMatrix2,
        distCoeffs2,
        gray.shape[::-1],
        R,
        T,
        flags=0)
    #print("P1,P2\n",P1_stereo,"\n", P2_stereo)
    save_camera_matrix_rot(R1)
    save_camera_matrix_rot_r(R2)
    save_camera_matrix_stereo_proj(P1_stereo)
    save_camera_matrix_stereo_proj_r(P2_stereo)
    save_camera_matrix_q(Q)
Example no. 33
objectPoints = leftObjectPoints

print("Calibrating left camera...")
_, leftCameraMatrix, leftDistortionCoefficients, _, _ = cv2.calibrateCamera(
    objectPoints, leftImagePoints, imageSize, None, None)

print("Calibrating right camera...")
_, rightCameraMatrix, rightDistortionCoefficients, _, _ = cv2.calibrateCamera(
    objectPoints, rightImagePoints, imageSize, None, None)

print("Calibrating stereo camera...")
(_, _, _, _, _, rotationMatrix, translationVector, _,
 _) = cv2.stereoCalibrate(objectPoints, leftImagePoints, rightImagePoints,
                          leftCameraMatrix, leftDistortionCoefficients,
                          rightCameraMatrix, rightDistortionCoefficients,
                          imageSize, None, None, None, None,
                          cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)

print("Rectifying cameras...")
(leftRectification, rightRectification, leftProjection, rightProjection,
 disparityToDepthMap, leftROI, rightROI) = cv2.stereoRectify(
     leftCameraMatrix, leftDistortionCoefficients, rightCameraMatrix,
     rightDistortionCoefficients, imageSize, rotationMatrix, translationVector,
     None, None, None, None, None, cv2.CALIB_ZERO_DISPARITY, OPTIMIZE_ALPHA)
print "Q matrix:", disparityToDepthMap
print("Saving calibration...")
leftMapX, leftMapY = cv2.initUndistortRectifyMap(leftCameraMatrix,
                                                 leftDistortionCoefficients,
                                                 leftRectification,
                                                 leftProjection, imageSize, 5)
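The example stops after the left-camera maps; a minimal sketch of the matching right-camera maps and of applying both with cv2.remap (leftFrame and rightFrame are hypothetical input images, all other names come from the example above):
rightMapX, rightMapY = cv2.initUndistortRectifyMap(rightCameraMatrix,
                                                   rightDistortionCoefficients,
                                                   rightRectification,
                                                   rightProjection, imageSize,
                                                   cv2.CV_32FC1)

# Rectify a hypothetical stereo pair with the precomputed maps.
fixedLeft = cv2.remap(leftFrame, leftMapX, leftMapY, cv2.INTER_LINEAR)
fixedRight = cv2.remap(rightFrame, rightMapX, rightMapY, cv2.INTER_LINEAR)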
Esempio n. 34
0
for i in range(1, n):
    object_points.append(corner_coordinates)

# Size of the camera images
image_size = (640, 480)

# Set criteria and flags for camera calibration
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
         cv2.CALIB_SAME_FOCAL_LENGTH)

# Calibrate the cameras
(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E,
 F) = cv2.stereoCalibrate(object_points,
                          left_image_points,
                          right_image_points,
                          image_size,
                          criteria=criteria,
                          flags=flags)[1:]

# Compute the rectification transforms for each head of the stereo camera pair
(left_rect_trans, right_rect_trans, left_proj_mats, right_proj_mats, Q,
 left_valid_boxes, right_valid_boxes) = cv2.stereoRectify(cameraMatrix1,
                                                          distCoeffs1,
                                                          cameraMatrix2,
                                                          distCoeffs2,
                                                          image_size,
                                                          R,
                                                          T,
                                                          flags=0)

# Compute the undistortion and rectification transformation map
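A minimal sketch of the step announced by the comment above (not the example's original code), reusing the names defined in this example:
# Build per-camera undistortion/rectification maps from the rectify results.
left_map_x, left_map_y = cv2.initUndistortRectifyMap(
    cameraMatrix1, distCoeffs1, left_rect_trans, left_proj_mats,
    image_size, cv2.CV_32FC1)
right_map_x, right_map_y = cv2.initUndistortRectifyMap(
    cameraMatrix2, distCoeffs2, right_rect_trans, right_proj_mats,
    image_size, cv2.CV_32FC1)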
Esempio n. 35
0
    if keyPress == 27 or keyPress == 1048603:
        print("Pressed ESC, process stopped")
        break

cv2.destroyAllWindows()

print("\nRunning stereo calibration... ", end="")

rms, cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, R, T, E, F = cv2.stereoCalibrate(
    objectPoints=stereo_objpoints,
    imagePoints1=stereo_imgpoints_l,
    imagePoints2=stereo_imgpoints_r,
    cameraMatrix1=cameraMatrixL,
    distCoeffs1=distCoeffsL,
    cameraMatrix2=cameraMatrixR,
    distCoeffs2=distCoeffsR,
    imageSize=stereo_imgsize,
    R=None,
    T=None,
    E=None,
    F=None,
    flags=(cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_FIX_INTRINSIC),
    criteria=(cv2.TERM_CRITERIA_COUNT + cv2.TERM_CRITERIA_EPS, 300, 1e-6),
)

print("calibrated with RMS error =", rms)
print("\nCamera matrices (L and R respectively):\n", cameraMatrixL, "\n \n", cameraMatrixR)
print("\nDistortion coefficients:\n", distCoeffsL.ravel(), "\n \n", distCoeffsR.ravel(), "\n")
print("Rotational matrix between camera 1 and camera 2 \n", R, "\n")
print("Translational matrix between camera 1 and camera 2 \n", T, "\n")

print("Writing intrinsics to /data/intrinsics.yml... ", end="")
Esempio n. 36
0
    flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    # flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    # flags |= cv2.CALIB_FIX_ASPECT_RATIO
    flags |= cv2.CALIB_ZERO_TANGENT_DIST

    flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +cv2.CALIB_USE_INTRINSIC_GUESS +cv2.CALIB_SAME_FOCAL_LENGTH + \
            cv2.CALIB_RATIONAL_MODEL +cv2.CALIB_FIX_K3 + cv2.CALIB_FIX_K4 + cv2.CALIB_FIX_K5
    """-------------------------------------------------------------------------"""
    """Stereo calibration"""
    """-------------------------------------------------------------------------"""
    rms_stereo, M0, d0, M1, d1, R, T, E, F = cv2.stereoCalibrate(
        object_points,
        image_points0,
        image_points1,
        camera_matrix0,
        dist_coeffs0,
        camera_matrix1,
        dist_coeffs1,
        img0.shape[0:2][::-1],
        criteria=criteria,
        flags=flags)

    print("STEREO: RMS left to  right re-projection error: ", rms_stereo)
    """-------------------------------------------------------------------------"""
    """Stereo rectification"""
    """-------------------------------------------------------------------------"""
    # Q holds the quintessence of the algorithm, the reprojection matrix
    R0, R1, P0, P1, Q, _, _ = cv2.stereoRectify(M0,
                                                d0,
                                                M1,
                                                d1,
Esempio n. 37
0
    def calibrate_cameras(self):
        """Calibrate cameras based on found chessboard corners."""
        criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                    100, 1e-5)
        flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
                 cv2.CALIB_SAME_FOCAL_LENGTH)
        calib = StereoCalibration()

        print(self.object_points)
        print(self.image_points["left"])
        print(self.image_points["right"])

        print(self.image_size)


        (calib.cam_mats["left"], calib.dist_coefs["left"],
         calib.cam_mats["right"], calib.dist_coefs["right"],
         calib.rot_mat, calib.trans_vec, calib.e_mat,
         calib.f_mat) = cv2.stereoCalibrate(objectPoints=self.object_points,
                                            imagePoints1=self.image_points["left"],
                                            imagePoints2=self.image_points["right"],
                                            imageSize=self.image_size,
                                            cameraMatrix1=calib.cam_mats["left"],
                                            distCoeffs1=calib.dist_coefs["left"],
                                            cameraMatrix2=calib.cam_mats["right"],
                                            distCoeffs2=calib.dist_coefs["right"],
                                            R=calib.rot_mat,
                                            T=calib.trans_vec,
                                            E=calib.e_mat,
                                            F=calib.f_mat,
                                            criteria=criteria,
                                            flags=flags)[1:]
        (calib.rect_trans["left"], calib.rect_trans["right"],
         calib.proj_mats["left"], calib.proj_mats["right"],
         calib.disp_to_depth_mat, calib.valid_boxes["left"],
         calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
                                                      calib.dist_coefs["left"],
                                                      calib.cam_mats["right"],
                                                      calib.dist_coefs["right"],
                                                      self.image_size,
                                                      calib.rot_mat,
                                                      calib.trans_vec,
                                                      flags=0)
        for side in ("left", "right"):
            (calib.undistortion_map[side],
             calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
                                                        calib.cam_mats[side],
                                                        calib.dist_coefs[side],
                                                        calib.rect_trans[side],
                                                        calib.proj_mats[side],
                                                        self.image_size,
                                                        cv2.CV_32FC1)
        # This is replaced because my results were always bad. Estimates are
        # taken from the OpenCV samples.
        width, height = self.image_size
        focal_length = 0.8 * width
        calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                              [0, -1, 0, 0.5 * height],
                                              [0, 0, 0, -focal_length],
                                              [0, 0, 1, 0]])
        return calib
Esempio n. 38
0
# StereoCalibrate function
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
# flags |= cv2.CALIB_USE_INTRINSIC_GUESS
# flags |= cv2.CALIB_FIX_FOCAL_LENGTH
# flags |= cv2.CALIB_FIX_ASPECT_RATIO
# flags |= cv2.CALIB_ZERO_TANGENT_DIST
# flags |= cv2.CALIB_RATIONAL_MODEL
# flags |= cv2.CALIB_SAME_FOCAL_LENGTH
# flags |= cv2.CALIB_FIX_K3
# flags |= cv2.CALIB_FIX_K4
# flags |= cv2.CALIB_FIX_K5
retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR,
    ChessImaR.shape[::-1], criteria_stereo, flags)

# StereoRectify function
rectify_scale = 0  # if 0 the image is cropped, if 1 the image is not cropped
RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
    MLS, dLS, MRS, dRS, ChessImaR.shape[::-1], R, T, rectify_scale,
    (0, 0))  # last parameter is alpha: 0 = cropped, 1 = not cropped
# initUndistortRectifyMap function
Left_Stereo_Map = cv2.initUndistortRectifyMap(
    MLS, dLS, RL, PL, ChessImaR.shape[::-1], cv2.CV_16SC2
)  # cv2.CV_16SC2: this fixed-point map format lets the program run faster
Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                               ChessImaR.shape[::-1],
                                               cv2.CV_16SC2)
Esempio n. 39
0
        imagePoints1.append(corners1)
        imagePoints2.append(corners2)
        object_points.append(obj)
        print "Corners stored\n"
        success += 1

        if (success >= numBoards):
            break

cv2.destroyAllWindows()
print "Starting Calibration\n"
cameraMatrix1 = np.zeros((3, 3), np.float32)
cameraMatrix2 = np.zeros((3, 3), np.float32)

retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    object_points, imagePoints1, imagePoints2, (width, height))
## , cv2.cvTermCriteria(cv2.CV_TERMCRIT_ITER+cv2.CV_TERMCRIT_EPS, 100, 1e-5),   cv2.CV_CALIB_SAME_FOCAL_LENGTH | cv2.CV_CALIB_ZERO_TANGENT_DIST)
#cv2.cv.StereoCalibrate(object_points, imagePoints1, imagePoints2, pointCounts, cv.fromarray(K1), cv.fromarray(distcoeffs1), cv.fromarray(K2), cv.fromarray(distcoeffs2), imageSize, cv.fromarray(R), cv.fromarray(T), cv.fromarray(E), cv.fromarray(F), flags = cv.CV_CALIB_FIX_INTRINSIC)
#FileStorage fs1("mystereocalib.yml", FileStorage::WRITE);
# fs1 << "CM1" << CM1;
#fs1 << "CM2" << CM2;
# #fs1 << "D1" << D1;
#fs1 << "D2" << D2;
#fs1 << "R" << R;
#fs1 << "T" << T;
#fs1 << "E" << E;
#fs1 << "F" << F;
print "Done Calibration\n"
print "Starting Rectification\n"
R1 = np.zeros(shape=(3, 3))
R2 = np.zeros(shape=(3, 3))
Esempio n. 40
0
if __name__ == '__main__':
    # Folders containing the calibration images
    left_pic_dir = '../pic/left'
    right_pic_dir = '../pic/right'
    # Monocular calibration of each camera
    real_points, left_pic_points, left_cameraMatrix, left_distCoeffs = calibrate_single(
        left_pic_dir)
    _, right_pic_points, right_cameraMatrix, right_distCoeffs = calibrate_single(
        right_pic_dir)

    size = (752, 480)  # image size

    # Perform the stereo calibration
    _, _, _, _, _, R, T, E, F = cv.stereoCalibrate(
        real_points, left_pic_points, right_pic_points, left_cameraMatrix,
        left_distCoeffs, right_cameraMatrix, right_distCoeffs, size)

    # Camera coordinate system transformation (stereo rectification)
    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv.stereoRectify(
        left_cameraMatrix, left_distCoeffs, right_cameraMatrix,
        right_distCoeffs, size, R, T)
    # Reduce distortion
    left_map1, leftmap2 = cv.initUndistortRectifyMap(left_cameraMatrix,
                                                     left_distCoeffs,
                                                     R1,
                                                     P1,
                                                     size=size,
                                                     m1type=cv.CV_16SC2)
    right_map1, right_map2 = cv.initUndistortRectifyMap(right_cameraMatrix,
                                                        right_distCoeffs,
Esempio n. 41
0
def StereoCameraCalibration() :
	# Calibrate the left camera
	cam1 = CameraCalibration( sorted( glob.glob( '{}/left*.png'.format(calibration_directory) ) ) )
	# Calibrate the right camera
	cam2 = CameraCalibration( sorted( glob.glob( '{}/right*.png'.format(calibration_directory) ) ) )
	# Stereo calibration termination criteria
	criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
	# Stereo calibration flags
	flags  = 0
	flags |= cv2.CALIB_USE_INTRINSIC_GUESS
#	flags |= cv2.CALIB_FIX_INTRINSIC
#	flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
#	flags |= cv2.CALIB_FIX_FOCAL_LENGTH
	flags |= cv2.CALIB_FIX_ASPECT_RATIO
#	flags |= cv2.CALIB_SAME_FOCAL_LENGTH
#	flags |= cv2.CALIB_ZERO_TANGENT_DIST
	flags |= cv2.CALIB_RATIONAL_MODEL
	flags |= cv2.CALIB_FIX_K3
	flags |= cv2.CALIB_FIX_K4
	flags |= cv2.CALIB_FIX_K5
	# Stereo calibration
	calibration = cv2.stereoCalibrate( cam1['obj_points'], cam1['img_points'], cam2['img_points'],
		cam1['camera_matrix'], cam1['dist_coefs'], cam2['camera_matrix'], cam2['dist_coefs'], cam1['img_size'],
		flags=flags, criteria=criteria )
	# Store the stereo calibration results in a dictionary
	parameter_names = ( 'calib_error', 'camera_matrix_l', 'dist_coefs_l', 'camera_matrix_r', 'dist_coefs_r', 'R', 'T', 'E', 'F' )
	calibration = dict( zip( parameter_names, calibration ) )
	# Stereo rectification
	rectification = cv2.stereoRectify(
		calibration['camera_matrix_l'], calibration['dist_coefs_l'],
		calibration['camera_matrix_r'], calibration['dist_coefs_r'],
		cam1['img_size'], calibration['R'], calibration['T'], flags=0 )
	# Store the stereo rectification results in the dictionary
	parameter_names = ( 'R1', 'R2', 'P1', 'P2', 'Q', 'ROI1', 'ROI2' )
	calibration.update( zip( parameter_names, rectification ) )
	# Undistortion maps
	calibration['left_map'] = cv2.initUndistortRectifyMap(
		calibration['camera_matrix_l'], calibration['dist_coefs_l'],
		calibration['R1'], calibration['P1'], cam1['img_size'], cv2.CV_32FC1 )
	calibration['right_map'] = cv2.initUndistortRectifyMap(
		calibration['camera_matrix_r'], calibration['dist_coefs_r'],
		calibration['R2'], calibration['P2'], cam2['img_size'], cv2.CV_32FC1 )
	# Compute reprojection error
	undistorted_l = cv2.undistortPoints( np.concatenate( cam1['img_points'] ).reshape(-1, 1, 2),
		calibration['camera_matrix_l'], calibration['dist_coefs_l'], P=calibration['camera_matrix_l'] )
	undistorted_r = cv2.undistortPoints( np.concatenate( cam2['img_points'] ).reshape(-1, 1, 2),
		calibration['camera_matrix_r'], calibration['dist_coefs_r'], P=calibration['camera_matrix_r'] )
	lines_l = cv2.computeCorrespondEpilines( undistorted_l, 1, calibration['F'] )
	lines_r = cv2.computeCorrespondEpilines( undistorted_r, 2, calibration['F'] )
	calibration['reproject_error'] = 0
	for i in range( len( undistorted_l ) ) :
		calibration['reproject_error'] += abs( undistorted_l[i][0][0] * lines_r[i][0][0] +
			undistorted_l[i][0][1] * lines_r[i][0][1] + lines_r[i][0][2] ) + abs( undistorted_r[i][0][0] * lines_l[i][0][0] +
			undistorted_r[i][0][1] * lines_l[i][0][1] + lines_l[i][0][2] )
	calibration['reproject_error'] /= len( undistorted_r )
	# Write calibration results
	with open( '{}/calibration.log'.format(calibration_directory) , 'w') as output_file :
		output_file.write( '\n~~~ Left camera calibration ~~~\n\n' )
		output_file.write( 'Calibration error : {}\n'.format( cam1['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( cam1['reproject_error'] ) )
		output_file.write( 'Camera matrix :\n{}\n'.format( cam1['camera_matrix'] ) )
		output_file.write( 'Distortion coefficients :\n{}\n'.format( cam1['dist_coefs'].ravel() ) )
		output_file.write( '\n~~~ Right camera calibration ~~~\n\n' )
		output_file.write( 'Calibration error : {}\n'.format( cam2['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( cam2['reproject_error'] ) )
		output_file.write( 'Camera matrix :\n{}\n'.format( cam2['camera_matrix'] ) )
		output_file.write( 'Distortion coefficients :\n{}\n'.format( cam2['dist_coefs'].ravel() ) )
		output_file.write( '\n~~~ Stereo camera calibration ~~~\n\n' )
		output_file.write( 'Stereo calibration error : {}\n'.format( calibration['calib_error'] ) )
		output_file.write( 'Reprojection error : {}\n'.format( calibration['reproject_error'] ) )
		output_file.write( 'Left camera matrix :\n{}\n'.format( calibration['camera_matrix_l'] ) )
		output_file.write( 'Left distortion coefficients :\n{}\n'.format( calibration['dist_coefs_l'].ravel() ) )
		output_file.write( 'Right camera matrix :\n{}\n'.format( calibration['camera_matrix_r'] ) )
		output_file.write( 'Right distortion coefficients :\n{}\n'.format( calibration['dist_coefs_r'].ravel() ) )
		output_file.write( 'Rotation matrix :\n{}\n'.format( calibration['R'] ) )
		output_file.write( 'Translation vector :\n{}\n'.format( calibration['T'].ravel() ) )
		output_file.write( 'Essential matrix :\n{}\n'.format( calibration['E'] ) )
		output_file.write( 'Fundamental matrix :\n{}\n'.format( calibration['F'] ) )
		output_file.write( 'Rotation matrix for the first camera :\n{}\n'.format( calibration['R1'] ) )
		output_file.write( 'Rotation matrix for the second camera :\n{}\n'.format( calibration['R2'] ) )
		output_file.write( 'Projection matrix for the first camera :\n{}\n'.format( calibration['P1'] ) )
		output_file.write( 'Projection matrix for the second camera :\n{}\n'.format( calibration['P2'] ) )
		output_file.write( 'Disparity-to-depth mapping matrix :\n{}\n'.format( calibration['Q'] ) )
		output_file.write( 'ROI for the left camera :  {}\n'.format( calibration['ROI1'] ) )
		output_file.write( 'ROI for the right camera : {}\n'.format( calibration['ROI2'] ) )
	# Write the calibration object with all the parameters
	with open( '{}/calibration.pkl'.format(calibration_directory) , 'wb') as output_file :
		pickle.dump( calibration, output_file, pickle.HIGHEST_PROTOCOL )
	# Return the calibration
	return calibration
Esempio n. 42
0
    def calibrate_cameras(self):
        """Calibrate cameras based on found chessboard corners."""
        criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                    100, 1e-5)
        stereo_flag = (cv2.CALIB_FIX_INTRINSIC+cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_FIX_FOCAL_LENGTH
                +cv2.CALIB_FIX_PRINCIPAL_POINT)
        #stereo_flag = cv2.CALIB_USE_INTRINSIC_GUESS
        calib = StereoCalibration()
        '''
        calib.cam_mats["left"] = []
        calib.cam_mats["right"] = []
        calib.cam_mats["left"].append(np.array([6761, 0, 0]).astype('float'))
        calib.cam_mats["left"].append(np.array([0, 6700, 0]).astype('float'))
        calib.cam_mats["left"].append(np.array([1818, 1007, 1]).astype('float'))
        
        calib.cam_mats["right"].append(np.array([6888, 0, 0]).astype('float'))
        calib.cam_mats["right"].append(np.array([0, 6821, 0]).astype('float'))
        calib.cam_mats["right"].append(np.array([1664, 1096, 1]).astype('float'))
        '''
        calib.cam_mats['left'] = np.zeros((3,3))
        calib.cam_mats['left'][0,0] = 6761
        calib.cam_mats['left'][0,1] = 0
        calib.cam_mats['left'][0,2] = 0
        calib.cam_mats['left'][1,0] = 0
        calib.cam_mats['left'][1,1] = 6700
        calib.cam_mats['left'][1,2] = 0
        calib.cam_mats['left'][2,0] = 1818
        calib.cam_mats['left'][2,1] = 1007
        calib.cam_mats['left'][2,2] = 1
        calib.cam_mats['right'] = np.zeros((3,3))
        calib.cam_mats['right'][0,0] = 6888
        calib.cam_mats['right'][0,1] = 0
        calib.cam_mats['right'][0,2] = 0
        calib.cam_mats['right'][1,0] = 0
        calib.cam_mats['right'][1,1] = 6821
        calib.cam_mats['right'][1,2] = 0
        calib.cam_mats['right'][2,0] = 1664
        calib.cam_mats['right'][2,1] = 1096
        calib.cam_mats['right'][2,2] = 1

        
        (calib.cam_mats["left"], calib.dist_coefs["left"],
         calib.cam_mats["right"], calib.dist_coefs["right"],
         calib.rot_mat, calib.trans_vec, calib.e_mat,
         calib.f_mat) = cv2.stereoCalibrate(self.object_points,
                                            self.image_points["left"],
                                            self.image_points["right"],
                                            self.image_size,
                                            calib.cam_mats["left"],
                                            calib.dist_coefs["left"],
                                            calib.cam_mats["right"],
                                            calib.dist_coefs["right"],
                                            #self.image_size,
                                            calib.rot_mat,
                                            calib.trans_vec,
                                            calib.e_mat,
                                            calib.f_mat,
                                            criteria=criteria,
                                            flags=stereo_flag)[1:]
        (calib.rect_trans["left"], calib.rect_trans["right"],
         calib.proj_mats["left"], calib.proj_mats["right"],
         calib.disp_to_depth_mat, calib.valid_boxes["left"],
         calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
                                                      calib.dist_coefs["left"],
                                                      calib.cam_mats["right"],
                                                      calib.dist_coefs["right"],
                                                      self.image_size,
                                                      calib.rot_mat,
                                                      calib.trans_vec,
                                                      flags=0)
        for side in ("left", "right"):
            (calib.undistortion_map[side],
             calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
                                                        calib.cam_mats[side],
                                                        calib.dist_coefs[side],
                                                        calib.rect_trans[side],
                                                        calib.proj_mats[side],
                                                        self.image_size,
                                                        cv2.CV_32FC1)
        '''
        # This is replaced because my results were always bad. Estimates are
        # taken from the OpenCV samples.
        width, height = self.image_size
        focal_length = 0.8 * width
        calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                              [0, -1, 0, 0.5 * height],
                                              [0, 0, 0, -focal_length],
                                              [0, 0, 1, 0]])
                                              '''
        return calib
Esempio n. 43
0
        # cv2.imshow('img_left',l_tagged_img)
        # cv2.imshow('img_right',r_tagged_img)
        # cv2.waitKey(2000)
        cv2.imwrite(f'../imgs/check/{i+1}_l.jpg', l_tagged_img)
        cv2.imwrite(f'../imgs/check/{i + 1}_r.jpg', r_tagged_img)


print('Calibrating each camera individually...')
l_ret, l_mtx, l_dist, l_rvecs, l_tvecs = cv2.calibrateCamera(objpoints, left_imgpoints, l_img_size, None, None, criteria=criteria)
r_ret, r_mtx, r_dist, r_rvecs, r_tvecs = cv2.calibrateCamera(objpoints, right_imgpoints, r_img_size, None, None, criteria=criteria)
# print('Left camera calibration RMS:', l_ret)
# print('Right camera calibration RMS:', r_ret)

print('Calibrating the stereo camera pair...')
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(objpoints,
                        left_imgpoints,
                        right_imgpoints,
                        l_mtx,
                        l_dist,
                        r_mtx,
                        r_dist,
                        l_img_size,
                        criteria=criteria)

print('*'*50)
print('Rotation matrix from the left to the right camera coordinate system:', '\n', R)
print('Translation vector from the left to the right camera coordinate system:', '\n', T)
print('Essential matrix between the left and right cameras:', '\n', E)
print('Fundamental matrix between the left and right cameras:', '\n', F)
print('Reconstruction loss (RMS reprojection error):', retval)
Esempio n. 44
0
h, w = image_lists[0].shape
camera_matrix = list()

camera_matrix.append(
    cv2.initCameraMatrix2D(object_points, image_points[:image_number], (w, h),
                           0))
camera_matrix.append(
    cv2.initCameraMatrix2D(object_points, image_points[image_number:], (w, h),
                           0))

# 4. Stereo calibration
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 100, 1e-5)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
    cv2.stereoCalibrate(object_points, image_points[:image_number], image_points[image_number:], camera_matrix[0],
        None, camera_matrix[1], None, (w, h),
        flags=cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_USE_INTRINSIC_GUESS |
        cv2.CALIB_SAME_FOCAL_LENGTH | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5,
        criteria=term)

# 5. Rectify one image pair to check whether epipolar rectification succeeded
start_time = time.time()
fname1 = './images/left/left01.jpg'
fname2 = './images/right/right01.jpg'
img1 = cv2.imread(fname1)
img2 = cv2.imread(fname2)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
    cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
    # If found, add object points, image points (after refining them)
	if ret == True: 
		corners22 = cv2.cornerSubPix(gray2,corners21,(11,11),(-1,-1),criteria)
		imgpoints2.append(corners22)
        # Draw and display the corners
		img2 = cv2.drawChessboardCorners(img2, (8,6), corners22,ret)
		cv2.imshow('img',img2)
		cv2.waitKey(500)
	i=i+1
cv2.destroyAllWindows()	

ret, cameramatrix2, distcoeffs2, rvecs2, tvecs2 = cv2.calibrateCamera(objpoints, imgpoints2, (320,240),None,None)
ret, cameramatrix1, distcoeffs1, rvecs1, tvecs1 = cv2.calibrateCamera(objpoints, imgpoints1, (320,240),None,None)
R1,Jacobian1=cv2.Rodrigues(rvecs1[1])
R2,Jacobian2=cv2.Rodrigues(rvecs2[1])
ret,cameraMatrix1,distCoeffs1,cameraMatrix2,distCoeffs2,R,T,E,F=cv2.stereoCalibrate(objpoints,imgpoints1,imgpoints2,(320,240),None,None,None,None)#cameramatrix1,distcoeffs1,cameramatrix2,distcoeffs2)
#cv2.StereoRectify(cameramatrix1,distcoeffs1,cameramatrix2,distcoeffs2,(320,240))#,R,T, R1, R2, P1, P2, Q, CV_CALIB_ZERO_DISPARITY, -1, (0, 0))
print 'Camera Matrix 1:' + str(cameramatrix1)
print 'Camera Matrix 2:' + str(cameramatrix2)
print 'Distortion Coefficients 1:' + str(distcoeffs1)
print 'Distortion Coefficients 2:' + str(distcoeffs2)
print 'Relative Rotational Matrix:' + str(R)
print 'Relative Translational Matrix:' + str(T)

#print Q


#stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
#disparity = stereo.compute(img1,img2)
#imshow('disparity',disparity)
#cv2.ReprojectImageto3D(disparity,_3DImage,Q,handleMissingValues=0)
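A minimal sketch fleshing out the commented-out idea above, using the newer OpenCV 3+ API names (cv2.StereoBM_create / cv2.reprojectImageTo3D); imgL_rect, imgR_rect and Q are assumptions here, standing for a rectified grayscale image pair and the reprojection matrix from cv2.stereoRectify, which is commented out above:
import cv2
import numpy as np

# Block-matcher parameters are illustrative only; tune them for the actual rig.
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
# compute() returns fixed-point disparities scaled by 16; convert to pixels.
disparity = stereo.compute(imgL_rect, imgR_rect).astype(np.float32) / 16.0
# Reproject the disparity map to 3-D using the assumed Q matrix.
points_3d = cv2.reprojectImageTo3D(disparity, Q, handleMissingValues=True)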
Esempio n. 46
0
#load img_points
rgbImages = glob(const.rgbFolder + '*.dat')
irImages = glob(const.irFolder+ '*.dat')
rgb_img_points = getImagePoints(rgbImages)
ir_img_points = getImagePoints(irImages)

#create object points for all image pairs
pattern_points = np.zeros((len(irImages), np.prod(const.pattern_size), 3), np.float32)
pattern_points[:,:, :2] = np.indices(const.pattern_size).T.reshape(-1, 2)
pattern_points *= const.square_size

#load calibration results
rgbCamera = shelve.open(const.rgbCameraIntrinsic)
irCamera = shelve.open(const.irCameraIntrinsic)

retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F  = cv2.stereoCalibrate(pattern_points,
                                                                                                    ir_img_points,
                                                                                                    rgb_img_points,
                                                                                                    irCamera['camera_matrix'],
                                                                                                    irCamera['dist_coefs'],
                                                                                                    rgbCamera['camera_matrix'],
                                                                                                    rgbCamera['dist_coefs'],
                                                                                                    const.ir_image_size)

print("error:"+ str(retval))

#save calibration results
camera_file = shelve.open(const.rgbToIR, 'n')
camera_file['R'] = R
camera_file['T'] = T
camera_file.close()
Esempio n. 47
0
def calibrate_stereo_camera(left_video, right_video, cameraMatrix_left,
                            distCoeffs_left, cameraMatrix_right,
                            distCoeffs_right):
    """
    Calibrate a stereo camera pair using data
    previously recorded.
    """
    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp_left = np.zeros((6 * 7, 3), np.float32)
    objp_left[:, :2] = np.mgrid[0:6, 0:7].T.reshape(-1, 2)
    objp_right = np.zeros((6 * 7, 3), np.float32)
    objp_right[:, :2] = np.mgrid[0:6, 0:7].T.reshape(-1, 2)
    objp_left *= 0.024
    objp_right *= 0.024

    # Arrays to store object points and image points from all the images.
    objpoints_left = []  # 3d point in real world space
    imgpoints_left = []  # 2d points in image plane.
    objpoints_right = []  # 3d point in real world space
    imgpoints_right = []  # 2d points in image plane.

    # Get the first pair of frames
    ret_left, frame_left = left_video.read()
    ret_right, frame_right = right_video.read()

    # Get the shape
    shape = frame_left.shape

    # While there are still images to read
    i = 0
    while ret_left is True and ret_right is True:
        gray_left = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)
        gray_right = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)
        ret_left, corners_left = cv2.findChessboardCorners(gray_left, (6, 7), None)
        ret_right, corners_right = cv2.findChessboardCorners(gray_right, (6, 7), None)

        # If found, add object points, image points (after refining them)
        if ret_left is True and ret_right is True:

            # Append object points
            objpoints_left.append(objp_left)
            objpoints_right.append(objp_right)

            # Refine chessboard corner locations
            cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), CRITERIA)
            cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), CRITERIA)

            # Append image points
            imgpoints_left.append(corners_left)
            imgpoints_right.append(corners_right)

            # Draw and display the corners
            cv2.drawChessboardCorners(frame_left, (6, 7), corners_left, ret_left)
            cv2.drawChessboardCorners(frame_right, (6, 7), corners_right, ret_right)

            print i
            i = i + 1

        # Display the images
        cv2.imshow('frame_left', frame_left)
        cv2.imshow('frame_right', frame_right)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

        # Advance to the next pair of frames
        ret_left, frame_left = left_video.read()
        ret_right, frame_right = right_video.read()

    # Calibrate the stereo camera pair
    print "Calculating calibration..."
    stereo_calibrate_args = {
        'objectPoints': objpoints_left,
        'imagePoints1': imgpoints_left,
        'imagePoints2': imgpoints_right,
        'imageSize': (shape[1], shape[0]),
        'criteria': CRITERIA,
        'flags': cv2.cv.CV_CALIB_USE_INTRINSIC_GUESS,
        'cameraMatrix1': cameraMatrix_left,
        'cameraMatrix2': cameraMatrix_right,
        'distCoeffs1': distCoeffs_left,
        'distCoeffs2': distCoeffs_right
    }
    _, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
                                    cv2.stereoCalibrate(**stereo_calibrate_args)
    print "Calculating rectification..."
    stereo_rectify_args = {
        'cameraMatrix1': cameraMatrix1,
        'cameraMatrix2': cameraMatrix2,
        'distCoeffs1': distCoeffs1,
        'distCoeffs2': distCoeffs2,
        'imageSize': (shape[1], shape[0]),
        'R': R,
        'T': T
    }
    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
                                        cv2.stereoRectify(**stereo_rectify_args)
    print "Calculating mappings..."
    map_1_left, map_2_left = cv2.initUndistortRectifyMap(cameraMatrix1,
                                                         distCoeffs1, R1, P1,
                                                         (shape[1], shape[0]),
                                                         cv2.CV_32FC1)
    map_1_right, map_2_right = cv2.initUndistortRectifyMap(cameraMatrix2,
                                                           distCoeffs2, R2, P2,
                                                           (shape[1], shape[0]),
                                                           cv2.CV_32FC1)

    # Save the calibration mappings for future use
    np.save("test_data/map_1_left.npy", map_1_left)
    np.save("test_data/map_2_left.npy", map_2_left)
    np.save("test_data/map_1_right.npy", map_1_right)
    np.save("test_data/map_2_right.npy", map_2_right)
Esempio n. 48
0
def calibrate_stereo(left_filenames, right_filenames):
    left_images = sorted(glob.glob(left_filenames))
    right_images = sorted(glob.glob(right_filenames))

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8,0:6].T.reshape(-1,2)

    obj_points = []
    img_pointsL = []
    img_pointsR = []

    for i in range(len(left_images)):
        nameL = left_images[i]
        nameR = right_images[i]

        imgL = cv2.imread(nameL, 0)
        imgR = cv2.imread(nameR, 0)

        retL, cornersL = cv2.findChessboardCorners(imgL, (8,6), None)
        retR, cornersR = cv2.findChessboardCorners(imgR, (8,6), None)

        if retL and retR:
            obj_points.append(objp)
            cv2.cornerSubPix(imgL, cornersL, (11,11), (-1,-1), criteria)
            img_pointsL.append(cornersL)
            cv2.cornerSubPix(imgR, cornersR, (11,11), (-1,-1), criteria)
            img_pointsR.append(cornersR)
            yield False, True, (nameL,cornersL), (nameR,cornersR)
        else:
            yield False, False, (nameL,None), (nameR,None)

    image_size = imgL.shape[::-1]

    # Find individual intrinsic parameters first
    retL, matrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
            obj_points, img_pointsL, image_size, None, None)
    retR, matrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
            obj_points, img_pointsR, image_size, None, None)

    # Calibrate stereo camera, with calculated intrinsic parameters
    ret, matrixL, distL, matrixR, distR, R, T, E, F = cv2.stereoCalibrate(
            obj_points, img_pointsL, img_pointsR, image_size,
            matrixL, distL, matrixR, distR, flags=cv2.CALIB_FIX_INTRINSIC)

    # Calculate rectification transforms for each camera
    R1 = np.zeros((3,3), np.float32)
    R2 = np.zeros((3,3), np.float32)
    P1 = np.zeros((3,4), np.float32)
    P2 = np.zeros((3,4), np.float32)
    Q = np.zeros((4,4), np.float32)
    R1, R2, P1, P2, Q, roiL, roiR = cv2.stereoRectify(
            matrixL, distL, matrixR, distR, image_size, R, T)

    # Create undistortion/rectification map for each camera
    mapsL = cv2.initUndistortRectifyMap(matrixL, distL, R1, P1,
            image_size, cv2.CV_32FC1)
    mapsR = cv2.initUndistortRectifyMap(matrixR, distR, R2, P2,
            image_size, cv2.CV_32FC1)

    # Convert maps to fixed-point representation (faster)
    mapsL = cv2.convertMaps(mapsL[0], mapsL[1], cv2.CV_32FC1)
    mapsR = cv2.convertMaps(mapsR[0], mapsR[1], cv2.CV_32FC1)

    calib = {
        "intrinsicL": matrixL, "intrinsicR": matrixR,
        "mapsL": mapsL, "mapsR": mapsR,
        "R1": R1, "R2": R2, "P1": P1, "P2": P2, "Q": Q,
        "roiL": roiL, "roiR": roiR
    }
    yield True, calib, None, None
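A minimal usage sketch for the generator above (the glob patterns are illustrative): it yields one progress tuple per image pair and finishes with (True, calib, None, None).
calib = None
for done, payload, left_info, right_info in calibrate_stereo('left/*.png', 'right/*.png'):
    if done:
        calib = payload                      # final calibration dictionary
    elif payload:                            # chessboard found in this pair
        print('corners found in', left_info[0], 'and', right_info[0])
    else:
        print('no corners in', left_info[0], '/', right_info[0])

print('Q matrix:\n', calib['Q'])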
Esempio n. 49
0
print("rvecsL: ", rvecsL) # 旋转向量
print("tvecsL: ", tvecsL) # 平移向量
'''

# Right Side
print("Calibrating right camera...")
retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
    objPoints, imagePointsR, imageSize, None, None)

# Calibrate the Camera for Stereo
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC

print("Calibrating cameras together...")
retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
    objPoints, imagePointsL, imagePointsR, mtxL, distL, mtxR, distR, imageSize,
    None, None, None, None, flags, criteriaStereo)

print("Rectifying cameras...")
rectifyScale = 0
RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(MLS, dLS, MRS, dRS,
                                                  imageSize, R, T, None, None,
                                                  None, None, None,
                                                  cv2.CALIB_ZERO_DISPARITY,
                                                  rectifyScale)

print("Saving calibration...")
mapXL, mapYL = cv2.initUndistortRectifyMap(MLS, dLS, RL, PL, imageSize,
                                           cv2.CV_32FC1)
mapXR, mapYR = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR, imageSize,
                                           cv2.CV_32FC1)
Esempio n. 50
0
    def calibrate_stereo_camera(self, alpha=0.0):
        '''
        Run stereo calibration
        
        Returns calibration rms error (float), image height, width 
        and camera_matrix, dist_coeffs, projection_matrix and rectification_matrix 
        for right and left camera (as np.ndarray() type) as well as baseline R and T
        
        @param alpha: Set zoom, alpha = 0.0 results in cropped image with all pixels valid in undistorted image, alpha = 1.0 preserves all pixels from original image
        @type  alpha: float (0.0 < alpha < 1.0)
        '''
        # load images
        print "--> loading left  images from folder '%s' with prefix '%s'" % (self.folder, self.image_prefix_left)
        print "--> loading right images from folder '%s' with prefix '%s'" % (self.folder, self.image_prefix_right)
        images_l = self._load_images(self.folder, self.image_prefix_left, grayscale=True)
        images_r = self._load_images(self.folder, self.image_prefix_right, grayscale=True)
        print "--> loaded %i left  images" % len(images_l)           
        print "--> loaded %i right images" % len(images_r)       
        
        # get width and height of images and check all
        (h, w) = images_l[0].shape[:2]
        for l in images_l: assert l.shape[:2] == (h, w)
        for r in images_r: assert r.shape[:2] == (h, w)
        
        # detect image and object points in all images
        print "--> detecting calibration object in all images..."
        (image_points_l, object_points_l) = self._detect_points(images_l, is_grayscale=True)
        (image_points_r, object_points_r) = self._detect_points(images_r, is_grayscale=True)
    
        # sanity checks for image and object points
        print "image_points_l = " + str(len(image_points_l)) + ", image_points_r = " + str(len(image_points_r))
        print "object_points_l = " + str(len(object_points_l)) + ", object_points_r = " + str(len(object_points_r))
        assert (len(image_points_l) == len(image_points_r))
        assert (len(object_points_l) == len(object_points_r))
        object_points = object_points_l
    
        # prepare matrices
        camera_matrix_l = np.zeros((3,3), np.float32)
        dist_coeffs_l   = np.zeros((1,5), np.float32)
        camera_matrix_r = np.zeros((3,3), np.float32)
        dist_coeffs_r   = np.zeros((1,5), np.float32)
    
        # mono flags
        mono_flags = 0
        #mono_flags |= cv2.CALIB_FIX_K3
    
        # run monocular calibration on each camera to get intrinsic parameters
        (rms_l, camera_matrix_l, dist_coeffs_l, _, _) = cv2.calibrateCamera(object_points, image_points_l, (w, h), camera_matrix_l, dist_coeffs_l, flags=mono_flags)
        (rms_r, camera_matrix_r, dist_coeffs_r, _, _) = cv2.calibrateCamera(object_points, image_points_r, (w, h), camera_matrix_r, dist_coeffs_r, flags=mono_flags)
        #(camera_matrix_l, _) = cv2.getOptimalNewCameraMatrix(camera_matrix_l, dist_coeffs_l, (w, h), alpha)
        #(camera_matrix_r, _) = cv2.getOptimalNewCameraMatrix(camera_matrix_r, dist_coeffs_r, (w, h), alpha)

        # set stereo flags
        stereo_flags = 0
        stereo_flags |= cv2.CALIB_FIX_INTRINSIC
        
#        # More available flags...
#        stereo_flags |= cv2.CALIB_USE_INTRINSIC_GUESS     # Refine intrinsic parameters
#        stereo_flags |= cv2.CALIB_FIX_PRINCIPAL_POINT     # Fix the principal points during the optimization.
#        stereo_flags |= cv2.CALIB_FIX_FOCAL_LENGTH        # Fix focal length
#        stereo_flags |= cv2.CALIB_FIX_ASPECT_RATIO        # fix aspect ratio
#        stereo_flags |= cv2.CALIB_SAME_FOCAL_LENGTH       # Use same focal length
#        stereo_flags |= cv2.CALIB_ZERO_TANGENT_DIST       # Set tangential distortion to zero
#        stereo_flags |= cv2.CALIB_RATIONAL_MODEL          # Use 8 param rational distortion model instead of 5 param plumb bob model
        
        # run stereo calibration
        res = cv2.stereoCalibrate(object_points, image_points_l, image_points_r, camera_matrix_l, dist_coeffs_l, camera_matrix_r, dist_coeffs_r, (w, h), flags=stereo_flags)
        (rms_stereo, camera_matrix_l, dist_coeffs_l, camera_matrix_r, dist_coeffs_r, R, T, E, F) = res    
        
        # run stereo rectification
        res = self._rectify(camera_matrix_l, dist_coeffs_l, camera_matrix_r, dist_coeffs_r, (h, w), R, T, alpha)
        (rectification_matrix_l, rectification_matrix_r, projection_matrix_l, projection_matrix_r) = res

#        # DEBUG: check types -> should all be np.ndarray
#        print type(camera_matrix_l)
#        print type(dist_coeffs_l)
#        print type(rectification_matrix_l)
#        print type(projection_matrix_l)
#        print type(camera_matrix_r)
#        print type(dist_coeffs_r)
#        print type(rectification_matrix_r)
#        print type(projection_matrix_r)
#        print type(R)
#        print type(T)
        
        return ((rms_l, rms_r, rms_stereo), 
                camera_matrix_l, dist_coeffs_l, rectification_matrix_l, projection_matrix_l,
                camera_matrix_r, dist_coeffs_r, rectification_matrix_r, projection_matrix_r,
                (h, w), R, T)
Esempio n. 51
0
                continue

    cv2.destroyAllWindows()
    print("calibrating camera")
    print(50 * "=")
    (_ , cameraMatrixRight, distortionRight, _, _) = cv2.calibrateCamera(points3DList, cornersListRight, \
                                                                         rightFrame.shape[::-1], None, None)
    (_, cameraMatrixLeft, distortionLeft, _, _) = cv2.calibrateCamera(points3DList, cornersListLeft, \
                                                                      leftFrame.shape[::-1], None, None)

    TERMINATION_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER,
                            30, 0.001)
    (_, _, _, _, _, rotationMatrix, translationVector, _,
     _) = cv2.stereoCalibrate(points3DList, cornersListLeft, cornersListRight,
                              cameraMatrixLeft, distortionLeft,
                              cameraMatrixRight, distortionRight,
                              rightFrame.shape[::-1], None, None, None, None,
                              cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)

    print("saving numpy files")
    print(50 * "=")

    np.save("cameraMatrixRight.npy", cameraMatrixRight)
    np.save("cameraMatrixLeft.npy", cameraMatrixLeft)
    np.save("distortionLeft.npy", distortionLeft)
    np.save("distortionRight.npy", distortionRight)
    np.save("rotationMatrix.npy", rotationMatrix)
    np.save("translationVector.npy", translationVector)

    cameraProjectionMatrixLeft = np.dot(cameraMatrixLeft, np.c_[np.eye(3),
                                                                np.zeros(
Esempio n. 52
0
    def stop_recording(self):
        self.chb_dimensions_spin.setEnabled(True)
        self.chb_size_x_spin.setEnabled(True)
        self.chb_size_y_spin.setEnabled(True)

        self.start_calib_btn.setText('Start recording')

        self.recording = False

        min_frames = 10
        if len(self.corners['left']) < min_frames:
            QMessageBox.critical(
                self, "Not enough images",
                """You have not recorded enough images of the checkerboard. Please record at least {} images to calibrate"""
                .format(min_frames), QMessageBox.Ok)
            return

        self.status_bar_update.emit('Calibrating left camera intrinsics')
        rmsL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
            self.object_points,
            self.corners['left'], (self.res_width, self.res_height),
            None,
            None,
            flags=cv2.CALIB_ZERO_TANGENT_DIST)
        print('Left camera RMS reprojection error {}'.format(rmsL))
        if rmsL > 1:
            rospy.logwarn(
                'The reprojection error is very large. Take more images from different viewpoints. Make sure that the checkerboard covers all parts of the image in some frames. Take images from close up, far away and shallow angles of attack.'
            )

        self.status_bar_update.emit('Calibrating right camera intrinsics')
        rmsR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
            self.object_points,
            self.corners['right'], (self.res_width, self.res_height),
            None,
            None,
            flags=cv2.CALIB_ZERO_TANGENT_DIST)
        print('Right camera RMS reprojection error {}'.format(rmsR))
        if rmsR > 1:
            rospy.logwarn(
                'The reprojection error is very large. Take more images from different viewpoints. Make sure that the checkerboard covers all parts of the image in some frames. Take images from close up, far away and shallow angles of attack.'
            )

        self.status_bar_update.emit('Calibrating extrinsics')
        rmsStereo, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
            self.object_points,
            self.corners['left'],
            self.corners['right'],
            mtxL,
            distL,
            mtxR,
            distR, (self.res_width, self.res_height),
            flags=cv2.CALIB_FIX_INTRINSIC + cv2.CALIB_ZERO_TANGENT_DIST)
        print('Stereo RMS reprojection error {}'.format(rmsStereo))
        if rmsStereo > 1:
            rospy.logwarn(
                'The reprojection error is very large. Take more images from different viewpoints. Make sure that the checkerboard covers all parts of the image in some frames. Take images from close up, far away and shallow angles of attack.'
            )
        thresh = 1  # threshold for RMS warning
        if rmsL > thresh or rmsR > thresh or rmsStereo > thresh:
            reply = QMessageBox.warning(
                self, "High reprojection error",
                """The reprojection error is very large.\nRMS reprojection error left: {:.2f}, right: {:.2f}, stereo: {:.2f}.\
                                        \nTake more images from different view points.\
                                        Make sure that the checkerboard covers all parts of the image in some frames.\
                                        \nTake images from close up, far away and shallow angles of attack.\
                                        \n\nWould you like to save this calibration anyway?"""
                .format(rmsL, rmsR, rmsStereo),
                QMessageBox.Yes | QMessageBox.No, QMessageBox.No)

            if reply == QMessageBox.No:
                return

        self.open_calibration_file()
        self.calib['R_lr'] = R.T.tolist()
        self.calib['R_rl'] = R.tolist()

        self.calib['CameraParameters1'][
            'DistortionModel'] = 'plumb_bob'  # opencv parameters are the same as matlab
        self.calib['CameraParameters1']['FocalLength'][0] = float(mtxL[0][0])
        self.calib['CameraParameters1']['FocalLength'][1] = float(mtxL[1][1])
        self.calib['CameraParameters1']['PrincipalPoint'][0] = float(
            mtxL[0][2])
        self.calib['CameraParameters1']['PrincipalPoint'][1] = float(
            mtxL[1][2])
        self.calib['CameraParameters1']['RadialDistortion'][0] = float(
            distL[0][0])
        self.calib['CameraParameters1']['RadialDistortion'][1] = float(
            distL[0][1])
        self.calib['CameraParameters1']['RadialDistortion'][2] = float(
            distL[0][4])

        self.calib['CameraParameters2']['DistortionModel'] = 'plumb_bob'
        self.calib['CameraParameters2']['FocalLength'][0] = float(mtxR[0][0])
        self.calib['CameraParameters2']['FocalLength'][1] = float(mtxR[1][1])
        self.calib['CameraParameters2']['PrincipalPoint'][0] = float(
            mtxR[0][2])
        self.calib['CameraParameters2']['PrincipalPoint'][1] = float(
            mtxR[1][2])
        self.calib['CameraParameters2']['RadialDistortion'][0] = float(
            distR[0][0])
        self.calib['CameraParameters2']['RadialDistortion'][1] = float(
            distR[0][1])
        self.calib['CameraParameters2']['RadialDistortion'][2] = float(
            distR[0][4])

        with open(self.calib_path, 'w') as outfile:
            outfile.write(yaml.dump(self.calib, default_flow_style=None))
            self.status_bar_update.emit('Wrote calibration to {}'.format(
                self.calib_path))
            rospy.loginfo('Wrote calibration to {}'.format(self.calib_path))
def stereo_calibration(check_img_folder, nimages, display=False, dims=(4, 11), size=(640, 480)):
    """reads files from a directory of stereo images of opencv circle grid. calibrates intrinsics of each camera, then extrinsics of stereo rig
	"""

    # grab calibration frames directory
    for dir in os.listdir(check_img_folder):
        if fnmatch.fnmatch(dir, "calibration_frames*"):
            check_img_folder = check_img_folder + dir + "/"
            break

    # Number of points in circle grid
    num_pts = dims[0] * dims[1]

    if not os.path.exists(check_img_folder + "images_used/"):
        os.mkdir(check_img_folder + "images_used/")

    # evaluate image points
    nimg = 0  # number of images with found corners
    iptsF1 = []  # image point arrays to fill up
    iptsF2 = []
    random_images = random.sample(range(500), nimages)
    # for n in range(0,nimages,2):
    for n in random_images:
        filename1 = check_img_folder + "cam1_frame_" + str(n + 1) + ".bmp"
        filename2 = check_img_folder + "cam2_frame_" + str(n + 1) + ".bmp"
        if os.path.exists(filename1) and os.path.exists(filename2):
            img1 = cv2.imread(filename1, 0)
            img2 = cv2.imread(filename2, 0)
            # find center points in circle grid
            [found1, points1] = cv2.findCirclesGridDefault(img1, dims, flags=(cv2.CALIB_CB_ASYMMETRIC_GRID))
            [found2, points2] = cv2.findCirclesGridDefault(img2, dims, flags=(cv2.CALIB_CB_ASYMMETRIC_GRID))
            # copy the found points into the ipts matrices
            temp1 = np.zeros((num_pts, 2))
            temp2 = np.zeros((num_pts, 2))
            if found1 and found2:
                for i in range(num_pts):
                    temp1[i, 0] = points1[i, 0, 0]
                    temp1[i, 1] = points1[i, 0, 1]
                    temp2[i, 0] = points2[i, 0, 0]
                    temp2[i, 1] = points2[i, 0, 1]

                iptsF1.append(temp1)
                iptsF2.append(temp2)
                nimg = nimg + 1  # increment image counter

                # save images with points identified
                drawn_boards_1 = img1.copy()
                drawn_boards_2 = img2.copy()
                cv2.drawChessboardCorners(drawn_boards_1, dims, points1, found1)
                cv2.drawChessboardCorners(drawn_boards_2, dims, points2, found2)
                cv2.imwrite(check_img_folder + "images_used/" + "cam1_frame_" + str(n + 1) + ".bmp", drawn_boards_1)
                cv2.imwrite(check_img_folder + "images_used/" + "cam2_frame_" + str(n + 1) + ".bmp", drawn_boards_2)

    print "\n Usable stereo pairs: " + str(nimg)

    # convert image points to numpy
    iptsF1 = np.array(iptsF1, dtype=np.float32)
    iptsF2 = np.array(iptsF2, dtype=np.float32)

    # evaluate object points
    opts = object_points(dims, nimg, 4.35)

    # initialize camera parameters
    intrinsics1 = np.zeros((3, 3))
    intrinsics2 = np.zeros((3, 3))
    distortion1 = np.zeros((8, 1))
    distortion2 = np.zeros((8, 1))

    # Set initial guess for intrinsic camera parameters (focal length = 0.35cm)
    intrinsics1[0, 0] = 583.3
    intrinsics1[1, 1] = 583.3
    intrinsics1[0, 2] = 320
    intrinsics1[1, 2] = 240
    intrinsics1[2, 2] = 1.0

    intrinsics2[0, 0] = 583.3
    intrinsics2[1, 1] = 583.3
    intrinsics2[0, 2] = 320
    intrinsics2[1, 2] = 240
    intrinsics2[2, 2] = 1.0

    # calibrate cameras
    print "Calibrating camera 1..."
    (cam1rms, intrinsics1, distortion1, rotv1, trav1) = cv2.calibrateCamera(
        opts,
        iptsF1,
        size,
        intrinsics1,
        distortion1,
        flags=int(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL),
    )
    print "\nEstimated intrinsic parameters for camera 1:"
    for i in range(3):
        print [intrinsics1[i, j] for j in range(3)]

    print "\nEstimated distortion parameters for camera 1:"
    print distortion1

    print "Calibrating camera 2..."
    (cam2rms, intrinsics2, distortion2, rotv2, trav2) = cv2.calibrateCamera(
        opts,
        iptsF2,
        size,
        intrinsics2,
        distortion2,
        flags=int(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL),
    )

    print "\nEstimated intrinsic parameters for camera 2:"
    for i in range(3):
        print [intrinsics2[i, j] for j in range(3)]

    print "\nEstimated distortion parameters for camera 2:"
    print distortion2

    print "\n rms pixel error:"
    print "cam1 orig: " + str(cam1rms)
    print "cam2 orig: " + str(cam2rms)

    # Estimate extrinsic parameters from stereo point correspondences
    print "\n Stereo estimating..."
    # (stereorms, intrinsics1, distortion1, intrinsics2, distortion2, R, T, E, F) = cv2.stereoCalibrate(opts, iptsF1, iptsF2, intrinsics1, distortion1, intrinsics2, distortion2, size,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1e-7), flags=(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL))
    (stereorms, intrinsics1, distortion1, intrinsics2, distortion2, R, T, E, F) = cv2.stereoCalibrate(
        opts,
        iptsF1,
        iptsF2,
        size,
        intrinsics1,
        distortion1,
        intrinsics2,
        distortion2,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 300, 1e-7),
        flags=(cv2.CALIB_USE_INTRINSIC_GUESS | cv2.CALIB_RATIONAL_MODEL),
    )

    print "\nEstimated extrinsic parameters between cameras 1 and 2:\nRotation:"
    for i in range(3):
        print [R[i, j] for j in range(3)]

    print "\nTranslation:"
    print [T[i, 0] for i in range(3)]
Esempio n. 54
0
print("prev")
print(front_cam.intrinsic, front_cam.dist)
print(back_cam.intrinsic, back_cam.dist)

stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                        100, 1e-5)
# ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
#     objpoints, imgpoints_front,
#     imgpoints_back, front_cam.intrinsic, front_cam.dist, back_cam.intrinsic,
#     back_cam.dist, (1920, 1080),
#     criteria=stereocalib_criteria, flags=flags)
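# NOTE: fresh zero distortion vectors are passed below in place of the stored
# front_cam.dist / back_cam.dist.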
ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
    objpoints,
    imgpoints_front,
    imgpoints_back,
    front_cam.intrinsic,
    np.zeros(5, dtype=np.float64),
    back_cam.intrinsic,
    np.zeros(5, dtype=np.float64), (1920, 1080),
    criteria=stereocalib_criteria,
    flags=flags)

print("back")
print(front_cam.intrinsic, front_cam.dist)
print(back_cam.intrinsic, back_cam.dist)

print('Intrinsic_mtx_1', M1)
print('dist_1', d1)
print('Intrinsic_mtx_2', M2)
print('dist_2', d2)
print('R', R)
print('T', T)
Example n. 55
0
    def calibrate(self):

        ##
        undisorted = False
        cheesboard_found = False
        possib_cho = ['N', 'Y']
        user_choice = ""

        # object points, depend on chessboard size
        objp = np.zeros((np.prod(self.cheesboard_size),3)
                        , np.float32)
        objp[:,:2] = np.mgrid[0:self.cheesboard_size[0],0:self.cheesboard_size[1]]\
            .T.reshape(-1,2)
        objp = objp*self.cheesboard_scale
        raw_input("Press any key to start calibration...")

        while True:

            # Read single frame from camera
            for i in range(0,5):
                _, frame_l = self.cam_l.read()
                _, frame_r = self.cam_r.read()

            # Frame sizes
            rows,cols,dim = frame_l.shape

            M = cv2.getRotationMatrix2D((cols/2,rows/2),-90,1)
            frame_l = cv2.warpAffine(frame_l,M,(cols,rows))
            frame_r = cv2.warpAffine(frame_r,M,(cols,rows))

            # Convert to grayscale
            gray_l = cv2.cvtColor(frame_l,cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(frame_r,cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(gray_l,
                                                     self.cheesboard_size, None)
            ret_r, corners_r = cv2.findChessboardCorners(gray_r,
                                                     self.cheesboard_size, None)

            # If chessboard corners found
            if ret_l and ret_r and not undisorted:

                print "Left Camera: Cheesboard Found"
                print "Right Camera: Cheesboard Found"

                self.objpoints.append(objp)

                cv2.cornerSubPix(gray_l, corners_l,(5, 5),(-1,-1), self.criteria)
                cv2.cornerSubPix(gray_r, corners_r,(5, 5),(-1,-1), self.criteria)

                # Add image points to buffer
                self.imgpoints_l.append(corners_l)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                cv2.drawChessboardCorners(frame_l, self.cheesboard_size, corners_l, ret_l)
                cv2.drawChessboardCorners(frame_r, self.cheesboard_size, corners_r, ret_r)

                if self.lowres:

                    # Put text on image
                    cv2.putText(frame_l,"Found!!!", (100,rows/2), cv2.FONT_HERSHEY_SIMPLEX,
                                2,(255,255,255),5, 1)
                    cv2.putText(frame_r,"Found!!!", (100,rows/2), cv2.FONT_HERSHEY_SIMPLEX,
                                2,(255,255,255),5, 1)
                else:
                    # Put text on image
                    cv2.putText(frame_l,"Found!!!", (100,rows/2), cv2.FONT_HERSHEY_SIMPLEX,
                                4,(255,255,255),5, 1)
                    cv2.putText(frame_r,"Found!!!", (100,rows/2), cv2.FONT_HERSHEY_SIMPLEX,
                                4,(255,255,255),5, 1)


                self.img_counter = self.img_counter + 1
                cheesboard_found = True

            elif undisorted:
                frames = [frame_l, frame_r]
                maps = [left_maps, right_maps]
                rect_frames = [0,0]
                for src in self.cam_sources:
                        rect_frames[src] = cv2.remap(frames[src], maps[src][0], maps[src][1], cv2.INTER_LANCZOS4)

                # Undistorted images
                frame_l = rect_frames[0]
                frame_r = rect_frames[1]


            # show image
            cv2.imshow('img_left',frame_l)
            cv2.imshow('img_right',frame_r)
            k = cv2.waitKey(5) & 0xFF

            # Found a board? Decide what to do next
            if cheesboard_found:
                while user_choice not in possib_cho:
                    user_choice =  raw_input("Found: %d , continue (Y/N) : " % (self.img_counter,))
                cheesboard_found = False

                # If user chose No
                if user_choice == possib_cho[0]:
                    print "Using calculated parameters to remove disortion..."

                    if not self.isOpen3():
                        # Calculating distortion params for stereo camera
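                        # NOTE: this minimal call works with the OpenCV 2.x Python binding, where the
                        # camera matrices and distortion coefficients are optional and estimated from scratch.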
                        ret_val,camera_mat_l, dist_l, camera_mat_r, dist_r, R, T, E, F = cv2.stereoCalibrate(
                                self.objpoints, self.imgpoints_l,
                                self.imgpoints_r, gray_l.shape[::-1]
                        )

                        # distortion params
                        dist = [dist_l, dist_r]

                        # Zero out the last distortion coefficient
                        for src in self.cam_sources:
                            dist[src][0][-1] = 0.0
                    else:
                        # Calculating distortion params for each camera
                        ret_l, mtx_l, dist_l, rvecs_l, tvecs_l = cv2.calibrateCamera(self.objpoints,
                                                                                     self.imgpoints_l,
                                                                                     gray_l.shape[::-1],
                                                                                     None,None)
                        ret_r, mtx_r, dist_r, rvecs_r, tvecs_r = cv2.calibrateCamera(self.objpoints,
                                                                                     self.imgpoints_r,
                                                                                     gray_r.shape[::-1],
                                                                                    None,None)

                        print "Calibration Error for left, right camera : " + str(ret_l) + ", " + str(ret_r)

                        # distortion params
                        dist = [dist_l, dist_r]

                        # Zero out the last distortion coefficient
                        for src in self.cam_sources:
                            dist[src][0][-1] = 0.0

                        ## compute new optimal camera matrices from the calibrated intrinsics
                        camera_mat_l, roi_l=cv2.getOptimalNewCameraMatrix(mtx_l,dist_l,(cols,rows),1,(cols,rows))
                        camera_mat_r, roi_r=cv2.getOptimalNewCameraMatrix(mtx_r,dist_r,(cols,rows),1,(cols,rows))

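                        # Refine only the extrinsics (R, T): CALIB_FIX_INTRINSIC keeps the
                        # per-camera intrinsics computed above fixed.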
                        stereorms, mtx_l, dist_l, mtx_r,\
                        dist_r, R, T, E, F = cv2.stereoCalibrate(
                                objectPoints=self.objpoints, imagePoints1=self.imgpoints_l,
                                imagePoints2=self.imgpoints_r, cameraMatrix1=camera_mat_l,
                                distCoeffs1=dist_l, cameraMatrix2=camera_mat_r,
                                distCoeffs2=dist_r, imageSize=gray_l.shape[::-1],
                                flags=(cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_RATIONAL_MODEL)
                                )
                        print "Calibration Error for both cameras : " + str(stereorms)

                        # camera matrices
                        cam_mats = [camera_mat_l, camera_mat_r]

                        # Crop option
                        rectify_scale = 0

                        # Rectification
                        R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
                                camera_mat_l, dist_l,
                                camera_mat_r, dist_r,
                                gray_l.shape[::-1], R, T, alpha = rectify_scale
                        )
                        left_maps = cv2.initUndistortRectifyMap(camera_mat_l, dist_l, R1, P1, gray_l.shape[::-1], cv2.CV_16SC2)
                        right_maps = cv2.initUndistortRectifyMap(camera_mat_r, dist_r, R2, P2, gray_l.shape[::-1], cv2.CV_16SC2)

                    ## Save result
                    np.savez(
                            "disortion_params.npz", camera_mat_l=camera_mat_l, camera_mat_r=camera_mat_r, dist_l=dist_l,
                             dist_r=dist_r, R=R, T=T, E=E,
                             F=F, left_maps=left_maps, right_maps=right_maps
                             )

                    undisorted = True
                user_choice = ""

            if k == 27:
                break

        cv2.destroyAllWindows()
Example n. 56
0
def stereoCalibrate():
    board_n = board_w * board_h
    board_sz = (board_w, board_h)
    # cv2.namedWindow('Stereo Calibration', cv2.WINDOW_AUTOSIZE)

    board_pts = inverted_seq(board_sz, square_sz)

    image1, image2, image1_pts, image2_pts, object_pts, imgSize = getMatchedPtsByChessboard(
        [3], board_sz, board_pts)
    #image1, image2, image1_pts, image2_pts, object_pts, imgSize = getMatchedPts()
    # print image1_pts, image2_pts, object_pts, imgSize

    cameraMatrix1 = np.identity(3, np.float32); cameraMatrix2 = np.identity(3, np.float32)
    distCoeffs1 = np.zeros((5), np.float32); distCoeffs2 = np.zeros((5), np.float32)
    #CALIBRATE THE CAMERA
    flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        object_pts, image1_pts, image2_pts, cameraMatrix1, distCoeffs1,
        cameraMatrix2, distCoeffs2, imgSize, flags = flags, criteria = criteria)
    print 'Re-projection error', retval
    print 'Intrinsic matrix 1\n', cameraMatrix1
    print 'Intrinsic matrix 2\n', cameraMatrix2
    print 'Distortion coeffs 1\n', distCoeffs1
    print 'Distortion coeffs 2\n', distCoeffs2
    print 'Rotation matrix\n', R
    print 'Translation vector\n', T
    print 'Essential matrix\n', E
    print 'Fundamental matrix\n', F
    cameraMatrix = cameraMatrix1
    cameraMatrix[:2,2] = (imgSize[0] / 2, imgSize[1] / 2)
    distCoeffs = np.zeros((1, 5), np.float32)
    if True:
        distCoeffs1 = distCoeffs2 = distCoeffs
        cameraMatrix1 = cameraMatrix2 = cameraMatrix
    image1 = cv2.undistort(image1, cameraMatrix1, distCoeffs1)
    image2 = cv2.undistort(image2, cameraMatrix2, distCoeffs2)
    
    #CALIBRATION QUALITY CHECK
    #Using the epipolar geometry constraint: m2' * F * m1 = 0
    assert image1_pts[0].shape == image2_pts[0].shape
    newshape = image1_pts[0].shape
    newshape = (newshape[0], 1, newshape[1])
    #Work in undistorted space (An undocumented signature is used here. @Dstray)
    image1_pts = cv2.undistortPoints(image1_pts[0].reshape(newshape, order = 'F'),
        cameraMatrix1, distCoeffs1, P = cameraMatrix1)
    image2_pts = cv2.undistortPoints(image2_pts[0].reshape(newshape, order = 'F'),
        cameraMatrix2, distCoeffs2, P = cameraMatrix2)
    points1, points2 = image1_pts.reshape(-1, 2), image2_pts.reshape(-1, 2)
    #implemented in opencv 3.0.0
    if False:
        lines1 = cv2.computeCorrespondEpilines(points2, 2, F).reshape(-1, 3)
        lines2 = cv2.computeCorrespondEpilines(points1, 1, F).reshape(-1, 3)
        image1, image2 = drawlines(image1, image2, lines1, points2)
        image2, image1 = drawlines(image2, image1, lines2, points1)
    cv2.imshow('Image 1', image1); cv2.imshow('Image 2', image2)
    cv2.waitKey(0)

    #COMPUTE AND DISPLAY RECTIFICATION
    urmaps, Q = bouguetRectify(image1_pts, image2_pts, imgSize,
        cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, R, T)
    '''urmaps, F = hartleyRectify(image1_pts, image2_pts, imgSize,
        cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, F)'''
    print 'Q\n', Q
    Q = np.float32([[1, 0, 0,  -0.5*imgSize[0]],
                    [0, 1, 0,  -0.5*imgSize[1]],
                    [0, 0, 0, cameraMatrix[0][0]],
                    [0, 0, 0.07,      0]])
    #Save the results
    np.save('rectmap.npy', urmaps)
    np.save('Q.npy', Q)

    #RECTIFY THE IMAGES AND FIND DISPARITY MAPS
    #Setup for finding correspondences
    img1r = cv2.remap(image1, urmaps[0], urmaps[1], cv2.INTER_LINEAR)
    img2r = cv2.remap(image2, urmaps[2], urmaps[3], cv2.INTER_LINEAR)
    cv2.imshow('Image 1', img1r); cv2.imshow('Image 2', img2r)
    cv2.waitKey(0)
    
    '''disp, points = stereoBMMatch(img1r, img2r, Q)
    colors = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    points = points[mask]; colors = colors[mask]
    write_ply('_3DPoints.ply', points, colors)'''
    #stereosgbm_match(img1r, img2r, '_3DPoints.ply', Q)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example n. 57
0
				if (key == 115 and not options.oneCamera):
					savedCorners1 += [corners1];
					savedCorners2 += [corners2];
					objectPoints += [patternPoints];
				elif (key == 115 and options.oneCamera):
					raise BaseException("Option S is only available with two cameras");
				
				h, w = img1.shape[:2]
				cameraMatrix1 = None;
				cameraMatrix2 = None;
				distCoeffs1 = None;
				distCoeffs2 = None;
				R,T,E,F = [None,None,None,None]
				if (len(savedCorners1)>0):
					try:
						ret,cameraMatrix1,distCoeffs1,cameraMatrix2,distCoeffs2,R,T,E,F = cv2.stereoCalibrate(objectPoints, savedCorners1, savedCorners2, (w,h), cameraMatrix1, distCoeffs1,cameraMatrix2,distCoeffs2, R,T,E,F);
					except:
						print len(savedCorners1);
						print len(savedCorners2);
						print savedCorners1;
						print savedCorners2;
				else:	
					ret,cameraMatrix1,distCoeffs1,cameraMatrix2,distCoeffs2,R,T,E,F = cv2.stereoCalibrate([patternPoints], [corners1], [corners2], (w,h), cameraMatrix1, distCoeffs1,cameraMatrix2,distCoeffs2, R,T,E,F);
				print "------------------------"
				printMatrix(R);
				r = cv2.Rodrigues(R)[0];
				
				try:
					theta, vector = parseRotationMatrix(R);
					print "Theta:      %10.7f (%d)" % (theta, theta*360/(2*np.pi));
					print "Vector:     "+printVector(vector,returnString=True);
    def cal_fromcorners(self): 
        '''This function based on code from the ROS camera calibration package.'''
        ret = self.get_calib_data()
        if ret is None:  # If too few correspondences, just quit.
            return
        cam1_ipts, cam2_ipts, b = ret

        #TODO add debug flag for useful printouts like this:
        """
        print "Calibration data info:"
        print "  CAM1 data array shape:\t %s" % str(cam1_ipts.shape)
        print "  CAM2 data array shape:\t %s" % str(cam2_ipts.shape)
        print "  BOARD data array shape:\t %s" % str(b.shape)
        """

        if self.cam1_do_mono_calib or self.cam2_do_mono_calib:
            print "Doing mono calibration for specified cameras."
            self.mono_cal_fromcorners(cam1_ipts, cam2_ipts, b)

        print "Doing stereo calibration..."
        flags = cv2.CALIB_FIX_INTRINSIC # Intrinsics must be provided.

        self.T = numpy.zeros((3, 1), dtype=numpy.float64)
        self.R = numpy.eye(3, dtype=numpy.float64)
        #self.cam2_D = np.zeros((5,1), np.float64)
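        # The cv2.stereoCalibrate argument order changed between OpenCV 2.x and 3.x
        # (the image size moved after the distortion coefficients), hence the version check below.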
        if LooseVersion(cv2.__version__).version[0] == 2:
            cv2.stereoCalibrate(b, cam1_ipts, cam2_ipts,
                               self.cam1_size,
                               self.cam1_K, self.cam1_D,
                               self.cam2_K, self.cam2_D,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)

        else:
            cv2.stereoCalibrate(self.CHESSBOARD, cam1_ipts, cam2_ipts,
                               self.cam1_K, self.cam1_D,
                               self.cam2_K, self.cam2_D,
                               self.size,
                               self.R,                            # R
                               self.T,                            # T
                               criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
                               flags = flags)
        print "=======STEREO======"
        print "R = "
        print self.R, 
        print "\nT = "
        print self.T
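        # Embed R in a 4x4 homogeneous matrix: tf.transformations.quaternion_from_matrix()
        # expects a 4x4 transform.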
        homog = np.hstack((self.R, np.array([[0],[0],[0]])))
        homog = np.vstack((homog, np.array([0,0,0,1])))
        print "\nR as a quaternion:"
        q = tf.transformations.quaternion_from_matrix(homog)
        print q
        # NOTE quat_from_mat() doesn't return a quaternion object!
        #   But the Pose msg requires one!  
        #   Fun times if ya like debugging cryptic errors.
        q_msg = Quaternion(q[0], q[1], q[2], q[3])
        t_msg = Point(self.T[0], self.T[1], self.T[2])


        if self.save_calib_results:
            filename = self.save_data_dir + "stereoParams.yaml"
            while os.path.isfile(filename):
                root, ext = os.path.splitext(filename)
                filename = root + "_new" + ext
            data = dict(
                    R = self.R.flatten().tolist(),
                    T = self.T.flatten().tolist(),
                    R_quaternion = q.flatten().tolist())
            with open(filename,'w') as f:
                yaml.dump(data, f, default_flow_style=False)
            print "===Saved results to " + filename
        else:
            print "Results not saved."

        print "~~~ Calibration finished! ~~~"
    
Example n. 59
-1
def calibrateImages(obj_pts, ptsL, ptsR, imsize):
    '''Calibrate the Stereo camera rig, given:
    @obj_pts: points that specify the calibration checkerboard pattern
    @ptsL: detected image points in the left calibration images
    @ptsR: detected image points in the right calibration images
    @imsize: the stipulated size of all calibration images
    return: updated StereoCam object
    '''
    # Perform Stereo Calibration
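    # NOTE: calling stereoCalibrate with only the point lists and image size relies on the
    # OpenCV 2.x binding, where the intrinsics are optional outputs estimated from scratch.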
    retval, cam1, dist1, cam2, dist2, R, T, E, F = \
        cv2.stereoCalibrate(
            obj_pts, ptsL, ptsR, imsize)
    dist1 = dist1.ravel()
    dist2 = dist2.ravel()

    cameraObj = StereoCam()
    cameraObj.cam1, cameraObj.dist1 = cam1, dist1
    cameraObj.cam2, cameraObj.dist2 = cam2, dist2

    cameraObj.R, cameraObj.T, cameraObj.E, cameraObj.F = R, T, E, F

    # Perform Stereo Rectification
    (R1, R2, P1, P2, Q, roi1, roi2) = \
        cv2.stereoRectify(cam1, dist1, cam2, dist2, imsize, R, T)

    # update the left and right rotation and projection matrices
    cameraObj.R1, cameraObj.R2 = R1, R2
    cameraObj.P1, cameraObj.P2 = P1, P2

    # update the image projection (aka disparity-to-depth mapping) matrix
    cameraObj.Q = Q

    # Recover the rectified intrinsic matrices by decomposing the projection matrices
    K_L = cv2.decomposeProjectionMatrix(P1)
    K1 = K_L[0]
    K_R = cv2.decomposeProjectionMatrix(P2)
    K2 = K_R[0]

    cameraObj.K1, cameraObj.K2 = K1, K2

    return cameraObj
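
A minimal usage sketch for calibrateImages (hypothetical: load_matched_points and the file
patterns stand in for whatever chessboard-detection step produced the point lists, and the
640x480 image size is an assumed placeholder):

import numpy as np
import cv2

# Hypothetical helper that detects chessboard corners in paired left/right images and
# returns (object points, left image points, right image points), one entry per view.
obj_pts, ptsL, ptsR = load_matched_points('left/*.png', 'right/*.png')
imsize = (640, 480)  # (width, height) of the calibration images -- assumed

rig = calibrateImages(obj_pts, ptsL, ptsR, imsize)
print rig.R  # rotation between the two cameras
print rig.T  # translation between the two cameras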