Example No. 1
def stereo_initialize(): 
    #left
    camera_matrixL = np.array([[219.44178, 0., 157.85633], [0., 219.82357, 122.50906], [0.,0.,1.]])
    dist_coefsL = np.array([-0.35878, 0.10911, -0.00069, -0.00088, 0.])

    #right
    camera_matrixR = np.array([[216.15875, 0., 163.36207], [0., 216.12017, 118.46570], [0.,0.,1.]])
    dist_coefsR = np.array([-0.37513, 0.13854, -0.00075, 0.00137, 0.])


    #extrinsic
    om = np.array([-0.00549, 0.01268, -0.06347])
    T = np.array([-63.36115, 0.61235, -1.71106])

    R, _ = cv2.Rodrigues(om)

    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(camera_matrixL, dist_coefsL, camera_matrixR, dist_coefsR, (320, 240), R, T, alpha=-1, newImageSize=(0, 0))
    map1x, map1y = cv2.initUndistortRectifyMap(camera_matrixL, dist_coefsL, R1, camera_matrixL, (320, 240), cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(camera_matrixR, dist_coefsR, R2, camera_matrixR, (320, 240), cv2.CV_32FC1)

    return map1x, map1y, map2x, map2y
Example No. 2
    def fromCamchain(camchain, cam1_no, cam2_no, alpha=0.0):
        res = StereoCameraModel()
        res.cam1 = CameraModel.fromCamchain(camchain, cam1_no)
        res.cam2 = CameraModel.fromCamchain(camchain, cam2_no)

        if res.cam2.T is not None:
            R = res.cam2.T[0:3,0:3]
            T = res.cam2.T[0:3,3]
            #print res.cam2.T, R,T
        else:
            print 'error'
            return None

        # construct R
        cv2.stereoRectify(res.cam1.K,
                 res.cam1.D,
                 res.cam2.K,
                 res.cam2.D,
                 res.cam1.size,
                 R,
                 T,
                 res.cam1.R, res.cam2.R, res.cam1.P, res.cam2.P,
                 alpha = alpha)

        return res
Example No. 3
def triangulate_features(K, d, train_pts, test_pts, R, T):
    R1, R2, P_1, P_2, Q, roi1, roi2 = cv2.stereoRectify(K, d, K, d, train_frame.shape[:2], R, T, alpha=1)
    points1 = np.vstack(train_pts)[:, :-1].T
    #points1 = cv2.convertPointsToHomogeneous(points1)
    points2 = np.vstack(test_pts)[:, :-1].T
    #points2 = cv2.convertPointsToHomogeneous(points2)
    #threeD_points = cv2.triangulatePoints(P1, P2, points1[:2], points2[:2])
    normRshp1 = points1
    normRshp2 = points2

    Tpoints1 = cv2.triangulatePoints(P_1, P_2, normRshp1, normRshp2)
    fin_pts = []
    #print(Tpoints1[0])
    #print(Tpoints1[1])
    #print(Tpoints1[2])
    #print(Tpoints1[3])
    a = (Tpoints1 / Tpoints1[3])[0:3]
    #print(a)
    #print(a.shape)
    a = a.T
    a = a / 100
    print(a)
    max_ax = 0
    max_ay = 0
    max_az = 0
    for zz in range(a.shape[0]):
        if (abs(a[zz][0] - abs(max_ax)) < max_ax + 10) and (abs(a[zz][1] - abs(max_ay)) < max_ay + 10) and (abs(a[zz][2] - abs(max_az)) < max_az + 10):
            if a[zz][0] > abs(max_ax):
                max_ax = a[zz][0]
            if a[zz][1] > abs(max_ay):
                max_ay = a[zz][1]
            if a[zz][2] > abs(max_az):
                max_az = a[zz][2]
            az.scatter(a[zz][0] + new_origin[0], a[zz][1] + new_origin[1], a[zz][2] + new_origin[2], c='g', marker='o')
Example No. 4
def draw_rect(K, d, train_frame, R, T, name):
	#perform the rectification
	R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, d, K, d, train_frame.shape[:2], R, T, alpha=1)
	mapx1, mapy1 = cv2.initUndistortRectifyMap(K, d, R1, K, train_frame.shape[:2], cv2.CV_32F)
	mapx2, mapy2 = cv2.initUndistortRectifyMap(K, d, R2, K, query_frame.shape[:2], cv2.CV_32F)
	img_rect1 = cv2.remap(train_bckp, mapx1, mapy1, cv2.INTER_LINEAR)
	img_rect2 = cv2.remap(query_bckp, mapx2, mapy2, cv2.INTER_LINEAR)

	# draw the images side by side
	total_size = (max(img_rect1.shape[0], img_rect2.shape[0]), img_rect1.shape[1] + img_rect2.shape[1],3)
	img = np.zeros(total_size, dtype=np.uint8)
	img[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
	img[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2
	 
	# draw horizontal lines every 25 px across the side by side image
	for i in range(20, img.shape[0], 25):
		cv2.line(img, (0, i), (img.shape[1], i), (255, 0, 0))
	

	h1, w1 = train_frame.shape[:2]
	h2, w2 = query_frame.shape[:2]
	org_imgs = np.zeros((max(h1, h2), w1+w2,3), np.uint8)
	org_imgs[:h1, :w1] = train_bckp
	org_imgs[:h2, w1:w1+w2] = query_bckp
	for m in good:
		# draw the keypoints
		# print m.queryIdx, m.trainIdx, m.distance
		color = tuple([np.random.randint(0, 255) for _ in xrange(3)])
		cv2.line(org_imgs, (int(train_keypoints[m.queryIdx].pt[0]), int(train_keypoints[m.queryIdx].pt[1])) , (int(query_keypoints[m.trainIdx].pt[0] + w1), int(query_keypoints[m.trainIdx].pt[1])), color)
		cv2.circle(org_imgs, (int(train_keypoints[m.queryIdx].pt[0]), int(train_keypoints[m.queryIdx].pt[1])) , 5, color, 1)
		cv2.circle(org_imgs, (int(query_keypoints[m.trainIdx].pt[0] + w1), int(query_keypoints[m.trainIdx].pt[1])) , 5, color, 1)
	cv2.imshow('original', org_imgs)
	cv2.imshow(name, img)
Example No. 5
def compute_rectification_transforms(intrinsics_left, intrinsics_right, image_size, rotation_matrix, translation_vector):
    '''
    Computes rectification transforms for stereo vision system

    Arguments:
    intrinsics_left -- a tuple (camera_matrix_left, dist_coefs_left)
                       containing the left camera's intrinsic parameters:
                       camera matrix and distortion coefficients
    intrinsics_right -- a tuple (camera_matrix_right, dist_coefs_right)
                        containing the right camera's intrinsic parameters:
                        camera matrix and distortion coefficients
    image_size -- a tuple describing the size of an image in pixels
                  (returned by cvfunctions.images.get_image_size function)
    rotation_matrix -- rotation matrix between left and right camera
                       coordinate systems
    translation_vector -- translation vector between left and right camera
                          coordinate systems
                          
    Returns a tuple as a result of the cv2.stereoRectify function call,
    containing the following results:
    R1, R2, P1, P2, Q, validPixROI1, validPixROI2
    '''    
    camera_matrix_left, dist_coefs_left = intrinsics_left    
    camera_matrix_right, dist_coefs_right = intrinsics_right        
    res = cv2.stereoRectify(camera_matrix_left, dist_coefs_left, camera_matrix_right, dist_coefs_right, image_size, rotation_matrix, translation_vector)
    return res
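A minimal usage sketch for the helper above; the intrinsics, distortion coefficients, image size, and extrinsics below are illustrative placeholders, not values from the original project:

import cv2
import numpy as np

# hypothetical calibration data, for illustration only
camera_matrix_left = np.array([[700., 0., 320.], [0., 700., 240.], [0., 0., 1.]])
camera_matrix_right = np.array([[705., 0., 318.], [0., 705., 242.], [0., 0., 1.]])
dist_coefs_left = np.zeros(5)
dist_coefs_right = np.zeros(5)
rotation_matrix = np.eye(3)                    # relative rotation between the cameras
translation_vector = np.array([-60., 0., 0.])  # baseline along the x axis

R1, R2, P1, P2, Q, roi1, roi2 = compute_rectification_transforms(
    (camera_matrix_left, dist_coefs_left),
    (camera_matrix_right, dist_coefs_right),
    (640, 480), rotation_matrix, translation_vector)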
Example No. 6
def rectify(mtx1, dist1, mtx2, dist2, R, T):
    # R1/R2: rectification rotations that align the image rows; P1/P2: 3x4 left/right projection matrices; Q: 4x4 reprojection matrix
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
        mtx1, dist1,
        mtx2, dist2,
        (BINIMG_W, BINIMG_H),
        R,
        T,
        flags=cv2.CALIB_ZERO_DISPARITY,
        alpha=-1,
        newImageSize=(BINIMG_W, BINIMG_H)
    )
    if __name__ == '__main__':
        printMat(R1, R2, P1, P2, Q, roi1, roi2)
    # compute the transformation maps (mapx, mapy) needed to build the rectified images
    mapx1, mapy1 = cv2.initUndistortRectifyMap(
        mtx1, dist1,
        R1, P1,
        (BINIMG_W, BINIMG_H),
        cv2.CV_16SC2
    )
    mapx2, mapy2 = cv2.initUndistortRectifyMap(
        mtx2, dist2,
        R2, P2,
        (BINIMG_W, BINIMG_H),
        cv2.CV_16SC2
    )
    return mapx1, mapy1, mapx2, mapy2, Q, roi1, roi2
Example No. 7
    def __stereo(self, left, right):
        # return self.undistortLeft(left), self.undistortRight(right)
        if not self.maps_read:
            # clear init flag
            self.maps_read = True
            # use stereoRectify to calculate what we need to rectify stereo images
            M1 = self.info["cameraMatrix1"]
            d1 = self.info["distCoeffs1"]
            M2 = self.info["cameraMatrix2"]
            d2 = self.info["distCoeffs2"]
            size = self.info['imageSize']
            R = self.info['R']
            T = self.info['T']
            h, w = size[:2]
            R1, R2, self.P1, self.P2, self.Q, roi1, roi2 = cv2.stereoRectify(M1, d1, M2, d2, (w,h), R, T, alpha=self.alpha)

            # these return undistortion and rectification maps which are both
            # stored in maps_x for camera 1 and 2
            self.maps_1 = cv2.initUndistortRectifyMap(M1, d1, R1, self.P1, (w,h), cv2.CV_16SC2)  # CV_32F?
            self.maps_2 = cv2.initUndistortRectifyMap(M2, d2, R2, self.P2, (w,h), cv2.CV_16SC2)

        # return self.__fix2(left, self.maps_1), self.__fix2(right, self.maps_2)
        inter=cv2.INTER_LANCZOS4
        return (cv2.remap(left, self.maps_1[0], self.maps_1[1], inter),
                cv2.remap(right, self.maps_2[0], self.maps_2[1], inter))
Example No. 8
 def calibrate_cameras(self):
     """Calibrate cameras based on found chessboard corners."""
     criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
     flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + cv2.CALIB_SAME_FOCAL_LENGTH
     calib = StereoCalibration()
     (
         calib.cam_mats["left"],
         calib.dist_coefs["left"],
         calib.cam_mats["right"],
         calib.dist_coefs["right"],
         calib.rot_mat,
         calib.trans_vec,
         calib.e_mat,
         calib.f_mat,
     ) = cv2.stereoCalibrate(
         self.object_points,
         self.image_points["left"],
         self.image_points["right"],
         self.image_size,
         criteria=criteria,
         flags=flags,
     )[
         1:
     ]
     (
         calib.rect_trans["left"],
         calib.rect_trans["right"],
         calib.proj_mats["left"],
         calib.proj_mats["right"],
         calib.disp_to_depth_mat,
         calib.valid_boxes["left"],
         calib.valid_boxes["right"],
     ) = cv2.stereoRectify(
         calib.cam_mats["left"],
         calib.dist_coefs["left"],
         calib.cam_mats["right"],
         calib.dist_coefs["right"],
         self.image_size,
         calib.rot_mat,
         calib.trans_vec,
         flags=0,
     )
     for side in ("left", "right"):
         (calib.undistortion_map[side], calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
             calib.cam_mats[side],
             calib.dist_coefs[side],
             calib.rect_trans[side],
             calib.proj_mats[side],
             self.image_size,
             cv2.CV_32FC1,
         )
     # This is replaced because my results were always bad. Estimates are
     # taken from the OpenCV samples.
     width, height = self.image_size
     focal_length = 0.8 * width
     calib.disp_to_depth_mat = np.float32(
         [[1, 0, 0, -0.5 * width], [0, -1, 0, 0.5 * height], [0, 0, 0, -focal_length], [0, 0, 1, 0]]
     )
     return calib
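The hand-built disparity-to-depth matrix above mirrors the Q estimate used in the OpenCV samples; the sketch below shows how such a matrix is typically consumed, using a made-up disparity map rather than anything produced by this class:

import cv2
import numpy as np

# made-up image size and disparity map, for illustration only
width, height = 640, 480
disparity = np.random.uniform(1.0, 64.0, (height, width)).astype(np.float32)

focal_length = 0.8 * width
disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                [0, -1, 0, 0.5 * height],
                                [0, 0, 0, -focal_length],
                                [0, 0, 1, 0]])

# back-project every (x, y, disparity) pixel into a 3D point
points_3d = cv2.reprojectImageTo3D(disparity, disp_to_depth_mat)
print(points_3d.shape)  # (480, 640, 3)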
Example No. 9
def stereo_rectify(config, R, T):
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
                        config.camera.left_intrinsics,
                        config.camera.left_distortion,
                        config.camera.right_intrinsics,
                        config.camera.right_distortion,
                        config.camera.size, R, T, alpha=0)

    return R1, R2, P1, P2
Example No. 10
    def doThings(self):
        sgbm = cv2.StereoSGBM()
        sgbm.SADWindowSize, numberOfDisparitiesMultiplier, sgbm.preFilterCap, sgbm.minDisparity, \
        sgbm.uniquenessRatio, sgbm.speckleWindowSize, sgbm.P1, sgbm.P2, \
        sgbm.speckleRange = [v for v,_ in self.params.itervalues()]
        sgbm.numberOfDisparities = numberOfDisparitiesMultiplier*16
        sgbm.disp12MaxDiff = -1
        sgbm.fullDP = False
        R1, R2, P1, P2, Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2, 
                                (self.top.shape[1],self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1, 
                                                         (self.top.shape[1],self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2, 
                                                               (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR);
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        top_small = cv2.resize(self.top_r, (self.top_r.shape[1]/2,self.top_r.shape[0]/2))
        bottom_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1]/2,self.bottom_r.shape[0]/2))
        cv2.imshow('top', top_small);
        cv2.imshow('bottom', bottom_small);
        
#        top_r = cv2.equalizeHist(top_r)
        top_r = cv2.blur(self.top_r, (5,5))
#        bottom_r = cv2.equalizeHist(bottom_r)
        bottom_r = cv2.blur(self.bottom_r, (5,5))
        dispTop = sgbm.compute(top_r.T, bottom_r.T).T;
        dispTopPositive = dispTop
        dispTopPositive[dispTop<0] = 0
        disp8 = (dispTopPositive / (sgbm.numberOfDisparities * 16.) * 255).astype(np.uint8);
        disp_small = cv2.resize(disp8, (disp8.shape[1]/2, disp8.shape[0]/2));
        cv2.imshow(self.winname, disp_small);
        
        self.disp8 = disp8
        self.xyz = cv2.reprojectImageTo3D(dispTop, Q, handleMissingValues=True)
#        self.xyzrgb = np.zeros((self.xyz.shape[0],self.xyz.shape[1],4))
        
#        import struct
#        def color_to_float(color):
#            if color.size == 1:
#                color = [color]*3
#            rgb = (color[2] << 16 | color[1] << 8 | color[0]);
#            rgb_hex = hex(rgb)[2:-1]
#            s = '0'*(8-len(rgb_hex)) + rgb_hex.capitalize()
##            print color, rgb, hex(rgb)
#            rgb_float = struct.unpack('!f', s.decode('hex'))[0]
##            print rgb_float
#            return rgb_float
        
#        for i in range(self.xyz.shape[0]):
#            for j in range(self.xyz.shape[1]):
#                self.xyzrgb[i,j] = np.append(self.xyz[i,j], color_to_float(self.top[i,j])) 
        
Example No. 11
def stereoCalibration(objpoints, imgpoints1, imgpoints2, camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2, shape):
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpoints1, imgpoints2, camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2, shape,
        flags=cv2.CALIB_FIX_INTRINSIC, criteria=(cv2.TermCriteria_MAX_ITER + cv2.TermCriteria_EPS, 30, 1e-6))
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(camera_matrix1, dist_coefs1, camera_matrix2, dist_coefs2, shape, R, T,
                                                      flags=cv2.CALIB_ZERO_DISPARITY, alpha=1)

    return Q
Example No. 12
    def set_alpha(self, a):
        """
        Set the alpha value for the calibrated camera solution. The
        alpha value is a zoom, and ranges from 0 (zoomed in, all pixels
        in calibrated image are valid) to 1 (zoomed out, all pixels in
        original image are in calibrated image).
        """

        cv2.stereoRectify(self.l.intrinsics,
                         self.l.distortion,
                         self.r.intrinsics,
                         self.r.distortion,
                         self.size,
                         self.R,
                         self.T,
                         self.l.R, self.r.R, self.l.P, self.r.P,
                         alpha = a)
        
        cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1,
                                   self.l.mapx, self.l.mapy)
        cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1,
                                   self.r.mapx, self.r.mapy)
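The docstring above describes alpha as a zoom between 0 and 1; here is a small standalone sketch (with made-up intrinsics, not this class's data) showing how the valid-pixel ROIs returned by cv2.stereoRectify change with alpha:

import cv2
import numpy as np

# made-up stereo calibration, for illustration only
K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
dist = np.array([-0.3, 0.1, 0., 0., 0.])
R = np.eye(3)
T = np.array([-60., 0., 0.])
size = (640, 480)

for alpha in (0.0, 1.0):
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, dist, K, dist, size, R, T, alpha=alpha)
    # alpha=0: rectified images are cropped so that every pixel is valid;
    # alpha=1: all source pixels are kept, so the valid ROIs shrink
    print(alpha, roi1, roi2)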
Example No. 13
    def __init__(self, params, winname, top, bottom):
        self.top = top
        self.bottom = bottom
        
        top_small = cv2.resize(top, (top.shape[1] / 2, top.shape[0] / 2))
        bottom_small = cv2.resize(bottom, (bottom.shape[1] / 2, bottom.shape[0] / 2))
        cv2.imshow('top', top_small);
        cv2.imshow('bottom', bottom_small);

        extrinsic_filepath = config.PROJPATH + 'extrinsics.yml'
        intrinsic_filepath = config.PROJPATH + 'intrinsics.yml'
        self.R = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R'))
        self.T = np.asarray(cv2.cv.Load(extrinsic_filepath, name='T'))
        self.R1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R1'))
        self.R2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='R2'))
        self.P1 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P1'))
        self.P2 = np.asarray(cv2.cv.Load(extrinsic_filepath, name='P2'))
        self.Q = np.asarray(cv2.cv.Load(extrinsic_filepath, name='Q'))
        self.M1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M1'))
        self.M2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='M2'))
        self.D1 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D1'))
        self.D2 = np.asarray(cv2.cv.Load(intrinsic_filepath, name='D2'))
        
        self.do_tune = config.TUNE_DISPARITY_MAP
        
        R1, R2, P1, P2, self.Q, topValidRoi, bottomValidRoi = cv2.stereoRectify(self.M1, self.D1, self.M2, self.D2,
                        (self.top.shape[1], self.top.shape[0]), self.R, self.T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1)

        top_map1, top_map2 = cv2.initUndistortRectifyMap(self.M1, self.D1, R1, P1,
                                    (self.top.shape[1], self.top.shape[0]), cv2.CV_16SC2)
        bottom_map1, bottom_map2 = cv2.initUndistortRectifyMap(self.M2, self.D2, R2, P2,
                                (self.bottom.shape[1], self.bottom.shape[0]), cv2.CV_16SC2)
        
        self.top_r = cv2.remap(self.top, top_map1, top_map2, cv2.cv.CV_INTER_LINEAR);
        self.bottom_r = cv2.remap(self.bottom, bottom_map1, bottom_map2, cv2.cv.CV_INTER_LINEAR)
        
        top_r_small = cv2.resize(self.top_r, (self.top_r.shape[1] / 2, self.top_r.shape[0] / 2))
        bottom_r_small = cv2.resize(self.bottom_r, (self.bottom_r.shape[1] / 2, self.bottom_r.shape[0] / 2))
        cv2.imshow('top rectified', top_r_small);
        cv2.imshow('bottom rectified', bottom_r_small);
        
        tx1,ty1,tx2,ty2 = topValidRoi
        bx1,by1,bx2,by2 = bottomValidRoi
        self.roi = (max(tx1, bx1), max(ty1, by1), min(tx2, bx2), min(ty2, by2))
        self.top_r = cv2.blur(self.top_r, (5, 5))
        self.bottom_r = cv2.blur(self.bottom_r, (5, 5))
#        top_r = cv2.equalizeHist(self.top_r)
#        bottom_r = cv2.equalizeHist(self.bottom_r)
        
        super(SGBMTuner, self).__init__(params, winname)
Example No. 14
def process(controller):
    map_initialized = False
    num_images = 0
    num_samples = 0
    while True:
        frame = controller.frame()
        images = frame.images
        if images[0].is_valid and images[1].is_valid:
            if not map_initialized:
                left_x_map, left_y_map = initDistortionMap(frame.images[0])
                right_x_map, right_y_map = initDistortionMap(frame.images[1])
                map_initialized = True
            undistorted_left = interpolate(get_image_as_array(images[0]), left_x_map, left_y_map)
            undistorted_right = interpolate(get_image_as_array(images[1]), right_x_map, right_y_map)
            left_bw = cv2.adaptiveThreshold(undistorted_left, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 3)
            right_bw = cv2.adaptiveThreshold(undistorted_right, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 3)
            corners_left = cv2.findChessboardCorners(left_bw, board_size, flags=cv2.CALIB_CB_FILTER_QUADS)
            corners_right = cv2.findChessboardCorners(right_bw, board_size, flags=cv2.CALIB_CB_FILTER_QUADS)
            if corners_left[0] and corners_right[0]:
                cv2.cornerSubPix(left_bw, corners_left[1], (3, 3), (-1, -1), stereocalib_criteria)
                cv2.cornerSubPix(right_bw, corners_right[1], (3, 3), (-1, -1), stereocalib_criteria)
                cv2.drawChessboardCorners(undistorted_left, board_size, corners_left[1], corners_left[0])
                cv2.drawChessboardCorners(undistorted_right, board_size, corners_right[1], corners_right[0])
                image_points1.append(corners_left[1])
                image_points2.append(corners_right[1])
                object_points.append(obj)
                num_samples += 1
                print(num_samples)
                if num_samples > num_boards:
                    break
            cv2.imshow('left', undistorted_left)
            cv2.imshow('right', undistorted_right)
            cv2.imshow('left_bw', left_bw)
            cv2.imshow('right_bw', right_bw)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.imwrite(f'left{num_images}.jpg', left_bw)
                cv2.imwrite(f'right{num_images}.jpg', right_bw)
                num_images += 1
                break

    retval,cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
        object_points, image_points1, image_points2, C1, D1, C2, D2, imageSize=(640, 240), flags=stereocalib_flags, criteria=stereocalib_criteria
    )

    print("params")
    print(retval,cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F)

    print("reprojection matrix")
    a = cv2.stereoRectify(C1, D1, C2, D2, (640, 240), R, T)
    print(a[-3])
Example No. 15
def computeStereoRectify(params):
    imsize = (1280,960)
    cameraMatrix1 = params['cam'][0]['KK']
    distCoeffs1 = params['cam'][0]['distort']
    cameraMatrix2 = params['cam'][1]['KK']
    distCoeffs2 = params['cam'][1]['distort']

    (R1, R2, P1, P2, Q, size1, size2) = cv2.stereoRectify(
            cameraMatrix1, distCoeffs1, 
            cameraMatrix2, distCoeffs2, 
            imsize,
            params['cam'][0]['E_R'],
            params['cam'][0]['E_t'])
    
    map1x, map1y = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imsize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imsize, cv2.CV_32FC1)

    return (R1, R2, P1, P2, Q, size1, size2, map1x, map1y, map2x, map2y)
Example No. 16
def rectify(fst_camera, fst_image, snd_camera, snd_image):

    R = calculate_rotation(fst_camera, snd_camera)
    T = calculate_translation(fst_camera, snd_camera)

    height, width = fst_image.shape
    size = (width, height)

    #params:
    #cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(fst_camera.camera_matrix, fst_camera.distortion,
                                                      snd_camera.camera_matrix, snd_camera.distortion,
                                                      size, R, T)

    fst_rectified_image = get_rectified_image(fst_image, fst_camera, R1, P1)
    snd_rectified_image = get_rectified_image(snd_image, snd_camera, R2, P2)

    return fst_rectified_image, R1, snd_rectified_image, Q
Example No. 17
    def plot_rectified_images(self, feat_mode="SURF"):
        """Plots rectified images

            This method computes and plots a rectified version of the two
            images side by side.

            :param feat_mode: whether to use rich descriptors for feature
                              matching ("surf") or optic flow ("flow")
        """
        self._extract_keypoints(feat_mode)
        self._find_fundamental_matrix()
        self._find_essential_matrix()
        self._find_camera_matrices_rt()

        R = self.Rt2[:, :3]
        T = self.Rt2[:, 3]
        #perform the rectification
        R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(self.K, self.d,
                                                          self.K, self.d,
                                                          self.img1.shape[:2],
                                                          R, T, alpha=1.0)
        mapx1, mapy1 = cv2.initUndistortRectifyMap(self.K, self.d, R1, self.K,
                                                   self.img1.shape[:2],
                                                   cv2.CV_32F)
        mapx2, mapy2 = cv2.initUndistortRectifyMap(self.K, self.d, R2, self.K,
                                                   self.img2.shape[:2],
                                                   cv2.CV_32F)
        img_rect1 = cv2.remap(self.img1, mapx1, mapy1, cv2.INTER_LINEAR)
        img_rect2 = cv2.remap(self.img2, mapx2, mapy2, cv2.INTER_LINEAR)

        # draw the images side by side
        total_size = (max(img_rect1.shape[0], img_rect2.shape[0]),
                      img_rect1.shape[1] + img_rect2.shape[1], 3)
        img = np.zeros(total_size, dtype=np.uint8)
        img[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
        img[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2

        # draw horizontal lines every 25 px across the side by side image
        for i in range(20, img.shape[0], 25):
            cv2.line(img, (0, i), (img.shape[1], i), (255, 0, 0))

        cv2.imshow('imgRectified', img)
        cv2.waitKey()
Example No. 18
def compute_stereo_rectification_maps(stereo_rig, im_size, size_factor):
    new_size = (int(im_size[1] * size_factor), int(im_size[0] * size_factor))
    rotation1, rotation2, pose1, pose2 = \
        cv2.stereoRectify(cameraMatrix1=stereo_rig.cameras[0].intrinsics.intrinsic_mat,
                          distCoeffs1=stereo_rig.cameras[0].intrinsics.distortion_coeffs,
                          cameraMatrix2=stereo_rig.cameras[1].intrinsics.intrinsic_mat,
                          distCoeffs2=stereo_rig.cameras[1].intrinsics.distortion_coeffs,
                          imageSize=(im_size[1], im_size[0]),
                          R=stereo_rig.cameras[1].extrinsics.rotation,
                          T=stereo_rig.cameras[1].extrinsics.translation,
                          flags=cv2.CALIB_ZERO_DISPARITY,
                          newImageSize=new_size
                          )[0:4]
    map1x, map1y = cv2.initUndistortRectifyMap(stereo_rig.cameras[0].intrinsics.intrinsic_mat,
                                               stereo_rig.cameras[0].intrinsics.distortion_coeffs,
                                               rotation1, pose1, new_size, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(stereo_rig.cameras[1].intrinsics.intrinsic_mat,
                                               stereo_rig.cameras[1].intrinsics.distortion_coeffs,
                                               rotation2, pose2, new_size, cv2.CV_32FC1)
    return map1x, map1y, map2x, map2y
Example No. 19
 def calibrate(self):
     left = self.detect_squares(self.left_images)
     right = self.detect_squares(self.right_images)
     print "Sweet camera calibration is done. \n now for the stereo data. please wait..."
     (retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
       R, T, E, F) = cv2.stereoCalibrate(left[0], left[1], right[1], left[2],
                                         left[3], left[4], right[3], right[4])
     self.retval = retval
     self.size = left[3]
     self.rvects = (left[5], right[5])
     self.tvects = (left[6], right[6])
     self.camera = (left[1], right[1])#(cameraMatrix1, cameraMatrix2)
     self.distortion = (left[2], right[2])#(distCoeffs1, distCoeffs2)
     self.r = R
     self.t = T
     self.e = E
     self.f = F
     self.save()
     print "running stereo rectify in case it is needed later"
     R1, R2, P1, P2, Q, valid1, valid2 = cv2.stereoRectify(self.camera[0], 
                     self.distortion[0], self.camera[1], self.distortion[1],
                     left[3], self.r, self.t)
Example No. 20
    def StereoRectify(self, R, T):
        """Computes rectification transforms for each head of a calibrated stereo camera."""
        # Prepares the external parameters.
        cameraMatrix1 = StereoCameras.Instance.Parameters.CameraMatrix1
        distCoeffs1   = StereoCameras.Instance.Parameters.DistCoeffs1
        cameraMatrix2 = StereoCameras.Instance.Parameters.CameraMatrix2
        distCoeffs2   = StereoCameras.Instance.Parameters.DistCoeffs2

        imageSize = StereoCameras.Instance.Size

        # Computes rectification transforms.
        R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
            cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, alpha=0)

        # Records the external parameters.
        StereoCameras.Instance.Parameters.R1 = R1
        StereoCameras.Instance.Parameters.R2 = R2
        StereoCameras.Instance.Parameters.P1 = P1
        StereoCameras.Instance.Parameters.P2 = P2
        StereoCameras.Instance.Parameters.Q  = Q

        # Define that the stereo cameras are calibrated
        self.IsCalibrated = True
Example No. 21
def rectify(camera_matrix,dist_coefs,Size,R,T,gray,displacement):

    canvasMD = gray[displacement:,:]
    (h, w, q) = canvasMD.shape

    R1,R2,P1,P2,Q,qw,qw2 = cv2.stereoRectify(camera_matrix,dist_coefs,
                      camera_matrix,dist_coefs,
                      (w,h),
                      R,
                      T)

    mapx1,mapy1 = cv2.initUndistortRectifyMap(camera_matrix,dist_coefs,R1,P1[:,0:3],(w,h),cv2.CV_32FC1)

    img1 = cv2.remap(canvasMD,mapx1,mapy1,cv2.INTER_LINEAR)

    canvasMU = gray[:displacement,:]
    (h, w, q) = canvasMU.shape

    browse.browse(canvasMU)

    mapx2,mapy2 = cv2.initUndistortRectifyMap(camera_matrix,dist_coefs,R2,P2[:,0:3],(w,h),cv2.CV_32FC1)
    img2 = cv2.remap(canvasMU,mapx2,mapy2,cv2.INTER_LINEAR)

    return img1,img2
Example No. 22
def rectify_stereo_camera(matrix_1, dist_1, matrix_2, dist_2, R, T):

    rotation_1, rotation_2, pose_1, pose_2, Q, roi_left, roi_right = cv2.stereoRectify(
        matrix_1, dist_1, matrix_2, dist_2, (9, 6), R, T)

    return [rotation_1, rotation_2, pose_1, pose_2, Q, roi_left, roi_right]
Example No. 23
def convert_doveeye_to_dlc(config, doveeye_calib_file):
    cfg_3d = auxiliaryfunctions.read_config(config)
    img_path, path_corners, path_camera_matrix, path_undistort = auxiliaryfunctions_3d.Foldernames3Dproject(cfg_3d)
    cam_names = cfg_3d['camera_names']

    dist_pickle = {}
    stereo_params = {}
    for cam in cam_names:
        dist_pickle.setdefault(cam, [])

    doveye_calib=readYAMLFile(doveeye_calib_file)
    for cam in cam_names:
        cam_idx=cameras.index(cam)
        # Save the camera calibration result for later use (we won't use rvecs / tvecs)
        dist_pickle[cam] = {'mtx': doveye_calib['C'][cam_idx], 'dist': doveye_calib['D'][cam_idx], 'objpoints': [], 'imgpoints': []}
        pickle.dump(dist_pickle, open(os.path.join(path_camera_matrix, cam + '_intrinsic_params.pickle'), "wb"))

    for i in range(len(cam_names)):
        cam1_idx=cameras.index(cam_names[i])
        for j in range(i + 1, len(cam_names)):
            cam2_idx=cameras.index(cam_names[j])
            pair = [cam_names[i], cam_names[j]]
            pair_idx=-1
            for potential_pair_idx,potential_pair in enumerate(camera_pairs):
                if (potential_pair[0]==cam_names[i] and potential_pair[1]==cam_names[j]) or (potential_pair[0]==cam_names[j] and potential_pair[1]==cam_names[i]):
                    pair_idx=potential_pair_idx
                    break

            # Stereo Rectification
            rectify_scale = 0.4  # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
            R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(doveye_calib['C'][cam1_idx], doveye_calib['D'][cam1_idx],
                                                              doveye_calib['C'][cam2_idx], doveye_calib['D'][cam2_idx],
                                                              (1086, 2040),
                                                              doveye_calib['R'][pair_idx],
                                                              doveye_calib['T'][pair_idx],
                                                              alpha=rectify_scale)

            stereo_params[pair[0] + '-' + pair[1]] = {"cameraMatrix1": doveye_calib['C'][cam1_idx],
                                                      "cameraMatrix2": doveye_calib['C'][cam2_idx],
                                                      "distCoeffs1": doveye_calib['D'][cam1_idx],
                                                      "distCoeffs2": doveye_calib['D'][cam2_idx],
                                                      "R": doveye_calib['R'][pair_idx],
                                                      "T": doveye_calib['T'][pair_idx],
                                                      "E": [],
                                                      "F": doveye_calib['F'][pair_idx],
                                                      "R1": R1,
                                                      "R2": R2,
                                                      "P1": P1,
                                                      "P2": P2,
                                                      "roi1": roi1,
                                                      "roi2": roi2,
                                                      "Q": Q,
                                                      "image_shape": [(1086,2040),(1086,2040)]}

    print('Saving the stereo parameters for every pair of cameras as a pickle file in %s' % str(
        os.path.join(path_camera_matrix)))

    auxiliaryfunctions.write_pickle(os.path.join(path_camera_matrix, 'stereo_params.pickle'), stereo_params)
    print("Camera calibration done! Use the function ``check_undistortion`` to check the check the calibration")

    pass
Example No. 24
    def camera_cali(self):
        # calibration a single camera
        '''
        for c in xrange(0, 1):
            # 3d points in real world space
            obj_pts = []
            # 2d points in image plane
            img_pts = []
            boards = self.boards[c].chessboards
            corners = self.corners[c].corners_pts
            # check if this image is the reference image
            if c == 0:
                for b in xrange(0, len(boards)):
                    h, w = len(boards[b]), len(boards[b][0])
                    obj_pts_board = np.zeros((h * w, 3), np.float32)
                    img_pts_board = np.zeros((h * w, 2), np.float32)
                    board = boards[b]
                    for i in xrange(0, h):
                        for j in xrange(0, w):
                            obj_pts_board[i * w + j, 0] = j * self.corner_dist
                            obj_pts_board[i * w + j, 1] = i * self.corner_dist
                            img_pts_board[i * w + j, 0] = corners[int(board[i, j])][0]
                            img_pts_board[i * w + j, 1] = corners[int(board[i, j])][1]
                    obj_pts.append(obj_pts_board)
                    img_pts.append(img_pts_board)

            camera_mat = np.zeros((3, 3), np.float32)
            camera_mat[0, 0] = 900.
            camera_mat[0, 2] = len(self.img[c][0]) / 2
            camera_mat[1, 1] = 900.
            camera_mat[1, 2] = len(self.img[c]) / 2
            camera_mat[2, 2] = 1.
            ret, mtx, dist, rv, tv = cv2.calibrateCamera(obj_pts, img_pts,
                                                         (len(self.img[c][0]), len(self.img[c])),
                                                         camera_mat, None, None, None,
                                                         flags=(cv2.CALIB_FIX_ASPECT_RATIO +
                                                                cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_K3))

        print(mtx, dist)

        '''
        # first camera/image is the reference camera/image
        boards_ref = self.boards[0].chessboards
        corners_ref = self.corners[0].corners_pts
        # only support two-camera calibration
        for c in xrange(1, len(self.boards)):
            match = self.matching[2 * (c - 1)]
            rot = self.matching[2 * (c - 1) + 1]
            print match
            print rot
            obj_pts_ref = []
            img_pts_ref = []
            obj_pts_tar = []
            img_pts_tar = []
            boards_tar = self.boards[c].chessboards
            corners_tar = self.corners[c].corners_pts
            for b in xrange(0, len(match)):
                if match[b] != -1:
                    # reference camera
                    obj_pts_board_ref = []
                    img_pts_board_ref = []
                    board_ref = boards_ref[b]

                    for i in xrange(0, len(board_ref)):
                        for j in xrange(0, len(board_ref[0])):
                            obj_pts_board_ref.append([
                                j * self.corner_dist, i * self.corner_dist, 0
                            ])
                            img_pts_board_ref.append(corners_ref[board_ref[i,
                                                                           j]])

                    # target camera
                    obj_pts_board_tar = []
                    img_pts_board_tar = []
                    board_tar = boards_tar[int(match[b])]

                    for k in xrange(0, int(rot[b])):
                        board_tar = np.flipud(board_tar).T

                    for i in xrange(0, len(board_tar)):
                        for j in xrange(0, len(board_tar[0])):
                            obj_pts_board_tar.append([
                                j * self.corner_dist, i * self.corner_dist, 0
                            ])
                            img_pts_board_tar.append(corners_tar[board_tar[i,
                                                                           j]])

                    obj_pts_ref.append(
                        np.array(obj_pts_board_ref).astype(np.float32))
                    img_pts_ref.append(
                        np.array(img_pts_board_ref).astype(np.float32))
                    obj_pts_tar.append(
                        np.array(obj_pts_board_tar).astype(np.float32))
                    img_pts_tar.append(
                        np.array(img_pts_board_tar).astype(np.float32))
            '''
            # visualize the corners found on image
            for no_board in xrange(0, len(img_pts_ref)):
                I_1 = self.img[0]
                I_2 = self.img[1]
                for i in xrange(0, len(img_pts_ref[no_board])):
                    x = int(math.ceil(img_pts_ref[no_board][i, 0]))
                    y = int(math.ceil(img_pts_ref[no_board][i, 1]))
                    I_1[y-2:y+2, x-2:x+2] = [0, 0, 255]
                cv2.imshow('left', I_1)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
                for i in xrange(0, len(img_pts_tar[no_board])):
                    x = int(math.ceil(img_pts_tar[no_board][i, 0]))
                    y = int(math.ceil(img_pts_tar[no_board][i, 1]))
                    I_2[y-2:y+2, x-2:x+2] = [0, 0, 255]
                cv2.imshow('right', I_2)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            '''

            mono_ret, mono_mat_1, mono_dist_1, mono_r, mono_t = \
                cv2.calibrateCamera(obj_pts_ref, img_pts_ref, (len(self.img[c][0]), len(self.img[c])), None, None,
                                    flags=(cv2.CALIB_FIX_K3 + cv2.CALIB_FIX_K4 + cv2.CALIB_FIX_K5))

            print 'mono', mono_mat_1, mono_dist_1

            # stereo camera calibration
            ret, cam_mat_1, cam_dist_1, cam_mat_2, cam_dist_2, R, T, E, F = \
                cv2.stereoCalibrate(obj_pts_ref, img_pts_ref, img_pts_tar, None, None, None, None,
                                    (len(self.img[c][0]), len(self.img[c])),
                                    flags=(cv2.CALIB_FIX_K4 + cv2.CALIB_FIX_K5))
            rect_rot_1, rect_rot_2, cam_proj_1, cam_proj_2, Q, ROI1, ROI2 = \
                cv2.stereoRectify(cam_mat_1, cam_dist_1, cam_mat_2, cam_dist_2,
                                  (len(self.img[c][0]), len(self.img[c])), R, T,
                                  None, None, None, None, None, flags=(cv2.CALIB_ZERO_DISPARITY),
                                  alpha=0, newImageSize=(1091, 547))
            # DEBUG
            cam_mat_1 = np.array([[6.47816e2, 0.0, 7.71052e2],
                                  [0.0, 6.45650e2, 4.33588e2], [0.0, 0.0,
                                                                1.0]])
            cam_mat_2 = np.array([[6.47793e2, 0.0, 7.49001e2],
                                  [0.0, 6.47026e2, 4.44525e2], [0.0, 0.0,
                                                                1.0]])
            R = np.array([[9.99915e-01, -2.07121e-03, -1.28613e-02],
                          [2.03651e-03, 9.99994e-01, -2.70997e-03],
                          [1.28669e-02, 2.68355e-03, 9.9991e-01]])
            T = np.array([[-6.311307521502e-01], [-3.756969942287e-03],
                          [8.773418730107e-03]])

            self.intrinsic.append(cam_mat_1)
            self.intrinsic.append(cam_mat_2)
            self.distortion.append(cam_dist_1)
            self.distortion.append(cam_dist_2)
            self.rotation.append(R)
            self.translation.append(T)
            self.cam_proj.append(cam_proj_1)
            self.cam_proj.append(cam_proj_2)
            self.rect_rot.append(rect_rot_1)
            self.rect_rot.append(rect_rot_2)

            for b in xrange(0, len(img_pts_ref)):
                # TODO: update corner pixel coordinates to new rectified coordinate and
                # use rectified projection matrix
                cam_proj_1 = np.concatenate((self.intrinsic[0], np.zeros(
                    (3, 1))),
                                            axis=1)
                cam_proj_2 = self.intrinsic[1].dot(
                    np.concatenate((self.rotation[0], self.translation[0]),
                                   axis=1))
                obj_pts_board = cv2.triangulatePoints(
                    cam_proj_1, cam_proj_2,
                    img_pts_ref[b].astype(np.float32).T,
                    img_pts_tar[b].astype(np.float32).T)
                obj_pts_board = obj_pts_board.T
                obj_pts_board = obj_pts_board[:, 0:3] / obj_pts_board[:, 3:4]
                self.boards_pts_set.append(obj_pts_board)

                # visualize the corners found on image
                '''
Example No. 25
import cv2
import numpy as np

K0 = np.load('CMAT_DIST/logitech_c310_cam_mat.npy')
dist0 = np.load('CMAT_DIST/logitech_c310_dist_coeffs.npy')
K1 = np.load('CMAT_DIST/logitech_c310_cam_mat.npy')
dist1 = np.load('CMAT_DIST/logitech_c310_dist_coeffs.npy')

R = np.load('STEREO_CALIB/stereo_rmat.npy')
t = np.load('STEREO_CALIB/stereo_tvec.npy')

R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(K0, dist0, K1, dist1, (1280, 720), R, t)



x = np.dot(R1, P1)
print(x)
y = np.dot(R2, P2)
print(y)
np.save('STEREO_RECTIFY/P1', x)
np.save('STEREO_RECTIFY/P2', y)


Example No. 26
  def publish_3d_coords(self, left_max_x, left_max_y, right_max_x, right_max_y, imageSize):
    # monocular calibration: 1 = left, 2 = right
    CMatr1 = np.matrix([[2531.915668, 0.000000, 615.773452],  
    [0.000000, 2594.436434, 344.505755],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    pp = pprint.PrettyPrinter(indent=4)

    # print("CMatr1", CMatr1)
    #left distortion parameters: 1.281681 -15.773048 -0.010428 0.012822 0.000000

    CMatr2 = np.matrix([[1539.714285, 0.000000, 837.703760],
    [0.000000, 1506.265655, 391.687374],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    # print("CMatr2", CMatr2)

    projPoints1 = np.array([[left_max_x],[left_max_y]]).astype(np.float)

    projPoints2 = np.array([[right_max_x],[right_max_y]]).astype(np.float)

    distort_left = np.array([1.281681, -15.773048, -0.010428, 0.012822, 0.000000]).astype(np.float)
    distort_right = np.array([0.091411, -0.461269, 0.021006, 0.040117, 0.000000]).astype(np.float)
    # print("distort_left", distort_left)
    # print("distort_right", distort_right)

    RMat1 = self.quatToRot(np.array([-0.029, 0.992, -0.110, 0.053]))
    RMat2 = self.quatToRot(np.array([-0.019, 0.997, -0.038, -0.071]))



    RFinal = np.matmul(np.linalg.inv(RMat1),RMat2)

    T_left = np.array([-0.268, 0.516, 3.283]).astype(np.float) #--------------------CHANGE

    T_right = np.array([0.018, -0.184, 1.255]).astype(np.float) #--------------------CHANGE

    T_final = T_right - T_left

    #print("RFinal", RFinal)
    #print("T_final", T_final)
    #print(imageSize)

    R1,R2,P1,P2,Q, a,b = cv2.stereoRectify(CMatr1, distort_left, CMatr2, distort_right, (1280,720), RFinal, T_final,  alpha=-1)
    

    #print("R1",R1)
    #print("R2",R2)CMatr1
    #print("P1",P1)
    #print("P2",P2)

    #pnt1 = cv2.undistortPoints(projPoints1, CMatr1, distort_left, R=RMat1, P=P1)
    #pnt2 = cv2.undistortPoints(projPoints2, CMatr2, distort_right, R=RMat2, P=P2)

    #print("left:",projPoints1)
    #print("right:", projPoints2)

    P1_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 0.0],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])
    P2_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 8092.200252104331],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])


    points4D = cv2.triangulatePoints(P1_stereo, P2_stereo, projPoints1, projPoints2)
    #points3D = cv2.convertPointsFromHomogeneous(points4D)
    #print(points4D)

    #Converts 4D to 3D by [x,y,z,w] -> [x/w, y/w, z/w]
    
    points3D = np.array([  points4D[0]/points4D[3], points4D[1]/points4D[3], points4D[2]/points4D[3] ])
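The final step above divides by the homogeneous coordinate by hand; cv2.convertPointsFromHomogeneous performs the same normalization. A tiny sketch with a made-up point (unrelated to the projection matrices above):

import cv2
import numpy as np

# one made-up homogeneous point [x, y, z, w], for illustration only
points4D = np.array([[2.0], [4.0], [6.0], [2.0]])

manual = points4D[:3] / points4D[3]                                   # [x/w, y/w, z/w]
opencv = cv2.convertPointsFromHomogeneous(points4D.T.astype(np.float32))

print(manual.ravel())         # [1. 2. 3.]
print(opencv.reshape(-1, 3))  # [[1. 2. 3.]]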
Example No. 27
retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
    objpoints,
    imgpointsL,
    imgpointsR,
    mtxL,
    distL,
    mtxR,
    distR,
    ChessImaR.shape[::-1],
    criteria=criteria_stereo,
    flags=cv2.CALIB_FIX_INTRINSIC)

# StereoRectify function
rectify_scale = 0  # alpha: if 0 the image is cropped, if 1 it is not cropped
RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
    MLS, dLS, MRS, dRS, ChessImaR.shape[::-1], R, T,
    alpha=rectify_scale, newImageSize=(0, 0))
# initUndistortRectifyMap function
Left_Stereo_Map = cv2.initUndistortRectifyMap(
    MLS, dLS, RL, PL, ChessImaR.shape[::-1], cv2.CV_16SC2
)  # cv2.CV_16SC2: this map format lets the program run faster
Right_Stereo_Map = cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                               ChessImaR.shape[::-1],
                                               cv2.CV_16SC2)
#*******************************************
#***** Parameters for the StereoVision *****
#*******************************************

# Create StereoSGBM and prepare all parameters
window_size = 3
min_disp = 2
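The snippet stops at these parameters; for context, here is a sketch of how an SGBM matcher is commonly built from values like these. The numDisparities value and the remaining settings are assumptions taken from the usual OpenCV samples, not the original author's:

import cv2

window_size = 3
min_disp = 2
num_disp = 128  # must be divisible by 16

stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                               numDisparities=num_disp,
                               blockSize=window_size,
                               P1=8 * 3 * window_size ** 2,
                               P2=32 * 3 * window_size ** 2,
                               uniquenessRatio=10,
                               speckleWindowSize=100,
                               speckleRange=32,
                               disp12MaxDiff=1)
# disparity = stereo.compute(rectified_left, rectified_right) / 16.0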
Example No. 28
    K2 = np.array([[5.665439587514537e+02, 0, 3.300422688805023e+02],\
                   [0, 7.557038654043611e+02, 2.679839339287851e+02],\
                   [0, 0, 1]])
    D2 = np.array([[-0.242511849444114, 0.980331254691578,\
                    0.002963361851048, 0.003507404137556, 0.00000]])
    R = np.array([[0.999809488415120, -3.803081241759167e-04, 0.019515179753873],\
                  [4.462727193334061e-04, 1, -0.003375924839205],\
                  [-0.019513782712618, 0.003383990778750, 0.999803861210115]])
    T = np.array([-58.953856363020520, 0.403802936505059, -1.051816198390031])

    rotation1, rotation2, pose1, pose2 = cv2.stereoRectify(
        cameraMatrix1=K1,
        distCoeffs1=D1,
        cameraMatrix2=K2,
        distCoeffs2=D2,
        imageSize=(dispW, dispH),
        R=R,
        T=T,
        #flags=cv2.CALIB_ZERO_DISPARITY,
        #newImageSize=(dispW, dispH)
    )[0:4]
    #print("Retify coefficients calculated")

    if not cap_left.isOpened() and not cap_right.isOpened(
    ):  # If we can't get images from both sources, error
        print("Can't opened the streams!")
        sys.exit(-9)

    # Change the resolution if needed
#    cap_right.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  # float
#    cap_right.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # float
    start_time = time.time()
    retL, frameL = capL.read()
    retR, frameR = capR.read()



    # image size
    height, width, ch = frameL.shape
    imSize = (width, height)


    # rectify: get the new camera matrices and rectification transforms
    # P1, P2 are the new (rectified) projection matrices
    # R1, R2 are the rectification transforms
    R1, R2, P1, P2, Q, roiL, roiR = cv2.stereoRectify(camMatL, distCoL, camMatR, distCoR, imSize, R, T,
                                                      flags=0, alpha=-1, newImageSize=(0, 0))

    # apply the rectification transforms R1/R2 with the new camera matrices P1/P2 from stereoRectify above
    mapxL, mapyL = cv2.initUndistortRectifyMap(camMatL, distCoL, R1, P1, imSize, cv2.CV_32FC1)
    mapxR, mapyR = cv2.initUndistortRectifyMap(camMatR, distCoR, R2, P2, imSize, cv2.CV_32FC1)
    #remap cameras to remove distortion
    rectL = cv2.remap(frameL,mapxL,mapyL,cv2.INTER_LINEAR)
    rectR = cv2.remap(frameR,mapxR,mapyR,cv2.INTER_LINEAR)



    # Depth Map
    # convert the rectified frames from BGR to 8-bit grayscale
    grayRectL = cv2.cvtColor(rectL, cv2.COLOR_BGR2GRAY)
    grayRectR = cv2.cvtColor(rectR, cv2.COLOR_BGR2GRAY)
    grayFrameL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
Example No. 30
_, rightCameraMatrix, rightDistortionCoefficients, _, _ = cv2.calibrateCamera(
    objectPoints, rightImagePoints, imageSize, None, None)

print("Calibrating cameras together...")
(_, _, _, _, _, rotationMatrix, translationVector, _,
 _) = cv2.stereoCalibrate(objectPoints, leftImagePoints, rightImagePoints,
                          leftCameraMatrix, leftDistortionCoefficients,
                          rightCameraMatrix, rightDistortionCoefficients,
                          imageSize, None, None, None, None,
                          cv2.CALIB_FIX_INTRINSIC, TERMINATION_CRITERIA)

print("Rectifying cameras...")
# TODO: Why do I care about the disparityToDepthMap?
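# (The disparityToDepthMap Q is what cv2.reprojectImageTo3D uses to turn a disparity map into 3D points.)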
(leftRectification, rightRectification, leftProjection, rightProjection,
 dispartityToDepthMap, leftROI, rightROI) = cv2.stereoRectify(
     leftCameraMatrix, leftDistortionCoefficients, rightCameraMatrix,
     rightDistortionCoefficients, imageSize, rotationMatrix, translationVector,
     None, None, None, None, None, cv2.CALIB_ZERO_DISPARITY, OPTIMIZE_ALPHA)

print("Saving calibration...")
leftMapX, leftMapY = cv2.initUndistortRectifyMap(leftCameraMatrix,
                                                 leftDistortionCoefficients,
                                                 leftRectification,
                                                 leftProjection, imageSize,
                                                 cv2.CV_32FC1)
rightMapX, rightMapY = cv2.initUndistortRectifyMap(
    rightCameraMatrix, rightDistortionCoefficients, rightRectification,
    rightProjection, imageSize, cv2.CV_32FC1)

np.savez_compressed(outputFile,
                    imageSize=imageSize,
                    leftMapX=leftMapX,
y = tp[1] / tp[3]
z = tp[2] / tp[3]
ax.scatter3D(tp[0] / tp[3],
             tp[1] / tp[3],
             tp[2] / tp[3],
             c=tp[2] / tp[3],
             marker='o')

ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax.set_zlim(-10, 10)

plt.show()

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cmatrix_left, dis_coeff_left,
                                                  cmatrix_right,
                                                  dis_coeff_right,
                                                  img2.shape[:2], R, T)
np.savetxt(
    'D:/SPRING 2020/CSE 598 Perception in Robotics/Project/Project 2a/project_2a/parameters/R1.txt',
    R1)
np.savetxt(
    'D:/SPRING 2020/CSE 598 Perception in Robotics/Project/Project 2a/project_2a/parameters/R2.txt',
    R2)
np.savetxt(
    'D:/SPRING 2020/CSE 598 Perception in Robotics/Project/Project 2a/project_2a/parameters/Q.txt',
    Q)

(height, width) = img1.shape[:2]
print(height, width)

mapx1, mapy1 = cv2.initUndistortRectifyMap(cmatrix_left, dis_coeff_left, R1,
Example No. 32
def bouguetRectify(points1, points2, imgSize, M1, M2, D1, D2, R, T):
    R1, R2, P1, P2, Q, ROI1, ROI2 = cv2.stereoRectify(M1, D1, M2, D2, imgSize, R, T)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, P1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, P2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), Q
Example No. 33
plt.imshow(disparity, 'CMRmap_r')
plt.show()

Tmat = np.array([0.4, 0., 0.])

#R, t = Pose_Est(imgR,imgL,K2,K1)
#Tmat = t

print(np.linalg.norm(Tmat))
rev_proj_matrix = np.zeros((4, 4))


cv2.stereoRectify(cameraMatrix1 = K1,cameraMatrix2 = K2, \
                  distCoeffs1 = 0, distCoeffs2 = 0, \
                  imageSize = imgL.shape[:2], \
                  R = np.identity(3), T = Tmat, \
                  R1 = None, R2 = None, \
                  P1 =  None, P2 =  None, Q = rev_proj_matrix)

h, w = imgL.shape[:2]
f = 0.8 * w
Q = np.float32([[1, 0, 0, -0.5 * w], [0, -1, 0, 0.5 * h], [0, 0, 0, -f],
                [0, 0, 1, 0]])

points = cv2.reprojectImageTo3D(disparity, Q)
reflect_matrix = np.identity(3)
reflect_matrix[0] *= -1
points = np.matmul(points, reflect_matrix)

colors = cv2.cvtColor(imgLc, cv2.COLOR_BGR2RGB)
    if len(allObjPoints) > 40:
      retval, leftCameraMatrix, leftDistCoeffs, rightCameraMatrix, rightDistCoeffs, R, T, E, F = cv2.stereoCalibrate(
        objectPoints=allObjPoints,
        imagePoints1=allLeftImgPoints,
        imagePoints2=allRightImgPoints,
        imageSize=imgSize,
        cameraMatrix1=leftCameraMatrix,
        distCoeffs1=leftDistCoeffs,
        cameraMatrix2=rightCameraMatrix,
        distCoeffs2=rightDistCoeffs,
        flags=flags)
      flags |= cv2.CALIB_USE_INTRINSIC_GUESS
      allObjPoints = []
      allLeftImgPoints = []
      allRightImgPoints = []
      R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(leftCameraMatrix, leftDistCoeffs, rightCameraMatrix, rightDistCoeffs, imgSize, R, T, flags=cv2.CALIB_ZERO_DISPARITY)
      leftMapX, leftMapY = cv2.initUndistortRectifyMap(leftCameraMatrix, leftDistCoeffs, R1, P1, imgSize, cv2.CV_32FC1)
      rightMapX, rightMapY = cv2.initUndistortRectifyMap(rightCameraMatrix, rightDistCoeffs, R2, P2, imgSize, cv2.CV_32FC1)

    if leftMapX is not None:
      left = cv2.remap(left, leftMapX, leftMapY, cv2.INTER_LINEAR)
      right = cv2.remap(right, rightMapX, rightMapY, cv2.INTER_LINEAR)

    frame = cv2.hconcat([left, right])
    cv2.imshow('Frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
      break
  else:
    break
Example No. 35
# b=np.abs(T[0])
b = 4.14353
# f=cameraMatrix1[0,0]
f = 1035.033

path1 = '/Users/zhouying/Desktop/三维重建/DATASET/train/d3/k3/Left_Image.png'
path2 = '/Users/zhouying/Desktop/三维重建/DATASET/train/d3/k3/Right_Image.png'
savepath = "/Users/zhouying/Desktop"

I1 = cv.imread(path1)
I2 = cv.imread(path2)


R1, R2, P1, P2, Q, validPixROI1, validPixROI2=\
    cv.stereoRectify(cameraMatrix1,distCoeffs1,
                     cameraMatrix2,distCoeffs2,imageSize,R,T,
                     flags=cv.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0,0))
left_map1, left_map2 = cv.initUndistortRectifyMap(cameraMatrix1, distCoeffs1,
                                                  R1, P1, imageSize,
                                                  cv.CV_16SC2)
right_map1, right_map2 = cv.initUndistortRectifyMap(cameraMatrix2, distCoeffs2,
                                                    R2, P2, imageSize,
                                                    cv.CV_16SC2)

I1_rectified = cv.remap(I1, left_map1, left_map2, cv.INTER_LINEAR)
I2_rectified = cv.remap(I2, right_map1, right_map2, cv.INTER_LINEAR)

minDisparity = 0
numDisparities = 192
SADWindowSize = 3
P1 = 8 * 3 * SADWindowSize**2
Example No. 36
    F,
    flags=0,
    criteria=criteria)

# rectify:
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1,
                                                  distCoeffs1,
                                                  cameraMatrix2,
                                                  distCoeffs2, (640, 480),
                                                  R,
                                                  T,
                                                  flags=0,
                                                  alpha=-1,
                                                  newImageSize=(0, 0))


def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


def compare_SIFT(box1, img1, box2, img2):
Ejemplo n.º 37
0
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.imshow(left_im[..., ::-1])
plt.subplot(122)
plt.imshow(right_im[..., ::-1])
plt.show()

R1 = np.zeros((3, 3))  #output 3x3 matrix
R2 = np.zeros((3, 3))  #output 3x3 matrix
P1 = np.zeros((3, 4))  #output 3x4 matrix
P2 = np.zeros((3, 4))  #output 3x4 matrix

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
    cameraMatrix1=mtx,  #intrinsic parameters of the first camera
    cameraMatrix2=mtx,  #intrinsic parameters of the second camera
    distCoeffs1=dist,  #distortion parameters of the first camera
    distCoeffs2=dist,  #distortion parameters of the second camera
    imageSize=(1536, 2048),  #image dimensions
    R=R,  #Rotation matrix between first and second cameras (returned by cv2.stereoCalibrate)
    T=T)  # translation between the cameras; the rectification outputs are returned directly (the pre-allocated arrays above are not actually passed in)

R1 = np.array(R1)  #convert output back to numpy format
R2 = np.array(R2)
P1 = np.array(P1)
P2 = np.array(P2)

map1_x, map1_y = cv2.initUndistortRectifyMap(
    mtx, dist, R1, P1, (left_im.shape[1], left_im.shape[0]), cv2.CV_32FC1)
map2_x, map2_y = cv2.initUndistortRectifyMap(
    mtx, dist, R2, P2, (left_im.shape[1], left_im.shape[0]), cv2.CV_32FC1)

im_left = cv2.imread('CAM00054_s1.jpg')
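# --- Sketch, not from the original snippet: the excerpt ends after loading the
# --- left image. The maps computed above are normally applied with cv2.remap;
# --- the right-image filename below is a placeholder, not taken from the source.
im_right = cv2.imread('right.jpg')  # hypothetical right view of the same scene
left_rectified = cv2.remap(im_left, map1_x, map1_y, cv2.INTER_LINEAR)
right_rectified = cv2.remap(im_right, map2_x, map2_y, cv2.INTER_LINEAR)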
Ejemplo n.º 38
0
def cali_save():
    # Arrays to store object points and image points from all images
    objpoints = []
    imgpointsR = []
    imgpointsL = []
    global retR, mtxR, distR, rvecsR, tvecsR, retS, MLS, dLS, MRS, dRS, R, T, E, F, RL, RR, PL, PR, Q, roiL, roiR, Shape
    # Start calibration from the camera
    print('Starting calibration for the 2 cameras... ')

    # Call all saved images
    for i in range(0, 20):
        if (i % 1 != 0):  # always False as written; increase the modulus to subsample the image set
            continue
        t = str(i)
        print(t)
        ChessImgR = cv2.imread('python/Stereo/imgs/chessboard-R' + t + '.png',
                               0)
        ChessImgL = cv2.imread('python/Stereo/imgs/chessboard-L' + t + '.png',
                               0)
        ChessImgL = cv2.resize(ChessImgL, (0, 0), fx=.5, fy=.5)
        ChessImgR = cv2.resize(ChessImgR, (0, 0), fx=.5, fy=.5)

        retR, cornersR = cv2.findChessboardCorners(ChessImgR, (9, 6), None)
        retL, cornersL = cv2.findChessboardCorners(ChessImgL, (9, 6), None)

        if (retR == True) & (retL == True):
            objpoints.append(objp)
            cv2.cornerSubPix(ChessImgR, cornersR, (11, 11), (-1, -1), criteria)
            cv2.cornerSubPix(ChessImgL, cornersL, (11, 11), (-1, -1), criteria)
            imgpointsR.append(cornersR)
            imgpointsL.append(cornersL)

    Shape = ChessImgR.shape[::-1]
    # Determine the new values for different parameters
    #	 Right side
    retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
        objpoints, imgpointsR, ChessImgR.shape[::-1], None, None)
    hR, wR = ChessImgR.shape[:2]
    OmtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR, hR), 1,
                                                (wR, hR))

    #	Left side
    retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
        objpoints, imgpointsL, ChessImgL.shape[::-1], None, None)
    hL, wL = ChessImgL.shape[:2]
    OmtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1,
                                                (wL, hL))

    print('Cameras Ready to use')

    #********************************************
    #***** Calibrate the Cameras for Stereo *****
    #********************************************

    # StereoCalibrate function
    flags = 0
    flags |= cv2.CALIB_FIX_INTRINSIC
    #flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    #flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    #flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    #flags |= cv2.CALIB_FIX_ASPECT_RATIO
    #flags |= cv2.CALIB_ZERO_TANGENT_DIST
    #flags |= cv2.CALIB_RATIONAL_MODEL
    #flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    #flags |= cv2.CALIB_FIX_K3
    #flags |= cv2.CALIB_FIX_K4
    #flags |= cv2.CALIB_FIX_K5
    retS, MLS, dLS, MRS, dRS, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR, distR,
        ChessImgR.shape[::-1], criteria_stereo, flags)

    # StereoRectify function
    rectify_scale = 0
    RL, RR, PL, PR, Q, roiL, roiR = cv2.stereoRectify(
        MLS, dLS, MRS, dRS, Shape, R, T, alpha=rectify_scale,
        newImageSize=(0, 0))  # alpha=0 crops to the valid region, alpha=1 keeps the full images

    fs = cv2.FileStorage()
    fs.open("cali.yaml", cv2.FILE_STORAGE_WRITE)
    fs.write("MLS", MLS)
    fs.write("dLS", dLS)
    fs.write("MRS", MRS)
    fs.write("dRS", dRS)
    fs.write("mtxR", mtxR)
    fs.write("mtxL", mtxL)
    fs.write("distR", distR)
    fs.write("distL", distL)
    fs.write("R", R)
    fs.write("T", T)
    fs.write("E", E)
    fs.write("F", F)

    #rectification
    fs.write("RL", RL)
    fs.write("RR", RR)
    fs.write("PL", PL)
    fs.write("PR", PR)
    fs.write("Q", Q)

    fs.write("Shape", Shape)
Ejemplo n.º 39
0
    calPath = "CalibrationPhotos/calibration_gusTest.json"
    datathings = cb.loadCalibration(calPath)
    M1, M2, d1, d2, R, T, E, F, dims = datathings
    print(datathings)

    print('T', T)
    print('T*sqr', T * sqr_size)

    flags = 0
    flags |= cv2.CALIB_ZERO_DISPARITY

    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(M1,
                                                      d1,
                                                      M2,
                                                      d2,
                                                      dims,
                                                      R,
                                                      T,
                                                      alpha=-1,
                                                      flags=flags)
    print('R1', R1)
    print('R2', R2)
    print('P1', P1)
    print('P2', P2)
    print('Q', Q)
    print('roi1', roi1)
    print('roi2', roi2)

    # an assortment of upper and lower bounds for the different colors we use in HSV.
    greenLower = (45, 86, 30)
    greenUpper = (100, 255, 255)
    # cv.imshow('rectified image pairs with rectified matching lines', draw_region)
    # cv.waitKey(0)
    # cv.destroyAllWindows()

    # --------------------    DISPARITY MAP OF RECTIFIED IMAGES      ------------------------ #
    disparityo1 = disparity_bmg(rectimg1, rectimg2,
                                11)  # function from part 2,
    # Later I found 11 might be a sweet spot
    disparityo = cv.normalize(disparityo1,
                              None,
                              alpha=0,
                              beta=255,
                              norm_type=cv.NORM_MINMAX)
    disparityo = np.uint8(disparityo)
    # Q1-to-2, might be
    R1, R2, P1, P2, Q, roi1, roi2 = cv.stereoRectify(new_kmat, dist, new_kmat,
                                                     dist, (C, R), rot, trans)
    # # Q2-to-1, might be
    # R1, R2, P1, P2, Q, roi1, roi2 = cv.stereoRectify(new_kmat, dist, new_kmat, dist, (C, R), invRot, invTrans)
    # Q[0, 3] += 400
    # Q[1, 3] += 300
    # pt_cloud = cv.reprojectImageTo3D(disparityo1, Q)
    # pt_cloud = cv.resize(pt_cloud, (960, 540))

    colored_disparityo = cv.resize(
        cv.applyColorMap(disparityo, cv.COLORMAP_JET), (960, 540))

    # cv.imshow('Disparity Map', colored_disparityo)
    # cv.imshow('Point Cloud', pt_cloud)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
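    # --- Sketch, not from the original snippet: the commented-out reprojection
    # --- above, with pixels that received no disparity estimate masked out
    # --- (Q comes from the stereoRectify call a few lines up).
    pt_cloud = cv.reprojectImageTo3D(disparityo1, Q)
    valid = disparityo1 > disparityo1.min()
    xyz = pt_cloud[valid]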
Ejemplo n.º 41
0
K_L = getattr(calib, 'K_cam' + left_cam)
K_R = getattr(calib, 'K_cam' + right_cam)

D_L = getattr(calib, 'D_cam' + left_cam)
D_R = getattr(calib, 'D_cam' + right_cam)

img_size = getattr(calib, 'S_cam' + left_cam)

R_L = getattr(calib, 'R_cam' + left_cam)
R_R = getattr(calib, 'R_cam' + right_cam)
R = np.matmul(R_L.transpose(), R_R)

T_L = getattr(calib, 'T_cam' + left_cam)
T_R = getattr(calib, 'T_cam' + right_cam)
T = T_L - T_R

_, _, _, _, Q, _, _ = cv2.stereoRectify(K_L, D_L, K_R, D_R,
                                        (int(img_size[0]), int(img_size[1])),
                                        R, T)

points = cv2.reprojectImageTo3D(disp, Q, True)

xs = points[:, :, 0].flatten()
ys = points[:, :, 1].flatten()
zs = points[:, :, 2].flatten()

with open('points.obj', 'w') as f:
    for x, y, z in zip(xs, ys, zs):
        if (not (math.isinf(x) or math.isinf(y) or math.isinf(z))):
            f.write('v ' + str(x) + ' ' + str(y) + ' ' + str(z) + '\n')
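# --- Sketch, not from the original snippet: with handleMissingValues=True,
# --- cv2.reprojectImageTo3D marks pixels without a disparity estimate with a
# --- very large finite Z (10000), which the isinf() test above does not catch;
# --- a stricter filter could also drop those points.
valid = np.abs(points[:, :, 2]) < 9999
xs_valid, ys_valid, zs_valid = (points[:, :, i][valid] for i in range(3))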
Ejemplo n.º 42
0
print("\n\nRotation Matrix:\n",R)
print("\n\nTransltion:\n",T)
print("\n\nEssential Matrix:\n",E)
print("\n\nFundamental Matrix:\n",F)
print("\n\nDistortion Coefficients:\n",distCoeffs1,'\n', distCoeffs2)

print("\n\nBaseline:\n",np.linalg.norm(T))


# (width,height)
size = (640, 480)
# size of your picture
left = cv2.imread('data/left/left01.jpg',0)
right = cv2.imread('data/right/right01.jpg',0)

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, size, R, T)
mapL1, mapL2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, size, cv2.CV_16SC2)
mapR1, mapR2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, size, cv2.CV_16SC2)


left_img_remap = cv2.remap(left, mapL1, mapL2, cv2.INTER_LINEAR)
right_img_remap = cv2.remap(right, mapR1, mapR2, cv2.INTER_LINEAR)
cv2.imshow('rectified left',left_img_remap)
cv2.imshow('rectified right',right_img_remap)


cv2.waitKey(0)
x,y,w,h = roi1
left_img_remap = left_img_remap[y:y+h, x:x+w]

x,y,w,h = roi2
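# --- Sketch, not from the original snippet (which is cut off here): the same
# --- crop applied to the right image using its own valid-pixel ROI.
right_img_remap = right_img_remap[y:y+h, x:x+w]
cv2.imshow('cropped right', right_img_remap)
cv2.waitKey(0)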
Ejemplo n.º 43
0
def calibrate_stereo(left_filenames, right_filenames):
    left_images = sorted(glob.glob(left_filenames))
    right_images = sorted(glob.glob(right_filenames))

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8,0:6].T.reshape(-1,2)

    obj_points = []
    img_pointsL = []
    img_pointsR = []

    for i in range(len(left_images)):
        nameL = left_images[i]
        nameR = right_images[i]

        imgL = cv2.imread(nameL, 0)
        imgR = cv2.imread(nameR, 0)

        retL, cornersL = cv2.findChessboardCorners(imgL, (8,6), None)
        retR, cornersR = cv2.findChessboardCorners(imgR, (8,6), None)

        if retL and retR:
            obj_points.append(objp)
            cv2.cornerSubPix(imgL, cornersL, (11,11), (-1,-1), criteria)
            img_pointsL.append(cornersL)
            cv2.cornerSubPix(imgR, cornersR, (11,11), (-1,-1), criteria)
            img_pointsR.append(cornersR)
            yield False, True, (nameL,cornersL), (nameR,cornersR)
        else:
            yield False, False, (nameL,None), (nameR,None)

    image_size = imgL.shape[::-1]

    # Find individual intrinsic parameters first
    retL, matrixL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
            obj_points, img_pointsL, image_size, None, None)
    retR, matrixR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
            obj_points, img_pointsR, image_size, None, None)

    # Calibrate stereo camera, with calculated intrinsic parameters
    ret, matrixL, distL, matrixR, distR, R, T, E, F = cv2.stereoCalibrate(
            obj_points, img_pointsL, img_pointsR, image_size,
            matrixL, distL, matrixR, distR, flags=cv2.CALIB_FIX_INTRINSIC)

    # Calculate rectification transforms for each camera
    R1 = np.zeros((3,3), np.float32)
    R2 = np.zeros((3,3), np.float32)
    P1 = np.zeros((3,4), np.float32)
    P2 = np.zeros((3,4), np.float32)
    Q = np.zeros((4,4), np.float32)
    R1, R2, P1, P2, Q, roiL, roiR = cv2.stereoRectify(
            matrixL, distL, matrixR, distR, image_size, R, T)

    # Create undistortion/rectification map for each camera
    mapsL = cv2.initUndistortRectifyMap(matrixL, distL, R1, P1,
            image_size, cv2.CV_32FC1)
    mapsR = cv2.initUndistortRectifyMap(matrixR, distR, R2, P2,
            image_size, cv2.CV_32FC1)

    # Convert maps to fixed-point representation (faster)
    mapsL = cv2.convertMaps(mapsL[0], mapsL[1], cv2.CV_16SC2)
    mapsR = cv2.convertMaps(mapsR[0], mapsR[1], cv2.CV_16SC2)

    calib = {
        "intrinsicL": matrixL, "intrinsicR": matrixR,
        "mapsL": mapsL, "mapsR": mapsR,
        "R1": R1, "R2": R2, "P1": P1, "P2": P2, "Q": Q,
        "roiL": roiL, "roiR": roiR
    }
    yield True, calib, None, None
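# --- Sketch, not from the original file: a hypothetical driver loop for
# --- calibrate_stereo(); the glob patterns are placeholders.
for finished, payload, left_info, right_info in calibrate_stereo('left/*.png', 'right/*.png'):
    if finished:
        calibration = payload            # final dict with maps, R1/R2, P1/P2, Q and ROIs
    elif payload:                        # True when corners were found in this image pair
        print('found corners in', left_info[0])
    else:
        print('no corners in', left_info[0])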
Ejemplo n.º 44
0
        imageShape = grayimg.shape[::-1]

        # Determine whether circles are found in the image and the coordinates of the centers
        circlesFound, centerCoords = cv2.findCirclesGrid(grayimg, (4, 11))

        # If circles are found, add object and image points
        if circlesFound:
            objectPoints.append(objp)
            imagePoints.append(centerCoords)

    return objectPoints, imagePoints, imageShape


# Find object and image points from both cameras (object points will be the same)
objectPointsL, imagePointsL, imageShapeL = addObjAndImgPoints(pathLeft)
objectPointsR, imagePointsR, imageShapeR = addObjAndImgPoints(pathRight)

# Grab initial stereo calibration data from both cameras (these are inputs for final calibration data).
isStereoCalibrated, cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, rotationMatrixStereo, translationMatrixStereo, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objectPointsL, imagePointsL, imagePointsR, None, None, None, None, imageShapeL)

# Grab final stereo calibration data
if isStereoCalibrated:
    rotMatrixL, rotMatrixR, cameraMatrixNewL, cameraMatrixNewR, dispToDepthMap, roiL, roiR = cv2.stereoRectify(cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, imageShapeL, rotationMatrixStereo, translationMatrixStereo)

    mapXL, mapYL = cv2.initUndistortRectifyMap(cameraMatrixL, distCoeffsL, rotMatrixL, cameraMatrixNewL, imageShapeL, cv2.CV_32FC1)
    mapXR, mapYR = cv2.initUndistortRectifyMap(cameraMatrixR, distCoeffsR, rotMatrixR, cameraMatrixNewR, imageShapeL, cv2.CV_32FC1)

    # Save the calibration data
    np.savez_compressed(calibrationData, imageShapeL = imageShapeL, imageShapeR = imageShapeR, mapXL=mapXL, mapYL = mapYL, roiL = roiL , mapXR = mapXR, mapYR = mapYR, roiR = roiR)
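# --- Sketch, not from the original snippet: reloading the saved arrays and
# --- rectifying a live pair ('leftFrame'/'rightFrame' are hypothetical inputs;
# --- np.savez_compressed may have appended an '.npz' suffix to the path).
data = np.load(calibrationData)
leftRectified = cv2.remap(leftFrame, data['mapXL'], data['mapYL'], cv2.INTER_LINEAR)
rightRectified = cv2.remap(rightFrame, data['mapXR'], data['mapYR'], cv2.INTER_LINEAR)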

Ejemplo n.º 45
0
res_l = calib['camera_calibrations']['camera_%d' % c]['resolution']

c = 1
K_r = calib['camera_calibrations']['camera_%d' % c]['K']
D_r = calib['camera_calibrations']['camera_%d' % c]['D']
R_r = calib['camera_calibrations']['camera_%d' % c]['R']
T_r = calib['camera_calibrations']['camera_%d' % c]['T']
res_r = calib['camera_calibrations']['camera_%d' % c]['resolution']

# Compute relative transformation
R = R_r.dot(np.linalg.inv(R_l))
T = -R_r.dot(np.linalg.inv(R_l).dot(T_l)) + T_r

# Rectification
newsize = tuple(res_l)
R_l, R_r, P_l, P_r, _, _, _ = cv2.stereoRectify(K_l, D_l, K_r, D_r, newsize, R, T, flags = cv2.CALIB_ZERO_DISPARITY, alpha = 0)

rectMap = [[[], []], [[], []]]
rectMap[0][0], rectMap[0][1] = cv2.initUndistortRectifyMap(K_l, D_l, R_l, P_l, newsize, cv2.CV_16SC2)
rectMap[1][0], rectMap[1][1] = cv2.initUndistortRectifyMap(K_r, D_r, R_r, P_r, newsize, cv2.CV_16SC2)

img_l, gray_l = getimage(imgpath1)
img_r, gray_r = getimage(imgpath2)

imgorig_l = utils.resizeimgh(img_l, H_IMGS)
imgorig_r = utils.resizeimgh(img_r, H_IMGS)
imgorig = np.hstack((imgorig_l, imgorig_r))

imgRect_l = cv2.remap(img_l, rectMap[0][0], rectMap[0][1], cv2.INTER_CUBIC)
imgRect_r = cv2.remap(img_r, rectMap[1][0], rectMap[1][1], cv2.INTER_CUBIC)

M1 = np.array([[ 2.0340227345156890e+002 , 0, 1.5950000000000000e+002 ],[ 0, 2.0340227345156890e+002 , 1.1950000000000000e+002 ],[ 0,0, 1 ]])
M2 = np.array([[2.0340227345156890e+002 , 0, 1.5950000000000000e+002],[0, 2.0340227345156890e+002 , 1.1950000000000000e+002],[0, 0, 1 ]])
D1 = np.array([-4.1344532351734609e-001 , 1.4783234656467026e-001, 4.4055963467856136e-003 , 6.1661017124724649e-003, 0])
D2 = np.array([-4.5592829264409196e-001 , 2.2408910954289543e-001, 1.0425612281350783e-003 , -5.5920270905273290e-003, 0])
R = np.array([[9.9880267535072764e-001 ,1.0478131925395094e-003, -4.8909281325148393e-002],[-1.9549817734339412e-003, 9.9982687809750082e-001, -1.8503834199179697e-002],[4.8881425495729047e-002 ,  1.8577295855929390e-002 , 9.9863180918704308e-001]])
T = np.array([-6.0430960451649426e+001 , 9.5357972721080564e-001, -3.4515100808301149e+000])

R1 = np.zeros(shape=(3,3))
R2 = np.zeros(shape=(3,3))
P1 = np.zeros(shape=(3,4))
P2 = np.zeros(shape=(3,4))
Q = np.zeros(shape=(4,4))

aa,bb,cc,dd,ee,roi1,roi2 = cv2.stereoRectify(M1, D1, M2, D2,(width, height), R, T, R1, R2, P1, P2, Q, flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0,0))
print(roi1)

# print maskl.max()
maskl = np.zeros((height,width),dtype=np.uint8)

maskr = np.zeros((height,width),dtype=np.uint8)

cv2.rectangle(maskl,(roi1[0],roi1[1]),(roi1[2]+roi1[0],roi1[3]+roi1[1]),(255,255,255),-1)
cv2.rectangle(maskr,(roi2[0],roi2[1]),(roi2[2]+roi2[0],roi2[3]+roi2[1]),(255,255,255),-1)


map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, P1, (width, height), cv2.CV_32FC1)
map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, P2, (width, height), cv2.CV_32FC1)

print "Undistort complete\n"
Ejemplo n.º 47
0
def stereo_rectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                   imageSize, R, T):
    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
        cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R,
        T)
    return P1, P2, Q
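# --- Sketch, not from the original file: one way the returned projection
# --- matrices might be used, triangulating corresponding points from the
# --- rectified images. 'ptsL'/'ptsR' are hypothetical 2xN float arrays of
# --- matching pixel coordinates; the other arguments mirror the function's own.
P1, P2, Q = stereo_rectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                           imageSize, R, T)
pts4d = cv2.triangulatePoints(P1, P2, ptsL, ptsR)     # homogeneous 4xN result
pts3d = (pts4d[:3] / pts4d[3]).T                      # Nx3 Euclidean points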
Ejemplo n.º 48
0
list_left_sort = sorted(list_left, key=lambda s: int(p.search(s).group()))
list_right_sort = sorted(list_right, key=lambda s: int(p.search(s).group()))

cameraMatrix1 = numpy.loadtxt(dorectory_calibration + 'cameraMatrix1.csv',delimiter = ',')
cameraMatrix2 = numpy.loadtxt(dorectory_calibration + 'cameraMatrix2.csv',delimiter = ',')
distCoeffs1 = numpy.loadtxt(dorectory_calibration + 'distCoeffs1.csv',delimiter = ',')
distCoeffs2 = numpy.loadtxt(dorectory_calibration + 'distCoeffs2.csv',delimiter = ',')
R = numpy.loadtxt(dorectory_calibration + 'R.csv',delimiter = ',')
T = numpy.loadtxt(dorectory_calibration + 'T.csv',delimiter = ',')

# Compute the rectification rotations R and projections P, plus the 4x4 reprojection matrix Q
flags = 0
alpha = 1
imageSize = (1024,768)
newimageSize = (1024,768)
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, flags=flags, alpha=alpha, newImageSize=newimageSize)

# Compute the rectification maps
m1type = cv2.CV_32FC1
map1_l, map2_l = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, newimageSize, m1type) # m1type cannot be omitted
map1_r, map2_r = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, newimageSize, m1type)





def calculate_contour_area(im_l,im_r):
    interpolation = cv2.INTER_NEAREST
    Re_TgtImg_l = cv2.remap(im_l, map1_l, map2_l, interpolation)
    Re_TgtImg_r = cv2.remap(im_r, map1_r, map2_r, interpolation)
camera_matrix_left = np.array([[1250.12541, 0, 691.61850],
                               [0, 1230.17838, 427.35528], [0, 0, 1]])
# kc_left = []
distortion_left = np.array([-0.05265, 0.14178, -0.00724, 0.00415, 0.00000])

# fc_right_x  0           cc_right_x
# 0           fc_right_y  cc_right_y
# 0           0           1
camera_matrix_right = np.array([[1193.23239, 0, 638.79370],
                                [0, 1179.38126, 411.16069], [0, 0, 1]])
# kc_right = []
distortion_right = np.array([-0.18814, 0.66653, -0.00790, -0.00679, 0.00000])

# rotation vector
om = np.array([-0.01199, -0.00651, -0.00405])
R = cv2.Rodrigues(om)[0]
# translation vector
T = np.array([-47.47028, 6.13393, -20.83518])
# window size
size = (1280, 720)
R1, R2, P1, P2, Q, validPirxROI1, validPixROI2 = cv2.stereoRectify(
    camera_matrix_left, distortion_left, camera_matrix_right, distortion_right,
    size, R, T)

left_map1, left_map2 = cv2.initUndistortRectifyMap(camera_matrix_left,
                                                   distortion_left, R1, P1,
                                                   size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(camera_matrix_right,
                                                     distortion_right, R2, P2,
                                                     size, cv2.CV_16SC2)
Ejemplo n.º 50
0
averageEpipolarError = totalError / totalPoints
print "Average epipolar error is {}".format(averageEpipolarError)



#--------------------------------------------------#
# Calculate extrinsic parameters, R1, R2, P1, P2, Q #
#--------------------------------------------------#
print("Calculating extrinsic parameters")

calib_R_left, calib_R_right, calib_P_left, calib_P_right, calib_Q, validROI_left, validROI_right = \
	cv2.stereoRectify(cameraMatrixLeft, distortionCoeffsLeft, cameraMatrixRight, distortionCoeffsRight, \
	imageSize=imageSize, \
	R=calib_R, \
	T=calib_T, \
	flags=cv2.CALIB_ZERO_DISPARITY, \
	alpha=1, \
	newImageSize=imageSize \
	)

print "IMPORTANT (isVerticalStereo): {}".format(calib_P_right[1][3] > calib_P_right[0][3])
print "Done calculating extrinsinc parameters"



#----------------------------------------#
print "Saving calibrations"

#instrinsinc
np.save("calibrations/cameraMatrixLeft", cameraMatrixLeft)
np.save("calibrations/distortionCoeffsLeft", distortionCoeffsLeft)
Ejemplo n.º 51
0
right_camera_matrix = np.array([[1209.88776209302, 0.000000, 413.856956594846],
                                [0.000000, 1210.67130773907, 228.925227764687],
                                [0.000000, 0.000000, 1.000000]])
right_distortion = np.array([[
    -0.629241841057911, 0.384306491947133, -0.00302412142395574,
    -0.0187961427211770, 0.000000
]])

#om = np.array([0.01911, 0.03125, -0.00960])  # rotation vector between the cameras
#R = cv2.Rodrigues(om)[0]  # convert om into the rotation matrix R with Rodrigues

T = np.array([-502.581451656473, -10.0897997351901,
              -15.6283011876912])  # translation vector
R = np.array([[0.998453237208844, 0.0279672732036317, 0.0480516881777589],
              [-0.0298684171587964, 0.998780442220761, 0.0393129227319578],
              [-0.0468936111171582, -0.0406873428335421, 0.998070903979040]])
size = (640, 480)  # image size

# Perform stereo rectification
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
    left_camera_matrix, left_distortion, right_camera_matrix, right_distortion,
    size, R, T)
# Compute the rectification maps
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix,
                                                   left_distortion, R1, P1,
                                                   size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix,
                                                     right_distortion, R2, P2,
                                                     size, cv2.CV_16SC2)
    def _stereo_calibrate(self, dims):
        """
        It performs stereo calibration and builds a dictionary of stereo
        camera calibration model parameters.

        Returns:
        camera_model: dict
        A dictionary containing stereo camera model parameters.
        """
        flags = 0
        flags |= cv2.CALIB_FIX_INTRINSIC
        # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
        flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        # flags |= cv2.CALIB_FIX_ASPECT_RATIO
        flags |= cv2.CALIB_ZERO_TANGENT_DIST
        # flags |= cv2.CALIB_RATIONAL_MODEL
        # flags |= cv2.CALIB_SAME_FOCAL_LENGTH
        # flags |= cv2.CALIB_FIX_K3
        # flags |= cv2.CALIB_FIX_K4
        # flags |= cv2.CALIB_FIX_K5

        stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                                cv2.TERM_CRITERIA_EPS, 100, 1e-5)

        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            self.objpoints,
            self.imgpoints_l,
            self.imgpoints_r,
            self.M1,
            self.d1,
            self.M2,
            self.d2,
            dims,
            criteria=stereocalib_criteria,
            flags=flags)
        # make sure to move around args according to opencv version

        rot_left, _ = cv2.Rodrigues(self.r1[0])
        rot_right, _ = cv2.Rodrigues(self.r2[0])

        R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
            M1, d1, M2, d2, dims, R, T, flags=0, alpha=0)

        mapLx, mapLy = cv2.initUndistortRectifyMap(M1,
                                                   d1,
                                                   R1,
                                                   P1,
                                                   dims,
                                                   m1type=cv2.CV_32FC1)
        mapRx, mapRy = cv2.initUndistortRectifyMap(M2,
                                                   d2,
                                                   R2,
                                                   P2,
                                                   dims,
                                                   m1type=cv2.CV_32FC1)

        camera_model = dict([('M1', M1), ('M2', M2), ('dist1', d1),
                             ('dist2', d2), ('rot_left', rot_left),
                             ('rot_right', rot_right), ('R', R), ('T', T),
                             ('E', E), ('F', F), ('mapLx', mapLx),
                             ('mapLy', mapLy), ('mapRx', mapRx),
                             ('mapRy', mapRy), ('R1', R1), ('R2', R2),
                             ('P1', P1), ('P2', P2), ('dims', dims)])

        return camera_model
flags |= cv.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence the intrinsic parameters stay the same

criteria_stereo= (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step computes the transformation between the two cameras and the essential and fundamental matrices
retStereo, newCameraMatrixL, distL, newCameraMatrixR, distR, rot, trans, essentialMatrix, fundamentalMatrix = cv.stereoCalibrate(objpoints, imgpointsL, imgpointsR, newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], criteria_stereo, flags)




########## Stereo Rectification #################################################

rectifyScale= 1
rectL, rectR, projMatrixL, projMatrixR, Q, roi_L, roi_R = cv.stereoRectify(newCameraMatrixL, distL, newCameraMatrixR, distR, grayL.shape[::-1], rot, trans, alpha=rectifyScale, newImageSize=(0,0))

stereoMapL = cv.initUndistortRectifyMap(newCameraMatrixL, distL, rectL, projMatrixL, grayL.shape[::-1], cv.CV_16SC2)
stereoMapR = cv.initUndistortRectifyMap(newCameraMatrixR, distR, rectR, projMatrixR, grayR.shape[::-1], cv.CV_16SC2)

print("Saving parameters!")
cv_file = cv.FileStorage('stereoMap.xml', cv.FILE_STORAGE_WRITE)

cv_file.write('stereoMapL_x',stereoMapL[0])
cv_file.write('stereoMapL_y',stereoMapL[1])
cv_file.write('stereoMapR_x',stereoMapR[0])
cv_file.write('stereoMapR_y',stereoMapR[1])

cv_file.release()

Ejemplo n.º 54
0
        if not in_front_of_both_cameras(first_inliers, second_inliers, R, T):

            # Third choice: R = U * Wt * Vt, T = u_3
            R = U.dot(W.T).dot(Vt)
            T = U[:, 2]

            if not in_front_of_both_cameras(first_inliers, second_inliers, R,
                                            T):
                # Fourth choice: R = U * Wt * Vt, T = -u_3
                T = -U[:, 2]

            # perform the rectification
            R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K,
                                                              d,
                                                              K,
                                                              d,
                                                              imgl.shape[:2],
                                                              R,
                                                              T,
                                                              alpha=1.0)
            mapx1, mapy1 = cv2.initUndistortRectifyMap(K, d, R1, K,
                                                       imgl.shape[:2],
                                                       cv2.CV_32F)
            mapx2, mapy2 = cv2.initUndistortRectifyMap(K, d, R2, K,
                                                       imgr.shape[:2],
                                                       cv2.CV_32F)
            img_rect1 = cv2.remap(imgl, mapx1, mapy1, cv2.INTER_LINEAR)
            img_rect2 = cv2.remap(imgr, mapx2, mapy2, cv2.INTER_LINEAR)

            # draw the images side by side
            total_size = (max(img_rect1.shape[0], img_rect2.shape[0]),
                          img_rect1.shape[1] + img_rect2.shape[1], 3)
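            # --- Sketch, not from the original snippet (which is cut off here):
            # --- a typical way to place both rectified views on one canvas and
            # --- draw horizontal lines to check that epipolar lines align
            # --- (assumes imgl/imgr are 3-channel images, matching total_size).
            canvas = np.zeros(total_size, dtype=np.uint8)
            canvas[:img_rect1.shape[0], :img_rect1.shape[1]] = img_rect1
            canvas[:img_rect2.shape[0], img_rect1.shape[1]:] = img_rect2
            for y in range(20, canvas.shape[0], 25):
                cv2.line(canvas, (0, y), (canvas.shape[1], y), (255, 0, 0))
            cv2.imshow('rectified pair', canvas)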
Ejemplo n.º 55
0
# import camera_configs
# load the calibration data

camtxL = np.load('camera_matrixL.npy')
camtxR = np.load('camera_matrixR.npy')
distL = np.load('distortion_cofficientsL.npy')
distR = np.load('distortion_cofficientsR.npy')
rmtx = np.load('rotation_mat.npy')
tvec = np.load('translation_vectors.npy')

Limg = cv.imread(r'.\data\left03.jpg', cv.IMREAD_GRAYSCALE)
Rimg = cv.imread(r'.\data\right03.jpg', cv.IMREAD_GRAYSCALE)

size = (640, 480)
# Recfitication
RL, RR, ncamtxL, ncamtxR, Q, validPixROI1, validPixROI2 = cv.stereoRectify(
    camtxL, distL, camtxR, distR, size, rmtx, tvec)
Lmap1, Lmap2 = cv.initUndistortRectifyMap(camtxL, distL, RL, ncamtxL, size,
                                          cv.CV_16SC2)
# cv.imshow('',Limg)
# cv.waitKey()
Rmap1, Rmap2 = cv.initUndistortRectifyMap(camtxR, distR, RR, ncamtxR, size,
                                          cv.CV_16SC2)
Limg_Rec = cv.remap(Limg, Lmap1, Lmap2, cv.INTER_LINEAR)
Rimg_Rec = cv.remap(Rimg, Rmap1, Rmap2, cv.INTER_LINEAR)
cv.imwrite(r'.\data\left_Rec.jpg', Limg_Rec)
cv.imwrite(r'.\data\right_Rec.jpg', Rimg_Rec)

cv.namedWindow("left")
cv.namedWindow("right")
cv.namedWindow("depth")
cv.moveWindow("left", 0, 0)
Ejemplo n.º 56
0
    def calibrate(self):

        ##
        undisorted = False
        cheesboard_found = False
        possib_cho = ['N', 'Y']
        user_choice = ""

        # object points, depend on the chessboard size
        objp = np.zeros((np.prod(self.cheesboard_size),3)
                        , np.float32)
        objp[:,:2] = np.mgrid[0:self.cheesboard_size[0],0:self.cheesboard_size[1]]\
            .T.reshape(-1,2)
        objp = objp*self.cheesboard_scale
        input("Press any key to start calibration...")

        while True:

            # Read single frame from camera
            for i in range(0,5):
                _, frame_l = self.cam_l.read()
                _, frame_r = self.cam_r.read()

            # Frame sizes
            rows,cols,dim = frame_l.shape

            M = cv2.getRotationMatrix2D((cols/2,rows/2),-90,1)
            frame_l = cv2.warpAffine(frame_l,M,(cols,rows))
            frame_r = cv2.warpAffine(frame_r,M,(cols,rows))

            # Convert to grayscale
            gray_l = cv2.cvtColor(frame_l,cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(frame_r,cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret_l, corners_l = cv2.findChessboardCorners(gray_l,
                                                     self.cheesboard_size, None)
            ret_r, corners_r = cv2.findChessboardCorners(gray_r,
                                                     self.cheesboard_size, None)

            # If chessboard corners found
            if ret_l and ret_r and not undisorted:

                print "Left Camera: Cheesboard Found"
                print "Right Camera: Cheesboard Found"

                self.objpoints.append(objp)

                cv2.cornerSubPix(gray_l, corners_l,(5, 5),(-1,-1), self.criteria)
                cv2.cornerSubPix(gray_r, corners_r,(5, 5),(-1,-1), self.criteria)

                # Add image points to buffer
                self.imgpoints_l.append(corners_l)
                self.imgpoints_r.append(corners_r)

                # Draw and display the corners
                cv2.drawChessboardCorners(frame_l, self.cheesboard_size, corners_l, ret_l)
                cv2.drawChessboardCorners(frame_r, self.cheesboard_size, corners_r, ret_r)

                if self.lowres:

                    # Put text on image
                    cv2.putText(frame_l,"Found!!!", (100,rows//2), cv2.FONT_HERSHEY_SIMPLEX,
                                2,(255,255,255),5, 1)
                    cv2.putText(frame_r,"Found!!!", (100,rows//2), cv2.FONT_HERSHEY_SIMPLEX,
                                2,(255,255,255),5, 1)
                else:
                    # Put text on image
                    cv2.putText(frame_l,"Found!!!", (100,rows//2), cv2.FONT_HERSHEY_SIMPLEX,
                                4,(255,255,255),5, 1)
                    cv2.putText(frame_r,"Found!!!", (100,rows//2), cv2.FONT_HERSHEY_SIMPLEX,
                                4,(255,255,255),5, 1)


                self.img_counter = self.img_counter + 1
                cheesboard_found = True

            elif undisorted:
                frames = [frame_l, frame_r]
                maps = [left_maps, right_maps]
                rect_frames = [0,0]
                for src in self.cam_sources:
                        rect_frames[src] = cv2.remap(frames[src], maps[src][0], maps[src][1], cv2.INTER_LANCZOS4)

                # Undistorted images
                frame_l = rect_frames[0]
                frame_r = rect_frames[1]


            # show image
            cv2.imshow('img_left',frame_l)
            cv2.imshow('img_right',frame_r)
            k = cv2.waitKey(5) & 0xFF

            # Founded?, What next
            if cheesboard_found:
                while user_choice not in possib_cho:
                    user_choice = input("Found: %d , continue (Y/N) : " % (self.img_counter,))
                cheesboard_found = False

                # If user chose No
                if user_choice == possib_cho[0]:
                    print "Using calculated parameters to remove disortion..."

                    if not self.isOpen3():
                        # Calculating disortion params for stereo camera
                        ret_val,camera_mat_l, dist_l, camera_mat_r, dist_r, R, T, E, F = cv2.stereoCalibrate(
                                self.objpoints, self.imgpoints_l,
                                self.imgpoints_r, gray_l.shape[::-1]
                        )

                        # distortion params
                        dist = [dist_l, dist_r]

                        # Zero out the last distortion coefficient
                        for src in self.cam_sources:
                            dist[src][0][-1] = 0.0
                    else:
                        # Calculating distortion params for each camera
                        ret_l, mtx_l, dist_l, rvecs_l, tvecs_l = cv2.calibrateCamera(self.objpoints,
                                                                                     self.imgpoints_l,
                                                                                     gray_l.shape[::-1],
                                                                                     None,None)
                        ret_r, mtx_r, dist_r, rvecs_r, tvecs_r = cv2.calibrateCamera(self.objpoints,
                                                                                     self.imgpoints_r,
                                                                                     gray_r.shape[::-1],
                                                                                    None,None)

                        print "Calibration Error for left, right camera : " + str(ret_l) + ", " + str(ret_r)

                        # distortion params
                        dist = [dist_l, dist_r]

                        # Zero out the last distortion coefficient
                        for src in self.cam_sources:
                            dist[src][0][-1] = 0.0

                        ## find new camera and remove translation params
                        camera_mat_l, roi_l=cv2.getOptimalNewCameraMatrix(mtx_l,dist_l,(cols,rows),1,(cols,rows))
                        camera_mat_r, roi_r=cv2.getOptimalNewCameraMatrix(mtx_r,dist_r,(cols,rows),1,(cols,rows))

                        stereorms, mtx_l, dist_l, mtx_r,\
                        dist_r, R, T, E, F = cv2.stereoCalibrate(
                                objectPoints=self.objpoints, imagePoints1=self.imgpoints_l,
                                imagePoints2=self.imgpoints_r, cameraMatrix1=camera_mat_l,
                                distCoeffs1=dist_l, cameraMatrix2=camera_mat_r,
                                distCoeffs2=dist_r, imageSize=gray_l.shape[::-1],
                                flags=(cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_RATIONAL_MODEL)
                                )
                        print "Calibration Error for both cameras : " + str(stereorms)

                        # camera matrixes
                        cam_mats = [camera_mat_l, camera_mat_r]

                        # Crop option
                        rectify_scale = 0

                        # Rectification
                        R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
                                camera_mat_l, dist_l,
                                camera_mat_r, dist_r,
                                gray_l.shape[::-1], R, T, alpha = rectify_scale
                        )
                        left_maps = cv2.initUndistortRectifyMap(camera_mat_l, dist_l, R1, P1, gray_l.shape[::-1], cv2.CV_16SC2)
                        right_maps = cv2.initUndistortRectifyMap(camera_mat_r, dist_r, R2, P2, gray_l.shape[::-1], cv2.CV_16SC2)

                    ## Save result
                    np.savez(
                            "disortion_params.npz", camera_mat_l=camera_mat_l, camera_mat_r=camera_mat_r, dist_l=dist_l,
                             dist_r=dist_r, R=R, T=T, E=E,
                             F=F, left_maps=left_maps, right_maps=right_maps
                             )

                    undisorted = True
                user_choice = ""

            if k == 27:
                break

        cv2.destroyAllWindows()
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                        100, 1e-5)
ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
    objPoints, leftCorners, rightCorners,
    mtxLeft, distLeft,
    mtxLeft, distLeft,
    imgSize,
    criteria=stereocalib_criteria, flags=flags)

print("Stereo Rectification")

R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
    mtxLeft, distLeft,
    mtxLeft, distLeft,
    imgSize,
    R, T,
    alpha=1)


mapXLeft, mapYLeft = cv2.initUndistortRectifyMap(mtxLeft, \
                                            distLeft,\
                                            R1,\
                                            P1,\
                                            imgSize,
                                            cv2.CV_32F)
mapXRight, mapYRight = cv2.initUndistortRectifyMap(mtxLeft, \
                                            distLeft,\
                                            R2,\
                                            P2,\
Ejemplo n.º 58
0

    path = '/Users/tangling/python_L/match/'
    imgLeft = cv2.imread(path + "images/test/0left.png", cv2.IMREAD_GRAYSCALE)
    imgRight = cv2.imread(path + "images/test/0right.png", cv2.IMREAD_GRAYSCALE)
    # imgLeft = cv2.imread(imgLeftPath, cv2.IMREAD_GRAYSCALE)
    # imgRight = cv2.imread(imgRightPath, cv2.IMREAD_GRAYSCALE)
    imageSize = (imgLeft.shape[1], imgLeft.shape[0])
    K1 = np.loadtxt(path + 'xj/K1.txt')
    K2 = np.loadtxt(path + 'xj/K2.txt')
    D1 = np.loadtxt(path + "xj/D1.txt")
    D2 = np.loadtxt(path + "xj/D1.txt")  # note: this re-reads D1.txt; a separate D2.txt was presumably intended
    F = np.loadtxt(path + "xj/F.txt")
    R = np.loadtxt(path + "xj/R.txt")
    T = np.loadtxt(path + "xj/T.txt")
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K1, D1, K2, D2, imageSize, R, T)
    mapl1, mapl2 = cv2.initUndistortRectifyMap(K1, D1, R1, P1, imageSize, cv2.CV_16SC2)
    mapr1, mapr2 = cv2.initUndistortRectifyMap(K2, D2, R2, P2, imageSize, cv2.CV_16SC2)
    left = cv2.remap(imgLeft, mapl1, mapl2, cv2.INTER_LINEAR)
    cv2.namedWindow("left", cv2.WINDOW_NORMAL)
    cv2.imshow("left", left)
    # print imgLeftPath
    right = cv2.remap(imgRight, mapr1, mapr2, cv2.INTER_LINEAR)
    cv2.namedWindow("right", cv2.WINDOW_NORMAL)
    cv2.imshow("right", right)
    # print imgRightPath
    projPointsRight = []
    while True:
        cv.SetMouseCallback("left", mycv.onMouseEvent)
        cv.SetMouseCallback("right", mycv.onMouseEvent)
        c = cv.WaitKey()
Ejemplo n.º 59
0
    #     # refine the corner locations (sub-pixel accuracy)
    #
    #     # visualize the detected corners
    #     # cv2.drawChessboardCorners(img_L, (7, 6), corners2_L, ret_L)
    #     # cv2.imshow("Corners", img_L)

ret_L, mtx_L, dist_L, rvecs_L, tvecs_L = cv2.calibrateCamera(
    objpoints, imgpoints_L, gray_L.shape[::-1], None, None)
ret_R, mtx_R, dist_R, rvecs_R, tvecs_R = cv2.calibrateCamera(
    objpoints, imgpoints_R, gray_R.shape[::-1], None, None)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpoints_L, imgpoints_R, mtx_L, dist_L, mtx_R, dist_R,
    gray_L.shape[::-1])

R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
    cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, gray_R.shape[::-1],
    R, T)

map1_L, map2_L = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1,
                                             P1, gray_L.shape[::-1],
                                             cv2.CV_16SC2)
map1_R, map2_R = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2,
                                             P2, gray_L.shape[::-1],
                                             cv2.CV_16SC2)

i = 1
img_L = cv2.imread('images_left/left%02d.jpg' % i)
img_R = cv2.imread('images_right/right%02d.jpg' % i)

dst_L = cv2.remap(img_L, map1_L, map2_L, cv2.INTER_LINEAR)
dst_R = cv2.remap(img_R, map1_R, map2_R, cv2.INTER_LINEAR)
Ejemplo n.º 60
-1
def calibrateImages(obj_pts, ptsL, ptsR, imsize):
    '''Calibrate the Stereo camera rig, given:
    @obj_pts: points that specify the calibration checkerboard pattern
    @ptsL: detected image points in the left calibration images
    @ptsR: detected image points in the right calibration images
    @imsize: the stipulated size of all calibration images
    return: updated StereoCam object
    '''
    # Perform Stereo Calibration
    retval, cam1, dist1, cam2, dist2, R, T, E, F = \
        cv2.stereoCalibrate(
            obj_pts, ptsL, ptsR, imsize)  # OpenCV 2.4-style argument order; 3.x+ expects the camera matrices before imageSize
    dist1 = dist1.ravel()
    dist2 = dist2.ravel()

    cameraObj = StereoCam()
    cameraObj.cam1, cameraObj.dist1 = cam1, dist1
    cameraObj.cam2, cameraObj.dist2 = cam2, dist2

    cameraObj.R, cameraObj.T, cameraObj.E, cameraObj.F = R, T, E, F

    # Perform Stereo Rectification
    (R1, R2, P1, P2, Q, roi1, roi2) = \
        cv2.stereoRectify(cam1, dist1, cam2, dist2, imsize, R, T)

    # update the left and right rotation and projection matrices
    cameraObj.R1, cameraObj.R2 = R1, R2
    cameraObj.P1, cameraObj.P2 = P1, P2

    # update the image projection (aka disparity-to-depth mapping) matrix
    cameraObj.Q = Q

    # Extract the intrinsic matrices from the rectified projection matrices
    K_L = cv2.decomposeProjectionMatrix(P1)
    K1 = K_L[0]
    K_R = cv2.decomposeProjectionMatrix(P2)
    K2 = K_R[0]

    cameraObj.K1, cameraObj.K2 = K1, K2

    return cameraObj
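# --- Sketch, not from the original file: hypothetical use of calibrateImages(),
# --- building rectification maps from the returned StereoCam fields
# --- (obj_pts, ptsL, ptsR and imsize stand for the caller's own data).
rig = calibrateImages(obj_pts, ptsL, ptsR, imsize)
mapLx, mapLy = cv2.initUndistortRectifyMap(rig.cam1, rig.dist1, rig.R1, rig.P1,
                                           imsize, cv2.CV_32FC1)
mapRx, mapRy = cv2.initUndistortRectifyMap(rig.cam2, rig.dist2, rig.R2, rig.P2,
                                           imsize, cv2.CV_32FC1)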