def depthImage2depthFrame(depthImage):
    # Back-project each depth pixel into the IR (depth) camera frame using the
    # inverse of the IR intrinsic matrix: X = K^-1 * [u*z, v*z, z]^T
    IRcalibration = ld.getIRCalib()
    intrinsicMatrix = np.array([[IRcalibration['fc'][0], 0, IRcalibration['cc'][0]],
                                [0, IRcalibration['fc'][1], IRcalibration['cc'][1]],
                                [0, 0, 1]])
    inverseIM = np.linalg.inv(intrinsicMatrix)

    # One homogeneous point per pixel; untouched pixels keep the sentinel value 1
    depthFrameP = np.ones((depthImage.shape[0], depthImage.shape[1], 4))
    for i in range(depthImage.shape[0]):
        for j in range(depthImage.shape[1]):
            if depthImage[i, j] != 0:
                z = 0.01 * depthImage[i, j]  # scale raw depth reading
                depthFrameP[i, j, :3] = np.matmul(
                    inverseIM,
                    np.array([[z * (i + 1)], [z * (j + 1)], [z]])).reshape((3,))

    # Keep only pixels that were actually back-projected (np.logical_and takes
    # only two operands, so the three conditions are chained explicitly)
    valid = (depthFrameP[:, :, 0] != 1) & (depthFrameP[:, :, 1] != 1) & (depthFrameP[:, :, 2] != 1)
    realDepthFrame = depthFrameP[valid, :]
    return realDepthFrame
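# A minimal, vectorized sketch of the same pinhole back-projection, kept
# separate so the loop version above stays untouched. It assumes depthImage is
# a 2-D array whose raw values the 0.01 factor scales, and it mirrors the
# (i+1), (j+1) pixel convention used above; the name depthImage2depthFrameVec
# is hypothetical.
def depthImage2depthFrameVec(depthImage, inverseIM):
    rows, cols = np.indices(depthImage.shape)
    z = 0.01 * depthImage                     # scaled depth per pixel
    pixels = np.stack([(rows + 1) * z,        # same (i+1) convention as above
                       (cols + 1) * z,        # same (j+1) convention as above
                       z], axis=0).reshape(3, -1)
    points = inverseIM @ pixels               # back-project all pixels at once
    mask = depthImage.reshape(-1) != 0        # drop pixels with no depth return
    return points[:, mask].T                  # (N, 3) points in the IR frame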
import numpy as np
from transforms3d.euler import euler2mat, mat2euler
import load_data

# IR (depth) and RGB camera intrinsic matrices from the Kinect calibration
IRCalib = load_data.getIRCalib()
Matrix_IRCalib = np.array([[IRCalib['fc'][0], IRCalib['ac'] * IRCalib['fc'][0], IRCalib['cc'][0]],
                           [0, IRCalib['fc'][1], IRCalib['cc'][1]],
                           [0, 0, 1]])
RGBCalib = load_data.getRGBCalib()
Matrix_RGBCalib = np.array([[RGBCalib['fc'][0], RGBCalib['ac'] * RGBCalib['fc'][0], RGBCalib['cc'][0]],
                            [0, RGBCalib['fc'][1], RGBCalib['cc'][1]],
                            [0, 0, 1]])
exIR_RGB = load_data.getExtrinsics_IR_RGB()

# transform from the IR frame to the RGB frame
RGB_T_IR = np.zeros((4, 4))
RGB_T_IR[0:3, 0:3] = exIR_RGB['rgb_R_ir']
RGB_T_IR[:, 3] = [exIR_RGB['rgb_T_ir'][0], exIR_RGB['rgb_T_ir'][1], exIR_RGB['rgb_T_ir'][2], 1]


def sensor2body(neck, head):
    # Compose the neck (yaw about z) and head (pitch about y) joint rotations
    # into the sensor-to-body transform b_T_c
    Tz = np.zeros((4, 4))
    Ty = np.zeros((4, 4))
    Rz = euler2mat(0, 0, neck, axes='sxyz')   # neck yaw
    Tz[0:3, 0:3] = Rz
    Tz[:, 3] = [0, 0, 0.07, 1]                # 0.07 m offset along z
    Ry = euler2mat(0, head, 0, axes='sxyz')   # head pitch
    Ty[0:3, 0:3] = Ry
    Ty[:, 3] = [0, 0, 0.33, 1]                # head is 0.33 m above the body center of mass
    b_T_c = np.dot(Ty, Tz)
    return b_T_c
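# A small usage sketch: compose the neck/head transform with a body pose to map
# a homogeneous sensor-frame point into the world frame. The joint angles, the
# world pose w_T_b, and the point p_sensor are made-up placeholder values.
neck_angle, head_angle = 0.1, 0.05           # example joint angles [rad]
b_T_c = sensor2body(neck_angle, head_angle)  # sensor (camera/lidar) -> body
w_T_b = np.eye(4)                            # body -> world (placeholder pose)
p_sensor = np.array([1.0, 0.0, 0.0, 1.0])    # a point 1 m ahead of the sensor
p_world = w_T_b @ b_T_c @ p_sensor           # the same point in world coordinates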
def texture_mapping(MAP, w_T_b_best):
    # Build a texture map of the floor from RGB/depth frames using the pose of
    # the best particle; projecting colors into the map cells is still TODO.
    IRCalib = getIRCalib()
    Matrix_IRCalib = np.array([[IRCalib['fc'][0], IRCalib['ac'] * IRCalib['fc'][0], IRCalib['cc'][0]],
                               [0, IRCalib['fc'][1], IRCalib['cc'][1]],
                               [0, 0, 1]])
    RGBCalib = getRGBCalib()
    Matrix_RGBCalib = np.array([[RGBCalib['fc'][0], RGBCalib['ac'] * RGBCalib['fc'][0], RGBCalib['cc'][0]],
                                [0, RGBCalib['fc'][1], RGBCalib['cc'][1]],
                                [0, 0, 1]])
    exIR_RGB = getExtrinsics_IR_RGB()

    l0 = get_lidar("lidar/train_lidar0")
    r0 = get_rgb("cam/RGB_0")
    d0 = get_depth("cam/DEPTH_0")

    # collect the lidar and depth timestamps
    lidar_t = [np.array(l['t'][0]) for l in l0]
    depth_t = [np.array(d['t'][0]) for d in d0]

    rgb_it = [r['image'] for r in r0]
    rgb_angle = [r['head_angles'] for r in r0]
    rgb_neck = rgb_angle[0][0]   # neck angle of the first RGB frame
    rgb_head = rgb_angle[0][1]   # head angle of the first RGB frame
    depth_dt = [d['depth'] for d in d0]

    # Time alignment: for every depth timestamp find the closest lidar timestamp
    xx = np.array([t[0] for t in lidar_t])
    yy = np.array([t[0] for t in depth_t])
    idx1 = np.searchsorted(xx, yy, side="left").clip(max=xx.size - 1)
    mask = (idx1 > 0) & \
           ((idx1 == len(xx)) | (np.fabs(yy - xx[idx1 - 1]) < np.fabs(yy - xx[idx1])))
    # index1[k] is the index of the lidar scan closest in time to depth frame k
    index1 = np.array(idx1 - mask).tolist()

    # transform from the IR (depth) camera frame to the RGB camera frame: rgb_T_ir
    rgb_T_ir = np.zeros((4, 4))
    rgb_T_ir[0:3, 0:3] = exIR_RGB['rgb_R_ir']
    rgb_T_ir[0, 3] = exIR_RGB['rgb_T_ir'][0]
    rgb_T_ir[1, 3] = exIR_RGB['rgb_T_ir'][1]
    rgb_T_ir[2, 3] = exIR_RGB['rgb_T_ir'][2]
    rgb_T_ir[3, 3] = 1

    # transform from the RGB camera frame to the body frame: b_T_rgb
    Tneck = np.zeros((4, 4))
    Thead = np.zeros((4, 4))
    Rn = euler2mat(0, 0, rgb_neck, axes='sxyz')   # neck yaw
    Tneck[0:3, 0:3] = Rn
    Tneck[0:3, 3] = [0, 0, 0.07]
    Tneck[3, 3] = 1
    Rh = euler2mat(0, rgb_head, 0, axes='sxyz')   # head pitch
    Thead[0:3, 0:3] = Rh
    Thead[0:3, 3] = [0, 0, 0.33]                  # head is 0.33 m above the body center of mass
    Thead[3, 3] = 1
    b_T_rgb = np.dot(Thead, Tneck)

    # w_T_b_best is the pose of the current best particle
    w_T_c_rgb = np.dot(w_T_b_best, b_T_rgb)

    # o_T_c: regular (canonical) camera frame to optical frame (x right, y down, z forward)
    o_T_c = np.array([[0, -1, 0, 0],
                      [0, 0, -1, 0],
                      [1, 0, 0, 0],
                      [0, 0, 0, 1]])

    # texture map: one RGB triple per map cell; filling the cells from the
    # time-aligned RGB/depth frames is left as TODO below
    tmt = np.zeros((MAP['sizex'], MAP['sizey'], 3))
    #tmt[my_x, my_y, 0] =
    #tmt[my_x, my_y, 1] =
    #tmt[my_x, my_y, 2] =
    return tmt
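# A self-contained sketch of the nearest-timestamp alignment used in
# texture_mapping above, with made-up timestamp arrays, so the
# searchsorted/mask trick can be checked in isolation.
lidar_ts = np.array([0.0, 0.10, 0.21, 0.30, 0.41])   # sorted lidar timestamps
depth_ts = np.array([0.04, 0.29, 0.40])              # depth frame timestamps
idx = np.searchsorted(lidar_ts, depth_ts, side="left").clip(max=lidar_ts.size - 1)
shift = (idx > 0) & ((idx == len(lidar_ts)) |
                     (np.fabs(depth_ts - lidar_ts[idx - 1]) < np.fabs(depth_ts - lidar_ts[idx])))
nearest = idx - shift   # -> array([0, 3, 4]): closest lidar scan per depth frame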
# Specify map properties
resolution = .05
xmin = -40
ymin = -40
xmax = 40
ymax = 40
numParticle = 10
N_threshold = 35
yRange = np.arange(-resolution, resolution + 0.01, resolution)
xRange = np.arange(-resolution, resolution + 0.01, resolution)

depthFolder = 'C:/Users/Baoqian Wang/Google Drive/cam/DEPTH_0'
rgbFolder = 'C:/Users/Baoqian Wang/Google Drive/cam/RGB_0'
depthImageList = ld.get_depth(depthFolder)
rgbImageList = ld.get_rgb(rgbFolder)

# Initialize ParticleSlam object
particleSlam = ParticleFilterSlam(numParticle, N_threshold, resolution,
                                  xmin, ymin, xmax, ymax, xRange, yRange)

# Get lidar data
lidar = ld.get_lidar("./lidar/train_lidar0")

# Get the head angles data
joint = ld.get_joint("./joint/train_joint0")

IRcalibration = ld.getIRCalib()

# Run the SLAM
MAP, bestParticles = particleSlam.slam(lidar, joint, rgbImageList, depthImageList)

#fig1 = plt.figure(2)
#plt.imshow(MAP['show_map'], cmap = "hot")
#plt.show()
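# A minimal sketch (not part of the pipeline above) of how the map parameters
# translate world coordinates into grid-cell indices; the helper name
# world2cell is hypothetical, and the defaults reuse xmin, ymin, and resolution
# from the script above.
def world2cell(x, y, xmin=-40, ymin=-40, resolution=0.05):
    # cell (0, 0) corresponds to the (xmin, ymin) corner of the map
    cx = np.int16(np.round((x - xmin) / resolution))
    cy = np.int16(np.round((y - ymin) / resolution))
    return cx, cy

# e.g. world2cell(0.0, 0.0) -> (800, 800), the center cell of the 80 m x 80 m map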
y_corr_range_cnt = len(y_corr_range)
corr_center_index = len(x_corr_range) * len(y_corr_range) // 2   # integer index of the zero-offset cell
yaw_corr_range = 1 * x_corr_range
yaw_corr_range_cnt = len(yaw_corr_range)
log_fill = np.log(8)  # choosing g(1) = 8

# camera/depth related variables, only if RGB/DEPTH data exists
if rgb_exists:
    rgb_index, rgb_cnt = 0, len(RGBs)
    depth_index, depth_cnt = 0, len(depths)

    # extrinsics: IR (depth) camera to RGB camera
    exir_rgb = load.getExtrinsics_IR_RGB()
    ir_to_RGB = np.eye(4)
    ir_to_RGB[:3, :3], ir_to_RGB[:3, 3] = exir_rgb['rgb_R_ir'], exir_rgb['rgb_T_ir']

    # IR and RGB intrinsic matrices (alpha_c is 0, so the skew entry stays 0)
    ir_calib = load.getIRCalib()
    K_ir = np.eye(3)
    K_ir[[0, 1], [0, 1]], K_ir[:2, 2] = ir_calib['fc'], ir_calib['cc']
    rgb_Calib = load.getRGBCalib()
    K_rgb = np.eye(3)
    K_rgb[[0, 1], [0, 1]], K_rgb[:2, 2] = rgb_Calib['fc'], rgb_Calib['cc']

    # rotation from the optical frame (x right, y down, z forward) to the
    # regular camera frame (x forward, y left, z up)
    optical_to_regular = np.zeros((3, 3))
    optical_to_regular[[0, 1, 2], [2, 0, 1]] = np.array([1, -1, -1])

    # Kinect sensor pose in the head frame
    kinect_to_head = np.eye(4)
    kinect_to_head[2, 3] = 0.07

    rgb_nrows, rgb_ncols, _ = RGBs[0][