def save_pseudo_vel(self, idx):
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    pseudolidar_filename = os.path.join(self.pseudo_lidar_dir, '%06d.bin' % (idx))
    save_filename = os.path.join(self.save_vel_pseudo_dir, '%06d.bin' % (idx))
    lidar_scan = utils.load_velo_scan(lidar_filename)
    pseudolidar_scan = utils.load_velo_scan(pseudolidar_filename)
    # Merge the real LiDAR and pseudo-LiDAR points and write them out as one .bin file.
    lidar_scan = np.concatenate((lidar_scan, pseudolidar_scan))
    lidar_scan.tofile(save_filename)
def get_depth_pc(self, idx):
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.depthpc_dir, "%06d.bin" % (idx))
    is_exist = os.path.exists(lidar_filename)
    if is_exist:
        return utils.load_velo_scan(lidar_filename), is_exist
    else:
        return None, is_exist
def get_lidar(self, idx):
    """
    :param idx: sample index
    :return: (n, 4) array of n points with x, y, z, reflectance values
    """
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    return utils.load_velo_scan(lidar_filename)
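# All of the loaders in this file delegate to utils.load_velo_scan, which is not shown
# here. Below is a minimal sketch of how that helper is typically written for
# KITTI-style velodyne .bin files (flat records of x, y, z, reflectance); the dtype and
# n_vec parameters mirror the variants further down. This is an illustrative sketch,
# not the project's actual implementation.

import numpy as np

def load_velo_scan(velo_filename, dtype=np.float32, n_vec=4):
    # Raw KITTI velodyne scans are flat float32 records; dtype/n_vec must match
    # whatever wrote the file (e.g. pseudo-LiDAR dumps may differ).
    scan = np.fromfile(velo_filename, dtype=dtype)
    return scan.reshape((-1, n_vec))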
def __getitem__(self, item):
    sample_idx = item
    # sample = self.kitti_data.sample[sample_idx]
    velo_pc = kitti_util.load_velo_scan(
        os.path.join(self.data_path_pc, sorted(os.listdir(self.data_path_pc))[item]))
    # Images are loaded as numpy arrays, so the transform pipeline must start with ToPILImage.
    img = kitti_util.load_image(
        os.path.join(self.data_path_image, sorted(os.listdir(self.data_path_image))[item]))
    calibration_path = os.path.join(
        self.data_path_calib, sorted(os.listdir(self.data_path_calib))[item])
    calibration = kitti_util.Calibration(calibration_path)
    img = transform(img)
    sample = {'image': img, 'velo_pc': velo_pc, 'calibration': calibration}
    return sample
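# The __getitem__ above returns a dict holding a variable-length (n, 4) point cloud and
# a kitti_util.Calibration object, which PyTorch's default collate cannot stack. A hedged
# usage sketch follows; the class name KittiRawDataset and its constructor arguments are
# assumptions for illustration only.

from torch.utils.data import DataLoader

dataset = KittiRawDataset(data_path_pc='velodyne/',      # hypothetical constructor
                          data_path_image='image_2/',
                          data_path_calib='calib/')

# batch_size=1 with an identity collate_fn keeps the Calibration object and the
# variable-sized point cloud intact instead of trying to tensor-stack them.
loader = DataLoader(dataset, batch_size=1, shuffle=False,
                    collate_fn=lambda batch: batch[0])

sample = next(iter(loader))
image, velo_pc, calib = sample['image'], sample['velo_pc'], sample['calibration']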
def get_lidar(self, idx):
    assert idx < self.num_samples
    lidar_filename = self.lidar_filenames[idx]
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx):
    assert idx < self.num_samples
    lidar_filename = self.lidar_filenames[idx]
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx):
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx):
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx):
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    lidar_scan = utils.load_velo_scan(lidar_filename)
    return lidar_scan
def get_lidar(self, idx, drive_idx):
    lidar_filename = os.path.join(self.lidar_dir, '%04d' % (drive_idx), '%06d.bin' % (idx))
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx):
    assert idx in self.lidar_filenames
    lidar_filename = self.lidar_filenames[idx]
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx: str):
    lidar_filename = os.path.join(self.lidar_dir, '{}.bin'.format(idx))
    return utils.load_velo_scan(lidar_filename)
def get_lidar(self, idx, dtype=np.float32, n_vec=4):
    idx = self.sample_ids[idx]
    lidar_filename = os.path.join(self.lidar_dir, "%06d.bin" % (idx))
    return utils.load_velo_scan(lidar_filename, dtype, n_vec)
def get_lidar(idx, dtype=np.float64, n_vec=4):
    # Module-level variant: relies on `lidar_dir` and `path` (os.path) being defined at module scope.
    lidar_filename = path.join(lidar_dir, "%06d.bin" % (idx))
    return utils.load_velo_scan(lidar_filename, dtype, n_vec)
def get_lidar(self, idx, dtype=np.float32, n_vec=4):
    # assert idx < self.num_samples
    # self.index_format is expected to be a printf-style pattern such as "%06d":
    # the f-string builds the filename template and the trailing % fills in the index.
    lidar_filename = os.path.join(self.lidar_dir, f"{self.index_format}.bin" % (idx))
    print(lidar_filename)
    return utils.load_velo_scan(lidar_filename, dtype, n_vec)
def get_lidar(self, idx, dtype=np.float64, n_vec=4):
    assert idx < self.num_samples
    lidar_filename = os.path.join(self.lidar_dir, "%06d.bin" % (idx))
    print(lidar_filename)
    # Note: dtype must match how the .bin was written; raw KITTI velodyne scans are float32.
    return utils.load_velo_scan(lidar_filename, dtype, n_vec)
def get_lidar(self, idx):
    # Load the LiDAR point cloud for this sample.
    assert idx < self.num_samples  # raise an error if the index is out of range
    lidar_filename = os.path.join(self.lidar_dir, '%06d.bin' % (idx))
    return utils.load_velo_scan(lidar_filename)
def load_frame_data(self, data_idx_str, random_flip=False, random_rotate=False,
                    random_shift=False, pca_jitter=False):
    """Load one frame."""
    if self.use_aug_scene:
        data_idx = int(data_idx_str[2:])
    else:
        data_idx = int(data_idx_str)
    # print(data_idx_str)
    calib = self.kitti_dataset.get_calibration(data_idx)  # 3 by 4 matrix
    image = self.kitti_dataset.get_image(data_idx)
    img_height, img_width = image.shape[0:2]
    # data augmentation
    if pca_jitter:
        image = apply_pca_jitter(image)[0]

    objects = []
    if not self.use_aug_scene or data_idx_str[:2] == '00':
        pc_velo = self.kitti_dataset.get_lidar(data_idx)
        _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(
            pc_velo[:, 0:3], calib, 0, 0, img_width, img_height, True)
        pc_velo = pc_velo[img_fov_inds, :]
        choice = np.random.choice(pc_velo.shape[0], self.npoints, replace=True)
        point_set = pc_velo[choice, :]
        pc_rect = np.zeros_like(point_set)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(point_set[:, 0:3])
        pc_rect[:, 3] = point_set[:, 3]
        if self.is_training:
            objects = self.kitti_dataset.get_label_objects(data_idx)
    else:
        pc_rect = utils.load_velo_scan(
            os.path.join(self.kitti_path,
                         'aug_scene/rectified_data/{0}.bin'.format(data_idx_str)))
        choice = np.random.choice(pc_rect.shape[0], self.npoints, replace=True)
        pc_rect = pc_rect[choice, :]
        if self.is_training:
            objects = utils.read_label(
                os.path.join(self.kitti_path,
                             'aug_scene/aug_label/{0}.txt'.format(data_idx_str)))
    # Materialize the filter so the object list can be iterated more than once below.
    objects = list(filter(
        lambda obj: obj.type in self.types_list and obj.difficulty in self.difficulties_list,
        objects))
    gt_boxes = []  # ground truth boxes
    # start = time.time()
    seg_mask = np.zeros((pc_rect.shape[0]))
    # data augmentation
    if random_flip and np.random.random() > 0.5:  # 50% chance of flipping
        pc_rect[:, 0] *= -1
        for obj in objects:
            obj.t = [-obj.t[0], obj.t[1], obj.t[2]]
            # ensure that ry stays in [-pi, pi]
            if obj.ry >= 0:
                obj.ry = np.pi - obj.ry
            else:
                obj.ry = -np.pi - obj.ry
    if random_rotate:
        ry = (np.random.random() - 0.5) * math.radians(20)  # -10~10 degrees
        pc_rect[:, 0:3] = rotate_points_along_y(pc_rect[:, 0:3], ry)
        for obj in objects:
            obj.t = rotate_points_along_y(obj.t, ry)
            obj.ry -= ry
            # ensure that ry stays in [-pi, pi]
            if obj.ry > np.pi:
                obj.ry -= 2 * np.pi
            elif obj.ry < -np.pi:
                obj.ry += 2 * np.pi

    proposal_of_point = {}  # point index to proposal vector
    gt_box_of_point = {}  # point index to corners_3d
    for obj in objects:
        _, obj_box_3d = utils.compute_box_3d(obj, calib.P)
        _, obj_mask = extract_pc_in_box3d(pc_rect, obj_box_3d)
        if np.sum(obj_mask) == 0:
            # label without 3d points
            # print('skip object without points')
            continue
        # IMPORTANT: this must match with NUM_SEG_CLASSES
        # seg_mask[obj_mask] = g_type2onehotclass[obj.type]
        seg_mask[obj_mask] = 1
        gt_boxes.append(obj_box_3d)
        obj_idxs = np.where(obj_mask)[0]
        # data augmentation
        # FIXME: jittering points makes the validation loss grow,
        # and jittered points may also fall outside the image view.
        if random_shift and False:  # jitter object points (currently disabled)
            pc_rect[obj_idxs, :3] = shift_point_cloud(pc_rect[obj_idxs, :3], 0.02)
        for idx in obj_idxs:
            proposal_of_point[idx] = box_encoder.encode(obj, pc_rect[idx, :3])
            gt_box_of_point[idx] = obj_box_3d
    # self.viz_frame(pc_rect, seg_mask, gt_boxes)
    # return pc_rect, seg_mask, proposal_of_point, gt_box_of_point, gt_boxes
    calib_matrix = np.copy(calib.P)
    calib_matrix[0, :] *= (1200.0 / image.shape[1])
    calib_matrix[1, :] *= (360.0 / image.shape[0])
    # print('construct', time.time() - start)
    return {
        'pointcloud': pc_rect,
        'image': cv2.resize(image, (1200, 360)),
        'calib': calib_matrix,
        'mask_label': seg_mask,
        'proposal_of_point': self.get_proposal_out(proposal_of_point),
        'gt_box_of_point': self.get_gt_box_of_points(gt_box_of_point),
        'gt_boxes': gt_boxes,
        'pc_choice': choice
    }
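# A short usage sketch of the dictionary returned by load_frame_data. The instance name
# `provider`, the frame id string and the augmentation flags are illustrative
# assumptions; the keys and shapes follow the code above.

frame = provider.load_frame_data('000123', random_flip=True, random_rotate=True)

points = frame['pointcloud']   # (npoints, 4): rect-frame x, y, z plus intensity
image = frame['image']         # frame image resized to 1200 x 360
calib = frame['calib']         # copy of calib.P rescaled to the resized image
seg = frame['mask_label']      # per-point 0/1 foreground mask
boxes = frame['gt_boxes']      # list of (8, 3) ground-truth box corners
choice = frame['pc_choice']    # indices of the points sampled from the raw scan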