def nuscenes_data_prep(root_path, info_prefix, version, dataset_name, out_dir, max_sweeps=10):
    """Prepare nuScenes data: info '.pkl' files, 2D annotations and GT database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        dataset_name (str): The dataset class name.
        out_dir (str): Output directory of the groundtruth database info.
        max_sweeps (int): Number of input consecutive frames. Default: 10
    """
    nuscenes_converter.create_nuscenes_infos(
        root_path, info_prefix, version=version, max_sweeps=max_sweeps)
    # The test split carries no annotations, so there is nothing to export.
    if version == 'v1.0-test':
        return
    for split in ('train', 'val'):
        info_path = osp.join(root_path, f'{info_prefix}_infos_{split}.pkl')
        nuscenes_converter.export_2d_annotation(
            root_path, info_path, version=version)
    create_groundtruth_database(dataset_name, root_path, info_prefix,
                                f'{out_dir}/{info_prefix}_infos_train.pkl')
def kitti_data_prep(root_path, info_prefix, version, out_dir):
    """Prepare KITTI data: info '.pkl' files, 2D annotations and GT database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        out_dir (str): Output directory of the groundtruth database info.
    """
    kitti.create_kitti_info_file(root_path, info_prefix)
    kitti.create_reduced_point_cloud(root_path, info_prefix)
    # Export 2D annotations for every split whose info file was just written.
    for split in ('train', 'val', 'trainval', 'test'):
        info_path = osp.join(root_path, f'{info_prefix}_infos_{split}.pkl')
        kitti.export_2d_annotation(root_path, info_path)
    create_groundtruth_database(
        'KittiDataset',
        root_path,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        mask_anno_path='instances_train.json',
        with_mask=(version == 'mask'))
def createwaymo_info(root_path, info_prefix, out_dir, workers, max_sweeps=5):
    """Generate the info file and GT database for pre-converted Waymo data.

    NOTE(review): `root_path` and `workers` are not used in this body —
    presumably kept for a signature consistent with `waymo_data_prep`;
    confirm with callers.

    Args:
        root_path (str): Path of dataset root (unused here).
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used (unused here).
        max_sweeps (int): Number of input consecutive frames. Default: 5.
            Pose information of these frames is stored for later use.
    """
    kitti_dir = osp.join(out_dir, 'kitti_format')
    # Build the ImageSets train/val/test split files before creating infos.
    create_trainvaltestsplitfile(kitti_dir)
    kitti.create_waymo_info_file(kitti_dir, info_prefix, max_sweeps=max_sweeps)
    create_groundtruth_database(
        'WaymoDataset',
        kitti_dir,
        info_prefix,
        f'{kitti_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        with_mask=False)
def waymo_data_prep(root_path, info_prefix, version, out_dir, workers, max_sweeps=5):
    """Convert raw Waymo data to KITTI format, then build infos and GT database.

    NOTE(review): `version` is not used in this body — confirm with callers.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version (unused here).
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
        max_sweeps (int): Number of input consecutive frames. Default: 5.
            Pose information of these frames is stored for later use.
    """
    from tools.data_converter import waymo_converter as waymo

    for idx, split in enumerate(['training', 'validation', 'test']):
        load_dir = osp.join(root_path, split)
        # Validation frames are merged into the KITTI-format 'training' folder.
        target = 'training' if split == 'validation' else split
        save_dir = osp.join(out_dir, 'kitti_format', target)
        converter = waymo.Waymo2KITTI(
            load_dir,
            save_dir,
            prefix=str(idx),
            workers=workers,
            test_mode=(split == 'test'))
        converter.convert()
    # Generate waymo infos from the converted KITTI-format tree.
    kitti_dir = osp.join(out_dir, 'kitti_format')
    kitti.create_waymo_info_file(kitti_dir, info_prefix, max_sweeps=max_sweeps)
    create_groundtruth_database(
        'WaymoDataset',
        kitti_dir,
        info_prefix,
        f'{kitti_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        with_mask=False)
def carla_data_prep(root_path, info_prefix, version, dataset_name, out_dir, max_prev_samples=10):
    """Prepare CARLA data: info '.pkl' files, GT database and 2D annotations.

    NOTE(review): `version` is not used in this body — presumably kept for a
    signature consistent with the other *_data_prep helpers; confirm.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version (unused here).
        dataset_name (str): The dataset class name.
        out_dir (str): Output directory of the groundtruth database info.
        max_prev_samples (int): Number of preceding samples recorded per
            frame. Default: 10.
    """
    # Hoisted to the top of the function (was previously imported mid-body).
    import pathlib

    carla_data_converter.create_carla_infos(
        root_path, info_prefix, max_prev_samples=max_prev_samples, balance=None)
    create_groundtruth_database(dataset_name, root_path, info_prefix,
                                f"{out_dir}/{info_prefix}_infos_train.pkl")
    # Export 2D annotations for each split. Each split has its own data
    # folder (name supplied by the converter module) and its own info file.
    # The redundant duplicate path assignments of the original are removed.
    splits = [
        ('train', carla_data_converter.train_set_folder_name),
        ('val', carla_data_converter.val_set_folder_name),
        ('test', carla_data_converter.test_set_folder_name),
        # extension split
        ('easy_test', carla_data_converter.easy_test_set_folder_name),
    ]
    for split, folder_name in splits:
        info_path = osp.join(root_path, f"{info_prefix}_infos_{split}.pkl")
        set_folder = pathlib.Path(root_path).joinpath(folder_name)
        carla_data_converter.export_2d_annotation(set_folder, info_path)
def aiodrive_data_prep(root_path, info_prefix, out_dir):
    """Prepare data related to the AIODrive dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the groundtruth database info.
    """
    # Info-file generation is currently disabled:
    # aiodrive.create_aiodrive_info_file(root_path, info_prefix, out_dir)

    # The info files are shared between different point types, so the
    # velodyne train info file is used regardless of `info_prefix`.
    create_groundtruth_database(
        'AIODriveDataset',
        root_path,
        info_prefix,
        f'{out_dir}/aiodrive_velodyne_infos_train.pkl',
        database_save_path=osp.join(out_dir, f'{info_prefix}_gt_database'),
        db_info_save_path=osp.join(out_dir, f'{info_prefix}_dbinfos_train.pkl'),
        relative_path=False,
        with_mask=False)
def astyx_data_prep(root_path, info_prefix, pc_type, out_dir):
    """Prepare Astyx data: info '.pkl' files, reduced clouds and GT database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames; the point-cloud
            type is appended to it before use.
        pc_type (str): Point-cloud type suffix.
        out_dir (str): Output directory of the groundtruth database info.
    """
    # Use a fresh local instead of rebinding the parameter.
    full_prefix = info_prefix + pc_type
    astyx_converter.create_astyx_info_file(root_path, full_prefix, pc_type)
    astyx_converter.create_reduced_point_cloud(root_path, full_prefix)
    create_groundtruth_database(
        'AstyxDataset',
        root_path,
        full_prefix,
        f'{out_dir}/{full_prefix}_infos_train.pkl',
        relative_path=False,
        mask_anno_path='instances_train.json',
        pc_type=pc_type)
def kitti_data_prep(root_path, info_prefix, version, out_dir):
    """Prepare KITTI info '.pkl' files and the groundtruth database.

    NOTE(review): this re-defines `kitti_data_prep` and shadows an earlier
    definition in this file that additionally exported 2D annotations —
    confirm which variant is intended.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version; masks are used when it equals "mask".
        out_dir (str): Output directory of the groundtruth database info.
    """
    kitti.create_kitti_info_file(root_path, info_prefix)
    kitti.create_reduced_point_cloud(root_path, info_prefix)
    train_info_path = f"{out_dir}/{info_prefix}_infos_train.pkl"
    create_groundtruth_database(
        "KittiDataset",
        root_path,
        info_prefix,
        train_info_path,
        relative_path=False,
        mask_anno_path="instances_train.json",
        with_mask=(version == "mask"),
    )
def extended_nuscenes_data_prep(root_path, info_prefix, version, dataset_name, out_dir, max_prev_samples=10):
    """Build the GT database for the extended nuScenes dataset.

    Info-file creation and 2D-annotation export through
    `extended_nuscenes_converter` are currently disabled; only the
    groundtruth database is generated from an existing train info file,
    so `root_path`, `version` and `max_prev_samples` are effectively unused
    beyond the database call.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version (unused while conversion is disabled).
        dataset_name (str): The dataset class name.
        out_dir (str): Output directory of the groundtruth database info.
        max_prev_samples (int): Number of input consecutive frames
            (unused while conversion is disabled). Default: 10.
    """
    train_info_path = f"{out_dir}/{info_prefix}_infos_train.pkl"
    create_groundtruth_database(dataset_name, root_path, info_prefix,
                                train_info_path)
def deeproute_data_prep(root_path, info_prefix, out_dir):
    """Prepare Deeproute info files and the groundtruth database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the groundtruth database info.
    """
    deeproute_converter.create_deeproute_info_file(root_path, info_prefix)
    train_info_path = f'{out_dir}/{info_prefix}_infos_train.pkl'
    create_groundtruth_database('DeeprouteDataset', root_path, info_prefix,
                                train_info_path)