def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None,
          multi_gpu=False) -> DatasetWrapper:
    """Builds a dataset based on the InputReader config.

    Args:
        input_reader_config: An input_reader_pb2.InputReader object.

    Returns:
        A DatasetWrapper around the dataset described by input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    dataset = dataset_builder.build(
        input_reader_config,
        model_config,
        training,
        voxel_generator,
        target_assigner,
        multi_gpu=multi_gpu)  # returns a dataset
    dataset = DatasetWrapper(dataset)
    return dataset
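# Illustrative sketch (not part of the original file): DatasetWrapper is used above
# but not defined in this excerpt. In codebases of this kind it is typically a thin
# adapter that exposes the built dataset through the PyTorch Dataset API so it can be
# fed to a DataLoader. The class below is a minimal assumption of that role; the
# repo's actual wrapper may do more.
import torch.utils.data


class DatasetWrapper(torch.utils.data.Dataset):
    """Thin adapter exposing the built dataset through torch.utils.data.Dataset."""

    def __init__(self, dataset):
        self._dataset = dataset

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, idx):
        # Forward indexing to the underlying dataset; each item is usually a
        # dict of numpy arrays produced by the preprocessing pipeline.
        return self._dataset[idx]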
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None,
          net=None,
          multi_gpu=False) -> Dataset:
    """Builds a dataset based on the InputReader config.

    Args:
        input_reader_config: An input_reader_pb2.InputReader object.

    Returns:
        A Dataset built from the input_reader_config, wrapped according to the
        cum_lc_wrapper settings.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    dataset = dataset_builder.build(
        input_reader_config,
        model_config,
        training,
        voxel_generator,
        target_assigner,
        multi_gpu=multi_gpu)

    cum_lc_config = input_reader_config.cum_lc_wrapper
    if cum_lc_config.lc_policy != "":
        # A non-empty policy means the dataset needs the cumulative LC wrapper.
        dataset = CumLCDatasetWrapper(dataset,
                                      net,
                                      cum_lc_config.lc_policy,
                                      cum_lc_config.lc_horizon,
                                      cum_lc_config.init_lidar_num_beams,
                                      cum_lc_config.sparsify_return,
                                      use_cache=not training,
                                      contiguous=not training)
    else:
        dataset = LidarDatasetWrapper(
            dataset,
            num_workers=input_reader_config.preprocess.num_workers)
    return dataset
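# Illustrative sketch (not part of the original file): regardless of which wrapper is
# chosen above, the built dataset is normally consumed through a torch DataLoader with
# a custom collate function that merges per-example dicts into batched arrays. The
# helper name make_loader and the collate_fn argument are assumptions for illustration,
# not the repo's confirmed API.
import torch.utils.data


def make_loader(dataset, batch_size, training, collate_fn, num_workers=4):
    """Wrap a built dataset in a DataLoader; shuffle only during training."""
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=training,
        num_workers=num_workers,
        pin_memory=False,
        collate_fn=collate_fn)  # merges a list of example dicts into one batch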
def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None,
          multi_gpu=False,
          generate_anchors_cachae=True,  # True for PointPillars and SECOND
          segmentation=False,
          bcl_keep_voxels=None,
          seg_keep_points=None,
          points_per_voxel=None) -> DatasetWrapper:
    """Builds a dataset based on the InputReader config.

    Args:
        input_reader_config: An input_reader_pb2.InputReader object.

    Returns:
        A DatasetWrapper around the dataset described by input_reader_config.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    dataset = dataset_builder.build(
        input_reader_config,
        model_config,
        training,
        voxel_generator,
        target_assigner,
        multi_gpu=multi_gpu,
        generate_anchors_cachae=generate_anchors_cachae,
        segmentation=segmentation,
        bcl_keep_voxels=bcl_keep_voxels,
        seg_keep_points=seg_keep_points,
        points_per_voxel=points_per_voxel)  # the anchor cache saves memory but does not suit BCL
    dataset = DatasetWrapper(dataset)
    return dataset
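# Illustrative sketch (not part of the original file): the generate_anchors_cachae flag
# above reflects a memoization trade-off. With a fixed voxel grid (PointPillars/SECOND)
# anchors depend only on the feature-map size, so they can be generated once and
# reused; a BCL-style model that keeps a variable set of voxels per sample cannot
# reuse one cached anchor set. The helper below only illustrates that pattern; the
# names are assumptions, not the repo's actual cache implementation.
_anchor_cache = {}


def get_anchors(feature_map_size, generate_fn, use_cache=True):
    """Return anchors for a feature-map size, optionally memoized across calls."""
    key = tuple(feature_map_size)
    if use_cache and key in _anchor_cache:
        return _anchor_cache[key]       # reuse anchors generated earlier
    anchors = generate_fn(feature_map_size)  # regenerate (e.g. per BCL sample)
    if use_cache:
        _anchor_cache[key] = anchors
    return anchors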