# Common imports used by the examples in this section.
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline


class C2Pipe(Pipeline):
    # `async` is a reserved keyword in Python 3, so the flag is spelled `async_`.
    def __init__(self, batch_size, num_threads, device_id, pipelined=True, async_=True):
        super(C2Pipe, self).__init__(batch_size, num_threads, device_id,
                                     exec_pipelined=pipelined,
                                     exec_async=async_)
        self.input = ops.ExternalSource()
        self.decode = ops.HostDecoder(output_type=types.RGB)
        self.rcm = ops.FastResizeCropMirror(crop=[224, 224])
        self.np = ops.NormalizePermute(device="gpu",
                                       output_dtype=types.FLOAT16,
                                       mean=[128., 128., 128.],
                                       std=[1., 1., 1.],
                                       height=224,
                                       width=224,
                                       image_type=types.RGB)
        self.uniform = ops.Uniform(range=(0., 1.))
        self.resize_uniform = ops.Uniform(range=(256., 480.))
        self.mirror = ops.CoinFlip(probability=0.5)
        self.iter = 0
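    # A minimal sketch of the rest of this pipeline, not part of the excerpt above:
    # it shows how the declared ops are typically wired in the legacy define_graph
    # API, with the Uniform/CoinFlip ops supplied as per-sample arguments.
    def define_graph(self):
        self.jpegs = self.input()
        images = self.decode(self.jpegs)
        resized = self.rcm(images,
                           crop_pos_x=self.uniform(),
                           crop_pos_y=self.uniform(),
                           mirror=self.mirror(),
                           resize_shorter=self.resize_uniform())
        # NormalizePermute runs on the GPU, so the CPU result is moved across first.
        return self.np(resized.gpu())

    def iter_setup(self):
        # `make_jpeg_batch` is a hypothetical helper standing in for whatever code
        # produces the list of encoded JPEG buffers for this iteration.
        self.feed_input(self.jpegs, make_jpeg_batch(self.batch_size, self.iter))
        self.iter += 1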
class ImagePipeline(Pipeline):
    def __init__(self, imageset_dir, image_size=128, random_shuffle=False,
                 batch_size=64, num_threads=2, device_id=0):
        super(ImagePipeline, self).__init__(batch_size, num_threads, device_id, seed=12)
        eii = ExternalInputIterator(imageset_dir, batch_size, random_shuffle)
        self.iterator = iter(eii)
        self.num_inputs = len(eii.frontal_indices)

        # The sources for the inputs and targets
        self.input = ops.ExternalSource()
        self.target = ops.ExternalSource()

        # nvJPEGDecoder below accepts CPU inputs, but returns GPU outputs (hence device="mixed")
        self.decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)

        # The rest of the pre-processing is done on the GPU
        self.res = ops.Resize(device="gpu", resize_x=image_size, resize_y=image_size)
        self.norm = ops.NormalizePermute(device="gpu", output_dtype=types.FLOAT,
                                         mean=[128., 128., 128.],
                                         std=[128., 128., 128.],
                                         height=image_size,
                                         width=image_size)
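    # A hedged sketch of how the two ExternalSource ops could be fed and composed;
    # the original excerpt stops at __init__, and the batch layout yielded by
    # ExternalInputIterator (assumed here to be (inputs, targets)) is not shown.
    def define_graph(self):
        self.images = self.input()
        self.targets = self.target()
        images = self.norm(self.res(self.decode(self.images)))
        targets = self.norm(self.res(self.decode(self.targets)))
        return images, targets

    def iter_setup(self):
        images, targets = next(self.iterator)
        self.feed_input(self.images, images)
        self.feed_input(self.targets, targets)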
class CommonPipeline(Pipeline):
    def __init__(self, batch_size, num_threads, device_id, size=1024):
        super(CommonPipeline, self).__init__(batch_size, num_threads, device_id)
        self.decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)
        self.cmn = ops.NormalizePermute(device="gpu",
                                        height=size, width=size,
                                        output_dtype=types.FLOAT,
                                        image_type=types.RGB,
                                        mean=[0.5 * 255, 0.5 * 255, 0.5 * 255],
                                        std=[0.5 * 255, 0.5 * 255, 0.5 * 255])
import pandas as pd


class DALIPipeline(Pipeline):
    def __init__(self, batch_size, num_threads, device_id, csv_path, data_path,
                 valid=False, nfold=0):
        super(DALIPipeline, self).__init__(batch_size, num_threads, device_id)
        self.data_path = data_path
        self.csv_file = csv_path
        self.valid = valid
        self.data = pd.read_csv(self.csv_file)

        # Optional n-fold split: shuffle deterministically, then keep the last fold
        # for validation and the remaining folds for training.
        if nfold > 0:
            self.data = self.data.sort_values(by=['image', 'label'])
            self.data = self.data.sample(frac=1, random_state=0).reset_index(drop=True)
            len_fold = int(len(self.data) / nfold)
            if valid:
                self.data = self.data[len_fold * (nfold - 1):].reset_index(drop=True)
            else:
                self.data = self.data[:len_fold * (nfold - 1)].reset_index(drop=True)

        # FileReader consumes a "path label" list, so dump the (possibly split) frame.
        self.data.to_csv('data/dali.txt', header=False, index=False, sep=' ')
        self.input = ops.FileReader(file_root=data_path, file_list='data/dali.txt')

        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.random_resize = ops.Resize(device="gpu", image_type=types.RGB,
                                        interp_type=types.INTERP_LINEAR)
        self.resize = ops.Resize(device="gpu", image_type=types.RGB,
                                 interp_type=types.INTERP_LINEAR,
                                 resize_x=227., resize_y=227.)
        self.cmn = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT,
                                           crop=(227, 227), image_type=types.RGB,
                                           mean=[128., 128., 128.],
                                           std=[1., 1., 1.])
        self.normalize = ops.NormalizePermute(device="gpu", height=227, width=227,
                                              image_type=types.RGB,
                                              mean=[128., 128., 128.],
                                              std=[1., 1., 1.])
        self.uniform = ops.Uniform(range=(0.0, 1.0))
        self.resize_rng = ops.Uniform(range=(256, 480))
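    # An illustrative define_graph only; the original project's graph is not part of
    # the excerpt. The training branch assumes a DALI version in which Resize accepts
    # resize_shorter as a per-sample argument input.
    def define_graph(self):
        jpegs, labels = self.input()
        images = self.decode(jpegs)
        if self.valid:
            images = self.resize(images)
            output = self.normalize(images)
        else:
            images = self.random_resize(images, resize_shorter=self.resize_rng())
            output = self.cmn(images,
                              crop_pos_x=self.uniform(),
                              crop_pos_y=self.uniform())
        return output, labels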
class NetPipeline(Pipeline):
    def __init__(self, image_dir, batch_size, num_threads, device_id, exec_async=True):
        super(NetPipeline, self).__init__(batch_size, num_threads, device_id, seed=12,
                                          exec_async=exec_async)
        self.input = ops.FileReader(file_root=image_dir, random_shuffle=True, initial_fill=21)
        self.decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)
        self.resize = ops.Resize(device="gpu", resize_x=512, resize_y=512)
        # self.centerCrop = ops.Crop(device="gpu", crop=(224, 224))
        self.norm = ops.NormalizePermute(device="gpu", height=512, width=512,
                                         mean=[x * 255 for x in [0.485, 0.456, 0.406]],
                                         std=[x * 255 for x in [0.229, 0.224, 0.225]])
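    # Illustrative only: the excerpt above declares the ops but no graph, so a
    # define_graph along these lines is assumed before the pipeline can be built.
    def define_graph(self):
        jpegs, labels = self.input()
        images = self.decode(jpegs)
        images = self.resize(images)
        return self.norm(images), labels


# Typical driving code (hedged example; "/path/to/images" is a placeholder).
pipe = NetPipeline("/path/to/images", batch_size=8, num_threads=2, device_id=0)
pipe.build()
images, labels = pipe.run()  # each output is a TensorList for the current batch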