def __getitem__(
        self, index: int) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    index = index % self.size

    img1 = read_gen(self.image_list[index][0])
    img2 = read_gen(self.image_list[index][1])
    flow = read_gen(self.flow_list[index])
    data = [[img1, img2], [flow]]

    if self.is_cropped:
        crop_type = 'rand'
        csize = self.crop_size
    else:
        crop_type = 'center'
        csize = self.render_size

    # Instantiate the transformer
    if self.transform is None:
        transformer = f_transforms.Compose([
            f_transforms.Crop(csize, crop_type=crop_type),
            f_transforms.ModToTensor(),
        ])
    else:
        transformer = self.transform

    res_data = tuple(transformer(*data))
    return res_data
def write_hdf5(dataset_dict: Dict[str, List[str]], filename: str) -> None:
    filename += '.h5'
    dataloader = {}

    # Define the placeholders
    with h5py.File(filename, "w") as out:
        for key, value in dataset_dict.items():
            # Define the shape from the first flow file
            file_shape = read_gen(value[0]).shape
            g = out.create_group(key)

            # Image(s) placeholder
            g.create_dataset(
                "data1", (len(value), file_shape[0], file_shape[1]),
                dtype=np.uint8)
            g.create_dataset(
                "data2", (len(value), file_shape[0], file_shape[1]),
                dtype=np.uint8)

            # Label placeholder
            g.create_dataset(
                "label",
                (len(value), file_shape[0], file_shape[1], file_shape[2]),
                dtype=np.float32)

            # Instantiate the dataloader
            dataset = FromList(value)
            dataloader[key] = DataLoader(dataset,
                                         shuffle=False,
                                         num_workers=8,
                                         collate_fn=lambda x: x)

    # Fill in the dataset variables; the with-statement closes the file,
    # so no explicit out.close() is needed
    with h5py.File(filename, "a") as out:
        for key, dataload in tqdm(dataloader.items(), ncols=100,
                                  desc='Iterate over DataLoader'):
            for i, data in enumerate(
                    tqdm(dataload, ncols=100,
                         desc=f"{key.upper()} dataset", unit="set")):
                images, flow, fname, fshape = data[0]
                out[key]["data1"][i] = images[0]
                out[key]["data2"][i] = images[1]
                out[key]["label"][i] = flow
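# A minimal usage sketch for write_hdf5. The directory layout, split names,
# and output name below are hypothetical; any mapping from a split name to a
# list of .flo paths that FromList accepts works the same way.
def _demo_write_hdf5(data_root: str = 'data') -> None:
    dataset_dict = {
        'train': sorted(glob(os.path.join(data_root, 'train', '*.flo'))),
        'val': sorted(glob(os.path.join(data_root, 'val', '*.flo'))),
    }
    write_hdf5(dataset_dict, 'piv_dataset')  # writes piv_dataset.h5

# Note: collate_fn=lambda x: x keeps each batch as a plain list of samples
# (default batch_size=1), which is why the writer indexes data[0].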
def __init__(self,
             inference_size: Tuple = (-1, -1),
             root: str = '',
             set_type: Optional[str] = None) -> None:
    self.render_size = list(inference_size)
    exts = ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.ppm']

    self.flow_list = []
    self.image_list = []

    root_ext = os.path.splitext(root)[1]
    if root_ext and set_type is not None:
        if root_ext == '.json':
            flo_list = json_pickler(root, set_type=set_type)
        else:
            raise ValueError(
                'Only the json format is currently supported! '
                f'Change the input path ({root}).')
    else:
        flo_list = flo_files_from_folder(root)

    for flo in flo_list:
        if 'test' in flo:  # Skip test files
            continue

        fbase = os.path.splitext(flo)[0]
        fbase = fbase.rsplit('_', 1)[0]

        # Find the image pair that matches the flow file
        img1, img2 = None, None
        for ext in exts:
            img1 = str(fbase) + '_img1' + ext
            img2 = str(fbase) + '_img2' + ext
            if os.path.isfile(img1):
                break

        if not os.path.isfile(img1) or not os.path.isfile(
                img2) or not os.path.isfile(flo):
            continue

        self.image_list += [[img1, img2]]
        self.flow_list += [flo]

    self.size = len(self.image_list)
    if self.size > 0:
        self.frame_size = read_gen(self.image_list[0][0]).size

        # Round the render size down to the nearest multiple of 64
        if (self.render_size[0] < 0) or (self.render_size[1] < 0) or \
                (self.frame_size[0] % 64) or (self.frame_size[1] % 64):
            self.render_size[0] = (self.frame_size[0] // 64) * 64
            self.render_size[1] = (self.frame_size[1] // 64) * 64
    else:
        self.frame_size = None

    # Sanity check on the number of image pairs and flows
    assert len(self.image_list) == len(self.flow_list)
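# Inferred from the matching loop above (not a stated spec): each sample is a
# flow file plus two frames that share its base name, e.g.
#   00001_flow.flo  <->  00001_img1.ppm, 00001_img2.ppm
# Paths containing 'test' are skipped, and any sample with a missing frame or
# flow file is silently dropped.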
def __getitem__(
        self, index: int) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    # Init.
    index = index % self.size

    img1 = read_gen(self.image_list[index][0])
    img2 = read_gen(self.image_list[index][1])
    flow = read_gen(self.flow_list[index])
    data = [[img1, img2], [flow]]

    # Cropper and to-tensor transformer for the images and flow
    transformer = f_transforms.Compose([
        f_transforms.Crop(self.render_size, crop_type='center'),
        f_transforms.ModToTensor(),
    ])

    res_data = tuple(transformer(*data))
    return res_data
def __getitem__(self, idx: int
                ) -> Tuple[List[np.ndarray], np.ndarray, str, List[int]]:
    # Call via indexing
    floname = self.dataset_list[idx]
    imnames = [imname_modifier(floname, i + 1) for i in range(2)]
    filename = str(
        os.path.splitext(os.path.basename(floname))[0].rsplit('_', 1)[0])

    # Instantiate the image and flow objects
    flo = read_gen(floname)  # Flow
    fshape = list(flo.shape[:-1])

    if self.raw_reading:
        # Keep raw bytes: serialize the flow and read the images as-is
        flo = pickle.dumps(flo)
        images = [raw_reader(imname) for imname in imnames]
    else:
        images = [np.array(Image.open(imname)) for imname in imnames]

    return images, flo, filename, fshape
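# A small sketch of consuming the two reading modes (the decoding shown for
# raw_reading=True assumes raw_reader returns the encoded file bytes):
#
#   images, flo, filename, fshape = dataset[0]
#   if dataset.raw_reading:
#       flo = pickle.loads(flo)  # back to an np.ndarray
#       images = [np.array(Image.open(io.BytesIO(b))) for b in images]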
def __getitem__(self, index: int) -> Tuple[List[torch.Tensor], List[str]]:
    # Init.
    index = index % self.size
    im_name = [name_list[index] for name_list in self.name_list]

    # Cropper and to-tensor transformer for the images
    transformer = transforms.Compose([
        transforms.CenterCrop(self.render_size),
        transforms.ToTensor(),
    ])

    # Read and transform each image file into a tensor
    imgs = []
    for im_list in self.image_list:
        for imname in im_list[index]:
            imgs.append(transformer(read_gen(imname)))

    return imgs, im_name
def __init__(self,
             inference_size: Tuple = (-1, -1),
             root: str = '',
             pair: bool = True,
             use_stereo: bool = False) -> None:
    self.render_size = list(inference_size)

    if use_stereo:
        # Collect the image files of each camera subfolder under root
        file_list = [
            image_files_from_folder(x[0], pair=pair)
            for x in os.walk(root)
            if os.path.basename(x[0]) != os.path.basename(root)
        ]
        assert len(file_list[0]) == len(file_list[1])
    else:
        # Wrap in a list so the loop below sees a single group of files
        file_list = [image_files_from_folder(root, pair=pair)]

    self.image_list, self.name_list = [], []
    for files in file_list:
        tmp_image_list, tmp_name_list = [], []
        prev_file = None  # Reset the previous frame for each group

        for file in files:
            if 'test' in file:
                continue

            if pair:  # Using paired images
                imbase, imext = os.path.splitext(
                    os.path.basename(str(file)))
                fbase = imbase.rsplit('_', 1)[0]

                img1 = file
                img2 = os.path.join(root, str(fbase) + '_img2' + imext)
            else:  # Using sequential images
                if prev_file is None:
                    prev_file = file
                    continue
                else:
                    img1, img2 = prev_file, file
                    prev_file = file

            fbase = os.path.splitext(os.path.basename(str(img1)))[0]
            fbase = fbase.rsplit('_', 1)[0] if use_stereo else fbase

            if not os.path.isfile(img1) or not os.path.isfile(img2):
                continue

            tmp_image_list += [[img1, img2]]
            tmp_name_list += [fbase]

        self.image_list.append(tmp_image_list)
        self.name_list.append(tmp_name_list)

    if use_stereo:
        # Both cameras must yield the same number of pairs and names
        assert len(self.image_list[0]) == len(self.image_list[1]) and \
            len(self.name_list[0]) == len(self.name_list[1])

    self.size = len(self.image_list[0])
    if self.size > 0:
        img_tmp = self.image_list[0][0][0]
        self.frame_size = read_gen(img_tmp).size

        # Round the render size down to the nearest multiple of 64
        if (self.render_size[0] < 0) or (self.render_size[1] < 0) or \
                (self.frame_size[0] % 64) or (self.frame_size[1] % 64):
            self.render_size[0] = (self.frame_size[0] // 64) * 64
            self.render_size[1] = (self.frame_size[1] // 64) * 64
    else:
        self.frame_size = None
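# For use_stereo=True the os.walk above expects root to hold one subfolder per
# camera (a hypothetical layout, inferred from the paired asserts), e.g.
#   root/cam0/0001_img1.png, root/cam0/0001_img2.png, ...
#   root/cam1/0001_img1.png, root/cam1/0001_img2.png, ...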
def __init__(self,
             args,
             is_cropped: bool = False,
             root: str = '',
             replicates: int = 1,
             mode: str = 'train',
             transform: Optional[object] = None) -> None:
    self.args = args
    self.is_cropped = is_cropped
    self.crop_size = args.crop_size
    self.render_size = args.inference_size
    self.transform = transform
    self.replicates = replicates
    self.set_type = mode

    exts = ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.ppm']
    dataset_list = sorted(glob(os.path.join(root, '*.json')))

    self.flow_list = []
    self.image_list = []

    for dataset_file in dataset_list:
        flonames = json_pickler(dataset_file, self.set_type,
                                self.replicates)

        for flo in flonames:
            if 'test' in flo:
                continue

            fbase = os.path.splitext(flo)[0]
            fbase = fbase.rsplit('_', 1)[0]

            # Find the image pair that matches the flow file
            img1, img2 = None, None
            for ext in exts:
                img1 = str(fbase) + '_img1' + ext
                img2 = str(fbase) + '_img2' + ext
                if os.path.isfile(img1):
                    break

            if not os.path.isfile(img1) or not os.path.isfile(
                    img2) or not os.path.isfile(flo):
                continue

            self.image_list.append([img1, img2])
            self.flow_list.append(flo)

    self.size = len(self.image_list)
    if self.size > 0:
        self.frame_size = read_gen(self.image_list[0][0]).size

        # Round the render size down to the nearest multiple of 64 and
        # propagate it back to the shared args
        if (self.render_size[0] < 0) or (self.render_size[1] < 0) or \
                (self.frame_size[0] % 64) or (self.frame_size[1] % 64):
            self.render_size[0] = (self.frame_size[0] // 64) * 64
            self.render_size[1] = (self.frame_size[1] // 64) * 64

        args.inference_size = self.render_size
    else:
        self.frame_size = None

    # Sanity check on the number of image pairs and flows
    assert len(self.image_list) == len(self.flow_list)
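# A minimal instantiation sketch. The class and args names are hypothetical
# stand-ins (the constructor only reads args.crop_size and
# args.inference_size, and writes args.inference_size back):
#
#   args = argparse.Namespace(crop_size=[256, 256], inference_size=[-1, -1])
#   dataset = FlowDataset(args, is_cropped=True, root='data/piv',
#                         mode='train', replicates=1)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)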