def __init__(self, root, isTrain=True):
    self.images_root = os.path.join(root, 'img')
    self.labels_root = os.path.join(root, 'gt')
    self.list_root = os.path.join(root, 'list')
    # print('image root = ', self.images_root)
    # print('labels root = ', self.labels_root)
    if isTrain:
        list_path = os.path.join(self.list_root, 'train_aug.txt')
        self.input_transform = transforms.Compose([
            transforms.RandomRotation(10),  # random rotation
            transforms.CenterCrop(256),
            transforms.RandomHorizontalFlip(),  # random horizontal flip
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225])
        ])
        self.target_transform = transforms.Compose(
            [transforms.CenterCrop(256), transform.ToLabel()])
    else:
        list_path = os.path.join(self.list_root, 'val.txt')
        self.input_transform = transforms.Compose([
            transforms.CenterCrop(256),
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225])
        ])
        self.target_transform = transforms.Compose(
            [transforms.CenterCrop(256), transform.ToLabel()])
    self.filenames = [i_id.strip() for i_id in open(list_path)]
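The target transform above references `transform.ToLabel()`, which is not defined in this section. A minimal sketch, assuming it simply converts a per-pixel class-index label image into a `LongTensor`:

```python
# Hypothetical sketch of the ToLabel transform referenced above (not shown in
# this section); assumes the label image stores integer class ids per pixel.
import numpy as np
import torch

class ToLabel:
    def __call__(self, label_img):
        # PIL image or ndarray of class ids -> LongTensor of shape [H, W]
        return torch.from_numpy(np.array(label_img, dtype=np.int64))
```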
def val_transform(self, rgb, depth):
    s = self.getFocalScale()
    # This used to be the last step, not sure if it goes here?
    depth = np.asfarray(depth, dtype='float32')
    if self.augArgs.varScale:  # variable global scale simulation
        scale = self.getDepthGroup()
        depth_np = depth * scale
    else:
        depth_np = depth
    if self.augArgs.varFocus:
        transform = transforms.Compose([
            transforms.Crop(130, 10, 240, 1200),
            transforms.Resize(s),  # resize both images without correcting the depth values
            transforms.CenterCrop(self.output_size),
        ])
    else:
        transform = transforms.Compose([
            transforms.Crop(130, 10, 240, 1200),
            transforms.CenterCrop(self.output_size),
        ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth):
    s = self.getFocalScale()
    if self.augArgs.varScale:  # variable global scale simulation
        scale = self.getDepthGroup()
        depth_np = depth * scale
    else:
        depth_np = depth
    if self.augArgs.varFocus:
        transform = transforms.Compose([
            transforms.Resize(240.0 / iheight),
            transforms.Resize(s),  # resize both images without correcting the depth values
            transforms.CenterCrop(self.output_size),
        ])
    else:
        transform = transforms.Compose([
            transforms.Resize(240.0 / iheight),
            transforms.CenterCrop(self.output_size),
        ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def train_transform(self, rgb, depth, rgb_near):
    s = np.random.uniform(1.0, 1.5)  # random scaling
    depth_np = depth / s
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

    # perform 1st step of data augmentation
    transform = transforms.Compose([
        transforms.Resize(250.0 / iheight),  # this is for computational efficiency, since rotation can be slow
        transforms.Rotate(angle),
        transforms.Resize(s),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(do_flip)
    ])
    rgb_np = transform(rgb)
    rgb_np = self.color_jitter(rgb_np)  # random color jittering
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    rgb_near_np = None
    if rgb_near is not None:
        rgb_near_np = transform(rgb_near)
        rgb_near_np = np.asfarray(rgb_near_np, dtype='float') / 255
    depth_np = transform(depth_np)
    self.K = TransfromIntrinsics(self.K, (250.0 / iheight) * s, self.output_size)
    return rgb_np, depth_np, rgb_near_np
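`TransfromIntrinsics` is used above (and in the later `val_transform` that takes `rgb_near`) but is not defined in this section. A minimal sketch, under the assumption that it scales a 3x3 intrinsics matrix by the resize ratio and then shifts the principal point for the subsequent center crop; the function name and signature here are illustrative only:

```python
# Hypothetical helper (not from the original code): update camera intrinsics
# for a resize by `ratio` followed by a center crop from `resized_hw` to `output_hw`.
import numpy as np

def scale_and_crop_intrinsics(K, ratio, resized_hw, output_hw):
    K = K.copy()
    K[0, 0] *= ratio  # fx scales with image width
    K[1, 1] *= ratio  # fy scales with image height
    K[0, 2] *= ratio  # cx
    K[1, 2] *= ratio  # cy
    # center crop removes equal margins on both sides
    K[0, 2] -= (resized_hw[1] - output_hw[1]) / 2.0
    K[1, 2] -= (resized_hw[0] - output_hw[0]) / 2.0
    return K
```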
def train_transform(self, rgb, depth):
    s = np.random.uniform(1.0, 1.5)  # random scaling
    depth_np = depth  # / s
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

    # perform 1st step of data augmentation
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),  # this is for computational efficiency, since rotation can be slow
        # transforms.Rotate(angle),
        # transforms.Resize(s),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(do_flip)
    ])
    rgb_np = transform(rgb)
    # rgb_np = self.color_jitter(rgb_np)  # random color jittering
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    depth_np = np.asfarray(depth_np, dtype='float')
    if self.depth_16:
        depth_np = depth_np / self.depth_16_max
    else:
        depth_np = (255 - depth_np) / 255
    return rgb_np, depth_np
def train_transform(self, rgb, depth):
    s = self.getFocalScale()
    if self.augArgs.varFocus:  # variable focal length simulation
        depth_np = depth
    else:
        depth_np = depth / s  # correct for focal length
    if self.augArgs.varScale:  # variable global scale simulation
        scale = self.getDepthGroup()
        depth_np = depth_np * scale
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

    # perform 1st step of data augmentation
    transform = transforms.Compose([
        transforms.Resize(250.0 / iheight),  # this is for computational efficiency, since rotation can be slow
        transforms.Rotate(angle),
        transforms.Resize(s),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(do_flip)
    ])
    rgb_np = transform(rgb)
    rgb_np = self.color_jitter(rgb_np)  # random color jittering
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth): """ [Reference] https://github.com/fangchangma/sparse-to-dense.pytorch/blob/master/dataloaders/nyu_dataloader.py Args: rgb (np.array): RGB image (shape=[H,W,3]) depth (np.array): Depth image (shape=[H,W]) Returns: torch.Tensor: Tranformed RGB image torch.Tensor: Transformed Depth image np.array: Transformed RGB image without color jitter (for 2D mesh creation) """ transform = transforms.Compose([ transforms.Resize(240.0 / RAW_HEIGHT), transforms.CenterCrop(self.img_size), ]) # Apply this transform to rgb/depth rgb_np_orig = transform(rgb) rgb_np_for_edge = np.asfarray(rgb_np_orig) # Used for mesh creation rgb_np = np.asfarray(rgb_np_orig) / 255 depth_np = transform(depth) return rgb_np, depth_np, rgb_np_for_edge
def train_transform(self, rgb, depth):
    # s = np.random.uniform(1.0, 1.5)  # random scaling
    # depth_np = depth / s
    s = self.getFocalScale()
    if self.augArgs.varFocus:  # variable focal length simulation
        depth_np = depth
    else:
        depth_np = depth / s  # correct for focal length
    if self.augArgs.varScale:  # variable global scale simulation
        scale = self.getDepthGroup()
        depth_np = depth_np * scale
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

    # perform 1st step of data augmentation
    transform = transforms.Compose([
        transforms.Crop(130, 10, 240, 1200),
        transforms.Rotate(angle),
        transforms.Resize(s),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(do_flip)
    ])
    rgb_np = transform(rgb)
    rgb_np = self.color_jitter(rgb_np)  # random color jittering
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    # Scipy affine_transform produced RuntimeError when the depth map was
    # given as a 'numpy.ndarray', so cast to float32 first
    depth_np = np.asfarray(depth_np, dtype='float32')
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def train_transform(self, rgb, depth):
    scale = np.random.uniform(low=1, high=1.5)  # random scaling
    depth = depth / scale
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    should_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip
    # random crop offsets (currently unused below)
    h_offset = int((768 - 228) * np.random.uniform(0.0, 1.0))
    v_offset = int((1024 - 304) * np.random.uniform(0.0, 1.0))

    base_transform = transforms.Compose([
        transforms.Resize(250 / iheight),
        transforms.Rotate(angle),
        transforms.Resize(scale),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(should_flip),
    ])
    rgb = base_transform(rgb)
    rgb = self.color_jitter(rgb)
    rgb = rgb / 255.0
    depth = base_transform(depth)
    return (rgb, depth)
def __getitem__(self, index):
    """
    Args:
        index (int): Index

    Returns:
        tuple: transformed RGB/depth tensors, extracted mesh,
            original-resolution RGB/depth tensors, and the estimated depth tensor.
    """
    raw_rgb, raw_depth, _ = self.__getraw__(index)
    if self.transform is not None:
        rgb_np, depth_np, rgb_np_for_edge = self.transform(raw_rgb, raw_depth)
    else:
        raise RuntimeError("transform not defined")

    input_tensor = to_tensor(rgb_np)
    depth_tensor = to_tensor(depth_np).unsqueeze(0)  # [1,H,W]

    # Extract mesh
    base_mesh = self.mesh_extractor(np.uint8(rgb_np_for_edge))

    # Preserve original resolution for evaluation/visualization
    orig_transform = transforms.Compose([
        transforms.CenterCrop((456, 608)),
    ])
    orig_input_tensor = orig_transform(raw_rgb)
    orig_depth_tensor = orig_transform(raw_depth)

    # To tensor
    orig_input_tensor = to_tensor(orig_input_tensor)
    orig_depth_tensor = to_tensor(orig_depth_tensor).unsqueeze(0)

    # Estimated depth maps (added for evaluation)
    est_depth_np = np.asfarray(self.mat_depth[index], dtype='float')  # numpy
    est_depth_tensor = to_tensor(est_depth_np).unsqueeze(0)

    return input_tensor, depth_tensor, base_mesh, orig_input_tensor, orig_depth_tensor, est_depth_tensor
def train_transform(self, im, gt):
    im = np.array(im).astype(np.float32)
    gt = np.array(gt).astype(np.float32)
    s = np.random.uniform(1.0, 1.5)  # random scaling
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip
    color_jitter = my_transforms.ColorJitter(0.4, 0.4, 0.4)

    transform = my_transforms.Compose([
        my_transforms.Crop(130, 10, 240, 1200),
        my_transforms.Resize(460 / 240, interpolation='bilinear'),
        my_transforms.Rotate(angle),
        my_transforms.Resize(s),
        my_transforms.CenterCrop(self.size),
        my_transforms.HorizontalFlip(do_flip)
    ])
    im_ = transform(im)
    im_ = color_jitter(im_)
    gt_ = transform(gt)

    im_ = np.array(im_).astype(np.float32)
    gt_ = np.array(gt_).astype(np.float32)
    im_ /= 255.0
    gt_ /= 100.0 * s
    im_ = to_tensor(im_)
    gt_ = to_tensor(gt_)
    gt_ = gt_.unsqueeze(0)
    return im_, gt_
def train_transform(self, rgb, depth):
    s = np.random.uniform(1.0, 1.5)  # random scaling
    random_size = (int(s * 224), int(s * 224))
    depth_np = depth / s
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

    # perform 1st step of data augmentation
    # transform = torchvision.transforms.Compose([
    #     torchvision.transforms.Resize(self.output_size),  # this is for computational efficiency, since rotation can be slow
    #     torchvision.transforms.RandomRotation(angle),
    #     torchvision.transforms.Resize(random_size),
    #     torchvision.transforms.CenterCrop(self.output_size),
    #     torchvision.transforms.RandomHorizontalFlip(do_flip)
    # ])
    transform2 = transforms.Compose([
        transforms.Resize(250.0 / iheight),  # this is for computational efficiency, since rotation can be slow
        transforms.Rotate(angle),
        transforms.Resize(s),
        transforms.CenterCrop(self.output_size),
        transforms.HorizontalFlip(do_flip)
    ])
    rgb_np = transform2(rgb)
    # rgb_n = Image.fromarray(np.uint8(rgb_np * 255))
    # rgb_np = self.color_jitter(rgb_n)  # random color jittering
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform2(depth_np)
    # depth_np = np.asfarray(depth_np, dtype='float') / 255
    return rgb_np, depth_np
def val_transform(self, rgb):
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    return rgb_np
def image_transform(rgb, depth):
    # Clip depth to 6000 mm and convert millimetres to metres
    depth_frame_converted = np.asfarray(depth.clip(0, 6000), dtype='float') / 1000
    depth_array = depth_frame_converted.reshape((424, 512), order='C')

    rgb_transform = transforms.Compose([
        transforms.Resize([240, 426]),
        transforms.CenterCrop((228, 304)),
    ])
    depth_transform = transforms.Compose([
        transforms.Resize([240, 320]),
        transforms.CenterCrop((228, 304)),
    ])
    rgb_frame = rgb_transform(rgb)
    rgb_np = np.asfarray(rgb_frame, dtype='float') / 255
    depth_np = depth_transform(depth_array)
    return rgb_np, depth_np
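A hypothetical usage sketch (not from the original code), assuming the depth input is a flat uint16 buffer in millimetres at the 424x512 resolution the function reshapes to, and the RGB input is an ordinary 3-channel frame:

```python
# Illustrative only: shapes and dtypes are assumptions, not the repo's actual capture code.
import numpy as np

rgb_frame = np.zeros((1080, 1920, 3), dtype=np.uint8)       # example RGB frame
depth_frame = np.zeros(424 * 512, dtype=np.uint16)          # flat depth buffer in millimetres
rgb_np, depth_np = image_transform(rgb_frame, depth_frame)  # both center-cropped to 228x304
```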
def val_transform(rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def seq_transform(self, attrib_list, is_validation):
    iheight = attrib_list['gt_depth'].shape[0]
    iwidth = attrib_list['gt_depth'].shape[1]
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),  # this is for computational efficiency
        transforms.CenterCrop(self.output_size)
    ])
    attrib_np = dict()
    network_max_range = 10.0  # 10 is arbitrary; the network only converges in a specific range
    if 'scale' in attrib_list and attrib_list['scale'] > 0:
        scale = 1.0 / attrib_list['scale']
        attrib_np['scale'] = attrib_list['scale']
    else:
        if 'fd' in attrib_list:
            minmax_image = transform(attrib_list['fd'])
            max_depth = max(minmax_image.max(), 1.0)
        if 'kor' in attrib_list:
            minmax_image = transform(attrib_list['kor'])
            max_depth = max(minmax_image.max(), 1.0)
        else:
            max_depth = 50
        scale = network_max_range / max_depth
        attrib_np['scale'] = 1.0 / scale
    for key, value in attrib_list.items():
        if key not in Modality.no_transform:
            attrib_np[key] = transform(value)
        else:
            attrib_np[key] = value
        if key in Modality.need_divider:
            attrib_np[key] = scale * attrib_np[key]
        elif key in Modality.image_size_weight_names:
            attrib_np[key] = attrib_np[key] / (iwidth * 1.5)  # 1.5 is about sqrt(2), a square's diagonal
    if 'rgb' in attrib_np:
        if not is_validation:
            attrib_np['rgb'] = self.color_jitter(attrib_np['rgb'])  # random color jittering
        attrib_np['rgb'] = (np.asfarray(attrib_np['rgb'], dtype='float') / 255).transpose(
            (2, 0, 1))  # all channels need to have C x H x W layout
    if 'grey' in attrib_np:
        attrib_np['grey'] = np.expand_dims(
            np.asfarray(attrib_np['grey'], dtype='float') / 255, axis=0)
    return attrib_np
def val_transform(self, rgb, depth, pose):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(250.0 / iheight),
        transforms.CenterCrop((228, 304)),
        transforms.Resize(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np, pose
def val_transform(self, rgb, depth):
    depth_np = depth / self.depth_divider
    transform = transforms.Compose([
        transforms.Crop(130, 10, 240, 1200),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = np.asfarray(depth_np, dtype='float32')
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Crop(130, 10, 220, 1200),
        transforms.CenterCrop(self.output_size)
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255  # normalize RGB to [0, 1]
    depth_np = np.asfarray(depth_np, dtype='float32')
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth, random_seed):
    np.random.seed(random_seed)
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Crop(0, 20, 750, 2000),
        transforms.Resize(500 / 750),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = np.asfarray(depth_np, dtype='float32')
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    # for comparison with Eigen's paper
    depth_np = depth_data_transforms(depth_np)
    return rgb_np, depth_np
def validate_transform(self, rgb, depth):
    # random crop offsets (currently unused below)
    h_offset = int((768 - 228) * np.random.uniform(0.0, 1.0))
    v_offset = int((1024 - 304) * np.random.uniform(0.0, 1.0))
    base_transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb = base_transform(rgb)
    rgb = rgb / 255.0
    depth = base_transform(depth)
    return (rgb, depth)
def val_transform(self, rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        # transforms.Resize(250.0 / iheight),
        transforms.Crop(130, 10, 240, 1200),
        transforms.CenterCrop(self.output_size),
        transforms.Resize(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = np.asfarray(depth_np, dtype='float32')
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def val_transform(self, rgb, depth):
    iheight = rgb.shape[0]
    depth_np = depth
    transform = transforms.Compose([
        # transforms.Resize((iheight, iwidth)),
        transforms.Resize(250.0 / iheight),
        transforms.CenterCrop((228, 304)),
        transforms.Resize(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    return rgb_np, depth_np
def _val_transform(self, rgb, sparse_depth, depth_gt):
    transform = transforms.Compose([
        transforms.Crop(*self._road_crop),
        transforms.CenterCrop(self.output_size),
    ])
    rgb = transform(rgb)
    rgb = np.asfarray(rgb, dtype='float') / 255
    sparse_depth = np.asfarray(sparse_depth, dtype='float32')
    sparse_depth = transform(sparse_depth)
    depth_gt = np.asfarray(depth_gt, dtype='float32')
    depth_gt = transform(depth_gt)
    return rgb, sparse_depth, depth_gt
def val_transform(self, rgb, depth, rgb_near):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    rgb_near_np = None
    if rgb_near is not None:
        rgb_near_np = transform(rgb_near)
        rgb_near_np = np.asfarray(rgb_near_np, dtype='float') / 255
    depth_np = transform(depth_np)
    self.K = TransfromIntrinsics(self.K, (240.0 / iheight), self.output_size)
    return rgb_np, depth_np, rgb_near_np
def val_transform(self, attrib_list):
    iheight = attrib_list['gt_depth'].shape[0]
    iwidth = attrib_list['gt_depth'].shape[1]
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    attrib_np = dict()
    if self.depth_divider == 0:
        if 'fd' in attrib_list:
            minmax_image = transform(attrib_list['fd'])
            max_depth = max(minmax_image.max(), 1.0)
        if 'kor' in attrib_list:
            minmax_image = transform(attrib_list['kor'])
            max_depth = max(minmax_image.max(), 1.0)
        else:
            max_depth = 50
        scale = 10.0 / max_depth  # 10 is arbitrary; the network only converges in a specific range
    else:
        scale = 1.0 / self.depth_divider
    attrib_np['scale'] = 1.0 / scale
    for key, value in attrib_list.items():
        attrib_np[key] = transform(value)
        if key in Modality.need_divider:  # ['gt_depth','fd','kor','kde','kgt','dor','dde','d3dwde','d3dwor','dvor','dvde','dvgt']
            attrib_np[key] = scale * attrib_np[key]  # (attrib_np[key] - min_depth + 0.01) / (max_depth - min_depth)
        elif key in Modality.image_size_weight_names:
            attrib_np[key] = attrib_np[key] / (iwidth * 1.5)  # 1.5 is about sqrt(2), a square's diagonal
        elif key == 'rgb':
            attrib_np[key] = (np.asfarray(attrib_np[key], dtype='float') / 255).transpose((2, 0, 1))
        elif key == 'grey':
            attrib_np[key] = np.expand_dims(
                np.asfarray(attrib_np[key], dtype='float') / 255, axis=0)
    return attrib_np
def train_transform(self, im, gt, mask):
    im = np.array(im).astype(np.float32)
    im = cv2.resize(im, (512, 256), interpolation=cv2.INTER_AREA)
    gt = cv2.resize(gt, (512, 256), interpolation=cv2.INTER_AREA)
    mask = cv2.resize(mask, (512, 256), interpolation=cv2.INTER_AREA)

    # h, w, c = im.shape
    # th, tw = 256, 512
    # x1 = random.randint(0, w - tw)
    # y1 = random.randint(0, h - th)
    # img = im[y1:y1 + th, x1:x1 + tw, :]
    # gt = gt[y1:y1 + th, x1:x1 + tw]
    # mask = mask[y1:y1 + th, x1:x1 + tw]

    s = np.random.uniform(1.0, 1.5)  # random scaling
    angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
    do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip
    color_jitter = my_transforms.ColorJitter(0.4, 0.4, 0.4)

    transform = my_transforms.Compose([
        my_transforms.Rotate(angle),
        my_transforms.Resize(s),
        my_transforms.CenterCrop(self.size),
        my_transforms.HorizontalFlip(do_flip)
    ])
    im_ = transform(im)
    im_ = color_jitter(im_)
    gt_ = transform(gt)
    mask_ = transform(mask)

    im_ = np.array(im_).astype(np.float32)
    gt_ = np.array(gt_).astype(np.float32)
    mask_ = np.array(mask_).astype(np.float32)
    im_ /= 255.0
    gt_ /= s
    im_ = to_tensor(im_)
    gt_ = to_tensor(gt_)
    mask_ = to_tensor(mask_)
    gt_ = gt_.unsqueeze(0)
    mask_ = mask_.unsqueeze(0)
    return im_, gt_, mask_
def val_transform(self, rgb, depth):
    depth_np = depth
    transform = transforms.Compose([
        transforms.Resize(240.0 / iheight),
        transforms.CenterCrop(self.output_size),
    ])
    rgb_np = transform(rgb)
    rgb_np = np.asfarray(rgb_np, dtype='float') / 255
    depth_np = transform(depth_np)
    depth_np = np.asfarray(depth_np, dtype='float')
    if self.depth_16:
        depth_np = depth_np / self.depth_16_max
    else:
        depth_np = (255 - depth_np) / 255
    return rgb_np, depth_np