def __init__(self, base_dir='/newDisk/users/duanshiyu/cityscapes/', split='train',
             affine_augmenter=None, image_augmenter=None, target_size=(1024, 2048),
             net_type='unet', ignore_index=255, debug=False):
    self.debug = debug
    self.base_dir = Path(base_dir)
    assert net_type in ['unet', 'deeplab']
    self.net_type = net_type
    self.ignore_index = ignore_index
    self.split = 'val' if split == 'valid' else split

    self.img_paths = sorted(self.base_dir.glob(f'leftImg8bit_sequence/{self.split}/*/*leftImg8bit.png'))
    # self.lbl_paths = sorted(self.base_dir.glob(f'gtFine/{self.split}/*/*gtFine_labelIds.png'))
    # assert len(self.img_paths) == len(self.lbl_paths)

    # Resize
    if isinstance(target_size, str):
        target_size = eval(target_size)
    if self.split == 'train':
        if self.net_type == 'deeplab':
            target_size = (target_size[0] + 1, target_size[1] + 1)
        self.resizer = albu.Compose([
            albu.RandomScale(scale_limit=(-0.5, 0.5), p=1.0),
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.RandomCrop(height=target_size[0], width=target_size[1], p=1.0)
        ])
    else:
        self.resizer = None

    # Augment
    if self.split == 'train':
        self.affine_augmenter = affine_augmenter
        self.image_augmenter = image_augmenter
    else:
        self.affine_augmenter = None
        self.image_augmenter = None
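
# --- Illustrative sketch, not this repo's implementation. PadIfNeededRightBottom
# is a custom transform used throughout these datasets (the Sherbrooke dataset
# below points to src.utils.custom_aug). A minimal version might look like the
# following, assuming the albumentations DualTransform API: the image is padded
# with `value` and the mask with `ignore_index`, anchored top-left so padding
# grows only rightward/downward and padded pixels are excluded from the loss. ---
import numpy as np
from albumentations.core.transforms_interface import DualTransform

class PadIfNeededRightBottomSketch(DualTransform):
    def __init__(self, min_height, min_width, value=0, ignore_index=255, p=1.0):
        super().__init__(p=p)
        self.min_height = min_height
        self.min_width = min_width
        self.value = value
        self.ignore_index = ignore_index

    def apply(self, img, **params):
        # Image pixels are padded with the fill value (e.g. black).
        return self._pad(img, self.value)

    def apply_to_mask(self, mask, **params):
        # Mask pixels are padded with ignore_index so the loss skips them.
        return self._pad(mask, self.ignore_index)

    def _pad(self, arr, fill):
        h, w = arr.shape[:2]
        pad_h = max(self.min_height - h, 0)
        pad_w = max(self.min_width - w, 0)
        # Pad only on the bottom and right edges; leave channels untouched.
        pad = ((0, pad_h), (0, pad_w)) + ((0, 0),) * (arr.ndim - 2)
        return np.pad(arr, pad, mode='constant', constant_values=fill)
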
def __init__(self, base_dir='../data/pascal_voc_2012/VOCdevkit/VOC2012', split='train_aug',
             affine_augmenter=None, image_augmenter=None, target_size=(512, 512),
             net_type='unet', ignore_index=255, debug=False):
    self.debug = debug
    self.base_dir = Path(base_dir)
    assert net_type in ['unet', 'deeplab']
    self.net_type = net_type
    self.ignore_index = ignore_index
    self.split = split

    valid_ids = self.base_dir / 'ImageSets' / 'Segmentation' / 'val.txt'
    with open(valid_ids, 'r') as f:
        valid_ids = f.readlines()
    if self.split == 'valid':
        lbl_dir = 'SegmentationClass'
        img_ids = valid_ids
    else:
        valid_set = set([valid_id.strip() for valid_id in valid_ids])
        lbl_dir = 'SegmentationClassAug' if 'aug' in split else 'SegmentationClass'
        all_set = set([p.name[:-4] for p in self.base_dir.joinpath(lbl_dir).iterdir()])
        img_ids = list(all_set - valid_set)
    self.img_paths = [self.base_dir / 'JPEGImages' / f'{img_id.strip()}.jpg' for img_id in img_ids]
    self.lbl_paths = [self.base_dir / lbl_dir / f'{img_id.strip()}.png' for img_id in img_ids]

    # Resize
    if isinstance(target_size, str):
        target_size = eval(target_size)
    if 'train' in self.split:
        if self.net_type == 'deeplab':
            target_size = (target_size[0] + 1, target_size[1] + 1)
        self.resizer = albu.Compose([
            albu.RandomScale(scale_limit=(-0.5, 0.5), p=1.0),
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.RandomCrop(height=target_size[0], width=target_size[1], p=1.0)
        ])
    else:
        # self.resizer = None
        self.resizer = albu.Compose([
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.Crop(x_min=0, x_max=target_size[1], y_min=0, y_max=target_size[0])
        ])

    # Augment
    if 'train' in self.split:
        self.affine_augmenter = affine_augmenter
        self.image_augmenter = image_augmenter
    else:
        self.affine_augmenter = None
        self.image_augmenter = None
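
# --- Illustrative sketch, not code from this repo: a plausible __getitem__ for a
# torch-style Dataset whose __init__ is shown above. The PIL/numpy imports are
# assumptions; the key point is that albumentations pipelines receive image and
# mask together, so geometric transforms (scale, pad, crop) stay aligned, while
# the photometric image_augmenter touches only the image. ---
import numpy as np
from PIL import Image

def __getitem__(self, index):
    img = np.array(Image.open(self.img_paths[index]))   # H x W x 3, uint8
    lbl = np.array(Image.open(self.lbl_paths[index]))   # H x W, raw class ids
    if self.resizer is not None:
        resized = self.resizer(image=img, mask=lbl)     # joint geometric transform
        img, lbl = resized['image'], resized['mask']
    if self.affine_augmenter is not None:
        augmented = self.affine_augmenter(image=img, mask=lbl)
        img, lbl = augmented['image'], augmented['mask']
    if self.image_augmenter is not None:
        img = self.image_augmenter(image=img)['image']  # photometric only, mask untouched
    return img, lbl
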
def __init__(self, base_dir='../data/sherbrooke', split='train', affine_augmenter=None,
             image_augmenter=None, target_size=(544, 544), net_type='deeplab',
             ignore_index=255, defects=False, debug=False):
    if defects is False:
        self.n_classes = 2
        self.void_classes = [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                             18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                             32, 33, -1]
        self.valid_classes = [0, 8]  # background and sidewalks only. 0 will become background...
    else:
        base_dir = '../data/bbox_mask'
        self.n_classes = 2
        self.void_classes = [0, 4]  # why 4?
        self.valid_classes = [8, 35]  # background and sidewalks only.
    self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))

    self.debug = debug
    self.defects = defects
    self.base_dir = Path(base_dir)
    assert net_type in ['unet', 'deeplab']
    self.net_type = net_type
    self.ignore_index = ignore_index
    self.split = 'val' if split == 'valid' else split

    self.img_paths = sorted(self.base_dir.glob(f'leftImg8bit/{self.split}/*/*leftImg8bit.*'))
    self.lbl_paths = sorted(self.base_dir.glob(f'gtFine/{self.split}/*/*gtFine*.png'))

    # Quality control
    if len(self.img_paths) != len(self.lbl_paths):
        raise AssertionError(f'Length of images (count: {len(self.img_paths)}) '
                             f'and labels (count: {len(self.lbl_paths)}) don\'t match')
    if len(self.img_paths) == 0:
        raise AssertionError('No images found. Check current working directory.')
    for img_path, lbl_path in zip(self.img_paths, self.lbl_paths):
        _, img_path = os.path.split(img_path)
        img_name, img_ext = os.path.splitext(img_path)  # separate name and extension
        _, lbl_path = os.path.split(lbl_path)
        lbl_name, lbl_ext = os.path.splitext(lbl_path)  # separate name and extension
        if img_name.split('_')[0] != lbl_name.split('_')[0]:
            raise AssertionError(f'Image {img_name} and label {lbl_name} don\'t match')
    print(f'Assertion success: image and label filenames in {self.split} split of dataset match.')

    # Resize
    if isinstance(target_size, str):
        target_size = eval(target_size)
    if self.split == 'train':
        if self.net_type == 'deeplab':
            target_size = (target_size[0] + 1, target_size[1] + 1)
        # Resize (Scale & Pad & Crop)
        # self.resizer = None
        self.resizer = albu.Compose([
            albu.RandomScale(scale_limit=(-0.5, 0.5), p=0.5),
            # next transform is custom. see src.utils.custom_aug
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.RandomCrop(height=target_size[0], width=target_size[1], p=1.0)
        ])
        # self.resizer_info = (f'albu.RandomScale(scale_limit={self.resizer.transforms[0].scale_limit}, p=0.5),'
        #                      f'albu.RandomCrop(height={target_size[0]}, width={target_size[1]}, p=1.0)')
    else:
        self.resizer = None

    # Augment
    if self.split == 'train':
        self.affine_augmenter = affine_augmenter
        self.image_augmenter = image_augmenter
    else:
        self.affine_augmenter = None
        self.image_augmenter = None
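
# --- Illustrative sketch, not code from this repo: the Sherbrooke dataset builds
# self.class_map but its consumer is not shown in this excerpt. A helper like the
# following (the name encode_segmap and its placement are assumptions) would remap
# raw label ids to contiguous train ids: void classes collapse to ignore_index,
# valid classes map through class_map. Remapping voids first avoids collisions
# between raw ids and freshly assigned train ids; callers should pass a copy if
# the original mask must be preserved, since the edit is in place. ---
def encode_segmap(self, mask):
    for void_class in self.void_classes:
        mask[mask == void_class] = self.ignore_index
    for valid_class in self.valid_classes:
        mask[mask == valid_class] = self.class_map[valid_class]
    return mask
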
def __init__(self, base_dir='../data/deepglobe_as_pascalvoc/VOCdevkit/VOC2012', split='train',
             affine_augmenter=None, image_augmenter=None, target_size=(512, 512),
             net_type='unet', ignore_index=255, debug=False):
    self.debug = debug
    self.base_dir = Path(base_dir)
    assert net_type in ['unet', 'deeplab']
    self.net_type = net_type
    self.ignore_index = ignore_index
    self.split = split

    ######################################
    # This will change :                 #
    ######################################
    classes = ['AgricultureLand', 'BarrenLand', 'Forest', 'RangeLand', 'UrbanLand', 'Water']

    # Generate randomized valid split: 69 random ids per class
    valid_ids = []
    for cls in classes:
        with open(self.base_dir / 'ClassifiedTiles' / cls / 'val.txt', 'r') as f:
            temp_ids = f.readlines()
        random.shuffle(temp_ids)
        valid_ids = valid_ids + temp_ids[:69]

    # Generate randomized train split: 278 random ids per class
    train_ids = []
    for cls in classes:
        with open(self.base_dir / 'ClassifiedTiles' / cls / 'train.txt', 'r') as f:
            temp_ids = f.readlines()
        random.shuffle(temp_ids)
        train_ids = train_ids + temp_ids[:278]

    lbl_dir = 'SegmentationClass'
    if self.split == 'valid':
        img_ids = valid_ids
    elif self.split == 'train':
        img_ids = train_ids
    else:
        valid_set = set([valid_id.strip() for valid_id in valid_ids])
        lbl_dir = 'SegmentationClassAug' if 'aug' in split else 'SegmentationClass'
        all_set = set([p.name[:-4] for p in self.base_dir.joinpath(lbl_dir).iterdir()])
        img_ids = list(all_set - valid_set)
    self.img_paths = [self.base_dir / 'JPEGImages' / f'{img_id.strip()}.jpg' for img_id in img_ids]
    self.lbl_paths = [self.base_dir / lbl_dir / f'{img_id.strip()}.png' for img_id in img_ids]

    # Resize
    if isinstance(target_size, str):
        target_size = eval(target_size)
    if 'train' in self.split:
        if self.net_type == 'deeplab':
            target_size = (target_size[0] + 1, target_size[1] + 1)
        self.resizer = albu.Compose([
            albu.RandomScale(scale_limit=(-0.5, 0.5), p=1.0),
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.RandomCrop(height=target_size[0], width=target_size[1], p=1.0)
        ])
    else:
        # self.resizer = None
        self.resizer = albu.Compose([
            PadIfNeededRightBottom(min_height=target_size[0], min_width=target_size[1],
                                   value=0, ignore_index=self.ignore_index, p=1.0),
            albu.Crop(x_min=0, x_max=target_size[1], y_min=0, y_max=target_size[0])
        ])

    # Augment
    if 'train' in self.split:
        self.affine_augmenter = affine_augmenter
        self.image_augmenter = image_augmenter
    else:
        self.affine_augmenter = None
        self.image_augmenter = None
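
# --- Illustrative usage sketch (assumptions: the __init__ methods above belong to
# torch.utils.data.Dataset subclasses, and the class name DeepGlobeDataset is
# hypothetical, chosen only for this example): ---
from torch.utils.data import DataLoader

dataset = DeepGlobeDataset(split='train', target_size=(512, 512), net_type='deeplab')
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
# With net_type='deeplab' the crop becomes 513x513 (target_size + 1), matching
# DeepLab's convention of odd input sizes so strided feature maps stay aligned.
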