def __init__(self, common_transforms, transforms1, transforms2, dataset_root=None, mode='train', edge=False):
    """Build the EG1800 portrait-segmentation file list.

    Args:
        common_transforms (list): Transforms applied to every sample.
        transforms1 (list|None): Extra transforms (applied without RGB conversion).
        transforms2 (list|None): Extra transforms (applied without RGB conversion).
        dataset_root (str, optional): Dataset directory; downloaded when
            None or when the path does not exist. Default: None.
        mode (str, optional): 'train' or anything else for the test split.
            Default: 'train'.
        edge (bool, optional): Unused here; kept for interface parity. Default: False.
    """
    self.dataset_root = dataset_root
    self.common_transforms = Compose(common_transforms)
    self.transforms = self.common_transforms
    if transforms1 is not None:
        self.transforms1 = Compose(transforms1, to_rgb=False)
    if transforms2 is not None:
        self.transforms2 = Compose(transforms2, to_rgb=False)
    mode = mode.lower()
    self.ignore_index = 255
    self.mode = mode
    self.num_classes = self.NUM_CLASSES
    self.input_width = 224
    self.input_height = 224

    if self.dataset_root is None:
        # No root given: fetch the archive into the shared data home.
        self.dataset_root = download_file_and_uncompress(
            url=URL, savepath=seg_env.DATA_HOME, extrapath=seg_env.DATA_HOME)
    elif not os.path.exists(self.dataset_root):
        # Root given but missing: download next to the requested path.
        self.dataset_root = os.path.normpath(self.dataset_root)
        savepath, extraname = self.dataset_root.rsplit(
            sep=os.path.sep, maxsplit=1)
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=savepath,
            extrapath=savepath,
            extraname=extraname)

    # BUG FIX: use self.dataset_root (set by the download branches above),
    # not the raw `dataset_root` argument, which may still be None.
    if mode == 'train':
        path = os.path.join(self.dataset_root, 'eg1800_train.txt')
    else:
        path = os.path.join(self.dataset_root, 'eg1800_test.txt')
    with open(path, 'r') as f:
        files = f.readlines()
    img_files = [
        os.path.join(self.dataset_root, 'Images', file).strip()
        for file in files
    ]
    label_files = [
        os.path.join(self.dataset_root, 'Labels', file).strip()
        for file in files
    ]

    self.file_list = [
        [img_path, label_path]
        for img_path, label_path in zip(img_files, label_files)
    ]
def __init__(self, transforms, dataset_root, mode='train', edge=False):
    """Build the file list from an images/ + annotations/ COCO-style layout.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str): Root containing 'images' and 'annotations'
            folders, each with a '<mode>2017' split directory.
        mode (str, optional): 'train' or 'val'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid, transforms is None, or the dataset
            folder structure is missing.
    """
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.file_list = list()
    mode = mode.lower()
    self.mode = mode
    self.num_classes = self.NUM_CLASSES
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'val']:
        raise ValueError(
            "mode should be 'train', 'val', but got {}.".format(mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    # BUG FIX: validate the root before os.path.join, which would raise a
    # TypeError on a None root instead of the intended ValueError.
    if self.dataset_root is None or not os.path.isdir(self.dataset_root):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )
    img_dir = os.path.join(self.dataset_root, 'images')
    label_dir = os.path.join(self.dataset_root, 'annotations')
    if not os.path.isdir(img_dir) or not os.path.isdir(label_dir):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )

    label_files = sorted(
        glob.glob(os.path.join(label_dir, mode + '2017', '*.png')))
    img_files = sorted(
        glob.glob(os.path.join(img_dir, mode + '2017', '*.jpg')))

    self.file_list = [
        [img_path, label_path]
        for img_path, label_path in zip(img_files, label_files)
    ]
def main(args):
    """Run prediction with the configured model over the given images.

    Args:
        args: Parsed CLI arguments providing cfg, model_path, image_path,
            save_dir and the test-config related options.

    Raises:
        RuntimeError: If no configuration file is specified.
    """
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = Compose(cfg.val_transforms)
    # BUG FIX: removed leftover debug `print(transforms)`.
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_config = get_test_config(cfg, args)

    predict(
        model,
        model_path=args.model_path,
        transforms=transforms,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        **test_config)
def __init__(self, dataset_root=None, transforms=None, mode='train', edge=False):
    """List-file dataset that downloads itself when the root is absent.

    Args:
        dataset_root (str, optional): Dataset directory; downloaded when
            None or when the path does not exist. Default: None.
        transforms (list, optional): Transforms applied to each sample.
        mode (str, optional): 'train', 'val' or 'test'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid or transforms is None.
        Exception: If a train/val list line is not "image_name label_name".
    """
    mode = mode.lower()
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.mode = mode
    self.file_list = list()
    self.num_classes = self.NUM_CLASSES
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'val', 'test']:
        raise ValueError(
            "`mode` should be 'train', 'val' or 'test', but got {}.".format(
                mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    if self.dataset_root is None:
        # No root given: fetch the archive into the shared data home.
        self.dataset_root = download_file_and_uncompress(
            url=URL, savepath=seg_env.DATA_HOME, extrapath=seg_env.DATA_HOME)
    elif not os.path.exists(self.dataset_root):
        # Root given but missing: download next to the requested path.
        self.dataset_root = os.path.normpath(self.dataset_root)
        savepath, extraname = self.dataset_root.rsplit(
            sep=os.path.sep, maxsplit=1)
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=savepath,
            extrapath=savepath,
            extraname=extraname)

    split_lists = {'train': 'train_list.txt', 'val': 'val_list.txt'}
    file_path = os.path.join(self.dataset_root,
                             split_lists.get(mode, 'test_list.txt'))

    with open(file_path, 'r') as f:
        for line in f:
            items = line.strip().split()
            if len(items) != 2:
                # Test lists may contain bare image paths without labels.
                if mode in ('train', 'val'):
                    raise Exception(
                        "File list format incorrect! It should be"
                        " image_name label_name\\n")
                image_path = os.path.join(self.dataset_root, items[0])
                grt_path = None
            else:
                image_path = os.path.join(self.dataset_root, items[0])
                grt_path = os.path.join(self.dataset_root, items[1])
            self.file_list.append([image_path, grt_path])
def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
    """ADE20K dataset: download if needed, then pair images with labels.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str, optional): Dataset directory; downloaded when
            None or when the path does not exist. Default: None.
        mode (str, optional): 'train' or 'val'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid or transforms is None.
    """
    mode = mode.lower()
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.mode = mode
    self.file_list = list()
    self.num_classes = 150
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'val']:
        raise ValueError(
            "`mode` should be one of ('train', 'val') in ADE20K dataset, but got {}."
            .format(mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    if self.dataset_root is None:
        # No root given: fetch the archive into the shared data home.
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=seg_env.DATA_HOME,
            extrapath=seg_env.DATA_HOME,
            extraname='ADEChallengeData2016')
    elif not os.path.exists(self.dataset_root):
        # Root given but missing: download next to the requested path.
        self.dataset_root = os.path.normpath(self.dataset_root)
        savepath, extraname = self.dataset_root.rsplit(
            sep=os.path.sep, maxsplit=1)
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=savepath,
            extrapath=savepath,
            extraname=extraname)

    if mode == 'train':
        img_dir = os.path.join(self.dataset_root, 'images/training')
        label_dir = os.path.join(self.dataset_root, 'annotations/training')
    elif mode == 'val':
        img_dir = os.path.join(self.dataset_root, 'images/validation')
        label_dir = os.path.join(self.dataset_root,
                                 'annotations/validation')

    # Each label shares its image's basename with a .png extension.
    for img_name in os.listdir(img_dir):
        label_name = img_name.replace('.jpg', '.png')
        self.file_list.append([
            os.path.join(img_dir, img_name),
            os.path.join(label_dir, label_name)
        ])
def __init__(self, transforms=None, dataset_root=None, mode='train', edge=False):
    """PascalContext dataset: resolve the split list and pair files.

    Args:
        transforms (list, optional): Transforms applied to each sample.
        dataset_root (str): VOC-style root with ImageSets/Segmentation,
            JPEGImages and Context folders.
        mode (str, optional): 'train', 'trainval' or 'val'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid, transforms is None, or the root is None.
        RuntimeError: If the context annotation lists have not been generated.
    """
    mode = mode.lower()
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.mode = mode
    self.file_list = list()
    self.num_classes = self.NUM_CLASSES
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'trainval', 'val']:
        raise ValueError(
            "`mode` should be one of ('train', 'trainval', 'val') in PascalContext dataset, but got {}."
            .format(mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    if self.dataset_root is None:
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )

    image_set_dir = os.path.join(self.dataset_root, 'ImageSets',
                                 'Segmentation')
    # Mode has already been validated, so this lookup cannot fail.
    split_lists = {
        'train': 'train_context.txt',
        'val': 'val_context.txt',
        'trainval': 'trainval_context.txt',
    }
    file_path = os.path.join(image_set_dir, split_lists[mode])
    if not os.path.exists(file_path):
        raise RuntimeError(
            "PASCAL-Context annotations are not ready, "
            "Please make sure voc_context.py has been properly run.")

    img_dir = os.path.join(self.dataset_root, 'JPEGImages')
    label_dir = os.path.join(self.dataset_root, 'Context')

    with open(file_path, 'r') as f:
        for line in f:
            name = line.strip()
            self.file_list.append([
                os.path.join(img_dir, ''.join([name, '.jpg'])),
                os.path.join(label_dir, ''.join([name, '.png']))
            ])
def __init__(self, dataset_root=None, transforms=None, mode='train', edge=False):
    """List-file dataset reading train/val/test txt lists from the root.

    Args:
        dataset_root (str): Directory holding train.txt / val.txt / test.txt
            and the files they reference.
        transforms (list, optional): Transforms applied to each sample.
        mode (str, optional): 'train', 'val' or 'test'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid or transforms is None.
        Exception: If a train/val list line is not "image_name label_name".
    """
    mode = mode.lower()
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.mode = mode
    self.file_list = list()
    self.num_classes = self.NUM_CLASSES
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'val', 'test']:
        raise ValueError(
            "`mode` should be 'train', 'val' or 'test', but got {}.".format(
                mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    split_lists = {'train': 'train.txt', 'val': 'val.txt', 'test': 'test.txt'}
    file_path = os.path.join(self.dataset_root, split_lists[mode])

    with open(file_path, 'r') as f:
        for line in f:
            items = line.strip().split(' ')
            if len(items) != 2:
                # Test lists may contain bare image paths without labels.
                if mode in ('train', 'val'):
                    raise Exception(
                        "File list format incorrect! It should be"
                        " image_name label_name\\n")
                image_path = os.path.join(self.dataset_root, items[0])
                grt_path = None
            else:
                image_path = os.path.join(self.dataset_root, items[0])
                grt_path = os.path.join(self.dataset_root, items[1])
            self.file_list.append([image_path, grt_path])
def __init__(self, transforms, dataset_root, mode='train', edge=False):
    """Cityscapes dataset: pair leftImg8bit images with gtFine trainId labels.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str): Root containing 'leftImg8bit' and 'gtFine'.
        mode (str, optional): 'train', 'val' or 'test'. Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid, transforms is None, or the dataset
            folder structure is missing.
    """
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.file_list = list()
    mode = mode.lower()
    self.mode = mode
    self.num_classes = 19
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'val', 'test']:
        raise ValueError(
            "mode should be 'train', 'val' or 'test', but got {}.".format(
                mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    # BUG FIX: validate the root before os.path.join, which would raise a
    # TypeError on a None root instead of the intended ValueError.
    if self.dataset_root is None or not os.path.isdir(self.dataset_root):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )
    img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
    label_dir = os.path.join(self.dataset_root, 'gtFine')
    if not os.path.isdir(img_dir) or not os.path.isdir(label_dir):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )

    label_files = sorted(
        glob.glob(
            os.path.join(label_dir, mode, '*',
                         '*_gtFine_labelTrainIds.png')))
    img_files = sorted(
        glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.png')))

    self.file_list = [
        [img_path, label_path]
        for img_path, label_path in zip(img_files, label_files)
    ]
def __init__(self, transforms, dataset_root, mode='train', coarse_multiple=1, add_val=False):
    """Build fine (and, in training, coarse pseudo-labeled) file lists.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str): Root containing 'leftImg8bit' and 'gtFine'.
        mode (str, optional): 'train', 'val' or 'test'. Default: 'train'.
        coarse_multiple (int, optional): Ratio of coarse data used per fine
            sample (consumed elsewhere). Default: 1.
        add_val (bool, optional): In train mode, also fold the val split
            into the training list. Default: False.

    Raises:
        ValueError: If mode is invalid, transforms is None, or either the
            fine or the coarse dataset layout is missing/mismatched.
    """
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.file_list = list()
    mode = mode.lower()
    self.mode = mode
    self.num_classes = 26
    self.ignore_index = 255
    self.coarse_multiple = coarse_multiple

    if mode not in ['train', 'val', 'test']:
        raise ValueError(
            "mode should be 'train', 'val' or 'test', but got {}.".format(
                mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
    label_dir = os.path.join(self.dataset_root, 'gtFine')
    if self.dataset_root is None or not os.path.isdir(
            self.dataset_root) or not os.path.isdir(
                img_dir) or not os.path.isdir(label_dir):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )

    label_files = sorted(
        glob.glob(
            os.path.join(label_dir, mode, '*',
                         '*_gtFine_labellevel3Ids.png')))
    img_files = sorted(
        glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.*')))
    self.file_list = [
        [img_path, label_path]
        for img_path, label_path in zip(img_files, label_files)
    ]
    # BUG FIX: removed leftover debug prints of the whole file list and
    # the commented-out code around them.
    self.num_files = len(self.file_list)
    self.total_num_files = self.num_files

    if mode == 'train':
        if add_val:
            # Fold the val split into the training list as well.
            label_files = sorted(
                glob.glob(
                    os.path.join(label_dir, 'val', '*',
                                 '*_gtFine_labellevel3Ids.png')))
            img_files = sorted(
                glob.glob(
                    os.path.join(img_dir, 'val', '*', '*_leftImg8bit.*')))
            val_file_list = [
                [img_path, label_path]
                for img_path, label_path in zip(img_files, label_files)
            ]
            self.file_list.extend(val_file_list)
            self.num_files = len(self.file_list)
            self.total_num_files = self.num_files

        # Coarse (pseudo-labeled) data is used only in training.
        # NOTE(review): these paths are hard-coded relative to the CWD,
        # not derived from dataset_root — confirm intended.
        img_dir = os.path.join('data/IDD_Detection/JPEGImages')
        label_dir = os.path.join('data/IDD_Detection/pred_refine')
        if self.dataset_root is None or not os.path.isdir(
                self.dataset_root) or not os.path.isdir(
                    img_dir) or not os.path.isdir(label_dir):
            raise ValueError(
                "The coarse dataset is not Found or the folder structure is nonconfoumance."
            )
        coarse_label_files = sorted(
            glob.glob(os.path.join(label_dir, '*', '*')))
        coarse_img_files = sorted(glob.glob(os.path.join(img_dir, '*', '*')))
        if len(coarse_img_files) != len(coarse_label_files):
            raise ValueError(
                "The number of images = {} is not equal to the number of labels = {} in Cityscapes Autolabeling dataset."
                .format(len(coarse_img_files), len(coarse_label_files)))
        self.coarse_file_list = [
            [img_path, label_path]
            for img_path, label_path in zip(coarse_img_files,
                                            coarse_label_files)
        ]
        random.shuffle(self.coarse_file_list)

        # NOTE(review): the fine file list built above is replaced by the
        # coarse list here, so training iterates coarse data only —
        # confirm this is the intended pre-training behavior.
        self.file_list = self.coarse_file_list
        self.num_files = len(self.file_list)
        self.total_num_files = self.num_files
def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
    """PascalVOC dataset: download if needed, then build the file list.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str, optional): VOCdevkit directory; downloaded when
            None or when the path does not exist. Default: None.
        mode (str, optional): 'train', 'trainval', 'trainaug' or 'val'.
            Default: 'train'.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid or transforms is None.
        RuntimeError: If 'trainaug' is requested but aug.txt is missing.
    """
    mode = mode.lower()
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.mode = mode
    self.file_list = list()
    self.num_classes = self.NUM_CLASSES
    self.ignore_index = 255
    self.edge = edge

    if mode not in ['train', 'trainval', 'trainaug', 'val']:
        raise ValueError(
            "`mode` should be one of ('train', 'trainval', 'trainaug', 'val') in PascalVOC dataset, but got {}."
            .format(mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    if self.dataset_root is None:
        # No root given: fetch the archive into the shared data home.
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=seg_env.DATA_HOME,
            extrapath=seg_env.DATA_HOME,
            extraname='VOCdevkit')
    elif not os.path.exists(self.dataset_root):
        # Root given but missing: download next to the requested path.
        self.dataset_root = os.path.normpath(self.dataset_root)
        savepath, extraname = self.dataset_root.rsplit(
            sep=os.path.sep, maxsplit=1)
        self.dataset_root = download_file_and_uncompress(
            url=URL,
            savepath=savepath,
            extrapath=savepath,
            extraname=extraname)

    image_set_dir = os.path.join(self.dataset_root, 'VOC2012', 'ImageSets',
                                 'Segmentation')
    if mode == 'train':
        file_path = os.path.join(image_set_dir, 'train.txt')
    elif mode == 'val':
        file_path = os.path.join(image_set_dir, 'val.txt')
    elif mode == 'trainval':
        file_path = os.path.join(image_set_dir, 'trainval.txt')
    elif mode == 'trainaug':
        # 'trainaug' = the plain train split plus augmented labels.
        file_path = os.path.join(image_set_dir, 'train.txt')
        file_path_aug = os.path.join(image_set_dir, 'aug.txt')
        if not os.path.exists(file_path_aug):
            raise RuntimeError(
                "When `mode` is 'trainaug', Pascal Voc dataset should be augmented, "
                "Please make sure voc_augment.py has been properly run when using this mode."
            )

    img_dir = os.path.join(self.dataset_root, 'VOC2012', 'JPEGImages')
    label_dir = os.path.join(self.dataset_root, 'VOC2012',
                             'SegmentationClass')
    label_dir_aug = os.path.join(self.dataset_root, 'VOC2012',
                                 'SegmentationClassAug')

    def collect(list_path, target_label_dir):
        # Append (image, label) pairs for every name in `list_path`.
        with open(list_path, 'r') as f:
            for line in f:
                name = line.strip()
                self.file_list.append([
                    os.path.join(img_dir, ''.join([name, '.jpg'])),
                    os.path.join(target_label_dir, ''.join([name, '.png']))
                ])

    collect(file_path, label_dir)
    if mode == 'trainaug':
        collect(file_path_aug, label_dir_aug)
def __init__(self, transforms, dataset_root, num_classes, mode='train', train_path=None, val_path=None, test_path=None, separator=' ', ignore_index=255, edge=False):
    """Generic file-list dataset driven by explicit per-mode list files.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str): Directory the list-file entries are relative to.
        num_classes (int): Number of classes.
        mode (str, optional): 'train', 'val' or 'test'. Default: 'train'.
        train_path (str, optional): List file for 'train' mode.
        val_path (str, optional): List file for 'val' mode.
        test_path (str, optional): List file for 'test' mode.
        separator (str, optional): Field separator within a list line.
            Default: ' '.
        ignore_index (int, optional): Label value to ignore. Default: 255.
        edge (bool, optional): Whether edge maps are used. Default: False.

    Raises:
        ValueError: If mode is invalid, transforms is None, the mode's list
            path is None, or a train/val line is malformed.
        FileNotFoundError: If dataset_root or the mode's list file is missing.
    """
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.file_list = list()
    self.mode = mode.lower()
    self.num_classes = num_classes
    self.ignore_index = ignore_index
    self.edge = edge

    if self.mode not in ['train', 'val', 'test']:
        raise ValueError(
            "mode should be 'train', 'val' or 'test', but got {}.".format(
                self.mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    if not os.path.exists(self.dataset_root):
        raise FileNotFoundError('there is not `dataset_root`: {}.'.format(
            self.dataset_root))

    # Each mode carries its list path plus its exact error-message
    # literals, so the raised messages stay byte-identical per mode.
    split_info = {
        'train':
        (train_path,
         'When `mode` is "train", `train_path` is necessary, but it is None.',
         '`train_path` is not found: {}'),
        'val':
        (val_path,
         'When `mode` is "val", `val_path` is necessary, but it is None.',
         '`val_path` is not found: {}'),
        'test':
        (test_path,
         'When `mode` is "test", `test_path` is necessary, but it is None.',
         '`test_path` is not found: {}'),
    }
    file_path, none_msg, missing_msg = split_info[self.mode]
    if file_path is None:
        raise ValueError(none_msg)
    if not os.path.exists(file_path):
        raise FileNotFoundError(missing_msg.format(file_path))

    with open(file_path, 'r') as f:
        for line in f:
            items = line.strip().split(separator)
            if len(items) != 2:
                # Only the test list may omit the label column.
                if self.mode == 'train' or self.mode == 'val':
                    raise ValueError(
                        "File list format incorrect! In training or evaluation task it should be"
                        " image_name{}label_name\\n".format(separator))
                image_path = os.path.join(self.dataset_root, items[0])
                label_path = None
            else:
                image_path = os.path.join(self.dataset_root, items[0])
                label_path = os.path.join(self.dataset_root, items[1])
            self.file_list.append([image_path, label_path])
def __init__(self, transforms, dataset_root, mode='train', ignore_stuff_in_offset=False, small_instance_area=0, small_instance_weight=1, stuff_area=2048):
    """Cityscapes panoptic dataset: build file lists from the panoptic JSON.

    Args:
        transforms (list): Transforms applied to each sample.
        dataset_root (str): Root containing 'leftImg8bit' and 'gtFine'.
        mode (str, optional): 'train' or 'val'. Default: 'train'.
        ignore_stuff_in_offset (bool, optional): Forwarded to the panoptic
            target generator. Default: False.
        small_instance_area (int, optional): Forwarded to the panoptic
            target generator. Default: 0.
        small_instance_weight (int, optional): Forwarded to the panoptic
            target generator. Default: 1.
        stuff_area (int, optional): Stored for later stuff filtering.
            Default: 2048.

    Raises:
        ValueError: If mode is invalid, transforms is None, or the dataset
            folder structure is missing.
    """
    self.dataset_root = dataset_root
    self.transforms = Compose(transforms)
    self.file_list = list()
    self.ins_list = []
    mode = mode.lower()
    self.mode = mode
    self.num_classes = 19
    self.ignore_index = 255
    # Train-id classes treated as "things" (instances) in Cityscapes.
    self.thing_list = [11, 12, 13, 14, 15, 16, 17, 18]
    self.label_divisor = 1000
    self.stuff_area = stuff_area

    if mode not in ['train', 'val']:
        raise ValueError(
            "mode should be 'train' or 'val' , but got {}.".format(mode))

    if self.transforms is None:
        raise ValueError("`transforms` is necessary, but it is None.")

    img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
    label_dir = os.path.join(self.dataset_root, 'gtFine')
    if self.dataset_root is None or not os.path.isdir(
            self.dataset_root) or not os.path.isdir(
                img_dir) or not os.path.isdir(label_dir):
        raise ValueError(
            "The dataset is not Found or the folder structure is nonconfoumance."
        )

    json_filename = os.path.join(
        self.dataset_root, 'gtFine',
        'cityscapes_panoptic_{}_trainId.json'.format(mode))
    # BUG FIX: close the annotation file instead of leaking the handle
    # from `json.load(open(...))`.
    with open(json_filename, 'r') as f:
        dataset = json.load(f)

    img_files = []
    label_files = []
    for img in dataset['images']:
        img_file_name = img['file_name']
        # Image path: leftImg8bit/<mode>/<city>/<name>, where the city is
        # the leading '_'-separated token and '_gtFine' is stripped.
        img_files.append(
            os.path.join(self.dataset_root, 'leftImg8bit', mode,
                         img_file_name.split('_')[0],
                         img_file_name.replace('_gtFine', '')))
    for ann in dataset['annotations']:
        ann_file_name = ann['file_name']
        label_files.append(
            os.path.join(self.dataset_root, 'gtFine',
                         'cityscapes_panoptic_{}_trainId'.format(mode),
                         ann_file_name))
        self.ins_list.append(ann['segments_info'])

    self.file_list = [
        [img_path, label_path]
        for img_path, label_path in zip(img_files, label_files)
    ]

    self.target_transform = PanopticTargetGenerator(
        self.ignore_index,
        self.rgb2id,
        self.thing_list,
        sigma=8,
        ignore_stuff_in_offset=ignore_stuff_in_offset,
        small_instance_area=small_instance_area,
        small_instance_weight=small_instance_weight)

    self.raw_semantic_generator = SemanticTargetGenerator(
        ignore_index=self.ignore_index, rgb2id=self.rgb2id)
    self.raw_instance_generator = InstanceTargetGenerator(self.rgb2id)
    self.raw_panoptic_generator = RawPanopticTargetGenerator(
        ignore_index=self.ignore_index,
        rgb2id=self.rgb2id,
        label_divisor=self.label_divisor)