Example #1
    def __init__(
        self,
        root,
        split="training",
        is_transform=False,
        img_size=512,
        augmentations=None,
        img_norm=True,
    ):
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 100
        self.img_size = (img_size if isinstance(img_size, tuple) else
                         (img_size, img_size))
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)

        for split in ["training", "Masks"]:
            if split == "training":
                file_list = recursive_glob(rootdir=self.root + split,
                                           suffix=".jpg")
                self.files[split] = file_list
            else:
                file_list = recursive_glob(rootdir=self.root + split,
                                           suffix=".png")
                self.files[split] = file_list
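Every example in this collection calls a recursive_glob helper that is never shown. Below is a minimal sketch consistent with how it is invoked here (rootdir/suffix keywords, returning a list of matching paths); treat it as an assumption, not the canonical implementation.

    import os

    def recursive_glob(rootdir=".", suffix=""):
        # Walk rootdir recursively and collect every file whose name ends with suffix.
        return [
            os.path.join(looproot, filename)
            for looproot, _, filenames in os.walk(rootdir)
            for filename in filenames
            if filename.endswith(suffix)
        ]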
Example #2
    def __init__(
        self,
        root,
        split="training",
        is_transform=False,
        img_size=(480, 640),
        augmentations=None,
        img_norm=True,
    ):
        self.root = root
        self.is_transform = is_transform
        self.n_classes = 38
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.anno_files = collections.defaultdict(list)
        self.cmap = self.color_map(normalized=False)

        split_map = {"training": "train", "val": "test"}
        self.split = split_map[split]

        for split in ["train", "test"]:
            file_list = sorted(recursive_glob(rootdir=self.root + split + "/", suffix="jpg"))
            self.files[split] = file_list

        for split in ["train", "test"]:
            file_list = sorted(
                recursive_glob(rootdir=self.root + "annotations/" + split + "/", suffix="png")
            )
            self.anno_files[split] = file_list
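Examples #2, #5, #14, and #17 call self.color_map(normalized=False) without defining it. A plausible sketch is the standard PASCAL VOC bit-interleaving colormap, shown here under that assumption (numpy is already imported as np in these loaders).

    def color_map(self, N=256, normalized=False):
        # PASCAL VOC-style colormap: derive an RGB triple from each class id's bits.
        def bitget(byteval, idx):
            return (byteval & (1 << idx)) != 0

        dtype = "float32" if normalized else "uint8"
        cmap = np.zeros((N, 3), dtype=dtype)
        for i in range(N):
            r = g = b = 0
            c = i
            for j in range(8):
                r = r | (bitget(c, 0) << (7 - j))
                g = g | (bitget(c, 1) << (7 - j))
                b = b | (bitget(c, 2) << (7 - j))
                c = c >> 3
            cmap[i] = np.array([r, g, b])
        return cmap / 255 if normalized else cmap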
Example #3
    def __init__(self,
                 root,
                 split="training",
                 is_transform=False,
                 img_size=512,
                 augmentations=None,
                 img_norm=True,
                 test_mode=False,
                 n_classes=151,
                 fold=None):
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.test_mode = test_mode
        self.n_classes = n_classes
        self.fold = fold

        # fold selects which classes to ignore; fold=None (the default) means none
        ignore_dict = {0: [91, 128, 127, 77, 99], 1: [], 2: [], 3: []}
        self.ignore_classes = ignore_dict.get(self.fold, [])

        self.img_size = (img_size if isinstance(img_size, (tuple, list))
                         else (img_size, img_size))
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.class_names = self.parse_classes(self.root + 'classes.txt')

        if not self.test_mode:
            for split in ["training", "validation"]:
                file_list = recursive_glob(rootdir=self.root + "images/" +
                                           self.split + "/",
                                           suffix=".jpg")
                self.files[split] = file_list
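parse_classes is also undefined in this snippet. Assuming classes.txt holds one class name per line, a minimal sketch could be:

    def parse_classes(self, file_path):
        # Hypothetical: classes.txt is assumed to list one class name per line.
        with open(file_path) as f:
            return [line.strip() for line in f if line.strip()]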
Example #4
    def __init__(self,
                 root,
                 split="training",
                 img_size=(640, 1280),
                 is_transform=True,
                 augmentations=None):
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 65

        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([80.5423, 91.3162, 81.4312])
        self.files = {}

        self.images_base = os.path.join(self.root, self.split, "images")
        self.annotations_base = os.path.join(self.root, self.split, "labels")

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix=".jpg")

        self.class_ids, self.class_names, self.class_colors = self.parse_config()

        self.ignore_id = 250

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #5
    def __init__(self,
                 root,
                 split="training",
                 is_transform=False,
                 img_size=(480, 640),
                 augmentations=None):
        self.root = root
        self.is_transform = is_transform
        self.n_classes = 14
        self.augmentations = augmentations
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.cmap = self.color_map(normalized=False)

        split_map = {
            "training": 'train',
            "val": 'test',
        }
        self.split = split_map[split]

        for split in ["train", "test"]:
            file_list = recursive_glob(rootdir=self.root + split + '/',
                                       suffix='png')
            self.files[split] = file_list
Example #6
    def __init__(
        self,
        root,
        split="training",
        is_transform=False,
        img_size=512,
        augmentations=None,
        img_norm=True,
    ):
        self.root = root
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 150
        self.img_size = (img_size if isinstance(img_size, tuple) else
                         (img_size, img_size))
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)

        split_map = {"training": "training", "val": "validation"}
        self.split = split_map[split]

        for split in ["training", "validation"]:
            file_list = recursive_glob(rootdir=self.root + "images/" + split +
                                       "/",
                                       suffix=".jpg")
            self.files[split] = file_list
    def __init__(self, root, split="training", is_transform=False, img_size=512):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 151  # 0 is reserved for "other"
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = {}

        self.images_base = os.path.join(self.root, 'images', self.split)
        self.annotations_base = os.path.join(self.root, 'annotations', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix='.jpg')

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #8
    def __init__(self, root, split="test"):
        """__init__
        :param root:
        :param split:
        """

        self.root = root
        self.split = split
        self.n_classes = 3
        self.files = {}

        self.images_base = os.path.join(self.root, self.split)

        # Generate list of all tif files and save in dictionary
        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix="_Img.tif")

        self.ignore_index = 250
        self.valid_classes = {0: self.ignore_index, 1: 0, 2: 1, 3: 2}
        self.class_names = {
            self.ignore_index: "Ignore",
            0: "Background",
            1: "Contour",
            2: "Nuclei"
        }

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))

        ### Augmentations ###
        self.image_transform = self.get_transform(split == "train")
Example #9

    def __init__(self,
                 root,
                 split="training",
                 is_transform=False,
                 img_size=512):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 151  # 0 is reserved for "other"
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = {}

        self.images_base = os.path.join(self.root, 'images', self.split)
        self.annotations_base = os.path.join(self.root, 'annotations',
                                             self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.jpg')

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #10
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=(1280, 384),
                 augmentations=None,
                 version='pascal',
                 phase='train'):
        """__init__

        :param root:
        :param split:
        :param is_transform: (not used)
        :param img_size: (not used)
        :param augmentations  (not used)
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 2
        self.img_size = img_size
        self.mean = np.array(self.mean_rgb)
        self.files = {}

        if phase == 'train':
            self.images_base = os.path.join(self.root, 'training', 'image_2')
            self.lidar_base = os.path.join(self.root, 'training', 'ADI')
            self.annotations_base = os.path.join(self.root, 'training',
                                                 'gt_image_2')
            self.im_files = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')
        else:
            self.images_base = os.path.join(self.root, 'testing', 'image_2')
            self.lidar_base = os.path.join(self.root, 'testing', 'ADI')
            self.annotations_base = os.path.join(self.root, 'testing',
                                                 'gt_image_2')
            self.split = 'test'

            self.im_files = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')
            self.im_files = sorted(self.im_files)

        self.data_size = len(self.im_files)
        self.phase = phase

        print("Found %d %s images" % (self.data_size, self.split))
Example #11
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=None,
                 augmentations=None,
                 img_norm=True):
        """__init__
        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
       """

        self.root = root
        self.split = split
        self.img_size = (
            374, 1238
        )  # img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.mean = np.array([100.00517842, 106.47954702, 103.08725176])
        self.n_classes = 19
        self.files = {}

        self.images_base = os.path.join(self.root, 'data_semantics', 'custom',
                                        self.split, 'image_2')
        self.annotations_base = os.path.join(self.root, 'data_semantics',
                                             'custom', self.split, 'semantic')

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')

        self.void_classes = [
            0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1
        ]
        self.valid_classes = [
            7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
            32, 33
        ]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                             'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                             'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                             'motorcycle', 'bicycle']

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(19)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))

        print("Image Size: {}".format(self.img_size))
Example #12
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=(512, 1024),
                 augmentations=None,
                 img_norm=True,
                 version='pascal'):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 20
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array(self.mean_rgb[version])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest',
                                             'gtFine', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')

        # Revert back to the original class lists if there is some problem

        self.void_classes = [
            0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 16, 18, 29, 30, -1
        ]
        self.valid_classes = [
            7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
            32, 33, 34
        ]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                            'motorcycle', 'bicycle', 'roadLines']

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(20)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #13
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 use_multi_scale=False,
                 img_size=(600, 800),
                 augmentations=None,
                 img_norm=True,
                 version='pascal',
                 phase='train',
                 fpn=False,
                 norm=False):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 15
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array(self.mean_rgb)
        self.files = {}

        #if phase == 'train':
        self.images_base = os.path.join(self.root, 'images')
        self.annotations_base = os.path.join(self.root, 'labelsRelabelling')
        self.im_files = recursive_glob(rootdir=self.images_base, suffix='.jpg')

        self.data_size = len(self.im_files)
        self.phase = phase
        self.fpn = fpn
        self.norm = norm

        #self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
        #self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
        #self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
        #                    'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
        #                    'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
        #                    'motorcycle', 'bicycle']

        #self.ignore_index = 250
        #self.class_map = dict(zip(self.valid_classes, range(19)))

        #if not self.files[split]:
        #    raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (self.data_size, split))
Example #14
    def __init__(
        self,
        root,
        split="training",
        is_transform=False,
        img_size=(480, 640),
        augmentations=None,
        img_norm=False,
        test_mode=False,
    ):
        self.root = root
        self.is_transform = is_transform
        self.n_classes = 38
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.test_mode = test_mode
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.anno_files = collections.defaultdict(list)
        self.cmap = self.color_map(normalized=False)

        split_map = {"training": "train", "val": "test"}
        self.split = split_map[split]

        if not self.test_mode:
            self.images_base = os.path.join(self.root, self.split)
            self.annotations_base = os.path.join(self.root, "labels",
                                                 self.split)

            file_list = sorted(
                recursive_glob(rootdir=self.images_base, suffix="jpg"))
            self.files[self.split] = file_list

            file_list = sorted(
                recursive_glob(rootdir=self.annotations_base, suffix="png"))
            self.anno_files[self.split] = file_list
Example #15

    def __init__(
        self,
        data_root="",
        presentation_root="",
        is_transform=False,
        img_size=512,
        augmentations=None,
        aug_k=4,
        img_norm=True,
        test_mode=False,
    ):
        self.data_root = data_root
        self.presentation_root = presentation_root
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.aug_k = aug_k
        self.img_norm = img_norm
        self.test_mode = test_mode
        self.n_classes = 6
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])

        if not self.test_mode:
            image_list = recursive_glob(rootdir=self.data_root + "images/training/", suffix=".jpg")
            annotation_list = recursive_glob(rootdir=self.data_root + "annotations/training/", suffix='.png')
            presentation_list = read_file(rootdir=self.presentation_root, filename='train_presentations.txt', split=',')
            classes_list = read_file(rootdir=self.presentation_root, filename="train_class_list.txt", split=',')
        else:
            image_list = recursive_glob(rootdir=self.data_root + "images/validation/", suffix='.jpg')
            annotation_list = recursive_glob(rootdir=self.data_root + "annotations/validation/", suffix='.png')
            presentation_list = read_file(rootdir=self.presentation_root, filename='val_presentations.txt', split=',')
            classes_list = read_file(rootdir=self.presentation_root, filename='val_class_list.txt', split=',')

        self.images = image_list
        self.annotations = annotation_list
        self.presentations = presentation_list
        self.pre_classes = classes_list

        self.presentation = []
        self.classes = []
        self.image_data = []
        self.label_data = []
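read_file is not defined in these snippets. Assuming each presentation/class-list file stores one comma-separated record per line, a minimal sketch (the name, signature, and behavior are all assumptions):

    def read_file(rootdir="", filename="", split=","):
        # Hypothetical helper: one record per line, fields separated by `split`.
        with open(os.path.join(rootdir, filename)) as f:
            return [line.strip().split(split) for line in f if line.strip()]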
Example #16
    def __init__(self, root, split="training", is_transform=False, img_size=512):
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 150
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)

        for split in ["training", "validation",]:
            file_list = recursive_glob(rootdir=self.root + 'images/' + self.split + '/', suffix='.jpg')
            self.files[split] = file_list
Example #17
    def __init__(self, root, split="training", is_transform=False, img_size=(480, 640), augmentations=None):
        self.root = root
        self.is_transform = is_transform
        self.n_classes = 38
        self.augmentations = augmentations
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.anno_files = collections.defaultdict(list)
        self.cmap = self.color_map(normalized=False)

        split_map = {"training": 'train',
                     "val": 'test',}
        self.split = split_map[split]

        for split in ["train", "test"]:
            file_list =  sorted(recursive_glob(rootdir=self.root + split + '/', suffix='jpg'))
            self.files[split] = file_list

        for split in ["train", "test"]:
            file_list =  sorted(recursive_glob(rootdir=self.root + 'annotations/' + split + '/', suffix='png'))
            self.anno_files[split] = file_list
Example #18
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=(480, 640),
                 augmentations=None,
                 img_norm=True):
        """__init__
        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """

        self.root = "/home/felix/projects/larynx/data/"
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 3
        self.img_size = img_size if isinstance(img_size,
                                               (tuple, list)) else (img_size,
                                                                    img_size)
        self.mean = np.array([103.939, 116.779, 123.68])
        self.ignore_index = 250
        self.files = {}

        self.void_classes = []
        self.valid_classes = [0, 1, 2]
        self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))

        self.colors = [
            [0, 0, 0],
            [255, 0, 0],
            [0, 0, 255],
        ]
        self.label_colours = dict(zip(range(self.n_classes), self.colors))
        self.class_names = ["background", "granuloma", "ulcerations"]

        self.images_base = os.path.join(self.root, self.split, "images")
        self.annotations_base = os.path.join(self.root, self.split,
                                             "annotations")
        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix=".png")

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #19
    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=(316, 706),
        augmentations=None,
        img_norm=True,
        version="tempest",
        test_mode=False,
    ):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 2
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array(self.mean_rgb[version])
        self.files = {}

        self.images_base = os.path.join(self.root, self.split, "sdr")
        self.annotations_base = os.path.join(self.root, self.split, "labels")

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix=".png")

        self.void_classes = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -1]
        self.valid_classes = [0, 1]

        #self.void_classes = [ 255]
        #self.valid_classes = [i for i in range(19)]
        self.class_names = ["text", "background"]

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(self.n_classes)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #20
    def __init__(self, root, split="train", is_transform=False, img_size=(1024, 2048)):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 20
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest', 'gtFine', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix='.png')

        self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                            'motorcycle', 'bicycle']
        # Avg class size is used to weight instances during evaluation of iIoU
        # Data from cityscapes evaluation script
        # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py
        self.avg_class_size = [0,0,0,0,0,0,\
                               0,0,0,0,0,\
                               0,3462.4756337644,3930.4788056518,12794.0202738185,27855.1264367816,35732.1511111111,67583.7075812274,\
                               6298.7200839748,4672.3249222261]

        self.classes_to_categories = [0,1,1,2,2,2,\
                                      3,3,3,4,4,\
                                      5,6,6,7,7,7,\
                                      7,7]
        self.category_names = ['void', 'flat', 'construction', 'object',
                               'nature', 'sky', 'human', 'vehicle']

        self.class_map = dict(zip(self.valid_classes, range(1,20)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #21
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=(1024, 2048)):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.n_classes = 20
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest',
                                             'gtFine', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')

        self.void_classes = [
            0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1
        ]
        self.valid_classes = [
            7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
            32, 33
        ]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                            'motorcycle', 'bicycle']

        self.class_map = dict(zip(self.valid_classes, range(1, 20)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #22
    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=(1024, 2048),
        augmentations=None,
        img_norm=True,
        version="cityscapes",
        test_mode=False,
        n_classes=1,
        # ---------------------------------------------------------
        # remove all image-related inputs (e.g. rgb)
    ):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = n_classes
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.files = {}

        self.images_base = os.path.join(self.root, "leftImg8bit", self.split)

        # add depth path
        self.depths_base = os.path.join(self.root, "disparity", self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #23
    def __init__(self,
                 root,
                 split="train",
                 is_transform=False,
                 img_size=(1024, 1024),
                 augmentations=None):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 4
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([73.15835921, 82.90891754, 72.39239876])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest',
                                             'gtFine', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')

        self.void_classes = [0]
        self.valid_classes = [250, 150, 70]
        self.class_names = ['benign', 'in situ', 'invasive']

        self.ignore_index = 3
        self.class_map = dict(zip(self.valid_classes, range(3)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #24
    def __init__(
        self,
        root,
        split="train",
        is_transform=True,
        img_size=(1024, 2048),
        augmentations=None,
        img_norm=True,
        # version="cityscapes",
    ):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.img_size = (
            img_size if isinstance(img_size, tuple) else (img_size, img_size)
        )
        # self.mean = np.array(self.mean_rgb[version])
        self.files = {}

        self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
        self.annotations_base = os.path.join(
            self.root, "disparity", self.split
        )

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")

        if not self.files[split]:
            raise Exception(
                "No files for split=[%s] found in %s" % (split, self.images_base)
            )

        print("Found %d %s images" % (len(self.files[split]), split))
        sys.stdout.flush()
Example #25
    def __init__(self, root, split=["train"], is_transform=False,
                 img_size=(512, 1024), augmentations=None):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """
        self.root = root
        self.split = split
        self.split_text = '+'.join(split)
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 19
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([73.15835921, 82.90891754, 72.39239876])
        self.files = {}

        self.files[self.split_text] = []
        for _split in self.split:
            self.images_base = os.path.join(self.root, 'leftImg8bit', _split)
            self.annotations_base = os.path.join(self.root, 'gtFine', _split)
            # accumulate across all requested splits instead of overwriting each pass
            self.files[self.split_text] += recursive_glob(rootdir=self.images_base, suffix='.png')

        self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
        self.no_instances = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                            'motorcycle', 'bicycle']

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(19)))

        if len(self.files[self.split_text]) < 2:
            raise Exception("No files for split=[%s] found in %s" % (self.split_text, self.images_base))

        print("Found %d %s images" % (len(self.files[self.split_text]), self.split_text))
Example #26
    def __init__(self,
                 root,
                 split="training",
                 is_transform=False,
                 img_size=(240, 320),
                 splitRate=0.7):
        self.root = root + 'imgs/'
        self.n_classes = 10
        self.split = split
        self.splitRate = splitRate
        self.is_transform = is_transform
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array(
            [122.5454, 104.7834, 100.0239, 134.5181, 110.9748, 137.2213])
        self.files_rgb = recursive_glob(rootdir=self.root + 'rgb/',
                                        suffix='.png')
        self.datasize = len(self.files_rgb)
        self.startIndex = 0 if split == "training" else int(self.datasize *
                                                            splitRate)
        self.interestedLables = np.array(
            [5, 11, 36, 49, 83, 88, 157, 158, 169])
    def __init__(self, root, split="train", is_transform=False, 
                 img_size=(512, 1024), augmentations=None):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 19
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array([73.15835921, 82.90891754, 72.39239876])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest', 'gtFine', self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix='.png')
    
        self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
        self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',\
                            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',\
                            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
                            'motorcycle', 'bicycle']

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(19))) 

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #28

    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=(1024, 2048),
        augmentations=None,
        img_norm=True,
        version="cityscapes",
        test_mode=False,
    ):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 19
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
        self.mean = np.array(self.mean_rgb[version])
        self.files = {}

        self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
        self.annotations_base = os.path.join(self.root, "gtFine", self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")

        self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
        self.valid_classes = [
            7,
            8,
            11,
            12,
            13,
            17,
            19,
            20,
            21,
            22,
            23,
            24,
            25,
            26,
            27,
            28,
            31,
            32,
            33,
        ]

        #self.void_classes = [ 255]
        #self.valid_classes = [i for i in range(19)]
        self.class_names = [
            "unlabelled",
            "road",
            "sidewalk",
            "building",
            "wall",
            "fence",
            "pole",
            "traffic_light",
            "traffic_sign",
            "vegetation",
            "terrain",
            "sky",
            "person",
            "rider",
            "car",
            "truck",
            "bus",
            "train",
            "motorcycle",
            "bicycle",
        ]

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(19)))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #29
    def __init__(
        self,
        root,
        split="train",
        is_transform=False,
        img_size=(512, 1024),
        augmentations=None,
        img_norm=True,
        version="cityscapes",
        test_mode=False,
    ):
        """__init__

        :param root:
        :param split:
        :param is_transform:
        :param img_size:
        :param augmentations 
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.img_norm = img_norm
        self.n_classes = 2
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)

        self.mean = np.array(self.mean_rgb[version])
        self.files = {}
        self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
        self.annotations_base = os.path.join(self.root, "gtFine", self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix=".jpg")

        self.void_classes = [35, 36, 37, 38, 39]
        self.valid_classes = [
            #16 ,
            #90 ,
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15,
            16,
            17,
            18,
            19,
            20,
            21,
            22,
            23,
            24,
            25,
            26,
            27,
            28,
            29,
            30,
            31,
            32,
            33,
            34,
            35,
            36,
            37,
            38,
            39,
        ]
        self.class_names = [
            's_w_d',
            's_y_d',
            #  'ds_w_dn',
            #  'ds_y_dn',
            #  'sb_w_do',
            #  'sb_y_do',
            #    'b_w_g',
            #    'b_y_g',
            #   'db_w_g',
            #   'db_y_g',
            #   'db_w_s',
            #    's_w_s',
            #   'ds_w_s',
            #    's_w_c',
            #    's_y_c',
            #    's_w_p',
            #    's_n_p',
            #   'c_wy_z',
            #    'a_w_u',
            #    'a_w_t',
            #   'a_w_tl',
            #   'a_w_tr',
            #  'a_w_tlr',
            #    'a_w_l',
            #    'a_w_r',
            #   'a_w_lr',
            #   'a_n_lu',
            #   'a_w_tu',
            #    'a_w_m',
            #    'a_y_t',
            #   'b_n_sr',
            #  'd_wy_za',
            #  'r_wy_np',
            # 'vom_wy_n',
            #   'om_n_n',
        ]

        self.ignore_index = 255
        self.class_map = dict(zip(self.valid_classes, range(len(self.valid_classes))))

        if not self.files[split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("Found %d %s images" % (len(self.files[split]), split))
Example #30
    def __init__(
            self,
            cfg,
            mode="train",
            augmentations=None,
    ):
        self.transductive = cfg["transductive"]
        self.root = cfg["data_path"]
        self.mode = mode
        self.augmentations = augmentations
        if mode == 'train':
            self.split = cfg["train_split"]
        elif mode == 'val':
            self.split = cfg["val_split"]

        self.files = {}
        self.images_base = os.path.join(self.root, "Img", self.split)
        self.annotations_base = os.path.join(self.root, "trainval59", self.split)
        self.files[self.split] = recursive_glob(rootdir=self.annotations_base, suffix=".png")

        self.n_classes = 59
        self.void_classes = [0]
        self.valid_classes = range(1,60)

        self.class_map = dict(zip(self.valid_classes, range(59)))
        self.unseen30 = [1, 2, 4, 8, 9, 11, 12, 13, 18, 19, 
                        21, 22, 23, 24, 25, 26, 27, 29, 34, 35, 
                        37, 38, 41, 43, 44, 46, 48, 54, 56, 58]

        self.unseen20 = [2, 4, 11, 12, 13, 19, 21, 23, 24, 26, 
                        34, 35, 37, 41, 43, 44, 46, 48, 51, 56]

        self.unseen10 = [2, 12, 13, 26, 34, 35, 41, 43, 46, 51]

        self.unseen5 = [2, 13, 34, 43, 51]

        if cfg["unseen"] ==0:
            self.unseen_classes = [] # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] ==5:
            self.unseen_classes = self.unseen5  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] ==10:
            self.unseen_classes = self.unseen10  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"]==20:
            self.unseen_classes = self.unseen20  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"]==30:
            self.unseen_classes = self.unseen30  # Note that these should be choosen from IDs for valid categories
        else:
            raise Exception("Only support 10, 20, 30 unseen")

        self.ignore_index = 250

        self.embd = scio.loadmat(cfg["emdb_path"])["embd"]
        self.embd = self.embd[self.valid_classes]
        self.embeddings = torch.nn.Embedding(self.embd.shape[0], self.embd.shape[1])
        self.embeddings.weight.requires_grad = False
        self.embeddings.weight.data.copy_(torch.from_numpy(self.embd))

        if not self.files[self.split]:
            raise Exception("No files for split=[%s] found in %s" % (self.split, self.images_base))

        print("Found %d %s images" % (len(self.files[self.split]), self.split))
Example #31
    def __init__(
        self,
        cfg,
        mode="train",
        augmentations=None,
    ):
        self.transductive = cfg["transductive"]
        self.root = cfg["data_path"]
        self.mode = mode
        self.augmentations = augmentations
        if mode == 'train':
            self.split = cfg["train_split"]
        elif mode == 'val':
            self.split = cfg["val_split"]

        self.files = {}
        self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
        self.annotations_base = os.path.join(self.root, "gtFine", self.split)
        self.files[self.split] = recursive_glob(rootdir=self.images_base,
                                                suffix=".png")

        self.n_classes = 23
        self.void_classes = [0, 1, 2, 3, 4, 5, 9, 14, 16, 18, 29, -1]
        self.valid_classes = [
            6, 7, 8, 10, 11, 12, 13, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26,
            27, 28, 30, 31, 32, 33
        ]

        self.unseen12 = [0, 1, 4, 7, 8, 10, 12, 13, 15, 17, 18, 22]
        self.unseen8 = [7, 8, 10, 12, 17, 18, 22]
        self.unseen4 = [7, 10, 17, 22]
        self.unseen2 = [17, 22]

        if cfg["unseen"] == 0:
            self.unseen_classes = [
            ]  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 2:
            self.unseen_classes = self.unseen2  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 4:
            self.unseen_classes = self.unseen4  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 8:
            self.unseen_classes = self.unseen8  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 12:
            self.unseen_classes = self.unseen12  # Note that these should be choosen from IDs for valid categories
        else:
            raise Exception("Only support 4, 8, 12 unseen")

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(23)))

        self.embd = scio.loadmat(cfg["emdb_path"])["embd"]
        self.embd = self.embd[self.valid_classes]
        self.embeddings = torch.nn.Embedding(self.embd.shape[0],
                                             self.embd.shape[1])
        self.embeddings.weight.requires_grad = False
        self.embeddings.weight.data.copy_(torch.from_numpy(self.embd))

        if not self.files[self.split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (self.split, self.images_base))

        print("Found %d %s images" % (len(self.files[self.split]), self.split))
Example #32
    def __init__(
        self,
        cfg,
        mode="train",
        augmentations=None,
    ):
        self.transductive = cfg["transductive"]
        self.root = cfg["data_path"]
        self.mode = mode
        self.augmentations = augmentations
        if mode == 'train':
            self.split = cfg["train_split"]
        elif mode == 'val':
            self.split = cfg["val_split"]

        self.files = {}
        self.images_base = os.path.join(self.root, "IMG", self.split)
        self.annotations_base = os.path.join(self.root, "GT", self.split)
        self.files[self.split] = recursive_glob(rootdir=self.annotations_base,
                                                suffix=".png")

        self.n_classes = 150
        self.void_classes = [0]
        self.valid_classes = range(1, 151)

        self.unseen75 = [
            1, 3, 4, 5, 6, 11, 13, 15, 18, 19, 20, 21, 24, 26, 29, 30, 31, 33,
            34, 35, 38, 41, 43, 44, 45, 48, 50, 53, 55, 58, 59, 62, 63, 64, 70,
            71, 75, 76, 77, 80, 82, 83, 84, 86, 87, 91, 92, 93, 94, 98, 99,
            100, 104, 105, 109, 110, 112, 115, 120, 123, 124, 125, 126, 127,
            128, 129, 133, 136, 137, 138, 139, 140, 143, 144, 147
        ]  # Note that these should be chosen from IDs for valid categories

        self.unseen50 = [
            1, 3, 4, 5, 6, 13, 15, 18, 19, 20, 21, 24, 26, 29, 31, 33, 34, 35,
            41, 43, 48, 50, 53, 58, 59, 62, 64, 71, 75, 80, 82, 83, 84, 86, 87,
            91, 92, 93, 99, 109, 112, 123, 126, 127, 129, 133, 137, 138, 144,
            147
        ]

        self.unseen25 = [
            1, 3, 6, 13, 15, 18, 21, 24, 29, 34, 35, 41, 50, 53, 71, 82, 83,
            84, 91, 92, 99, 109, 126, 127, 133
        ]

        self.unseen5 = [3, 18, 34, 50, 83]

        if cfg["unseen"] == 0:
            self.unseen_classes = [
            ]  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 5:
            self.unseen_classes = self.unseen5  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 25:
            self.unseen_classes = self.unseen25  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 50:
            self.unseen_classes = self.unseen50  # Note that these should be choosen from IDs for valid categories
        elif cfg["unseen"] == 75:
            self.unseen_classes = self.unseen75  # Note that these should be choosen from IDs for valid categories
        else:
            raise Exception("Only support 5,25,50,75 unseen")

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(150)))

        self.embd = scio.loadmat(cfg["emdb_path"])["embd"]
        self.embd = self.embd[self.valid_classes]
        self.embeddings = torch.nn.Embedding(self.embd.shape[0],
                                             self.embd.shape[1])
        self.embeddings.weight.requires_grad = False
        self.embeddings.weight.data.copy_(torch.from_numpy(self.embd))

        if not self.files[self.split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (self.split, self.images_base))

        print("Found %d %s images" % (len(self.files[self.split]), self.split))
Example #33
    def __init__(
        self,
        cfg,
        mode="train",
        augmentations=None,
    ):
        self.transductive = cfg["transductive"]
        self.root = cfg["data_path"]
        self.mode = mode
        self.augmentations = augmentations
        if mode == 'train':
            self.split = cfg["train_split"]
        elif mode == 'val':
            self.split = cfg["val_split"]

        self.files = {}
        self.images_base = os.path.join(self.root, "Img", self.split)
        self.annotations_base = os.path.join(self.root, "trainval400",
                                             self.split)

        self.files[self.split] = recursive_glob(rootdir=self.annotations_base,
                                                suffix=".mat")

        self.n_classes = 215
        self.void_classes = [
            0, 1, 3, 4, 5, 7, 12, 13, 14, 16, 20, 21, 24, 29, 35, 38, 41, 47,
            50, 52, 54, 63, 64, 67, 71, 73, 74, 76, 77, 79, 81, 82, 83, 84, 89,
            91, 92, 93, 94, 95, 97, 99, 100, 101, 102, 103, 107, 108, 109, 111,
            112, 114, 116, 117, 118, 119, 120, 121, 125, 126, 127, 129, 130,
            131, 132, 133, 134, 135, 137, 139, 142, 143, 145, 146, 147, 151,
            152, 153, 156, 157, 160, 161, 163, 164, 166, 167, 168, 171, 172,
            173, 174, 175, 177, 178, 179, 180, 182, 183, 188, 192, 193, 197,
            198, 200, 201, 202, 203, 205, 206, 209, 210, 212, 214, 215, 217,
            218, 222, 224, 226, 227, 229, 230, 231, 233, 234, 235, 236, 237,
            238, 239, 240, 241, 242, 243, 245, 246, 249, 253, 254, 255, 256,
            257, 264, 267, 270, 274, 276, 278, 279, 280, 283, 285, 288, 292,
            298, 299, 300, 301, 302, 304, 305, 310, 312, 313, 315, 317, 318,
            321, 322, 325, 327, 328, 331, 332, 335, 336, 337, 338, 339, 340,
            341, 343, 344, 345, 346, 348, 351, 352, 353, 358, 362, 364, 365,
            367, 369, 370, 372, 375, 376, 379, 380, 381, 382, 385, 386, 387,
            388, 389, 390, 391, 392, 393, 394, 395, 396, 398, 399, 401, 404,
            407, 408, 409, 411, 414, 417, 421, 422, 423, 425, 426, 428, 429,
            433, 439, 441, 442, 444, 447, 448, 449, 450, 451, 453, 455, 459
        ]

        self.valid_classes = [
            2, 6, 8, 9, 10, 11, 15, 17, 18, 19, 22, 23, 25, 26, 27, 28, 30, 31,
            32, 33, 34, 36, 37, 39, 40, 42, 43, 44, 45, 46, 48, 49, 51, 53, 55,
            56, 57, 58, 59, 60, 61, 62, 65, 66, 68, 69, 70, 72, 75, 78, 80, 85,
            86, 87, 88, 90, 96, 98, 104, 105, 106, 110, 113, 115, 122, 123,
            124, 128, 136, 138, 140, 141, 144, 148, 149, 150, 154, 155, 158,
            159, 162, 165, 169, 170, 176, 181, 184, 185, 186, 187, 189, 190,
            191, 194, 195, 196, 199, 204, 207, 208, 211, 213, 216, 219, 220,
            221, 223, 225, 228, 232, 244, 247, 248, 250, 251, 252, 258, 259,
            260, 261, 262, 263, 265, 266, 268, 269, 271, 272, 273, 275, 277,
            281, 282, 284, 286, 287, 289, 290, 291, 293, 294, 295, 296, 297,
            303, 306, 307, 308, 309, 311, 314, 316, 319, 320, 323, 324, 326,
            329, 330, 333, 334, 342, 347, 349, 350, 354, 355, 356, 357, 359,
            360, 361, 363, 366, 368, 371, 373, 374, 377, 378, 383, 384, 397,
            400, 402, 403, 405, 406, 410, 412, 413, 415, 416, 418, 419, 420,
            424, 427, 430, 431, 432, 434, 435, 436, 437, 438, 440, 443, 445,
            446, 452, 454, 456, 457, 458
        ]

        self.unseen_classes = [
            1, 2, 4, 5, 6, 7, 13, 14, 15, 16, 18, 21, 22, 23, 24, 25, 26, 30,
            31, 32, 33, 34, 35, 36, 37, 39, 40, 41, 43, 45, 46, 48, 49, 52, 53,
            54, 55, 56, 60, 61, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76,
            77, 81, 82, 83, 84, 85, 86, 87, 88, 91, 92, 93, 94, 95, 96, 97, 99,
            100, 101, 102, 103, 105, 106, 107, 108, 110, 111, 112, 113, 114,
            115, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
            131, 132, 134, 135, 136, 137, 138, 139, 140, 143, 144, 145, 146,
            148, 149, 150, 151, 152, 153, 154, 157, 158, 159, 160, 161, 164,
            167, 168, 169, 171, 172, 175, 176, 177, 178, 179, 180, 181, 183,
            184, 185, 186, 187, 188, 189, 190, 193, 194, 198, 199, 200, 201,
            202, 203, 204, 205, 207, 209, 210, 212, 213
        ]  # Note that these should be chosen from IDs for valid categories

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(215)))

        self.embd = scio.loadmat(cfg["emdb_path"])["embd"]
        self.embd = self.embd[self.valid_classes]
        self.embeddings = torch.nn.Embedding(self.embd.shape[0],
                                             self.embd.shape[1])
        self.embeddings.weight.requires_grad = False
        self.embeddings.weight.data.copy_(torch.from_numpy(self.embd))

        if not self.files[self.split]:
            raise Exception("No files for split=[%s] found in %s" %
                            (self.split, self.images_base))

        print("Found %d %s images" % (len(self.files[self.split]), self.split))