Example #1
    def __init__(self,
                 root,
                 split="train",
                 gt="gtCoarse",
                 img_size=(512, 1024),
                 is_transform=False,
                 augmentations=None):
        """
        :param root:         (str)  Path to the dataset root
        :param split:        (str)  Data set split -- 'train' 'train_extra' or 'val'
        :param gt:           (str)  Type of ground truth label -- 'gtFine' or 'gtCoarse'
        :param img_size:     (tuple or int) The size of the input image
        :param is_transform: (bool) Transform the image or not
        :param augmentations: (object) Data augmentations applied to the image and label
        """
        self.root = root
        self.gt = gt
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations

        self.n_classes = 19
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([73.16, 82.91, 72.39])
        self.files = {}

        self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
        self.annotations_base = os.path.join(self.root, gt, self.split)

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.png')

        self.void_classes = [
            0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1
        ]
        self.valid_classes = [
            7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
            32, 33
        ]
        self.class_names = [
            'unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',
            'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',
            'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
            'motorcycle', 'bicycle'
        ]

        self.ignore_index = 250
        self.class_map = dict(zip(self.valid_classes, range(19)))

        if not self.files[split]:
            raise Exception("> No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("> Found %d %s images..." % (len(self.files[split]), split))
Example #2
    def __init__(
            self,
            root,
            split="training",
            is_transform=False,
            img_size=(480, 640),
            img_norm=False,
            test_mode=False,
    ):
        self.root = root
        self.is_transform = is_transform
        self.n_classes = 38
        self.img_norm = img_norm
        self.test_mode = test_mode
        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([104.00699, 116.66877, 122.67892])
        self.files = collections.defaultdict(list)
        self.anno_files = collections.defaultdict(list)
        self.cmap = self.color_map(normalized=False)

        split_map = {"training": "train", "val": "test"}
        self.split = split_map[split]

        if not self.test_mode:
            self.images_base = os.path.join(self.root, self.split)
            self.annotations_base = os.path.join(self.root, "labels",
                                                 self.split)
            print(self.images_base)
            print(self.annotations_base)

            file_list = sorted(
                recursive_glob(rootdir=self.images_base, suffix="jpg"))
            self.files[self.split] = file_list

            file_list = sorted(
                recursive_glob(rootdir=self.annotations_base, suffix="png"))
            self.anno_files[self.split] = file_list
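Example #2 also calls a color_map method that is not shown. A common choice for such palettes is the PASCAL VOC scheme, which assembles each class colour bit by bit from the bits of the class index; a hedged sketch as a standalone function (the actual method may differ):

import numpy as np


def color_map(N=256, normalized=False):
    # PASCAL VOC-style palette: the channel values of class i are
    # built bit by bit from the bits of i.
    def bitget(byteval, idx):
        return (byteval & (1 << idx)) != 0

    dtype = "float32" if normalized else "uint8"
    cmap = np.zeros((N, 3), dtype=dtype)
    for i in range(N):
        r = g = b = 0
        c = i
        for j in range(8):
            r = r | (bitget(c, 0) << (7 - j))
            g = g | (bitget(c, 1) << (7 - j))
            b = b | (bitget(c, 2) << (7 - j))
            c = c >> 3
        cmap[i] = np.array([r, g, b])
    return cmap / 255 if normalized else cmap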
Example #3
    def _prepare_filenames(self):
        if self.img_size == (512, 1024):
            self.images_base = os.path.join(self.root, "leftImg8bit_small",
                                            self.split)
            self.sequence_base = os.path.join(self.root,
                                              "leftImg8bit_sequence_small",
                                              self.split)
        elif self.img_size == (256, 512):
            self.images_base = os.path.join(self.root, "leftImg8bit_tiny",
                                            self.split)
            self.sequence_base = os.path.join(self.root,
                                              "leftImg8bit_sequence_tiny",
                                              self.split)
        else:
            raise NotImplementedError(f"Unexpected image size {self.img_size}")
        self.annotations_base = os.path.join(self.root, "gtFine", self.split)

        if self.only_sequences_with_segmentation:
            self.files = sorted(recursive_glob(rootdir=self.images_base))
        else:
            self.files = sorted(recursive_glob(rootdir=self.sequence_base))
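Examples #1 and #3 collect only image paths; the matching gtFine label path is usually derived from the image path inside __getitem__, mirroring the construction used in Example #7. A sketch of that derivation (the helper name is hypothetical):

import os


def derive_label_path(img_path, annotations_base):
    # Map .../leftImg8bit/<split>/<city>/<city>_<seq>_<frame>_leftImg8bit.png
    # to the matching gtFine_labelIds.png under annotations_base.
    city = img_path.split(os.sep)[-2]
    base = os.path.basename(img_path).replace("_leftImg8bit.png",
                                              "_gtFine_labelIds.png")
    return os.path.join(annotations_base, city, base)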
Example #4
    def __init__(self,
                 root,
                 split="training",
                 img_size=(640, 1280),
                 is_transform=True,
                 augmentations=None):
        """
        :param root:         (str)  Path to the dataset root
        :param split:        (str)  Data set split -- 'training' or 'validation'
        :param img_size:     (tuple or int) The size of the input image
        :param is_transform: (bool) Transform the image or not
        :param augmentations: (object) Data augmentations applied to the image and label
        """
        self.root = root
        self.split = split
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.n_classes = 65

        self.img_size = img_size if isinstance(img_size, tuple) else (img_size,
                                                                      img_size)
        self.mean = np.array([80.5423, 91.3162, 81.4312])
        self.files = {}

        self.images_base = os.path.join(self.root, self.split, 'images')
        self.annotations_base = os.path.join(self.root, self.split, 'labels')

        self.files[split] = recursive_glob(rootdir=self.images_base,
                                           suffix='.jpg')

        self.class_ids, self.class_names, self.class_colors = self._parse_config()

        self.ignore_id = 65

        if not self.files[split]:
            raise Exception("> No files for split=[%s] found in %s" %
                            (split, self.images_base))

        print("> Found %d %s images..." % (len(self.files[split]), split))
Example #5
    def _prepare_filenames(self):
        self.images_base = os.path.join(self.root, self.split)
        self.annotations_base = os.path.join(self.root, self.split + "annot")
        self.files = sorted(recursive_glob(rootdir=self.images_base))
Example #6
    )
    if os.path.exists(cvt_img_root):
        print("> Path {} exists, deleting it...".format(cvt_img_root))
        shutil.rmtree(cvt_img_root)
    if os.path.exists(cvt_msk_root):
        print("> Path {} exists, deleting it...".format(cvt_msk_root))
        shutil.rmtree(cvt_msk_root)

    if not os.path.exists(cvt_img_root):
        print("> Path {} does not exist, creating it...".format(cvt_img_root))
        os.mkdir(cvt_img_root)
    if not os.path.exists(cvt_msk_root):
        print("> Path {} does not exist, creating it...".format(cvt_msk_root))
        os.mkdir(cvt_msk_root)

    img_list = recursive_glob(rootdir=os.path.join(deepdrive_root, "images"),
                              suffix=".jpg")

    for idx, img_path in enumerate(img_list):
        img_name = os.path.basename(img_path)
        msk_name = img_name.replace(".jpg", "_train_id.png")
        msk_path = os.path.join(deepdrive_root, "labels", msk_name)

        print(
            "> # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #"
        )
        print("> Processing {}...".format(img_name))
        image = Image.open(img_path).convert('RGB')
        mask = Image.open(msk_path).convert('L')

        image = image.resize((img_w, img_h), Image.BILINEAR)
        mask = mask.resize((img_w, img_h), Image.NEAREST)
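Example #6 resizes the image with Image.BILINEAR but the mask with Image.NEAREST; nearest-neighbour matters for label maps because interpolation would invent IDs that belong to no class. A small demonstration on a synthetic two-class mask:

import numpy as np
from PIL import Image

# A 4x4 mask containing only the label IDs 0 and 20.
mask = Image.fromarray(np.array([[0] * 4, [0] * 4, [20] * 4, [20] * 4],
                                dtype=np.uint8))

nearest = np.unique(np.array(mask.resize((8, 8), Image.NEAREST)))
bilinear = np.unique(np.array(mask.resize((8, 8), Image.BILINEAR)))

print(nearest)   # [ 0 20]: only the original IDs survive
print(bilinear)  # also contains blended values strictly between 0 and 20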
Example #7
    def _generate_sequence_list(self, dirs, splits, nr_scenes, nr_sequences, seq_len, seq_overlap, shuffle_before_cut,
                                train_scales):
        # Each entry of `sequences` is:
        #   [(img_path, lbl_path), ...], 'scene_name', seq_in_scene,
        #   scene_group_nr, scene_nr, 'file_type', files_seq_nr[],
        #   augmentation[hflip, scaleSize]
        sequences = []
        scene_nr = 0
        scene_group_nr = 0
        augmentation = [0]
        for dir, split, nr_of_scenes, nr_of_sequences, shuffle_bef_cut, train_scale in zip(dirs, splits, nr_scenes,
                                                                                           nr_sequences,
                                                                                           shuffle_before_cut,
                                                                                           train_scales):
            images_base = path.join(dir, "leftImg8bit", split)
            lbls_base = path.join(dir, "gtFine", split)
            files = recursive_glob(rootdir=images_base, suffix=".png")
            files = sorted(files)

            if split != 'test':  # remove files without labels
                files_temp = []
                for i, file in enumerate(files):
                    lbl_path = path.join(lbls_base, file.split(sep)[-2],
                                         path.basename(file)[:-15] + "gtFine_labelIds.png")
                    if path.isfile(lbl_path):
                        files_temp.append(file)
                files = files_temp

            # create sequence list
            sequences_dataset = []
            if sequences:
                scene_nr = sequences[-1][4] + 1
            seq_in_scene = 0
            for i, file in enumerate(files):
                lbl_path = path.join(lbls_base, file.split(sep)[-2],
                                     path.basename(file)[:-15] + "gtFine_labelIds.png")
                current_file_type = ''
                if len(re.findall(r'_\d+_\d+_\d+_', path.basename(file))) == 1:  # video-file
                    current_file_type = 'video-file'
                    seq_nr_str = re.findall(r'\d+_\d+_\d+', path.basename(file))[0]
                elif len(re.findall(r'_\d+_\d+_', path.basename(file))) == 1:  # single-frame file or sequence file
                    current_file_type = 'seq-file'
                    seq_nr_str = re.findall(r'_\d+_\d+_', path.basename(file))[0]
                    seq_nr_str = seq_nr_str[1:-1]
                else:
                    # Fail loudly instead of silently reusing seq_nr_str
                    # from the previous file.
                    raise ValueError("Unrecognised file name pattern: %s" % file)

                seq_nr = int(seq_nr_str.replace('_', ''))

                if len(sequences_dataset) == 0:  # very first interval
                    seq_in_scene = 0
                    sequences_dataset.append(
                        [[(file, lbl_path)], seq_nr_str, seq_in_scene, scene_group_nr, scene_nr, current_file_type,
                         [seq_nr], augmentation.copy() + [train_scale]])
                    continue

                prev_interval = sequences_dataset[-1]
                if current_file_type != prev_interval[5] or prev_interval[6][-1] + 1 != seq_nr:  # new scene:
                    scene_nr += 1
                    seq_in_scene = 0
                    sequences_dataset.append(
                        [[(file, lbl_path)], seq_nr_str, seq_in_scene, scene_group_nr, scene_nr, current_file_type,
                         [seq_nr], augmentation.copy() + [train_scale]])
                elif len(prev_interval[0]) == seq_len:  # check if last interval full --> new interval, same_scene
                    seq_in_scene += 1
                    if seq_overlap > 0:
                        sequences_dataset.append(
                            [prev_interval[0][-seq_overlap:] + [(file, lbl_path)], prev_interval[1],
                             seq_in_scene, scene_group_nr, scene_nr, current_file_type,
                             prev_interval[6][-seq_overlap:] + [seq_nr], augmentation.copy() + [train_scale]])
                    else:
                        sequences_dataset.append(
                            [[(file, lbl_path)], prev_interval[1], seq_in_scene, scene_group_nr, scene_nr,
                             current_file_type, [seq_nr], augmentation.copy() + [train_scale]])
                else:  # same interval, same scene
                    prev_interval[0].append((file, lbl_path))
                    prev_interval[6].append(seq_nr)

            # Cut sequence list
            assert not (nr_of_scenes != 'all' and nr_of_sequences != 'all')
            # Requires file_intervals list to be sorted by scenes.
            if nr_of_scenes != 'all':
                if shuffle_bef_cut:
                    self.shuffle_scenes_of_sequences(sequences_dataset)  # shuffle before cut scenes
                nr_of_scenes_curr = 0
                prev_scene_name = ''
                for index, file_interval in enumerate(sequences_dataset):
                    if prev_scene_name != file_interval[1]:
                        nr_of_scenes_curr += 1
                        if nr_of_scenes_curr > nr_of_scenes:
                            sequences_dataset = sequences_dataset[:index]
                            break
                        prev_scene_name = file_interval[1]
            elif nr_of_sequences != "all":
                sequences_dataset = sequences_dataset[:nr_of_sequences]

            sequences += sequences_dataset
        return sequences
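The two regexes near the top of Example #7 decide whether a frame comes from a three-field video dump or an ordinary two-field sequence file. A quick check on names following the Cityscapes conventions:

import re


def classify(name):
    # Same patterns as Example #7: three underscore-delimited number
    # groups mark a video dump, two mark a normal sequence frame.
    if len(re.findall(r'_\d+_\d+_\d+_', name)) == 1:
        return 'video-file'
    if len(re.findall(r'_\d+_\d+_', name)) == 1:
        return 'seq-file'
    return 'unknown'


print(classify('stuttgart_00_000000_000001_leftImg8bit.png'))  # video-file
print(classify('aachen_000000_000019_leftImg8bit.png'))        # seq-file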
Example #8
    def _prepare_filenames(self):
        self.images_base = os.path.join(self.root, self.split, "images")
        self.annotations_base = os.path.join(self.root, self.split, "labels")
        self.files = sorted(
            recursive_glob(rootdir=self.images_base, suffix=".jpg"))
Example #9
    def _prepare_filenames(self):
        self.images_base = self.root
        self.sequence_base = None
        self.annotations_base = None

        self.files = sorted(recursive_glob(rootdir=self.images_base))
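Once a __getitem__ and __len__ are added (neither appears in these snippets), any of these loaders can be consumed through a standard PyTorch DataLoader. A sketch assuming Example #1's class is named CityscapesLoader, a hypothetical name:

from torch.utils.data import DataLoader

# CityscapesLoader is a hypothetical name for the class whose
# __init__ appears in Example #1.
dataset = CityscapesLoader(root="/data/cityscapes",
                           split="train",
                           gt="gtFine",
                           img_size=(512, 1024),
                           is_transform=True)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)

for images, labels in loader:
    pass  # training step goes here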