Example #1
    def __getitem__(self, index):
        img = None
        valid = True
        # Retry with the next index until a readable image is found, so one
        # corrupt path does not abort the whole epoch.
        while img is None:
            try:
                img = ImageHelper.read_image(
                    self.img_list[index],
                    tool=self.configer.get('data', 'image_tool'),
                    mode=self.configer.get('data', 'input_mode'))
                assert isinstance(img, (np.ndarray, Image.Image))
            except Exception:
                Log.warn('Invalid image path: {}'.format(self.img_list[index]))
                img = None
                valid = False
                index = (index + 1) % len(self.img_list)

        label = torch.from_numpy(np.array(self.label_list[index]))
        if self.aug_transform is not None:
            img = self.aug_transform(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(valid=valid,
                    img=DataContainer(img, stack=True),
                    label=DataContainer(label, stack=True))
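
The returned fields are wrapped in DataContainer so that a custom collate function (such as the stack helper in Example #3) can decide per field how, or whether, to batch them. The sketch below imitates that contract with a hypothetical MiniDataContainer and invented shapes; it is only an illustration of why the stack flag travels with the data, not the project's real class.

import torch

class MiniDataContainer:
    """Hypothetical stand-in for DataContainer: a payload plus batching hints."""
    def __init__(self, data, stack=False, cpu_only=False):
        self.data = data
        self.stack = stack
        self.cpu_only = cpu_only

def fake_sample(label):
    # Roughly the shape of one __getitem__ result (sizes invented for the demo).
    return dict(valid=True,
                img=MiniDataContainer(torch.zeros(3, 224, 224), stack=True),
                label=MiniDataContainer(torch.tensor(label), stack=True))

# A collate_fn can consult the .stack flag per field instead of relying on
# default_collate's one-size-fits-all behaviour.
batch = [fake_sample(i) for i in range(2)]
imgs = torch.stack([s['img'].data for s in batch])      # (2, 3, 224, 224)
labels = torch.stack([s['label'].data for s in batch])  # (2,)
print(imgs.shape, labels.shape)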
Example #2
    def __getitem__(self, index):
        img = None
        valid = True
        while img is None:
            try:
                img = ImageHelper.read_image(self.item_list[index][0],
                                             tool=self.configer.get('data', 'image_tool'),
                                             mode=self.configer.get('data', 'input_mode'))
                assert isinstance(img, (np.ndarray, Image.Image))
            except Exception:
                Log.warn('Invalid image path: {}'.format(self.item_list[index][0]))
                img = None
                valid = False
                index = (index + 1) % len(self.item_list)

        # Size of the raw image, recorded before augmentation so predictions can
        # later be mapped back to the original resolution.
        ori_img_size = ImageHelper.get_size(img)
        if self.aug_transform is not None:
            img = self.aug_transform(img)

        # Size after augmentation, reversed into (height, width) order.
        border_hw = ImageHelper.get_size(img)[::-1]
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(
            valid=valid,
            ori_img_size=ori_img_size,
            border_hw=border_hw,
            img_path=self.item_list[index][0],
            filename=self.item_list[index][1],
            label=self.item_list[index][2]
        )
        return dict(
            img=DataContainer(img, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True)
        )
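
The meta entry is flagged cpu_only=True, so the collate step can pass it through as a plain Python dict rather than trying to turn paths and filenames into tensors. Its size fields are typically used after inference to undo the resizing and padding; the function below is an assumed, illustrative use of ori_img_size and border_hw with invented sizes, not part of this codebase.

import torch
import torch.nn.functional as F

def restore_to_original(pred, meta):
    # Hypothetical post-processing: crop away the padded border recorded in
    # border_hw (height, width), then resize back to the original resolution.
    border_h, border_w = meta['border_hw']
    ori_w, ori_h = meta['ori_img_size']          # assumed (width, height) order
    pred = pred[..., :border_h, :border_w]
    return F.interpolate(pred.unsqueeze(0), size=(ori_h, ori_w),
                         mode='bilinear', align_corners=False).squeeze(0)

meta = dict(border_hw=(256, 320), ori_img_size=(640, 480))
print(restore_to_original(torch.rand(3, 256, 320), meta).shape)  # (3, 480, 640)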
Example #3
def stack(batch, data_key=None, device_ids=None):
    if isinstance(batch[0][data_key], DataContainer):
        if batch[0][data_key].stack:
            # Only payload types that default_collate understands can be stacked.
            assert isinstance(batch[0][data_key].data,
                              (torch.Tensor, int_classes, float, string_classes,
                               collections.abc.Mapping, collections.abc.Sequence))
            stacked = []
            if batch[0][data_key].samples_per_gpu and len(device_ids) > 1:
                # Ceil-divide the batch so each GPU gets its own collated chunk.
                samples_per_gpu = (len(batch) - 1 +
                                   len(device_ids)) // len(device_ids)
                for i in range(0, len(batch), samples_per_gpu):
                    stacked.append(
                        default_collate([
                            sample[data_key].data
                            for sample in batch[i:i + samples_per_gpu]
                        ]))
            else:
                stacked = default_collate(
                    [sample[data_key].data for sample in batch])

            if batch[0][data_key].return_dc and len(device_ids) > 1:
                return DataContainer(
                    stacked,
                    stack=batch[0][data_key].stack,
                    samples_per_gpu=batch[0][data_key].samples_per_gpu,
                    cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
        else:
            stacked = []
            if batch[0][data_key].samples_per_gpu and len(device_ids) > 1:
                # Same per-GPU chunking as above, but the chunks stay as plain
                # lists instead of being stacked into tensors.
                samples_per_gpu = (len(batch) - 1 +
                                   len(device_ids)) // len(device_ids)
                for i in range(0, len(batch), samples_per_gpu):
                    stacked.append([
                        sample[data_key].data
                        for sample in batch[i:i + samples_per_gpu]
                    ])
            else:
                stacked = [sample[data_key].data for sample in batch]

            if batch[0][data_key].return_dc and len(device_ids) > 1:
                return DataContainer(
                    stacked,
                    stack=batch[0][data_key].stack,
                    samples_per_gpu=batch[0][data_key].samples_per_gpu,
                    cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
    else:
        return default_collate([sample[data_key] for sample in batch])
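
When samples_per_gpu is set and more than one device id is given, stack splits the batch into ceil(len(batch) / len(device_ids)) chunks and collates each chunk on its own, so every GPU later receives an already-batched slice. The self-contained sketch below reproduces only that chunking arithmetic with plain tensors, leaving out the DataContainer wrapper and the return_dc handling.

import torch
from torch.utils.data.dataloader import default_collate

def chunk_and_collate(samples, device_ids):
    # Same ceil division as in stack(): (n - 1 + d) // d == ceil(n / d).
    per_gpu = (len(samples) - 1 + len(device_ids)) // len(device_ids)
    return [default_collate(samples[i:i + per_gpu])
            for i in range(0, len(samples), per_gpu)]

samples = [torch.full((3, 4, 4), float(i)) for i in range(6)]
chunks = chunk_and_collate(samples, device_ids=[0, 1])
print([c.shape for c in chunks])  # [torch.Size([3, 3, 4, 4]), torch.Size([3, 3, 4, 4])]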