Example 1
    def __getitem__(self, index):
        imgA = ImageHelper.read_image(
            self.imgA_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        # Sample imgB at random, so the A/B pairs are unpaired.
        indexB = random.randint(0, len(self.imgB_list) - 1)
        imgB = ImageHelper.read_image(
            self.imgB_list[indexB],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        if self.aug_transform is not None:
            imgA = self.aug_transform(imgA)
            imgB = self.aug_transform(imgB)

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)

        return dict(imgA=DataContainer(imgA, stack=True),
                    imgB=DataContainer(imgB, stack=True),
                    labelA=DataContainer(self.labelA_list[index], stack=True),
                    labelB=DataContainer(self.labelB_list[indexB], stack=True))
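Every snippet on this page hands its outputs to DataContainer. As a reference point, here is a minimal sketch of the wrapper these examples assume, limited to the attributes that actually appear on this page (the real class carries more logic); the stack()/todc()/collate() helpers further down read these flags back:

class DataContainer(object):
    """Minimal stand-in: a payload plus the collate hints that the
    stack()/todc()/collate() helpers below read back."""

    def __init__(self, data, stack=False, samples_per_gpu=False,
                 cpu_only=False, return_dc=False):
        self.data = data                        # tensor, list, or dict payload
        self.stack = stack                      # collate via default_collate / torch.stack
        self.samples_per_gpu = samples_per_gpu  # pre-chunk the batch per GPU
        self.cpu_only = cpu_only                # keep on CPU (e.g. meta dicts)
        self.return_dc = return_dc              # collate returns a DataContainer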
Example 2
    def todc(data_list,
             samples_per_gpu=True,
             stack=False,
             cpu_only=False,
             device_ids=None):
        if not samples_per_gpu:
            if not stack:
                return DataContainer(data_list,
                                     stack=stack,
                                     samples_per_gpu=samples_per_gpu,
                                     cpu_only=cpu_only)
            else:
                return DataContainer(torch.stack(data_list, 0),
                                     stack=stack,
                                     samples_per_gpu=samples_per_gpu,
                                     cpu_only=cpu_only)

        device_ids = list(range(
            torch.cuda.device_count())) if device_ids is None else device_ids
        # Ceiling division: each device gets at most `samples` items.
        samples = (len(data_list) - 1 + len(device_ids)) // len(device_ids)
        stacked = []
        for i in range(0, len(data_list), samples):
            if not stack:
                stacked.append(data_list[i:i + samples])
            else:
                stacked.append(torch.stack(data_list[i:i + samples], 0))

        return DataContainer(stacked,
                             stack=stack,
                             samples_per_gpu=samples_per_gpu,
                             cpu_only=cpu_only)
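A hypothetical call (treating todc as a standalone static method), assuming four CUDA devices are visible:

import torch

imgs = [torch.randn(3, 224, 224) for _ in range(8)]
dc = todc(imgs, samples_per_gpu=True, stack=True, device_ids=[0, 1, 2, 3])
# samples = (8 - 1 + 4) // 4 = 2, so dc.data is a list of four tensors,
# each of shape (2, 3, 224, 224), one chunk per device id.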
Example 3
    def __getitem__(self, index):
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)
        bboxes, labels = self.__read_json_file(self.json_list[index])
        ori_bboxes, ori_labels = bboxes.copy(), labels.copy()

        if self.aug_transform is not None:
            img, bboxes, labels = self.aug_transform(img, bboxes=bboxes, labels=labels)

        labels = torch.from_numpy(labels).long()
        bboxes = torch.from_numpy(bboxes).float()

        meta = dict(
            ori_img_size=img_size,
            border_size=ImageHelper.get_size(img),
            ori_bboxes=torch.from_numpy(ori_bboxes).float(),
            ori_labels=torch.from_numpy(ori_labels).long()
        )
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True, return_dc=True, samples_per_gpu=True),
            bboxes=DataContainer(bboxes, stack=False, return_dc=True, samples_per_gpu=True),
            labels=DataContainer(labels, stack=False, return_dc=True, samples_per_gpu=True),
            meta=DataContainer(meta, stack=False, cpu_only=True, return_dc=True, samples_per_gpu=True)
        )
Example 4
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        ori_img_size = ImageHelper.get_size(img)
        if self.aug_transform is not None:
            img = self.aug_transform(img)

        border_hw = ImageHelper.get_size(img)[::-1]  # get_size returns (W, H); reversed to (H, W)
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(ori_img_size=ori_img_size,
                    border_hw=border_hw,
                    img_path=self.img_list[index])
        return dict(img=DataContainer(img,
                                      stack=True,
                                      return_dc=True,
                                      samples_per_gpu=True),
                    meta=DataContainer(meta,
                                       stack=False,
                                       cpu_only=True,
                                       return_dc=True,
                                       samples_per_gpu=True))
Example 5
def stack(batch, data_key=None, trans_dict=None):
    if isinstance(batch[0][data_key], DataContainer):
        if batch[0][data_key].stack:
            assert isinstance(batch[0][data_key].data, torch.Tensor) or \
                   isinstance(batch[0][data_key].data, int_classes) or \
                   isinstance(batch[0][data_key].data, float) or \
                   isinstance(batch[0][data_key].data, string_classes) or \
                   isinstance(batch[0][data_key].data, collections.abc.Mapping) or \
                   isinstance(batch[0][data_key].data, collections.abc.Sequence)
            stacked = []
            if batch[0][data_key].samples_per_gpu:
                assert len(batch) % trans_dict['samples_per_gpu'] == 0
                for i in range(0, len(batch), trans_dict['samples_per_gpu']):
                    stacked.append(
                        default_collate([
                            sample[data_key].data
                            for sample in batch[i:i +
                                                trans_dict['samples_per_gpu']]
                        ]))
            else:
                stacked = default_collate(
                    [sample[data_key].data for sample in batch])

            if batch[0][data_key].return_dc:
                return DataContainer(
                    stacked,
                    stack=batch[0][data_key].stack,
                    samples_per_gpu=batch[0][data_key].samples_per_gpu,
                    cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
        else:
            stacked = []
            if batch[0][data_key].samples_per_gpu:
                assert len(batch) % trans_dict['samples_per_gpu'] == 0
                for i in range(0, len(batch), trans_dict['samples_per_gpu']):
                    stacked.append([
                        sample[data_key].data
                        for sample in batch[i:i +
                                            trans_dict['samples_per_gpu']]
                    ])
            else:
                stacked = [sample[data_key].data for sample in batch]

            if batch[0][data_key].return_dc:
                return DataContainer(
                    stacked,
                    stack=batch[0][data_key].stack,
                    samples_per_gpu=batch[0][data_key].samples_per_gpu,
                    cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
    else:
        return default_collate([sample[data_key] for sample in batch])
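A worked call against this function, assuming the minimal DataContainer sketched earlier and a transform dict that puts two samples on each GPU:

import torch

batch = [dict(img=DataContainer(torch.randn(3, 8, 8), stack=True,
                                samples_per_gpu=True, return_dc=True))
         for _ in range(4)]
out = stack(batch, data_key='img', trans_dict={'samples_per_gpu': 2})
# out is a DataContainer whose .data holds two tensors of shape (2, 3, 8, 8),
# one default_collate'd chunk per GPU.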
Example 6
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        if os.path.exists(self.mask_list[index]):
            maskmap = ImageHelper.read_image(self.mask_list[index],
                                             tool=self.configer.get(
                                                 'data', 'image_tool'),
                                             mode='P')
        else:
            maskmap = np.ones((img.size[1], img.size[0]), dtype=np.uint8)  # no mask file: all-ones mask
            if self.configer.get('data', 'image_tool') == 'pil':
                maskmap = ImageHelper.np2img(maskmap)

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None and len(bboxes) > 0:
            img, maskmap, kpts, bboxes = self.aug_transform(img,
                                                            maskmap=maskmap,
                                                            kpts=kpts,
                                                            bboxes=bboxes)

        elif self.aug_transform is not None:
            img, maskmap, kpts = self.aug_transform(img,
                                                    maskmap=maskmap,
                                                    kpts=kpts)

        width, height = ImageHelper.get_size(maskmap)
        maskmap = ImageHelper.resize(
            maskmap, (width // self.configer.get('network', 'stride'),
                      height // self.configer.get('network', 'stride')),
            interpolation='nearest')

        maskmap = torch.from_numpy(np.array(maskmap, dtype=np.float32))
        maskmap = maskmap.unsqueeze(0)
        kpts = torch.from_numpy(kpts).float()

        heatmap = self.heatmap_generator(kpts, [width, height], maskmap)
        vecmap = self.paf_generator(kpts, [width, height], maskmap)
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(img=DataContainer(img, stack=True),
                    heatmap=DataContainer(heatmap, stack=True),
                    maskmap=DataContainer(maskmap, stack=True),
                    vecmap=DataContainer(vecmap, stack=True))
Example 7
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        label = self.label_list[index]

        if self.aug_transform is not None:
            img = self.aug_transform(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            label=DataContainer(label, stack=True),
        )
Example 8
    def todc(data_list, gpu_list, cpu_only=False):
        assert len(data_list) % len(gpu_list) == 0
        samples_per_gpu = len(data_list) // len(gpu_list)
        stacked = []
        for i in range(0, len(data_list), samples_per_gpu):
            stacked.append(data_list[i:i + samples_per_gpu])

        return DataContainer(stacked, cpu_only=cpu_only)
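Unlike Example 2, this variant requires the batch to divide evenly across gpu_list and never stacks. A hypothetical call:

import torch

dc = todc([torch.randn(3, 32, 32) for _ in range(6)], gpu_list=[0, 1, 2])
# samples_per_gpu = 6 // 3 = 2, so dc.data is three sub-lists of two tensors.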
Example 9
    def __getitem__(self, index):
        img = ImageHelper.read_image(self.img_list[index],
                                     tool=self.configer.get('data', 'image_tool'),
                                     mode=self.configer.get('data', 'input_mode'))
        labels, bboxes, polygons = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, bboxes, labels, polygons = self.aug_transform(img, bboxes=bboxes,
                                                               labels=labels, polygons=polygons)

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            bboxes=DataContainer(bboxes, stack=False),
            labels=DataContainer(labels, stack=False),
            polygons=DataContainer(polygons, stack=False, cpu_only=True)
        )
Example 10
def stack(batch, data_key=None, device_ids=None):
    device_ids = list(range(torch.cuda.device_count())) if device_ids is None else device_ids
    if isinstance(batch[0][data_key], DataContainer):
        if batch[0][data_key].stack:
            assert isinstance(batch[0][data_key].data, torch.Tensor) or \
                   isinstance(batch[0][data_key].data, int_classes) or \
                   isinstance(batch[0][data_key].data, float) or \
                   isinstance(batch[0][data_key].data, string_classes) or \
                   isinstance(batch[0][data_key].data, collections.abc.Mapping) or \
                   isinstance(batch[0][data_key].data, collections.abc.Sequence)
            stacked = []
            if batch[0][data_key].samples_per_gpu:
                samples_per_gpu = (len(batch) - 1 + len(device_ids)) // len(device_ids)
                for i in range(0, len(batch), samples_per_gpu):
                    stacked.append(
                        default_collate([sample[data_key].data for sample in batch[i:i + samples_per_gpu]])
                    )
            else:
                stacked = default_collate([sample[data_key].data for sample in batch])

            if batch[0][data_key].return_dc:
                return DataContainer(stacked, stack=batch[0][data_key].stack,
                                     samples_per_gpu=batch[0][data_key].samples_per_gpu,
                                     cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
        else:
            stacked = []
            if batch[0][data_key].samples_per_gpu:
                samples_per_gpu = (len(batch) - 1 + len(device_ids)) // len(device_ids)
                for i in range(0, len(batch), samples_per_gpu):
                    stacked.append([sample[data_key].data for sample in batch[i:i + samples_per_gpu]])
            else:
                stacked = [sample[data_key].data for sample in batch]

            if batch[0][data_key].return_dc:
                return DataContainer(stacked, stack=batch[0][data_key].stack,
                                     samples_per_gpu=batch[0][data_key].samples_per_gpu,
                                     cpu_only=batch[0][data_key].cpu_only)
            else:
                return stacked
    else:
        return default_collate([sample[data_key] for sample in batch])
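The chunk size is again ceiling division over the visible devices: with eight samples and three GPUs, samples_per_gpu = (8 - 1 + 3) // 3 = 3, so the chunks come out as 3, 3, and 2 items. Example 5 reads the same chunk size from trans_dict instead of deriving it from torch.cuda.device_count().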
Example 11
    def todc(data_list, samples_per_gpu, stack=False, cpu_only=False):
        stacked = []
        for i in range(0, len(data_list), samples_per_gpu):
            if not stack:
                stacked.append(data_list[i:i + samples_per_gpu])
            else:
                stacked.append(torch.cat(data_list[i:i + samples_per_gpu], 0))

        return DataContainer(stacked,
                             stack=stack,
                             samples_per_gpu=True,
                             cpu_only=cpu_only)
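Note the torch.cat here where Example 2 used torch.stack: this variant assumes each element already carries a leading batch dimension. A hypothetical call:

import torch

outs = [torch.randn(1, 3, 32, 32) for _ in range(4)]
dc = todc(outs, samples_per_gpu=2, stack=True)
# dc.data holds two tensors of shape (2, 3, 32, 32): torch.cat merges the
# existing batch dimensions instead of adding a new one.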
Example 12
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        kpts, bboxes = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, kpts, bboxes = self.aug_transform(img,
                                                   kpts=kpts,
                                                   bboxes=bboxes)

        kpts = torch.from_numpy(kpts).float()
        heatmap = self.heatmap_generator(kpts, ImageHelper.get_size(img))
        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            heatmap=DataContainer(heatmap, stack=True),
        )
Example 13
    def __getitem__(self, index):
        imgA = ImageHelper.read_image(
            self.imgA_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        imgB = ImageHelper.read_image(
            self.imgB_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        if self.aug_transform is not None:
            imgA, imgB = self.aug_transform([imgA, imgB])

        if self.img_transform is not None:
            imgA = self.img_transform(imgA)
            imgB = self.img_transform(imgB)

        return dict(
            imgA=DataContainer(imgA, stack=True),
            imgB=DataContainer(imgB, stack=True),
        )
Example 14
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        img_size = ImageHelper.get_size(img)
        labelmap = ImageHelper.read_image(self.label_list[index],
                                          tool=self.configer.get(
                                              'data', 'image_tool'),
                                          mode='P')
        if self.configer.exists('data', 'label_list'):
            labelmap = self._encode_label(labelmap)

        if self.configer.exists('data', 'reduce_zero_label'):
            labelmap = self._reduce_zero_label(labelmap)

        ori_target = ImageHelper.tonp(labelmap)
        ori_target[ori_target == 255] = -1

        if self.aug_transform is not None:
            img, labelmap = self.aug_transform(img, labelmap=labelmap)

        border_size = ImageHelper.get_size(img)

        if self.img_transform is not None:
            img = self.img_transform(img)

        if self.label_transform is not None:
            labelmap = self.label_transform(labelmap)

        meta = dict(ori_img_size=img_size,
                    border_size=border_size,
                    ori_target=ori_target)
        return dict(
            img=DataContainer(img, stack=True),
            labelmap=DataContainer(labelmap, stack=True),
            meta=DataContainer(meta, stack=False, cpu_only=True),
        )
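The ori_target[ori_target == 255] = -1 line maps the conventional 'ignore' label 255 to -1, which matches the value=-1 that the collate function in Example 16 below uses when padding labelmap.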
Example 15
    def __getitem__(self, index):
        img = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        bboxes, labels = self.__read_json_file(self.json_list[index])

        if self.aug_transform is not None:
            img, bboxes, labels = self.aug_transform(img,
                                                     bboxes=bboxes,
                                                     labels=labels)

        labels = torch.from_numpy(labels).long()
        bboxes = torch.from_numpy(bboxes).float()

        if self.img_transform is not None:
            img = self.img_transform(img)

        return dict(
            img=DataContainer(img, stack=True),
            bboxes=DataContainer(bboxes, stack=False),
            labels=DataContainer(labels, stack=False),
        )
Example 16
def collate(batch, trans_dict):
    data_keys = batch[0].keys()
    if trans_dict['size_mode'] == 'none':
        return dict({
            key: stack(batch, data_key=key, trans_dict=trans_dict)
            for key in data_keys
        })

    elif trans_dict['size_mode'] == 'fix_size':
        target_width, target_height = trans_dict['input_size']

    elif trans_dict['size_mode'] == 'multi_size':
        ms_input_size = trans_dict['ms_input_size']
        target_width, target_height = ms_input_size[
            random.randint(0, len(ms_input_size) - 1)]

    elif trans_dict['size_mode'] == 'max_size':
        border_width = [sample['img'].size(2) for sample in batch]   # W of a CHW tensor
        border_height = [sample['img'].size(1) for sample in batch]  # H of a CHW tensor
        target_width, target_height = max(border_width), max(border_height)

    else:
        raise NotImplementedError('Size Mode {} is invalid!'.format(
            trans_dict['size_mode']))

    if 'fit_stride' in trans_dict:
        stride = trans_dict['fit_stride']
        pad_w = 0 if target_width % stride == 0 else stride - (target_width % stride)    # pad right
        pad_h = 0 if target_height % stride == 0 else stride - (target_height % stride)  # pad down
        target_width = target_width + pad_w
        target_height = target_height + pad_h

    for i in range(len(batch)):
        if 'meta' in data_keys:
            batch[i]['meta'].data['input_size'] = [target_width, target_height]

        channels, height, width = batch[i]['img'].size()
        if height == target_height and width == target_width:
            continue

        scaled_size = [width, height]

        if trans_dict['align_method'] in ['only_scale', 'scale_and_pad']:
            w_scale_ratio = target_width / width
            h_scale_ratio = target_height / height
            if trans_dict['align_method'] == 'scale_and_pad':
                w_scale_ratio = min(w_scale_ratio, h_scale_ratio)
                h_scale_ratio = w_scale_ratio

            if 'kpts' in data_keys and batch[i]['kpts'].numel() > 0:
                batch[i]['kpts'].data[:, :, 0] *= w_scale_ratio
                batch[i]['kpts'].data[:, :, 1] *= h_scale_ratio

            if 'bboxes' in data_keys and batch[i]['bboxes'].numel() > 0:
                batch[i]['bboxes'].data[:, 0::2] *= w_scale_ratio
                batch[i]['bboxes'].data[:, 1::2] *= h_scale_ratio

            if 'polygons' in data_keys:
                for object_id in range(len(batch[i]['polygons'])):
                    for polygon_id in range(
                            len(batch[i]['polygons'][object_id])):
                        batch[i]['polygons'].data[object_id][polygon_id][
                            0::2] *= w_scale_ratio
                        batch[i]['polygons'].data[object_id][polygon_id][
                            1::2] *= h_scale_ratio

            scaled_size = (int(round(width * w_scale_ratio)),
                           int(round(height * h_scale_ratio)))
            if 'meta' in data_keys and 'border_size' in batch[i]['meta'].data:
                batch[i]['meta'].data['border_size'] = scaled_size

            scaled_size_hw = (scaled_size[1], scaled_size[0])

            batch[i]['img'] = DataContainer(TensorHelper.resize(
                batch[i]['img'].data,
                scaled_size_hw,
                mode='bilinear',
                align_corners=True),
                                            stack=batch[i]['img'].stack)
            if 'labelmap' in data_keys:
                batch[i]['labelmap'] = DataContainer(
                    TensorHelper.resize(batch[i]['labelmap'].data,
                                        scaled_size_hw,
                                        mode='nearest'),
                    stack=batch[i]['labelmap'].stack)

            if 'maskmap' in data_keys:
                batch[i]['maskmap'] = DataContainer(
                    TensorHelper.resize(batch[i]['maskmap'].data,
                                        scaled_size_hw,
                                        mode='nearest'),
                    stack=batch[i]['maskmap'].stack)

        pad_width = target_width - scaled_size[0]
        pad_height = target_height - scaled_size[1]
        assert pad_height >= 0 and pad_width >= 0
        if pad_width > 0 or pad_height > 0:
            assert trans_dict['align_method'] in ['only_pad', 'scale_and_pad']
            left_pad, up_pad = None, None
            if 'pad_mode' not in trans_dict or trans_dict['pad_mode'] == 'random':
                left_pad = random.randint(0, pad_width)  # pad_left
                up_pad = random.randint(0, pad_height)  # pad_up

            elif trans_dict['pad_mode'] == 'pad_border':
                direction = random.randint(0, 1)
                left_pad = pad_width if direction == 0 else 0
                up_pad = pad_height if direction == 0 else 0

            elif trans_dict['pad_mode'] == 'pad_left_up':
                left_pad = pad_width
                up_pad = pad_height

            elif trans_dict['pad_mode'] == 'pad_right_down':
                left_pad = 0
                up_pad = 0

            elif trans_dict['pad_mode'] == 'pad_center':
                left_pad = pad_width // 2
                up_pad = pad_height // 2

            else:
                Log.error('Invalid pad mode: {}'.format(
                    trans_dict['pad_mode']))
                exit(1)

            pad = [left_pad, pad_width - left_pad, up_pad, pad_height - up_pad]

            batch[i]['img'] = DataContainer(F.pad(batch[i]['img'].data,
                                                  pad=pad,
                                                  value=0),
                                            stack=batch[i]['img'].stack)

            if 'labelmap' in data_keys:
                batch[i]['labelmap'] = DataContainer(
                    F.pad(batch[i]['labelmap'].data, pad=pad, value=-1),
                    stack=batch[i]['labelmap'].stack)

            if 'maskmap' in data_keys:
                batch[i]['maskmap'] = DataContainer(
                    F.pad(batch[i]['maskmap'].data, pad=pad, value=1),
                    stack=batch[i]['maskmap'].stack)

            if 'polygons' in data_keys:
                for object_id in range(len(batch[i]['polygons'])):
                    for polygon_id in range(
                            len(batch[i]['polygons'][object_id])):
                        batch[i]['polygons'].data[object_id][polygon_id][
                            0::2] += left_pad
                        batch[i]['polygons'].data[object_id][polygon_id][
                            1::2] += up_pad

            if 'kpts' in data_keys and batch[i]['kpts'].numel() > 0:
                batch[i]['kpts'].data[:, :, 0] += left_pad
                batch[i]['kpts'].data[:, :, 1] += up_pad

            if 'bboxes' in data_keys and batch[i]['bboxes'].numel() > 0:
                batch[i]['bboxes'].data[:, 0::2] += left_pad
                batch[i]['bboxes'].data[:, 1::2] += up_pad

    return dict({
        key: stack(batch, data_key=key, trans_dict=trans_dict)
        for key in data_keys
    })
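How this collate function is typically wired into a loader (a sketch; dataset stands in for any of the __getitem__ implementations above, and the trans_dict keys are the ones this function reads):

from functools import partial
from torch.utils.data import DataLoader

trans_dict = dict(size_mode='max_size',     # pad up to the largest image in the batch
                  fit_stride=8,             # then snap W/H up to a multiple of 8
                  align_method='only_pad',
                  pad_mode='pad_right_down',
                  samples_per_gpu=2)        # must divide batch_size
loader = DataLoader(dataset, batch_size=4, shuffle=True,
                    collate_fn=partial(collate, trans_dict=trans_dict))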
Example 17
    def __getitem__(self, index):
        image = ImageHelper.read_image(
            self.img_list[index],
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))

        img_size = ImageHelper.get_size(image)
        if self.configer.exists('test', 'input_size'):
            input_size = self.configer.get('test', 'input_size')
            if input_size[0] == -1 and input_size[1] == -1:
                in_width, in_height = ImageHelper.get_size(image)

            elif input_size[0] != -1 and input_size[1] != -1:
                in_width, in_height = input_size

            elif input_size[0] == -1 and input_size[1] != -1:
                width, height = ImageHelper.get_size(image)
                scale_ratio = input_size[1] / height
                w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
                in_width, in_height = int(round(width * w_scale_ratio)), int(
                    round(height * h_scale_ratio))

            else:
                assert input_size[0] != -1 and input_size[1] == -1
                width, height = ImageHelper.get_size(image)
                scale_ratio = input_size[0] / width
                w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
                in_width, in_height = int(round(width * w_scale_ratio)), int(
                    round(height * h_scale_ratio))

        elif self.configer.exists(
                'test', 'min_side_length') and not self.configer.exists(
                    'test', 'max_side_length'):
            width, height = ImageHelper.get_size(image)
            scale_ratio = self.configer.get('test', 'min_side_length') / min(
                width, height)
            w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
            in_width, in_height = int(round(width * w_scale_ratio)), int(
                round(height * h_scale_ratio))

        elif not self.configer.exists(
                'test', 'min_side_length') and self.configer.exists(
                    'test', 'max_side_length'):
            width, height = ImageHelper.get_size(image)
            scale_ratio = self.configer.get('test', 'max_side_length') / max(
                width, height)
            w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
            in_width, in_height = int(round(width * w_scale_ratio)), int(
                round(height * h_scale_ratio))

        elif self.configer.exists('test',
                                  'min_side_length') and self.configer.exists(
                                      'test', 'max_side_length'):
            width, height = ImageHelper.get_size(image)
            scale_ratio = self.configer.get('test', 'min_side_length') / min(
                width, height)
            bound_scale_ratio = self.configer.get(
                'test', 'max_side_length') / max(width, height)
            scale_ratio = min(scale_ratio, bound_scale_ratio)
            w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
            in_width, in_height = int(round(width * w_scale_ratio)), int(
                round(height * h_scale_ratio))

        else:
            in_width, in_height = ImageHelper.get_size(image)

        img = ImageHelper.resize(image, (int(in_width), int(in_height)),
                                 interpolation='linear')
        if self.img_transform is not None:
            img = self.img_transform(img)

        meta = dict(ori_img_size=img_size,
                    border_hw=[in_height, in_width],
                    img_path=self.img_list[index])
        return dict(img=DataContainer(img,
                                      stack=True,
                                      return_dc=True,
                                      samples_per_gpu=True),
                    meta=DataContainer(meta,
                                       stack=False,
                                       cpu_only=True,
                                       return_dc=True,
                                       samples_per_gpu=True))
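As a worked case for the combined min/max branch: with min_side_length = 600 and max_side_length = 1000 on a 1200x400 image, the min-side ratio is 600 / 400 = 1.5, the max-side bound is 1000 / 1200 ≈ 0.83, the smaller ratio wins, and the image is resized to roughly 1000x333 before img_transform runs.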