Exemplo n.º 1
0
    def __init__(self,
                 dataset_root=None,
                 transforms=None,
                 mode='train',
                 edge=False):
        """Initialize the dataset from a ``{mode}_list.txt`` file list.

        Args:
            dataset_root (str, optional): Root directory of the dataset. When
                None, the dataset is downloaded to ``seg_env.DATA_HOME``; when
                the path does not exist, it is downloaded and extracted there.
            transforms (list, optional): Transform ops applied to samples;
                wrapped in ``Compose``. Required (must not be None).
            mode (str, optional): One of 'train', 'val' or 'test'.
                Default: 'train'.
            edge (bool, optional): Whether edge maps are computed during
                training. Default: False.

        Raises:
            ValueError: If ``mode`` is invalid or ``transforms`` is None.
            Exception: If a train/val list line does not contain exactly
                two fields.
        """
        mode = mode.lower()
        if mode not in ['train', 'val', 'test']:
            raise ValueError(
                "`mode` should be 'train', 'val' or 'test', but got {}.".format(
                    mode))

        # BUG FIX: validate before wrapping. The original tested
        # `self.transforms is None` *after* Compose(transforms), so the
        # check could never fire (a Compose instance is never None).
        if transforms is None:
            raise ValueError("`transforms` is necessary, but it is None.")

        self.dataset_root = dataset_root
        self.transforms = Compose(transforms)
        self.mode = mode
        self.file_list = list()
        self.num_classes = self.NUM_CLASSES
        self.ignore_index = 255
        self.edge = edge

        if self.dataset_root is None:
            # No root given: download into the default data home.
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=seg_env.DATA_HOME,
                extrapath=seg_env.DATA_HOME)
        elif not os.path.exists(self.dataset_root):
            # Root given but missing: download and extract to that location.
            self.dataset_root = os.path.normpath(self.dataset_root)
            savepath, extraname = self.dataset_root.rsplit(
                sep=os.path.sep, maxsplit=1)
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=savepath,
                extrapath=savepath,
                extraname=extraname)

        # List files follow the '<mode>_list.txt' naming scheme.
        file_path = os.path.join(self.dataset_root,
                                 '{}_list.txt'.format(mode))

        with open(file_path, 'r') as f:
            for line in f:
                items = line.strip().split()
                if not items:
                    # Skip blank lines instead of crashing on items[0].
                    continue
                if len(items) != 2:
                    if mode in ('train', 'val'):
                        raise Exception(
                            "File list format incorrect! It should be"
                            " image_name label_name\\n")
                    # Test lists may list images without labels.
                    image_path = os.path.join(self.dataset_root, items[0])
                    grt_path = None
                else:
                    image_path = os.path.join(self.dataset_root, items[0])
                    grt_path = os.path.join(self.dataset_root, items[1])
                self.file_list.append([image_path, grt_path])
Exemplo n.º 2
0
    def __init__(self,
                 common_transforms,
                 transforms1,
                 transforms2,
                 dataset_root=None,
                 mode='train',
                 edge=False):
        """Initialize the EG1800 portrait segmentation dataset.

        Args:
            common_transforms (list): Transform ops applied to every sample;
                wrapped in ``Compose``.
            transforms1 (list|None): Optional extra transform pipeline; the
                attribute is only set when not None (preserved for callers
                that probe it with ``hasattr``).
            transforms2 (list|None): Same as ``transforms1``.
            dataset_root (str, optional): Dataset root directory. Downloaded
                when None or when the path does not exist.
            mode (str, optional): 'train' selects eg1800_train.txt, anything
                else selects eg1800_test.txt. Default: 'train'.
            edge (bool, optional): Accepted for interface parity; not stored.
        """
        self.dataset_root = dataset_root
        self.common_transforms = Compose(common_transforms)
        self.transforms = self.common_transforms
        if transforms1 is not None:
            # to_rgb=False: these auxiliary pipelines consume images already
            # converted by the common transforms.
            self.transforms1 = Compose(transforms1, to_rgb=False)
        if transforms2 is not None:
            self.transforms2 = Compose(transforms2, to_rgb=False)
        mode = mode.lower()
        self.ignore_index = 255
        self.mode = mode
        self.num_classes = self.NUM_CLASSES
        self.input_width = 224
        self.input_height = 224

        if self.dataset_root is None:
            # No root given: download into the default data home.
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=seg_env.DATA_HOME,
                extrapath=seg_env.DATA_HOME)
        elif not os.path.exists(self.dataset_root):
            # Root given but missing: download and extract to that location.
            self.dataset_root = os.path.normpath(self.dataset_root)
            savepath, extraname = self.dataset_root.rsplit(sep=os.path.sep,
                                                           maxsplit=1)
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=savepath,
                extrapath=savepath,
                extraname=extraname)

        # BUG FIX: the original joined paths with the *local* `dataset_root`
        # argument, which is None (or a non-existent path) whenever the
        # download branches above ran; use the resolved self.dataset_root.
        if mode == 'train':
            path = os.path.join(self.dataset_root, 'eg1800_train.txt')
        else:
            path = os.path.join(self.dataset_root, 'eg1800_test.txt')
        with open(path, 'r') as f:
            files = f.readlines()
        # strip() after join removes the trailing newline each list-file
        # line carries.
        img_files = [
            os.path.join(self.dataset_root, 'Images', file).strip()
            for file in files
        ]
        label_files = [
            os.path.join(self.dataset_root, 'Labels', file).strip()
            for file in files
        ]

        self.file_list = [
            [img_path, label_path]
            for img_path, label_path in zip(img_files, label_files)
        ]
Exemplo n.º 3
0
def download_data(savepath, extrapath):
    """Fetch and extract the mini_supervisely dataset and the demo video.

    Args:
        savepath (str): Directory the archives are downloaded into.
        extrapath (str): Directory the archives are extracted into.
    """
    # (url, extraname) pairs; extraname is only passed when set.
    targets = [
        ("https://paddleseg.bj.bcebos.com/humanseg/data/mini_supervisely.zip",
         None),
        ("https://paddleseg.bj.bcebos.com/humanseg/data/video_test.zip",
         'video_test.mp4'),
    ]
    for url, extraname in targets:
        kwargs = {'url': url, 'savepath': savepath, 'extrapath': extrapath}
        if extraname is not None:
            kwargs['extraname'] = extraname
        download_file_and_uncompress(**kwargs)
Exemplo n.º 4
0
    def __init__(self,
                 transforms,
                 dataset_root=None,
                 mode='train',
                 edge=False):
        """Initialize the ADE20K dataset.

        Args:
            transforms (list): Transform ops applied to samples; wrapped in
                ``Compose``. Required (must not be None).
            dataset_root (str, optional): Dataset root directory. Downloaded
                when None or when the path does not exist.
            mode (str, optional): 'train' or 'val'. Default: 'train'.
            edge (bool, optional): Whether edge maps are computed during
                training. Default: False.

        Raises:
            ValueError: If ``mode`` is invalid or ``transforms`` is None.
        """
        mode = mode.lower()
        if mode not in ['train', 'val']:
            raise ValueError(
                "`mode` should be one of ('train', 'val') in ADE20K dataset, but got {}."
                .format(mode))

        # BUG FIX: validate before wrapping. The original tested
        # `self.transforms is None` *after* Compose(transforms), so the
        # check could never fire (a Compose instance is never None).
        if transforms is None:
            raise ValueError("`transforms` is necessary, but it is None.")

        self.dataset_root = dataset_root
        self.transforms = Compose(transforms)
        self.mode = mode
        self.file_list = list()
        self.num_classes = 150  # ADE20K defines 150 semantic classes.
        self.ignore_index = 255
        self.edge = edge

        if self.dataset_root is None:
            # No root given: download into the default data home.
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=seg_env.DATA_HOME,
                extrapath=seg_env.DATA_HOME,
                extraname='ADEChallengeData2016')
        elif not os.path.exists(self.dataset_root):
            # Root given but missing: download and extract to that location.
            self.dataset_root = os.path.normpath(self.dataset_root)
            savepath, extraname = self.dataset_root.rsplit(sep=os.path.sep,
                                                           maxsplit=1)
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=savepath,
                extrapath=savepath,
                extraname=extraname)

        if mode == 'train':
            img_dir = os.path.join(self.dataset_root, 'images/training')
            label_dir = os.path.join(self.dataset_root, 'annotations/training')
        else:  # mode == 'val' (validated above)
            img_dir = os.path.join(self.dataset_root, 'images/validation')
            label_dir = os.path.join(self.dataset_root,
                                     'annotations/validation')
        # Annotations share the image basename with a .png extension.
        for img_name in os.listdir(img_dir):
            img_path = os.path.join(img_dir, img_name)
            label_path = os.path.join(label_dir,
                                      img_name.replace('.jpg', '.png'))
            self.file_list.append([img_path, label_path])
Exemplo n.º 5
0
def download_pretrained_model(pretrained_model):
    """
    Download pretrained model from url.
    Args:
        pretrained_model (str): the url of pretrained weight
    Returns:
        str: the path of pretrained weight
    """
    assert urlparse(pretrained_model).netloc, "The url is not valid."

    url = unquote(pretrained_model)
    last_segment = url.split('/')[-1]
    if last_segment.endswith(('tgz', 'tar.gz', 'tar', 'zip')):
        # Archive name: drop the extension.
        savename = last_segment.split('.')[0]
    else:
        # Not an archive name: fall back to the parent path segment.
        savename = url.split('/')[-2]

    with generate_tempdir() as _dir:
        # The file lock serializes concurrent processes fetching the same
        # model into PRETRAINED_MODEL_HOME.
        lock_path = os.path.join(seg_env.TMP_HOME, savename)
        with filelock.FileLock(lock_path):
            extracted_dir = download_file_and_uncompress(
                url,
                savepath=_dir,
                extrapath=seg_env.PRETRAINED_MODEL_HOME,
                extraname=savename)
            weights_path = os.path.join(extracted_dir, 'model.pdparams')
    return weights_path
Exemplo n.º 6
0
def _download_pretrained_from_url(pretrained_model):
    """Download a pretrained weight archive and return the local path.

    Args:
        pretrained_model (str): URL of the pretrained weight archive.

    Returns:
        str: Local path of the extracted ``model.pdparams`` file.
    """
    pretrained_model = unquote(pretrained_model)
    savename = pretrained_model.split('/')[-1]
    if not savename.endswith(('tgz', 'tar.gz', 'tar', 'zip')):
        # Not an archive name: fall back to the parent path segment.
        savename = pretrained_model.split('/')[-2]
    else:
        savename = savename.split('.')[0]
    with generate_tempdir() as _dir:
        # The file lock serializes concurrent processes fetching the same
        # model into PRETRAINED_MODEL_HOME.
        with filelock.FileLock(os.path.join(seg_env.TMP_HOME, savename)):
            pretrained_model = download_file_and_uncompress(
                pretrained_model,
                savepath=_dir,
                extrapath=seg_env.PRETRAINED_MODEL_HOME,
                extraname=savename)
            pretrained_model = os.path.join(pretrained_model,
                                            'model.pdparams')
    return pretrained_model


def load_pretrained_model(model, pretrained_model):
    """Load pretrained weights into ``model``, skipping mismatched params.

    Args:
        model (paddle.nn.Layer): Model to receive the weights.
        pretrained_model (str|None): Local path or URL of the weights. When
            None, nothing is loaded and the model trains from scratch.

    Raises:
        ValueError: If ``pretrained_model`` resolves to a non-existent path.
    """
    if pretrained_model is None:
        logger.info(
            'No pretrained model to load, {} will be trained from scratch.'.
            format(model.__class__.__name__))
        return

    logger.info(
        'Loading pretrained model from {}'.format(pretrained_model))
    # Resolve a URL to a local .pdparams path first.
    if urlparse(pretrained_model).netloc:
        pretrained_model = _download_pretrained_from_url(pretrained_model)

    if not os.path.exists(pretrained_model):
        raise ValueError(
            'The pretrained model directory is not Found: {}'.format(
                pretrained_model))

    para_state_dict = paddle.load(pretrained_model)
    model_state_dict = model.state_dict()
    num_params_loaded = 0
    # Copy only parameters present in both state dicts with matching shapes.
    for k in model_state_dict.keys():
        if k not in para_state_dict:
            logger.warning("{} is not in pretrained model".format(k))
        elif list(para_state_dict[k].shape) != list(
                model_state_dict[k].shape):
            logger.warning(
                "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
                .format(k, para_state_dict[k].shape,
                        model_state_dict[k].shape))
        else:
            model_state_dict[k] = para_state_dict[k]
            num_params_loaded += 1
    model.set_dict(model_state_dict)
    logger.info("There are {}/{} variables loaded into {}.".format(
        num_params_loaded, len(model_state_dict),
        model.__class__.__name__))
Exemplo n.º 7
0
def main():
    """Augment Pascal VOC with SBD annotations.

    Downloads the SBD release, computes the file ids present in SBD but not
    in VOC trainval, writes them to VOC2012/ImageSets/Segmentation/aug.txt,
    and converts every SBD .mat annotation to .png in parallel.
    """
    args = parse_args()
    sbd_path = download_file_and_uncompress(url=URL,
                                            savepath=DATA_HOME,
                                            extrapath=DATA_HOME,
                                            extraname='benchmark_RELEASE')
    # SBD train + val ids together form the candidate augmentation set.
    with open(os.path.join(sbd_path, 'dataset/train.txt'), 'r') as f:
        sbd_file_list = [line.strip() for line in f]
    with open(os.path.join(sbd_path, 'dataset/val.txt'), 'r') as f:
        sbd_file_list += [line.strip() for line in f]
    if not os.path.exists(args.voc_path):
        # BUG FIX: the original message left the '{}' placeholder unfilled.
        raise FileNotFoundError(
            'There is no voc_path: {}. Please ensure that the Pascal VOC '
            'dataset has been downloaded correctly'.format(args.voc_path))
    with open(
            os.path.join(args.voc_path,
                         'VOC2012/ImageSets/Segmentation/trainval.txt'),
            'r') as f:
        voc_file_list = [line.strip() for line in f]

    # Only ids that VOC itself does not already cover are augmented.
    aug_file_list = list(set(sbd_file_list) - set(voc_file_list))
    with open(
            os.path.join(args.voc_path,
                         'VOC2012/ImageSets/Segmentation/aug.txt'), 'w') as f:
        f.writelines(''.join([line, '\n']) for line in aug_file_list)

    sbd_cls_dir = os.path.join(sbd_path, 'dataset/cls')
    save_dir = os.path.join(args.voc_path, 'VOC2012/SegmentationClassAug')
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # Fan the .mat -> .png conversions out over a worker pool.
    mat_file_list = os.listdir(sbd_cls_dir)
    p = Pool(args.num_workers)
    for f in tqdm.tqdm(mat_file_list):
        p.apply_async(mat_to_png, args=(f, sbd_cls_dir, save_dir))
    p.close()
    p.join()
Exemplo n.º 8
0
# limitations under the License.

import sys
import os

LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
TEST_PATH = os.path.join(LOCAL_PATH, "../../../", "test")
sys.path.append(TEST_PATH)

from paddleseg.utils.download import download_file_and_uncompress

# Map of model name -> archive URL of the pretrained weights to fetch.
model_urls = {
    "ppseg_lite_portrait_398x224":
    "https://paddleseg.bj.bcebos.com/dygraph/ppseg/ppseg_lite_portrait_398x224.tar.gz",
    "deeplabv3p_resnet50_os8_humanseg_512x512_100k":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip",
    "fcn_hrnetw18_small_v1_humanseg_192x192":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/fcn_hrnetw18_small_v1_humanseg_192x192.zip",
    "shufflenetv2_humanseg_192x192":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/shufflenetv2_humanseg_192x192.zip",
}

if __name__ == "__main__":
    # Download and extract each archive next to this script, one
    # directory per model name.
    for model_name, url in model_urls.items():
        download_file_and_uncompress(url=url,
                                     savepath=LOCAL_PATH,
                                     extrapath=LOCAL_PATH,
                                     extraname=model_name)

    print("Pretrained model download success!")
Exemplo n.º 9
0
    def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
        """Initialize the Pascal VOC segmentation dataset.

        Args:
            transforms (list): Transform ops applied to samples; wrapped in
                ``Compose``. Required (must not be None).
            dataset_root (str, optional): Dataset root directory. Downloaded
                when None or when the path does not exist.
            mode (str, optional): One of 'train', 'trainval', 'trainaug',
                'val'. 'trainaug' additionally reads aug.txt produced by
                voc_augment.py. Default: 'train'.
            edge (bool, optional): Whether edge maps are computed during
                training. Default: False.

        Raises:
            ValueError: If ``mode`` is invalid or ``transforms`` is None.
            RuntimeError: If ``mode`` is 'trainaug' but aug.txt is missing.
        """
        mode = mode.lower()
        if mode not in ['train', 'trainval', 'trainaug', 'val']:
            raise ValueError(
                "`mode` should be one of ('train', 'trainval', 'trainaug', 'val') in PascalVOC dataset, but got {}."
                .format(mode))

        # BUG FIX: validate before wrapping. The original tested
        # `self.transforms is None` *after* Compose(transforms), so the
        # check could never fire (a Compose instance is never None).
        if transforms is None:
            raise ValueError("`transforms` is necessary, but it is None.")

        self.dataset_root = dataset_root
        self.transforms = Compose(transforms)
        self.mode = mode
        self.file_list = list()
        self.num_classes = self.NUM_CLASSES
        self.ignore_index = 255
        self.edge = edge

        if self.dataset_root is None:
            # No root given: download into the default data home.
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=seg_env.DATA_HOME,
                extrapath=seg_env.DATA_HOME,
                extraname='VOCdevkit')
        elif not os.path.exists(self.dataset_root):
            # Root given but missing: download and extract to that location.
            self.dataset_root = os.path.normpath(self.dataset_root)
            savepath, extraname = self.dataset_root.rsplit(
                sep=os.path.sep, maxsplit=1)
            self.dataset_root = download_file_and_uncompress(
                url=URL,
                savepath=savepath,
                extrapath=savepath,
                extraname=extraname)

        image_set_dir = os.path.join(self.dataset_root, 'VOC2012', 'ImageSets',
                                     'Segmentation')
        file_path_aug = None
        if mode == 'train':
            file_path = os.path.join(image_set_dir, 'train.txt')
        elif mode == 'val':
            file_path = os.path.join(image_set_dir, 'val.txt')
        elif mode == 'trainval':
            file_path = os.path.join(image_set_dir, 'trainval.txt')
        else:  # 'trainaug': train.txt plus the SBD augmentation list.
            file_path = os.path.join(image_set_dir, 'train.txt')
            file_path_aug = os.path.join(image_set_dir, 'aug.txt')
            if not os.path.exists(file_path_aug):
                raise RuntimeError(
                    "When `mode` is 'trainaug', Pascal Voc dataset should be augmented, "
                    "Please make sure voc_augment.py has been properly run when using this mode."
                )

        img_dir = os.path.join(self.dataset_root, 'VOC2012', 'JPEGImages')
        label_dir = os.path.join(self.dataset_root, 'VOC2012',
                                 'SegmentationClass')
        label_dir_aug = os.path.join(self.dataset_root, 'VOC2012',
                                     'SegmentationClassAug')

        def _append_pairs(list_path, lbl_dir):
            # One file id per line; images are .jpg, labels are .png.
            with open(list_path, 'r') as f:
                for line in f:
                    line = line.strip()
                    image_path = os.path.join(img_dir, ''.join([line, '.jpg']))
                    label_path = os.path.join(lbl_dir, ''.join([line, '.png']))
                    self.file_list.append([image_path, label_path])

        _append_pairs(file_path, label_dir)
        if file_path_aug is not None:
            # Augmented ids take their labels from SegmentationClassAug.
            _append_pairs(file_path_aug, label_dir_aug)