Example #1
    def register(cls):
        """Closure registering the passed class."""

        # Test the presence and usability of the functions
        try:
            tested = cls.to_yaml, cls.from_yaml
        except AttributeError:
            raise TypeError('Missing YAML serialization method')

        if not all(callable(f) for f in tested):
            raise TypeError('YAML serialization method(s) are not callable')

        # Make conversion handlers
        def dump(dumper: Dumper, value: Any) -> yaml.Node:
            return type.represent(dumper)(tag, cls.to_yaml(value))

        def load(loader: Loader, node: yaml.Node) -> Any:
            return cls.from_yaml(type.construct(loader)(node))

        # Register conversions
        Dumper.add_representer(cls, dump)
        Loader.add_constructor(tag, load)

        if pattern is not None:
            regexp = re.compile(pattern)
            Dumper.add_implicit_resolver(tag, regexp, None)
            Loader.add_implicit_resolver(tag, regexp, None)

        return cls
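
This is the inner closure of a decorator factory: `tag`, `pattern`, `type`, `Loader` and `Dumper` come from the enclosing scope, which is not shown. A minimal usage sketch, assuming a hypothetical factory `yaml_tag(tag, pattern=None)` that returns the `register` closure above (all names below are illustrative, not the original's surrounding code):

    @yaml_tag('!point')
    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

        @classmethod
        def to_yaml(cls, value):
            # Reduce the instance to a plain structure PyYAML can emit.
            return {'x': value.x, 'y': value.y}

        @classmethod
        def from_yaml(cls, data):
            # Rebuild an instance from the constructed mapping.
            return cls(**data)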
Example #2
    def load(cls,
             stream,
             constructors=None,
             multi_constructors=None,
             implicit_resolvers=None):
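        """Deserialize a single YAML document from `stream`.

        Per-call `constructors`, `multi_constructors` and
        `implicit_resolvers` are merged over the class-level defaults
        before loading.
        """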
        loader = Loader(stream)

        cs = dict(cls._constructors)
        if constructors:
            cs.update(constructors)

        ir = dict(cls._implicit_resolvers)
        if implicit_resolvers:
            ir.update(implicit_resolvers)

        mcs = dict(cls._multi_constructors)
        if multi_constructors:
            mcs.update(multi_constructors)

        # Iterating an empty dict is a no-op, so no emptiness guards are needed.
        for name, constructor in cs.items():
            loader.add_constructor(name, constructor)

        for name, constructor in mcs.items():
            loader.add_multi_constructor(name, constructor)

        for name, pattern in ir.items():
            loader.add_implicit_resolver(name, pattern, None)

        try:
            return loader.get_single_data()
        finally:
            loader.dispose()
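
A hedged usage sketch. `load` appears to be a classmethod on a wrapper class whose `_constructors`, `_multi_constructors` and `_implicit_resolvers` attributes hold defaults; the class name `YamlConfig` and the `!env` constructor below are illustrative assumptions, not part of the original:

    import os

    def env_constructor(loader, node):
        # Resolve '!env NAME' scalars to environment variables.
        return os.environ.get(loader.construct_scalar(node), '')

    # PyYAML loaders accept str, bytes, or file-like streams.
    data = YamlConfig.load('home: !env HOME',
                           constructors={'!env': env_constructor})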
Example #3
def OrderedYaml():
    '''YAML OrderedDict support, plus a float resolver that handles scientific notation.'''
    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG

    def dict_representer(dumper, data):
        return dumper.represent_dict(data.items())

    def dict_constructor(loader, node):
        return OrderedDict(loader.construct_pairs(node))

    Dumper.add_representer(OrderedDict, dict_representer)
    Loader.add_constructor(_mapping_tag, dict_constructor)
    Loader.add_implicit_resolver(
        u'tag:yaml.org,2002:float',
        re.compile(
            u'''^(?:
         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
        |\\.[0-9_]+(?:[eE][-+]?[0-9]+)?
        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
        |[-+]?\\.(?:inf|Inf|INF)
        |\\.(?:nan|NaN|NAN))$''', re.X), list(u'-+0123456789.'))
    return Loader, Dumper
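
A minimal usage sketch, assuming `yaml`, `re`, `OrderedDict` and the module-level `Loader`/`Dumper` classes that `OrderedYaml` patches are already imported as in the snippet above:

    Loader, Dumper = OrderedYaml()

    cfg = yaml.load('b: 1\na: 2\nlr: 1e-4\n', Loader=Loader)
    assert isinstance(cfg, OrderedDict)   # key order is preserved
    assert isinstance(cfg['lr'], float)   # '1e-4' resolves to a float, not a str
    print(yaml.dump(cfg, Dumper=Dumper))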
Example #4
def parse(opt_path: str, is_train: bool = True) -> NoneDict:
    """Parse an options file.

    Args:
        opt_path (str): Path to the options file. Can be JSON or YAML.
        is_train (bool): Whether this is a training run. Default: True.

    Returns:
        NoneDict: Parsed options.
    """

    # check if configuration file exists
    if not os.path.isfile(opt_path):
        opt_path = os.path.join("options", "train" if is_train else "test",
                                opt_path)
        if not os.path.isfile(opt_path):
            raise ValueError(
                "Configuration file {} not found.".format(opt_path))

    ext = os.path.splitext(opt_path)[1].lower()
    if ext == '.json':
        import json
        # remove comments starting with '//' (note: this naive split also
        # truncates values that contain '//', such as URLs)
        json_str = ''
        with open(opt_path, 'r') as f:
            for line in f:
                line = line.split('//')[0] + '\n'
                json_str += line
        opt = json.loads(json_str, object_pairs_hook=OrderedDict)
    elif ext in ['.yml', '.yaml']:
        import yaml
        import re
        with open(opt_path, mode='r') as f:
            try:
                # Use the C loaders when available (faster). To guard against
                # arbitrary Python object execution, swap in the SafeLoader
                # variants noted in the trailing comments.
                from yaml import CLoader as Loader  # or: CSafeLoader as Loader
            except ImportError:
                from yaml import Loader  # or: SafeLoader as Loader
            _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG

            def dict_constructor(loader, node):
                return OrderedDict(loader.construct_pairs(node))

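            # Note: add_constructor/add_implicit_resolver register on the
            # Loader class itself, so these customizations persist for any
            # later use of that class in this process.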
            Loader.add_constructor(_mapping_tag, dict_constructor)
            # patched float resolver so scientific-notation numbers
            # (e.g. 1e-4) parse as floats instead of strings
            Loader.add_implicit_resolver(
                u'tag:yaml.org,2002:float',
                re.compile(
                    u'''^(?:
                [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
                |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
                |\\.[0-9_]+(?:[eE][-+]?[0-9]+)?
                |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
                |[-+]?\\.(?:inf|Inf|INF)
                |\\.(?:nan|NaN|NAN))$''', re.X), list(u'-+0123456789.'))
            opt = yaml.load(f, Loader=Loader)

    opt['is_train'] = is_train
    scale = opt.get('scale', 1)
    bm = opt.get('batch_multiplier', None)

    # datasets
    for phase, dataset in opt['datasets'].items():
        phase = phase.split('_')[0]
        dataset['phase'] = phase
        dataset['scale'] = scale
        is_lmdb = False
        image_path_keys = [
            "HR", "HR_bg", "LR", "A", "B", "AB", "lq", "gt", "ref"
        ]
        for key in image_path_keys:
            image_path = dataset.get('dataroot_' + key, None)
            if image_path is not None:
                if isinstance(image_path, str):
                    is_lmdb = os.path.splitext(
                        image_path)[1].lower() == ".lmdb"
                    image_path = [image_path]
                if isinstance(image_path, list):
                    image_path = [
                        os.path.normpath(os.path.expanduser(path))
                        for path in image_path
                    ]
                    if len(image_path) == 1:
                        # if it's a single-item list, act as if it were a str instead of a list
                        image_path = image_path[0]
                    dataset['dataroot_' + key] = image_path
                else:
                    raise ValueError(
                        "Unexpected path type: {}. Either a single path "
                        "or a list of paths is supported.".format(
                            type(image_path)))
        dataset['data_type'] = 'lmdb' if is_lmdb else 'img'

        HR_size = dataset.get('HR_size', None)
        if HR_size:
            dataset['crop_size'] = HR_size

        if phase == 'train':
            preprocess = dataset.get('preprocess', None)
            if preprocess is not None:
                crop_size = dataset.get('crop_size', None)
                aspect_ratio = dataset.get('aspect_ratio', None)
                load_size = dataset.get('load_size', None)
                center_crop_size = dataset.get('center_crop_size', None)

                if ('resize' in preprocess or 'scale_width' in preprocess
                        or 'scale_height' in preprocess
                        or 'scale_shortside' in preprocess):
                    assert load_size, "load_size not defined"
                    if crop_size:
                        # crop_size should be smaller than the size of loaded image
                        assert (load_size >= crop_size)
                if 'center_crop' in preprocess:
                    assert center_crop_size, "center_crop_size not defined"
                    if crop_size:
                        assert (center_crop_size >= crop_size)
                if 'fixed' in preprocess:
                    assert aspect_ratio, "aspect_ratio not defined"

            pre_crop = dataset.get('pre_crop', None)
            if scale != 1 and not pre_crop:
                if not preprocess:
                    dataset['preprocess'] = 'crop'
                else:
                    for popt in [
                            'scale_shortside', 'scale_height', 'scale_width',
                            'none'
                    ]:
                        if popt in preprocess:
                            raise ValueError(
                                f"Preprocess option {popt} can only be used with 1x scale."
                            )

        if phase == 'train' and bm:
            # compatibility with other forks
            dataset['virtual_batch_size'] = bm * dataset["batch_size"]
        if dataset.get('virtual_batch_size', None):
            dataset['virtual_batch_size'] = max(dataset['virtual_batch_size'],
                                                dataset["batch_size"])

        if phase == 'train' and 'subset_file' in dataset and dataset[
                'subset_file'] is not None:
            dataset['subset_file'] = os.path.normpath(
                os.path.expanduser(dataset['subset_file']))

        if 'lr_downscale_types' in dataset and dataset[
                'lr_downscale_types'] is not None:
            if isinstance(dataset['lr_downscale_types'], str):
                dataset['lr_downscale_types'] = [dataset['lr_downscale_types']]
            dataset['lr_downscale_types'] = [
                (_cv2_str2interpolation[algo.lower()]
                 if isinstance(algo, str) else algo)
                for algo in dataset['lr_downscale_types']
            ]

        for k in [
                'lr_blur_types', 'lr_noise_types', 'lr_noise_types2',
                'hr_noise_types'
        ]:
            if dataset.get(k, None):
                dataset[k] = parse2lists(dataset[k])

        tensor_shape = dataset.get('tensor_shape', None)
        if tensor_shape:
            opt['tensor_shape'] = tensor_shape

    # path
    for key, path in opt['path'].items():
        if path:
            opt['path'][key] = os.path.normpath(os.path.expanduser(path))

    if is_train:
        experiments_root = os.path.join(opt['path']['root'], 'experiments',
                                        opt['name'])
        opt['path']['experiments_root'] = experiments_root
        opt['path']['models'] = os.path.join(experiments_root, 'models')
        opt['path']['training_state'] = os.path.join(experiments_root,
                                                     'training_state')
        opt['path']['log'] = experiments_root
        opt['path']['val_images'] = os.path.join(experiments_root,
                                                 'val_images')
        if opt['train'].get('display_freq', None):
            opt['path']['disp_images'] = os.path.join(experiments_root,
                                                      'disp_images')
        opt['train']['overwrite_val_imgs'] = opt['train'].get(
            'overwrite_val_imgs', None)
        opt['train']['val_comparison'] = opt['train'].get(
            'val_comparison', None)
        opt['logger']['overwrite_chkp'] = opt['logger'].get(
            'overwrite_chkp', None)
        fsa = opt['train'].get('use_frequency_separation', None)
        if fsa and not opt['train'].get('fs', None):
            opt['train']['fs'] = fsa

        # change some options for debug mode
        if 'debug_nochkp' in opt['name']:
            opt['train']['val_freq'] = 8
            opt['logger']['print_freq'] = 2
            opt['logger']['save_checkpoint_freq'] = 10000000
            opt['train']['lr_decay_iter'] = 10
        elif 'debug' in opt['name']:
            opt['train']['val_freq'] = 8
            opt['logger']['print_freq'] = 2
            opt['logger']['save_checkpoint_freq'] = 8
            opt['train']['lr_decay_iter'] = 10

    else:  # test
        results_root = os.path.join(opt['path']['root'], 'results',
                                    opt['name'])
        opt['path']['results_root'] = results_root
        opt['path']['log'] = results_root

    # network_G
    opt['network_G']['scale'] = scale

    # relative learning rate and options
    if 'train' in opt:
        niter = opt['train']['niter']
        for k in ['T_period', 'restarts', 'lr_steps', 'lr_steps_inverse']:
            k_rel = k + '_rel'
            if k_rel in opt['train']:
                opt['train'][k] = [int(x * niter) for x in opt['train'][k_rel]]
                opt['train'].pop(k_rel)
        if 'swa_start_iter_rel' in opt['train']:
            opt['train']['swa_start_iter'] = int(
                opt['train']['swa_start_iter_rel'] * niter)
            opt['train'].pop('swa_start_iter_rel')

    # export CUDA_VISIBLE_DEVICES
    gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
    print('export CUDA_VISIBLE_DEVICES=' + gpu_list)

    return dict_to_nonedict(opt)
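
A hedged call sketch; the file name below is illustrative. `dict_to_nonedict` wraps the parsed options so that absent keys return None instead of raising KeyError:

    opt = parse('train_sr.yml', is_train=True)  # resolved under options/train/

    # Optional settings can be probed directly, without .get() or try/except:
    if opt['train']['swa_start_iter']:
        print('SWA starts at iteration', opt['train']['swa_start_iter'])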