def test_readonly(src, func, expectation):
    c = OmegaConf.create(src)
    OmegaConf.set_readonly(c, True)
    with expectation:
        func(c)
    assert c == src
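For context, a minimal self-contained sketch of what this parametrized helper exercises (the func and expectation arguments are assumed to come from pytest.mark.parametrize): mutating a config after OmegaConf.set_readonly(cfg, True) raises ReadonlyConfigError and leaves the content unchanged.

import pytest
from omegaconf import OmegaConf
from omegaconf.errors import ReadonlyConfigError


def demo_readonly() -> None:
    cfg = OmegaConf.create({"a": 1})
    OmegaConf.set_readonly(cfg, True)
    with pytest.raises(ReadonlyConfigError):
        cfg.a = 2  # any mutation attempt is rejected
    assert cfg == {"a": 1}  # the original content is untouched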
Example #2
@dataclass
class TableModelConfig:
    train_data_folds: List[str] = MISSING
    val_data_folds: List[str] = MISSING
    train_feats: List[str] = MISSING
    target: str = MISSING
    model_name: str = MISSING
    classifier_model: str = MISSING
    model_type: Optional[str] = "CatBoostRegressor"
    construct_params: Dict[Any, Any] = field(default_factory=dict)
    train_params: Dict[Any, Any] = field(default_factory=dict)


#%%
train_config_path = util.get_my_data_dir() / os.environ["TRAIN_CONFIG_FILENAME"]
conf = OmegaConf.create(TableModelConfig(**OmegaConf.load(train_config_path)))

#%%
train_df, val_df = io.load_dataset(conf.train_data_folds, conf.val_data_folds)

from catboost import CatBoostClassifier
classifier = CatBoostClassifier()
classifier.load_model(conf.classifier_model)
feats = [f for f in conf.train_feats if f != "rank_class"]
train_df["rank_class"] = classifier.predict(train_df[feats])
val_df["rank_class"] = classifier.predict(val_df[feats])

# %%
import pandas as pd
train_data_concat = pd.concat([train_df, val_df], axis=0, ignore_index=True)
#%%
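A note on the pattern above: constructing the dataclass from the loaded YAML (TableModelConfig(**OmegaConf.load(...))) works, but OmegaConf can also merge the file directly against a structured schema, which keeps the dataclass types as validation. A minimal sketch assuming the same TableModelConfig dataclass; the YAML filename is hypothetical.

from omegaconf import OmegaConf

schema = OmegaConf.structured(TableModelConfig)   # typed schema with MISSING placeholders
file_cfg = OmegaConf.load("train_config.yaml")    # hypothetical path
conf = OmegaConf.merge(schema, file_cfg)          # keys/types are checked against the schema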
Example #3
def create(obj: Any, _parent_: Container) -> Any:
    """Create a config object from `obj`, similar to `OmegaConf.create`"""
    from omegaconf import OmegaConf

    assert isinstance(_parent_, BaseContainer)
    return OmegaConf.create(obj, parent=_parent_)
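A rough usage sketch for a resolver with this signature, assuming OmegaConf >= 2.1, where a resolver may declare the special _parent_ argument and have the parent container injected automatically; exactly how resolver outputs are wrapped differs across OmegaConf versions, so treat the last line as indicative only.

from omegaconf import OmegaConf

OmegaConf.register_new_resolver("create", create)
cfg = OmegaConf.create({"built": "${create:[1, 2, 3]}"})
print(type(cfg.built))  # expected: a ListConfig parented under cfg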
Example #4
def test_class_instantiate(conf, expected):
    conf = OmegaConf.create(conf)
    obj = utils.instantiate(conf)
    assert obj == expected
def _mock_model_config():
    conf = {'temp_file': None}
    conf = OmegaConf.create({'model': conf})
    OmegaConf.set_struct(conf, True)
    return conf
Example #6
    def __init__(self,
                 model_name,
                 config,
                 vocab_file,
                 model_parallel_size=None,
                 model_parallel_rank=None):

        super().__init__()

        self._model_parallel_size = model_parallel_size
        self._model_parallel_rank = model_parallel_rank
        self._restore_path = None
        self._app_state = None
        self._model_name = model_name

        if not os.path.exists(vocab_file):
            raise ValueError(f'Vocab file not found at {vocab_file}')

        config["vocab_file"] = vocab_file
        config['tokenizer_type'] = 'BertWordPieceLowerCase'
        config['lazy_mpu_init'] = True
        config['onnx_safe'] = True

        # if 'model_parallel_size' in config:
        if self._model_parallel_size is not None:
            app_state = AppState()
            self._app_state = app_state

            # must be set for model parallel megatron-lm
            os.environ["WORLD_SIZE"] = str(app_state.world_size)
            os.environ["RANK"] = str(self._model_parallel_rank)

            # used to set model_parallel_size in megatron-lm argparser
            def _update_model_parallel_arg(parser):
                parser.set_defaults(
                    model_parallel_size=self._model_parallel_size)
                return parser

            extra_args_provider = _update_model_parallel_arg
        else:
            extra_args_provider = None

        # Initialize the part of Megatron global state that is needed for its constructor.
        # We set the 'lazy_mpu_init' flag so that Megatron performs only the initialization that does not
        # depend on DDP being initialized yet (and we don't want Megatron to initialize DDP itself either)
        # and returns a hook for us to call after PTL has initialized torch.distributed.
        # We call this hook during .forward().
        # TODO: can we call this hook from the PTL hook .setup()?
        self._lazy_init_fn = initialize_megatron(
            extra_args_provider=extra_args_provider,
            args_defaults=config,
            ignore_unknown_args=True)

        # read Megatron arguments back
        args = get_args()
        logging.info(f'Megatron-lm argparse args: {args}')

        self.language_model, self._language_model_key = get_language_model(
            attention_mask_func=bert_attention_mask_func,
            num_tokentypes=2,
            add_pooler=False)

        self.config = OmegaConf.create(config)
        # key used for checkpoints
        self._hidden_size = self.language_model.hidden_size
Example #7
    def __init__(self):
        self.conf = OmegaConf.create()
        self.set("name", "UNKNOWN_NAME")
Example #8
def test_supported_chars() -> None:
    supported_chars = "%_-abc123."
    c = OmegaConf.create(dict(dir1="${copy:" + supported_chars + "}"))

    OmegaConf.register_resolver("copy", lambda x: x)
    assert c.dir1 == supported_chars
Example #9
def test_interpolation_in_list_key_error() -> None:
    # Test that a KeyError is thrown if an str_interpolation key is not available
    c = OmegaConf.create(["${10}"])

    with pytest.raises(KeyError):
        c[0]
Example #10
def test_non_container_interpolation() -> None:
    cfg = OmegaConf.create(dict(foo=0, bar="${foo.baz}"))
    with pytest.raises(ConfigKeyError):
        cfg.bar
Example #11
def test_register_resolver_1(restore_resolvers: Any) -> None:
    OmegaConf.register_resolver("plus_10", lambda x: int(x) + 10)
    c = OmegaConf.create({"k": "${plus_10:990}"})

    assert type(c.k) == int
    assert c.k == 1000
def test_save_illegal_type() -> None:
    with pytest.raises(TypeError):
        OmegaConf.save(OmegaConf.create(), 1000)  # type: ignore
def test_readonly_list_change_item():
    c = OmegaConf.create([1, 2, 3])
    OmegaConf.set_readonly(c, True)
    with raises(ReadonlyConfigError, match="[1]"):
        c[1] = 10
    assert c == [1, 2, 3]
def test_readonly_list_append():
    c = OmegaConf.create([])
    OmegaConf.set_readonly(c, True)
    with raises(ReadonlyConfigError, match="[0]"):
        c.append(10)
    assert c == []
Example #15
def test_custom_resolver_param_supported_chars() -> None:
    supported_chars = "abc123_/:-\\+.$%*@"
    c = OmegaConf.create({"dir1": "${copy:" + supported_chars + "}"})

    OmegaConf.register_new_resolver("copy", lambda x: x)
    assert c.dir1 == supported_chars
Example #16
def test_unsupported_interpolation_type() -> None:
    c = OmegaConf.create({"foo": "${wrong_type:ref}"})
    with pytest.raises(ValueError):
        c.foo
Example #17
    def create() -> DictConfig:
        return OmegaConf.create({"invalid": f"${{ab{c}de}}"})
Example #18
def test_incremental_dict_with_interpolation() -> None:
    conf = OmegaConf.create()
    conf.a = 1
    conf.b = OmegaConf.create()
    conf.b.c = "${a}"
    assert conf.b.c == conf.a  # type:ignore
def qufpn_config(min_level, max_level, weight_method=None):
    """A dynamic quad fpn config that can adapt to different min/max levels.

    It extends the idea of BiFPN, and has four paths:
        (up_down -> bottom_up) + (bottom_up -> up_down).

    Paper: https://ieeexplore.ieee.org/document/9225379
    Ref code: From contribution to TF EfficientDet
    https://github.com/google/automl/blob/eb74c6739382e9444817d2ad97c4582dbe9a9020/efficientdet/keras/fpn_configs.py
    """
    p = OmegaConf.create()
    weight_method = weight_method or 'fastattn'
    quad_method = 'fastattn'
    num_levels = max_level - min_level + 1
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    level_last_id = lambda level: node_ids[level][-1]
    level_all_ids = lambda level: node_ids[level]
    level_first_id = lambda level: node_ids[level][0]
    id_cnt = itertools.count(num_levels)

    p.nodes = []
    for i in range(max_level - 1, min_level - 1, -1):
        # top-down path 1.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    node_ids[max_level].append(node_ids[max_level][-1])

    for i in range(min_level + 1, max_level):
        # bottom-up path 2.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))

    i = max_level
    p.nodes.append({
        'reduction': 1 << i,
        'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
        'weight_method': weight_method
    })
    node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])

    for i in range(min_level + 1, max_level + 1, 1):
        # bottom-up path 3.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [
                level_first_id(i),
                level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)
            ],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    node_ids[min_level].append(node_ids[min_level][-1])

    for i in range(max_level - 1, min_level, -1):
        # top-down path 4.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] + [level_last_id(i + 1)],
            'weight_method': weight_method
        })
        node_ids[i].append(next(id_cnt))
    i = min_level
    p.nodes.append({
        'reduction': 1 << i,
        'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
        'weight_method': weight_method
    })
    node_ids[i].append(next(id_cnt))
    node_ids[max_level].append(node_ids[max_level][-1])

    # NOTE: the order of the quad paths is reversed from the original; my code expects the output of
    # each FPN repeat to be the same as the input from the backbone, in order of increasing reduction
    for i in range(min_level, max_level + 1):
        # quad-add path.
        p.nodes.append({
            'reduction': 1 << i,
            'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
            'weight_method': quad_method
        })
        node_ids[i].append(next(id_cnt))

    return p
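A quick usage sketch for the function above; the level range 3..7 is the typical EfficientDet setting and is used here only for illustration.

p = qufpn_config(min_level=3, max_level=7)
print(len(p.nodes))               # number of fusion nodes across the four paths
print(p.nodes[0].inputs_offsets)  # input offsets feeding the first top-down node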
Example #20
import fairseq  # not required
import torch

from omegaconf import OmegaConf
from desed_task.nnet.WRNN_2_ import WRNN

import pdb

model = torch.load('/home1/irteam/users/koo/trained2.pt')
big_cfg = OmegaConf.create(vars(vars(model['args'])['w2v_args']))

sed = WRNN(big_cfg)
pdb.set_trace()
sed.w2v.load_state_dict(model['model'])
Example #21
def test_class_instantiate_passthrough(conf, expected):
    conf = OmegaConf.create(conf)
    obj = utils.instantiate(conf, 10, d=40)
    assert obj == expected
Example #22
def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):
    """Loads a checkpoint to CPU (with upgrading for backward compatibility).

    If doing single-GPU training or if the checkpoint is only being loaded by at
    most one process on each node (current default behavior is for only rank 0
    to read the checkpoint from disk), load_on_all_ranks should be False to
    avoid errors from torch.distributed not having been initialized or
    torch.distributed.barrier() hanging.

    If all processes on each node may be loading the checkpoint
    simultaneously, load_on_all_ranks should be set to True to avoid I/O
    conflicts.

    There's currently no support for > 1 but < all processes loading the
    checkpoint on each node.
    """
    local_path = PathManager.get_local_path(path)
    # The locally cached file returned by get_local_path() may be stale for
    # remote files that are periodically updated/overwritten (ex:
    # checkpoint_last.pt) - so we remove the local copy, sync across processes
    # (if needed), and then download a fresh copy.
    if local_path != path and PathManager.path_requires_pathmanager(path):
        try:
            os.remove(local_path)
        except FileNotFoundError:
            # With potentially multiple processes removing the same file, the
            # file being missing is benign (missing_ok isn't available until
            # Python 3.8).
            pass
        if load_on_all_ranks:
            torch.distributed.barrier()
        local_path = PathManager.get_local_path(path)

    with open(local_path, "rb") as f:
        state = torch.load(f, map_location=torch.device("cpu"))

    if "args" in state and state[
            "args"] is not None and arg_overrides is not None:
        args = state["args"]
        for arg_name, arg_val in arg_overrides.items():
            setattr(args, arg_name, arg_val)

    if "cfg" in state and state["cfg"] is not None:

        # hack to be able to set Namespace in dict config. this should be removed when we update to newer
        # omegaconf version that supports object flags, or when we migrate all existing models
        from omegaconf import _utils

        old_primitive = _utils.is_primitive_type
        _utils.is_primitive_type = lambda _: True

        state["cfg"] = OmegaConf.create(state["cfg"])

        _utils.is_primitive_type = old_primitive
        OmegaConf.set_struct(state["cfg"], True)

        if arg_overrides is not None:
            overwrite_args_by_name(state["cfg"], arg_overrides)

    state = _upgrade_state_dict(state)
    return state
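A hedged usage sketch of the function above; the checkpoint path and the overridden field are hypothetical and only illustrate how arg_overrides rewrites fields of the stored args/cfg before the state dict is returned.

# Hypothetical path and override key, for illustration only.
state = load_checkpoint_to_cpu(
    "checkpoints/checkpoint_last.pt",
    arg_overrides={"data": "/path/to/new/data-bin"},
)
model_state = state["model"]  # parameters, already mapped to CPU
cfg_or_args = state.get("cfg") or state.get("args")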
Example #23
    def __init__(self, cfg: DictConfig, trainer: Trainer = None):
        # Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
        self.global_rank = 0
        self.world_size = 0
        self.local_rank = 0
        if trainer is not None:
            self.global_rank = (trainer.node_rank *
                                trainer.num_gpus) + trainer.local_rank
            self.world_size = trainer.num_nodes * trainer.num_gpus
            self.local_rank = trainer.local_rank

        super().__init__(cfg=cfg, trainer=trainer)

        schema = OmegaConf.structured(Wav2VecEncoderModelConfig)
        if isinstance(cfg, dict):
            cfg = OmegaConf.create(cfg)
        elif not isinstance(cfg, DictConfig):
            raise ValueError(
                f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig"
            )

        cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
        cfg = OmegaConf.merge(schema, cfg)

        feature_enc_layers = cfg.conv_feature_encoder.conv_feature_layers
        self.embed = feature_enc_layers[-1][0]  # select the last conv layer's output dimension

        self.feature_extractor = ConvFeatureEncoder(
            conv_layers=feature_enc_layers,
            mode=cfg.conv_feature_encoder.extractor_mode,
            conv_bias=cfg.conv_feature_encoder.conv_bias,
        )

        encoder_embed_dim = cfg.transformer_encoder.encoder.embedding_dim
        self.post_extract_proj = (
            nn.Linear(self.embed, encoder_embed_dim)
            if self.embed != encoder_embed_dim and not cfg.quantizer.quantize_input
            else None
        )

        self.mask_cfg = cfg.masking

        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)

        self.feature_grad_mult = cfg.feature_grad_mult

        self.quantizer = None
        self.input_quantizer = None

        self.n_negatives = cfg.n_negatives
        self.cross_sample_negatives = cfg.cross_sample_negatives
        self.codebook_negatives = cfg.codebook_negatives
        self.negatives_from_everywhere = cfg.negatives_from_everywhere

        final_dim = cfg.final_dim if cfg.final_dim > 0 else encoder_embed_dim
        self.final_dim = final_dim
        if cfg.quantizer.quantize_targets:
            vq_dim = cfg.quantizer.latent_dim if cfg.quantizer.latent_dim > 0 else final_dim
            self.quantizer = GumbelVectorQuantizer(
                dim=self.embed,
                num_vars=cfg.quantizer.latent_vars,
                temp=cfg.quantizer.latent_temp,
                groups=cfg.quantizer.latent_groups,
                combine_groups=False,
                vq_dim=vq_dim,
                time_first=True,
            )
            self.project_q = nn.Linear(vq_dim, final_dim)
        else:
            self.project_q = nn.Linear(self.embed, final_dim)

        if cfg.quantizer.quantize_input:
            if cfg.quantizer.same_quantizer and self.quantizer is not None:
                vq_dim = final_dim
                self.input_quantizer = self.quantizer
            else:
                vq_dim = cfg.quantizer.latent_dim if cfg.quantizer.latent_dim > 0 else encoder_embed_dim
                self.input_quantizer = GumbelVectorQuantizer(
                    dim=self.embed,
                    num_vars=cfg.quantizer.latent_vars,
                    temp=cfg.quantizer.latent_temp,
                    groups=cfg.quantizer.latent_groups,
                    combine_groups=False,
                    vq_dim=vq_dim,
                    time_first=True,
                )
            self.project_inp = nn.Linear(vq_dim, encoder_embed_dim)

        self.mask_emb = nn.Parameter(
            torch.FloatTensor(encoder_embed_dim).uniform_())

        self.encoder = Wav2VecTransformerEncoder(cfg.transformer_encoder)
        self.layer_norm = nn.LayerNorm(self.embed)

        self.target_glu = None
        if cfg.target_glu:
            self.target_glu = nn.Sequential(
                nn.Linear(final_dim, final_dim * 2), nn.GLU())

        self.final_proj = nn.Linear(encoder_embed_dim, final_dim)
        self.loss = Wav2VecLoss(
            feature_loss_weight=cfg.loss.feature_loss_weight,
            prob_ppl_weight=cfg.loss.prob_ppl_weight,
            logit_temp=cfg.logit_temp,
        )
    result = trainer.fit(model)
    assert result == 1
    assert trainer.custom_kwarg == 'custom'
    assert trainer.fast_dev_run

    # when we pass in an unknown arg, the base class should complain
    with pytest.raises(
            TypeError,
            match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"
    ):
        TrainerSubclass(abcdefg='unknown_arg')


@pytest.mark.parametrize('trainer_params', [
    OmegaConf.create({
        'max_epochs': 1,
        'gpus': 1
    }),
    OmegaConf.create({
        'max_epochs': 1,
        'gpus': [0]
    }),
])
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason="test requires GPU machine")
def test_trainer_omegaconf(trainer_params):
    Trainer(**trainer_params)


def test_trainer_pickle(tmpdir):
    trainer = Trainer(
        max_epochs=1,
Example #25
    def __init__(self) -> None:
        self.conf: DictConfig = OmegaConf.create()
        self.set("name", "UNKNOWN_NAME")
    model_type: str = MISSING
    model_name: str = MISSING
    train_params: Dict[Any, Any] = field(default_factory=dict)


#%%
try:
    yaml_path = sys.argv[1]
except IndexError:
    print(
        "usage: python train_decision_tree.py <path to configguraiton yaml>",
        file=sys.stderr,
    )
    sys.exit(-1)

conf = OmegaConf.create(TableModelConfig(**OmegaConf.load(yaml_path)))

#%%
def make_sklearn_api_model(train_df, conf):
    from importlib import import_module

    module = import_module(conf.model_module)
    model = getattr(module, conf.model_type)(**conf.train_params)
    model.fit(train_df[conf.train_feats], train_df[conf.target])
    return model


def mse(model, x, y) -> float:
    diff = y - model.predict(x)
    return float(diff @ diff) / len(y)
Example #27
def resolve_test_dataloaders(model: 'ModelPT'):
    """
    Helper method that operates on the ModelPT class to automatically support
    multiple dataloaders for the test set.

    It does so by first resolving the path to one/more data files via `resolve_dataset_name_from_cfg()`.
    If this resolution fails, it assumes the data loader is prepared to manually support / not support
    multiple data loaders and simply calls the appropriate setup method.

    If resolution succeeds:
        Checks if provided path is to a single file or a list of files.
        If a single file is provided, simply tags that file as such and loads it via the setup method.
        If multiple files are provided:
            Inject a new manifest path at index "i" into the resolved key.
            Calls the appropriate setup method to set the data loader.
            Collects the initialized data loader in a list and preserves it.
            Once all data loaders are processed, assigns the list of loaded loaders to the ModelPT.
            Finally assigns a list of unique names resolved from the file paths to the ModelPT.

    Args:
        model: ModelPT subclass, which requires >=1 Test Dataloaders to be setup.
    """
    cfg = copy.deepcopy(model._cfg)
    dataloaders = []

    # Process test_dl_idx, if present
    if 'test_dl_idx' in cfg.test_ds:
        cfg = OmegaConf.to_container(cfg)
        test_dl_idx = cfg['test_ds'].pop('test_dl_idx')
        cfg = OmegaConf.create(cfg)
    else:
        test_dl_idx = 0

    # Set test_dl_idx
    model._test_dl_idx = test_dl_idx

    ds_key = resolve_dataset_name_from_cfg(cfg.test_ds)

    if ds_key is None:
        logging.debug("Could not resolve file path from provided config - {}. "
                      "Disabling support for multi-dataloaders.".format(
                          cfg.test_ds))

        model.setup_test_data(cfg.test_ds)
        return

    ds_values = cfg.test_ds[ds_key]

    if isinstance(ds_values, (list, tuple, ListConfig)):

        for ds_value in ds_values:
            cfg.test_ds[ds_key] = ds_value
            model.setup_test_data(cfg.test_ds)
            dataloaders.append(model._test_dl)

        model._test_dl = dataloaders
        model._test_names = [parse_dataset_as_name(ds) for ds in ds_values]

        unique_names_check(name_list=model._test_names)
        return

    else:
        model.setup_test_data(cfg.test_ds)
        model._test_names = [parse_dataset_as_name(ds_values)]

        unique_names_check(name_list=model._test_names)
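To make the resolution concrete, a hedged sketch of the kind of test_ds config this expects; the manifest_filepath key name is an assumption (just one common dataset key that resolve_dataset_name_from_cfg could pick up), and test_dl_idx selects which dataloader's loss is reported.

from omegaconf import OmegaConf

# Hypothetical test-set config: two manifests -> two test dataloaders.
test_ds = OmegaConf.create({
    "manifest_filepath": ["manifests/test_clean.json", "manifests/test_other.json"],
    "batch_size": 32,
    "test_dl_idx": 0,  # report the loss of the first dataloader
})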
Example #28
BASE_TEST_CFG = OmegaConf.create({
    # Standard data types.
    "str": "hi",
    "int": 123,
    "float": 1.2,
    "dict": {
        "a": 0,
        "b": {
            "c": 1
        }
    },
    "list": [x - 1 for x in range(11)],
    "null": None,
    # Special cases.
    "x@y": 123,  # @ in name
    "$x$y$z$": 456,  # $ in name (beginning, middle and end)
    "0": 0,  # integer name
    "FalsE": {
        "TruE": True
    },  # bool name
    "None": {
        "null": 1
    },  # null-like name
    "1": {
        "2": 12
    },  # dot-path with int keys
    # Used in nested interpolations.
    "str_test": "test",
    "ref_str": "str",
    "options": {
        "a": "A",
        "b": "B"
    },
    "choice": "a",
    "rel_opt": ".options",
})
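A small, hedged sketch of how a couple of these entries are exercised (the added keys below are illustrative, not part of the fixture); ref_str and str support nested interpolation, while keys such as options, choice and rel_opt are meant for more advanced selection patterns not shown here. Assumes OmegaConf >= 2.1.

import copy

cfg = copy.deepcopy(BASE_TEST_CFG)
cfg.simple = "${str}"         # -> "hi"
cfg.nested = "${${ref_str}}"  # ${ref_str} -> "str", then ${str} -> "hi"
assert cfg.simple == "hi"
assert cfg.nested == "hi"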
Example #29
def _get_kwargs(
    config: Union[DictConfig, ListConfig],
    root: bool = True,
    **kwargs: Any,
) -> Any:
    from hydra.utils import instantiate

    assert OmegaConf.is_config(config)

    if OmegaConf.is_list(config):
        assert isinstance(config, ListConfig)
        return [
            _get_kwargs(x, root=False) if OmegaConf.is_config(x) else x for x in config
        ]

    assert OmegaConf.is_dict(config), "Input config is not an OmegaConf DictConfig"

    recursive = _is_recursive(config, kwargs)
    overrides = OmegaConf.create(kwargs, flags={"allow_objects": True})
    config.merge_with(overrides)

    final_kwargs = OmegaConf.create(flags={"allow_objects": True})
    final_kwargs._set_parent(config._get_parent())
    final_kwargs._set_flag("readonly", False)
    final_kwargs._set_flag("struct", False)
    if recursive:
        for k, v in config.items_ex(resolve=False):
            if OmegaConf.is_none(v):
                final_kwargs[k] = v
            elif _is_target(v):
                final_kwargs[k] = instantiate(v)
            elif OmegaConf.is_dict(v):
                d = OmegaConf.create({}, flags={"allow_objects": True})
                for key, value in v.items_ex(resolve=False):
                    if _is_target(value):
                        d[key] = instantiate(value)
                    elif OmegaConf.is_config(value):
                        d[key] = _get_kwargs(value, root=False)
                    else:
                        d[key] = value
                d._metadata.object_type = v._metadata.object_type
                final_kwargs[k] = d
            elif OmegaConf.is_list(v):
                lst = OmegaConf.create([], flags={"allow_objects": True})
                for x in v:
                    if _is_target(x):
                        lst.append(instantiate(x))
                    elif OmegaConf.is_config(x):
                        lst.append(_get_kwargs(x, root=False))
                        lst[-1]._metadata.object_type = x._metadata.object_type
                    else:
                        lst.append(x)
                final_kwargs[k] = lst
            else:
                final_kwargs[k] = v
    else:
        for k, v in config.items_ex(resolve=False):
            final_kwargs[k] = v

    final_kwargs._set_flag("readonly", None)
    final_kwargs._set_flag("struct", None)
    final_kwargs._set_flag("allow_objects", None)
    if not root:
        # This is tricky: since the root kwargs are exploded anyway, we can treat them as an untyped dict.
        # The motivation is that the object type is used as an indicator to treat the object differently
        # during conversion to a primitive container in some cases.
        final_kwargs._metadata.object_type = config._metadata.object_type
    return final_kwargs
def test_readonly_from_cli():
    c = OmegaConf.create({"foo": {"bar": [1]}})
    OmegaConf.set_readonly(c, True)
    cli = OmegaConf.from_dotlist(["foo.bar=[2]"])
    with raises(ReadonlyConfigError, match="foo.bar"):
        OmegaConf.merge(c, cli)