Example #1
def test_initialize() -> None:
    try:
        assert not GlobalHydra().is_initialized()
        initialize(config_dir=None, strict=True)
        assert GlobalHydra().is_initialized()
    finally:
        GlobalHydra().clear()
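Note: this example uses the Hydra 0.11-era config_dir/strict arguments. On Hydra >= 1.1 the same test is usually written with initialize() as a context manager, which clears the singleton on exit; a minimal sketch under that assumption:

from hydra import initialize
from hydra.core.global_hydra import GlobalHydra

def test_initialize_ctx() -> None:
    # The context manager initializes Hydra on entry and clears the
    # process-wide GlobalHydra singleton on exit, replacing the
    # try/finally bookkeeping above.
    with initialize(config_path=None):
        assert GlobalHydra().is_initialized()
    assert not GlobalHydra().is_initialized()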
Example #2
def main(config_file: str):
    parser = argparse.ArgumentParser()
    parser.add_argument("overrides", nargs="*", default=[])
    args = parser.parse_args()

    parsed = Path(config_file)
    initialize(config_dir=str(parsed.parent), strict=False)
    cfg = compose(parsed.name, overrides=args.overrides)
    logger.info(f"Training with the following config:\n{cfg.pretty()}")

    # pass plain dictionaries, since OmegaConf objects don't play nicely with
    # loggers and don't allow non-native types
    module = NetworkLightningModule(OmegaConf.to_container(cfg, resolve=True))
    trainer = Trainer(**OmegaConf.to_container(cfg.pl_trainer, resolve=True))
    trainer.fit(
        module,
        train_dataloader=DataLoader(
            Dataset.from_data_dir(cfg.dataset.train.dir_path, transform=True),
            shuffle=True,
            batch_size=cfg.dataset.train.batch_size,
            num_workers=cfg.dataset.train.num_workers,
        ),
        val_dataloaders=DataLoader(
            Dataset.from_data_dir(cfg.dataset.val.dir_path),
            batch_size=cfg.dataset.val.batch_size,
            num_workers=cfg.dataset.val.num_workers,
        ),
    )
    if cfg.train.run_test:
        trainer.test(test_dataloaders=DataLoader(
            Dataset.from_data_dir(cfg.dataset.test.dir_path),
            batch_size=cfg.dataset.test.batch_size,
            num_workers=cfg.dataset.test.num_workers,
        ))
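Note: cfg.pretty() was deprecated in OmegaConf 2.0. On newer versions the logging line above is typically written with OmegaConf.to_yaml; a drop-in sketch:

from omegaconf import OmegaConf

# OmegaConf >= 2.0 replacement for the deprecated cfg.pretty():
logger.info(f"Training with the following config:\n{OmegaConf.to_yaml(cfg)}")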
Example #3
def main():
    """
    手動起動したとき
    """
    # コマンドライン引数に必要な情報があるかチェック
    if len(argv) >= 4:
        voicebank_config_yaml_path = argv[1].strip('"')
        label_path = argv[2].strip('"')
        out_wav_path = argv[3].strip('"')
    # Fall back to standard input if the command-line arguments are missing
    else:
        voicebank_config_yaml_path = \
            input("Please input voicebank's config file path\n>>> ").strip('"')
        label_path = \
            input('Please input label file path\n>>> ').strip('"')
        out_wav_path = f'{splitext(label_path)[0]}.wav'

    # Split the config file path into directory and file name
    config_path, config_name = split(voicebank_config_yaml_path)

    # Read the config file
    initialize(config_path=relpath(config_path))
    config = compose(config_name=config_name, overrides=[f'+config_path={config_path}'])

    # Generate the WAV file, tagging the file name with a timestamp
    str_now = datetime.now().strftime('%Y%m%d%H%M%S')
    out_wav_path = out_wav_path.replace('.wav', f'__{str_now}.wav')
    hts2wav(config, label_path, out_wav_path)
Example #4
def create_new_dataset_with_embeddings(path_to_dataset: str,
                                       token: str,
                                       dataset_name: str) -> ApiWorkflowClient:
    api_workflow_client = ApiWorkflowClient(token=token)

    # create the dataset
    api_workflow_client.create_new_dataset_with_unique_name(dataset_basename=dataset_name)

    # upload to the dataset
    initialize(config_path="../../lightly/cli/config", job_name="test_app")
    cfg = compose(config_name="config", overrides=[
        f"input_dir='{path_to_dataset}'",
        f"token='{token}'",
        f"dataset_id={api_workflow_client.dataset_id}"
        ])
    upload_cli(cfg)

    # calculate and save the embeddings
    path_to_embeddings_csv = f"{path_to_dataset}/embeddings.csv"
    if not os.path.isfile(path_to_embeddings_csv):
        dataset = LightlyDataset(input_dir=path_to_dataset)
        embeddings = np.random.normal(size=(len(dataset.dataset.samples), 32))
        filepaths, labels = zip(*dataset.dataset.samples)
        filenames = [filepath[len(path_to_dataset):].lstrip('/') for filepath in filepaths]
        print("Starting save of embeddings")
        save_embeddings(path_to_embeddings_csv, embeddings, labels, filenames)
        print("Finished save of embeddings")

    # upload the embeddings
    print("Starting upload of embeddings.")
    api_workflow_client.upload_embeddings(path_to_embeddings_csv=path_to_embeddings_csv, name="embedding_1")
    print("Finished upload of embeddings.")

    return api_workflow_client
Example #5
    def setUpClass(cls):
        initialize("../conf")
        cfg = compose("config")
        cls.dataset = cfg.dataset
        cls.hparams = cfg.hparams
        cls.transform = torchaudio.transforms.MelSpectrogram(
            sample_rate=cls.dataset.sr_libri, n_mels=cls.hparams["n_mels"])
Example #6
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    initialize(config_path=".")
    cfg = compose(config_name="config.yaml")

    model = cfg.training.model
    assert model in ['rnnsm', 'rmtpp', 'grobformer'], 'Invalid model name'

    if model == 'rmtpp':
        assert cfg.training.validate_by == 'rmse'
    else:
        assert cfg.training.validate_by in ['rmse', 'recall', 'auc', 'none']

    model2class = {'rnnsm': RNNSM, 'rmtpp': RMTPP, 'grobformer': Grobformer}
    model2cfg = {'rnnsm': cfg.rnnsm, 'rmtpp': cfg.rmtpp, 'grobformer': cfg.grobformer}

    model_class = model2class[model]
    model_cfg = model2cfg[model]

    train_loader, val_loader = get_ocon_train_val_loaders(
                                 cat_feat_name='event_type',
                                 num_feat_name='time_delta',
                                 model=model,
                                 global_cfg=cfg.globals,
                                 path='data/OCON/train.csv',
                                 batch_size=cfg.training.batch_size,
                                 max_seq_len=model_cfg.max_seq_len)

    model = model_class(model_cfg, cfg.globals).to(device)
    optimizer = optim.Adam(model.parameters(), lr=cfg.training.lr)
    train(train_loader, val_loader, model, optimizer, cfg.training, cfg.globals, device)
Example #7
def test_strict_deprecation_warning(restore_singletons: Any) -> None:
    msg = (
        "\[email protected](strict) flag is deprecated and will removed in the next version."
        "\nSee https://hydra.cc/next/upgrades/0.11_to_1.0/strict_mode_flag_deprecated"
    )
    with pytest.warns(expected_warning=UserWarning, match=re.escape(msg)):
        initialize(config_path=None, strict=True)
Example #8
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    initialize(config_path=".")
    cfg = compose(config_name="config.yaml")

    model = cfg.testing.model
    assert model in ['rnnsm', 'rmtpp', 'grobformer'], 'Invalid model name'

    model2class = {'rnnsm': RNNSM, 'rmtpp': RMTPP, 'grobformer': Grobformer}
    model2cfg = {
        'rnnsm': cfg.rnnsm,
        'rmtpp': cfg.rmtpp,
        'grobformer': cfg.grobformer
    }

    model_class = model2class[model]
    model_cfg = model2cfg[model]

    test_loader = get_ocon_test_loader(cat_feat_name='event_type',
                                       num_feat_name='time_delta',
                                       global_cfg=cfg.globals,
                                       path='data/OCON/test.csv',
                                       batch_size=cfg.training.batch_size,
                                       max_seq_len=model_cfg.max_seq_len)

    model = model_class(model_cfg, cfg.globals)
    model.load_state_dict(torch.load(cfg.testing.model_path))
    test(test_loader, model, cfg.globals, device)
Example #9
def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:

    # Use the field values provided in args to override their counterparts inside the config object
    overrides, deletes = override_module_args(args)

    cfg_name = "config"
    cfg_path = f"../../{cfg_name}"

    if not GlobalHydra().is_initialized():
        initialize(config_path=cfg_path)

    composed_cfg = compose(cfg_name, overrides=overrides, strict=False)
    for k in deletes:
        composed_cfg[k] = None

    cfg = OmegaConf.create(
        OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True))

    # Hack to allow storing a Namespace inside a DictConfig. This should be
    # removed once we move to a newer OmegaConf version that supports object
    # flags, or once all existing models are migrated.
    from omegaconf import _utils

    old_primitive = _utils.is_primitive_type
    _utils.is_primitive_type = lambda _: True

    if cfg.task is None and getattr(args, "task", None):
        cfg.task = Namespace(**vars(args))
        from fairseq.tasks import TASK_REGISTRY

        _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])
        cfg.task._name = args.task
    if cfg.model is None and getattr(args, "arch", None):
        cfg.model = Namespace(**vars(args))
        from fairseq.models import ARCH_MODEL_REGISTRY

        _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])
        cfg.model._name = args.arch
    if cfg.optimizer is None and getattr(args, "optimizer", None):
        cfg.optimizer = Namespace(**vars(args))
        from fairseq.optim import OPTIMIZER_REGISTRY

        _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])
        cfg.optimizer._name = args.optimizer
    if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None):
        cfg.lr_scheduler = Namespace(**vars(args))
        from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY

        _set_legacy_defaults(cfg.lr_scheduler,
                             LR_SCHEDULER_REGISTRY[args.lr_scheduler])
        cfg.lr_scheduler._name = args.lr_scheduler
    if cfg.criterion is None and getattr(args, "criterion", None):
        cfg.criterion = Namespace(**vars(args))
        from fairseq.criterions import CRITERION_REGISTRY

        _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])
        cfg.criterion._name = args.criterion

    _utils.is_primitive_type = old_primitive
    OmegaConf.set_struct(cfg, True)
    return cfg
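The is_primitive_type monkey-patch above is restored only on the success path; if one of the registry lookups raises, the patch leaks. A try/finally context manager is one way to make it exception-safe. A sketch, not part of fairseq's API (the helper name is hypothetical), assuming the OmegaConf 2.0.x internals used here:

import contextlib
from omegaconf import _utils

@contextlib.contextmanager
def _allow_namespace_values():  # hypothetical helper name
    # Temporarily treat every type as primitive so Namespace objects can be
    # stored in the DictConfig, restoring the original check afterwards.
    old_primitive = _utils.is_primitive_type
    _utils.is_primitive_type = lambda _: True
    try:
        yield
    finally:
        _utils.is_primitive_type = old_primitive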
Example #10
def get_global_cfg():
    """
    WARNING: does not work, hydra compose API seems to work only in Jupyter now
    Use experimental Hydra compose API
    """
    raise NotImplementedError
    from hydra.experimental import initialize, compose
    initialize(config_dir='../../conf', strict=True)
    return compose('config.yaml')
Example #11
def test_main():
    # Ensure our training script runs top to bottom without errors.
    initialize(config_path='../conf')
    overrides = dict(num_steps=5,
                     train_bs=2,
                     val_bs=2,
                     test_mode=True,
                     eval_freq=2,
                     report_freq=2)
    cfg = compose("config.yaml", overrides=[f'{k}={v}' for k, v in overrides.items()])
    main(cfg)
Example #12
def test_initialize_with_config_path(hydra_restore_singletons: Any) -> None:
    assert not GlobalHydra().is_initialized()
    initialize(config_path="../hydra/test_utils/configs")
    assert GlobalHydra().is_initialized()

    gh = GlobalHydra.instance()
    assert gh.hydra is not None
    config_search_path = gh.hydra.config_loader.get_search_path()
    assert isinstance(config_search_path, ConfigSearchPathImpl)
    idx = config_search_path.find_first_match(
        SearchPathQuery(provider="main", path=None))
    assert idx != -1
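hydra_restore_singletons is a fixture from Hydra's own test utilities. A sketch of an equivalent fixture, assuming Hydra >= 1.0 where Singleton exposes get_state/set_state:

import copy

import pytest
from hydra.core.singleton import Singleton

@pytest.fixture
def hydra_restore_singletons():
    # Snapshot the singleton state before the test and restore it afterwards,
    # so an initialize() in one test cannot leak into the next.
    state = copy.deepcopy(Singleton.get_state())
    yield
    Singleton.set_state(state)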
Example #13
def test_initialize_with_config_dir() -> None:
    try:
        assert not GlobalHydra().is_initialized()
        initialize(config_dir="../hydra/test_utils/configs", strict=True)
        assert GlobalHydra().is_initialized()

        gh = GlobalHydra.instance()
        assert gh.hydra is not None
        config_search_path = gh.hydra.config_loader.get_search_path()
        idx = config_search_path.find_first_match(
            SearchPath(provider="main", search_path=None))
        assert idx != -1
    finally:
        GlobalHydra().clear()
Example #14
    def load_args(cfg_path: str, cfg_name: str) -> DictConfig:
        """
        Load arguments from yaml files using hydra

        Args:
            cfg_path (str): parents path (e.g. '../configs')
            cfg_name (str): config file name (e.g. 'bart_for_paraphrase_generation')

        Returns:
            (DictConfig): hydra configuration object
        """

        initialize(config_path=cfg_path)
        cfg = compose(config_name=cfg_name)
        return cfg
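Because initialize() sets a process-wide singleton, a second load_args() call raises "GlobalHydra is already initialized". Clearing the singleton first makes the helper re-entrant; a sketch under that assumption, placed before the initialize() call:

from hydra.core.global_hydra import GlobalHydra

# Clear any previous initialization before re-initializing.
if GlobalHydra.instance().is_initialized():
    GlobalHydra.instance().clear()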
Example #15
def train_config(model_dir, dataset_path) -> Config:
    try:
        initialize(config_path="../conf", job_name="test_app")
    except ValueError:
        pass
    cfg = compose(config_name="config")
    cfg = cast(Config, cfg)
    cfg.main.track.experiment_dir_name_format = "test_experiment"
    cfg.main.track.experiment_data_dir = model_dir
    cfg.main.save_model.model_dir = model_dir
    cfg.main.save_model.overwrite_main_model = True
    cfg.main.track.track_experiment = True
    cfg.main.track.save_model_weights = True
    cfg.main.input_data_path = dataset_path
    return cfg
Example #16
def test_initialize_ctx_with_absolute_dir(hydra_restore_singletons: Any,
                                          tmpdir: Any) -> None:
    with raises(
            HydraException,
            match=re.escape("config_path in initialize() must be relative")):
        with initialize(config_path=str(tmpdir)):
            compose(overrides=["+test_group=test"])
Example #17
def hydra_instance() -> Union[Hydra, GlobalHydra]:
    "Provide Hydra/GlobalHydra instance for compose"
    # A generator fixture must yield exactly once per invocation, so the
    # re-initialization belongs in an else branch.
    if HydraConfig.initialized():
        yield GlobalHydra.instance()
    else:
        hydra_init = initialize(config_path="../peddet/conf")
        yield hydra_init
        GlobalHydra.instance().clear()
Example #18
    def test_dataset_index(self):
        with initialize():
            config = compose(config_name="config", overrides=["no_db=true", "psql_password=dummy"])
            dataset = datasets.SocPSQLSeqSAToSDataset(config)

            dataset._get_states_from_db = MagicMock(side_effect=self._get_states_from_db_se_f)
            dataset._get_actions_from_db = MagicMock(side_effect=self._get_actions_from_db_se_f)

            data_df = self._get_states_from_db_se_f(0)
            s = len(data_df)

            input_size = dataset.get_input_size()
            output_size = dataset.get_output_size()

            out = dataset[0]

            in_data = out[0]
            out_data = out[1]

            s_in = in_data.shape[0]
            s_out = out_data.shape[0]

            assert s_in == s_out
            assert in_data.shape == (s - 1, input_size[0], input_size[1], input_size[2])
            assert out_data.shape == (s - 1, output_size[0], output_size[1], output_size[2])
Example #19
    def test_dataset_index(self):
        with initialize():
            config = compose(
                config_name="config",
                overrides=[
                    "no_db=true",
                    "history_length=3",
                    "future_length=2",
                    "first_index=0",
                    "psql_password=dummy"
                ]
            )
            dataset = datasets.SocPSQLForwardSAToSAPolicyDataset(config)
            dataset._get_states_from_db = MagicMock(side_effect=self._get_states_from_db_se_f)
            dataset._get_actions_from_db = MagicMock(side_effect=self._get_actions_from_db_se_f)
            dataset._get_trajectories_length = MagicMock(return_value=[9, 9])

            input_size = dataset.get_input_size()
            output_shape_spatial, output_shape, output_shape_actions = dataset.get_output_size()

            inputs, outputs = dataset[0]

            np.testing.assert_array_equal(inputs.shape, input_size)
            np.testing.assert_array_equal(outputs[0].shape, output_shape_spatial)
            np.testing.assert_array_equal(outputs[1].shape, output_shape)
            np.testing.assert_array_equal(outputs[2].shape, output_shape_actions)
Example #20
def test_model_save_hyper_parameters_interpolation_with_hydra(tmpdir):
    """
    This test relies on configuration saved under tests/models/conf/config.yaml
    """
    class TestHydraModel(BoringModel):
        def __init__(self, args_0, args_1, args_2, kwarg_1=None):
            self.save_hyperparameters()
            assert self.hparams.args_0.log == "Something"
            assert self.hparams.args_1['cfg'].log == "Something"
            assert self.hparams.args_2[0].log == "Something"
            assert self.hparams.kwarg_1['cfg'][0].log == "Something"
            super().__init__()

    with initialize(config_path="conf"):
        args_0 = compose(config_name="config")
        args_1 = {"cfg": compose(config_name="config")}
        args_2 = [compose(config_name="config")]
        kwarg_1 = {"cfg": [compose(config_name="config")]}
        model = TestHydraModel(args_0, args_1, args_2, kwarg_1=kwarg_1)
        epochs = 2
        checkpoint_callback = ModelCheckpoint(monitor=None,
                                              dirpath=tmpdir,
                                              save_top_k=-1)
        trainer = Trainer(
            default_root_dir=tmpdir,
            callbacks=[checkpoint_callback],
            limit_train_batches=10,
            limit_val_batches=10,
            max_epochs=epochs,
            logger=False,
        )
        trainer.fit(model)
        _ = TestHydraModel.load_from_checkpoint(
            checkpoint_callback.best_model_path)
Example #21
    def test_dataset_index(self):
        with initialize():
            config = compose(config_name="config",
                             overrides=[
                                 "history_length=3",
                                 "future_length=2",
                                 "dataset_path={}".format(_DATASET_PATH),
                             ])
            dataset = datasets.SocPreprocessedForwardSAToSAPolicyDataset(
                config)
            dataset._get_states_from_db = MagicMock(
                side_effect=self._get_states_from_db_se_f)
            dataset._get_actions_from_db = MagicMock(
                side_effect=self._get_actions_from_db_se_f)
            dataset._get_trajectories_length = MagicMock(return_value=[9, 9])

            input_size = dataset.get_input_size()
            output_shape_spatial, output_shape, output_shape_actions = dataset.get_output_size()

            inputs, outputs = dataset[0]

            np.testing.assert_array_equal(inputs.shape, input_size)
            np.testing.assert_array_equal(outputs[0].shape,
                                          output_shape_spatial)
            np.testing.assert_array_equal(outputs[1].shape, output_shape)
            np.testing.assert_array_equal(outputs[2].shape,
                                          output_shape_actions)
Example #22
def cfg():
    with initialize(config_path="../", job_name="test_app"):
        config = compose(config_name="config")
        config.dataset = compose(config_name="tests/test_dataset_config")
        config.train = compose(config_name="tests/test_train_config")

        return config
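If the composed config is in struct mode and does not already define dataset/train keys, the assignments above raise; OmegaConf's open_dict is the usual escape hatch. A sketch, assuming those keys may be absent:

from omegaconf import open_dict

with open_dict(config):
    config.dataset = compose(config_name="tests/test_dataset_config")
    config.train = compose(config_name="tests/test_train_config")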
Example #23
def initialize_hydra_no_path() -> Any:
    # Enter initialize() outside the try block so a failed initialization
    # doesn't leave `init` unbound in the finally clause.
    init = initialize()
    init.__enter__()
    try:
        yield
    finally:
        init.__exit__(*sys.exc_info())
Example #24
def initialize_hydra(config_path: Optional[str]) -> Any:
    # Enter initialize() outside the try block so a failed initialization
    # doesn't leave `init` unbound in the finally clause.
    init = initialize(config_path=config_path)
    init.__enter__()
    try:
        yield
    finally:
        init.__exit__(*sys.exc_info())
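Both helpers above are generator functions and are presumably wrapped with contextlib.contextmanager (or registered as pytest fixtures) at their definition site; typical usage under that assumption (the "../conf" path is illustrative):

import contextlib

initialize_hydra_cm = contextlib.contextmanager(initialize_hydra)

with initialize_hydra_cm(config_path="../conf"):
    cfg = compose(config_name="config")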
Example #25
    def setUpClass(cls):
        with initialize(config_path="../configs"):
            cfg = compose(config_name="test")
            cls.edit_distance = cfg.edit_distance
            cls.graph_modify_ratio = cfg.graph_modify_ratio
            cls.TESTCASE_COUNT = cfg.TESTCASE_COUNT

            stats_cfg = cfg.stats
            cls.SAMPLES_PER_CLASS = cls.TESTCASE_COUNT

            cls.min_accuracy = stats_cfg.min_accuracy
            cls.min_diff = stats_cfg.min_diff

            dataset = PretrainNASBench(
                engine=api101.NASBench(cfg.dataset_path),
                model_spec=api101.ModelSpec,
                samples_per_class=cls.SAMPLES_PER_CLASS,
                max_seq_len=cfg.max_seq_len,
                graph_modify_ratio=cls.graph_modify_ratio
            )

        cls.graph_modifier = dataset.graph_modifier

        cls.testcases = []
        for _, key in enumerate(random.sample(dataset.engine.hash_iterator(), cls.TESTCASE_COUNT)):
            arch = dataset.engine.get_modelspec_by_hash(key)
            matrix, ops = arch.matrix, arch.ops
            cls.testcases.append((matrix, ops))
Example #26
    def test_get_output_metadata(self):
        with initialize():
            config = compose(config_name="config",
                             overrides=[
                                 "history_length=3",
                                 "future_length=2",
                                 "dataset_path={}".format(_DATASET_PATH),
                             ])
            dataset = datasets.SocPreprocessedForwardSAToSAPolicyDataset(
                config)
            dataset._get_states_from_db = MagicMock(
                side_effect=self._get_states_from_db_se_f)
            dataset._get_actions_from_db = MagicMock(
                side_effect=self._get_actions_from_db_se_f)
            dataset._get_trajectories_length = MagicMock(return_value=[9, 9])

            batch = dataset[0]
            y_spatial_s_true_seq, y_s_true_seq, y_a_true_seq = batch[1]
            metadata = dataset.get_output_metadata()
            spatial_metadata, linear_metadata, actions_metadata = metadata

            last_spatial_key = list(spatial_metadata.keys())[-1]
            assert spatial_metadata[last_spatial_key][1] == y_spatial_s_true_seq.shape[1]
            last_linear_key = list(linear_metadata.keys())[-1]
            assert linear_metadata[last_linear_key][1] == y_s_true_seq.shape[1]
            last_action_key = list(actions_metadata.keys())[-1]
            assert actions_metadata[last_action_key][1] == y_a_true_seq.shape[1]
Example #27
def test_jobname_override_initialize_ctx(hydra_restore_singletons: Any,
                                         job_name: Optional[str],
                                         expected: str) -> None:
    with initialize(config_path="../examples/jupyter_notebooks/cloud_app/conf",
                    job_name=job_name):
        ret = compose(return_hydra_config=True)
        assert ret.hydra.job.name == expected
Example #28
File: cli.py Project: S-aiueo32/hiraishin
def train(config_path: str, config_name: str, overrides: str) -> None:

    initialize(config_path=relpath(config_path, dirname(__file__)))

    config = compose(config_name,
                     overrides=overrides.split(),
                     return_hydra_config=True)

    OmegaConf.resolve(config)
    OmegaConf.set_struct(config, False)
    config_hydra = config.pop('hydra')

    run_dir = Path(config_hydra.run.dir)
    run_dir.mkdir(exist_ok=True, parents=True)

    app(config)
Example #29
    def setUp(self):
        self.create_fake_dataset()
        with initialize(config_path="../../lightly/cli/config",
                        job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=["token='123'", f"input_dir={self.folder_path}"])
Example #30
def test_metaworld_env_MT(env, mode) -> None:
    with initialize(config_path="../../config"):
        # the config path is relative to this module
        config = compose(
            config_name="config",
            overrides=[f"env={env}", "experiment.num_eval_episodes=2"],
        )
        benchmark = hydra.utils.instantiate(config.env.benchmark)
        env, env_id_to_task_map = env_builder.build_metaworld_vec_env(
            config=config,
            benchmark=benchmark,
            mode=mode,
            env_id_to_task_map=None)
        _, new_env_id_to_task_map = env_builder.build_metaworld_vec_env(
            config=config,
            benchmark=benchmark,
            mode=mode,
            env_id_to_task_map=env_id_to_task_map,
        )
        assert new_env_id_to_task_map is env_id_to_task_map
        env.reset()
        num_envs = len(env.ids)
        action = np.concatenate(
            [np.expand_dims(x, 0) for x in env.action_space.sample()])
        mtobs, reward, done, info = env.step(action)
        assert mtobs["env_obs"].shape == (num_envs, 12)
        assert action.shape == (num_envs, 4)
        assert "success" in info[0]
        env.close()