Example #1
    def test_get_module_root(self):
        """
        When a user runs `allennlp test-install`, we have no idea where
        they're running it from, so we do an `os.chdir` to the _module_
        root in order to get all the paths in the fixtures to resolve properly.

        The logic within `allennlp test-install` is pretty hard to test in
        its entirety, so this test verifies that the `os.chdir` component
        works properly by checking that we find the correct path to
        `os.chdir` to.
        """
        project_root = _get_module_root()
        assert os.path.exists(os.path.join(project_root, "__main__.py"))
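
The `_get_module_root()` helper itself is not shown in any of these snippets. A minimal sketch of what it plausibly does, assuming it resolves the installed package directory from `allennlp.__file__` and returns a `pathlib.Path` (consistent with the `__main__.py` check above and the `.parent` access in the later examples):

import pathlib

import allennlp


def _get_module_root() -> pathlib.Path:
    # Directory that contains the installed allennlp package, i.e. the folder
    # holding __main__.py. Returning a pathlib.Path keeps the later `.parent`
    # access working. Assumed implementation, not taken verbatim from the examples.
    return pathlib.Path(allennlp.__file__).parent
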
Example #2
    params["trainer"]["cuda_device"] = 0

    # train this one to a tempdir
    tempdir = tempfile.gettempdir()
    train_model(params, tempdir)

    # now copy back the weights and archived model
    shutil.copy(os.path.join(tempdir, "best.th"),
                os.path.join(serialization_dir, "best_gpu.th"))
    shutil.copy(os.path.join(tempdir, "model.tar.gz"),
                os.path.join(serialization_dir, "model_gpu.tar.gz"))


if __name__ == "__main__":
    initial_working_dir = os.getcwd()
    module_root = _get_module_root().parent
    logger.info("Changing directory to %s", module_root)
    os.chdir(module_root)
    if len(sys.argv) >= 2 and sys.argv[1].lower() == "gpu":
        train_fixture_gpu("allennlp/tests/fixtures/srl/")
    else:
        models = [
            'biaffine_dependency_parser',
            'bidaf',
            'dialog_qa',
            'constituency_parser',
            'coref',
            'decomposable_attention',
            'encoder_decoder/composed_seq2seq',
            'encoder_decoder/simple_seq2seq',
            'encoder_decoder/copynet_seq2seq',
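
For reference, a self-contained sketch of the surrounding `train_fixture_gpu` function, stitched together from the fragments repeated across these examples; the config file name and the exact import paths are assumptions about an older AllenNLP layout, not taken verbatim from the snippets:

import os
import shutil
import tempfile

from allennlp.common import Params
from allennlp.commands.train import train_model


def train_fixture_gpu(config_prefix: str) -> None:
    config_file = config_prefix + "experiment.json"      # file name assumed
    serialization_dir = config_prefix + "serialization"
    params = Params.from_file(config_file)
    params["trainer"]["cuda_device"] = 0                  # retrain this fixture on GPU 0

    # Train into a temporary directory so the committed CPU fixture stays untouched.
    tempdir = tempfile.gettempdir()
    train_model(params, tempdir)

    # Copy the GPU weights and the archived model back next to the CPU fixture.
    shutil.copy(os.path.join(tempdir, "best.th"),
                os.path.join(serialization_dir, "best_gpu.th"))
    shutil.copy(os.path.join(tempdir, "model.tar.gz"),
                os.path.join(serialization_dir, "model_gpu.tar.gz"))
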
Example #3
    serialization_dir = config_prefix + 'serialization'
    params = Params.from_file(config_file)
    params["trainer"]["cuda_device"] = 0

    # train this one to a tempdir
    tempdir = tempfile.gettempdir()
    train_model(params, tempdir)

    # now copy back the weights and archived model
    shutil.copy(os.path.join(tempdir, "best.th"), os.path.join(serialization_dir, "best_gpu.th"))
    shutil.copy(os.path.join(tempdir, "model.tar.gz"), os.path.join(serialization_dir, "model_gpu.tar.gz"))


if __name__ == "__main__":
    initial_working_dir = os.getcwd()
    module_root = _get_module_root()
    logger.info("Changing directory to %s", module_root)
    os.chdir(module_root)
    if len(sys.argv) >= 2 and sys.argv[1].lower() == "gpu":
        train_fixture_gpu("tests/fixtures/srl/")
    else:
        models = [
                'bidaf',
                'constituency_parser',
                'coref',
                'decomposable_attention',
                'encoder_decoder/simple_seq2seq',
                'semantic_parsing/nlvr_coverage_semantic_parser',
                'semantic_parsing/nlvr_direct_semantic_parser',
                'semantic_parsing/wikitables',
                'srl',
Example #4
    serialization_dir = config_prefix + 'serialization'
    params = Params.from_file(config_file)
    params["trainer"]["cuda_device"] = 0

    # train this one to a tempdir
    tempdir = tempfile.gettempdir()
    train_model(params, tempdir)

    # now copy back the weights and archived model
    shutil.copy(os.path.join(tempdir, "best.th"), os.path.join(serialization_dir, "best_gpu.th"))
    shutil.copy(os.path.join(tempdir, "model.tar.gz"), os.path.join(serialization_dir, "model_gpu.tar.gz"))


if __name__ == "__main__":
    initial_working_dir = os.getcwd()
    module_root = _get_module_root().parent
    logger.info("Changing directory to %s", module_root)
    os.chdir(module_root)
    if len(sys.argv) >= 2 and sys.argv[1].lower() == "gpu":
        train_fixture_gpu("allennlp/tests/fixtures/srl/")
    else:
        models = [
                'biaffine_dependency_parser',
                'bidaf',
                'dialog_qa',
                'constituency_parser',
                'coref',
                'decomposable_attention',
                'encoder_decoder/simple_seq2seq',
                'semantic_parsing/nlvr_coverage_semantic_parser',
                'semantic_parsing/nlvr_direct_semantic_parser',
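
Each of the training examples cuts off inside its `models` list, so the rest of the `__main__` block is not shown. A hedged sketch of the overall pattern these scripts follow, reusing `_get_module_root` and `train_fixture_gpu` from above: remember where the user launched the script, chdir to the module root so fixture paths resolve, retrain either the GPU fixture or each CPU fixture, then restore the working directory. The `train_fixture` helper, the try/finally, and the final chdir back are assumptions, not code from the snippets:

import os
import sys

if __name__ == "__main__":
    initial_working_dir = os.getcwd()
    os.chdir(_get_module_root())                     # helper sketched after Example #1
    try:
        if len(sys.argv) >= 2 and sys.argv[1].lower() == "gpu":
            train_fixture_gpu("tests/fixtures/srl/")
        else:
            models = ["bidaf", "coref", "srl"]       # abbreviated; see the full lists above
            for model in models:
                train_fixture("tests/fixtures/" + model + "/")   # assumed CPU counterpart of train_fixture_gpu
    finally:
        os.chdir(initial_working_dir)                # assumed: restore the caller's working directory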