Code Example #1
def train_fixture(config_prefix: str) -> None:
    import allennlp_rc  # noqa F401: Needed to register the registrables.

    config_file = config_prefix + "experiment.json"
    if not os.path.exists(config_file):
        config_file = config_prefix + "experiment.jsonnet"

    serialization_dir = config_prefix + "serialization"
    # Train model doesn't like it if we have incomplete serialization
    # directories, so remove them if they exist.
    if os.path.exists(serialization_dir):
        shutil.rmtree(serialization_dir)

    # train the model
    train_model_from_file(config_file, serialization_dir)

    # remove unnecessary files
    shutil.rmtree(os.path.join(serialization_dir, "log"))

    for filename in glob.glob(os.path.join(serialization_dir, "*")):
        if (
            filename.endswith(".log")
            or filename.endswith(".json")
            or re.search(r"epoch_[0-9]+\.th$", filename)
        ):
            os.remove(filename)
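
For context, this helper is typically called with a fixture directory prefix; the trailing slash matters because the file names are built by plain string concatenation. The path below is illustrative only:

# Illustrative call; the fixture directory is hypothetical.
train_fixture("test_fixtures/rc/bidaf/")
# Reads test_fixtures/rc/bidaf/experiment.json (or .jsonnet), writes the trained
# model to test_fixtures/rc/bidaf/serialization, then prunes logs and
# per-epoch checkpoints.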
Code Example #2
    def test_model_can_train_with_amp(self):
        train_model_from_file(
            self.param_file,
            self.TEST_DIR,
            overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
        )

        # NOTE: as of writing this test, AMP does not work with RNNs and LSTMCells. Hence we had
        # to wrap the call to LSTMCell() in CopyNet (and other models) within an autocast(False) context.
        # But if this part of the test fails, i.e. a RuntimeError is never raised,
        # that means AMP may be working now with RNNs, in which case we can remove
        # any calls to `autocast(False)` around RNNs like we do in CopyNet.
        # So just do a grep search for uses of 'autocast(False)' or 'autocast(enabled=False)'
        # in the library.
        # If you're still confused, contact @epwalsh.
        with pytest.raises(RuntimeError,
                           match="expected scalar type Half but found Float"):
            rnn = torch.nn.LSTMCell(10, 20).cuda()

            hx = torch.rand((3, 20), device="cuda")
            cx = torch.rand((3, 20), device="cuda")
            inp = torch.rand((3, 10), device="cuda")

            with torch.cuda.amp.autocast(True):
                hx, cx = rnn(inp, (hx, cx))
Code Example #3
 def train_file(self):
     config_filename = "training_config/my_model_trained_on_my_dataset.jsonnet"
     serialization_dir = "result"
     train_model_from_file(config_filename,
                           serialization_dir,
                           file_friendly_logging=True,
                           force=True)
Code Example #4
 def test_model_can_train_save_and_load_with_mixed_precision(self):
     seed = 0  # This test is very sensitive to the seed.
     train_model_from_file(
         FIXTURES_ROOT / "rc" / "bidaf" / "experiment.json",
         self.TEST_DIR,
         overrides=f"{{'trainer.use_amp':true,'trainer.cuda_device':0,'random_seed':{seed},'numpy_seed':{seed},'pytorch_seed':{seed}}}",
     )
Code Example #5
def train_fixture(config_file: str, serialization_dir: str) -> None:
    # train the model
    train_model_from_file(config_file, serialization_dir)

    # remove unnecessary files
    shutil.rmtree(os.path.join(serialization_dir, "log"))

    for filename in glob.glob(os.path.join(serialization_dir, "*")):
        if filename.endswith(".log") or filename.endswith(
                ".json") or re.search(r"epoch_[0-9]+\.th$", filename):
            os.remove(filename)
Code Example #6
def train_model(name: str):
    typer.echo(f"Running {name}")
    try:
        train_model_from_file(
            parameter_filename=f'./configs/{name}.jsonnet',
            serialization_dir=f"./models/{name}_model",
            include_package=['ger_wiki', 'allennlp_models'],
            force=True
        )
    except FileNotFoundError as e:
        print(e)
Code Example #7
def test_bert_tune_overfit():
    config_path = os.path.join(test_path, "configs", "overfit_decomp_bert_tune.jsonnet") 
    output_dir = os.path.join(test_path, "checkpoints", "overfit_decomp_bert_tune.ckpt") 

    test_args = setup_checkpointing_and_args(config_path, output_dir) 
    train_model_from_file(test_args.param_path,
                          test_args.serialization_dir)

    metrics = read_metrics(output_dir) 
    assert_successful_overfit(metrics, {"validation_s_f1": 100.0,
                                        "training_uas": 100.0,
                                        "training_las": 100.0})
Code Example #8
File: test_ud_overfit.py Project: esteng/miso_uds
def test_ud_ewt_lstm():
    config_path = os.path.join(test_path, "configs",
                               "overfit_syntax_only.jsonnet")
    output_dir = os.path.join(test_path, "checkpoints",
                              "overfit_syntax_only.ckpt")

    test_args = setup_checkpointing_and_args(config_path, output_dir)
    train_model_from_file(test_args.param_path, test_args.serialization_dir)

    metrics = read_metrics(output_dir)
    assert_successful_overfit(metrics, {
        "validation_syn_uas": 100.0,
        "validation_syn_las": 100.0
    })
Code Example #9
def _run_experiment(config_file, serialization_dir, config_override, cuda_ind,
                    learning_rate, use_lexicon):
    config_override["trainer"].update({
        "optimizer": {
            "lr": learning_rate
        },
        "cuda_device": cuda_ind
    })
    config_override["model"]["domain_utils"].update(
        {"is_use_lexicon": use_lexicon})
    train_model_from_file(parameter_filename=config_file,
                          serialization_dir=serialization_dir,
                          overrides=json.dumps(config_override),
                          force=True)
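
The overrides pattern used above, building a nested dict and serializing it with json.dumps before handing it to train_model_from_file, reduces to the following minimal sketch; the config path, output directory, and override values are illustrative:

import json

from allennlp.commands.train import train_model_from_file

# Override only the learning rate and the device; everything else comes from the config file.
overrides = {"trainer": {"optimizer": {"lr": 1e-3}, "cuda_device": -1}}
train_model_from_file(
    parameter_filename="configs/experiment.jsonnet",  # illustrative config path
    serialization_dir="results/lr_1e-3",              # illustrative output directory
    overrides=json.dumps(overrides),                  # dict serialized to a JSON string
    force=True,                                       # overwrite an existing serialization directory
)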
Code Example #10
File: train_fixtures.py Project: siddsach/allennlp
def train_fixture(config_file: str, serialization_dir: str) -> None:
    # Train model doesn't like it if we have incomplete serialization
    # directories, so remove them if they exist.
    if os.path.exists(serialization_dir):
        shutil.rmtree(serialization_dir)

    # train the model
    train_model_from_file(config_file, serialization_dir)

    # remove unnecessary files
    shutil.rmtree(os.path.join(serialization_dir, "log"))

    for filename in glob.glob(os.path.join(serialization_dir, "*")):
        if filename.endswith(".log") or filename.endswith(".json") or re.search(r"epoch_[0-9]+\.th$", filename):
            os.remove(filename)
Code Example #11
File: main.py Project: lgessler/embur
def evaluate_w2v(config, serialization_dir, trainable):
    if os.path.exists(serialization_dir):
        print(f"{serialization_dir} exists, removing...")
        shutil.rmtree(serialization_dir)

    print("#" * 40)
    print("# Training")
    print("#" * 40)
    os.environ["TRAINABLE"] = str(int(trainable))
    train_model_from_file(config, serialization_dir)

    print("#" * 40)
    print("# Evaluating")
    print("#" * 40)
    args = eval_args(serialization_dir)
    evaluate_from_args(args)
Code Example #12
def test_interface_encoder_side():
    config_path = os.path.join(test_path, "configs",
                               "overfit_synt_sem_encoder.jsonnet")
    output_dir = os.path.join(test_path, "checkpoints",
                              "overfit_interface_encoder_side.ckpt")

    test_args = setup_checkpointing_and_args(config_path, output_dir)
    train_model_from_file(test_args.param_path, test_args.serialization_dir)

    metrics = read_metrics(output_dir)
    assert_successful_overfit(
        metrics, {
            "validation_s_f1": 100.0,
            "validation_syn_uas": 100.0,
            "validation_syn_las": 100.0
        })
Code Example #13
File: main.py Project: lgessler/embur
def pretrain(config, serialization_dir, output_dir, num_layers,
             num_attention_heads, embedding_dim, tokenizer_conllu_path):
    if os.path.exists(serialization_dir):
        print(f"{serialization_dir} exists, removing...")
        shutil.rmtree(serialization_dir)
    if os.path.exists(output_dir):
        print(f"{output_dir} exists, removing...")
        shutil.rmtree(output_dir)

    os.makedirs(output_dir, exist_ok=True)

    # Prepare tokenizer and save to dir
    documents = read_conllu_files(tokenizer_conllu_path)
    sentences = []
    for document in documents:
        for sentence in document:
            sentences.append(" ".join([t['form'] for t in sentence]))
    print("Training tokenizer...")
    os.environ["TOKENIZER_PATH"] = output_dir
    os.environ["NUM_LAYERS"] = str(num_layers)
    os.environ["NUM_ATTENTION_HEADS"] = str(num_attention_heads)
    os.environ["EMBEDDING_DIM"] = str(embedding_dim)
    # TODO: check pretrained tokenizer for behavior
    train_bert_tokenizer(sentences, serialize_path=output_dir, vocab_size=6000)

    # Train the LM
    print("Beginning pretraining...")
    model = train_model_from_file(config, serialization_dir)

    # Write out
    bert_serialization: BertModel = model._backbone.bert
    bert_serialization.save_pretrained(output_dir)
Code Example #14
def run():
    parser = argparse.ArgumentParser(allow_abbrev=True)
    parser.add_argument("--dir", type=str)
    parser.add_argument("--gpu", type=int,default=0)
    
    args = parser.parse_args()
    overrides_dict = {"trainer":{
                            "cuda_device": args.gpu,
                            }
    }
    modify_config(args.dir, overrides_dict)
    train_model_from_file(
        serialization_dir=args.dir,
        parameter_filename=f"{args.dir}/config.json",
        recover=True,
        )
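
Because recover=True resumes from the checkpoints already present in the serialization directory, the core call can be sketched on its own; the directory name is illustrative:

from allennlp.commands.train import train_model_from_file

# Resume an interrupted run; the directory must already contain config.json and checkpoints.
train_model_from_file(
    parameter_filename="models/run1/config.json",  # illustrative path to the saved config
    serialization_dir="models/run1",
    recover=True,
)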
Code Example #15
def cli_train(config_path: str, log_to_comet: bool = False):
    log.info("log_to_comet: %s", log_to_comet)
    log.info("config_path: %s", config_path)
    log.info("Training model")
    with open(config_path) as f:
        conf = toml.load(f)

    log.info("Configuration\n%s", conf)
    train.train_model_from_file(
        parameter_filename=conf["allennlp_conf"],
        serialization_dir=conf["serialization_dir"],
        file_friendly_logging=True,
        force=True,
    )
    score_model(conf["serialization_dir"], log_to_comet=log_to_comet)
    shell(f'touch {conf["serialization_dir"]}/COMPLETE')
Code Example #16
File: model_test_case.py Project: zxsted/allennlp
    def ensure_model_can_train_save_and_load(self, param_file: str):
        save_dir = os.path.join(self.TEST_DIR, "save_and_load_test")
        archive_file = os.path.join(save_dir, "model.tar.gz")
        model = train_model_from_file(param_file, save_dir)
        loaded_model = load_archive(archive_file).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        # First we make sure that the state dict (the parameters) are the same for both models.
        for key in state_keys:
            assert_allclose(model.state_dict()[key].numpy(),
                            loaded_model.state_dict()[key].numpy(),
                            err_msg=key)
        params = Params.from_file(param_file)
        reader = DatasetReader.from_params(params['dataset_reader'])
        iterator = DataIterator.from_params(params['iterator'])

        # We'll check that even if we index the dataset with each model separately, we still get
        # the same result out.
        model_dataset = reader.read(params['validation_data_path'])
        model_dataset.index_instances(model.vocab)
        model_batch_arrays = next(iterator(model_dataset, shuffle=False))
        model_batch = arrays_to_variables(model_batch_arrays, for_training=False)
        loaded_dataset = reader.read(params['validation_data_path'])
        loaded_dataset.index_instances(loaded_model.vocab)
        loaded_batch_arrays = next(iterator(loaded_dataset, shuffle=False))
        loaded_batch = arrays_to_variables(loaded_batch_arrays, for_training=False)

        # The datasets themselves should be identical.
        for key in model_batch.keys():
            field = model_batch[key]
            if isinstance(field, dict):
                for subfield in field:
                    self.assert_fields_equal(model_batch[key][subfield],
                                             loaded_batch[key][subfield],
                                             tolerance=1e-6,
                                             name=key + '.' + subfield)
            else:
                self.assert_fields_equal(model_batch[key], loaded_batch[key], 1e-6, key)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        model_predictions = model.forward(**model_batch)
        loaded_model_predictions = loaded_model.forward(**loaded_batch)

        # Check loaded model's loss exists and we can compute gradients, for continuing training.
        loaded_model_loss = loaded_model_predictions["loss"]
        assert loaded_model_loss is not None
        loaded_model_loss.backward()

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(model_predictions[key],
                                     loaded_model_predictions[key],
                                     tolerance=1e-4,
                                     name=key)

        return model, loaded_model
Code Example #17
 def setUp(self):
     super().setUp()
     self.set_up_model(
         self.FIXTURES_ROOT / "event2mind" / "experiment.json",
         self.FIXTURES_ROOT / "data" / "event2mind_medium.csv",
     )
     save_dir = self.TEST_DIR / "trained_model_tests"
     self.trained_model = train_model_from_file(self.param_file, save_dir)
Code Example #18
File: train_fixtures.py Project: ziaridoy20/allennlp
def train_fixture(config_prefix: str) -> None:
    config_file = config_prefix + 'experiment.json'
    serialization_dir = config_prefix + 'serialization'
    # Train model doesn't like it if we have incomplete serialization
    # directories, so remove them if they exist.
    if os.path.exists(serialization_dir):
        shutil.rmtree(serialization_dir)

    # train the model
    train_model_from_file(config_file, serialization_dir)

    # remove unnecessary files
    shutil.rmtree(os.path.join(serialization_dir, "log"))

    for filename in glob.glob(os.path.join(serialization_dir, "*")):
        if filename.endswith(".log") or filename.endswith(".json") or re.search(r"epoch_[0-9]+\.th$", filename):
            os.remove(filename)
Code Example #19
File: train.py Project: jacobdanovitch/jdnlp
def train(cfg):
    # import_submodules("jdnlp")

    exp_fp = f'experiments/{cfg.exp}.json'
    if not os.path.isfile(exp_fp):
        raise FileNotFoundError(
            f'Experiment file {exp_fp} not found in dir {os.getcwd()}')

    params = dict(
        parameter_filename=f'jdnlp/model_configs/{cfg.model}.json',
        serialization_dir=f'saved/{cfg.exp}/{cfg.model}',
        overrides=open(exp_fp).read(),
        force=True,
        cache_directory="~/.cache/allennlp",
    )
    #print(json.dumps(params, indent=2))

    train_model_from_file(**params)
Code Example #20
File: main.py Project: lgessler/embur
def evaluate(config, serialization_dir, bert_path, trainable):
    if os.path.exists(serialization_dir):
        print(f"{serialization_dir} exists, removing...")
        shutil.rmtree(serialization_dir)

    print("#" * 40)
    print("# Training")
    print("#" * 40)
    os.environ["BERT_DIMS"] = str(
        BertModel.from_pretrained(bert_path).config.hidden_size)
    os.environ["BERT_PATH"] = bert_path
    os.environ["TRAINABLE"] = str(int(trainable))
    train_model_from_file(config, serialization_dir)

    print("#" * 40)
    print("# Evaluating")
    print("#" * 40)
    args = eval_args(serialization_dir)
    evaluate_from_args(args)
Code Example #21
def _run_experiment(config_file, serialization_dir, config_override,
                    embeddings, cuda_ind, domain, learning_rate, dropout):
    config_override["trainer"] = {
        "optimizer": {
            "lr": learning_rate
        },
        "cuda_device": cuda_ind
    }
    or_model = {}
    if embeddings == 'elmo':
        or_model["source_embedder"] = {"elmo": {"dropout": dropout}}
    if domain is not None:
        or_model["domain"] = domain
    config_override["model"] = or_model

    train_model_from_file(parameter_filename=config_file,
                          serialization_dir=serialization_dir,
                          overrides=json.dumps(config_override),
                          force=True)
Code Example #22
File: util.py Project: bplank/DaNplus
def train(config, name, resume):
    now = datetime.now()
    serialization_dir = 'logs/' + name + '/' + now.strftime(
        "%Y.%m.%d_%H.%M.%S") + '/'
    if resume:
        serialization_dir = name
    if not os.path.isdir(serialization_dir):
        os.makedirs(serialization_dir)

    config_path = serialization_dir + 'config.json'
    config.to_file(config_path)

    train_model_from_file(config_path,
                          serialization_dir,
                          file_friendly_logging=True,
                          force=(not resume),
                          recover=resume)
    if os.path.isfile(serialization_dir + 'vocabulary/.lock'):
        os.remove(serialization_dir + 'vocabulary/.lock')
    return serialization_dir
Code Example #23
    def test_npmi_computed_correctly(self):
        save_dir = self.TEST_DIR / "save_and_load_test"
        model = train_model_from_file(self.param_file, save_dir, overrides="")

        topics = [(1, ["great", "movie", "film", "amazing", "wow", "best", "ridiculous", "ever", "good", "incredible", "positive"]),
                  (2, ["bad", "film", "worst", "negative", "movie", "ever", "not", "any", "gross", "boring"])]
        npmi = model.compute_npmi(topics, num_words=10)

        ref_vocab = model._ref_vocab
        ref_counts = model._ref_count_mat

        vocab_index = dict(zip(ref_vocab, range(len(ref_vocab))))
        n_docs, _ = ref_counts.shape

        npmi_means = []
        for topic in topics:
            words = topic[1]
            npmi_vals = []
            for word_i, word1 in enumerate(words[:10]):
                if word1 in vocab_index:
                    index1 = vocab_index[word1]
                else:
                    index1 = None
                for word2 in words[word_i+1:10]:
                    if word2 in vocab_index:
                        index2 = vocab_index[word2]
                    else:
                        index2 = None
                    if index1 is None or index2 is None:
                        _npmi = 0.0
                    else:
                        col1 = np.array(ref_counts[:, index1].todense() > 0, dtype=int)
                        col2 = np.array(ref_counts[:, index2].todense() > 0, dtype=int)
                        sum1 = col1.sum()
                        sum2 = col2.sum()
                        interaction = np.sum(col1 * col2)
                        if interaction == 0:
                            assert model._npmi_numerator[index1, index2] == 0.0 and model._npmi_denominator[index1, index2] == 0.0
                            _npmi = 0.0
                        else:
                            assert model._ref_interaction[index1, index2] == np.log10(interaction)
                            assert model._ref_doc_sum[index1] == sum1
                            assert model._ref_doc_sum[index2] == sum2
                            expected_numerator = np.log10(n_docs) + np.log10(interaction) - np.log10(sum1) - np.log10(sum2)
                            numerator = np.log10(model.n_docs) + model._npmi_numerator[index1, index2]
                            assert np.isclose(expected_numerator, numerator)
                            expected_denominator = np.log10(n_docs) - np.log10(interaction)
                            denominator = np.log10(model.n_docs) - model._npmi_denominator[index1, index2]
                            assert np.isclose(expected_denominator, denominator)
                            _npmi = expected_numerator / expected_denominator
                    npmi_vals.append(_npmi)
            npmi_means.append(np.mean(npmi_vals))
        assert np.isclose(npmi, np.mean(npmi_means))
Code Example #24
File: OntoEmma.py Project: swy9834/ontoemma
    def _train_nn(self, model_path: str, config_file: str):
        """
        Train a neural network model
        :param model_path:
        :param config_file:
        :return:
        """
        # import allennlp ontoemma classes (to register -- necessary, do not remove)
        from emma.allennlp_classes.ontoemma_dataset_reader import OntologyMatchingDatasetReader
        from emma.allennlp_classes.ontoemma_model import OntoEmmaNN

        with open(config_file) as json_data:
            configuration = json.load(json_data)

        cuda_device = configuration['trainer']['cuda_device']

        if cuda_device >= 0:
            with device(cuda_device):
                train_model_from_file(config_file, model_path)
        else:
            train_model_from_file(config_file, model_path)
        return
Code Example #25
 def test_model_can_train_save_and_load(self):
     save_dir = self.TEST_DIR / "save_and_load_test"
     archive_file = save_dir / "model.tar.gz"
     # test train and save
     model = train_model_from_file(self.param_file, save_dir)
     # test load
     loaded_model = load_archive(archive_file, cuda_device=-1).model
     state_keys = model.state_dict().keys()
     loaded_state_keys = loaded_model.state_dict().keys()
     assert state_keys == loaded_state_keys
     # make sure that the state dict (the parameters) are the same
     # for both models.
     for key in state_keys:
         assert_allclose(model.state_dict()[key].cpu().numpy(),
                         loaded_model.state_dict()[key].cpu().numpy(),
                         err_msg=key)
Code Example #26
    def fit(self,
            train_data: TargetTextCollection,
            val_data: TargetTextCollection,
            test_data: Optional[TargetTextCollection] = None) -> None:
        '''
        Given the training, validation, and optionally the test data it will 
        train the model that is defined in the model params file provided as 
        argument to the constructor of the class. Once trained the model can 
        be accessed through the `model` attribute.

        NOTE: If the test data is given, the model only uses it to fit the
        vocabulary that is within the test data; the model NEVER trains on
        the test data.
        
        :param train_data: Training data.
        :param val_data: Validation data.
        :param test_data: Optional, test data.
        '''

        model_params = self._preprocess_and_load_param_file(self._param_fp)
        # Ensures that a different random seed is used each time
        self._set_random_seeds(model_params)
        with tempfile.TemporaryDirectory() as temp_dir:
            train_fp = Path(temp_dir, 'train_data.json')
            val_fp = Path(temp_dir, 'val_data.json')

            # Write the training and validation data to json Optionally test as
            # well
            train_data.to_json_file(train_fp)
            val_data.to_json_file(val_fp)
            if test_data:
                test_fp = Path(temp_dir, 'test_data.json')
                test_data.to_json_file(test_fp)
                self._add_dataset_paths(model_params, train_fp, val_fp,
                                        test_fp)
                model_params["evaluate_on_test"] = True
            else:
                self._add_dataset_paths(model_params, train_fp, val_fp)

            save_dir = self.save_dir
            if save_dir is None:
                save_dir = Path(temp_dir, 'temp_save_dir')

            temp_param_fp = Path(temp_dir, 'temp_param_file.json')
            model_params.to_file(temp_param_fp.resolve())
            trained_model = train_model_from_file(temp_param_fp, save_dir)
            self.model = trained_model
Code Example #27
 def ensure_batch_predictions_are_consistent(self, param_file: str = None):
     if param_file:
         save_dir = self.TEST_DIR / "save_and_load_test"
         model = train_model_from_file(param_file, save_dir)
     else:
         model = self.model
     model.eval()
     single_predictions = []
     for i, instance in enumerate(self.instances):
         dataset = Batch([instance])
         tensors = dataset.as_tensor_dict(dataset.get_padding_lengths())
         result = model(**tensors)
         single_predictions.append(result)
     full_dataset = Batch(self.instances)
     batch_tensors = full_dataset.as_tensor_dict(
         full_dataset.get_padding_lengths())
     batch_predictions = model(**batch_tensors)
     for i, instance_predictions in enumerate(single_predictions):
         for key, single_predicted in instance_predictions.items():
             tolerance = 1e-6
             if key == 'loss':
                 # Loss is particularly unstable; we'll just be satisfied if everything else is
                 # close.
                 continue
             single_predicted = single_predicted[0]
             batch_predicted = batch_predictions[key][i]
             if isinstance(single_predicted, torch.Tensor):
                 if single_predicted.size() != batch_predicted.size():
                     slices = tuple(
                         slice(0, size) for size in single_predicted.size())
                     batch_predicted = batch_predicted[slices]
                 assert_allclose(single_predicted.data.numpy(),
                                 batch_predicted.data.numpy(),
                                 atol=tolerance,
                                 err_msg=key)
             else:
                 assert single_predicted == batch_predicted, key
Code Example #28
    def ensure_model_can_train_save_and_load(
        self,
        param_file: str,
        tolerance: float = 1e-4,
        cuda_device: int = -1,
        gradients_to_ignore: Set[str] = None,
        overrides: str = "",
        metric_to_check: str = None,
        metric_terminal_value: float = None,
        metric_tolerance: float = 1e-4,
        disable_dropout: bool = True,
    ):
        """
        # Parameters

        param_file : `str`
            Path to a training configuration file that we will use to train the model for this
            test.
        tolerance : `float`, optional (default=1e-4)
            When comparing model predictions between the originally-trained model and the model
            after saving and loading, we will use this tolerance value (passed as `rtol` to
            `numpy.testing.assert_allclose`).
        cuda_device : `int`, optional (default=-1)
            The device to run the test on.
        gradients_to_ignore : `Set[str]`, optional (default=None)
            This test runs a gradient check to make sure that we're actually computing gradients
            for all of the parameters in the model.  If you really want to ignore certain
            parameters when doing that check, you can pass their names here.  This is not
            recommended unless you're `really` sure you don't need to have non-zero gradients for
            those parameters (e.g., some of the beam search / state machine models have
            infrequently-used parameters that are hard to force the model to use in a small test).
        overrides : `str`, optional (default = "")
            A JSON string that we will use to override values in the input parameter file.
        metric_to_check: `str`, optional (default = None)
            We may want to automatically perform a check that the model reaches a given metric when
            training (on the validation set, if it is specified). This may be useful in CI, for example.
            You can pass any metric that appears in your model's returned metrics.
        metric_terminal_value: `float`, optional (default = None)
            When you set `metric_to_check`, you need to set the value this metric must converge to.
        metric_tolerance: `float`, optional (default=1e-4)
            Tolerance to check your model metric against the metric terminal value. One can expect some
            variance in model metrics when the training process is highly stochastic.
        disable_dropout : `bool`, optional (default = True)
            If True we will set all dropout to 0 before checking gradients. (Otherwise, with small
            datasets, you may get zero gradients because of unlucky dropout.)
        """
        save_dir = self.TEST_DIR / "save_and_load_test"
        archive_file = save_dir / "model.tar.gz"
        model = train_model_from_file(param_file,
                                      save_dir,
                                      overrides=overrides)
        metrics_file = save_dir / "metrics.json"
        if metric_to_check is not None:
            metrics = json.loads(metrics_file.read_text())
            metric_value = metrics.get(
                f"best_validation_{metric_to_check}") or metrics.get(
                    f"training_{metric_to_check}")
            assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
            assert metric_terminal_value is not None, "Please specify metric terminal value"
            assert abs(metric_value - metric_terminal_value) < metric_tolerance
        loaded_model = load_archive(archive_file,
                                    cuda_device=cuda_device).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        # First we make sure that the state dict (the parameters) are the same for both models.
        for key in state_keys:
            assert_allclose(
                model.state_dict()[key].cpu().numpy(),
                loaded_model.state_dict()[key].cpu().numpy(),
                err_msg=key,
            )
        params = Params.from_file(param_file, params_overrides=overrides)
        reader = DatasetReader.from_params(params["dataset_reader"])

        print("Reading with original model")
        model_dataset = reader.read(params["validation_data_path"])
        model_dataset.index_with(model.vocab)

        print("Reading with loaded model")
        loaded_dataset = reader.read(params["validation_data_path"])
        loaded_dataset.index_with(loaded_model.vocab)

        # Need to duplicate params because DataLoader.from_params will consume.
        data_loader_params = params["data_loader"]
        data_loader_params["shuffle"] = False
        data_loader_params2 = Params(
            copy.deepcopy(data_loader_params.as_dict()))

        data_loader = DataLoader.from_params(dataset=model_dataset,
                                             params=data_loader_params)
        data_loader2 = DataLoader.from_params(dataset=loaded_dataset,
                                              params=data_loader_params2)

        # We'll check that even if we index the dataset with each model separately, we still get
        # the same result out.
        model_batch = next(iter(data_loader))

        loaded_batch = next(iter(data_loader2))

        # Check gradients are None for non-trainable parameters and check that
        # trainable parameters receive some gradient if they are trainable.
        self.check_model_computes_gradients_correctly(model, model_batch,
                                                      gradients_to_ignore,
                                                      disable_dropout)

        # The datasets themselves should be identical.
        assert model_batch.keys() == loaded_batch.keys()
        for key in model_batch.keys():
            self.assert_fields_equal(model_batch[key], loaded_batch[key], key,
                                     1e-6)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        # Models with stateful RNNs need their states reset to have consistent
        # behavior after loading.
        for model_ in [model, loaded_model]:
            for module in model_.modules():
                if hasattr(module, "stateful") and module.stateful:
                    module.reset_states()
        print("Predicting with original model")
        model_predictions = model(**model_batch)
        print("Predicting with loaded model")
        loaded_model_predictions = loaded_model(**loaded_batch)

        # Check loaded model's loss exists and we can compute gradients, for continuing training.
        loaded_model_loss = loaded_model_predictions["loss"]
        assert loaded_model_loss is not None
        loaded_model_loss.backward()

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(model_predictions[key],
                                     loaded_model_predictions[key],
                                     name=key,
                                     tolerance=tolerance)

        return model, loaded_model
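
A typical call of this method from a test class built on AllenNLP's ModelTestCase might look like the sketch below; the metric name and terminal value are illustrative:

from allennlp.common.testing import ModelTestCase


class MyModelTest(ModelTestCase):
    def test_model_can_train_save_and_load(self):
        # self.param_file is set by set_up_model() in setUp(); "accuracy" is a
        # hypothetical metric key reported by the model under test.
        self.ensure_model_can_train_save_and_load(
            self.param_file,
            metric_to_check="accuracy",
            metric_terminal_value=1.0,
            metric_tolerance=1e-2,
        )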
Code Example #29
File: train.py Project: mojesty/kaggle_tf2_qa
import os
import sys

from allennlp.commands.train import train_model_from_file
from allennlp.common.util import import_submodules

sys.path.append(os.path.abspath('..'))

if __name__ == '__main__':

    import_submodules('src')
    train_model_from_file('natural-questions-simplified-full.jsonnet',
                          '/home/emelyanov-yi/models/tf2_qa/init3',
                          force=True)
Code Example #30
File: test_case.py Project: leo-liuzy/neural_persona
    def ensure_model_can_train_save_and_load(
            self,
            param_file: str,
            tolerance: float = 1e-4,
            cuda_device: int = -1,
            gradients_to_ignore: Set[str] = None,
            overrides: str = ""):
        """
        Parameters
        ----------
        param_file : ``str``
            Path to a training configuration file that we will use to train the model for this
            test.
        tolerance : ``float``, optional (default=1e-4)
            When comparing model predictions between the originally-trained model and the model
            after saving and loading, we will use this tolerance value (passed as ``rtol`` to
            ``numpy.testing.assert_allclose``).
        cuda_device : ``int``, optional (default=-1)
            The device to run the test on.
        gradients_to_ignore : ``Set[str]``, optional (default=None)
            This test runs a gradient check to make sure that we're actually computing gradients
            for all of the parameters in the model.  If you really want to ignore certain
            parameters when doing that check, you can pass their names here.  This is not
            recommended unless you're `really` sure you don't need to have non-zero gradients for
            those parameters (e.g., some of the beam search / state machine models have
            infrequently-used parameters that are hard to force the model to use in a small test).
        overrides : ``str``, optional (default = "")
            A JSON string that we will use to override values in the input parameter file.
        """
        save_dir = self.TEST_DIR / "save_and_load_test"
        archive_file = save_dir / "model.tar.gz"
        model = train_model_from_file(param_file,
                                      save_dir,
                                      overrides=overrides)
        loaded_model = load_archive(archive_file,
                                    cuda_device=cuda_device).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        # First we make sure that the state dict (the parameters) are the same for both models.
        for key in state_keys:
            assert_allclose(model.state_dict()[key].cpu().numpy(),
                            loaded_model.state_dict()[key].cpu().numpy(),
                            err_msg=key)
        params = Params.from_file(param_file)

        # Need to duplicate params because DatasetReader.from_params will consume.
        reader_params = params['dataset_reader']
        reader_params2 = Params(copy.deepcopy(reader_params.as_dict()))

        reader = DatasetReader.from_params(reader_params)
        reader2 = DatasetReader.from_params(reader_params2)

        # Need to duplicate params because Iterator.from_params will consume.
        iterator_params = params['iterator']
        iterator_params2 = Params(copy.deepcopy(iterator_params.as_dict()))

        iterator = DataIterator.from_params(iterator_params)
        iterator2 = DataIterator.from_params(iterator_params2)

        # We'll check that even if we index the dataset with each model separately, we still get
        # the same result out.
        seed_params = Params({
            "random_seed": 5,
            "numpy_seed": 5,
            "pytorch_seed": 5
        })
        prepare_environment(seed_params)
        model_dataset = reader.read(params['validation_data_path'])
        iterator.index_with(model.vocab)
        model_batch = next(iterator(model_dataset, shuffle=False))

        seed_params = Params({
            "random_seed": 5,
            "numpy_seed": 5,
            "pytorch_seed": 5
        })
        prepare_environment(seed_params)
        loaded_dataset = reader2.read(params['validation_data_path'])
        iterator2.index_with(loaded_model.vocab)
        loaded_batch = next(iterator2(loaded_dataset, shuffle=False))

        # Check gradients are None for non-trainable parameters and check that
        # trainable parameters receive some gradient if they are trainable.
        self.check_model_computes_gradients_correctly(model, model_batch,
                                                      gradients_to_ignore)

        # The datasets themselves should be identical.
        assert model_batch.keys() == loaded_batch.keys()
        # import pdb; pdb.set_trace()
        for key in model_batch.keys():
            self.assert_fields_equal(model_batch[key], loaded_batch[key], key,
                                     1e-6)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        # Models with stateful RNNs need their states reset to have consistent
        # behavior after loading.
        for model_ in [model, loaded_model]:
            for module in model_.modules():
                if hasattr(module, 'stateful') and module.stateful:
                    module.reset_states()
        model_predictions = model(**model_batch)
        loaded_model_predictions = loaded_model(**loaded_batch)

        # Check loaded model's loss exists and we can compute gradients, for continuing training.
        loaded_model_loss = loaded_model_predictions["loss"]
        assert loaded_model_loss is not None
        loaded_model_loss.backward()

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(model_predictions[key],
                                     loaded_model_predictions[key],
                                     name=key,
                                     tolerance=tolerance)

        return model, loaded_model
Code Example #31
    def ensure_model_can_train_save_and_load(self,
                                             param_file: str,
                                             tolerance: float = 1e-4,
                                             cuda_device: int = -1):
        save_dir = os.path.join(self.TEST_DIR, "save_and_load_test")
        archive_file = os.path.join(save_dir, "model.tar.gz")
        model = train_model_from_file(param_file, save_dir)
        loaded_model = load_archive(archive_file, cuda_device=cuda_device).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        # First we make sure that the state dict (the parameters) are the same for both models.
        for key in state_keys:
            assert_allclose(model.state_dict()[key].cpu().numpy(),
                            loaded_model.state_dict()[key].cpu().numpy(),
                            err_msg=key)
        params = Params.from_file(param_file)
        reader = DatasetReader.from_params(params['dataset_reader'])

        # Need to duplicate params because Iterator.from_params will consume.
        iterator_params = params['iterator']
        iterator_params2 = Params(copy.deepcopy(iterator_params.as_dict()))

        iterator = DataIterator.from_params(iterator_params)
        iterator2 = DataIterator.from_params(iterator_params2)

        # We'll check that even if we index the dataset with each model separately, we still get
        # the same result out.
        model_dataset = reader.read(params['validation_data_path'])
        iterator.index_with(model.vocab)
        model_batch = next(iterator(model_dataset, shuffle=False, cuda_device=cuda_device))

        loaded_dataset = reader.read(params['validation_data_path'])
        iterator2.index_with(loaded_model.vocab)
        loaded_batch = next(iterator2(loaded_dataset, shuffle=False, cuda_device=cuda_device))

        # Check gradients are None for non-trainable parameters and check that
        # trainable parameters receive some gradient if they are trainable.
        self.check_model_computes_gradients_correctly(model, model_batch)

        # The datasets themselves should be identical.
        assert model_batch.keys() == loaded_batch.keys()
        for key in model_batch.keys():
            self.assert_fields_equal(model_batch[key], loaded_batch[key], key, 1e-6)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        # Models with stateful RNNs need their states reset to have consistent
        # behavior after loading.
        for model_ in [model, loaded_model]:
            for module in model_.modules():
                if hasattr(module, 'stateful') and module.stateful:
                    module.reset_states()
        model_predictions = model(**model_batch)
        loaded_model_predictions = loaded_model(**loaded_batch)

        # Check loaded model's loss exists and we can compute gradients, for continuing training.
        loaded_model_loss = loaded_model_predictions["loss"]
        assert loaded_model_loss is not None
        loaded_model_loss.backward()

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(model_predictions[key],
                                     loaded_model_predictions[key],
                                     name=key,
                                     tolerance=tolerance)

        return model, loaded_model
Code Example #32
    def ensure_model_can_train_save_and_load(
        self,
        param_file: Union[PathLike, str],
        tolerance: float = 1e-4,
        cuda_device: int = -1,
        gradients_to_ignore: Set[str] = None,
        overrides: str = "",
        metric_to_check: str = None,
        metric_terminal_value: float = None,
        metric_tolerance: float = 1e-4,
        disable_dropout: bool = True,
    ):
        save_dir = self.TEST_DIR / "save_and_load_test"
        archive_file = save_dir / "model.tar.gz"
        model = train_model_from_file(param_file, save_dir, overrides=overrides)
        metrics_file = save_dir / "metrics.json"
        if metric_to_check is not None:
            metrics = json.loads(metrics_file.read_text())
            metric_value = metrics.get(f"best_validation_{metric_to_check}") or metrics.get(
                f"training_{metric_to_check}"
            )
            assert metric_value is not None, f"Cannot find {metric_to_check} in metrics.json file"
            assert metric_terminal_value is not None, "Please specify metric terminal value"
            assert abs(metric_value - metric_terminal_value) < metric_tolerance
        loaded_model = load_archive(archive_file, cuda_device=cuda_device).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        for key in state_keys:
            assert_allclose(
                model.state_dict()[key].cpu().numpy(),
                loaded_model.state_dict()[key].cpu().numpy(),
                err_msg=key,
            )
        params = Params.from_file(param_file, params_overrides=overrides)
        reader = DatasetReader.from_params(params["dataset_reader"])

        print("Reading with original model")
        model_dataset = reader.read(params["validation_data_path"])

        print("Reading with loaded model")
        loaded_dataset = reader.read(params["validation_data_path"])

        data_loader_params = params["data_loader"]
        data_loader_params["shuffle"] = False
        data_loader_params2 = Params(copy.deepcopy(data_loader_params.as_dict()))

        data_loader2 = DataLoader.from_params(dataset=loaded_dataset, params=data_loader_params2)

        model_batch = next(iter(data_loader))

        loaded_batch = next(iter(data_loader2))

        self.check_model_computes_gradients_correctly(
            model, model_batch, gradients_to_ignore, disable_dropout
        )

        for key in model_batch.keys():
            self.assert_fields_equal(model_batch[key], loaded_batch[key], key, 1e-6)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        # Models with stateful RNNs need their states reset to have consistent
        # behavior after loading.
        for model_ in [model, loaded_model]:
            for module in model_.modules():
                if hasattr(module, "stateful") and module.stateful:
                    module.reset_states()
        print("Predicting with original model")
        model_predictions = model(**model_batch)
        print("Predicting with loaded model")
        loaded_model_predictions = loaded_model(**loaded_batch)

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(
                model_predictions[key], loaded_model_predictions[key], name=key, tolerance=tolerance
            )

        return model, loaded_model
Code Example #33
    def ensure_model_can_train_save_and_load(self,
                                             param_file: str,
                                             tolerance: float = 1e-4,
                                             cuda_device: int = -1,
                                             gradients_to_ignore: Set[str] = None,
                                             overrides: str = ""):
        """
        Parameters
        ----------
        param_file : ``str``
            Path to a training configuration file that we will use to train the model for this
            test.
        tolerance : ``float``, optional (default=1e-4)
            When comparing model predictions between the originally-trained model and the model
            after saving and loading, we will use this tolerance value (passed as ``rtol`` to
            ``numpy.testing.assert_allclose``).
        cuda_device : ``int``, optional (default=-1)
            The device to run the test on.
        gradients_to_ignore : ``Set[str]``, optional (default=None)
            This test runs a gradient check to make sure that we're actually computing gradients
            for all of the parameters in the model.  If you really want to ignore certain
            parameters when doing that check, you can pass their names here.  This is not
            recommended unless you're `really` sure you don't need to have non-zero gradients for
            those parameters (e.g., some of the beam search / state machine models have
            infrequently-used parameters that are hard to force the model to use in a small test).
        overrides : ``str``, optional (default = "")
            A JSON string that we will use to override values in the input parameter file.
        """
        save_dir = self.TEST_DIR / "save_and_load_test"
        archive_file = save_dir / "model.tar.gz"
        model = train_model_from_file(param_file, save_dir, overrides=overrides)
        loaded_model = load_archive(archive_file, cuda_device=cuda_device).model
        state_keys = model.state_dict().keys()
        loaded_state_keys = loaded_model.state_dict().keys()
        assert state_keys == loaded_state_keys
        # First we make sure that the state dict (the parameters) are the same for both models.
        for key in state_keys:
            assert_allclose(model.state_dict()[key].cpu().numpy(),
                            loaded_model.state_dict()[key].cpu().numpy(),
                            err_msg=key)
        params = Params.from_file(param_file)
        reader = DatasetReader.from_params(params['dataset_reader'])

        # Need to duplicate params because Iterator.from_params will consume.
        iterator_params = params['iterator']
        iterator_params2 = Params(copy.deepcopy(iterator_params.as_dict()))

        iterator = DataIterator.from_params(iterator_params)
        iterator2 = DataIterator.from_params(iterator_params2)

        # We'll check that even if we index the dataset with each model separately, we still get
        # the same result out.
        model_dataset = reader.read(params['validation_data_path'])
        iterator.index_with(model.vocab)
        model_batch = next(iterator(model_dataset, shuffle=False))

        loaded_dataset = reader.read(params['validation_data_path'])
        iterator2.index_with(loaded_model.vocab)
        loaded_batch = next(iterator2(loaded_dataset, shuffle=False))

        # Check gradients are None for non-trainable parameters and check that
        # trainable parameters receive some gradient if they are trainable.
        self.check_model_computes_gradients_correctly(model, model_batch, gradients_to_ignore)

        # The datasets themselves should be identical.
        assert model_batch.keys() == loaded_batch.keys()
        for key in model_batch.keys():
            self.assert_fields_equal(model_batch[key], loaded_batch[key], key, 1e-6)

        # Set eval mode, to turn off things like dropout, then get predictions.
        model.eval()
        loaded_model.eval()
        # Models with stateful RNNs need their states reset to have consistent
        # behavior after loading.
        for model_ in [model, loaded_model]:
            for module in model_.modules():
                if hasattr(module, 'stateful') and module.stateful:
                    module.reset_states()
        model_predictions = model(**model_batch)
        loaded_model_predictions = loaded_model(**loaded_batch)

        # Check loaded model's loss exists and we can compute gradients, for continuing training.
        loaded_model_loss = loaded_model_predictions["loss"]
        assert loaded_model_loss is not None
        loaded_model_loss.backward()

        # Both outputs should have the same keys and the values for these keys should be close.
        for key in model_predictions.keys():
            self.assert_fields_equal(model_predictions[key],
                                     loaded_model_predictions[key],
                                     name=key,
                                     tolerance=tolerance)

        return model, loaded_model
Code Example #34
File: event2mind_test.py Project: apmoore1/allennlp
 def setUp(self):
     super().setUp()
     self.set_up_model(self.FIXTURES_ROOT / "event2mind" / "experiment.json",
                       self.FIXTURES_ROOT / "data" / "event2mind_medium.csv")
     save_dir = self.TEST_DIR / "trained_model_tests"
     self.trained_model = train_model_from_file(self.param_file, save_dir)