Example #1
import gzip
import os

import torch

# Imports needed to run this example; module paths follow AllenNLP and may
# differ slightly between versions.
from allennlp.common.checks import ConfigurationError
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, Vocabulary
from allennlp.modules.elmo import _ElmoCharacterEncoder

def main(
    vocab_path: str,
    elmo_config_path: str,
    elmo_weights_path: str,
    output_dir: str,
    batch_size: int,
    device: int,
    use_custom_oov_token: bool = False,
):
    """
    Creates ELMo word representations from a vocabulary file. These
    word representations are _independent_ - they are the result of running
    the CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
    ELMo requires 2 additional tokens: <S> and </S>. The first token
    in this file is assumed to be an unknown token.

    This script produces two artifacts: a new vocabulary file
    with the <S> and </S> tokens inserted, and a GloVe-formatted embedding
    file containing word : vector pairs, one per line, with all values
    separated by a space.
    """

    # Load the vocabulary words and convert to char ids
    with open(vocab_path, "r") as vocab_file:
        tokens = vocab_file.read().strip().split("\n")

    # Insert the sentence boundary tokens which ELMo uses at positions 1 and 2.
    if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
        raise ConfigurationError(
            "ELMo embeddings require the use of an OOV token.")

    tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]

    indexer = ELMoTokenCharactersIndexer()
    # The indexer returns the per-word character ids under the "elmo_tokens" key.
    indices = indexer.tokens_to_indices([Token(token) for token in tokens],
                                        Vocabulary())["elmo_tokens"]

    # Group the words into pseudo-sentences of 50 tokens each and pad every
    # group to exactly 50 entries before tensorizing.
    sentences = []
    for k in range((len(indices) // 50) + 1):
        sentences.append(
            indexer.as_padded_tensor_dict(
                {"elmo_tokens": indices[(k * 50):((k + 1) * 50)]},
                padding_lengths={"elmo_tokens": 50})["elmo_tokens"])

    # Number of padding entries in the final 50-token group; the corresponding
    # embeddings are stripped from the last batch below.
    last_batch_remainder = 50 - (len(indices) % 50)
    if device != -1:
        elmo_token_embedder = _ElmoCharacterEncoder(
            elmo_config_path, elmo_weights_path).cuda(device)
    else:
        elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path,
                                                    elmo_weights_path)

    all_embeddings = []
    # Ceil-divide so the final partial batch is covered without producing an
    # empty batch when len(sentences) is an exact multiple of batch_size.
    for i in range((len(sentences) + batch_size - 1) // batch_size):
        batch = torch.stack(sentences[i * batch_size:(i + 1) * batch_size])
        if device != -1:
            batch = batch.cuda(device)

        token_embedding = elmo_token_embedder(batch)["token_embedding"].data

        # Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
        # We also need to remove the <S>, </S> tokens appended by the encoder.
        per_word_embeddings = (token_embedding[:, 1:-1, :].contiguous().view(
            -1, token_embedding.size(-1)))

        all_embeddings.append(per_word_embeddings)

    # Remove the embeddings associated with padding in the last batch.
    all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]

    embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()

    # Write out the embedding in a glove format.
    os.makedirs(output_dir, exist_ok=True)
    with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"),
                   "wb") as embeddings_file:
        for i, word in enumerate(tokens):
            string_array = " ".join(
                str(x) for x in list(embedding_weight[i, :]))
            embeddings_file.write(f"{word} {string_array}\n".encode("utf-8"))

    # Write out the new vocab with the <S> and </S> tokens.
    _, vocab_file_name = os.path.split(vocab_path)
    with open(os.path.join(output_dir, vocab_file_name),
              "w") as new_vocab_file:
        for word in tokens:
            new_vocab_file.write(f"{word}\n")
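
For completeness, here is a minimal sketch of how this script might be invoked from the command line. The argparse wrapper is added for illustration and is not part of the original script; the flag names simply mirror the parameters of main() above, and the defaults are arbitrary.

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Create context-independent ELMo word representations "
                    "from a vocabulary file.")
    parser.add_argument("--vocab_path", required=True)
    parser.add_argument("--elmo_config_path", required=True)
    parser.add_argument("--elmo_weights_path", required=True)
    parser.add_argument("--output_dir", required=True)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--device", type=int, default=-1,
                        help="CUDA device id, or -1 to run on CPU.")
    parser.add_argument("--use_custom_oov_token", action="store_true")
    args = parser.parse_args()

    main(args.vocab_path, args.elmo_config_path, args.elmo_weights_path,
         args.output_dir, args.batch_size, args.device,
         args.use_custom_oov_token)

The resulting elmo_embeddings.txt.gz can be read back with gzip.open, splitting each line on whitespace into a word and its vector, exactly as the docstring describes.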
Example #2
    def test_elmo_as_array_produces_token_sequence(self):
        indexer = ELMoTokenCharactersIndexer()
        tokens = [Token("Second"), Token(".")]
        indices = indexer.tokens_to_indices(tokens, Vocabulary())
        padded_tokens = indexer.as_padded_tensor_dict(
            indices, padding_lengths={"elmo_tokens": 3})
        expected_padded_tokens = [
            # "Second": begin-of-word marker (259), one id per UTF-8 byte (+1),
            # end-of-word marker (260), then intra-word padding (261) out to
            # the 50-character maximum word length.
            [259, 84, 102, 100, 112, 111, 101, 260] + [261] * 42,
            # ".": the same layout for a single-character token.
            [259, 47, 260] + [261] * 47,
            # The third position is sequence-level padding: all zeros.
            [0] * 50,
        ]

        assert padded_tokens["elmo_tokens"].tolist() == expected_padded_tokens
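
To make the expected values above less opaque, the short standalone sketch below rebuilds those rows from plain UTF-8 bytes. The constants are inferred from the test output itself (259/260 as begin/end-of-word markers, 261 as intra-word padding, each character encoded as its UTF-8 byte value plus one), and the helper name char_ids_for_word is introduced here for illustration only; it is not an AllenNLP API.

def char_ids_for_word(word: str, max_word_length: int = 50) -> list:
    # Begin-of-word and end-of-word markers bracket the character ids; the
    # remainder of the 50-slot word is filled with the intra-word padding id.
    begin_of_word, end_of_word, padding = 259, 260, 261
    ids = [begin_of_word] + [b + 1 for b in word.encode("utf-8")] + [end_of_word]
    return ids + [padding] * (max_word_length - len(ids))


# These match the first two rows of expected_padded_tokens in the test above;
# the all-zero third row comes from padding the two-token sequence out to the
# requested length of 3 and is not produced per word.
assert char_ids_for_word("Second") == [259, 84, 102, 100, 112, 111, 101, 260] + [261] * 42
assert char_ids_for_word(".") == [259, 47, 260] + [261] * 47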