Example #1
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
from transformers import RobertaConfig, RobertaForMaskedLM, RobertaTokenizerFast

# Train a byte-level BPE tokenizer. The corpus path and vocab size below are
# assumptions; the snippet uses them but did not show their values.
paths = ["corpus.txt"]
vocab_size = 52_000
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(files=paths, vocab_size=vocab_size, min_frequency=2, special_tokens=[
    "<s>",
    "<pad>",
    "</s>",
    "<unk>",
    "<mask>",
])
# Save files to disk
tokenizer.save_model("BR_BERTo")
# Test
tokenizer = ByteLevelBPETokenizer(
    "./BR_BERTo/vocab.json",
    "./BR_BERTo/merges.txt",
)
tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)
print(tokenizer.encode("gostei muito dessa ideia".lower()).tokens)

# Model type
# --------------------------------------------------
config = RobertaConfig(
    vocab_size=vocab_size,
    max_position_embeddings=514,
    num_attention_heads=12,
    num_hidden_layers=8,
    type_vocab_size=1,
)
model = RobertaForMaskedLM(config=config)
print("Params: ", model.num_parameters())
tokenizer = RobertaTokenizerFast.from_pretrained("./BR_BERTo", model_max_length=512)

# Dataset load
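The snippet stops at the dataset step; a minimal continuation sketch, assuming the legacy LineByLineTextDataset helper and a hypothetical corpus file (the path and block_size are placeholders):

from transformers import DataCollatorForLanguageModeling, LineByLineTextDataset

dataset = LineByLineTextDataset(tokenizer=tokenizer,
                                file_path="corpus.txt",  # hypothetical corpus
                                block_size=128)          # assumed block size
# Dynamic masking for the masked-language-modeling objective
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer,
                                                mlm=True,
                                                mlm_probability=0.15)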
Example #2
import os
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing

# Train a byte-level BPE tokenizer; the training file path is a hypothetical
# stand-in for the script's real input
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(files=[os.path.join(args.data_dir, "train.txt")],
                vocab_size=52_000,
                min_frequency=2,
                special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"])

# Need to save it to model dir for inference
tokenizer.save_model(args.model_dir)  # writes the vocab.json/merges.txt pair loaded below

tokenizer = ByteLevelBPETokenizer(os.path.join(args.model_dir, "vocab.json"),
                                  os.path.join(args.model_dir, "merges.txt"))

tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")))
tokenizer.enable_truncation(max_length=args.token_max_len)

print(tokenizer.encode("Nay, but speak not."))
print(tokenizer.encode("Nay, but speak not.").tokens)

from transformers import RobertaConfig

config = RobertaConfig(vocab_size=args.vocab_size,
                       max_position_embeddings=args.max_position_embeddings,
                       num_attention_heads=args.num_attention_heads,
                       num_hidden_layers=args.num_hidden_layers,
                       type_vocab_size=args.type_vocab_size)

from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained(args.model_dir,
                                                 model_max_length=args.token_max_len)
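As in Example #1, the config would presumably be used to build the masked-LM model next; a minimal sketch:

from transformers import RobertaForMaskedLM

model = RobertaForMaskedLM(config=config)
print("Params:", model.num_parameters())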
Example #3
# -*- coding:utf-8 -*-
import os
from argparse import ArgumentParser
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import RobertaProcessing

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--token_path",
                        type=str,
                        nargs='?',
                        required=True,
                        help="")
    args = parser.parse_args()

    inputpath = args.token_path
    tokenizer = ByteLevelBPETokenizer(os.path.join(inputpath, "vocab.json"),
                                      os.path.join(inputpath, "merges.txt"),
                                      add_prefix_space=True,
                                      trim_offsets=True,
                                      lowercase=True,
                                      unicode_normalizer="nfkc")
    tokenizer._tokenizer.post_processor = RobertaProcessing(
        ("</s>", tokenizer.token_to_id("</s>")),
        ("<s>", tokenizer.token_to_id("<s>")),
        trim_offsets=True,
        add_prefix_space=True)
    tokenizer.enable_truncation(max_length=512)
    tokens = tokenizer.encode("I am Julien\nI am from China.").tokens
    print([x.encode('utf-8') for x in tokens])
Example #4
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing

path = "./model"

tokenizer = ByteLevelBPETokenizer(
    "{path}/vocab.json".format(path=path),
    "{path}/merges.txt".format(path=path),
)

tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)

tokenizer.enable_truncation(max_length=512)

tokenizer.encode("Dette er første testen.")

tokens = tokenizer.encode("Dette er første testen.").tokens

print(tokens)
Example #5
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
import os.path
import sys
ROOT_DIRECTORY = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(ROOT_DIRECTORY)

tokenizer = ByteLevelBPETokenizer(
    os.path.join(ROOT_DIRECTORY, "models/en_cycl_tokenizer/vocab.json"),
    os.path.join(ROOT_DIRECTORY, "models/en_cycl_tokenizer/merges.txt"),
)
tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)
token_to_encode = "(() ((#$isa #$McCoyTyner-Musician #$Individual)))"
encoded_token = tokenizer.encode(token_to_encode)
print(encoded_token)
print(encoded_token.tokens)

token_to_encode = "Pair of scissors is marketed as office product."
encoded_token = tokenizer.encode(token_to_encode)
print(encoded_token)
print(encoded_token.tokens)
Example #6
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing


tokenizer = ByteLevelBPETokenizer(
    "./EsperBERTo/vocab.json",
    "./EsperBERTo/merges.txt",
)

tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)

tokenizer.encode("Mi estas Julien.")

tokenizer.encode("Mi estas Julien.").tokens

"""## 3. Train a language model from scratch

**Update:** This section follows along the [`run_language_modeling.py`](https://github.com/huggingface/transformers/blob/master/examples/legacy/run_language_modeling.py) script, using our new [`Trainer`](https://github.com/huggingface/transformers/blob/master/src/transformers/trainer.py) directly. Feel free to pick the approach you like best.

> We’ll train a RoBERTa-like model, which is a BERT-like model with a couple of changes (check the [documentation](https://huggingface.co/transformers/model_doc/roberta.html) for more details).

As the model is BERT-like, we’ll train it on a task of *Masked language modeling*, i.e., predicting how to fill in arbitrary tokens that we randomly mask in the dataset. This is taken care of by the example script.

"""

# Check that we have a GPU
!nvidia-smi
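The notebook text above defers the training loop to the example script; a minimal Trainer-based sketch, assuming model, tokenizer (reloaded as a RobertaTokenizerFast, as in Example #1), and a dataset built as in the earlier steps, with placeholder hyperparameters:

from transformers import (DataCollatorForLanguageModeling, Trainer,
                          TrainingArguments)

# Mask 15% of tokens per batch for the MLM objective
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer,
                                                mlm=True,
                                                mlm_probability=0.15)

training_args = TrainingArguments(
    output_dir="./EsperBERTo",        # placeholder output directory
    num_train_epochs=1,               # placeholder epoch count
    per_device_train_batch_size=64,   # placeholder batch size
    save_steps=10_000,
)

trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=dataset,
)
trainer.train()
trainer.save_model("./EsperBERTo")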
Example #7
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing

tokenizer = ByteLevelBPETokenizer(
    "./bert-tokenizer/vocab.json",
    "./bert-tokenizer/merges.txt",
)

tokenizer._tokenizer.post_processor = BertProcessing(
    ("</s>", tokenizer.token_to_id("</s>")),
    ("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)

### Testing

print(tokenizer.encode('TOBB ETU NLP&IR team').tokens)

Example #8
import os
from tokenizers.implementations import ByteLevelBPETokenizer
from transformers import (BertConfig, BertForMaskedLM, LineByLineTextDataset,
                          RobertaTokenizerFast)

PATH = os.getcwd()
SAVE_MODEL = os.getcwd()

tokenizer = ByteLevelBPETokenizer()
tokenizer.train(files="kant.txt",
                vocab_size=52_000,
                min_frequency=2,
                special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.save_model(SAVE_MODEL)
tokenizer = ByteLevelBPETokenizer(
    SAVE_MODEL + "/vocab.json",
    SAVE_MODEL + "/merges.txt",
)

tokenizer.enable_truncation(max_length=512)
print(tokenizer.encode("For it is in reality vain to profess"))

config = BertConfig(
    vocab_size=52_000,
    max_position_embeddings=514,
    num_attention_heads=12,
    num_hidden_layers=6,
    type_vocab_size=1,
)

# BertTokenizer cannot read the vocab.json/merges.txt pair saved above, so the
# byte-level BPE vocabulary is loaded with RobertaTokenizerFast, remapping the
# BERT-style special tokens it was trained with
tokenizer = RobertaTokenizerFast.from_pretrained(SAVE_MODEL,
                                                 model_max_length=512,
                                                 unk_token="[UNK]",
                                                 cls_token="[CLS]",
                                                 sep_token="[SEP]",
                                                 pad_token="[PAD]",
                                                 mask_token="[MASK]")
model = BertForMaskedLM(config=config)

print(model.num_parameters())

# Complete the truncated call; block_size is an assumption
dataset = LineByLineTextDataset(tokenizer=tokenizer,
                                file_path="kant.txt",
                                block_size=128)