def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained(
            "facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M",
                                                    src_lang="fr",
                                                    tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The batch below checks that we don't add any hypotheses outside of the top num_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S. Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all communications in France.",
        ]

        generated = tokenizer.batch_decode(hypotheses_batch.tolist(),
                                           clean_up_tokenization_spaces=True,
                                           skip_special_tokens=True)
        assert generated == expected_en
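
For reference, the translation pattern exercised by the test above can also be used directly. The following is a minimal sketch of the standard Hugging Face M2M-100 usage (the checkpoint and language codes are only examples):

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

# Encode a French sentence and force the decoder to start with the English language token
encoded = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))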
Example #2
 def load_model(self):
     """
     加载模型
     :return:
     """
     app.logger.info(f"开始加载模型")
     model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)
     model.to(self.device)
     self.tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
     self.model = model
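
A loader like this is typically paired with a translate method. The sketch below is hypothetical (the method name and language-code arguments are not part of the original example) and only illustrates how self.model, self.tokenizer and self.device might be used together:

 def translate(self, text, src_lang, tgt_lang):
     """
     Hypothetical companion method: translate text from src_lang to tgt_lang.
     """
     self.tokenizer.src_lang = src_lang
     encoded = self.tokenizer(text, return_tensors="pt").to(self.device)
     generated = self.model.generate(
         **encoded,
         forced_bos_token_id=self.tokenizer.get_lang_id(tgt_lang))
     return self.tokenizer.batch_decode(generated, skip_special_tokens=True)[0]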
Example #3
    def setUp(self):
        super().setUp()

        vocab = [
            "</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120",
            "<pad>"
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
Example #4
def load(args):
    # The print below is not essential and may be removed later
    print('loading M2M-100 model')
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    # Earlier BERT-based variant, kept commented out for reference:
    # tokenizer = BertTokenizer.from_pretrained(args.m2m100_model, do_lower_case=True, cache_dir=args.cache_dir)
    # model = BertModel.from_pretrained(args.m2m100_model, cache_dir=args.cache_dir)
    model = M2M100Model.from_pretrained('facebook/m2m100_418M')
    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M')
    model.to(device)
    if args.num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model.eval()
    return model, tokenizer, device
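
Note that this loader returns the bare M2M100Model, which has no language-modeling head, so it is intended for extracting hidden states rather than generating translations. A hedged sketch of such a forward pass, assuming a suitable args namespace for the load(args) above (sentence and language code are illustrative only):

import torch

model, tokenizer, device = load(args)
tokenizer.src_lang = "fr"
encoded = tokenizer("Bonjour le monde", return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(
        input_ids=encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        decoder_input_ids=encoded["input_ids"],  # placeholder decoder inputs, for illustration only
    )
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)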
def preprocess(args):
    # Load mBERT to generate attention output for 828I Multilingual Project
    bert = BertModel.from_pretrained('bert-base-multilingual-uncased')
    for param in bert.parameters():
        param.requires_grad = False
    bert_tokenizer = BertTokenizer.from_pretrained(
        'bert-base-multilingual-uncased')
    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M')
    bert.to('cuda')
    bert.eval()
    examples = read_examples(
        args.train_file, 3000, 500
    )  # default numbers of labeled and unlabeled chunks to consider, taken from https://aclweb.org/anthology/D18-1179
    features = convert_examples_to_features(
        examples=examples,
        seq_length=2 + get_max_seq_length(examples, tokenizer),
        tokenizer=tokenizer,
        bert=bert,
        bert_tokenizer=bert_tokenizer)
    chunk_spans = get_chunk_spans(examples, features)

    # extract and write features
    all_input_ids = torch.tensor([f.input_ids for f in features],
                                 dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features],
                                  dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
    all_bert_attention_output = torch.vstack(
        [f.bert_attention_output for f in features])
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index,
                              all_bert_attention_output)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data,
                                 sampler=eval_sampler,
                                 batch_size=args.batch_size)

    # These objects are probably no longer needed at this point
    del bert
    del bert_tokenizer
    del tokenizer

    return examples, features, chunk_spans, eval_dataloader
Example #6
    def load(self, path):
        """
        Loads a model specified by path.

        Args:
            path: model path

        Returns:
            (model, tokenizer)
        """

        if path.startswith("Helsinki-NLP"):
            model = MarianMTModel.from_pretrained(path)
            tokenizer = MarianTokenizer.from_pretrained(path)
        else:
            model = M2M100ForConditionalGeneration.from_pretrained(path)
            tokenizer = M2M100Tokenizer.from_pretrained(path)

        # Apply model initialization routines
        model = self.prepare(model)

        return (model, tokenizer)
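
Hypothetical usage of the loader above (the pipeline variable name is illustrative; only the two branches shown in the code are assumed):

# Marian branch
model, tokenizer = pipeline.load("Helsinki-NLP/opus-mt-fr-en")
# M2M-100 branch
model, tokenizer = pipeline.load("facebook/m2m100_418M")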
def load(args):
    print('loading model')
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    # Load M2M-100 model
    config = M2M100Config.from_pretrained("facebook/m2m100_418M")
    config.method = 1
    m2m = M2M100ForConditionalGeneration.from_pretrained(
        "facebook/m2m100_418M", config=config)
    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M')
    # Build Fused Model and load parameters from local checkpoint
    model = FusedM2M(config, None, m2m)
    state_dict = torch.load(args.checkpoint)
    state_dict = {k: v
                  for k, v in state_dict.items()
                  if 'fuse' in k}  # keep only the fusion linear-layer weights
    model.load_state_dict(state_dict, strict=False)
    model = model.model  # Take the M2M100Model from M2M100ForConditionalGeneration

    model.to(device)
    if args.num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model.eval()
    return model, tokenizer, device
 def default_tokenizer(self):
     return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
Example #9
 def get_tokenizer(self, **kwargs):
     return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
Example #10
 def test_special_tokens_unaffected_by_save_load(self):
     tmpdirname = tempfile.mkdtemp()
     original_special_tokens = self.tokenizer.lang_token_to_id
     self.tokenizer.save_pretrained(tmpdirname)
     new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
     self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
Example #11
 def setUpClass(cls):
     cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
         cls.checkpoint_name, src_lang="en", tgt_lang="fr")
     cls.pad_token_id = 1
     return cls
Example #12
parser.add_argument("--do_train", action='store_true')
parser.add_argument("--do_eval", action='store_true')
parser.add_argument("--do_generate", action='store_true')

args = parser.parse_args()

# Load dataset
# We are using the 'wmt20_mlqe_task1' dataset with the 'si-en' configuration
raw_datasets = load_dataset(args.dataset_name, args.dataset_arg)

# Preprocess data
max_source_length = args.max_source_length
max_target_length = args.max_target_length
source_lang = args.source_lang
target_lang = args.target_lang
m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
m2m_tokenizer.src_lang = source_lang
m2m_tokenizer.tgt_lang = target_lang
bert_type = args.bert_type
bert_tokenizer = BertTokenizer.from_pretrained(bert_type)
max_input_length_bert = 51  # obtained by tokenizing the inputs with BERT
fuse_method = args.fuse_method
checkpoint = args.checkpoint

# Load BERT for preprocessing the BERT attention output
bert = BertModel.from_pretrained(bert_type)
for para in bert.parameters():
    para.requires_grad = False


def preprocess(examples):
Example #13
    def get_tokenizer(self, save_dir, config, src_lang, tgt_lang):
        tokenizer_args = {
            'do_lower_case': False,
            'do_basic_tokenize': False,
            'cache_dir': self._cache,
            'use_fast': self._use_fast(),
            'src_lang': src_lang,
            'tgt_lang': tgt_lang
        }
        if save_dir is not None:
            tokenizer_args.update({
                'pretrained_model_name_or_path': save_dir,
                'config': config
            })
        else:
            tokenizer_args.update(
                {'pretrained_model_name_or_path': self._pretrained_name})

        model_is_marian = isinstance(config, MarianConfig)
        model_is_mbart = isinstance(config, MBartConfig)
        model_is_m2m100 = isinstance(config, M2M100Config)
        model_is_t5 = isinstance(config, T5Config)

        # hack until huggingface provides mbart50 config
        if model_is_mbart and 'mbart-50' in config.name_or_path:
            self._tokenizer = MBart50Tokenizer.from_pretrained(
                **tokenizer_args)
        elif model_is_m2m100:
            self._tokenizer = M2M100Tokenizer.from_pretrained(**tokenizer_args)
        else:
            self._tokenizer = AutoTokenizer.from_pretrained(**tokenizer_args)

        # some tokenizers, like MBart, do not set src_lang and tgt_lang when initialized; take care of it here
        self._tokenizer.src_lang = src_lang
        self._tokenizer.tgt_lang = tgt_lang

        # define input prefix to add before every input text
        input_prefix = ''
        if model_is_marian and tgt_lang:
            input_prefix = f'>>{tgt_lang}<< '
        elif model_is_t5:
            t5_task = f'translation_{src_lang}_to_{tgt_lang}'
            # TODO add support for summarization
            # t5_task = 'summarization'
            input_prefix = config.task_specific_params[t5_task]['prefix']

        self.input_prefix = input_prefix

        # We only include the base tokenizers since `isinstance` checks for inheritance
        if isinstance(self._tokenizer, (BertTokenizer, BertTokenizerFast)):
            self._tokenizer.is_piece_fn = lambda wp: wp.startswith('##')
        elif isinstance(self._tokenizer,
                        (XLMRobertaTokenizer, XLMRobertaTokenizerFast,
                         MarianTokenizer, M2M100Tokenizer)):
            self._tokenizer.is_piece_fn = lambda wp: not wp.startswith(
                SPIECE_UNDERLINE)
        elif isinstance(self._tokenizer, (GPT2Tokenizer, GPT2TokenizerFast)):
            self._tokenizer.is_piece_fn = lambda wp: not wp.startswith('Ġ')

        # make sure we assigned is_piece_fn
        assert self._tokenizer.is_piece_fn
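
To make the purpose of is_piece_fn concrete, here is a hypothetical helper (not part of the original code) that uses it to regroup subword pieces into words; is_piece_fn(token) is expected to return True when the token continues the previous word:

def group_pieces_into_words(tokens, is_piece_fn):
    words = []
    for token in tokens:
        if words and is_piece_fn(token):
            words[-1].append(token)  # continuation piece: attach to the previous word
        else:
            words.append([token])    # first piece of a new word
    return words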
Example #14
import argparse
from tqdm import tqdm

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_1.2B")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_1.2B")

model.to('cuda')

parser = argparse.ArgumentParser(description='Argument Parser for M2M-100')

parser.add_argument('--data', type=str)
parser.add_argument('--src', type=str)
parser.add_argument('--tgt', type=str)
parser.add_argument('--BATCH_SIZE', type=int)

args = parser.parse_args()
batch = []

data = args.data
src = args.src
tgt = args.tgt
BATCH_SIZE = args.BATCH_SIZE

with open(f'./{src}-{tgt}/{data}/test.{src}', 'r') as f:
    src_lines = f.readlines()

tgt_lines = []
for i in tqdm(range(0, len(src_lines), BATCH_SIZE)):
    if i + BATCH_SIZE < len(src_lines):