import torch

from transformers import M2M100Config, M2M100ForConditionalGeneration


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    # remove_ignore_keys_ and make_linear_from_emb are helpers defined alongside
    # this function in the original checkpoint-conversion script.
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model
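A minimal sketch of how this converter might be driven from the command line. The argument names and the save_pretrained call below are illustrative assumptions, not part of the snippet above:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Both arguments are placeholders for illustration.
    parser.add_argument("fairseq_path", type=str, help="Path to the fairseq M2M-100 checkpoint (.pt).")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Where to write the converted model.")
    args = parser.parse_args()

    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)

Example #2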
    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained(
            "facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M",
                                                    src_lang="fr",
                                                    tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The inputs below test that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S. Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all communications in France.",
        ]

        generated = tokenizer.batch_decode(hypotheses_batch.tolist(),
                                           clean_up_tokenization_spaces=True,
                                           skip_special_tokens=True)
        assert generated == expected_en
Example #3
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
Example #4
    def __init__(self,
                 config: PretrainedConfig,
                 bert: BertModel = None,
                 m2m: M2M100ForConditionalGeneration = None,
                 path: str = None,
                 bert_input=None):
        super().__init__(config)
        self.bert = bert  # TODO: don't need
        self.m2m = m2m
        if m2m is not None:
            self.model = m2m.model
            self.base_model = m2m.base_model
        self.fuse_layer_path = path
        self.bert_input = bert_input

        if self.bert_input:
            if self.fuse_layer_path:
                m2m.load_state_dict(torch.load(self.fuse_layer_path))
Example #5
    def load_model(self):
        """
        Load the model.
        :return:
        """
        app.logger.info("Starting to load the model")
        model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)
        model.to(self.device)
        self.tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
        self.model = model
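
A hypothetical companion method for this loader class, shown here only as a usage sketch (it is not part of the original example): it translates one string with the model and tokenizer loaded above, following the same generate pattern used elsewhere on this page.

    def translate(self, text, src_lang="en", tgt_lang="fr"):
        # Hypothetical helper, not in the original class.
        self.tokenizer.src_lang = src_lang
        encoded = self.tokenizer(text, return_tensors="pt").to(self.device)
        generated = self.model.generate(
            **encoded, forced_bos_token_id=self.tokenizer.get_lang_id(tgt_lang))
        return self.tokenizer.batch_decode(generated, skip_special_tokens=True)[0]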
Example #6
    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
Example #7
    def load(self, path):
        """
        Loads a model specified by path.

        Args:
            path: model path

        Returns:
            (model, tokenizer)
        """

        if path.startswith("Helsinki-NLP"):
            model = MarianMTModel.from_pretrained(path)
            tokenizer = MarianTokenizer.from_pretrained(path)
        else:
            model = M2M100ForConditionalGeneration.from_pretrained(path)
            tokenizer = M2M100Tokenizer.from_pretrained(path)

        # Apply model initialization routines
        model = self.prepare(model)

        return (model, tokenizer)
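Example #8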
def load(args):
    print('loading model')
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    # Load M2M-100 model
    config = M2M100Config.from_pretrained("facebook/m2m100_418M")
    config.method = 1
    m2m = M2M100ForConditionalGeneration.from_pretrained(
        "facebook/m2m100_418M", config=config)
    tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M')
    # Build Fused Model and load parameters from local checkpoint
    model = FusedM2M(config, None, m2m)
    state_dict = torch.load(args.checkpoint)
    state_dict = {k: v
                  for k, v in state_dict.items()
                  if 'fuse' in k}  # load linear layer only
    model.load_state_dict(state_dict, strict=False)
    model = model.model  # Take the M2M100Model from M2M100ForConditionalGeneration

    model.to(device)
    if args.num_gpus > 1:
        model = torch.nn.DataParallel(model)
    model.eval()
    return model, tokenizer, device
Example #9
def filter_none(example):
    # Keep only rows where both sides of the translation pair are present.
    return example["translation"][source_lang] is not None and example[
        "translation"][target_lang] is not None


# Preprocess data or load from local file
if args.load_local_dataset:
    tokenized_datasets = load_from_disk("data")
else:
    tokenized_datasets = raw_datasets.filter(filter_none).map(preprocess,
                                                              batched=True)
    tokenized_datasets.save_to_disk("data")

# Prepare models
config = M2M100Config.from_pretrained("facebook/m2m100_418M")
config.method = fuse_method
m2m = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M",
                                                     config=config)
fused_model = FusedM2M(config, bert, m2m)

# DEBUG: Check the weight of layers
# shared_weight = m2m.model.shared.weight.data.clone().detach()
# layer_1_weight = m2m.model.encoder.layers[0].fc1.weight.data.clone().detach()
# fuse_12_weight = m2m.model.encoder.layers[-1].fuse_layer.weight.data.clone().detach()

# Load state dict from local checkpoint
if checkpoint:
    state_dict = torch.load(f'{checkpoint}/pytorch_model.bin')
    state_dict = {k: v
                  for k, v in state_dict.items()
                  if 'fuse' in k}  # load linear layer only
    fused_model.load_state_dict(state_dict, strict=False)
    # DEBUG: Check the weight of layers
Example #10
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import gradio as gr

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")


def translate(text):
    # Tokenize the English input and force French as the target language.
    tokenizer.src_lang = "en"
    encoded_text = tokenizer(text, return_tensors="pt")
    generated_tokens = model.generate(
        **encoded_text, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    return tokenizer.batch_decode(generated_tokens,
                                  skip_special_tokens=True)[0]


inputs = gr.inputs.Textbox(lines=5, label="Input Text")
outputs = gr.outputs.Textbox(label="Output Text")

title = "m2m100"
description = "Demo for Facebook M2M100 English-to-French translation. To use it, simply enter your text. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2010.11125'>Beyond English-Centric Multilingual Machine Translation</a> | <a href='https://github.com/pytorch/fairseq'>Github Repo</a></p>"

gr.Interface(translate,
             inputs,
             outputs,
             title=title,
             description=description,
             article=article).launch()
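
For a quick check without launching the Gradio interface, the translate helper defined above can also be called directly; the sample sentence is just an illustration:

print(translate("The NSA case highlights the total absence of intelligence debate"))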