Example #1
    def check_encoder_decoder_model_from_pretrained(self,
                                                    config,
                                                    attention_mask,
                                                    decoder_config,
                                                    decoder_input_ids,
                                                    decoder_attention_mask,
                                                    return_dict,
                                                    input_values=None,
                                                    input_features=None,
                                                    **kwargs):
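        # Build encoder/decoder submodels, hand them to from_encoder_decoder_pretrained
        # via the encoder_model/decoder_model kwargs, run a forward pass and check that
        # the logits come out with shape (batch, target length, decoder vocab size).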
        encoder_model, decoder_model = self.get_encoder_decoder_model(
            config, decoder_config)
        kwargs = {
            "encoder_model": encoder_model,
            "decoder_model": decoder_model,
            "return_dict": return_dict
        }
        enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
            **kwargs)
        enc_dec_model.to(torch_device)
        outputs_encoder_decoder = enc_dec_model(
            input_values=input_values,
            input_features=input_features,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
            output_hidden_states=True,
            return_dict=True,
        )

        self.assertEqual(outputs_encoder_decoder["logits"].shape,
                         (decoder_input_ids.shape +
                          (decoder_config.vocab_size, )))
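The check above only asserts the shape of the logits. Outside a test class, the same from_encoder_decoder_pretrained pattern boils down to the sketch below; the checkpoint names are the ones used in the later examples, and the dummy input shapes are illustrative assumptions.

import torch
from transformers import SpeechEncoderDecoderModel

# Pair a pretrained speech encoder (wav2vec2) with a pretrained text decoder (BERT).
model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-cased"
)
model.eval()

input_values = torch.randn(2, 16000)                     # dummy raw waveforms: (batch, samples)
decoder_input_ids = torch.ones(2, 4, dtype=torch.long)   # dummy decoder prompt: (batch, target length)

with torch.no_grad():
    outputs = model(input_values=input_values, decoder_input_ids=decoder_input_ids)

# As in the test: logits shape is (batch, target length, decoder vocab size).
print(outputs.logits.shape)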
Example #2
    def check_save_and_load_encoder_decoder_model(self,
                                                  config,
                                                  attention_mask,
                                                  decoder_config,
                                                  decoder_input_ids,
                                                  decoder_attention_mask,
                                                  input_values=None,
                                                  input_features=None,
                                                  **kwargs):
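        # Run the freshly assembled model once, save the encoder and decoder into
        # temporary directories, rebuild the model from those directories and check
        # that the reloaded model reproduces the original outputs within 1e-5.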
        encoder_model, decoder_model = self.get_encoder_decoder_model(
            config, decoder_config)
        enc_dec_model = SpeechEncoderDecoderModel(encoder=encoder_model,
                                                  decoder=decoder_model)
        enc_dec_model.to(torch_device)
        enc_dec_model.eval()
        with torch.no_grad():
            outputs = enc_dec_model(
                input_values=input_values,
                input_features=input_features,
                decoder_input_ids=decoder_input_ids,
                attention_mask=attention_mask,
                decoder_attention_mask=decoder_attention_mask,
            )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory(
            ) as encoder_tmp_dirname, tempfile.TemporaryDirectory(
            ) as decoder_tmp_dirname:
                enc_dec_model.encoder.save_pretrained(encoder_tmp_dirname)
                enc_dec_model.decoder.save_pretrained(decoder_tmp_dirname)
                enc_dec_model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
                    encoder_pretrained_model_name_or_path=encoder_tmp_dirname,
                    decoder_pretrained_model_name_or_path=decoder_tmp_dirname,
                )
                enc_dec_model.to(torch_device)

                after_outputs = enc_dec_model(
                    input_values=input_values,
                    input_features=input_features,
                    decoder_input_ids=decoder_input_ids,
                    attention_mask=attention_mask,
                    decoder_attention_mask=decoder_attention_mask,
                )
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
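
Example #2 round-trips the encoder and decoder through separate directories. The composite model can also be saved and reloaded as a single checkpoint; a minimal sketch, assuming the standard save_pretrained/from_pretrained API of the composite class:

import tempfile

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-cased"
)

with tempfile.TemporaryDirectory() as tmp_dirname:
    # One directory now holds the combined config plus encoder and decoder weights...
    model.save_pretrained(tmp_dirname)
    # ...so the model can be restored in a single call.
    reloaded = SpeechEncoderDecoderModel.from_pretrained(tmp_dirname)
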
    def get_pretrained_model_and_inputs(self):
        model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
            "facebook/s2t-small-librispeech-asr", "bert-base-cased"
        )
        batch_size = 13
        input_features = floats_tensor([batch_size, 7, 80], scale=1.0)
        attention_mask = random_attention_mask([batch_size, 7])
        decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size)
        decoder_attention_mask = random_attention_mask([batch_size, 4])
        inputs = {
            "input_features": input_features,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }

        return model, inputs
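
A helper like this supplies a real pretrained model together with a matching batch of random inputs, so a test can splat the dictionary straight into the forward pass; roughly (a sketch, the enclosing test method is assumed):

model, inputs = self.get_pretrained_model_and_inputs()
outputs = model(**inputs)
self.assertEqual(outputs.logits.shape[:2], inputs["decoder_input_ids"].shape)
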
    def get_pretrained_model_and_inputs(self):
        model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
            "facebook/wav2vec2-base-960h", "bert-base-cased"
        )
        batch_size = 13
        input_values = floats_tensor([batch_size, 512], scale=1.0)
        attention_mask = random_attention_mask([batch_size, 512])
        decoder_input_ids = ids_tensor([batch_size, 4], model.decoder.config.vocab_size)
        decoder_attention_mask = random_attention_mask([batch_size, 4])
        inputs = {
            "input_values": input_values,
            "attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_attention_mask,
        }

        return model, inputs
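
The two helpers differ only in the audio representation they build: wav2vec2-style encoders take raw waveforms as input_values of shape (batch, samples), while Speech2Text-style encoders take log-mel filter-bank frames as input_features of shape (batch, frames, 80). With real audio, the matching feature extractors produce exactly these tensors; a sketch, assuming 16 kHz mono input and that the Speech2Text extractor's audio dependencies are installed:

import numpy as np
from transformers import Speech2TextFeatureExtractor, Wav2Vec2FeatureExtractor

waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz

# Raw-waveform path (wav2vec2 encoders) -> "input_values"
wav2vec2_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
values = wav2vec2_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(values.input_values.shape)      # (1, 16000)

# Filter-bank path (Speech2Text encoders) -> "input_features"
s2t_extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
features = s2t_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(features.input_features.shape)  # (1, frames, 80)
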
    def get_pretrained_model(self):
        # The encoder of a SpeechEncoderDecoderModel must be a speech model, so a wav2vec2 checkpoint is paired here with the BART decoder.
        return SpeechEncoderDecoderModel.from_encoder_decoder_pretrained("facebook/wav2vec2-base-960h", "facebook/bart-large")
    def get_pretrained_model(self):
        return SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
            "facebook/s2t-small-librispeech-asr", "bert-base-cased"
        )

    def get_pretrained_model(self):
        return SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
            "facebook/wav2vec2-base-960h", "bert-base-cased"
        )
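
At inference time the same pretrained pairings feed the class's generate API. A minimal sketch for the wav2vec2 + BERT combination; the waveform is a dummy tensor, and using BERT's CLS/PAD ids as decoder start and padding tokens is an assumption of this sketch rather than something the tests configure:

import torch
from transformers import BertTokenizer, SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-cased"
)
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")

# generate() needs to know how decoder sequences start and how they are padded.
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

input_values = torch.randn(1, 16000)  # dummy one-second waveform; real audio would come from a feature extractor
generated_ids = model.generate(input_values, max_length=20)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))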