Example #1
    def create_and_check_causal_lm_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFRobertaForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        # the same forward pass with positional (list) inputs instead of a dict
        inputs = [input_ids, input_mask]
        result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)

        prediction_scores = result["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
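The `encoder_hidden_states` and `encoder_attention_mask` passed above are not produced by the snippet itself; in a test context they can simply be random tensors of the expected shapes. A minimal, illustrative sketch of how they could be built inside such a checker method (shape conventions assumed, not taken from the example):

        # hypothetical setup for the cross-attention inputs used in Example #1
        encoder_hidden_states = tf.random.uniform((self.batch_size, self.seq_length, config.hidden_size))
        encoder_attention_mask = tf.ones((self.batch_size, self.seq_length), dtype=tf.int32)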
Example #2
    def create_and_check_causal_lm_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFRobertaForCausalLM(config=config)

        # special to `RobertaEmbeddings` in `Roberta`:
        #   - its `padding_idx` and its effect on `position_ids`
        #     (TFRobertaEmbeddings.create_position_ids_from_input_ids)
        #   - `1` here is `TFRobertaEmbeddings.padding_idx`
        # avoid `padding_idx` in the past
        input_ids = tf.where(input_ids == 1, 2, input_ids)

        # restrict the check to a single example in the batch
        input_ids = input_ids[:1, :]
        input_mask = input_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            output_hidden_states=True,
        ).hidden_states[0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        ).hidden_states[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
Example #3
    def create_and_check_roberta_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFRobertaForCausalLM(config=config)
        result = model([input_ids, input_mask, token_type_ids])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
Example #4
    def create_and_check_causal_lm_model_past(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFRobertaForCausalLM(config=config)

        # special to `RobertaEmbeddings` in `Roberta`:
        #   - its `padding_idx` and its effect on `position_ids`
        #     (TFRobertaEmbeddings.create_position_ids_from_input_ids)
        #   - `1` here is `TFRobertaEmbeddings.padding_idx`
        input_ids = tf.where(input_ids == 1, 2, input_ids)

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        # `use_cache` defaults to `config.use_cache` (True), so the first two calls
        # also return `past_key_values`; the `use_cache=False` output has one element less
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs.past_key_values

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

        output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0]
        output_from_past = model(
            next_tokens, past_key_values=past_key_values, output_hidden_states=True
        ).hidden_states[0]

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
Example #5
    def create_and_check_causal_lm_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True

        model = TFRobertaForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
Example #6
    def create_and_check_causal_lm_model_past_with_attn_mask(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.is_decoder = True

        model = TFRobertaForCausalLM(config=config)

        # special to `RobertaEmbeddings` in `Roberta`:
        #   - its `padding_idx` and its effect on `position_ids`
        #     (TFRobertaEmbeddings.create_position_ids_from_input_ids)
        #   - `1` here is `TFRobertaEmbeddings.padding_idx`
        # avoid `padding_idx` in the past
        input_ids = tf.where(input_ids == 1, 2, input_ids)

        # create attention mask
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

        # first forward pass
        outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        past_key_values = outputs.past_key_values

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
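        # `condition` is True at exactly one position, and that position lies in the
        # masked-out second half of the sequence, so changing the ids there must not
        # change the model outputs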
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)
        # avoid `padding_idx` in the past
        input_ids = tf.where(input_ids == 1, 2, input_ids)

        # append to next input_ids and attn_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat(
            [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
            axis=1,
        )

        output_from_no_past = model(
            next_input_ids,
            attention_mask=attn_mask,
            output_hidden_states=True,
        ).hidden_states[0]
        output_from_past = model(
            next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True
        ).hidden_states[0]

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
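All of the examples above are checker methods from a model-tester helper class: they rely on `self.parent` (the actual `unittest.TestCase`), on attributes such as `self.batch_size`, `self.seq_length` and `self.vocab_size`, and on an `ids_tensor` helper that returns random integer ids. That scaffolding is not shown on this page; the sketch below is a minimal, assumed setup (class names, sizes and config values are illustrative, not taken from the examples) showing how such a checker could be wired into a test. The checker method included is a copy of Example #5.

import unittest

import tensorflow as tf

from transformers import RobertaConfig, TFRobertaForCausalLM


def ids_tensor(shape, vocab_size):
    # minimal stand-in: random integer ids in [0, vocab_size), as the snippets above expect
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)


class TFRobertaModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.vocab_size = 99

    def prepare_config_and_inputs(self):
        # a deliberately small config so the checks run quickly
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
        )
        input_ids = ids_tensor((self.batch_size, self.seq_length), self.vocab_size)
        token_type_ids = tf.zeros_like(input_ids)
        input_mask = tf.ones_like(input_ids)
        sequence_labels = token_labels = choice_labels = None
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # the create_and_check_* methods from the examples would be defined here;
    # this one mirrors Example #5
    def create_and_check_causal_lm_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRobertaForCausalLM(config=config)
        prediction_scores = model(
            {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )


class TFRobertaCausalLMTest(unittest.TestCase):
    def test_causal_lm_model(self):
        tester = TFRobertaModelTester(self)
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_causal_lm_model(*config_and_inputs)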