Example #1
    def forward(self, input, generation_mode=False):
        """Input is expected to be of size [bsz x seqlen]."""
        if generation_mode:  # the position is our current step in the decoded sequence
            pos = int(self.padding_idx + input.size(1))
            positions = input.data.new(1, 1).fill_(pos)
        else:
            positions = create_position_ids_from_input_ids(
                input, self.padding_idx)
        return super().forward(positions)
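Both examples delegate the numbering itself to create_position_ids_from_input_ids, whose body is not shown on this page. A minimal sketch of the behaviour the tests below assert (sequential positions offset by padding_idx + 1, padding slots kept at padding_idx) could look like this:

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx):
    # Non-padding tokens get a mask of 1, padding tokens get 0.
    mask = input_ids.ne(padding_idx).int()
    # The cumulative sum yields 1-based positions for real tokens; multiplying
    # by the mask keeps every padding slot at 0 before the offset is applied.
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    # Shift by padding_idx so the first real token lands on padding_idx + 1
    # and padding slots map back onto padding_idx itself.
    return incremental_indices.long() + padding_idx

For the input tensor used in the regression test below (padding_idx == 1 for RoBERTa), this would yield [[2, 3, 4, 1]].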
Example #2
    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ner_ids=None, ent_ids=None):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        return super().forward(
            input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, ner_ids=ner_ids, ent_ids=ent_ids
        )
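When only inputs_embeds is passed, the forward above falls back to self.create_position_ids_from_inputs_embeds, which is also not reproduced on this page. A plausible sketch, assuming it simply numbers every slot sequentially starting at padding_idx + 1 (with no token ids there is nothing to detect padding from), is:

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        # Without input ids there is no way to spot padding, so every position
        # is filled sequentially, starting right after the padding index.
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            self.padding_idx + 1,
            sequence_length + self.padding_idx + 1,
            dtype=torch.long,
            device=inputs_embeds.device,
        )
        return position_ids.unsqueeze(0).expand(input_shape)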
    def test_create_position_ids_respects_padding_index(self):
        """ Ensure that the default position ids only assign a sequential . This is a regression
        test for https://github.com/huggingface/transformers/issues/1761

        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is RobertaEmbeddings.padding_idx + 1
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = RobertaEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """ Ensure that the default position ids only assign a sequential . This is a regression
        test for https://github.com/huggingface/transformers/issues/1761

        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is RobertaEmbeddings.padding_idx + 1
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = RobertaEmbeddings(config=config)

        inputs_embeds = torch.Tensor(2, 4, 30)
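The snippet ends abruptly here. A plausible continuation, mirroring the structure of the first test and assuming the create_position_ids_from_inputs_embeds sketch above, might look like this:

        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        # Both rows of the batch are expected to receive the same sequential positions.
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))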