Code Example #1
    def test_local_attn_probs(self):
        model = GPTNeoModel.from_pretrained(
            "valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)

        batch_size, seq_length, _ = hidden_states.shape
        mask_tokens = 2
        attention_mask = torch.ones(batch_size,
                                    seq_length,
                                    device=torch_device,
                                    dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # don't attend to the last mask_tokens

        attention_mask = attention_mask.view(batch_size, -1)
        attention_mask = attention_mask[:, None, None, :]
        # convert the 0/1 mask to an additive bias: 0.0 where attended, -10000.0 where masked
        attention_mask = (1.0 - attention_mask) * -10000.0

        attn_probs = layer(hidden_states,
                           attention_mask=attention_mask,
                           output_attentions=True)[-1]

        # the last 2 tokens are masked, and should have 0 attn_probs
        self.assertTrue(
            torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))

        # in local attention each token can only attend to the previous window_size tokens (including itself)
        # here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
        # and the attn_probs should be 0 for tokens [0, 1]
        self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
        self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))
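The window rule spelled out in the comments above can be written down directly. As a minimal sketch in plain torch (an illustration of the rule, not the library's implementation):

    import torch

    def sliding_window_causal_mask(seq_length, window_size):
        # token i may attend to token j iff j <= i and i - j < window_size
        positions = torch.arange(seq_length)
        causal = positions[None, :] <= positions[:, None]
        in_window = positions[:, None] - positions[None, :] < window_size
        return causal & in_window  # (seq_length, seq_length) boolean mask

With window_size = 4, row 5 of this mask is True exactly at columns [2, 3, 4, 5], which is what the two assertions at the end of the test check.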
Code Example #2
    def test_local_attn_probs(self):
        model = GPTNeoModel.from_pretrained(
            "valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)
        batch_size, seq_length, hidden_size = hidden_states.shape
        mask_tokens = 3
        attention_mask = torch.ones(batch_size,
                                    seq_length,
                                    device=torch_device,
                                    dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # don't attend to the last mask_tokens
        local_causal_mask = GPTNeoAttentionMixin.create_local_attention_mask(
            batch_size, seq_length, model.config.window_size, torch_device,
            attention_mask)

        _, attn_probs = layer(hidden_states,
                              attention_mask=local_causal_mask,
                              output_attentions=True)

        # the last 3 tokens will be in the last block, and should have 0 attn_probs
        self.assertTrue(
            torch.all(attn_probs[:, -1, :, -mask_tokens:, -mask_tokens:] == 0))
        # the first config.window_size tokens in the first block are always padded
        # and should have 0 attn_probs
        self.assertTrue(
            torch.all(attn_probs[:, 0, :, :model.config.window_size,
                                 :model.config.window_size] == 0))
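For comparison with example #1, the attend/ignore rule that create_local_attention_mask encodes can be approximated in unblocked form. A sketch reusing the window rule above and the additive -10000.0 convention from example #1 (the real implementation folds the sequence into blocks of window_size, which is why attn_probs above carries an extra block dimension; this sketch expresses the same rule without blocking):

    import torch

    def local_causal_bias(attention_mask, window_size):
        # attention_mask: (batch, seq) with 1 = attend, 0 = masked out
        batch_size, seq_length = attention_mask.shape
        positions = torch.arange(seq_length, device=attention_mask.device)
        local = ((positions[None, :] <= positions[:, None])
                 & (positions[:, None] - positions[None, :] < window_size))
        allowed = local[None, None, :, :] & attention_mask[:, None, None, :].bool()
        # additive bias: 0.0 where attention is allowed, -10000.0 elsewhere
        return (~allowed).float() * -10000.0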
Code Example #3
    def create_and_check_gpt_neo_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

        # create a hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)

        # append to next input_ids and token_type_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)

        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
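Stripped of the test scaffolding, the caching contract this test verifies looks like the following usage sketch (the tiny random checkpoint name is borrowed from example #1 and has to be downloaded; the token ids are arbitrary):

    import torch
    from transformers import GPTNeoModel

    model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
    input_ids = torch.tensor([[10, 20, 30]])

    # the first pass returns per-layer key/value caches
    past = model(input_ids, use_cache=True).past_key_values

    # the next step feeds only the new token plus the cache ...
    step = model(torch.tensor([[40]]), past_key_values=past)

    # ... and its single output position agrees, up to tolerance, with the
    # last position of a full pass over all four tokens
    full = model(torch.tensor([[10, 20, 30, 40]]))
    assert torch.allclose(step.last_hidden_state[:, 0],
                          full.last_hidden_state[:, -1], atol=1e-3)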
Code Example #4
    def create_and_check_gpt_neo_model_attention_mask_past(
            self, config, input_ids, input_mask, head_mask, token_type_ids,
            *args):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape,
                               dtype=torch.long,
                               device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create a hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random token in the masked part of input_ids
        random_seq_idx_to_change = ids_tensor(
            (1, ), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1),
                                              config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [
                attn_mask,
                torch.ones((attn_mask.shape[0], 1),
                           dtype=torch.long,
                           device=torch_device)
            ],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(
            next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens,
                                 past_key_values=past,
                                 attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1, ), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[
            :, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[
            :, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(
            torch.allclose(output_from_past_slice,
                           output_from_no_past_slice,
                           atol=1e-3))
Code Example #5
    def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = GPTNeoModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
Code Example #6
    def test_model_from_pretrained(self):
        for model_name in GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = GPTNeoModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
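These snippets come from a test class and lean on names defined elsewhere in the suite. As a rough guide to the scaffolding they assume (module paths moved around across transformers versions, so treat the exact import locations as assumptions):

    import torch
    from transformers import GPTNeoModel
    from transformers.testing_utils import torch_device

    # In the transformers versions these tests target, GPTNeoAttentionMixin and
    # GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST were importable from
    # transformers.models.gpt_neo.modeling_gpt_neo (both were removed in later
    # releases). ids_tensor(shape, vocab_size) is the suite's helper that
    # returns a random LongTensor of token ids, and _get_hidden_states() is
    # defined on the test class itself.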