Code example #1
    def test_create_attention_mask(self):
        config = GPTNeoConfig.from_pretrained("valhalla/gpt-neo-random-tiny")
        window_size = config.window_size
        batch_size, seq_length = 8, 1
        block_length, num_blocks = GPTNeoAttentionMixin._get_block_length_and_num_blocks(
            seq_length, window_size)

        causal_mask = GPTNeoAttentionMixin.create_local_attention_mask(
            batch_size, seq_length, config.window_size, torch_device)
        # check shapes
        expected_shape = [
            batch_size, num_blocks, 1, block_length, window_size + block_length
        ]
        self.assertListEqual(list(causal_mask.shape), expected_shape)
        # first window_size tokens in the first block are always padded
        # and should not be attended
        self.assertTrue(torch.all(causal_mask[:, 0, :, :, :window_size] == 0))
        # each window can attend at most window_size tokens
        self.assertTrue(
            torch.all(torch.sum(causal_mask, dim=4) <= config.window_size))

        # check if user provided attention_mask is handled correctly
        attention_mask = torch.ones(batch_size,
                                    seq_length,
                                    dtype=torch.long,
                                    device=torch_device)
        attention_mask[:, -3:] = 0  # don't attend last 3 tokens

        causal_mask = GPTNeoAttentionMixin.create_local_attention_mask(
            batch_size, seq_length, config.window_size, torch_device,
            attention_mask)
        # the last 3 tokens fall in the last block and should have 0s in causal_mask
        self.assertTrue(torch.all(causal_mask[:, -1, :, :, -3:] == 0))
        # check shapes
        expected_shape = [
            batch_size, num_blocks, 1, block_length, window_size + block_length
        ]
        self.assertListEqual(list(causal_mask.shape), expected_shape)
        # first window_size tokens in the first block are always padded
        # and should not be attended
        self.assertTrue(torch.all(causal_mask[:, 0, :, :, :window_size] == 0))
        # each window can attend at most window_size tokens
        self.assertTrue(
            torch.all(torch.sum(causal_mask, dim=4) <= config.window_size))
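
The test methods in this section are excerpts from a larger test class, so their imports and harness are not shown. A minimal setup sketch, assuming a transformers release in which the internal GPTNeoAttentionMixin helper was still exposed (it lived in transformers.models.gpt_neo.modeling_gpt_neo around v4.5/v4.6 and was removed in later releases); the class name below is only illustrative:

import unittest

import torch

from transformers import GPTNeoConfig, GPTNeoModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoAttentionMixin
from transformers.testing_utils import torch_device


class GPTNeoLocalAttentionTest(unittest.TestCase):
    # the excerpted test methods are assumed to be defined inside a
    # unittest.TestCase subclass such as this one
    ...
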
Code example #2
    def test_local_attn_probs(self):
        model = GPTNeoModel.from_pretrained(
            "valhalla/gpt-neo-random-tiny").eval()
        layer = model.h[1].attn.attention.to(torch_device)
        hidden_states = self._get_hidden_states()
        hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)
        batch_size, seq_length, hidden_size = hidden_states.shape
        mask_tokens = 3
        attention_mask = torch.ones(batch_size,
                                    seq_length,
                                    device=torch_device,
                                    dtype=torch.long)
        attention_mask[:, -mask_tokens:] = 0  # don't attend to the last mask_tokens tokens
        local_causal_mask = GPTNeoAttentionMixin.create_local_attention_mask(
            batch_size, seq_length, model.config.window_size, torch_device,
            attention_mask)

        _, attn_probs = layer(hidden_states,
                              attention_mask=local_causal_mask,
                              output_attentions=True)

        # the last 3 tokens will be in the last block, and should have 0 attn_probs
        self.assertTrue(
            torch.all(attn_probs[:, -1, :, -mask_tokens:, -mask_tokens:] == 0))
        # the first config.window_size tokens in the first block are always padded
        # and should have 0 attn_probs
        self.assertTrue(
            torch.all(attn_probs[:, 0, :, :model.config.window_size,
                                 :model.config.window_size] == 0))
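
Code examples #2 and #3 call self._get_hidden_states(), a fixture helper defined elsewhere in the test class and not reproduced in these excerpts. The stand-in below is hypothetical: it only mirrors the constraints implied by code example #3 (a sequence length that 4 divides, 3 does not divide, and 19 exceeds); the real helper returns a fixed tensor whose values and hidden size (half of the tiny model's hidden size, given the concatenation in example #2) are not shown here:

    def _get_hidden_states(self):
        # hypothetical placeholder, not the original fixture: shape
        # (batch=1, seq_length=16, hidden_size=4) is an assumption inferred
        # from the window sizes exercised in code example #3
        torch.manual_seed(0)
        return torch.randn(1, 16, 4, dtype=torch.float32, device=torch_device)
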
Code example #3
    def test_look_back(self):
        hidden_states = self._get_hidden_states()
        batch_size, seq_length, hidden_size = hidden_states.shape

        # check when seq_length is divisible by window_size
        window_size = 4
        block_length, num_block = GPTNeoAttentionMixin._get_block_length_and_num_blocks(
            seq_length, window_size)
        blocked_hidden_states = GPTNeoAttentionMixin._look_back(
            hidden_states, block_length, window_size)
        expected_shape = [
            batch_size, num_block, window_size + block_length, hidden_size
        ]
        self.assertListEqual(list(blocked_hidden_states.shape), expected_shape)
        # The last block should contain the last (window_size + block_length) hidden_states
        self.assertTrue(
            torch.all(blocked_hidden_states[:, -1, ...] ==
                      hidden_states[:, -(window_size + block_length):, ...]))

        # check when seq_length is not divisible by window_size
        window_size = 3
        block_length, num_block = GPTNeoAttentionMixin._get_block_length_and_num_blocks(
            seq_length, window_size)
        blocked_hidden_states = GPTNeoAttentionMixin._look_back(
            hidden_states, block_length, window_size)
        expected_shape = [
            batch_size, num_block, window_size + block_length, hidden_size
        ]
        self.assertListEqual(list(blocked_hidden_states.shape), expected_shape)
        # The last block should contain the last (window_size + block_length) hidden_states
        self.assertTrue(
            torch.all(blocked_hidden_states[:, -1, ...] ==
                      hidden_states[:, -(window_size + block_length):, ...]))

        # check when window_size is > seq_length
        window_size = 19
        block_length, num_block = GPTNeoAttentionMixin._get_block_length_and_num_blocks(
            seq_length, window_size)
        blocked_hidden_states = GPTNeoAttentionMixin._look_back(
            hidden_states, block_length, window_size)
        expected_shape = [
            batch_size, num_block, window_size + block_length, hidden_size
        ]
        self.assertListEqual(list(blocked_hidden_states.shape), expected_shape)

        # when window_size > seq_length, num_blocks becomes 1; in this case
        # the first window_size values in blocked_hidden_states are all zeros
        # and the last block_length values are equal to hidden_states
        values = blocked_hidden_states[:, -1, :window_size, ...]
        expected_values = torch.zeros_like(values)
        self.assertTrue(torch.all(values == expected_values))

        self.assertTrue(
            torch.all(blocked_hidden_states[:, -1, -block_length:, ...] ==
                      hidden_states))
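
The private helpers GPTNeoAttentionMixin._get_block_length_and_num_blocks and GPTNeoAttentionMixin._look_back are also not shown. The assertions above constrain them enough to sketch behaviourally equivalent stand-ins; the following is a reconstruction under those assumptions, not the library code:

import torch


def get_block_length_and_num_blocks(seq_length, window_size):
    # block_length is the largest value <= window_size that evenly divides
    # seq_length; num_blocks then tiles the whole sequence
    block_length = window_size
    while seq_length % block_length != 0:
        block_length -= 1
    return block_length, seq_length // block_length


def look_back(hidden_states, block_length, window_size, pad_value=0):
    # left-pad the sequence dimension with window_size positions, then cut it
    # into overlapping blocks of length (window_size + block_length) taken at
    # stride block_length, so each block can "look back" at the window_size
    # positions preceding it
    padded = torch.nn.functional.pad(
        hidden_states, (0, 0, window_size, 0), value=pad_value)
    blocks = padded.unfold(1, window_size + block_length, block_length)
    # unfold places the extracted window in the last dimension; move it back
    # so the result is (batch, num_blocks, window_size + block_length, hidden)
    return blocks.transpose(-2, -1)


With a sequence length of 16 (the assumption used for _get_hidden_states above), these reproduce the shapes asserted in code example #3: window_size=4 gives (block_length, num_block) = (4, 4), window_size=3 gives (2, 8), and window_size=19 gives (16, 1), with the first 19 positions of the single block zero-padded.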