def test_local_attn_probs(self):
    model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
    layer = model.h[1].attn.attention.to(torch_device)
    hidden_states = self._get_hidden_states()
    hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)

    batch_size, seq_length, _ = hidden_states.shape
    mask_tokens = 2
    attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
    attention_mask[:, -mask_tokens:] = 0  # don't attend to the last mask_tokens tokens

    attention_mask = attention_mask.view(batch_size, -1)
    attention_mask = attention_mask[:, None, None, :]
    attention_mask = (1.0 - attention_mask) * -10000.0

    attn_probs = layer(hidden_states, attention_mask=attention_mask, output_attentions=True)[-1]

    # the last 2 tokens are masked, and should have 0 attn_probs
    self.assertTrue(torch.all(attn_probs[:, :, -mask_tokens:, -mask_tokens:] == 0))

    # in local attention each token can only attend to the previous window_size tokens (including itself)
    # here window_size is 4, so a token at index 5 can only attend to indices [2, 3, 4, 5]
    # and the attn_probs should be 0 for tokens [0, 1]
    self.assertTrue(torch.all(attn_probs[:, :, 5, 2:6] != 0))
    self.assertTrue(torch.all(attn_probs[:, :, 5, :2] == 0))
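# Illustrative sketch (hypothetical helper, not part of the library or used by the tests):
# one way to build the sliding-window causal mask that the assertions above describe.
# With window_size = 4, query position 5 may attend to key positions [2, 3, 4, 5] and
# nothing earlier. Relies on torch, which is already imported at module level.
def _sliding_window_causal_mask(seq_length, window_size):
    positions = torch.arange(seq_length)
    # query i may attend to key j iff j <= i (causal) and i - j < window_size (in window)
    causal = positions[None, :] <= positions[:, None]
    in_window = positions[:, None] - positions[None, :] < window_size
    return causal & in_window  # shape (seq_length, seq_length), True = attention allowed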
def test_local_attn_probs(self):
    model = GPTNeoModel.from_pretrained("valhalla/gpt-neo-random-tiny").eval()
    layer = model.h[1].attn.attention.to(torch_device)
    hidden_states = self._get_hidden_states()
    hidden_states = torch.cat([hidden_states, hidden_states - 0.5], dim=2)

    batch_size, seq_length, hidden_size = hidden_states.shape
    mask_tokens = 3
    attention_mask = torch.ones(batch_size, seq_length, device=torch_device, dtype=torch.long)
    attention_mask[:, -mask_tokens:] = 0  # don't attend to the last mask_tokens tokens

    local_causal_mask = GPTNeoAttentionMixin.create_local_attention_mask(
        batch_size, seq_length, model.config.window_size, torch_device, attention_mask
    )

    _, attn_probs = layer(hidden_states, attention_mask=local_causal_mask, output_attentions=True)

    # the last 3 tokens will be in the last block, and should have 0 attn_probs
    self.assertTrue(torch.all(attn_probs[:, -1, :, -mask_tokens:, -mask_tokens:] == 0))

    # the first config.window_size tokens in the first block are always padded
    # and should have 0 attn_probs
    self.assertTrue(
        torch.all(attn_probs[:, 0, :, :model.config.window_size, :model.config.window_size] == 0)
    )
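# Illustrative sketch (hypothetical helper, not the library's create_local_attention_mask):
# the blocked variant exercised above splits the sequence into blocks and lets each query
# attend to the window_size tokens ending at its own position, so the key axis has length
# window_size + block_length and the first block's leading window_size key slots are padding
# (hence the all-zero probabilities asserted for block 0). Relies on torch, already imported.
def _blocked_local_mask(seq_length, block_length, window_size):
    num_blocks = seq_length // block_length
    mask = torch.zeros(num_blocks, block_length, window_size + block_length, dtype=torch.bool)
    for b in range(num_blocks):
        for q in range(block_length):
            query_pos = b * block_length + q
            for k in range(window_size + block_length):
                # keys for block b cover positions [b * block_length - window_size, ...]
                key_pos = b * block_length - window_size + k
                # allowed iff the key exists (not left padding), is not in the future,
                # and lies within the local window (which includes the query itself)
                if 0 <= key_pos <= query_pos and query_pos - key_pos < window_size:
                    mask[b, q, k] = True
    return mask  # shape (num_blocks, block_length, window_size + block_length)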
def test_model_from_pretrained(self):
    for model_name in GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = GPTNeoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)