Example #1
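A helper from the Hugging Face Transformers Reformer test suite: it puts the model in non-causal (encoder) mode, runs ReformerForMaskedLM on the inputs, and checks that the logits come out with shape (batch_size, seq_length, vocab_size). The self.parent and self.batch_size attributes belong to the surrounding model-tester class, which is not shown here.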
    def create_and_check_reformer_with_mlm(self, config, input_ids, input_mask, choice_labels):
        config.is_decoder = False  # masked LM needs the non-causal (encoder) setting
        model = ReformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
Example #2
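The same setup as Example #1, but the point here is the backward pass: the loss is propagated through Reformer's reversible layers as a gradient smoke test. Setting lsh_num_chunks_after = 1 makes each LSH chunk also attend to the following chunk, which is only allowed when is_decoder is False.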
    def create_and_check_reformer_model_with_lm_backward(self, config, input_ids, input_mask, choice_labels):
        config.is_decoder = False
        config.lsh_num_chunks_after = 1  # attending to following chunks is only valid for non-causal models
        model = ReformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        loss = model(input_ids, attention_mask=input_mask, labels=input_ids)[0]
        loss.backward()  # smoke-test gradients through the reversible layers
Example #3
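This helper disables chunking entirely by forcing both attention chunk lengths to twice the input length, then verifies that the sequence dimension of the logits still matches the input.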
    def create_and_check_reformer_no_chunking(self, config, input_ids, input_mask, choice_labels):
        # force the chunk length to exceed the input length so attention is not chunked at all
        config.lsh_attn_chunk_length = 2 * input_ids.shape[-1]
        config.local_attn_chunk_length = 2 * input_ids.shape[-1]
        config.lsh_num_chunks_after = 1
        config.is_decoder = False
        model = ReformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        output_logits = model(input_ids, attention_mask=input_mask)["logits"]
        self.parent.assertTrue(output_logits.shape[1] == input_ids.shape[-1])
Example #4
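An earlier revision of the helper from Example #1, written against the old tuple-returning forward API: the call is unpacked into (loss, prediction_scores) instead of reading named fields off a model-output object, and the shapes are checked via assertListEqual.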
    def create_and_check_reformer_with_mlm(self, config, input_ids, input_mask, choice_labels):
        config.is_decoder = False
        model = ReformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        # older API: the forward pass returns a plain (loss, prediction_scores) tuple
        loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=input_ids)
        result = {
            "loss": loss,
            "prediction_scores": prediction_scores,
        }
        self.parent.assertListEqual(
            list(result["prediction_scores"].size()),
            [self.batch_size, self.seq_length, self.vocab_size],
        )
        self.check_loss_output(result)
Example #5
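An integration-style forward test: with a fixed random seed and an alternating local/LSH layer layout, a slice of the first forward output is compared against hard-coded reference values. _get_basic_config_and_input and _get_input_ids_and_mask are helpers defined elsewhere in the test class.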
    def test_lm_model_forward(self):
        config = self._get_basic_config_and_input()
        config["attn_layers"] = ["local", "lsh", "local", "lsh", "local", "lsh"]
        config["num_buckets"] = [2, 4]
        config["is_decoder"] = False
        torch.manual_seed(0)  # fix the seed so the hard-coded reference values below are reproducible
        model = ReformerForMaskedLM(ReformerConfig(**config)).to(torch_device)
        model.eval()
        input_ids, attn_mask = self._get_input_ids_and_mask()
        hidden_states = model(input_ids=input_ids, attention_mask=attn_mask)[0]
        # compare a small slice of the output against precomputed reference values
        output_slice = hidden_states[1, -1, :5]
        expected_output_slice = torch.tensor(
            [0.0256, -0.0121, 0.0636, 0.0024, -0.0393], dtype=torch.float, device=torch_device,
        )
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
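To try something like the snippets above outside the test harness, here is a minimal, self-contained sketch that builds a deliberately tiny Reformer and runs one masked-LM forward pass. All config values are illustrative assumptions rather than the ones used by the original tests, and it assumes a recent transformers release where model outputs expose .loss and .logits.

    import torch
    from transformers import ReformerConfig, ReformerForMaskedLM

    # Hypothetical small config -- sizes chosen for a fast smoke test,
    # not taken from the tests above.
    config = ReformerConfig(
        vocab_size=320,
        hidden_size=32,
        num_attention_heads=2,
        attention_head_size=16,
        feed_forward_size=64,
        attn_layers=["local", "lsh"],
        num_hashes=1,
        num_buckets=2,
        local_attn_chunk_length=4,
        lsh_attn_chunk_length=4,
        axial_pos_embds=False,  # plain learned position embeddings keep the example simple
        max_position_embeddings=64,
        is_decoder=False,  # ReformerForMaskedLM requires a non-causal model
    )
    model = ReformerForMaskedLM(config).eval()

    # Sequence length is a multiple of the chunk length, so no internal padding is needed.
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids)
    print(outputs.loss, outputs.logits.shape)  # logits: torch.Size([2, 16, 320])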