def create_and_check_reformer_model_fp16_generate(self, config, input_ids, input_mask):
    model = ReformerModelWithLMHead(config=config)
    model.to(torch_device)
    model.half()  # cast weights to fp16
    model.eval()
    # greedy generation in fp16 should not produce NaNs
    output = model.generate(input_ids, attention_mask=input_mask, do_sample=False)
    self.parent.assertFalse(torch.isnan(output).any().item())
Code example #2
def create_and_check_reformer_model_fp16_generate(self, config, input_ids, input_mask, choice_labels):
    config.is_decoder = True
    config.lsh_num_chunks_after = 0  # causal LSH attention must not attend to later chunks
    model = ReformerModelWithLMHead(config=config)
    model.to(torch_device)
    model.half()  # cast weights to fp16
    model.eval()
    # only use last 10 inputs for generation
    output = model.generate(input_ids[:, -10:], attention_mask=input_mask, do_sample=False)
    self.parent.assertFalse(torch.isnan(output).any().item())
Code example #3
def create_and_check_reformer_model_generate(self, config, input_ids, input_mask, choice_labels):
    config.is_decoder = True
    config.lsh_num_chunks_after = 0
    config.bos_token_id = 0
    config.eos_token_id = None
    config.max_length = 20

    model = ReformerModelWithLMHead(config=config)
    model.to(torch_device)
    model.eval()
    output = model.generate()
    self.parent.assertIsNotNone(output)
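
The methods above assume the surrounding test harness: a parent test case providing self.parent and torch_device, and a small ReformerConfig prepared by the tester. As a minimal standalone sketch of the same generation path, assuming the public google/reformer-crime-and-punishment checkpoint and the sentencepiece-based ReformerTokenizer are available:

# Standalone sketch (not part of the test suite): greedy generation with a
# pretrained Reformer LM head, assuming the google/reformer-crime-and-punishment
# checkpoint can be downloaded from the Hub.
import torch
from transformers import ReformerModelWithLMHead, ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
model = ReformerModelWithLMHead.from_pretrained("google/reformer-crime-and-punishment")
model.eval()

input_ids = tokenizer("A few months later", return_tensors="pt").input_ids
with torch.no_grad():
    # greedy decoding, mirroring do_sample=False in the tests above
    output = model.generate(input_ids, do_sample=False, max_length=50)
print(tokenizer.decode(output[0]))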