Example #1
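The test code relies on a few names defined elsewhere in the test module. Below is a minimal sketch of those imports and helpers, assuming the conventions of the `transformers` test suite; the exact `TOLERANCE` value and the `_long_tensor` helper shown here are assumptions, not definitive definitions.

import math
import os

import torch

from transformers import MegatronBertModel
from transformers.testing_utils import torch_device

TOLERANCE = 1e-4  # assumed tolerance for the numerical spot-check below


def _long_tensor(tok_lst):
    # Build a LongTensor of token ids on the test device.
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
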
    def test_inference_no_head(self):
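        # Load the 345M Megatron-BERT checkpoint; MYDIR can point at a local copy of the weights.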
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
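        # Run the forward pass in half precision on the test device.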
        model.half()
        input_ids = _long_tensor(
            [[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
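        # Forward pass with gradients disabled; index [0] is the last hidden state.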
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)

        expected = [
            -0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089,
            -0.1990, 0.5728
        ]
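        # Compare the first 3x3 block of the output to the reference values element-wise.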
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a,
                                             b,
                                             rel_tol=TOLERANCE,
                                             abs_tol=TOLERANCE),
                                msg=msg)

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
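        # self.parent is expected to be the enclosing unittest.TestCase (tester-helper pattern).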
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
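        # attention_mask and token_type_ids are optional; all three call patterns should work.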
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))