Example #1
    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        loss, logits = model(input_ids,
                             attention_mask=input_mask,
                             labels=token_labels)
        result = {
            "loss": loss,
            "logits": logits,
        }
        self.parent.assertListEqual(
            list(result["logits"].size()),
            [self.batch_size, self.seq_length, self.num_labels])
        self.check_loss_output(result)
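
For reference, here is a minimal standalone sketch of the same forward pass outside the test harness; the tiny config sizes, batch shape, and label count are made up for illustration and are not taken from the original test:

    import torch
    from transformers import XLMConfig, XLMForTokenClassification

    # tiny, made-up config so the model builds instantly on CPU
    config = XLMConfig(vocab_size=100, emb_dim=32, n_layers=2, n_heads=4, num_labels=5)
    model = XLMForTokenClassification(config)
    model.eval()

    batch_size, seq_length = 2, 7
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_length))
    input_mask = torch.ones_like(input_ids)
    token_labels = torch.randint(0, config.num_labels, (batch_size, seq_length))

    with torch.no_grad():
        outputs = model(input_ids, attention_mask=input_mask, labels=token_labels)
    loss, logits = outputs[0], outputs[1]  # works for tuple and ModelOutput returns alike
    assert logits.shape == (batch_size, seq_length, config.num_labels)
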
Example #2
    @classmethod
    def init(cls, cf, d_out):
        if "bert" in cf.model_pretrained:
            model = BertForTokenClassification.from_pretrained(
                cf.model_pretrained, num_labels=d_out,
                output_attentions=False, output_hidden_states=False)
            if cf.random_weights:
                # initialize Bert with random weights
                print("randomizing weights")
                model = randomize_model(model)
                # print(model.classifier.weight.data)
            else:
                # keep Bert's pre-trained weights
                print("keeping Bert with pre-trained weights")
                # print(model.classifier.weight.data)

        elif "xlm" in cf.model_pretrained:
            model = XLMForTokenClassification.from_pretrained(
                cf.model_pretrained, num_labels=d_out,
                output_attentions=False, output_hidden_states=False)

        else:
            # fail fast on unsupported names instead of hitting an unbound `model` below
            raise ValueError(f"unsupported model_pretrained: {cf.model_pretrained}")

        model.d_out = d_out

        return model
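
A possible way to drive this factory: `cf` only needs the two attributes the method reads (model_pretrained and random_weights). SimpleNamespace, the "bert-base-cased" checkpoint, and the 9-label head below are placeholders, not values from the original project:

    from types import SimpleNamespace
    from transformers import BertForTokenClassification

    cf = SimpleNamespace(model_pretrained="bert-base-cased", random_weights=False)

    # equivalent to the "bert" branch above when random_weights is False
    model = BertForTokenClassification.from_pretrained(
        cf.model_pretrained, num_labels=9,
        output_attentions=False, output_hidden_states=False)
    model.d_out = 9
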
    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
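
Note that, unlike Example #1, this newer variant of the checker reads the logits as result.logits from the ModelOutput object that recent transformers versions (v4+) return by default, rather than unpacking a (loss, logits) tuple.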