Example no. 1
    def __init__(self, config, dropout=0.1):
        super().__init__(config[0])

        # config is a tuple of three configurations: node, path, and concatenation
        config_node, config_path, config_concat = config

        self.embedding_dim = config_node.hidden_size
        # separate RoBERTa embedding layers for node tokens and path tokens
        self.node_embedding = RobertaEmbeddings(config_node)
        self.path_embedding = RobertaEmbeddings(config_path)

        # extra learnable parameters sized from config_node.hidden_size, plus the output projection and dropout
        self.W = nn.Parameter(torch.randn(1, config_node.hidden_size, 3 * config_node.hidden_size))
        self.a = nn.Parameter(torch.randn(1, config_node.hidden_size, 1))
        self.out = nn.Linear(config_node.hidden_size, config_node.num_labels)
        self.drop = nn.Dropout(dropout)

        self.sub_num = [1]
        self.init_weights()
Example no. 2
    def test_create_position_ids_from_inputs_embeds(self):
        """Ensure that the default position ids only assign a sequential . This is a regression
        test for https://github.com/huggingface/transformers/issues/1761

        The position ids should be masked with the embedding object's padding index. Therefore, the
        first available non-padding position index is RobertaEmbeddings.padding_idx + 1
        """
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = RobertaEmbeddings(config=config)

        # an uninitialized tensor is enough; only its shape is used to derive position ids
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
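
A minimal sketch of the behaviour this test pins down, written here as a standalone helper rather than the library's actual method; the only assumption is that position ids depend on the shape and device of inputs_embeds, never on its values:

import torch

def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
    # Start counting right after padding_idx so that padding_idx itself is
    # never handed out as a real position id.
    batch_size, seq_length = inputs_embeds.size()[:-1]
    position_ids = torch.arange(
        padding_idx + 1, seq_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
    )
    return position_ids.unsqueeze(0).expand(batch_size, seq_length)

With a padding_idx of 1 (RoBERTa's usual pad token id) and an uninitialized (2, 4, 30) tensor, this returns [[2, 3, 4, 5], [2, 3, 4, 5]], which matches the expected_positions built in the test above.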
Example no. 3
    def __init__(self, config, num_labels=None):
        super().__init__(config)
        # The num_labels argument is unused; the label count is always taken from the config.
        self.num_labels = config.num_labels
        self.config = config

        # The backbone is assembled from its parts rather than wrapping RobertaModel
        # (originally: self.roberta = RobertaModel(config, add_pooling_layer=False)).
        self.embeddings = RobertaEmbeddings(config)
        self.encoder = RobertaEncoder(config)
        self.pooler = RobertaPooler(config)  # RobertaModel would only build this if add_pooling_layer were True

        self.classifier = RobertaClassificationHead(config)

        self.init_weights()
Example no. 4
    def __init__(self, config: LukeConfig):
        super().__init__()

        self.config = config

        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)

        # Use RoBERTa-style embeddings when the underlying BERT model is a RoBERTa checkpoint.
        if self.config.bert_model_name and "roberta" in self.config.bert_model_name:
            self.embeddings = RobertaEmbeddings(config)
            # RoBERTa does not use token type (segment) embeddings, so freeze that weight.
            self.embeddings.token_type_embeddings.weight.requires_grad = False
        else:
            self.embeddings = BertEmbeddings(config)
        self.entity_embeddings = EntityEmbeddings(config)
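
One detail worth noting about the freezing step above: requires_grad has to be set on the parameter, not on the module. Assigning requires_grad = False to an nn.Module only attaches a plain Python attribute and leaves the weight trainable. A quick standalone check (the Embedding below is a stand-in, not LUKE's actual layer):

import torch.nn as nn

emb = nn.Embedding(2, 768)        # stand-in for a token_type_embeddings layer

emb.requires_grad = False         # plain attribute on the Module; nothing is frozen
print(emb.weight.requires_grad)   # True

emb.weight.requires_grad = False  # freezes the actual parameter
print(emb.weight.requires_grad)   # False

# Equivalent, and covers every parameter the module owns:
emb.requires_grad_(False)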
Example no. 5
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()