Example #1
 def __init__(self, num_nodes, n_hidden, num_classes, num_rels, num_bases,
              num_hidden_layers, dropout):
     super(RGCN, self).__init__()
     self.layers = nn.ModuleList()
     # i2h
     self.layers.append(
         RelGraphConv(num_nodes,
                      n_hidden,
                      num_rels,
                      "basis",
                      num_bases,
                      activation=F.relu,
                      dropout=dropout,
                      low_mem=True))
     # h2h
     for i in range(num_hidden_layers):
         self.layers.append(
             RelGraphConv(n_hidden,
                          n_hidden,
                          num_rels,
                          "basis",
                          num_bases,
                          activation=F.relu,
                          dropout=dropout,
                          low_mem=True))
     # h2o
     self.layers.append(
         RelGraphConv(n_hidden,
                      num_classes,
                      num_rels,
                      "basis",
                      num_bases,
                      activation=None,
                      low_mem=True))
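The constructor above only assembles the stack. In DGL's classic entity-classification example, the matching forward pass simply threads the node representation through every layer; a minimal sketch along those lines (the signature, and feeding integer node IDs as featureless input to the older RelGraphConv API, are assumptions):

 def forward(self, g, h, r, norm):
     # h: initial node features; for featureless graphs pass node IDs,
     #    e.g. torch.arange(g.num_nodes()), which the num_nodes-wide
     #    input layer treats as a one-hot encoding
     # r: per-edge relation-type tensor; norm: per-edge normalizer
     for layer in self.layers:
         h = layer(g, h, r, norm)
     return h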
Example #2
    def __init__(self,
                 num_nodes,
                 h_dim,
                 out_dim,
                 num_rels,
                 regularizer="basis",
                 num_bases=-1,
                 dropout=0.,
                 self_loop=False,
                 ns_mode=False):
        super(RGCN, self).__init__()

        if num_bases == -1:
            num_bases = num_rels
        self.emb = nn.Embedding(num_nodes, h_dim)
        self.conv1 = RelGraphConv(h_dim,
                                  h_dim,
                                  num_rels,
                                  regularizer,
                                  num_bases,
                                  self_loop=self_loop)
        self.conv2 = RelGraphConv(h_dim,
                                  out_dim,
                                  num_rels,
                                  regularizer,
                                  num_bases,
                                  self_loop=self_loop)
        self.dropout = nn.Dropout(dropout)
        self.ns_mode = ns_mode
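Only the constructor appears above; a plausible companion forward, modeled on DGL's RGCN example, where ns_mode switches between full-graph propagation and a pair of neighbor-sampled blocks (it assumes import dgl and torch.nn.functional as F at module level, and a 'norm' key in the edge data):

    def forward(self, g, nids=None):
        if self.ns_mode:
            # neighbor-sampling mode: g is a pair of sampled blocks
            x = self.emb(g[0].srcdata[dgl.NID])
            h = self.conv1(g[0], x, g[0].edata[dgl.ETYPE], g[0].edata['norm'])
            h = self.dropout(F.relu(h))
            h = self.conv2(g[1], h, g[1].edata[dgl.ETYPE], g[1].edata['norm'])
            return h
        else:
            x = self.emb.weight if nids is None else self.emb(nids)
            h = self.conv1(g, x, g.edata[dgl.ETYPE], g.edata['norm'])
            h = self.dropout(F.relu(h))
            h = self.conv2(g, h, g.edata[dgl.ETYPE], g.edata['norm'])
            return h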
Example #3
 def build_output_layer(self):
     return RelGraphConv(self.h_dim,
                         self.out_dim,
                         self.num_rels,
                         "basis",
                         self.num_bases,
                         activation=None,
                         self_loop=self.use_self_loop)
Example #4
 def build_output_layer(self):
     return RelGraphConv(self.h_dim,
                         self.out_dim,
                         self.num_rels,
                         "basis",
                         self.num_bases,
                         activation=partial(F.softmax, dim=1),
                         self_loop=self.use_self_loop)
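Note that partial here is functools.partial, so the defining module needs from functools import partial; wrapping F.softmax this way fixes dim=1, making the output layer emit per-node class probabilities instead of raw logits.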
Example #5
 def build_hidden_layer(self, idx):
     return RelGraphConv(self.h_dim,
                         self.h_dim,
                         self.num_rels,
                         "basis",
                         self.num_bases,
                         activation=F.relu,
                         self_loop=self.use_self_loop,
                         dropout=self.dropout)
Example #6
 def build_input_layer(self):
     return RelGraphConv(self.num_nodes,
                         self.h_dim,
                         self.num_rels,
                         "basis",
                         self.num_bases,
                         activation=F.relu,
                         self_loop=self.use_self_loop,
                         dropout=self.dropout)
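Examples #3 through #6 are hook overrides in the style of DGL's BaseRGCN pattern: the base class stitches an optional input layer, a run of hidden layers, and an optional output layer into one stack. A condensed sketch of that pattern, assuming torch.nn as nn and RelGraphConv are imported (attribute names follow the examples above):

class BaseRGCN(nn.Module):
    def __init__(self, num_nodes, h_dim, out_dim, num_rels, num_bases,
                 num_hidden_layers=1, dropout=0, use_self_loop=False):
        super(BaseRGCN, self).__init__()
        self.num_nodes = num_nodes
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.num_rels = num_rels
        self.num_bases = None if num_bases < 0 else num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop
        # assemble i2h -> h2h -> h2o from the overridable hooks
        self.layers = nn.ModuleList()
        i2h = self.build_input_layer()
        if i2h is not None:
            self.layers.append(i2h)
        for idx in range(self.num_hidden_layers):
            self.layers.append(self.build_hidden_layer(idx))
        h2o = self.build_output_layer()
        if h2o is not None:
            self.layers.append(h2o)

    def build_input_layer(self):
        return None

    def build_hidden_layer(self, idx):
        raise NotImplementedError

    def build_output_layer(self):
        return None

    def forward(self, g, h, r, norm):
        for layer in self.layers:
            h = layer(g, h, r, norm)
        return h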
Example #7
 def build_encoder(self):
     self.input_layer = EmbeddingLayer(self.num_nodes, self.h_dim)
     self.rconv_layer_1 = RelGraphConv(self.h_dim,
                                       self.h_dim,
                                       self.num_rels,
                                       "bdd",
                                       self.num_bases,
                                       activation=nn.ReLU(),
                                       self_loop=True,
                                       dropout=self.dropout)
     self.rconv_layer_2 = RelGraphConv(self.h_dim,
                                       self.h_dim * 2,
                                       self.num_rels,
                                       "bdd",
                                       self.num_bases,
                                       activation=lambda x: x,
                                       self_loop=True,
                                       dropout=self.dropout)
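Two details in Example #7 are easy to miss: the "bdd" regularizer (block-diagonal decomposition) requires both the input and output feature sizes to be divisible by num_bases, and activation=lambda x: x is just an explicit identity, equivalent to passing activation=None.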
Example #8
 def build_hidden_layer(self, idx):
     act = F.relu if idx < self.num_hidden_layers - 1 else None
     return RelGraphConv(self.h_dim,
                         self.h_dim,
                         self.num_rels,
                         "bdd",
                         self.num_bases,
                         activation=act,
                         self_loop=True,
                         dropout=self.dropout)
Example #9
 def build_hidden_layer(self, index):
     act = F.relu if index < self.hidden_layers - 1 else None
     return RelGraphConv(in_feat=self.h_dim,
                         out_feat=self.h_dim,
                         num_rels=self.num_rels,
                         regularizer='bdd',
                         num_bases=self.num_bases,
                         activation=act,
                         self_loop=True,
                         dropout=self.dropout)
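Examples #8 and #9 are the same pattern under different attribute names: every hidden layer but the last applies ReLU, and the final one passes its raw output through unchanged so a downstream decoder or loss can consume it directly.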
Example #10
 def create_graph_layer(self):
     return RelGraphConv(
         self.h_dim,
         self.h_dim,
         self.num_relations,
         "basis",
         self.num_bases,
         activation=self.activation,
         self_loop=True,
         dropout=self.dropout,
     )
Example #11
    def __init__(self,
                 in_dim,
                 h_dim,
                 out_dim,
                 num_rels,
                 regularizer="basis",
                 num_bases=-1,
                 dropout=0.,
                 self_loop=False,
                 link_pred=False):
        super(RGCN, self).__init__()

        self.layers = nn.ModuleList()
        if link_pred:
            self.emb = nn.Embedding(in_dim, h_dim)
            in_dim = h_dim
        else:
            self.emb = None
        self.layers.append(
            RelGraphConv(in_dim,
                         h_dim,
                         num_rels,
                         regularizer,
                         num_bases,
                         activation=F.relu,
                         self_loop=self_loop,
                         dropout=dropout))

        # For entity classification, dropout should not be applied to the output layer
        if not link_pred:
            dropout = 0.
        self.layers.append(
            RelGraphConv(h_dim,
                         out_dim,
                         num_rels,
                         regularizer,
                         num_bases,
                         self_loop=self_loop,
                         dropout=dropout))
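Again only the constructor is shown; a minimal companion forward, under the assumption that the caller supplies per-edge relation types and that in link-prediction mode the learned embedding table replaces the input features:

    def forward(self, g, feat, etypes, norm=None):
        if self.emb is not None:
            # link prediction: nodes are featureless, use learned embeddings
            feat = self.emb.weight
        for layer in self.layers:
            feat = layer(g, feat, etypes, norm)
        return feat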
Example #12
    def __init__(self,
                 num_nodes: int,
                 hidden_dim: int,
                 num_relations: int,
                 num_bases: int = -1,
                 dropout: float = 0.0,
                 num_layers: int = 1,
                 node_regularization_param: float = 0.0,
                 regularizer: str = "basis"):
        super().__init__()

        self.layers = nn.ModuleList([RGCNEmbedding(num_nodes, hidden_dim)])

        for i in range(num_layers - 1):
            layer = RelGraphConv(in_feat=hidden_dim,
                                 out_feat=hidden_dim,
                                 num_rels=num_relations,
                                 regularizer=regularizer,
                                 num_bases=num_bases,
                                 activation=F.relu,
                                 self_loop=True,
                                 dropout=dropout)
            self.layers.append(layer)

        final_layer = RelGraphConv(in_feat=hidden_dim,
                                   out_feat=hidden_dim,
                                   num_rels=num_relations,
                                   regularizer=regularizer,
                                   num_bases=num_bases,
                                   self_loop=True,
                                   dropout=dropout)
        self.layers.append(final_layer)

        self.reg_param = node_regularization_param
        self.w_relation = nn.Parameter(torch.Tensor(num_relations, hidden_dim))
        nn.init.xavier_uniform_(self.w_relation,
                                gain=nn.init.calculate_gain('relu'))
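The w_relation parameter, one hidden_dim-sized vector per relation, is the DistMult parameterization commonly paired with an RGCN encoder for link prediction. A hedged sketch of the scoring function it supports, following DGL's LinkPredict example (the (src, rel, dst) column layout of triplets is an assumption):

    def calc_score(self, embedding, triplets):
        # DistMult: score(s, r, o) = sum_k s_k * r_k * o_k
        s = embedding[triplets[:, 0]]
        r = self.w_relation[triplets[:, 1]]
        o = embedding[triplets[:, 2]]
        return torch.sum(s * r * o, dim=1)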
Example #13
    def __init__(self,
                 num_nodes,
                 num_relations,
                 num_hidden,
                 num_bases,
                 num_hidden_layers=2,
                 dropout=0.0,
                 embed=None,
                 embed_connect="residual"):
        super(RelGraphConvolutionEncoder, self).__init__()
        num_bases = None if num_bases < 0 else num_bases

        # Embedding layer
        if embed is not None:
            if embed.shape[1] > num_hidden:
                raise Exception('Pretrained embedding dimension mismatch: '
                                'required {}-d, but got {}-d'.format(
                                    num_hidden, embed.shape[1]))
                # svd = decomposition.TruncatedSVD(n_components=num_hidden)
                # embed = svd.fit_transform(embed)
                # embed = torch.tensor(embed, dtype=torch.float)
            self.emb_node = nn.Embedding.from_pretrained(embed)
        else:
            self.emb_node = nn.Embedding(num_nodes, num_hidden)

        # Register layers
        layers = []
        for i in range(num_hidden_layers):
            act = F.relu if i < num_hidden_layers - 1 else None
            layers.append(
                RelGraphConv(num_hidden,
                             num_hidden,
                             num_relations,
                             "bdd",
                             num_bases,
                             activation=act,
                             self_loop=True,
                             dropout=dropout))
        self.layers = nn.ModuleList(layers)

        # Connection from the node embedding to the relational decoder
        self.emb_connect = embed_connect
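The constructor stores embed_connect but no forward is shown; a minimal sketch of what a "residual" connection from the node embedding to the decoder input could look like (the semantics are inferred from the constructor, not confirmed by the source):

    def forward(self, g, node_ids, rel_types, norm=None):
        emb = self.emb_node(node_ids)
        h = emb
        for layer in self.layers:
            h = layer(g, h, rel_types, norm)
        if self.emb_connect == "residual":
            h = h + emb  # skip connection from the embedding layer
        return h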