Ejemplo n.º 1
0
    def __init__(self,
                 num_nodes,
                 num_relations,
                 num_layers,
                 emb_dim,
                 num_bases=None):
        """Build a constant-width stack of `num_layers` RGCN layers.

        Args:
            num_nodes: number of graph nodes (stored only; not used here).
            num_relations: number of edge/relation types.
            num_layers: how many RGCNConv layers to stack.
            emb_dim: embedding width used as both input and output channels.
            num_bases: basis count for basis decomposition; a falsy value
                (None or 0) falls back to `num_relations`.
        """
        super(node_RGCN, self).__init__()

        self.num_nodes = num_nodes
        self.num_relations = num_relations
        self.num_layers = num_layers
        self.emb_dim = emb_dim
        # NOTE(review): a falsy num_bases (None *or* 0) falls back to
        # num_relations — confirm 0 was never meant as a valid basis count.
        self.num_bases = num_bases if num_bases else num_relations

        # The original duplicated the construction code for the first layer
        # and then looped num_layers - 1 times; all layers are identical, so
        # a single loop suffices (and num_layers == 0 now yields 0 layers
        # instead of 1).
        self.layers = nn.ModuleList()
        for _ in range(self.num_layers):
            self.layers.append(
                RGCNConv(in_channels=self.emb_dim,
                         out_channels=self.emb_dim,
                         num_relations=self.num_relations,
                         num_bases=self.num_bases))
Ejemplo n.º 2
0
    def __init__(self, num_features, n_classes, num_heads, num_rels, num_bases, num_hidden, num_hidden_layers_rgcn, num_hidden_layers_gat, dropout, activation, alpha, bias):
        """RGCN stack followed by a GAT stack (PRGAT).

        All RGCN layers keep width `num_hidden`; GAT layers concatenate
        `num_heads` heads, which widens their inputs accordingly.
        """
        super(PRGAT, self).__init__()
        self.concat = True
        self.neg_slope = alpha
        self.num_hidden_layers_rgcn = num_hidden_layers_rgcn
        self.num_hidden_layers_gat = num_hidden_layers_gat
        # A falsy dropout still yields a (no-op) Dropout module so forward()
        # can apply it unconditionally.
        self.dropout = nn.Dropout(p=dropout) if dropout else nn.Dropout(p=0.)
        self.activation = activation
        # RGCN input layer; aggr defaults to 'add' ('mean'/'max' also valid).
        self.rgcn_input = RGCNConv(num_features, num_hidden, num_rels, num_bases, bias=bias)
        # RGCN hidden layers.
        self.layers = nn.ModuleList()
        for _ in range(num_hidden_layers_rgcn):
            self.layers.append(RGCNConv(num_hidden, num_hidden, num_rels, num_bases, bias=bias))

        def make_gat(in_dim, out_dim, concat):
            # All GAT layers share head count, slope, dropout and bias.
            return GATConv(in_dim, out_dim, heads=num_heads, concat=concat,
                           negative_slope=self.neg_slope, dropout=dropout,
                           bias=bias)

        # GAT input layer.
        self.layers.append(make_gat(num_hidden, num_hidden, self.concat))
        # GAT hidden layers: concatenated heads widen the input dimension.
        gat_in = num_hidden * num_heads if self.concat else num_hidden
        for _ in range(num_hidden_layers_gat):
            self.layers.append(make_gat(gat_in, num_hidden, self.concat))
        # GAT output layer mapping to class logits.
        self.gat_output = make_gat(gat_in, n_classes, self.concat)
Ejemplo n.º 3
0
 def __init__(self, num_nodes, hidden_channels, num_relations):
     """Two-layer RGCN with a learnable per-node embedding table.

     Args:
         num_nodes: number of nodes (rows of the embedding table).
         hidden_channels: embedding / hidden width of both conv layers.
         num_relations: number of edge types.
     """
     super().__init__()
     # One learnable embedding row per node; presumably initialized inside
     # reset_parameters() — its body is not visible here.
     self.node_emb = Parameter(torch.Tensor(num_nodes, hidden_channels))
     # Block-diagonal decomposition with 5 blocks (RGCNConv's num_blocks
     # mode requires hidden_channels to be divisible by the block count).
     self.conv1 = RGCNConv(hidden_channels, hidden_channels, num_relations,
                           num_blocks=5)
     self.conv2 = RGCNConv(hidden_channels, hidden_channels, num_relations,
                           num_blocks=5)
     self.reset_parameters()
Ejemplo n.º 4
0
    def __init__(self,
                 num_features,
                 n_classes,
                 num_heads,
                 num_rels,
                 num_bases,
                 num_hidden,
                 num_hidden_layer_pairs,
                 dropout,
                 activation,
                 neg_slope,
                 bias=True):
        """Alternating RGCN/GAT stack (PRGAT2).

        Layout: RGCN input layer, then `num_hidden_layer_pairs` pairs of
        (GATConv, RGCNConv), then a final non-concatenating GATConv.

        Args:
            num_features: input node-feature dimension.
            n_classes: accepted but not read in this constructor (the output
                width is num_hidden[-1]) — TODO confirm against callers.
            num_heads: per-pair head counts; needs
                num_hidden_layer_pairs + 1 entries.
            num_rels: number of relation types for the RGCN layers.
            num_bases: RGCN basis count; negative values fall back to
                num_rels (no parameter sharing).
            num_hidden: per-stage widths; needs
                num_hidden_layer_pairs + 2 entries.
            dropout: dropout probability (falsy disables).
            activation: activation callable stored for the forward pass.
            neg_slope: LeakyReLU negative slope for the GAT layers.
            bias: whether conv layers learn an additive bias.
        """
        super(PRGAT2, self).__init__()
        self.neg_slope = neg_slope
        self.num_hidden_layer_pairs = num_hidden_layer_pairs
        # Always create a Dropout module so forward() can apply it blindly.
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation

        if num_bases < 0:
            num_bases = num_rels

        self.layers = nn.ModuleList()
        self.layers.append(
            RGCNConv(num_features,
                     num_hidden[0],
                     num_rels,
                     num_bases,
                     bias=bias))
        for num_layer in range(num_hidden_layer_pairs):
            self.layers.append(
                GATConv(num_hidden[num_layer],
                        num_hidden[num_layer + 1],
                        heads=num_heads[num_layer],
                        concat=True,
                        negative_slope=self.neg_slope,
                        dropout=0,
                        bias=bias))
            # Concatenated heads widen the GAT output by num_heads[num_layer].
            self.layers.append(
                RGCNConv(num_hidden[num_layer + 1] * num_heads[num_layer],
                         num_hidden[num_layer + 1],
                         num_rels,
                         num_bases,
                         bias=bias))
        # BUG FIX: the original used heads=num_heads[num_layer + 1], which
        # raises NameError when num_hidden_layer_pairs == 0 (the loop never
        # ran, so num_layer is unbound). After the loop num_layer + 1 ==
        # num_hidden_layer_pairs, so this index is equivalent and also valid
        # for the zero-pair case.
        self.layers.append(
            GATConv(num_hidden[-2],
                    num_hidden[-1],
                    heads=num_heads[num_hidden_layer_pairs],
                    concat=False,
                    negative_slope=self.neg_slope,
                    dropout=dropout,
                    bias=bias))
Ejemplo n.º 5
0
	def __init__(self):
		"""Feature projections, a 4-layer RGCN encoder, and two decoders.

		Relies on module-level globals: data, n_dim_initial_embedding,
		n_relations, num_bases, embedding_dim.
		"""
		super(Net, self).__init__()
		# Project the two heterogeneous feature sets into a common space.
		self.transform_paper = Linear(data.paper_feature_dim, n_dim_initial_embedding)
		self.transform_mesh = Linear(data.mesh_feature_dim, n_dim_initial_embedding)
		# 4-layer RGCN: n_dim_initial_embedding -> 32 -> 32 -> 32 -> embedding_dim.
		self.conv1 = RGCNConv(n_dim_initial_embedding, 32, n_relations, num_bases=num_bases)
		self.conv2 = RGCNConv(32, 32, n_relations, num_bases=num_bases)
		self.conv3 = RGCNConv(32, 32, n_relations, num_bases=num_bases)
		self.conv4 = RGCNConv(32, embedding_dim, n_relations, num_bases=num_bases)
		# Per-edge-type decoders — presumably used to score node pairs in
		# forward(); confirm against the forward implementation.
		self.decoding_matrix_paper_paper = Linear(embedding_dim, embedding_dim)
		self.decoding_matrix_paper_mesh = Linear(embedding_dim, embedding_dim)
Ejemplo n.º 6
0
 def __init__(self):
     """Two-layer RGCN for node classification.

     Uses module-level `data` / `dataset`. The first layer is featureless:
     in_channels equals the node count (node-id / one-hot input).
     """
     super(Net, self).__init__()
     self.conv1 = RGCNConv(data.num_nodes,
                           16,
                           dataset.num_relations,
                           num_bases=30)  # 23644, 16, 46, 30
     self.conv2 = RGCNConv(16,
                           dataset.num_classes,
                           dataset.num_relations,
                           num_bases=30)  # 16, 2, 46, 30
Ejemplo n.º 7
0
 def __init__(self):
     """Two-layer RGCN: a featureless input layer, then a class-logit layer."""
     super().__init__()
     # Explicit keyword arguments; values are identical to the positional
     # form: (num_nodes -> 16 -> num_classes), 30 bases each.
     self.conv1 = RGCNConv(in_channels=data.num_nodes, out_channels=16,
                           num_relations=dataset.num_relations, num_bases=30)
     self.conv2 = RGCNConv(in_channels=16, out_channels=dataset.num_classes,
                           num_relations=dataset.num_relations, num_bases=30)
Ejemplo n.º 8
0
    def _create_output_gate_layers(self):
        """Create the output-gate convolutions: one over the input features
        and one over the previous hidden state."""
        shared = dict(num_relations=self.num_relations,
                      num_bases=self.num_bases)
        self.conv_x_o = RGCNConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels, **shared)
        self.conv_h_o = RGCNConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels, **shared)
Ejemplo n.º 9
0
 def __init__(self):
     """Entity embedding table followed by two RGCN layers.

     Relies on module-level globals: e_size (entity count), dim_size
     (embedding width), r_size (relation count).
     """
     super(Encoder, self).__init__()
     self.emb = nn.Embedding(e_size, dim_size)
     # NOTE(review): conv1 uses basis decomposition (num_bases=2) while
     # conv2 uses block-diagonal decomposition (num_blocks=4) — confirm this
     # asymmetry is intentional.
     self.conv1 = RGCNConv(in_channels=dim_size,
                           out_channels=dim_size,
                           num_relations=r_size,
                           num_bases=2)
     self.conv2 = RGCNConv(in_channels=dim_size,
                           out_channels=dim_size,
                           num_relations=r_size,
                           num_blocks=4)
Ejemplo n.º 10
0
    def _create_cell_state_layers(self):
        """Create the cell-state convolutions: one over the input features
        and one over the previous hidden state."""
        common = dict(num_relations=self.num_relations,
                      num_bases=self.num_bases)
        self.conv_x_c = RGCNConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels, **common)
        self.conv_h_c = RGCNConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels, **common)
Ejemplo n.º 11
0
 def __init__(self, in_c, out_c):
     """Three RGCN stages followed by a linear head.

     Channel progression: in_c -> 1024 -> 512 -> 128 -> out_c, with 3
     relation types throughout.
     """
     super(GraphRCNN, self).__init__()
     self.conv1 = RGCNConv(in_c, 1024, num_relations=3)
     self.conv2 = RGCNConv(1024, 512, num_relations=3)
     self.conv3 = RGCNConv(512, 128, num_relations=3)
     self.fc = nn.Linear(128, out_c)
Ejemplo n.º 12
0
    def __init__(self, args, data_G2):
        """Node-embedding table plus a two-layer RGCN over the G2 graph.

        Args:
            args: namespace providing class_num / class_num_double.
            data_G2: graph data object providing x, num_nodes, num_relations.
        """
        super(Net, self).__init__()

        # Learnable 200-d embedding per node.
        # NOTE(review): the original comment claimed "26078*500", which
        # disagrees with the actual 200-d width — confirm the intent.
        self.x_g2 = nn.Embedding(data_G2.x.shape[0], 200)

        # Featureless first layer: in_channels equals the node count.
        self.layer_g3_rgcn_1 = RGCNConv(data_G2.num_nodes,
                                        args.class_num_double,
                                        data_G2.num_relations,
                                        num_bases=30)
        self.layer_g3_rgcn_2 = RGCNConv(args.class_num_double,
                                        args.class_num,
                                        data_G2.num_relations,
                                        num_bases=30)
        # Removed leftover `print("for debug")` — it wrote to stdout on every
        # model construction.
Ejemplo n.º 13
0
    def __init__(self):
        """Learnable node embeddings, a two-layer RGCN, and a multi-class
        inner-product decoder.

        Relies on module-level globals: data, in_dim, hidden_size, and
        `embedding` — presumably an int output width despite the name;
        TODO confirm.
        """
        super(Net, self).__init__()
        # Free embedding per node, drawn from a standard normal.
        self.embedding = torch.nn.Parameter(
            torch.Tensor(data.num_nodes, in_dim))
        self.embedding.data.normal_()

        # num_bases == num_relations: a full basis set, i.e. no parameter
        # sharing across relations.
        self.conv1 = RGCNConv(in_dim,
                              hidden_size,
                              data.num_relations,
                              num_bases=data.num_relations)
        self.conv2 = RGCNConv(hidden_size,
                              embedding,
                              data.num_relations,
                              num_bases=data.num_relations)
        self.mclp = multiClassInnerProductDecoder(embedding, data.num_classes)
Ejemplo n.º 14
0
    def _create_forget_gate_layers(self):
        """Create the forget-gate convolutions: one over the input features
        and one over the previous hidden state."""
        shared = dict(num_relations=self.num_relations,
                      num_bases=self.num_bases)
        self.conv_x_f = RGCNConv(in_channels=self.in_channels,
                                 out_channels=self.out_channels, **shared)
        self.conv_h_f = RGCNConv(in_channels=self.out_channels,
                                 out_channels=self.out_channels, **shared)
Ejemplo n.º 15
0
def test_rgcn_conv():
    """Smoke-test RGCNConv with dense features and featureless (x=None)
    input."""
    in_channels, out_channels = 16, 32
    num_relations = 8
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    feats = torch.randn(num_nodes, in_channels)
    edge_type = torch.randint(0, num_relations, (edge_index.size(1), ))

    layer = RGCNConv(in_channels, out_channels, num_relations, num_bases=4)
    assert repr(layer) == 'RGCNConv(16, 32, num_relations=8)'
    assert layer(feats, edge_index, edge_type).size() == (num_nodes,
                                                          out_channels)

    # Featureless mode: x=None makes the layer treat node ids as input.
    layer = RGCNConv(num_nodes, out_channels, num_relations, num_bases=4)
    assert layer(None, edge_index, edge_type).size() == (num_nodes,
                                                         out_channels)
Ejemplo n.º 16
0
 def __init__(self,
              input_dim=10184,
              hid_dim=64,
              out_dim=32,
              num_relations=964,
              num_bases=2):
     """Two-layer RGCN encoder: input_dim -> hid_dim -> out_dim."""
     super(Encoder, self).__init__()
     self.input_dim = input_dim
     self.hid_dim = hid_dim
     self.out_dim = out_dim
     self.num_relations = num_relations
     self.num_bases = num_bases
     # Same calls as before, with num_bases passed by keyword for clarity.
     self.conv1 = RGCNConv(self.input_dim, self.hid_dim,
                           self.num_relations, num_bases=self.num_bases)
     self.conv2 = RGCNConv(self.hid_dim, self.out_dim,
                           self.num_relations, num_bases=self.num_bases)
def test_rgcn_conv_equality(conf):
    """RGCNConv and FastRGCNConv must produce identical outputs for the same
    seed and (num_bases, num_blocks) configuration.

    The original assigned a small 6-edge `edge_index`/`edge_type` pair and
    immediately overwrote both; those dead stores are removed.
    """
    num_bases, num_blocks = conf

    x1 = torch.randn(4, 4)
    # Two copies of a 6-edge pattern, with relation types {0,1} and {2,3}.
    edge_index = torch.tensor([
        [0, 1, 1, 2, 2, 3, 0, 1, 1, 2, 2, 3],
        [0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1],
    ])
    edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3])

    # Identical seeds so both layers start from identical parameters.
    torch.manual_seed(12345)
    conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks)

    torch.manual_seed(12345)
    conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks)

    out1 = conv1(x1, edge_index, edge_type)
    out2 = conv2(x1, edge_index, edge_type)
    assert torch.allclose(out1, out2, atol=1e-6)

    if num_blocks is None:
        # Featureless (x=None) input is only exercised without
        # block-diagonal decomposition.
        out1 = conv1(None, edge_index, edge_type)
        out2 = conv2(None, edge_index, edge_type)
        assert torch.allclose(out1, out2, atol=1e-6)
Ejemplo n.º 18
0
    def __init__(self,
                 num_features,
                 num_classes,
                 num_relations,
                 max_seq_len,
                 hidden_size=64,
                 dropout=0.5,
                 no_cuda=False):
        """
        The Speaker-level context encoder in the form of a 2 layer GCN.
        """
        super(GraphNetwork, self).__init__()

        # Relational conv followed by a plain GraphConv.
        self.conv1 = RGCNConv(num_features, hidden_size, num_relations,
                              num_bases=30)
        self.conv2 = GraphConv(hidden_size, hidden_size)
        # Attention runs over the concatenation of the raw features and the
        # conv output, hence the widened dimension.
        attn_dim = num_features + hidden_size
        self.matchatt = MatchingAttention(attn_dim, attn_dim,
                                          att_type='general2')
        self.linear = nn.Linear(attn_dim, hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.smax_fc = nn.Linear(hidden_size, num_classes)
        self.no_cuda = no_cuda
Ejemplo n.º 19
0
    def __build_model(self):
        """
        Layout the model: a stack of RGCN layers followed by an MLP
        classifier head.
        """
        # These sizes are loop-invariant; the original recomputed them on
        # every iteration.
        in_channels = self.hparams.relation_embedding_dim
        out_channels = self.hparams.relation_embedding_dim
        num_bases = self.hparams.relation_embedding_dim

        self.rgcn_layers = nn.ModuleList([
            RGCNConv(
                in_channels,
                out_channels,
                self.hparams.num_classes,
                num_bases,
                root_weight=self.hparams.root_weight,
                bias=self.hparams.bias,
            ) for _ in range(self.hparams.num_layers)
        ])

        # MLP head. The "classfier" spelling is kept: it is a public
        # attribute name other code may reference.
        self.classfier = []
        inp_dim = (self.hparams.relation_embedding_dim * 2 +
                   self.hparams.relation_embedding_dim)
        outp_dim = self.hparams.hidden_dim
        for _ in range(self.hparams.classify_layers - 1):
            self.classfier.append(nn.Linear(inp_dim, outp_dim))
            self.classfier.append(nn.ReLU())
            inp_dim = outp_dim
        self.classfier.append(nn.Linear(inp_dim, self.hparams.num_classes))
        self.classfier = nn.Sequential(*self.classfier)
Ejemplo n.º 20
0
 def _build_kg_layer(self):
     """Build the knowledge-graph encoder (a single featureless RGCN layer:
     in_channels equals the entity count) and a self-attention layer over
     the resulting entity embeddings."""
     self.kg_encoder = RGCNConv(self.n_entity,
                                self.kg_emb_dim,
                                self.n_relation,
                                num_bases=self.num_bases)
     self.kg_attn = SelfAttentionBatch(self.kg_emb_dim, self.kg_emb_dim)
     logger.debug('[Build kg layer]')
Ejemplo n.º 21
0
def test_to_hetero_with_bases_and_rgcn_equal_output():
    """`to_hetero_with_bases` applied to a homogeneous RGCN must reproduce
    the output of `RGCNConv` with basis decomposition, for both edge_index
    and SparseTensor inputs."""
    torch.manual_seed(1234)

    # Run `RGCN` with basis decomposition:
    x = torch.randn(10, 16)  # 6 paper nodes, 4 author nodes
    adj = (torch.rand(10, 10) > 0.5)
    adj[6:, 6:] = False  # forbid author<->author edges
    edge_index = adj.nonzero(as_tuple=False).t().contiguous()
    row, col = edge_index

    # 0 = paper<->paper, 1 = paper->author, 2 = author->paper
    # (the original comment had types 1 and 2 swapped relative to the
    # assignments below and to the sibling test in this file)
    edge_type = torch.full((edge_index.size(1), ), -1, dtype=torch.long)
    edge_type[(row < 6) & (col < 6)] = 0
    edge_type[(row < 6) & (col >= 6)] = 1
    edge_type[(row >= 6) & (col < 6)] = 2
    assert edge_type.min() == 0  # every edge received a type

    num_bases = 4
    conv = RGCNConv(16, 32, num_relations=3, num_bases=num_bases, aggr='add')
    out1 = conv(x, edge_index, edge_type)

    # Run `to_hetero_with_bases`:
    x_dict = {
        'paper': x[:6],
        'author': x[6:],
    }
    # Per-relation edge indices, shifted into each node type's local id space.
    edge_index_dict = {
        ('paper', '_', 'paper'):
        edge_index[:, edge_type == 0],
        ('paper', '_', 'author'):
        edge_index[:, edge_type == 1] - torch.tensor([[0], [6]]),
        ('author', '_', 'paper'):
        edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]),
    }

    adj_t_dict = {
        key: SparseTensor.from_edge_index(edge_index).t()
        for key, edge_index in edge_index_dict.items()
    }

    metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))
    model = to_hetero_with_bases(RGCN(16, 32), metadata, num_bases=num_bases,
                                 debug=False)

    # Copy the RGCNConv weights so both models compute the same function:
    for i in range(num_bases):
        model.conv.convs[i].lin.weight.data = conv.weight[i].data.t()
        model.conv.convs[i].edge_type_weight.data = conv.comp[:, i].data.t()

    model.lin.weight.data = conv.root.data.t()
    model.lin.bias.data = conv.bias.data

    out2 = model(x_dict, edge_index_dict)
    out2 = torch.cat([out2['paper'], out2['author']], dim=0)
    assert torch.allclose(out1, out2, atol=1e-6)

    # Same comparison with SparseTensor adjacency input.
    out3 = model(x_dict, adj_t_dict)
    out3 = torch.cat([out3['paper'], out3['author']], dim=0)
    assert torch.allclose(out1, out3, atol=1e-6)
Ejemplo n.º 22
0
 def __init__(self, RGCNConv, graph, num_layers, num_nodes, num_classes,
              num_relations, num_hidden):
     """Stack of RGCN layers: featureless input layer, hidden layers, and a
     class-logit output layer.

     Note: the conv class is injected through the `RGCNConv` parameter,
     which shadows any module-level import of the same name.
     """
     super(Net, self).__init__()
     self.num_layers = num_layers
     self.layers = nn.ModuleList()
     #input layer (in_channels == num_nodes, i.e. one-hot node ids)
     self.layers.append(
         RGCNConv(num_nodes, num_hidden, num_relations, num_bases=None))
     # num_layers counts input + hidden + output, hence the "- 2".
     for idx in range(self.num_layers - 2):
         self.layers.append(
             RGCNConv(num_hidden, num_hidden, num_relations,
                      num_bases=None))
     #output layer
     self.output_layer = RGCNConv(num_hidden,
                                  num_classes,
                                  num_relations,
                                  num_bases=None)
def test_to_hetero_and_rgcn_equal_output():
    """`to_hetero` applied to a homogeneous RGCN must reproduce the output
    of a plain `RGCNConv`, for both edge_index and SparseTensor inputs."""
    torch.manual_seed(1234)

    # Run `RGCN`:
    x = torch.randn(10, 16)  # 6 paper nodes, 4 author nodes
    adj = (torch.rand(10, 10) > 0.5)
    adj[6:, 6:] = False  # forbid author<->author edges
    edge_index = adj.nonzero(as_tuple=False).t().contiguous()
    row, col = edge_index

    # # 0 = paper<->paper, 1 = paper->author, 2 = author->paper
    edge_type = torch.full((edge_index.size(1), ), -1, dtype=torch.long)
    edge_type[(row < 6) & (col < 6)] = 0
    edge_type[(row < 6) & (col >= 6)] = 1
    edge_type[(row >= 6) & (col < 6)] = 2
    assert edge_type.min() == 0  # every edge received a type

    conv = RGCNConv(16, 32, num_relations=3)
    out1 = conv(x, edge_index, edge_type)

    # Run `to_hetero`:
    x_dict = {
        'paper': x[:6],
        'author': x[6:],
    }
    # Per-relation edge indices, shifted into each node type's local id space.
    edge_index_dict = {
        ('paper', '_', 'paper'):
        edge_index[:, edge_type == 0],
        ('paper', '_', 'author'):
        edge_index[:, edge_type == 1] - torch.tensor([[0], [6]]),
        ('author', '_', 'paper'):
        edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]),
    }

    node_types, edge_types = list(x_dict.keys()), list(edge_index_dict.keys())

    adj_t_dict = {
        key: SparseTensor.from_edge_index(edge_index).t()
        for key, edge_index in edge_index_dict.items()
    }

    model = to_hetero(RGCN(16, 32), (node_types, edge_types))

    # Copy weights so both models compute the same function: one relation
    # weight per edge type, and the shared root/bias per node type.
    for i, edge_type in enumerate(edge_types):
        weight = model.conv['__'.join(edge_type)].lin.weight
        weight.data = conv.weight[i].data.t()
    for i, node_type in enumerate(node_types):
        model.lin[node_type].weight.data = conv.root.data.t()
        model.lin[node_type].bias.data = conv.bias.data

    out2 = model(x_dict, edge_index_dict)
    out2 = torch.cat([out2['paper'], out2['author']], dim=0)
    assert torch.allclose(out1, out2, atol=1e-6)

    # Same comparison with SparseTensor adjacency input.
    out3 = model(x_dict, adj_t_dict)
    out3 = torch.cat([out3['paper'], out3['author']], dim=0)
    assert torch.allclose(out1, out3, atol=1e-6)
Ejemplo n.º 24
0
    def __init__(self, opt, emb_matrix=None):
        """Sequence encoder (BiLSTM over word/POS/NER embeddings) followed by
        one of several graph-convolution variants, with an MLP head.

        Args:
            opt: configuration dict; keys read here include vocab_size,
                emb_dim, pos_dim, ner_dim, hidden_dim, num_layers, dropout,
                and the sgcn/rgcn/gcn/gat feature flags.
            emb_matrix: optional pretrained embedding matrix, consumed by
                init_weights() (not visible here — confirm).
        """
        super(SynGCN, self).__init__()
        self.drop = nn.Dropout(opt['dropout'])
        self.emb = nn.Embedding(opt['vocab_size'],
                                opt['emb_dim'],
                                padding_idx=constant.PAD_ID)
        # Optional POS / NER embeddings, enabled by nonzero dims.
        if opt['pos_dim'] > 0:
            self.pos_emb = nn.Embedding(len(constant.POS_TO_ID),
                                        opt['pos_dim'],
                                        padding_idx=constant.PAD_ID)
        if opt['ner_dim'] > 0:
            self.ner_emb = nn.Embedding(len(constant.NER_TO_ID),
                                        opt['ner_dim'],
                                        padding_idx=constant.PAD_ID)

        # BiLSTM over the concatenated embeddings; bidirectional, hence the
        # 2 * hidden_dim inputs to the graph layers below.
        input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,\
                dropout=opt['dropout'], bidirectional=True)

        # Graph-encoder variants, selected by config flags (several may be
        # built at once; forward() decides which are used).
        if opt['sgcn']:
            self.deprel_emb = nn.Embedding(len(constant.DEPREL_TO_ID),
                                           opt['deprel_dim'],
                                           padding_idx=constant.PAD_ID)
            self.attn = Attention(opt['deprel_dim'], 2 * opt['hidden_dim'])
            self.sgcn2 = GCNConv(2 * opt['hidden_dim'], opt['hidden_dim'])
        if opt['rgcn']:
            # One relation per dependency label (minus padding); full basis
            # set, i.e. no parameter sharing.
            self.rgcn = RGCNConv(2 * opt['hidden_dim'],
                                 opt['hidden_dim'],
                                 len(constant.DEPREL_TO_ID) - 1,
                                 num_bases=len(constant.DEPREL_TO_ID) - 1)
        if opt['gcn']:
            self.gcn = GCNConv(2 * opt['hidden_dim'], opt['hidden_dim'])
        if opt['gat']:
            self.deprel_emb = nn.Embedding(len(constant.DEPREL_TO_ID),
                                           opt['deprel_dim'],
                                           padding_idx=constant.PAD_ID)
            self.gat = GATConv((2 * opt['hidden_dim'],
                                2 * opt['hidden_dim'] + opt['deprel_dim']),
                               opt['hidden_dim'])

        # output mlp layers
        in_dim = opt['hidden_dim'] * 3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(opt['mlp_layers'] - 1):
            layers += [
                nn.Linear(opt['hidden_dim'], opt['hidden_dim']),
                nn.ReLU()
            ]
        self.out_mlp = nn.Sequential(*layers)
        self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])

        self.opt = opt
        self.topn = self.opt.get('topn', 1e10)
        self.use_cuda = opt['cuda']
        self.emb_matrix = emb_matrix
        self.init_weights()
Ejemplo n.º 25
0
    def __init__(self, graph, hidden_dim=100):
        """Two-layer RGCN encoder for link prediction.

        Args:
            graph: data object providing num_nodes and num_edge_features
                (used as the relation count).
            hidden_dim: width of the relation embeddings and conv layers.
        """
        super(rgcn_link_predict, self).__init__()
        # Should add embedding layer at start  ----   self.emb_e = torch.nn.Embedding(data.num_nodes, hidden_dim, padding_idx=0)
        # Relation embeddings; the +1 row with padding_idx=0 reserves a
        # "no relation" slot.
        self.emb_rel = torch.nn.Embedding(graph.num_edge_features + 1,
                                          hidden_dim,
                                          padding_idx=0)

        # Featureless first layer: in_channels equals the node count.
        self.conv1 = RGCNConv(graph.num_nodes, hidden_dim,
                              graph.num_edge_features, num_bases=30)
        self.conv2 = RGCNConv(hidden_dim, hidden_dim,
                              graph.num_edge_features, num_bases=30)
Ejemplo n.º 26
0
 def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm=True, use_RTE=True):
     """Dispatch wrapper that instantiates one of several graph conv layers.

     Args:
         conv_name: one of 'hgt', 'gcn', 'gat', 'rgcn'.
         in_hid / out_hid: input / output channel widths.
         num_types, num_relations, n_heads, dropout, use_norm, use_RTE:
             forwarded to the layer types that accept them.

     Raises:
         ValueError: for an unrecognized conv_name. The original silently
             left self.base_conv unset, deferring the failure to the first
             forward call as an AttributeError.
     """
     super(GeneralConv, self).__init__()
     self.conv_name = conv_name
     if self.conv_name == 'hgt':
         self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm, use_RTE)
     elif self.conv_name == 'gcn':
         self.base_conv = GCNConv(in_hid, out_hid)
     elif self.conv_name == 'gat':
         self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)
     elif self.conv_name == 'rgcn':
         self.base_conv = RGCNConv(in_hid, out_hid, num_relations)
     else:
         raise ValueError('unknown conv_name: %r' % (conv_name, ))
Ejemplo n.º 27
0
    def __init__(self, in_dim, out_dim, x_g1, data_G2_x, data_G2_num_nodes,
                 data_G2_num_edges):
        """Two learnable node-feature tables, an S->C conv, and a G2 RGCN.

        Args:
            in_dim / out_dim: channel sizes for the STCConv layer.
            x_g1 / data_G2_x: initial node features, promoted to Parameters.
            data_G2_num_nodes: G2 node count; used as both the in and out
                channel width of the RGCN layer.
            data_G2_num_edges: passed as the RGCN relation count —
                NOTE(review): an edge *count* used where a relation-type
                count is expected; confirm upstream.
        """
        super(Net, self).__init__()
        self.x_g1 = Parameter(x_g1)
        self.layer_s_to_c = STCConv(in_dim, out_dim)

        self.x_g2 = Parameter(data_G2_x)
        self.layer_g2_rgcn = RGCNConv(data_G2_num_nodes,
                                      data_G2_num_nodes,
                                      data_G2_num_edges,
                                      num_bases=30)
Ejemplo n.º 28
0
    def __init__(self):
        """One RGCN layer (2 relation types) + batch norm + linear head.

        Uses the module-level `dataset` for feature and class counts.
        """
        super(Net, self).__init__()

        num_features = dataset.num_features
        dim = 32

        self.conv1 = RGCNConv(num_features, dim, num_relations=2)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        #self.fc1 = Linear(dim, dim)

        self.fc2 = Linear(dim, dataset.num_classes)
Ejemplo n.º 29
0
    def _build_kg_layer(self):
        """Build the knowledge-graph encoders: an RGCN over the entity
        ("db") graph, a GCN over the concept ("word") graph, a
        self-attention layer for each, and a gate that fuses the two
        representations."""
        # db encoder — featureless RGCN: in_channels equals the entity count
        self.entity_encoder = RGCNConv(self.n_entity, self.kg_emb_dim, self.n_relation, self.num_bases)
        self.entity_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # concept encoder
        self.word_encoder = GCNConv(self.kg_emb_dim, self.kg_emb_dim)
        self.word_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # gate mechanism fusing the entity and concept representations
        self.gate_layer = GateLayer(self.kg_emb_dim)

        logger.debug('[Finish build kg layer]')
Ejemplo n.º 30
0
    def __init__(self,
                 node_input_dim=15,
                 num_edge_type=5,
                 output_dim=12,
                 node_hidden_dim=64,
                 num_basis=-1,
                 num_step_prop=6,
                 num_step_set2set=6):
        """RGCN message passing with a Set2Set readout.

        Args:
            node_input_dim: raw node-feature width.
            num_edge_type: number of relation types.
            output_dim: final prediction width.
            node_hidden_dim: hidden width used throughout.
            num_basis: RGCN basis count; negative disables sharing by using
                one basis per edge type.
            num_step_prop: number of propagation steps (stored for forward).
            num_step_set2set: Set2Set processing steps.
        """
        super(RGCN, self).__init__()
        self.num_step_prop = num_step_prop
        self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
        # The original if/else duplicated the RGCNConv call, differing only
        # in the basis count; collapse to a single construction.
        num_bases = num_edge_type if num_basis < 0 else num_basis
        self.conv = RGCNConv(node_hidden_dim, node_hidden_dim,
                             num_edge_type, num_bases)

        self.set2set = Set2Set(node_hidden_dim,
                               processing_steps=num_step_set2set)
        self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
        self.lin2 = nn.Linear(node_hidden_dim, output_dim)