Example No. 1
    def __init__(self, num_layer, in_dim, emb_dim, rank, drop_ratio=0.5):
        '''
            num_layer (int): number of GNN message passing layers
            emb_dim (int): node embedding dimensionality
        '''

        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.rank = rank

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        ### List of GNN layers
        self.convs = torch.nn.ModuleList()
        #self.batch_norms = torch.nn.ModuleList()

        #self.convs.append(DGLGraphConv(9, emb_dim, rank, allow_zero_in_degree=True))
        self.convs.append(
            dglnn.GraphConv(in_dim, emb_dim, allow_zero_in_degree=True))

        for layer in range(num_layer - 1):
            #self.convs.append(DGLGraphConv(emb_dim, emb_dim, rank, allow_zero_in_degree=True))
            self.convs.append(
                dglnn.GraphConv(emb_dim, emb_dim, allow_zero_in_degree=True))
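
The snippet above only defines the constructor. A minimal forward pass these layers suggest is sketched below; the call signature, the ReLU nonlinearity, and the dropout placement are assumptions rather than part of the original example.

import torch.nn.functional as fn  # assumed import; the original module may use a different alias

def forward(self, g, x):
    # push the node features through every GraphConv layer
    h = x
    for layer, conv in enumerate(self.convs):
        h = conv(g, h)
        # no nonlinearity after the final layer (assumption)
        if layer < self.num_layer - 1:
            h = fn.relu(h)
        h = fn.dropout(h, p=self.drop_ratio, training=self.training)
    return h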
Example No. 2
 def __init__(self, in_dim, hidden_dim, n_class):
     super(GCN, self).__init__()
     self.GConv1 = dglnn.GraphConv(
         in_dim,
         hidden_dim)  # in_dim is the dimensionality of each node's features, not the number of nodes; the graph structure itself comes entirely from the input graph
     self.GConv2 = dglnn.GraphConv(hidden_dim, hidden_dim)
     self.fc = nn.Linear(hidden_dim, n_class)
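
Only the constructor is shown. A plausible forward pass (a sketch, not taken from the source) would run both GraphConv layers, pool node states per graph, and classify; the mean readout and the temporary feature key 'h' are assumptions.

import dgl
import torch.nn.functional as fn

def forward(self, g, feats):
    # two rounds of message passing with ReLU in between
    h = fn.relu(self.GConv1(g, feats))
    h = fn.relu(self.GConv2(g, h))
    # graph-level readout: average the node representations of each graph
    with g.local_scope():
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')
    # map the pooled representation to class logits
    return self.fc(hg)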
Example No. 3
def test_graph_conv0(out_dim):
    g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=True, ctx=ctx)

    conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test pickle
    th.save(conv, tmp_buffer)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, out_dim)
    conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, out_dim)
    conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)
Example No. 4
def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    print(conv)
    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    conv = nn.GraphConv(5, 2)
    if F.gpu_ctx():
        conv = conv.to(ctx)
    # test#3: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: basic
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not F.allclose(old_weight, new_weight)
Example No. 5
    def __init__(self, in_feats, hid_feats, out_feats, rel_names):
        super().__init__()

        self.conv1 = dglnn.HeteroGraphConv(
            {rel: dglnn.GraphConv(in_feats, hid_feats)
             for rel in rel_names},
            aggregate='sum')
        self.conv2 = dglnn.HeteroGraphConv(
            {rel: dglnn.GraphConv(hid_feats, out_feats)
             for rel in rel_names},
            aggregate='sum')
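
A two-layer forward pass in the style of DGL's heterogeneous RGCN tutorial would complete this module; the ReLU between the layers is an assumption, not part of the snippet.

import torch.nn.functional as fn

def forward(self, graph, inputs):
    # inputs: dict mapping node type -> feature tensor
    h = self.conv1(graph, inputs)
    h = {ntype: fn.relu(v) for ntype, v in h.items()}
    h = self.conv2(graph, h)
    return h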
Example No. 6
    def __init__(
        self,
        in_feats,
        n_hidden,
        n_classes,
        n_layers,
        activation,
        dropout,
    ):
        super().__init__()
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.n_classes = n_classes

        self.convs = nn.ModuleList()
        self.bns = nn.ModuleList()

        for i in range(n_layers):
            in_hidden = n_hidden if i > 0 else in_feats
            out_hidden = n_hidden if i < n_layers - 1 else n_classes
            self.convs.append(dglnn.GraphConv(in_hidden, out_hidden, "both"))
            if i < n_layers - 1:
                self.bns.append(nn.BatchNorm1d(out_hidden))

        self.dropout = nn.Dropout(dropout)
        self.activation = activation
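
The matching forward pass is not included. A sketch consistent with how the batch norms are created only for non-final layers might be the following; the exact ordering of norm, activation, and dropout is an assumption.

def forward(self, graph, feat):
    h = feat
    for i in range(self.n_layers):
        h = self.convs[i](graph, h)
        # batch norm, activation, and dropout on every layer except the last
        if i < self.n_layers - 1:
            h = self.bns[i](h)
            h = self.activation(h)
            h = self.dropout(h)
    return h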
Example No. 7
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout, use_linear):
        super().__init__()
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.n_classes = n_classes
        self.use_linear = use_linear

        self.convs = nn.ModuleList()
        if use_linear:
            self.linear = nn.ModuleList()
        self.bns = nn.ModuleList()

        for i in range(n_layers):
            in_hidden = n_hidden if i > 0 else in_feats
            out_hidden = n_hidden if i < n_layers - 1 else n_classes
            bias = i == n_layers - 1

            self.convs.append(
                dglnn.GraphConv(in_hidden, out_hidden, "both", bias=bias))
            if use_linear:
                self.linear.append(nn.Linear(in_hidden, out_hidden,
                                             bias=False))
            if i < n_layers - 1:
                self.bns.append(nn.BatchNorm1d(out_hidden))

        self.dropout0 = nn.Dropout(min(0.1, dropout))
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
Example No. 8
 def __init__(self, in_dim, h_dim, out_dim, n_layers, activation, dropout, rel_names):
     super().__init__()
     self.h_dim = h_dim
     self.out_dim = out_dim
     self.in_dim = in_dim
     self.layers = nn.ModuleList()
     # i2h: input-to-hidden layer
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.GraphConv(in_dim, h_dim) for rel in rel_names}))
     # h2h: hidden-to-hidden layers
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.HeteroGraphConv(
             {rel: dglnn.GraphConv(h_dim, h_dim) for rel in rel_names}))
     # h2o: hidden-to-output layer
     self.layers.append(dglnn.HeteroGraphConv(
         {rel: dglnn.GraphConv(h_dim, out_dim) for rel in rel_names}))
     self.dropout = nn.Dropout(dropout)
     self.activation = activation
Example No. 9
def test_graph_conv2(g, norm, weight, bias):
    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, 2)).to(F.ctx())
    nsrc = g.number_of_nodes() if isinstance(g, dgl.DGLGraph) else g.number_of_src_nodes()
    ndst = g.number_of_nodes() if isinstance(g, dgl.DGLGraph) else g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    if weight:
        h = conv(g, h)
    else:
        h = conv(g, h, weight=ext_w)
    assert h.shape == (ndst, 2)
Example No. 10
    def __init__(self, input_size: int, hidden_size: int):
        """
        参数:
        input_size  输入尺寸
        hidden_size 输出尺寸/隐藏尺寸
        若是GAT,则至少还有num_heads

        输出与隐藏状态之间经过线性变换
        """
        super(GRUCell, self).__init__()  # initialize the parent class
        # build the graph-convolution layers
        self.ConvIR = dglnn.GraphConv(input_size + hidden_size,
                                      hidden_size,
                                      allow_zero_in_degree=True)
        self.ConvIZ = dglnn.GraphConv(input_size + hidden_size,
                                      hidden_size,
                                      allow_zero_in_degree=True)
        self.ConvIH = dglnn.GraphConv(input_size + hidden_size,
                                      hidden_size,
                                      allow_zero_in_degree=True)
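
The gate computations themselves are not shown. A graph GRU-cell forward along standard GRU lines could look as follows; the call signature and the gate convention (which term is multiplied by z) are assumptions.

import torch

def forward(self, graph, x, h):
    # gates are computed from the concatenation of the input and the previous hidden state
    xh = torch.cat((x, h), dim=1)
    r = torch.sigmoid(self.ConvIR(graph, xh))       # reset gate
    z = torch.sigmoid(self.ConvIZ(graph, xh))       # update gate
    xrh = torch.cat((x, r * h), dim=1)
    h_tilde = torch.tanh(self.ConvIH(graph, xrh))   # candidate state
    # convex combination of the old state and the candidate
    return z * h + (1.0 - z) * h_tilde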
Example No. 11
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, readout,
                 activation_func, dropout, device):
        super(Classifier, self).__init__()
        self.device = device
        self.readout = readout
        self.layers = nn.ModuleList()

        self.layers.append(
            conv.GraphConv(in_dim, hidden_dim, activation=activation_func))

        # hidden layers
        for k in range(0, hidden_layers):
            self.layers.append(
                conv.GraphConv(hidden_dim,
                               hidden_dim,
                               activation=activation_func))

        # last layer
        self.dropout = nn.Dropout(p=dropout)
        self.classify = nn.Linear(hidden_dim, n_classes)
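
A forward pass is not part of the snippet; a sketch that dispatches on the configured readout and applies the dropout/linear head might be the following. The readout names and the dropout placement are assumptions.

import dgl

def forward(self, g, feats):
    h = feats
    for layer in self.layers:
        h = layer(g, h)
    with g.local_scope():
        g.ndata['h'] = h
        # pool node states into one vector per graph using the configured readout
        if self.readout == 'max':
            hg = dgl.max_nodes(g, 'h')
        elif self.readout == 'sum':
            hg = dgl.sum_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')
    # dropout before the final linear classifier (placement is an assumption)
    return self.classify(self.dropout(hg))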
Example No. 12
    def __init__(self, num_nodes, in_feats, n_hidden, n_layers, activation,
                 dropout):
        super(GCN, self).__init__()

        self.num_nodes = num_nodes
        self.embedding = nn.Embedding(num_nodes, in_feats)
        self.layers = nn.ModuleList()
        # input layer
        self.layers.append(
            dglnn.GraphConv(in_feats,
                            n_hidden,
                            activation=activation,
                            allow_zero_in_degree=True))
        # hidden layers
        for i in range(1, n_layers):
            self.layers.append(
                dglnn.GraphConv(n_hidden,
                                n_hidden,
                                activation=activation,
                                allow_zero_in_degree=True))
        self.dropout = nn.Dropout(dropout)
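
Since the input features come from a learned embedding table, a plausible (assumed) forward pass takes node IDs rather than a feature matrix:

def forward(self, graph, node_ids):
    # trainable input features are looked up from the embedding table by node ID
    h = self.embedding(node_ids)
    for i, layer in enumerate(self.layers):
        if i != 0:
            h = self.dropout(h)
        h = layer(graph, h)
    return h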
Example No. 13
    def __init__(self,
                 layer_type,
                 block_type,
                 activation,
                 normalization=None,
                 **core_layer_hyperparms):
        super(GNNBasicBlock, self).__init__()
        self.layer_type = layer_type
        self.block_type = block_type

        if self.layer_type in ['gcn', 'gcn_res']:
            self.core_layer_type = 'gcn'
            self.core_layer = dglnn.GraphConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                bias=core_layer_hyperparms['bias'])
        elif self.layer_type in ['gat', 'gat_res']:
            self.core_layer_type = 'gat'
            self.core_layer = dglnn.GATConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=int(core_layer_hyperparms['out_channels'] /
                              core_layer_hyperparms['num_heads']),
                num_heads=core_layer_hyperparms['num_heads'],
                feat_drop=core_layer_hyperparms['feat_drop'],
                attn_drop=core_layer_hyperparms['attn_drop'])
        elif self.layer_type in ['sage', 'sage_res']:
            self.core_layer_type = 'sage'
            self.core_layer = dglnn.SAGEConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                aggregator_type='mean',
                bias=core_layer_hyperparms['bias'])

        else:
            raise NotImplementedError

        acti_type, acti_hyperparam = activation
        if acti_type == 'relu':
            self.activation = nn.ReLU(inplace=acti_hyperparam)
        elif acti_type == 'lkrelu':
            self.activation = nn.LeakyReLU(negative_slope=acti_hyperparam)
        elif acti_type == 'elu':
            self.activation = nn.ELU(inplace=acti_hyperparam)
        elif acti_type == 'no':
            self.activation = None
        else:
            raise NotImplementedError

        if 'n' in block_type.split('_'):
            self.node_norm = get_normalization(
                norm_type=normalization,
                num_channels=core_layer_hyperparms['out_channels'])
        self.block_type_str = self.get_block_type_str()
Example No. 14
    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases,
                                               len(self.rel_names))
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        # bias
        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))

        self.dropout = nn.Dropout(dropout)
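
The forward pass is omitted above. A sketch in the spirit of DGL's RGCN entity-classification example follows; it is not taken from the original snippet and assumes a full heterograph rather than a sampled block (so the self-loop term indexes inputs directly).

import torch

def forward(self, g, inputs):
    # inputs: dict mapping node type -> feature tensor
    g = g.local_var()
    if self.use_weight:
        # one (in_feat, out_feat) matrix per relation, optionally composed from shared bases
        weight = self.basis() if self.use_basis else self.weight
        wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                 for i, w in enumerate(torch.split(weight, 1, dim=0))}
    else:
        wdict = {}
    # pass the per-relation weights down to the underlying GraphConv modules
    hs = self.conv(g, inputs, mod_kwargs=wdict)

    def _apply(ntype, h):
        if self.self_loop:
            h = h + torch.matmul(inputs[ntype], self.loop_weight)
        if self.bias:
            h = h + self.h_bias
        if self.activation:
            h = self.activation(h)
        return self.dropout(h)

    return {ntype: _apply(ntype, h) for ntype, h in hs.items()}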
Example No. 15
    def __init__(self,
                 in_feats,
                 n_hidden,
                 out_feats,
                 device,
                 distype='Norm',
                 categorical_dim=None,
                 **kwargs):
        super(SGD_MRVGAE, self).__init__()
        self.n_hidden = n_hidden
        self.cat = categorical_dim
        self.distype = distype
        self.device = device

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()
        ## encoder
        self.enc_gcn = nn.ModuleList()
        self.enc_gcn.append(dglnn.GraphConv(in_feats, n_hidden[0]))
        self.enc_gcn.append(dglnn.GraphConv(n_hidden[0], n_hidden[1]))
        #self.enc_gcn1 = dglnn.GraphConv(in_feats,n_hidden[0])
        #self.enc_gcn2 = dglnn.GraphConv(n_hidden[0],n_hidden[1])
        if distype == 'Rel':
            ## variational inference
            pass
            ## decoder
            pass
        if distype == 'Both':
            # variational inference
            # instead of n_hidden[1], the input dimension is n_hidden[2]
            self.vi_mlp_mean = nn.Linear(n_hidden[1], n_hidden[2])
            self.vi_mlp_logstd = nn.Linear(n_hidden[1], n_hidden[2])
            self.vi_q = nn.Linear(n_hidden[1], categorical_dim)
            ## decoder
            self.dec_mlp1 = nn.Linear(n_hidden[3], n_hidden[4])
            self.dec_mlpX = nn.Linear(n_hidden[4], out_feats)
            ## edge classifier
            self.cls_mlpA = nn.Linear(n_hidden[3],
                                      categorical_dim)  # outputs a one-hot encoding
Example No. 16
def test_graph_conv(idtype, g, norm, weight, bias):
    # Test one tensor input
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, 2)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    if weight:
        h_out = conv(g, h)
    else:
        h_out = conv(g, h, weight=ext_w)
    assert h_out.shape == (ndst, 2)
Example No. 17
    def __init__(self,
                 in_feats,
                 hid_feats,
                 out_feats,
                 rel_names,
                 att_out_feats,
                 relation_agg="sum"):
        super().__init__()

        logger.warning("rel_names: {}".format(rel_names))
        self.conv1 = HeteroGraphAttentionConv(
            {rel: dglnn.GraphConv(in_feats, hid_feats)
             for rel in rel_names},
            aggregate=relation_agg,
            att_in_feats=out_feats,
            att_out_feats=att_out_feats)
        self.conv2 = HeteroGraphAttentionConv(
            {rel: dglnn.GraphConv(hid_feats, out_feats)
             for rel in rel_names},
            aggregate=relation_agg,
            att_in_feats=out_feats,
            att_out_feats=att_out_feats)
Example No. 18
def test_graph_conv_e_weight(idtype, g, norm, weight, bias):
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, 2)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    e_w = g.edata['scalar_w']
    if weight:
        h_out = conv(g, h, edge_weight=e_w)
    else:
        h_out = conv(g, h, weight=ext_w, edge_weight=e_w)
    assert h_out.shape == (ndst, 2)
Example No. 19
def test_graph_conv_bi(idtype, g, norm, weight, bias, out_dim):
    # Test a pair of tensor inputs
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    h_dst = F.randn((ndst, out_dim)).to(F.ctx())
    if weight:
        h_out = conv(g, (h, h_dst))
    else:
        h_out = conv(g, (h, h_dst), weight=ext_w)
    assert h_out.shape == (ndst, out_dim)
Example No. 20
def test_graph_conv_e_weight_norm(idtype, g, norm, weight, bias, out_dim):
    g = g.astype(idtype).to(F.ctx())
    conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, out_dim)).to(F.ctx())
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    edgenorm = nn.EdgeWeightNorm(norm=norm)
    norm_weight = edgenorm(g, g.edata['scalar_w'])
    if weight:
        h_out = conv(g, h, edge_weight=norm_weight)
    else:
        h_out = conv(g, h, weight=ext_w, edge_weight=norm_weight)
    assert h_out.shape == (ndst, out_dim)
Example No. 21
    def __init__(
        self,
        in_feats: int,
        out_feats: int,
        rel_names: List[str],
        num_bases: int,
        norm: str = 'right',
        weight: bool = True,
        bias: bool = True,
        activation: Callable[[torch.Tensor], torch.Tensor] = None,
        dropout: float = None,
        self_loop: bool = False,
    ):
        super().__init__()
        self._rel_names = rel_names
        self._num_rels = len(rel_names)
        self._conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feats,
                                 out_feats,
                                 norm=norm,
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })
        self._use_weight = weight
        self._use_basis = num_bases < self._num_rels and weight
        self._use_bias = bias
        self._activation = activation
        self._dropout = nn.Dropout(dropout) if dropout is not None else None
        self._use_self_loop = self_loop

        if weight:
            if self._use_basis:
                self.basis = dglnn.WeightBasis((in_feats, out_feats),
                                               num_bases, self._num_rels)
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(self._num_rels, in_feats, out_feats))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_feats))
            nn.init.zeros_(self.bias)

        if self_loop:
            self.self_loop_weight = nn.Parameter(
                torch.Tensor(in_feats, out_feats))
            nn.init.xavier_uniform_(self.self_loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
Example No. 22
def test_dense_graph_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    dense_conv = nn.DenseGraphConv(5, 2, norm=False, bias=True)
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((100, 5))
    conv = conv.to(ctx)
    dense_conv = dense_conv.to(ctx)
    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)
Example No. 23
def test_dense_graph_conv(norm_type, g):
    ctx = F.ctx()
    # TODO(minjie): enable the following option after #1385
    adj = g.adjacency_matrix(ctx=ctx).to_dense()
    conv = nn.GraphConv(5, 2, norm=norm_type, bias=True)
    dense_conv = nn.DenseGraphConv(5, 2, norm=norm_type, bias=True)
    dense_conv.weight.data = conv.weight.data
    dense_conv.bias.data = conv.bias.data
    feat = F.randn((g.number_of_src_nodes(), 5))
    conv = conv.to(ctx)
    dense_conv = dense_conv.to(ctx)
    out_conv = conv(g, feat)
    out_dense_conv = dense_conv(adj, feat)
    assert F.allclose(out_conv, out_dense_conv)
Example No. 24
def test_graph_conv():
    g = dgl.DGLGraph(nx.path_graph(3))
    adj = g.adjacency_matrix()

    conv = nn.GraphConv(5, 2, norm=False, bias=True)
    print(conv)
    # test#1: basic
    h0 = th.ones((3, 5))
    h1 = conv(h0, g)
    assert th.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    # test#2: more-dim
    h0 = th.ones((3, 5, 5))
    h1 = conv(h0, g)
    assert th.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))

    conv = nn.GraphConv(5, 2)
    # test#3: basic
    h0 = th.ones((3, 5))
    h1 = conv(h0, g)
    # test#4: basic
    h0 = th.ones((3, 5, 5))
    h1 = conv(h0, g)

    conv = nn.GraphConv(5, 2)
    # test#3: basic
    h0 = th.ones((3, 5))
    h1 = conv(h0, g)
    # test#4: basic
    h0 = th.ones((3, 5, 5))
    h1 = conv(h0, g)

    # test reset_parameters
    old_weight = deepcopy(conv.weight.data)
    conv.reset_parameters()
    new_weight = conv.weight.data
    assert not th.allclose(old_weight, new_weight)
Example No. 25
    def __init__(self, config):
        super(GAIN_GloVe, self).__init__()
        self.config = config

        word_emb_size = config.word_emb_size
        vocabulary_size = config.vocabulary_size
        encoder_input_size = word_emb_size
        self.activation = nn.Tanh() if config.activation == 'tanh' else nn.ReLU()

        self.word_emb = nn.Embedding(vocabulary_size, word_emb_size, padding_idx=config.word_pad)
        if config.pre_train_word:
            self.word_emb = nn.Embedding(config.data_word_vec.shape[0], word_emb_size, padding_idx=config.word_pad)
            self.word_emb.weight.data.copy_(torch.from_numpy(config.data_word_vec[:, :word_emb_size]))

        self.word_emb.weight.requires_grad = config.finetune_word
        if config.use_entity_type:
            encoder_input_size += config.entity_type_size
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)

        if config.use_entity_id:
            encoder_input_size += config.entity_id_size
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)

        self.encoder = BiLSTM(encoder_input_size, config)

        self.gcn_dim = config.gcn_dim
        assert self.gcn_dim == 2 * config.lstm_hidden_size, 'gcn dim should be the lstm hidden dim * 2'
        rel_name_lists = ['intra', 'inter', 'global']
        self.GCN_layers = nn.ModuleList([dglnn.GraphConv(self.gcn_dim, self.gcn_dim, norm='right', weight=True,
                                                         bias=True, activation=self.activation)
                                         for i in range(config.gcn_layers)])

        self.bank_size = self.config.gcn_dim * (self.config.gcn_layers + 1)
        self.dropout = nn.Dropout(self.config.dropout)

        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 4 + self.gcn_dim * 5, self.bank_size * 2),
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )

        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)
        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)
        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)
Example No. 26
    def __init__(self, config):
        super(GAIN_BERT, self).__init__()
        self.config = config
        self.activation = nn.Tanh() if config.activation == 'tanh' else nn.ReLU()

        if config.use_entity_type:
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)

        if config.use_entity_id:
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)

        self.bert = BertModel.from_pretrained(config.bert_path)
        if config.bert_fix:
            for p in self.bert.parameters():
                p.requires_grad = False

        self.gcn_dim = config.gcn_dim
        assert self.gcn_dim == config.bert_hid_size + config.entity_id_size + config.entity_type_size

        rel_name_lists = ['intra', 'inter', 'global']
        self.GCN_layers = nn.ModuleList([dglnn.GraphConv(self.gcn_dim, self.gcn_dim, norm='right', weight=True,
                                                         bias=True, activation=self.activation)
                                         for i in range(config.gcn_layers)])

        self.bank_size = self.gcn_dim * (self.config.gcn_layers + 1)

        self.dropout = nn.Dropout(self.config.dropout)

        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 4 + self.gcn_dim * 5, self.bank_size * 2),
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )

        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)

        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)

        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)
Example No. 27
def test_graph_conv2(g, norm, weight, bias):
    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias).to(F.ctx())
    ext_w = F.randn((5, 2)).to(F.ctx())
    nsrc = g.number_of_nodes() if isinstance(
        g, dgl.DGLGraph) else g.number_of_src_nodes()
    ndst = g.number_of_nodes() if isinstance(
        g, dgl.DGLGraph) else g.number_of_dst_nodes()
    h = F.randn((nsrc, 5)).to(F.ctx())
    h_dst = F.randn((ndst, 2)).to(F.ctx())
    if weight:
        h_out = conv(g, h)
    else:
        h_out = conv(g, h, weight=ext_w)
    assert h_out.shape == (ndst, 2)

    if not isinstance(g, dgl.DGLGraph) and len(g.ntypes) == 2:
        # bipartite, should also accept pair of tensors
        if weight:
            h_out2 = conv(g, (h, h_dst))
        else:
            h_out2 = conv(g, (h, h_dst), weight=ext_w)
        assert h_out2.shape == (ndst, 2)
        assert F.array_equal(h_out, h_out2)
Example No. 28
def test_hetero_conv(agg, idtype):
    g = dgl.heterograph(
        {
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])
        },
        idtype=idtype,
        device=F.ctx())
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)
        }, agg)
    conv = conv.to(F.ctx())
    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    h = conv(g, {'user': uf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(g, {'store': sf})
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with pair input
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.SAGEConv(2, 3, 'mean'),
            'plays': nn.SAGEConv((2, 4), 4, 'mean'),
            'sells': nn.SAGEConv(3, 4, 'mean')
        }, agg)
    conv = conv.to(F.ctx())

    h = conv(g, ({'user': uf}, {'user': uf, 'game': gf}))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 1, 4)

    # pair input requires both src and dst type features to be provided
    h = conv(g, ({'user': uf}, {'game': gf}))
    assert set(h.keys()) == {'game'}
    if agg != 'stack':
        assert h['game'].shape == (4, 4)
    else:
        assert h['game'].shape == (4, 1, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2

        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))

    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3
    }, agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows': (1, ), 'plays': (1, )}
    mod_kwargs = {'sells': {'arg2': 'abc'}}
    h = conv(g, {
        'user': uf,
        'store': sf
    },
             mod_args=mod_args,
             mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1
Example No. 29
 def __init__(self, input_dim, activation_fn, k):
     super(SagPooling, self).__init__()
     self.layers = nn.ModuleList()
     self.layers.append(conv.GraphConv(input_dim, 1, norm=True, bias=True, activation=activation_fn))
     self.k = k
     print('Keeping', k, 'percent of nodes')
Example No. 30
def test_hetero_conv(agg, idtype):
    g = dgl.heterograph(
        {
            ('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
            ('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
            ('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])
        },
        idtype=idtype,
        device=F.ctx())
    conv = nn.HeteroGraphConv(
        {
            'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
            'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
            'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)
        }, agg)
    conv = conv.to(F.ctx())

    # test pickle
    th.save(conv, tmp_buffer)

    uf = F.randn((4, 2))
    gf = F.randn((4, 4))
    sf = F.randn((2, 3))

    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    block = dgl.to_block(g.to(F.cpu()), {
        'user': [0, 1, 2, 3],
        'game': [0, 1, 2, 3],
        'store': []
    }).to(F.ctx())
    h = conv(block, ({
        'user': uf,
        'game': gf,
        'store': sf
    }, {
        'user': uf,
        'game': gf,
        'store': sf[0:0]
    }))
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    h = conv(block, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}
    if agg != 'stack':
        assert h['user'].shape == (4, 3)
        assert h['game'].shape == (4, 4)
    else:
        assert h['user'].shape == (4, 1, 3)
        assert h['game'].shape == (4, 2, 4)

    # test with mod args
    class MyMod(th.nn.Module):
        def __init__(self, s1, s2):
            super(MyMod, self).__init__()
            self.carg1 = 0
            self.carg2 = 0
            self.s1 = s1
            self.s2 = s2

        def forward(self, g, h, arg1=None, *, arg2=None):
            if arg1 is not None:
                self.carg1 += 1
            if arg2 is not None:
                self.carg2 += 1
            return th.zeros((g.number_of_dst_nodes(), self.s2))

    mod1 = MyMod(2, 3)
    mod2 = MyMod(2, 4)
    mod3 = MyMod(3, 4)
    conv = nn.HeteroGraphConv({
        'follows': mod1,
        'plays': mod2,
        'sells': mod3
    }, agg)
    conv = conv.to(F.ctx())
    mod_args = {'follows': (1, ), 'plays': (1, )}
    mod_kwargs = {'sells': {'arg2': 'abc'}}
    h = conv(g, {
        'user': uf,
        'game': gf,
        'store': sf
    },
             mod_args=mod_args,
             mod_kwargs=mod_kwargs)
    assert mod1.carg1 == 1
    assert mod1.carg2 == 0
    assert mod2.carg1 == 1
    assert mod2.carg2 == 0
    assert mod3.carg1 == 0
    assert mod3.carg2 == 1

    # conv on a graph without any edges
    for etype in g.etypes:
        g = dgl.remove_edges(g, g.edges(form='eid', etype=etype), etype=etype)
    assert g.num_edges() == 0
    h = conv(g, {'user': uf, 'game': gf, 'store': sf})
    assert set(h.keys()) == {'user', 'game'}

    block = dgl.to_block(g.to(F.cpu()), {
        'user': [0, 1, 2, 3],
        'game': [0, 1, 2, 3],
        'store': []
    }).to(F.ctx())
    h = conv(block, ({
        'user': uf,
        'game': gf,
        'store': sf
    }, {
        'user': uf,
        'game': gf,
        'store': sf[0:0]
    }))
    assert set(h.keys()) == {'user', 'game'}