Example #1
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 dropout,
                 gnn_type='gcn'):
        super(GCN, self).__init__()

        self.convs = torch.nn.ModuleList()
        if gnn_type == 'gat':
            # third positional argument is num_heads; with one head each
            # layer's output width is hidden_channels * 1
            self.convs.append(nn.GATConv(in_channels, hidden_channels, 1))
            for _ in range(num_layers - 2):
                self.convs.append(
                    nn.GATConv(hidden_channels * 1, hidden_channels, 1))
            self.convs.append(nn.GATConv(hidden_channels * 1, out_channels, 1))
        elif gnn_type == 'gcn':
            self.convs.append(
                nn.GraphConv(in_channels, hidden_channels, norm='none'))
            for _ in range(num_layers - 2):
                self.convs.append(
                    nn.GraphConv(hidden_channels, hidden_channels,
                                 norm='none'))
            self.convs.append(
                nn.GraphConv(hidden_channels, out_channels, norm='none'))

        self.dropout = dropout
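
The forward pass is not part of this snippet. A minimal sketch consistent with the constructor, assuming `nn` is `dgl.nn`, `import torch.nn.functional as F`, and ReLU plus dropout between layers:

    def forward(self, g, x):
        for i, conv in enumerate(self.convs):
            x = conv(g, x)
            if x.dim() == 3:      # GATConv output is (N, num_heads, out_dim)
                x = x.flatten(1)  # merge the single attention head
            if i != len(self.convs) - 1:
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x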
Example #2
    def __init__(self, in_size, hid_size, out_size):
        super().__init__()
        self.layers = nn.ModuleList()
        # two-layer GCN
        self.layers.append(dglnn.GraphConv(in_size, hid_size, activation=F.relu))
        self.layers.append(dglnn.GraphConv(hid_size, out_size))
        self.dropout = nn.Dropout(0.5)
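
The matching forward is not shown; a sketch that applies the dropout between the two layers, following the pattern in DGL's GCN example:

    def forward(self, g, features):
        h = features
        for i, layer in enumerate(self.layers):
            if i != 0:
                h = self.dropout(h)
            h = layer(g, h)
        return h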
Example #3
    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop
        self.batchnorm = False

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases,
                                               len(self.rel_names))
            else:
                self.weight = nn.Parameter(
                    th.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        # bias
        if bias:
            self.h_bias = nn.Parameter(th.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(th.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
        # define batch norm layer
        if self.batchnorm:
            self.bn = nn.BatchNorm1d(out_feat)

        self.dropout = nn.Dropout(dropout)
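
The forward pass is omitted here. A sketch following the pattern in DGL's RGCN example, where the basis-composed or per-relation weights are fed to the hetero conv through `mod_kwargs`:

    def forward(self, g, inputs):
        # `inputs` maps node type -> feature tensor
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
            wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                     for i, w in enumerate(th.split(weight, 1, dim=0))}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            if self.self_loop:
                h = h + th.matmul(inputs[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}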
Example #4
    def __init__(self, in_dim, hidden_dim, rel_names, n_conv):
        super().__init__()
        self.encoder = nn.ModuleList([
            dglnn.HeteroGraphConv(
                {
                    rel: dglnn.GraphConv(in_dim if i == 0 else hidden_dim,
                                         hidden_dim)
                    for rel in rel_names
                },
                aggregate='sum') for i in range(n_conv)
        ])
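
A matching forward for this encoder is not included; a sketch under the assumption that a ReLU (with `F` as `torch.nn.functional`) sits between the hetero conv layers:

    def forward(self, g, node_feats):
        h = node_feats  # dict: node type -> feature tensor
        for conv in self.encoder:
            h = conv(g, h)
            h = {ntype: F.relu(feat) for ntype, feat in h.items()}
        return h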
Example #5
def test_graph_conv0():
    g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
    ctx = F.ctx()
    adj = g.adjacency_matrix(transpose=False, ctx=ctx)

    conv = nn.GraphConv(5, 2, norm='none', bias=True)

    # test#1: basic
    h0 = F.ones((3, 5))
    init_params = conv.init(jax.random.PRNGKey(2666), g, h0)
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    assert F.allclose(
        h1,
        _AXWb(adj, h0, init_params["params"]["_weight"],
              init_params["params"]["_bias"]))
    # test#2: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0

    assert F.allclose(
        h1,
        _AXWb(adj, h0, init_params["params"]["_weight"],
              init_params["params"]["_bias"]))

    conv = nn.GraphConv(5, 2)
    # test#3: basic
    h0 = F.ones((3, 5))
    init_params = conv.init(jax.random.PRNGKey(2666), g, h0)
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv.apply(init_params, g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
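
The `_AXWb` helper referenced above is not defined in the snippet; it is the dense reference computation Y = A(XW) + b. A sketch assuming `import jax.numpy as jnp` and a dense adjacency `A`:

def _AXWb(A, X, W, b):
    # flatten trailing feature dims so the same matmul against A works
    # for both (N, D) and (N, D1, D2) inputs
    XW = jnp.matmul(X, W)
    Y = jnp.matmul(A, XW.reshape(XW.shape[0], -1)).reshape(XW.shape)
    return Y + b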
Example #6
    def __init__(self, in_feats, hid_feats):
        super().__init__()

        # ten GraphConv layers: widen from in_feats to hid_feats, hold that
        # width for seven layers, then project down to a single output unit
        # (norm='right' throughout; the remaining kwargs were DGL defaults)
        self.conv1 = dglnn.GraphConv(in_feats, hid_feats // 2, norm='right')
        self.conv2 = dglnn.GraphConv(hid_feats // 2, hid_feats, norm='right')
        self.conv3 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv4 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv5 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv6 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv7 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv8 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv9 = dglnn.GraphConv(hid_feats, hid_feats, norm='right')
        self.conv10 = dglnn.GraphConv(hid_feats, 1, norm='right')

        self.Batchnorm = nn.BatchNorm1d(hid_feats)
        self.htan = nn.Hardtanh(min_val=0, max_val=1)
        self.sig = nn.Sigmoid()
        self.Lrelu = nn.LeakyReLU()
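
The forward pass for this ten-layer stack is not included, and the exact placement of the batch norm, Hardtanh, and sigmoid is not recoverable from the constructor alone; one hypothetical wiring:

    def forward(self, g, x):
        # hypothetical: LeakyReLU after each hidden conv, batch norm once
        # the width reaches hid_feats, sigmoid on the scalar output
        h = self.Lrelu(self.conv1(g, x))
        h = self.Batchnorm(self.Lrelu(self.conv2(g, h)))
        for conv in (self.conv3, self.conv4, self.conv5, self.conv6,
                     self.conv7, self.conv8, self.conv9):
            h = self.Lrelu(conv(g, h))
        return self.sig(self.conv10(g, h))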
Example #7
def test_graph_conv_bi(idtype, g, norm, weight, bias):
    # Test a pair of tensor inputs
    g = g.astype(idtype)
    conv = nn.GraphConv(5, 2, norm=norm, weight=weight, bias=bias)
    ext_w = F.randn((5, 2))
    nsrc = g.number_of_src_nodes()
    ndst = g.number_of_dst_nodes()
    h = F.randn((nsrc, 5))
    h_dst = F.randn((ndst, 2))
    if weight:
        init_params = conv.init(jax.random.PRNGKey(2666), g, h)
        h_out = conv.apply(init_params, g, (h, h_dst))
    else:
        init_params = conv.init(jax.random.PRNGKey(2666), g, h, weight=ext_w)
        h_out = conv.apply(init_params, g, (h, h_dst), weight=ext_w)
    assert h_out.shape == (ndst, 2)
Example #8
    def __init__(self,
                 in_feat,
                 out_feat,
                 ntypes,
                 rel_names,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.ntypes = ntypes
        self.rel_names = rel_names
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        if self.use_weight:
            self.weight = nn.ModuleDict({
                rel_name: nn.Linear(in_feat, out_feat, bias=False)
                for rel_name in self.rel_names
            })

        # weight for self loop
        if self.self_loop:
            self.loop_weights = nn.ModuleDict({
                ntype: nn.Linear(in_feat, out_feat, bias=bias)
                for ntype in self.ntypes
            })

        self.dropout = nn.Dropout(dropout)

        self.reset_parameters()
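
`reset_parameters` is called but not defined in the snippet; a plausible version re-initializes the per-relation and self-loop linear layers:

    def reset_parameters(self):
        # assumed implementation: Xavier init for every Linear submodule
        if self.use_weight:
            for linear in self.weight.values():
                nn.init.xavier_uniform_(linear.weight,
                                        gain=nn.init.calculate_gain('relu'))
        if self.self_loop:
            for linear in self.loop_weights.values():
                nn.init.xavier_uniform_(linear.weight,
                                        gain=nn.init.calculate_gain('relu'))
                if linear.bias is not None:
                    nn.init.zeros_(linear.bias)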
Example #9
    def __init__(
        self,
        ntype2in_feat_dim: Dict[str, int],
        out_feat_dim: int,
        etype2ntypes: Dict[str, Tuple[str, str]],
        bias: bool = True,
        activation: Optional[Callable] = None,
        dropout: float = 0.0,
        regularizer: Optional[str] = None,
        ntype_need_basis_reg: Optional[str] = None,
        num_bases: Optional[int] = None,
    ):
        super(RelGraphConvLayer, self).__init__()
        self.ntype2in_feat_dim = ntype2in_feat_dim
        self.out_feat_dim = out_feat_dim
        self.etype2ntypes = etype2ntypes
        self.etypes = list(etype2ntypes.keys())
        self.bias = bias
        self.activation = activation
        self.regularizer = regularizer
        self.num_bases = num_bases
        self.ntype_need_basis_reg = ntype_need_basis_reg
        self.etypes_need_reg = []

        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat_dim))
            nn.init.zeros_(self.h_bias)

        self.dropout = nn.Dropout(dropout)

        etype2in_feat_dim = {
            etype: self.ntype2in_feat_dim[etype2ntypes[etype][0]]
            for etype in self.etypes
        }

        # weight=False because this class creates the weights itself,
        # so that weight regularization can be applied to them
        self.conv = dglnn.HeteroGraphConv({
            etype: dglnn.GraphConv(etype2in_feat_dim[etype],
                                   out_feat_dim,
                                   norm='both',
                                   weight=False,
                                   bias=False)
            for etype in self.etypes
        })

        if regularizer == 'bdd':
            self.bdds = {
                etype: BlockDiagDecomp(
                    (etype2in_feat_dim[etype], out_feat_dim), num_bases)
                for etype in self.etypes
            }
            self.bdds = nn.ModuleDict(self.bdds)
            self.etypes_need_reg = self.etypes
            self.etypes_without_reg = []
            return

        self.etypes_need_reg = []
        if regularizer == 'basis':
            self.etypes_need_reg = [
                etype for etype, ntypes in self.etype2ntypes.items()
                if ntypes[0] == ntype_need_basis_reg
                and ntypes[1] == ntype_need_basis_reg
            ]
            self.basis = WeightBasis(
                (ntype2in_feat_dim[ntype_need_basis_reg], out_feat_dim),
                num_bases, len(self.etypes_need_reg))

        self.weights_without_reg = nn.ParameterDict()
        self.etypes_without_reg = list(
            set(self.etypes) - set(self.etypes_need_reg))
        for etype in self.etypes_without_reg:
            self.weights_without_reg[etype] = nn.Parameter(
                torch.Tensor(etype2in_feat_dim[etype], out_feat_dim))
            nn.init.xavier_uniform_(self.weights_without_reg[etype])
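
The forward pass is omitted. A sketch that routes each edge type's weight (plain or basis-shared; the `bdd` branch is left out) into the hetero conv via `mod_kwargs`:

    def forward(self, g, inputs):
        # `inputs` maps node type -> feature tensor
        wdict = {etype: {'weight': w}
                 for etype, w in self.weights_without_reg.items()}
        if self.regularizer == 'basis' and self.etypes_need_reg:
            basis_weight = self.basis()  # (num_reg_etypes, in_dim, out_dim)
            for i, etype in enumerate(self.etypes_need_reg):
                wdict[etype] = {'weight': basis_weight[i]}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(h):
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(h) for ntype, h in hs.items()}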
Example #10
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        self.conv1 = dglnn.GraphConv(in_features, hidden_features)
        self.conv2 = dglnn.GraphConv(hidden_features, out_features)
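
A matching two-layer forward, with the usual ReLU in between (the activation choice and `F` being `torch.nn.functional` are assumptions):

    def forward(self, graph, inputs):
        h = F.relu(self.conv1(graph, inputs))
        h = self.conv2(graph, h)
        return h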