Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)
        self.lin = lin
        self.propagation = APPNPConv(K, alpha, ppr_dropout)
Example #2
 def __init__(
     self,
     g,
     in_feats,
     n_classes,
     n_hidden,
     n_layers,
     activation,
     feat_drop,
     edge_drop,
     alpha,
     k,
 ):
     super(APPNP, self).__init__()
     self.g = g
     self.layers = nn.ModuleList()
     # input layer
     self.layers.append(nn.Linear(in_feats, n_hidden))
     # hidden layers
     for i in range(1, n_layers):
         self.layers.append(nn.Linear(n_hidden, n_hidden))
     # output layer
     self.layers.append(nn.Linear(n_hidden, n_classes))
     self.activation = activation
     if feat_drop:
         self.feat_drop = nn.Dropout(feat_drop)
     else:
         self.feat_drop = lambda x: x
     self.propagate = APPNPConv(k, alpha, edge_drop)
     self.reset_parameters()
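
This snippet only shows the constructor; the repository's `reset_parameters` and forward pass are not included. A minimal sketch of the forward step in the predict-then-propagate style this constructor sets up (MLP prediction first, then APPNPConv propagation); the exact dropout placement is an assumption:

    def forward(self, features):
        # prediction step: MLP on the raw node features
        h = self.feat_drop(features)
        h = self.activation(self.layers[0](h))
        for layer in self.layers[1:-1]:
            h = self.activation(layer(h))
        h = self.layers[-1](self.feat_drop(h))
        # propagation step: approximate personalized PageRank over self.g
        h = self.propagate(self.g, h)
        return h
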
Example #3
 def __init__(self,
              in_dim,
              hidden_dim,
              out_dim,
              dropout=0.,
              name='gat',
              residual=True,
              use_mlp=False,
              join_with_mlp=False):
     super(GNNModelDGL, self).__init__()
     self.name = name
     self.use_mlp = use_mlp
     self.join_with_mlp = join_with_mlp
     self.normalize_input_columns = True
     if use_mlp:
         self.mlp = MLPRegressor(in_dim, hidden_dim, out_dim)
         if join_with_mlp:
             in_dim += out_dim
         else:
             in_dim = out_dim
     if name == 'gat':
         self.l1 = GATConvDGL(in_dim,
                              hidden_dim // 8,
                              8,
                              feat_drop=dropout,
                              attn_drop=dropout,
                              residual=False,
                              activation=F.elu)
         self.l2 = GATConvDGL(hidden_dim,
                              out_dim,
                              1,
                              feat_drop=dropout,
                              attn_drop=dropout,
                              residual=residual,
                              activation=None)
     elif name == 'gcn':
         self.l1 = GraphConv(in_dim, hidden_dim, activation=F.elu)
         self.l2 = GraphConv(hidden_dim, out_dim, activation=F.elu)
         self.drop = Dropout(p=dropout)
     elif name == 'cheb':
         self.l1 = ChebConvDGL(in_dim, hidden_dim, k=3)
         self.l2 = ChebConvDGL(hidden_dim, out_dim, k=3)
         self.drop = Dropout(p=dropout)
     elif name == 'agnn':
         self.lin1 = Sequential(Dropout(p=dropout),
                                Linear(in_dim, hidden_dim), ELU())
         self.l1 = AGNNConvDGL(learn_beta=False)
         self.l2 = AGNNConvDGL(learn_beta=True)
         self.lin2 = Sequential(Dropout(p=dropout),
                                Linear(hidden_dim, out_dim), ELU())
     elif name == 'appnp':
         self.lin1 = Sequential(Dropout(p=dropout),
                                Linear(in_dim, hidden_dim), ReLU(),
                                Dropout(p=dropout),
                                Linear(hidden_dim, out_dim))
         self.l1 = APPNPConv(k=10, alpha=0.1, edge_drop=0.)
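
The forward pass for this model is not part of the snippet. For the 'appnp' branch it would presumably apply the two-layer MLP `self.lin1` and then the APPNPConv propagation; a rough sketch under that assumption, using DGL's `conv(graph, feat)` convention and ignoring the other branches and the optional `use_mlp` path:

    def forward(self, graph, features):
        # hypothetical forward for the 'appnp' variant only
        h = self.lin1(features)   # Dropout -> Linear -> ReLU -> Dropout -> Linear
        h = self.l1(graph, h)     # APPNPConv: smooth the predictions over the graph
        return h
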
Example #4
 def __init__(self,
              num_feats,
              num_classes,
              k,
              alpha,
              bias=False,
              activation=None):
     super(DglAPPNPNet, self).__init__()
     self.layer = APPNPConv(k, alpha)
     self.linear = nn.Linear(num_feats, num_classes, bias)
     self.activation = activation
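
This network again separates prediction from propagation: one linear layer produces per-node scores, which APPNPConv then smooths over the graph for k steps. A plausible forward pass, assuming DGL's `conv(graph, feat)` calling convention (the original forward method is not shown):

    def forward(self, graph, feat):
        h = self.linear(feat)              # per-node prediction
        if self.activation is not None:
            h = self.activation(h)
        return self.layer(graph, h)        # k steps of PPR-style propagation
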
Example #5
class APPNP(nn.Module):
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)
        self.lin = lin
        self.propagation = APPNPConv(K, alpha, ppr_dropout)

    def reset_parameters(self):
        for lin in self.lin:
            if hasattr(lin, "reset_parameters"):
                lin.reset_parameters()
        self.propagation.reset_parameters()

    def forward(self, x, g):
        # transform node features with the MLP, then smooth the predictions
        # with K steps of personalized-PageRank propagation on graph g
        x = self.lin(x)
        x = self.propagation(g, x)
        return x
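
Since this example contains the full module, a short usage sketch: build a small DGL graph, add self-loops, and run the model. The sizes and the toy graph are made up for illustration, and the `activations` registry and `APPNPConv` import used by the class are assumed to be importable from the surrounding project:

import dgl
import torch

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))  # toy 3-node graph
model = APPNP(in_features=16, out_features=3)             # defaults: K=10, alpha=0.1
x = torch.randn(g.num_nodes(), 16)                        # random node features
logits = model(x, g)                                      # shape: (3, 3)
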
Example #6
 def __init__(self,
              num_feats,
              num_classes,
              k,
              alpha,
              num_hidden,
              num_layers,
              bias=False,
              activation=F.relu,
              batch_norm=False,
              residual=False,
              dropout=0):
     super(APPNPNet, self).__init__()
     self.mlp = MLPNet(num_feats, num_classes, num_hidden, num_layers, bias,
                       activation, batch_norm, residual, dropout)
     self.appnp = APPNPConv(k, alpha)
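
The forward pass is again omitted; presumably it mirrors the pattern of the earlier examples, running the MLP and then propagating. A hedged sketch, assuming `MLPNet` instances are callable on a node-feature tensor and that APPNPConv is called as `conv(graph, feat)`:

    def forward(self, graph, feat):
        h = self.mlp(feat)             # MLP prediction (assumed call signature)
        return self.appnp(graph, h)    # personalized-PageRank propagation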