Example #1
    # Assumed imports for this excerpt (not shown in the snippet):
    #   import torch.nn.functional as F
    #   from torch.nn import Dropout, Sequential, Linear, ELU, ReLU
    #   from dgl.nn.pytorch import (GATConv as GATConvDGL, GraphConv,
    #                               ChebConv as ChebConvDGL,
    #                               AGNNConv as AGNNConvDGL, APPNPConv)
    # MLPRegressor is a project-local module assumed to be defined elsewhere.
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 out_dim,
                 dropout=0.,
                 name='gat',
                 residual=True,
                 use_mlp=False,
                 join_with_mlp=False):
        super(GNNModelDGL, self).__init__()
        self.name = name
        self.use_mlp = use_mlp
        self.join_with_mlp = join_with_mlp
        self.normalize_input_columns = True
        # Optional MLP preprocessing of node features: its output either
        # augments (join) or replaces the raw input features.
        if use_mlp:
            self.mlp = MLPRegressor(in_dim, hidden_dim, out_dim)
            if join_with_mlp:
                in_dim += out_dim
            else:
                in_dim = out_dim
        if name == 'gat':
            # Two-layer GAT: 8 heads of size hidden_dim // 8 (hidden_dim
            # should be divisible by 8), then a single output head.
            self.l1 = GATConvDGL(in_dim,
                                 hidden_dim // 8,
                                 8,
                                 feat_drop=dropout,
                                 attn_drop=dropout,
                                 residual=False,
                                 activation=F.elu)
            self.l2 = GATConvDGL(hidden_dim,
                                 out_dim,
                                 1,
                                 feat_drop=dropout,
                                 attn_drop=dropout,
                                 residual=residual,
                                 activation=None)
        elif name == 'gcn':
            self.l1 = GraphConv(in_dim, hidden_dim, activation=F.elu)
            self.l2 = GraphConv(hidden_dim, out_dim, activation=F.elu)
            self.drop = Dropout(p=dropout)
        elif name == 'cheb':
            # Chebyshev spectral convolutions of order k = 3.
            self.l1 = ChebConvDGL(in_dim, hidden_dim, k=3)
            self.l2 = ChebConvDGL(hidden_dim, out_dim, k=3)
            self.drop = Dropout(p=dropout)
        elif name == 'agnn':
            # AGNN: linear encoder, two attention-propagation layers,
            # linear decoder.
            self.lin1 = Sequential(Dropout(p=dropout),
                                   Linear(in_dim, hidden_dim), ELU())
            self.l1 = AGNNConvDGL(learn_beta=False)
            self.l2 = AGNNConvDGL(learn_beta=True)
            self.lin2 = Sequential(Dropout(p=dropout),
                                   Linear(hidden_dim, out_dim), ELU())
        elif name == 'appnp':
            # APPNP: an MLP followed by personalized-PageRank propagation.
            self.lin1 = Sequential(Dropout(p=dropout),
                                   Linear(in_dim, hidden_dim), ReLU(),
                                   Dropout(p=dropout),
                                   Linear(hidden_dim, out_dim))
            self.l1 = APPNPConv(k=10, alpha=0.1, edge_drop=0.)
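
For reference, a minimal instantiation sketch for this class (the class name GNNModelDGL is taken from the super() call above; the dimensions and the choice of the 'gat' branch are illustrative assumptions, and the forward method is not part of this excerpt):

# Hypothetical usage; hidden_dim is kept divisible by 8 so each of the
# 8 attention heads in the first GAT layer gets an even size (64 // 8 = 8).
model = GNNModelDGL(in_dim=128, hidden_dim=64, out_dim=1,
                    dropout=0.5, name='gat', residual=True)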
Example #2
    # Assumed imports for this excerpt (not shown in the snippet):
    #   import torch
    #   from torch import nn
    #   import torch.nn.functional as F
    #   from dgl.nn.pytorch import GATConv as GATConvDGL
    # ElementWiseLinear is a project-local helper; with weight=False and
    # bias=True it acts as a learnable per-class bias.
    def __init__(
        self,
        in_feats,
        n_classes,
        n_layers=3,
        n_heads=3,
        activation=F.relu,
        n_hidden=250,
        dropout=0.75,
        input_drop=0.1,
        attn_drop=0.0,
    ):
        super().__init__()
        self.in_feats = in_feats
        self.n_hidden = n_hidden
        self.n_classes = n_classes
        self.n_layers = n_layers
        self.num_heads = n_heads

        self.convs = torch.nn.ModuleList()
        self.norms = torch.nn.ModuleList()

        for i in range(n_layers):
            # Hidden layers consume the flattened multi-head output of the
            # previous layer; the first layer consumes the raw features.
            in_hidden = n_heads * n_hidden if i > 0 else in_feats
            # The final layer projects to n_classes with a single head.
            out_hidden = n_hidden if i < n_layers - 1 else n_classes
            num_heads = n_heads if i < n_layers - 1 else 1
            out_channels = n_heads

            self.convs.append(
                GATConvDGL(
                    in_hidden,
                    out_hidden,
                    num_heads=num_heads,
                    attn_drop=attn_drop,
                    residual=True,
                ))

            # Batch-normalize the flattened head dimension; no norm after
            # the final (single-head) layer.
            if i < n_layers - 1:
                self.norms.append(
                    torch.nn.BatchNorm1d(out_channels * out_hidden))

        self.bias_last = ElementWiseLinear(n_classes,
                                           weight=False,
                                           bias=True,
                                           inplace=True)

        self.input_drop = nn.Dropout(input_drop)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
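
To make the layer-size arithmetic in the constructor loop concrete, here is a small standalone sketch with illustrative values (in_feats=100 and n_classes=40 are assumptions; n_layers=3, n_heads=3, n_hidden=250 are the snippet's defaults):

# Reproduces the per-layer dimension bookkeeping from the constructor loop.
n_layers, n_heads, n_hidden = 3, 3, 250
in_feats, n_classes = 100, 40  # illustrative values
for i in range(n_layers):
    in_hidden = n_heads * n_hidden if i > 0 else in_feats
    out_hidden = n_hidden if i < n_layers - 1 else n_classes
    num_heads = n_heads if i < n_layers - 1 else 1
    print(f"layer {i}: {in_hidden} -> {num_heads} head(s) x {out_hidden}")
# layer 0: 100 -> 3 head(s) x 250
# layer 1: 750 -> 3 head(s) x 250
# layer 2: 750 -> 1 head(s) x 40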