Example #1
    def __init__(self, num_features, num_classes, hidden_size, num_heads, dropout):
        super(DrGAT, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        # Two GAT layers plus matching squeeze-and-excitation blocks; each SE
        # bottleneck width is the square root of its input dimension.
        self.conv1 = GATLayer(num_features, hidden_size, nhead=num_heads, attn_drop=dropout)
        self.conv2 = GATLayer(hidden_size * num_heads, num_classes, nhead=1, attn_drop=dropout)
        self.se1 = SELayer(num_features, se_channels=int(np.sqrt(num_features)))
        self.se2 = SELayer(hidden_size * num_heads, se_channels=int(np.sqrt(hidden_size * num_heads)))
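DrGAT places a squeeze-and-excitation (SELayer) block in front of each attention layer, so the input features are channel-wise recalibrated before attention is computed; setting the SE bottleneck width to the square root of the incoming feature dimension keeps the extra parameter count small.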
Example #2
File: gat.py Project: jkx19/cogdl
    def __init__(
        self,
        in_feats,
        hidden_size,
        out_features,
        num_layers,
        dropout,
        alpha,
        nhead,
        residual,
        last_nhead,
    ):
        """Sparse version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attentions = nn.ModuleList()
        # Input layer: in_feats -> hidden_size per head, heads concatenated.
        self.attentions.append(
            GATLayer(
                in_feats,
                hidden_size,
                nhead=nhead,
                dropout=dropout,
                alpha=alpha,
                concat=True,
                residual=residual,
            )
        )
        # Hidden layers: hidden_size * nhead -> hidden_size per head, concatenated.
        for i in range(num_layers - 2):
            self.attentions.append(
                GATLayer(
                    hidden_size * nhead,
                    hidden_size,
                    nhead=nhead,
                    dropout=dropout,
                    alpha=alpha,
                    concat=True,
                    residual=residual,
                )
            )
        # Output layer: last_nhead heads, outputs not concatenated, no residual.
        self.attentions.append(
            GATLayer(
                hidden_size * nhead,
                out_features,
                dropout=dropout,
                alpha=alpha,
                concat=False,
                nhead=last_nhead,
                residual=False,
            )
        )
        self.num_layers = num_layers
        self.last_nhead = last_nhead
        self.residual = residual
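A forward pass over this module list would plausibly apply dropout before every attention layer and an ELU between hidden layers. The sketch below is a hedged reconstruction, not the project's actual code; it assumes each GATLayer is called as layer(graph, x) and that torch.nn.functional is imported as F:

    def forward(self, graph, x):
        # Assumed call convention: layer(graph, x); dropout before each layer,
        # ELU after every layer except the last.
        for i, layer in enumerate(self.attentions):
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = layer(graph, x)
            if i != self.num_layers - 1:
                x = F.elu(x)
        return x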
Example #3
    def __init__(
        self,
        in_feats,
        hidden_size,
        out_feats,
        num_layers,
        group=2,
        alpha=0.2,
        nhead=1,
        dropout=0.5,
        attn_drop=0.5,
        activation="relu",
        norm="batchnorm",
    ):
        super(RevGAT, self).__init__()
        self.dropout = dropout
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        self.norm = get_norm_layer(norm, hidden_size * nhead)
        self.act = get_activation(activation)
        for i in range(num_layers):
            if i == 0:
                # Input layer: plain GAT layer with a residual connection.
                self.layers.append(
                    GATLayer(
                        in_feats,
                        hidden_size,
                        nhead,
                        alpha,
                        attn_drop,
                        residual=True,
                    )
                )
            elif i == num_layers - 1:
                # Output layer: single attention head, residual connection.
                self.layers.append(GATLayer(hidden_size * nhead, out_feats, 1, alpha, attn_drop, residual=True))
            else:
                # Intermediate layers are reversible: the hidden channels are
                # split into `group` groups, so the wrapped GAT layer operates
                # on 1/group of the hidden width.
                conv = GATLayer(
                    hidden_size * nhead // group,
                    hidden_size // group,
                    nhead=nhead,
                    alpha=alpha,
                    attn_drop=attn_drop,
                )
                res_conv = ResGNNLayer(
                    conv,
                    hidden_size * nhead // group,
                    activation=activation,
                    norm=norm,
                    out_norm=norm,
                    out_channels=hidden_size * nhead // group,
                )
                self.layers.append(RevGNNLayer(res_conv, group))
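RevGAT keeps the first and last layers as ordinary GAT layers with residual connections and makes only the intermediate layers reversible: ResGNNLayer wraps the grouped GATLayer with normalization, activation and a residual path, and RevGNNLayer splits the hidden_size * nhead channels into group groups (hence the // group input and output widths). Reversible blocks let intermediate activations be recomputed during the backward pass rather than stored, which is the usual motivation for this design.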
Example #4
def gat_model(in_feats, hidden_size, out_feats, nhead, attn_drop, alpha,
              residual, norm, num_layers, dropout, last_nhead):
    layers = nn.ModuleList()
    # Input layer: in_feats -> hidden_size with nhead attention heads.
    layers.append(
        GATLayer(in_feats,
                 hidden_size,
                 nhead=nhead,
                 attn_drop=attn_drop,
                 alpha=alpha,
                 residual=residual,
                 norm=norm))
    if num_layers != 1:
        layers.append(nn.ELU())
    # Hidden layers, each preceded by dropout and followed by an ELU.
    for i in range(num_layers - 2):
        if dropout > 0.0:
            layers.append(nn.Dropout(dropout))
        layers.append(
            GATLayer(
                hidden_size * nhead,
                hidden_size,
                nhead=nhead,
                attn_drop=attn_drop,
                alpha=alpha,
                residual=residual,
                norm=norm,
            ))
        layers.append(nn.ELU())

    # Output layer with last_nhead heads and no residual connection.
    if dropout > 0.0:
        layers.append(nn.Dropout(p=dropout))
    layers.append(
        GATLayer(
            hidden_size * nhead,
            out_feats,
            attn_drop=attn_drop,
            alpha=alpha,
            nhead=last_nhead,
            residual=False,
        ))

    return layers
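Because the returned nn.ModuleList mixes graph-aware layers (GATLayer) with purely element-wise ones (nn.Dropout, nn.ELU), it cannot be wrapped in nn.Sequential as-is. A small driver such as the hypothetical run_gat_layers below (an illustrative sketch that assumes GATLayer instances are called as layer(graph, x)) dispatches on the layer type:

    def run_gat_layers(layers, graph, x):
        # Hypothetical helper: GATLayer needs the graph, Dropout/ELU only the features.
        for layer in layers:
            if isinstance(layer, GATLayer):
                x = layer(graph, x)
            else:
                x = layer(x)
        return x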
Example #5
    def __init__(self, num_features, hidden_size, embedding_size, num_heads,
                 dropout, num_clusters):
        super(DAEGC, self).__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.embedding_size = embedding_size
        self.dropout = dropout
        self.num_clusters = num_clusters
        self.att1 = GATLayer(num_features,
                             hidden_size,
                             attn_drop=dropout,
                             alpha=0.2,
                             nhead=num_heads)
        self.att2 = GATLayer(hidden_size * num_heads,
                             embedding_size,
                             attn_drop=dropout,
                             alpha=0.2,
                             nhead=1)
        self.cluster_center = torch.nn.Parameter(
            torch.FloatTensor(self.num_clusters))
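The DAEGC encoder is a plain two-layer GAT: num_features is mapped to hidden_size per head (num_heads heads concatenated) and then to embedding_size with a single head, and the learnable cluster_center parameter is fitted in that embedding space for the clustering objective.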
Example #6
def drgat_model(num_features, hidden_size, num_classes, dropout, num_heads):
    # Functional counterpart of the DrGAT module in Example #1:
    # Dropout -> SE recalibration -> GAT -> ELU, applied twice.
    layers = nn.ModuleList()
    layers.append(nn.Dropout(p=dropout))
    layers.append(SELayer(num_features,
                          se_channels=int(np.sqrt(num_features))))
    layers.append(
        GATLayer(num_features, hidden_size, nhead=num_heads,
                 attn_drop=dropout))
    layers.append(nn.ELU())
    layers.append(nn.Dropout(p=dropout))
    layers.append(
        SELayer(hidden_size * num_heads,
                se_channels=int(np.sqrt(hidden_size * num_heads))))
    layers.append(
        GATLayer(hidden_size * num_heads,
                 num_classes,
                 nhead=1,
                 attn_drop=dropout))
    layers.append(nn.ELU())

    return layers
Example #7
    def __init__(self, num_features, hidden_size, embedding_size, num_heads,
                 dropout, num_clusters):
        super(DAEGC, self).__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.embedding_size = embedding_size
        self.dropout = dropout
        self.num_clusters = num_clusters
        self.att1 = GATLayer(num_features,
                             hidden_size,
                             dropout=dropout,
                             alpha=0.2,
                             nhead=num_heads,
                             concat=True)
        self.att2 = GATLayer(hidden_size * num_heads,
                             embedding_size,
                             dropout=dropout,
                             alpha=0.2,
                             nhead=1,
                             concat=False)
        self.cluster_center = None
Example #8
    def __init__(self,
                 in_feats,
                 hidden_size,
                 num_layers,
                 nhead,
                 dropout=0.0,
                 attn_drop=0.0,
                 alpha=0.2,
                 residual=False):
        super(GATModel, self).__init__()
        # hidden_size is split evenly across the attention heads.
        assert hidden_size % nhead == 0
        self.layers = nn.ModuleList([
            GATLayer(
                # The first layer consumes the raw node features; every later
                # layer consumes the concatenated multi-head hidden state.
                in_feats=in_feats if i == 0 else hidden_size,
                out_feats=hidden_size // nhead,
                nhead=nhead,
                attn_drop=attn_drop,
                alpha=alpha,
                residual=residual,
                activation=F.leaky_relu if i + 1 < num_layers else None,
            ) for i in range(num_layers)
        ])
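Since hidden_size is divisible by nhead, every layer emits hidden_size // nhead features per head and, assuming the heads are concatenated, the hidden width stays at hidden_size throughout the stack. A minimal instantiation sketch (the sizes below are illustrative, not taken from the project):

    # 64 hidden channels split across 8 heads of 8 channels each.
    model = GATModel(in_feats=1433, hidden_size=64, num_layers=2, nhead=8)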