Example #1
    def __init__(self,
                 in_feats: int,
                 hidden_size: int,
                 out_feats: int,
                 pooling_layer: int,
                 pooling_rates: List[float],
                 n_dropout: float = 0.5,
                 adj_dropout: float = 0.3,
                 activation: str = "elu",
                 improved: bool = False,
                 aug_adj: bool = False):
        super(GraphUnet, self).__init__()
        self.improved = improved
        self.n_dropout = n_dropout
        self.act = get_activation(activation)
        assert pooling_layer <= len(pooling_rates)
        pooling_rates = pooling_rates[:pooling_layer]
        self.unet = GraphUnetLayer(hidden_size, pooling_layer, pooling_rates,
                                   activation, n_dropout, aug_adj)

        self.in_gcn = GraphConvolution(in_feats, hidden_size)
        self.out_gcn = GraphConvolution(hidden_size, out_feats)
        self.adj_drop = nn.Dropout(
            adj_dropout) if adj_dropout > 0.001 else nn.Identity()

        self.cache_edge_index = None
        self.cache_edge_attr = None
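Every constructor on this page resolves its activation argument through CogDL's get_activation helper (Example #19 also passes inplace=True). As a rough mental model, such a helper maps a name string to a torch module; the sketch below is an illustrative assumption, not CogDL's actual implementation, which may instead return functional forms.

import torch.nn as nn

def get_activation_sketch(name: str, inplace: bool = False) -> nn.Module:
    # hypothetical name-to-module lookup, for illustration only
    table = {
        "relu": nn.ReLU(inplace=inplace),
        "elu": nn.ELU(inplace=inplace),
        "gelu": nn.GELU(),
        "prelu": nn.PReLU(),
        "tanh": nn.Tanh(),
        "sigmoid": nn.Sigmoid(),
    }
    if name not in table:
        raise NotImplementedError(f"activation '{name}' is not supported")
    return table[name]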
Example #2
 def __init__(self,
              in_feats,
              hidden_size,
              out_feats,
              num_layers,
              dropout,
              activation="relu",
              residual=True,
              norm=None):
     super(MoEGCN, self).__init__()
     shapes = [in_feats] + [hidden_size] * num_layers
     conv_func = GCNLayer
     conv_params = {
         "dropout": dropout,
         "norm": norm,
         "residual": residual,
         "activation": activation,
     }
     self.layers = nn.ModuleList([
         GraphConvBlock(
             conv_func,
             conv_params,
             shapes[i],
             shapes[i + 1],
             dropout=dropout,
         ) for i in range(num_layers)
     ])
     self.num_layers = num_layers
     self.dropout = dropout
     self.act = get_activation(activation)
     self.final_cls = nn.Linear(hidden_size, out_feats)
Example #3
File: pprgo.py  Project: zhjhr181/cogdl
 def __init__(self, in_feats, hidden_size, out_feats, num_layers, dropout, activation="relu"):
     super(PPRGoMLP, self).__init__()
     self.dropout = dropout
     self.nlayers = num_layers
     shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]
     self.layers = nn.ModuleList([nn.Linear(shapes[i], shapes[i + 1]) for i in range(num_layers)])
     self.activation = get_activation(activation)
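The shapes list chains in_feats through hidden_size to out_feats, so layers[i] maps shapes[i] to shapes[i + 1]. A stack like this is typically applied with dropout and the activation between layers but neither after the last one; the sketch below is an illustrative assumption, not code taken from pprgo.py.

import torch
import torch.nn as nn
import torch.nn.functional as F

def mlp_forward(layers, act, dropout, x, training=True):
    for i, layer in enumerate(layers):
        x = F.dropout(x, p=dropout, training=training)
        x = layer(x)
        if i != len(layers) - 1:   # no activation after the output layer
            x = act(x)
    return x

# usage with in_feats=16, hidden_size=32, out_feats=7, num_layers=2
layers = nn.ModuleList([nn.Linear(16, 32), nn.Linear(32, 7)])
out = mlp_forward(layers, nn.ReLU(), dropout=0.5, x=torch.randn(4, 16))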
Example #4
    def __init__(
        self,
        in_feats: int,
        hidden_size: int,
        out_feats: int,
        pooling_layer: int,
        pooling_rates: List[float],
        n_dropout: float = 0.5,
        adj_dropout: float = 0.3,
        activation: str = "elu",
        improved: bool = False,
        aug_adj: bool = False,
    ):
        super(GraphUnet, self).__init__()
        self.improved = improved
        self.n_dropout = n_dropout
        self.adj_dropout = adj_dropout

        self.act = get_activation(activation)
        assert pooling_layer <= len(pooling_rates)
        pooling_rates = pooling_rates[:pooling_layer]
        pooling_rates = [float(x) for x in pooling_rates]
        self.unet = GraphUnetLayer(hidden_size, pooling_layer, pooling_rates,
                                   activation, n_dropout, aug_adj)

        self.in_gcn = GCNLayer(in_feats, hidden_size)
        self.out_gcn = GCNLayer(hidden_size, out_feats)
Example #5
    def __init__(
        self,
        hidden_size: int,
        pooling_layer: int,
        pooling_rates: List[float],
        activation: str = "elu",
        dropout: float = 0.5,
        aug_adj: bool = False,
    ):
        super(GraphUnetLayer, self).__init__()
        self.dropout = dropout
        self.activation = activation
        self.pooling_layer = pooling_layer
        self.gcn = GCNLayer(hidden_size, hidden_size)
        self.act = get_activation(activation)

        self.down_gnns = nn.ModuleList(
            [GCNLayer(hidden_size, hidden_size) for _ in range(pooling_layer)])
        self.up_gnns = nn.ModuleList(
            [GCNLayer(hidden_size, hidden_size) for _ in range(pooling_layer)])
        self.poolings = nn.ModuleList([
            Pool(hidden_size, pooling_rates[i], aug_adj, dropout)
            for i in range(pooling_layer)
        ])
        self.unpoolings = nn.ModuleList(
            [UnPool() for _ in range(pooling_layer)])
Example #6
 def __init__(self,
              in_feats,
              out_feats,
              hidden_size,
              num_layers,
              dropout=0.0,
              activation="relu",
              norm=None):
     super(MLP, self).__init__()
     self.norm = norm
     self.activation = get_activation(activation)
     self.dropout = dropout
     shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]
     self.mlp = nn.ModuleList([
         nn.Linear(shapes[layer], shapes[layer + 1])
         for layer in range(num_layers)
     ])
     if norm is not None and num_layers > 1:
         if norm == "layernorm":
             self.norm_list = nn.ModuleList(
                 nn.LayerNorm(x) for x in shapes[1:-1])
         elif norm == "batchnorm":
             self.norm_list = nn.ModuleList(
                 nn.BatchNorm1d(x) for x in shapes[1:-1])
         else:
             raise NotImplementedError(
                 f"{norm} is not implemented in CogDL.")
Example #7
    def __init__(
        self, in_feats, out_feats, nhead=1, alpha=0.2, attn_drop=0.5, activation=None, residual=False, norm=None
    ):
        super(GATLayer, self).__init__()
        self.in_features = in_feats
        self.out_features = out_feats
        self.alpha = alpha
        self.nhead = nhead

        self.W = nn.Parameter(torch.FloatTensor(in_feats, out_feats * nhead))

        self.a_l = nn.Parameter(torch.zeros(size=(1, nhead, out_feats)))
        self.a_r = nn.Parameter(torch.zeros(size=(1, nhead, out_feats)))

        self.edge_softmax = EdgeSoftmax()
        self.mhspmm = MultiHeadSpMM()

        self.dropout = nn.Dropout(attn_drop)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.act = None if activation is None else get_activation(activation)
        self.norm = None if norm is None else get_norm_layer(norm, out_feats * nhead)

        if residual:
            self.residual = nn.Linear(in_feats, out_feats * nhead)
        else:
            self.register_buffer("residual", None)
        self.reset_parameters()
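The a_l and a_r parameters implement GAT's additive attention: per head, the unnormalized score of an edge (i, j) is LeakyReLU(a_l · Wh_i + a_r · Wh_j), which EdgeSoftmax then normalizes over each destination node's incoming edges. A single-edge sketch of the score computation (illustration only; the real layer works on sparse edge lists):

import torch
import torch.nn.functional as F

def gat_edge_score(h_src, h_dst, a_l, a_r, negative_slope=0.2):
    # h_src, h_dst: (nhead, out_feats) transformed endpoint features
    # a_l, a_r:     (nhead, out_feats) attention vectors
    score = (a_l * h_src).sum(-1) + (a_r * h_dst).sum(-1)   # one score per head
    return F.leaky_relu(score, negative_slope)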
Example #8
 def __init__(
     self, in_feats: int, pooling_rate: float, aug_adj: bool = False, dropout: float = 0.5, activation: str = "tanh"
 ):
     super(Pool, self).__init__()
     self.aug_adj = aug_adj
     self.pooling_rate = pooling_rate
     self.act = get_activation(activation)
     self.proj = nn.Linear(in_feats, 1)
     self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()  # Identity() is a no-op when dropout is disabled
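Pool is the gPool step of Graph U-Net: proj scores every node, the top pooling_rate fraction of nodes is kept, and the surviving features are gated by their activated scores. A dense sketch of that selection, under those assumptions:

import torch

def top_k_pool(x, proj, rate, act=torch.tanh):
    scores = proj(x).squeeze(-1)              # one scalar score per node
    k = max(1, int(rate * x.size(0)))         # number of nodes to keep
    values, idx = torch.topk(scores, k)
    return x[idx] * act(values).unsqueeze(-1), idx   # gate kept features by score

x = torch.randn(10, 16)
pooled, kept = top_k_pool(x, torch.nn.Linear(16, 1), rate=0.5)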
Example #9
File: pprgo.py  Project: yuchen7/cogdl
 def __init__(self, in_feats, hidden_size, out_feats, num_layers, dropout, activation="relu"):
     super(PPRGoMLP, self).__init__()
     self.dropout = dropout
     self.nlayers = num_layers
     shapes = [hidden_size] * (num_layers - 1) + [out_feats]
     self.layers = nn.ModuleList()
     self.layers.append(LinearLayer(in_feats, hidden_size, bias=False))
     for i in range(num_layers - 1):
         self.layers.append(nn.Linear(shapes[i], shapes[i + 1], bias=False))
     self.activation = get_activation(activation)
Example #10
File: grace.py  Project: Zeigar/cogdl
 def __init__(
     self,
     in_feats: int,
     out_feats: int,
     num_layers: int,
     activation: str = "relu",
 ):
     super(GraceEncoder, self).__init__()
     shapes = [in_feats] + [2 * out_feats] * (num_layers - 1) + [out_feats]
     self.layers = nn.ModuleList([GraphConvolution(shapes[i], shapes[i + 1]) for i in range(num_layers)])
     self.activation = get_activation(activation)
Example #11
    def __init__(self, in_ft, out_ft, act, bias=True):
        super(GCN, self).__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == "prelu" else get_activation(act)

        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        else:
            self.register_parameter("bias", None)

        for m in self.modules():
            self.weights_init(m)
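PReLU is presumably special-cased here because, unlike ReLU or ELU, it carries a learnable negative-slope parameter, so it has to live on the layer as its own module:

import torch.nn as nn

prelu = nn.PReLU()                                  # one shared learnable slope by default
print(sum(p.numel() for p in prelu.parameters()))   # -> 1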
Example #12
File: revgcn.py  Project: rpatil524/cogdl
 def __init__(
     self,
     in_feats,
     hidden_size,
     out_feats,
     num_layers,
     group=2,
     alpha=0.2,
     nhead=1,
     dropout=0.5,
     attn_drop=0.5,
     activation="relu",
     norm="batchnorm",
 ):
     super(RevGAT, self).__init__()
     self.dropout = dropout
     self.num_layers = num_layers
     self.layers = nn.ModuleList()
     self.norm = get_norm_layer(norm, hidden_size * nhead)
     self.act = get_activation(activation)
     for i in range(num_layers):
         if i == 0:
             self.layers.append(
                 GATLayer(
                     in_feats,
                     hidden_size,
                     nhead,
                     alpha,
                     attn_drop,
                     residual=True,
                 )
             )
         elif i == num_layers - 1:
             self.layers.append(GATLayer(hidden_size * nhead, out_feats, 1, alpha, attn_drop, residual=True))
         else:
             conv = GATLayer(
                 hidden_size * nhead // group,
                 hidden_size // group,
                 nhead=nhead,
                 alpha=alpha,
                 attn_drop=attn_drop,
             )
             res_conv = ResGNNLayer(
                 conv,
                 hidden_size * nhead // group,
                 activation=activation,
                 norm=norm,
                 out_norm=norm,
                 out_channels=hidden_size * nhead // group,
             )
             self.layers.append(RevGNNLayer(res_conv, group))
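RevGNNLayer wraps each residual block in a grouped reversible transform (group=2 here), so the hidden width must be divisible by group. For two groups, the inputs can be recovered exactly from the outputs, which is what allows activations to be recomputed in the backward pass instead of stored. A plain-tensor sketch of that idea (the CogDL layer additionally threads the graph through and manages the inverse itself):

import torch

def rev_forward(f, g, x):
    x1, x2 = torch.chunk(x, 2, dim=-1)   # split channels into two groups
    y1 = x1 + f(x2)
    y2 = x2 + g(y1)
    return torch.cat([y1, y2], dim=-1)

def rev_inverse(f, g, y):
    y1, y2 = torch.chunk(y, 2, dim=-1)   # the inputs are exactly recoverable,
    x2 = y2 - g(y1)                      # so they need not be kept in memory
    x1 = y1 - f(x2)
    return torch.cat([x1, x2], dim=-1)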
Example #13
    def __init__(
        self,
        in_feat,
        hidden_size,
        out_feat,
        num_layers,
        connection="res+",
        activation="relu",
        dropout=0.0,
        aggr="max",
        beta=1.0,
        p=1.0,
        learn_beta=False,
        learn_p=False,
        learn_msg_scale=True,
        use_msg_norm=False,
    ):
        super(DeeperGCN, self).__init__()
        self.dropout = dropout
        self.feat_encoder = nn.Linear(in_feat, hidden_size)

        self.layers = nn.ModuleList()
        self.layers.append(GENConv(hidden_size, hidden_size))
        for i in range(num_layers - 1):
            self.layers.append(
                DeepGCNLayer(
                    in_feat=hidden_size,
                    out_feat=hidden_size,
                    conv=GENConv(
                        in_feat=hidden_size,
                        out_feat=hidden_size,
                        aggr=aggr,
                        beta=beta,
                        p=p,
                        learn_beta=learn_beta,
                        learn_p=learn_p,
                        use_msg_norm=use_msg_norm,
                        learn_msg_scale=learn_msg_scale,
                    ),
                    connection=connection,
                    activation=activation,
                    dropout=dropout,
                    checkpoint_grad=(num_layers > 3)
                    and ((i + 1) == num_layers // 2),
                ))
        self.norm = nn.BatchNorm1d(hidden_size, affine=True)
        self.activation = get_activation(activation)
        self.fc = nn.Linear(hidden_size, out_feat)
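checkpoint_grad=(num_layers > 3) and ((i + 1) == num_layers // 2) enables gradient checkpointing for exactly one layer in the middle of deep stacks. A minimal sketch of what checkpointing a layer call looks like in PyTorch (an assumption for illustration; the CogDL layer wires this up internally):

import torch
from torch.utils.checkpoint import checkpoint

def run_layer(layer, x, checkpoint_grad=False):
    if checkpoint_grad and x.requires_grad:
        # discard intermediate activations now, recompute them during backward
        return checkpoint(layer, x)
    return layer(x)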
Example #14
File: revgcn.py  Project: rpatil524/cogdl
    def __init__(
        self,
        in_feats,
        hidden_size,
        out_feats,
        num_layers,
        group=2,
        activation="relu",
        norm="batchnorm",
        last_norm="batchnorm",
        dropout=0.0,
        aggr="softmax_sg",
        beta=1.0,
        p=1.0,
        learn_beta=False,
        learn_p=False,
        learn_msg_scale=True,
        use_msg_norm=False,
        edge_attr_size: Optional[list] = None,
        one_hot_emb: bool = False,
    ):
        super(RevGEN, self).__init__()
        self.input_fc = nn.Linear(in_feats, hidden_size)
        self.output_fc = nn.Linear(hidden_size, out_feats)
        self.layers = nn.ModuleList()

        for _ in range(num_layers):
            conv = GENConv(
                hidden_size // group,
                hidden_size // group,
                aggr,
                beta,
                p,
                learn_beta,
                learn_p,
                use_msg_norm,
                learn_msg_scale,
                residual=True,
                edge_attr_size=edge_attr_size,
            )
            res_conv = ResGNNLayer(conv, hidden_size // group, norm=norm, activation=activation, residual=False)
            self.layers.append(RevGNNLayer(res_conv, group))
        self.activation = get_activation(activation)
        self.norm = get_norm_layer(last_norm, hidden_size)
        self.dropout = dropout
        if one_hot_emb:
            self.one_hot_encoder = nn.Linear(in_feats // 2, in_feats // 2)
        self.use_one_hot_emb = one_hot_emb
Example #15
    def __init__(
        self,
        in_feat,
        hidden_size,
        out_feat,
        num_layers,
        activation="relu",
        dropout=0.0,
        aggr="max",
        beta=1.0,
        p=1.0,
        learn_beta=False,
        learn_p=False,
        learn_msg_scale=True,
        use_msg_norm=False,
        edge_attr_size=None,
    ):
        super(DeeperGCN, self).__init__()
        self.dropout = dropout
        self.feat_encoder = nn.Linear(in_feat, hidden_size)

        self.layers = nn.ModuleList()
        for i in range(num_layers - 1):
            self.layers.append(
                ResGNNLayer(
                    conv=GENConv(
                        in_feats=hidden_size,
                        out_feats=hidden_size,
                        aggr=aggr,
                        beta=beta,
                        p=p,
                        learn_beta=learn_beta,
                        learn_p=learn_p,
                        use_msg_norm=use_msg_norm,
                        learn_msg_scale=learn_msg_scale,
                        edge_attr_size=edge_attr_size,
                    ),
                    in_channels=hidden_size,
                    activation=activation,
                    dropout=dropout,
                    checkpoint_grad=False,
                )
            )
        self.norm = nn.BatchNorm1d(hidden_size, affine=True)
        self.activation = get_activation(activation)
        self.fc = nn.Linear(hidden_size, out_feat)
Example #16
 def __init__(
     self,
     in_feat,
     out_feat,
     conv,
     connection="res",
     activation="relu",
     dropout=0.0,
     checkpoint_grad=False,
 ):
     super(DeepGCNLayer, self).__init__()
     self.conv = conv
     self.activation = get_activation(activation)
     self.dropout = dropout
     self.connection = connection
     self.norm = nn.BatchNorm1d(out_feat, affine=True)
     self.checkpoint_grad = checkpoint_grad
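connection="res+" refers to the pre-activation residual ordering from the DeeperGCN paper: normalization, activation and dropout are applied before the convolution, and the untouched input is added back afterwards. A dense-tensor sketch of that ordering (the real layer passes a graph object to conv):

import torch.nn.functional as F

def res_plus_block(x, conv, norm, act, dropout, training=True):
    h = norm(x)
    h = act(h)
    h = F.dropout(h, p=dropout, training=training)
    h = conv(h)
    return x + h   # skip connection around the whole block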
Example #17
    def __init__(
        self,
        edge_feats,
        num_etypes,
        in_features,
        out_features,
        nhead,
        feat_drop=0.0,
        attn_drop=0.5,
        negative_slope=0.2,
        residual=False,
        activation=None,
        alpha=0.0,
    ):
        super(myGATConv, self).__init__()
        self.edge_feats = edge_feats
        self.in_features = in_features
        self.out_features = out_features
        self.nhead = nhead
        self.edge_emb = nn.Parameter(torch.zeros(
            size=(num_etypes,
                  edge_feats)))  # nn.Embedding(num_etypes, edge_feats)

        self.W = nn.Parameter(
            torch.FloatTensor(in_features, out_features * nhead))
        self.W_e = nn.Parameter(
            torch.FloatTensor(edge_feats, edge_feats * nhead))

        self.a_l = nn.Parameter(torch.zeros(size=(1, nhead, out_features)))
        self.a_r = nn.Parameter(torch.zeros(size=(1, nhead, out_features)))
        self.a_e = nn.Parameter(torch.zeros(size=(1, nhead, edge_feats)))

        self.mhspmm = MultiHeadSpMM()

        self.feat_drop = nn.Dropout(feat_drop)
        self.dropout = nn.Dropout(attn_drop)
        self.leakyrelu = nn.LeakyReLU(negative_slope)
        self.act = None if activation is None else get_activation(activation)

        if residual:
            self.residual = nn.Linear(in_features, out_features * nhead)
        else:
            self.register_buffer("residual", None)
        self.reset_parameters()
        self.alpha = alpha
Example #18
File: revgcn.py  Project: rpatil524/cogdl
    def __init__(
        self,
        in_feats,
        out_feats,
        hidden_size,
        num_layers,
        dropout=0.5,
        drop_edge_rate=0.1,
        activation="relu",
        norm="batchnorm",
        group=2,
    ):
        super(RevGCN, self).__init__()
        self.dropout = dropout
        self.drop_edge_rate = drop_edge_rate
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        self.norm = get_norm_layer(norm, hidden_size)
        self.act = get_activation(activation)

        for i in range(num_layers):
            if i == 0:
                self.layers.append(
                    GCNLayer(
                        in_feats,
                        hidden_size,
                        residual=True,
                    )
                )
            elif i == num_layers - 1:
                self.layers.append(GCNLayer(hidden_size, out_feats, residual=True))
            else:
                conv = GCNLayer(
                    hidden_size // group,
                    hidden_size // group,
                )
                res_conv = ResGNNLayer(
                    conv,
                    hidden_size // group,
                    activation=activation,
                    norm=norm,
                    out_norm=norm,
                    out_channels=hidden_size // group,
                )
                self.layers.append(RevGNNLayer(res_conv, group))
Example #19
    def __init__(self,
                 in_feats,
                 out_feats,
                 normalize=False,
                 aggr="mean",
                 dropout=0.0,
                 norm=None,
                 activation=None,
                 residual=False):
        super(SAGELayer, self).__init__()
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.fc = nn.Linear(2 * in_feats, out_feats)
        self.normalize = normalize
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
        if aggr == "mean":
            self.aggr = MeanAggregator()
        elif aggr == "sum":
            self.aggr = SumAggregator()
        elif aggr == "max":
            self.aggr = MaxAggregator()
        else:
            raise NotImplementedError

        if activation is not None:
            self.act = get_activation(activation, inplace=True)
        else:
            self.act = None

        if norm is not None:
            self.norm = get_norm_layer(norm, out_feats)
        else:
            self.norm = None

        if residual:
            self.residual = nn.Linear(in_features=in_feats,
                                      out_features=out_feats)
        else:
            self.residual = None
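fc takes 2 * in_feats inputs because GraphSAGE concatenates each node's own features with the aggregated neighbor features before projecting. A dense-adjacency sketch of the mean-aggregator case (illustration only; CogDL's MeanAggregator works on sparse graphs):

import torch

def sage_mean_step(x, adj, fc):
    deg = adj.sum(dim=1, keepdim=True).clamp(min=1)
    neigh = (adj @ x) / deg                     # mean of each node's neighbors
    return fc(torch.cat([x, neigh], dim=-1))    # concat self || neighbors, then project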
Example #20
    def __init__(self,
                 in_features,
                 out_features,
                 dropout=0.0,
                 activation=None,
                 residual=False,
                 norm=None,
                 bias=True):
        super(GCNLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
        if residual:
            self.residual = nn.Linear(in_features, out_features)
        else:
            self.residual = None

        if activation is not None:
            self.act = get_activation(activation)
        else:
            self.act = None

        if norm is not None:
            if norm == "batchnorm":
                self.norm = nn.BatchNorm1d(out_features)
            elif norm == "layernorm":
                self.norm = nn.LayerNorm(out_features)
            else:
                raise NotImplementedError
        else:
            self.norm = None

        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()
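With an explicit weight matrix and optional bias, this layer realizes the standard GCN rule H' = A_hat · H · W (+ b) over a normalized adjacency A_hat. A dense sketch of that propagation (illustration; the layer itself runs on CogDL's sparse graph ops):

import torch

def gcn_propagate(adj_norm, x, weight, bias=None):
    support = x @ weight        # feature transformation
    out = adj_norm @ support    # aggregation over the normalized adjacency
    return out if bias is None else out + bias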
Example #21
    def __init__(self,
                 in_features,
                 out_features,
                 dropout=0.0,
                 activation=None,
                 residual=False,
                 norm=None,
                 bias=True,
                 **kwargs):
        super(GCNLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
        if residual:
            self.residual = nn.Linear(in_features, out_features)
        else:
            self.residual = None

        if activation is not None:
            self.act = get_activation(activation, inplace=True)
        else:
            self.act = None

        if norm is not None:
            if norm == "batchnorm":
                self.norm = nn.BatchNorm1d(out_features)
            elif norm == "layernorm":
                self.norm = nn.LayerNorm(out_features)
            else:
                raise NotImplementedError
        else:
            self.norm = None

        self.reset_parameters()
Example #22
 def __init__(
     self,
     conv,
     in_channels,
     activation="relu",
     norm="batchnorm",
     dropout=0.0,
     out_norm=None,
     out_channels=-1,
     residual=True,
     checkpoint_grad=False,
 ):
     super(ResGNNLayer, self).__init__()
     self.conv = conv
     self.activation = get_activation(activation)
     self.dropout = dropout
     self.norm = get_norm_layer(norm, in_channels)
     self.residual = residual
     if out_norm:
         self.out_norm = get_norm_layer(out_norm, out_channels)  # apply the requested output norm type
     else:
         self.out_norm = None
     self.checkpoint_grad = checkpoint_grad