Example #1
    def __init__(self,
                 num_layers=2,
                 hidden=16,
                 features_num=16,
                 num_class=2,
                 droprate=0.5,
                 dim=1,
                 kernel_size=2,
                 edge_droprate=0.0,
                 fea_norm="no_norm",
                 K=20,
                 alpha=0.5):
        super(SplineGCN, self).__init__()
        self.droprate = droprate
        self.edge_droprate = edge_droprate
        if fea_norm == "no_norm":
            self.fea_norm_layer = None
        elif fea_norm == "graph_size_norm":
            self.fea_norm_layer = GraphSizeNorm()
        else:
            raise ValueError("your fea_norm is un-defined: %s") % fea_norm

        self.convs = torch.nn.ModuleList()
        self.convs.append(SplineConv(features_num, hidden, dim, kernel_size))
        for i in range(num_layers - 2):
            self.convs.append(SplineConv(hidden, hidden, dim, kernel_size))
        self.convs.append(SplineConv(hidden, num_class, dim, kernel_size))

        self.appnp = APPNP(K, alpha)
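The constructor above stops short of a forward pass. Below is a minimal sketch of one, assuming SplineConv's usual (x, edge_index, edge_attr) calling convention, the attribute names from the constructor, and torch.nn.functional imported as F; where dropout is applied is an illustrative choice, and edge_droprate is left out for brevity.

    def forward(self, x, edge_index, edge_attr):
        # hedged sketch, not taken from the original source
        if self.fea_norm_layer is not None:
            x = self.fea_norm_layer(x)
        for conv in self.convs[:-1]:
            x = F.relu(conv(x, edge_index, edge_attr))
            x = F.dropout(x, p=self.droprate, training=self.training)
        x = self.convs[-1](x, edge_index, edge_attr)  # per-node logits
        return self.appnp(x, edge_index)              # propagate logits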
Example #2
 def __init__(self, num_layers=1, hidden=48, features_num=16, num_class=2, K=5, alpha=0.2):
     super(GCN_APPNP, self).__init__()
     self.lin2 = Linear(hidden, num_class)
     self.first_lin = Linear(features_num, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers):
         self.convs.append(GCNConv(hidden, hidden))
     self.ppnp = APPNP(K=K, alpha=alpha)
Example #3
    def __init__(self,
                 input_dim,
                 out_dim,
                 filter_num,
                 alpha=0.1,
                 dropout=False,
                 K=1):
        super(APPNP_Link, self).__init__()
        self.dropout = dropout

        self.line1 = nn.Linear(input_dim, filter_num)
        self.line2 = nn.Linear(filter_num, filter_num)

        self.conv1 = APPNP(K=K, alpha=alpha)
        self.conv2 = APPNP(K=K, alpha=alpha)

        self.linear = nn.Linear(filter_num * 2, out_dim)
Example #4
 def __init__(self,
              input_dim,
              hidden_dim,
              distmult=False,
              k=10,
              alpha=0.1,
              dropout=0):
     super().__init__(input_dim, hidden_dim, distmult, dropout)
     self.gcn = APPNP(K=k, alpha=alpha)
Example #5
def test_appnp():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = APPNP(in_channels, out_channels, K=10, alpha=0.1)
    assert conv.__repr__() == 'APPNP(16, 32, K=10, alpha=0.1)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
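Note that this test targets an older torch_geometric release in which APPNP still owned a weight matrix, hence the in_channels/out_channels arguments and the repr string. In newer releases APPNP is propagation-only (compare Examples #17 and #19 below); a self-contained sketch of the equivalent check under that API:

import torch
from torch_geometric.nn import APPNP

x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

lin = torch.nn.Linear(16, 32)
conv = APPNP(K=10, alpha=0.1)    # propagation only, no weight matrix
out = conv(lin(x), edge_index)   # transform first, then propagate
assert out.size() == (4, 32)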
Example #6
 def __init__(self, K, alpha, hidden, activation, data):
     super(ModelAPPNP, self).__init__()
     self.linear_1 = Linear(data.num_features, hidden)
     self.conv = APPNP(K, alpha)
     self.linear_2 = Linear(hidden, data.num_class)
     if activation == "relu":
         self.activation = relu
     elif activation == "leaky_relu":
         self.activation = leaky_relu
Example #7
    def __init__(self,
                 input_dim,
                 out_dim,
                 filter_num,
                 alpha=0.1,
                 dropout=False,
                 layer=3):
        super(APPNP_Model, self).__init__()
        self.dropout = dropout
        self.line1 = nn.Linear(input_dim, filter_num)
        self.line2 = nn.Linear(filter_num, filter_num)

        self.conv1 = APPNP(K=10, alpha=alpha)
        self.conv2 = APPNP(K=10, alpha=alpha)
        self.layer = layer
        if layer == 3:
            self.line3 = nn.Linear(filter_num, filter_num)
            self.conv3 = APPNP(K=10, alpha=alpha)

        self.Conv = nn.Conv1d(filter_num, out_dim, kernel_size=1)
Example #8
    def init_model(self, n_class, feature_num):
        hidden_size = int(2**self.hyperparameters['hidden'])
        K = int(self.hyperparameters['K'])
        self.lin1 = Linear(feature_num, hidden_size)
        self.lin2 = Linear(hidden_size, n_class)
        self.prop1 = APPNP(K=K, alpha=self.hyperparameters['alpha'])

        self.optimizer = torch.optim.Adam(self.parameters(),
                                          lr=self.hyperparameters['lr'],
                                          weight_decay=5e-4)

        self.to('cuda')
Example #9
    def __init__(self, dataset, args):
        super(GPRGNN, self).__init__()
        self.lin1 = Linear(dataset.num_features, args.hidden)
        self.lin2 = Linear(args.hidden, dataset.num_classes)

        if args.ppnp == 'PPNP':
            self.prop1 = APPNP(args.K, args.alpha)
        elif args.ppnp == 'GPR_prop':
            self.prop1 = GPR_prop(args.K, args.alpha, args.Init, args.Gamma)

        self.Init = args.Init
        self.dprate = args.dprate
        self.dropout = args.dropout
Example #10
    def __init__(self, in_channels, hidden_channels, out_channels,
                 Init='PPR', dprate=.5, dropout=.5, K=10, alpha=.1,
                 Gamma=None, ppnp='GPR_prop'):
        super(GPRGNN, self).__init__()
        self.lin1 = nn.Linear(in_channels, hidden_channels)
        self.lin2 = nn.Linear(hidden_channels, out_channels)

        if ppnp == 'PPNP':
            self.prop1 = APPNP(K, alpha)
        elif ppnp == 'GPR_prop':
            self.prop1 = GPR_prop(K, alpha, Init, Gamma)

        self.Init = Init
        self.dprate = dprate
        self.dropout = dropout
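Both GPRGNN constructors keep two distinct rates: dropout regularizes the feature MLP, while dprate is applied separately right before propagation. A forward-pass sketch consistent with these attributes (the exact ordering is an assumption, with torch.nn.functional as F):

    def forward(self, x, edge_index):
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        if self.dprate > 0.0:
            # separate dropout on the logits fed into propagation
            x = F.dropout(x, p=self.dprate, training=self.training)
        return F.log_softmax(self.prop1(x, edge_index), dim=-1)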
Example #11
    def __init__(self, dataset, args):
        in_channels = dataset.num_features
        out_channels = dataset.num_classes

        super(GCN_JKNet, self).__init__()
        self.conv1 = GCNConv(in_channels, 16)
        self.conv2 = GCNConv(16, 16)
        self.lin1 = torch.nn.Linear(16, out_channels)
        self.one_step = APPNP(K=1, alpha=0)
        self.JK = JumpingKnowledge(mode='lstm',
                                   channels=16,
                                   num_layers=4
                                   )
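The one_step layer exploits a corner case of APPNP: with alpha=0 the teleport term vanishes, and with K=1 a single iteration runs, so the layer reduces to one multiplication with the normalized adjacency. A forward-pass sketch that would fit this constructor (activations and readout order are assumptions, with torch.nn.functional as F):

    def forward(self, x, edge_index):
        x1 = F.relu(self.conv1(x, edge_index))
        x2 = F.relu(self.conv2(x1, edge_index))
        x = self.JK([x1, x2])              # LSTM aggregation over layer outputs
        x = self.one_step(x, edge_index)   # one plain propagation hop
        return F.log_softmax(self.lin1(x), dim=-1)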
Example #12
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 K, alpha, dropout):
        super(APPNPNet, self).__init__()

        self.lins = torch.nn.ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        self.bns = torch.nn.ModuleList()
        self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        for _ in range(num_layers - 2):
            self.lins.append(Linear(hidden_channels, hidden_channels))
            self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))
        self.prop = APPNP(K, alpha)

        self.dropout = dropout
Example #13
 def __init__(self,
              K=5,
              alpha=0.16,
              hidden=128,
              num_features=16,
              num_class=2,
              aggr='add',
              num_layers=2):
     super(My_APPNP, self).__init__()
     self.lin1 = Linear(num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(GCNConv(hidden, hidden))
     self.lin2 = Linear(hidden, num_class)
     self.prop1 = APPNP(K, alpha, aggr=aggr)
Example #14
    def __init__(self,
                 num_features,
                 hidden_size,
                 dropout=0.5,
                 activation="relu",
                 K=10,
                 alpha=0.1,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.lin1 = Linear(num_features, hidden_size)
        self.lin2 = Linear(hidden_size, 1)
        self.prop1 = APPNP(K, alpha)

        self.dropout = dropout
        assert activation in ["relu", "elu"]
        self.activation = getattr(F, activation)
Example #15
    def __init__(self, dataset, channels, dropout=0.8, K=10, alpha=0.10):
        super(MonoAPPNPModel, self).__init__()
        self.dropout = dropout

        if len(channels) > 1:
            print(
                'WARNING: Taking only the first hidden layer size, the rest is ignored.'
            )

        self.nn = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(dataset.num_node_features, channels[0]),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(channels[0], dataset.num_classes),
        )
        self.appnp = APPNP(K, alpha)
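With the whole feature transform packed into one nn.Sequential, the matching forward pass is essentially a one-liner; a sketch, assuming the usual (x, edge_index) convention and torch.nn.functional as F:

    def forward(self, x, edge_index):
        return F.log_softmax(self.appnp(self.nn(x), edge_index), dim=-1)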
Example #16
    def __init__(self, dropout, k, alpha, hiddens, num_features, num_classes):
        super(Net, self).__init__()
        self.dropout = dropout
        self.k = k
        self.alpha = alpha
        self.hiddens = hiddens
        self.appnp_layers = torch.nn.ModuleList()

        self.appnp_layers.append(Linear(num_features, hiddens[0]))

        for i in range(1, len(hiddens)):
            self.appnp_layers.append(Linear(hiddens[i - 1], hiddens[i]))

        self.appnp_layers.append(Linear(hiddens[-1], num_classes))
        self.prop1 = APPNP(k, alpha)
Example #17
def test_appnp():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    edge_weight = torch.rand(edge_index.size(1))
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    lin = torch.nn.Linear(in_channels, out_channels)
    conv = APPNP(K=10, alpha=0.1)
    assert conv.__repr__() == 'APPNP(K=10, alpha=0.1)'
    out1 = conv(lin(x), edge_index)
    assert out1.size() == (num_nodes, out_channels)
    out2 = conv(lin(x), edge_index, edge_weight)
    assert out2.size() == (num_nodes, out_channels)

    jit = torch.jit.script(conv.jittable())
    assert jit(lin(x), edge_index).tolist() == out1.tolist()
    assert jit(lin(x), edge_index, edge_weight).tolist() == out2.tolist()
Example #18
    def __init__(self, dim_input, n_heads, n_att_layers, dim_embedding,
                 dim_values, dim_hidden, K, alpha, weighted):
        super(NodeEncoder, self).__init__()

        # if the graph is weighted, two extra features are appended to
        # the input data
        self.weighted = weighted
        if weighted:
            first_dim = dim_input + 2
        else:
            # otherwise, only one extra feature is appended
            first_dim = dim_input + 1
        self.n_att_layers = n_att_layers
        self.Lin1 = nn.Linear(first_dim, dim_embedding)
        self.attention_layers = nn.ModuleList([
            AttentionLayer(n_heads, dim_embedding, dim_values, dim_hidden)
            for k in range(n_att_layers)
        ])
        self.power = APPNP(K, alpha, bias=False).to(device)
Example #19
def test_appnp():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))

    conv = APPNP(K=10, alpha=0.1)
    assert conv.__repr__() == 'APPNP(K=10, alpha=0.1)'
    out = conv(x, edge_index)
    assert out.size() == (4, 16)
    assert torch.allclose(conv(x, adj.t()), out, atol=1e-6)

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj.t()), out, atol=1e-6)
Example #20
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_iteration,
                 mlp_layers,
                 dropout,
                 alpha=0.1):
        super(APPNPNet, self).__init__()

        self.mlp = torch.nn.ModuleList()
        self.mlp.append(torch.nn.Linear(in_channels, hidden_channels))
        for _ in range(mlp_layers - 2):
            self.mlp.append(torch.nn.Linear(hidden_channels, hidden_channels))
        self.mlp.append(torch.nn.Linear(hidden_channels, out_channels))

        self.appnp = APPNP(num_iteration,
                           alpha,
                           dropout=dropout,
                           normalize=False)
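Unlike most examples here, this one hands dropout to APPNP itself, where it re-drops edges at every one of the num_iteration propagation steps, and normalize=False skips GCN-style renormalization of edge weights. A forward-pass sketch for the MLP-then-propagate structure (the ReLU choice is an assumption, with torch.nn.functional as F):

    def forward(self, x, edge_index):
        for lin in self.mlp[:-1]:
            x = F.relu(lin(x))
        x = self.mlp[-1](x)
        return self.appnp(x, edge_index)  # edge dropout happens inside APPNP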
Example #21
    def generate_model(self, args):
        self.gnn_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.act_layers = nn.ModuleList()

        # first conv layer
        self.gnn_layers.append(
            GNNLayer(self.in_channels, args.hidden_size, args.aggr_type,
                     args.conv_type))
        self.norm_layers.append(NormLayer(args.norm_type, args.hidden_size))
        self.act_layers.append(ActivationLayer(args.act_type))

        # intermediate layers
        if self.layer_aggr_type != 'dense':
            for i in range(1, self.num_layers):
                self.gnn_layers.append(
                    GNNLayer(args.hidden_size, args.hidden_size,
                             args.aggr_type, args.conv_type))
                self.norm_layers.append(
                    NormLayer(args.norm_type, args.hidden_size))
                self.act_layers.append(ActivationLayer(args.act_type))
        else:
            for i in range(1, self.num_layers):
                self.gnn_layers.append(
                    GNNLayer(i * args.hidden_size, args.hidden_size,
                             args.aggr_type, args.conv_type))
                self.norm_layers.append(
                    NormLayer(args.norm_type, args.hidden_size))
                self.act_layers.append(ActivationLayer(args.act_type))

        # last (output) layer
        if self.layer_aggr_type == 'jk':
            self.out_lin = nn.Linear(self.num_layers * args.hidden_size,
                                     self.num_class)
        else:
            self.out_lin = nn.Linear(args.hidden_size, self.num_class)

        if self.conv_type == 'appnp':
            self.prop = APPNP(10, 0.1)
Example #22
 def __init__(self, in_dim, out_dim):
     super().__init__()
     self.prop = APPNP(K=10, alpha=0.2)
     self.lin = nn.Linear(in_dim, out_dim)
     self.sigma = nn.PReLU(out_dim)
     self.reset_parameters()
Example #23
 def __init__(self, dataset):
     super(Net, self).__init__()
     self.lin1 = Linear(dataset.num_features, args.hidden)
     self.lin2 = Linear(args.hidden, dataset.num_classes)
     self.prop1 = APPNP(args.K, args.alpha)
Example #24
    def __init__(self,
                 in_channels=1,
                 hidden_channels=1,
                 out_channels=1,
                 normalize=False,
                 add_loop=False,
                 gnn_k=1,
                 gnn_type=1):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the GNN
        self.gnn_type = gnn_type
        if gnn_type==0:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=False)
        if gnn_type==1:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=True)
        
        if gnn_type==2:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels, cached=True)
        if gnn_type==3:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels,improved=True, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels,improved=True, cached=True)
        if gnn_type==4:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=2)
        if gnn_type==5:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=4)
        if gnn_type==6:
            self.conv1 = GraphConv(in_channels=1, out_channels=hidden_channels,aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels, out_channels=out_channels,aggr='add')
        if gnn_type==7:
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=3, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=3, aggr='add', bias=True)
        if gnn_type==8:
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=7, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=7, aggr='add', bias=True)
        if gnn_type==9:
            self.conv1 = GATConv(in_channels=1, out_channels=hidden_channels, heads=1, concat=True, negative_slope=0.2, dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels, out_channels=out_channels, heads=1, concat=True, negative_slope=0.2, dropout=0.6)
        if gnn_type==10:
            self.conv1 = GATConv(in_channels=1, out_channels=hidden_channels, heads=6, concat=False, negative_slope=0.2, dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels, out_channels=out_channels, heads=6, concat=False, negative_slope=0.2, dropout=0.6)
        if gnn_type==11:
            self.conv1 = GATConv(in_channels=1, out_channels=hidden_channels, heads=4, concat=True, negative_slope=0.2, dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels, out_channels=out_channels, heads=4, concat=True, negative_slope=0.2, dropout=0.6)
        if gnn_type==12:
            self.conv1 = GATConv(in_channels=1, out_channels=hidden_channels, heads=4, concat=False, negative_slope=0.2, dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels, out_channels=out_channels, heads=4, concat=False, negative_slope=0.2, dropout=0.6)
            
        if gnn_type==13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type==14:
            self.conv1 = ARMAConv(in_channels=1, out_channels=hidden_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels, out_channels=out_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
        if gnn_type==15:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=1, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=1, cached=True, bias=True)
        if gnn_type==16:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=3, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=3, cached=True, bias=True)
        if gnn_type==17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type==18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type==19:
            self.conv1 = RGCNConv(in_channels=1, out_channels=hidden_channels, num_relations=3, num_bases=2, bias=True)
            self.conv2 = RGCNConv(in_channels=hidden_channels, out_channels=out_channels, num_relations=3, num_bases=2, bias=True)
        if gnn_type==25:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==26:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==27:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==28:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
        if gnn_type==29:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
Example #25
 def __init__(self, input_channels, hidden_channels, out_channels, K=10, alpha=0.1):
     super().__init__()
     self.lin1 = Linear(input_channels, hidden_channels)
     self.lin2 = Linear(hidden_channels, out_channels)
     self.prop = APPNP(K, alpha)
Example #26
    def __init__(
            self,
            in_channels=1,
            hidden_channels=1,
            out_channels=1,
            normalize=False,
            add_loop=False,
            gnn_k=1,
            gnn_type=1,
            jump=None,  #None,max,lstm
            res=False,
            activation='leaky'):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.in_channels = in_channels
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the GNN
        self.gnn_type = gnn_type

        self.jump = jump
        if jump is not None:
            if jump != 'lstm':
                self.jk = JumpingKnowledge(jump)
            else:
                self.jk = JumpingKnowledge(jump, out_channels, gnn_k)
        if activation == 'leaky':
            self.activ = F.leaky_relu
        elif activation == 'elu':
            self.activ = F.elu
        elif activation == 'relu':
            self.activ = F.relu
        self.res = res
        if self.gnn_type in [10, 12] and self.res:
            raise Exception('res must be False when gnn_type is 10 or 12!')
        if self.k == 1 and self.res:
            raise Exception('res must be False when gnn_k == 1!')
        if self.k == 1 and self.jump is not None:
            raise Exception(
                'jumping knowledge only applies when gnn_k > 1!')
        if gnn_type == 0:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=False)
        if gnn_type == 1:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=True)

        if gnn_type == 2:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 cached=False)
        if gnn_type == 3:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
        if gnn_type == 4:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=2)
        if gnn_type == 5:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=4)
        if gnn_type == 6:
            self.conv1 = GraphConv(in_channels=1,
                                   out_channels=out_channels,
                                   aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels,
                                   out_channels=out_channels,
                                   aggr='add')
        if gnn_type == 7:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 8:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 9:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)
        if gnn_type == 10:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 11:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 12:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type == 14:
            self.conv1 = ARMAConv(in_channels=1,
                                  out_channels=hidden_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
        if gnn_type == 15:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
        if gnn_type == 16:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
        if gnn_type == 17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type == 18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type == 19:
            self.conv1 = RGCNConv(in_channels=1,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
            self.conv2 = RGCNConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
        if gnn_type == 25:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 26:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 27:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 28:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 29:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
Example #27
    def __init__(
        self,
        num_feats,
        max_nodes,
        num_classes,
        num_heads,
        hidden_dim,
        num_keys,
        mem_hidden_dim=100,
        variant="gmn",
        use_deeper: bool = False,
        num_layers: Optional[int] = None,
        dropout: Optional[float] = None,
        block: Optional[str] = None,
        conv_encode_edge: Optional[bool] = None,
        add_virtual_node: Optional[bool] = None,
        conv: Optional[str] = None,
        gcn_aggr: Optional[str] = None,
        t: Optional[float] = None,
        learn_t: Optional[bool] = None,
        p: Optional[float] = None,
        learn_p: Optional[bool] = None,
        y: Optional[float] = None,
        learn_y: Optional[bool] = None,
        msg_norm: Optional[bool] = None,
        learn_msg_scale: Optional[bool] = None,
        norm: Optional[str] = None,
        mlp_layers: Optional[int] = None,
        use_appnp: bool = False,
        k: int = 10,
        alpha: float = 0.1,
        mlp_hidden_dim: int = 50,
    ):
        super(GMN, self).__init__()

        self.k = k
        self.alpha = alpha
        self.use_deeper = use_deeper
        self.num_features = num_feats
        self.max_nodes = max_nodes
        self.num_classes = num_classes
        self.num_heads = num_heads
        self.num_keys = num_keys
        self.variant = variant

        self.atom_encoder = AtomEncoder(emb_dim=hidden_dim)
        self.q0s = torch.nn.ModuleDict()
        self.q0s[Q0LayerType.with_edge] = GCNConv(hidden_dim, aggr="add")

        if use_deeper:
            deeper_gcn = deeper.DeeperGCN(
                num_layers=num_layers,
                dropout=dropout,
                block=block,
                conv_encode_edge=conv_encode_edge,
                add_virtual_node=add_virtual_node,
                hidden_channels=hidden_dim,
                num_tasks=None,
                conv=conv,
                gcn_aggr=gcn_aggr,
                t=t,
                learn_t=learn_t,
                p=p,
                learn_p=learn_p,
                y=y,
                learn_y=learn_y,
                msg_norm=msg_norm,
                learn_msg_scale=learn_msg_scale,
                norm=norm,
                mlp_layers=mlp_layers,
                graph_pooling=None,
                node_encoder=True,
                encode_atom=False,
            )
            self.q0s[Q0LayerType.deeper] = deeper_gcn

        if use_appnp:
            self.q0s[Q0LayerType.no_edge] = APPNP(K=self.k, alpha=self.alpha)

        self.bn = nn.BatchNorm1d(hidden_dim)
        self.q0_second = GraphConv(hidden_dim * len(self.q0s), hidden_dim)
        self.mem_layers = nn.ModuleList()

        max_dims = [self.max_nodes]
        for idx, num_keys in enumerate(self.num_keys):
            max_dims.append(num_keys)
            num_feats = hidden_dim if idx == 0 else mem_hidden_dim
            self.mem_layers.append(
                MemConv(
                    num_features=num_feats,
                    heads=self.num_heads,
                    num_keys=num_keys,
                    dim_out=mem_hidden_dim,
                    variant=variant,
                    max_queries=max_dims[idx],
                ))

        self.mlp = nn.Sequential(
            Linear(mem_hidden_dim, mlp_hidden_dim),
            nn.LeakyReLU(),
            Linear(mlp_hidden_dim, self.num_classes),
        )
Example #28
 def __init__(self):
     super(Net, self).__init__()
     self.lin1 = Linear(data.x.shape[1], 64)
     self.lin2 = Linear(64, int(max(data.y)) + 1)
     self.prop1 = APPNP(10, 0.1)
Example #29
 def __init__(self, n_features, n_classes, n_hidden, k_hops, alpha, p_dropout=0.5):
     super().__init__()
     self.lin1 = SparseLinear(n_features, n_hidden, bias=False)
     self.lin2 = nn.Linear(n_hidden, n_classes, bias=False)
     self.prop = APPNP(k_hops, alpha)
     self.p_dropout = p_dropout
Example #30
 def __init__(self, input_dim, hidden_dim, num_classes, dropout, K, alpha):
     super(My_APPNP, self).__init__()
     self.dropout = dropout
     self.lin1 = Linear(input_dim, hidden_dim)
     self.lin2 = Linear(hidden_dim, num_classes)
     self.prop1 = APPNP(K, alpha)
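Taken together, most of these examples follow the same predict-then-propagate recipe. The self-contained sketch below assembles it end to end against the current propagation-only APPNP API; the hyperparameters are illustrative defaults, not taken from any single example above.

import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import APPNP


class PPNPExample(torch.nn.Module):
    def __init__(self, num_features, hidden, num_classes, K=10, alpha=0.1):
        super().__init__()
        self.lin1 = Linear(num_features, hidden)
        self.lin2 = Linear(hidden, num_classes)
        self.prop = APPNP(K, alpha)

    def forward(self, x, edge_index):
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)                 # per-node class logits
        x = self.prop(x, edge_index)     # personalized-PageRank smoothing
        return F.log_softmax(x, dim=-1)


# Smoke test on a toy graph: 4 nodes, 16 features, 7 classes.
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
out = PPNPExample(16, 32, 7)(x, edge_index)
assert out.size() == (4, 7)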