def __init__(self, w1=16, w2=64, w3=64, w4=10):
    super(SG, self).__init__()
    self.conv1 = SGConv(8, w1, cached=False)
    self.conv2 = SGConv(w1, w2, cached=False)
    self.conv3 = SGConv(w2, w3, cached=False)
    self.conv4 = SGConv(w3, w4, cached=False)
    self.linear = Linear(w4, 2)
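Only the constructor is shown above; a minimal forward pass for this stack might look like the following sketch (the ReLU activations and the data unpacking are assumptions, not part of the original):

import torch.nn.functional as F

def forward(self, data):
    # hypothetical forward for SG: four SGConv hops with ReLU in between,
    # then the 2-way linear classification head
    x, edge_index = data.x, data.edge_index
    x = F.relu(self.conv1(x, edge_index))
    x = F.relu(self.conv2(x, edge_index))
    x = F.relu(self.conv3(x, edge_index))
    x = F.relu(self.conv4(x, edge_index))
    return self.linear(x)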
Example #2
def __init__(self,
             num_features,
             n_classes,
             num_hidden,
             num_hidden_layers,
             dropout,
             activation,
             K=1,
             cached=False,
             bias=True):
    super(PSG, self).__init__()
    # dropout
    if dropout:
        self.dropout = nn.Dropout(p=dropout)
    else:
        self.dropout = nn.Dropout(p=0.)
    # activation
    self.activation = activation
    # input layer
    self.conv_input = SGConv(num_features,
                             num_hidden,
                             K=K,
                             cached=cached,
                             bias=bias)
    # hidden layers
    self.layers = nn.ModuleList()
    for _ in range(num_hidden_layers):
        self.layers.append(
            SGConv(num_hidden, num_hidden, K=K, cached=cached, bias=bias))
    # output layer
    self.conv_output = SGConv(num_hidden,
                              n_classes,
                              K=K,
                              cached=cached,
                              bias=bias)
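Again only the constructor appears; a plausible forward pass, with the stored activation and dropout applied between convolutions (an assumption based on the attributes defined above, not the original code):

def forward(self, x, edge_index):
    # hypothetical forward for PSG: input conv, hidden convs, output conv,
    # with the stored activation and dropout in between
    x = self.activation(self.conv_input(x, edge_index))
    x = self.dropout(x)
    for layer in self.layers:
        x = self.activation(layer(x, edge_index))
        x = self.dropout(x)
    return self.conv_output(x, edge_index)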
Example #3
class SGC(torch.nn.Module):
    """
    Simplifying Graph Convolutional Networks"
    <https://arxiv.org/abs/1902.07153>
    """
    def __init__(self):
        super(SGC, self).__init__()
        self.name = 'SGC'
        self.conv1 = SGConv(75, 128, K=2, cached=False)
        self.gather_layer = nn.Linear(128, 1)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.gather_layer.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x1 = self.conv1(x, edge_index)

        y_molecules = global_add_pool(x1, batch)
        z_molecules = self.gather_layer(y_molecules)
        return z_molecules

    def __call__(self, data, std, mean):
        target = torch.unsqueeze(data.y, 1)
        out = self.forward(data)
        loss = F.mse_loss(out, target)
        z = out.to('cpu').data.numpy()
        t = target.to('cpu').data.numpy()
        z, t = std * z + mean, std * t + mean
        return loss, z, t
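Note that overriding __call__ bypasses nn.Module's hook machinery; here it returns the MSE loss together with de-normalized predictions and targets. A sketch of how a training step might consume it (the optimizer, train_loader, and std/mean label-scaling values are assumptions):

# hypothetical training step for the regression SGC above
model = SGC()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for data in train_loader:  # assumed torch_geometric DataLoader of molecules
    optimizer.zero_grad()
    loss, pred, target = model(data, std, mean)  # std/mean from label scaling
    loss.backward()
    optimizer.step()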
Example #4
    def __init__(self,
                 nfeat,
                 nclass,
                 K=3,
                 cached=True,
                 lr=0.01,
                 weight_decay=5e-4,
                 with_bias=True,
                 device=None):

        super(SGC, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device

        self.nfeat = nfeat
        self.hidden_sizes = [K]
        self.nclass = nclass

        self.conv1 = SGConv(nfeat, nclass, bias=with_bias, K=K, cached=cached)

        self.weight_decay = weight_decay
        self.lr = lr
        self.output = None
        self.best_model = None
        self.best_output = None
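This wrapper stores training hyperparameters on the module but does not show a forward pass; a minimal one consistent with its single cached SGConv layer could be the following sketch (the log-softmax output for NLL-style training is an assumption):

import torch.nn.functional as F

def forward(self, x, edge_index, edge_weight=None):
    # hypothetical forward: one K-hop SGConv, then log-softmax over classes
    x = self.conv1(x, edge_index, edge_weight)
    return F.log_softmax(x, dim=1)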
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[],
                 activations=[],
                 K=2,
                 dropout=0.5,
                 weight_decay=5e-5,
                 lr=0.2,
                 use_bias=False):
        super().__init__()

        if hiddens or activations:
            raise RuntimeError(
                "Arguments 'hiddens' and 'activations' are not supported "
                "by SGC (PyG backend).")

        conv = SGConv(in_channels,
                      out_channels,
                      bias=use_bias,
                      K=K,
                      cached=True,
                      add_self_loops=True)
        self.conv = conv
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(conv.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
Example #6
    def __init__(self,
                 n_feat,
                 n_class,
                 n_layer,
                 agg_hidden,
                 fc_hidden,
                 dropout,
                 readout,
                 device,
                 K=2):

        super(SGCNN, self).__init__()

        self.n_layer = n_layer
        self.dropout = dropout
        self.readout = readout
        self.device = device
        self.readout_dim = agg_hidden

        # SGC layers (a ModuleList so their parameters are registered with the module)
        self.sgcnn_layers = nn.ModuleList()
        for i in range(n_layer):
            if i == 0:
                sgcnn = SGConv(n_feat, agg_hidden, K=K).to(device)
            else:
                sgcnn = SGConv(agg_hidden, agg_hidden, K=K).to(device)
            self.sgcnn_layers.append(sgcnn)

        # Fully-connected layer
        self.fc1 = nn.Linear(self.readout_dim, fc_hidden)
        self.fc2 = nn.Linear(fc_hidden, n_class)
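The readout attribute presumably selects a graph-level pooling; under that assumption (with 'sum'/'avg'/'max' as the supported values, which is a guess), a forward pass might look like:

import torch.nn.functional as F
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool

def forward(self, x, edge_index, batch):
    # hypothetical forward: stacked SGConv layers with dropout, a graph-level
    # readout, then the fully connected head
    for layer in self.sgcnn_layers:
        x = F.relu(layer(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
    pool = {'sum': global_add_pool, 'avg': global_mean_pool,
            'max': global_max_pool}[self.readout]
    x = pool(x, batch)
    x = F.relu(self.fc1(x))
    return self.fc2(x)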
Example #7
    def __init__(self, hidden_size, n_node, dropout=0.5, negative_slope=0.2, heads=8, item_fusing=False):
        super(SRGNN, self).__init__()
        self.hidden_size, self.n_node = hidden_size, n_node
        self.item_fusing = item_fusing
        self.embedding = nn.Embedding(self.n_node, self.hidden_size)
        # self.gated = InOutGGNN(self.hidden_size, num_layers=1)

        self.gcn = GCNConv(in_channels=hidden_size, out_channels=hidden_size)
        self.gcn2 = GCNConv(in_channels=hidden_size, out_channels=hidden_size)

        self.gated = SGConv(in_channels=hidden_size, out_channels=hidden_size, K=2)
        # self.gated = InOutGATConv_intra(in_channels=hidden_size, out_channels=hidden_size, dropout=dropout,
        #                           negative_slope=negative_slope, heads=heads, concat=True)
        # self.gated2 = InOutGATConv(in_channels=hidden_size * heads, out_channels=hidden_size, dropout=dropout,
        #                            negative_slope=negative_slope, heads=heads, concat=True, middle_layer=True)
        # self.gated3 = InOutGATConv(in_channels=hidden_size * heads, out_channels=hidden_size, dropout=dropout,
        #                            negative_slope=negative_slope, heads=heads, concat=False)

        self.W_1 = nn.Linear(self.hidden_size * 8, self.hidden_size)
        self.W_2 = nn.Linear(self.hidden_size * 8, self.hidden_size)
        self.q = nn.Linear(self.hidden_size, 1)
        self.W_3 = nn.Linear(16 * self.hidden_size, self.hidden_size)

        self.loss_function = nn.CrossEntropyLoss()
        self.reset_parameters()
Example #8
    def __init__(self, num_features):
        super(SGCN, self).__init__()
        self.conv1 = SGConv(num_features, 8, K=2)
        self.conv2 = SGConv(8, 16, K=2)

        # self.fc = torch.nn.Linear(2 * 16, 1)
        self.fc = torch.nn.Linear(2 * 16, 2)
Example #9
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[],
                 acts=[],
                 K=2,
                 dropout=None,
                 weight_decay=5e-5,
                 lr=0.2,
                 bias=False):
        super().__init__()

        if hids or acts:
            raise RuntimeError(
                "Arguments 'hids' and 'acts' are not supported "
                "by SGC (PyG backend).")

        # the 'dropout' argument is accepted for API compatibility but unused here
        conv = SGConv(in_features,
                      out_features,
                      bias=bias,
                      K=K,
                      cached=True,
                      add_self_loops=True)
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(conv.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
Example #10
class ModelSGC(torch.nn.Module):
    def __init__(self, num_layers, hidden, activation, data):
        super(ModelSGC, self).__init__()
        self.linear_1 = Linear(data.num_features, hidden)
        self.conv = SGConv(hidden, hidden, K=num_layers)
        self.linear_2 = Linear(hidden, data.num_class)
        if activation == "relu":
            self.activation = relu
        elif activation == "leaky_relu":
            self.activation = leaky_relu

    def reset_parameters(self):
        self.linear_1.reset_parameters()
        self.conv.reset_parameters()
        self.linear_2.reset_parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
        x = self.linear_1(x)
        x = self.activation(x)
        x = dropout(x, p=0.5, training=self.training)
        x = self.conv(x, edge_index, edge_weight=edge_weight)
        x = dropout(x, p=0.5, training=self.training)
        x = self.linear_2(x)
        return log_softmax(x, dim=-1)
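A usage sketch for this model on a citation graph; the Cora setup and the attribute patching are assumptions (the snippet reads num_class and edge_weight off the data object, which Planetoid does not provide by default):

from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='data', name='Cora')
data = dataset[0]
data.num_class = dataset.num_classes  # ModelSGC reads num_class off the data object
data.edge_weight = None               # forward() also expects an edge_weight attribute
model = ModelSGC(num_layers=2, hidden=64, activation="relu", data=data)
log_probs = model(data)  # [num_nodes, num_classes] log-probabilities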
Example #11
def __init__(self, num_layers, hidden, activation, data):
    super(ModelSGC, self).__init__()
    self.linear_1 = Linear(data.num_features, hidden)
    self.conv = SGConv(hidden, hidden, K=num_layers)
    self.linear_2 = Linear(hidden, data.num_class)
    if activation == "relu":
        self.activation = relu
    elif activation == "leaky_relu":
        self.activation = leaky_relu
Example #12
def __init__(self):
    super(Net, self).__init__()
    nn1 = torch.nn.Sequential(torch.nn.Linear(5, 30), torch.nn.ReLU())
    nn2 = torch.nn.Sequential(torch.nn.Linear(30, 30), torch.nn.ReLU())
    self.nnconv1 = GINConv(nn1)
    self.sconv1 = SGConv(30, 30, K=5)
    self.sconv2 = SGConv(30, 30, K=5)
    self.nnconv2 = GINConv(nn2)
    self.nn = torch.nn.Linear(30, 1)
Example #13
def test_sg_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = SGConv(in_channels, out_channels, K=10, cached=True)
    assert conv.__repr__() == 'SGConv(16, 32, K=10)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
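    # the repeated call below exercises the cached propagation path (cached=True)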
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
Example #14
def __init__(self,
             features_num=16,
             num_class=2,
             dropout=0.3,
             num_layers=2,
             hidden=16):
    super(SGCN, self).__init__()
    self.conv1 = SGConv(features_num, hidden)
    self.conv2 = SGConv(hidden, num_class)
    self.dropout = dropout
Example #15
    def __init__(self, in_channels, out_channels):
        super(SG, self).__init__()

        self.conv1 = SGConv(in_channels, in_channels * 2)
        self.conv2 = SGConv(in_channels * 2, in_channels * 2)
        self.conv3 = SGConv(in_channels * 2, in_channels * 4)

        self.lin1 = Linear(in_channels * 4, in_channels * 2)
        self.lin2 = Linear(in_channels * 2, in_channels)
        self.lin3 = Linear(in_channels, out_channels)
Example #16
    def __init__(self, in_channel, hid1, hid2, hid3, lin1, lin2, out, drop, K):
        super(SGC_network, self).__init__()
        self.drop = drop
        self.conv1 = SGConv(in_channel, hid1, K=K)
        self.conv2 = SGConv(hid1, hid2, K=K)
        self.conv3 = GATConv(hid2, hid3)

        self.l1 = nn.Linear(40 * hid3, lin1)
        self.l2 = nn.Linear(lin1, lin2)
        self.l3 = nn.Linear(lin2, out)
        self.l = nn.LeakyReLU(0.1)
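The 40 * hid3 input to l1 suggests graphs with a fixed 40 nodes whose embeddings are flattened before the MLP head; under that assumption (not stated in the original), a forward pass might look like:

import torch.nn.functional as F

def forward(self, x, edge_index):
    # hypothetical forward, assuming each graph has exactly 40 nodes
    x = self.l(self.conv1(x, edge_index))
    x = F.dropout(x, p=self.drop, training=self.training)
    x = self.l(self.conv2(x, edge_index))
    x = self.l(self.conv3(x, edge_index))
    x = x.view(-1, 40 * x.size(-1))  # flatten the 40 node embeddings per graph
    x = self.l(self.l1(x))
    x = self.l(self.l2(x))
    return self.l3(x)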
Example #17
    def __init__(self, feature, out_channel):
        super(SGCN, self).__init__()

        self.GConv1 = SGConv(feature, 1024)
        self.bn1 = BatchNorm(1024)

        self.GConv2 = SGConv(1024, 1024)
        self.bn2 = BatchNorm(1024)

        self.fc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(inplace=True))
        self.dropout = nn.Dropout(0.2)
        self.fc1 = nn.Sequential(nn.Linear(512, out_channel))
Example #18
    def __init__(self, data, K=1):
        super().__init__()
        num_classes = len(data.y.unique())

        # Create a simple graph convolutional layer with K neighbourhood
        # "averaging" steps
        self.conv = SGConv(in_channels=data.num_features,
                           out_channels=64,
                           K=K,
                           cached=True)

        self.conv2 = SGConv(in_channels=64, out_channels=num_classes, K=K)
Example #19
class SGC(nn.Module):
    def __init__(self, dataset, K):
        super(SGC, self).__init__()
        self.gc1 = SGConv(dataset.num_features, dataset.num_classes, K=K, cached=True)

    def reset_parameters(self):
        self.gc1.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.gc1(x, edge_index)
        return F.log_softmax(x, dim=1)
Example #20
    def __init__(self, num_features, channels=64):
        super(SGCNLearn, self).__init__()
        self.conv1 = SGConv(num_features, 8, K=2, add_self_loops=False)
        self.conv2 = SGConv(8, 8, K=2, add_self_loops=False)

        # self.fc = torch.nn.Linear(2 * 16, 1)
        self.fc = torch.nn.Linear(2 * 8, 2)

        num_edges = channels * channels - channels
        self.edge_weight = torch.nn.Parameter(torch.FloatTensor(num_edges, 1),
                                              requires_grad=True)
        self.edge_weight.data.fill_(1)
Example #21
def __init__(self, n_features, n_outputs, dim=100):
    super(SGCA, self).__init__()
    self.sgc1 = SGConv(n_features, dim)
    self.sgc2 = SGConv(dim, dim)
    self.bn = torch.nn.BatchNorm1d(dim)
    self.armaconv = ARMAConv(dim, dim)

    # the fully connected layers
    self.fc1 = Linear(dim, 2*dim)
    self.fc2 = Linear(2*dim, 3*dim)
    self.fc3 = Linear(3*dim, 2*dim)
    self.fc4 = Linear(2*dim, 1)
Example #22
class Net(torch.nn.Module):
    def __init__(self, dataset):
        super(Net, self).__init__()
        self.conv1 = SGConv(
            dataset.num_features, dataset.num_classes, K=args.K, cached=True)

    def reset_parameters(self):
        self.conv1.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        return F.log_softmax(x, dim=1)
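This example reads K from a module-level args object; a minimal way to provide it (the argparse setup is an assumption, not shown in the original):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--K', type=int, default=2)  # number of propagation hops
args = parser.parse_args()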
Example #23
def test_sg_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = SGConv(in_channels, out_channels, K=10, cached=False)
    assert conv.__repr__() == 'SGConv(16, 32, K=10)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index).tolist() == out.tolist()

    conv = SGConv(in_channels, out_channels, K=10, cached=True)
    assert conv.__repr__() == 'SGConv(16, 32, K=10)'
    out = conv(x, edge_index)
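    # the second call reuses the propagation result cached by the first (cached=True)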
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    jit_conv(x, edge_index)
    assert jit_conv(x, edge_index).tolist() == out.tolist()
Example #24
class SGC(nn.Module):
    def __init__(self, in_channels, out_channels, hops):
        """ takes 'hops' power of the normalized adjacency"""
        super(SGC, self).__init__()
        self.conv = SGConv(in_channels, out_channels, hops, cached=True) 

    def reset_parameters(self):
        self.conv.reset_parameters()

    def forward(self, data):
        edge_index = data.graph['edge_index']
        x = data.graph['node_feat']
        x = self.conv(x, edge_index)
        return x
Example #25
    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorSGCN, self).__init__()
        self.conv1 = SGConv(in_feats,
                            hid_feats,
                            K=1,
                            cached=False,
                            add_self_loops=True,
                            bias=True)

        self.conv2 = SGConv(hid_feats + in_feats,
                            out_feats,
                            K=1,
                            cached=False,
                            add_self_loops=True,
                            bias=True)
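conv2 consumes hid_feats + in_feats channels, which implies the raw input features are concatenated back onto the first layer's output (the skip-connection style used in BiGCN-type rumor models); a sketch under that assumption:

import torch
import torch.nn.functional as F

def forward(self, x, edge_index):
    # hypothetical forward: one SGConv hop, then concatenate the raw
    # input features before the second hop (hence hid_feats + in_feats)
    h = F.relu(self.conv1(x, edge_index))
    h = torch.cat([h, x], dim=1)
    return self.conv2(h, edge_index)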
Example #26
class SGC_Net(torch.nn.Module):
    def __init__(self, features_num, num_class, K, cached):
        super(SGC_Net, self).__init__()
        self.conv1 = SGConv(features_num, num_class, K=K, cached=cached)

    def reset_parameters(self):
        self.conv1.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.conv1(x, edge_index)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #27
    def __init__(self, k=2, w1=128, w2=128, w3=128):
        super(Net, self).__init__()

        self.conv = CustomGCN(2, 1, cached=False)
        self.conv.weight.requires_grad = False

        self.topk = TopKPooling(1, min_score=0.1)
        self.topk.weight.requires_grad = False

        self.conv1 = SGConv(2, w1, k)
        self.bn1 = BatchNorm1d(w1)
        self.conv2 = SGConv(w1, w2, k)
        self.bn2 = BatchNorm1d(w2)
        self.conv3 = SGConv(w2, w3, k)
        self.bn3 = BatchNorm1d(w3)
        self.linear = Linear(w3, 3)
Example #28
    def get_layer(self, in_dim: int, out_dim: int, K: Optional[int] = None):
        """
            get the GNN layer

            Parameters
            ----------
            in_dim: int - input dimension
            out_dim: int - output dimension
            K: int - number of layers for SGC only

            Returns
            -------
            layer: torch_geometric.nn
        """
        if self is GNN_TYPE.GCN:
            return GCNConv(in_channels=in_dim, out_channels=out_dim)
        elif self is GNN_TYPE.GAT:
            return ModifiedGATConv(in_channels=in_dim, out_channels=out_dim)
        elif self is GNN_TYPE.SAGE:
            return ModifiedSAGEConv(in_channels=in_dim, out_channels=out_dim)
        elif self is GNN_TYPE.GIN:
            sequential = nn.Sequential(nn.Linear(in_dim, out_dim), nn.BatchNorm1d(out_dim), nn.ReLU(),
                                       nn.Linear(out_dim, out_dim), nn.BatchNorm1d(out_dim), nn.ReLU())
            return ModifiedGINConv(sequential)
        elif self is GNN_TYPE.SGC:
            return SGConv(in_channels=in_dim, out_channels=out_dim, K=K)
        else:
            raise ValueError(f"{self.string()} cannot use this method")
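A usage sketch, assuming GNN_TYPE is an Enum whose members carry this method and that x/edge_index are existing node-feature and connectivity tensors:

# hypothetical usage: build a 2-hop SGC layer from the enum
layer = GNN_TYPE.SGC.get_layer(in_dim=16, out_dim=32, K=2)
out = layer(x, edge_index)  # x: [num_nodes, 16] -> out: [num_nodes, 32]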
Example #29
def __init__(self,
             num_layers=2,
             hidden=16,
             features_num=16,
             num_class=2,
             hidden_droprate=0.5,
             edge_droprate=0.0):
    super(SGCN, self).__init__()
    self.conv1 = SGConv(features_num, hidden)
    self.convs = torch.nn.ModuleList()
    for _ in range(num_layers - 1):
        self.convs.append(SGConv(hidden, hidden))
    self.lin2 = Linear(hidden, num_class)
    self.first_lin = Linear(features_num, hidden)
    self.hidden_droprate = hidden_droprate
    self.edge_droprate = edge_droprate
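Only the constructor appears here; given the first_lin, hidden_droprate, and edge_droprate attributes, a forward pass in this style might be the following sketch (the dropout_adj-based edge dropout and layer ordering are assumptions):

import torch.nn.functional as F
from torch_geometric.utils import dropout_adj

def forward(self, data):
    # hypothetical forward: optional edge dropout, linear embedding,
    # stacked SGConv layers, then the classification head
    x, edge_index = data.x, data.edge_index
    if self.edge_droprate:
        edge_index, _ = dropout_adj(edge_index, p=self.edge_droprate,
                                    training=self.training)
    x = F.relu(self.first_lin(x))
    x = F.dropout(x, p=self.hidden_droprate, training=self.training)
    x = F.relu(self.conv1(x, edge_index))
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
    x = F.dropout(x, p=self.hidden_droprate, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)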
Example #30
def __init__(self,
             num_features,
             embedding_size=128,
             slope=None,
             temp=None):
    super(SGNet, self).__init__()
    self.conv1 = SGConv(num_features, embedding_size, K=2, cached=True)