def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 dropout,
                 mode='cat'):
        super().__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(in_channels, hidden_channels, cached=False))
        self.bns = torch.nn.ModuleList()
        self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        for _ in range(num_layers - 1):
            self.convs.append(
                GCNConv(hidden_channels, hidden_channels, cached=False))
            self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin1 = Linear(num_layers * hidden_channels, hidden_channels)
        else:
            self.lin1 = Linear(hidden_channels, hidden_channels)

        self.lin2 = Linear(hidden_channels, out_channels)
        self.dropout = dropout
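For reference, a minimal standalone sketch (not part of the example above) of what JumpingKnowledge itself does to a list of per-layer node embeddings under each mode; the tensor sizes here are illustrative:

import torch
from torch_geometric.nn import JumpingKnowledge

num_nodes, hidden_channels, num_layers = 8, 16, 3
xs = [torch.randn(num_nodes, hidden_channels) for _ in range(num_layers)]

jk_cat = JumpingKnowledge('cat')    # concatenation -> [num_nodes, num_layers * hidden_channels]
jk_max = JumpingKnowledge('max')    # element-wise max -> [num_nodes, hidden_channels]
jk_lstm = JumpingKnowledge('lstm', channels=hidden_channels, num_layers=num_layers)

print(jk_cat(xs).shape)   # torch.Size([8, 48])
print(jk_max(xs).shape)   # torch.Size([8, 16])
print(jk_lstm(xs).shape)  # torch.Size([8, 16])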
Example #2
    def __init__(self,
                 nfeat,
                 nhid,
                 nclass,
                 dropout=0.5,
                 lr=0.01,
                 weight_decay=5e-4,
                 n_edge=1,
                 with_relu=True,
                 drop=False,
                 with_bias=True,
                 device=None):

        super(GIN, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device
        self.nfeat = nfeat
        self.hidden_sizes = [nhid]
        self.nclass = int(nclass)
        self.dropout = dropout
        self.lr = lr
        self.drop = drop
        if not with_relu:
            self.weight_decay = 0
        else:
            self.weight_decay = weight_decay
        self.with_relu = with_relu
        self.with_bias = with_bias
        self.n_edge = n_edge
        self.output = None
        self.best_model = None
        self.best_output = None
        self.adj_norm = None
        self.features = None
        self.gate = Parameter(torch.rand(1))  # gate parameter, initialized uniformly in [0, 1)
        nclass = int(nclass)
        """GIN from torch-geometric"""
        num_features = nfeat
        dim = nhid
        nn1 = Sequential(
            Linear(num_features, dim),
            ReLU(),
        )
        self.gc1 = GINConv(nn1)
        # self.bn1 = torch.nn.BatchNorm1d(dim)
        nn2 = Sequential(
            Linear(dim, dim),
            ReLU(),
        )
        self.gc2 = GINConv(nn2)
        nn3 = Sequential(
            Linear(dim, dim),
            ReLU(),
        )
        self.gc3 = GINConv(nn3)
        self.jump = JumpingKnowledge(mode='cat')
        # self.bn2 = torch.nn.BatchNorm1d(dim)
        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(dim * 1, int(nclass))
Example #3
 def __init__(self, dataset, num_layers, hidden, train_eps=False, mode='cat'):
     super().__init__()
     self.conv1 = GINConv(nn.Sequential(
         nn.Linear(dataset.num_features, hidden),
         nn.ReLU(),
         nn.Linear(hidden, hidden),
         nn.ReLU(),
         nn.BatchNorm1d(hidden),
     ), train_eps=train_eps)
     self.convs = nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(
             GINConv(nn.Sequential(
                 nn.Linear(hidden, hidden),
                 nn.ReLU(),
                 nn.Linear(hidden, hidden),
                 nn.ReLU(),
                 nn.BatchNorm1d(hidden),
             ), train_eps=train_eps))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = nn.Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = nn.Linear(hidden, hidden)
     self.lin2 = nn.Linear(hidden, dataset.num_classes)
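Example #3 only shows the constructor; a plausible forward pass (an assumption on our part, mirroring the full GIN class in Example #23 below) would collect the per-layer embeddings for the jump layer and then pool:

import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.conv1(x, edge_index)
    xs = [x]                          # per-layer node embeddings for jumping knowledge
    for conv in self.convs:
        x = conv(x, edge_index)
        xs.append(x)
    x = self.jump(xs)                 # 'cat' -> [num_nodes, num_layers * hidden]
    x = global_mean_pool(x, batch)    # graph-level readout
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)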
Example #4
    def __init__(self,
                 args,
                 num_nodes=10,
                 num_layers=4,
                 hidden=16,
                 ratio=0.25):
        super(DiffPool, self).__init__()

        self.args = args
        num_features = self.args.filters_3

        self.att = DenseAttentionModule(self.args)

        num_nodes = ceil(ratio * num_nodes)
        self.embed_block1 = Block(num_features, hidden, hidden)
        self.pool_block1 = Block(num_features, hidden, num_nodes)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range((num_layers // 2) - 1):
            num_nodes = ceil(ratio * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes))
        self.jump = JumpingKnowledge(mode="cat")
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, num_features)
Example #5
class DiffPool(torch.nn.Module):
    def __init__(self,
                 args,
                 num_nodes=10,
                 num_layers=4,
                 hidden=16,
                 ratio=0.25):
        super(DiffPool, self).__init__()

        self.args = args
        num_features = self.args.filters_3

        self.att = DenseAttentionModule(self.args)

        num_nodes = ceil(ratio * num_nodes)
        self.embed_block1 = Block(num_features, hidden, hidden)
        self.pool_block1 = Block(num_features, hidden, num_nodes)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range((num_layers // 2) - 1):
            num_nodes = ceil(ratio * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes))
        self.jump = JumpingKnowledge(mode="cat")
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, num_features)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.pool_block1.reset_parameters()
        for block1, block2 in zip(self.embed_blocks, self.pool_blocks):
            block1.reset_parameters()
            block2.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, x, adj, mask):
        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))

        xs = [self.att(x, mask)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for i, (embed,
                pool) in enumerate(zip(self.embed_blocks, self.pool_blocks)):
            s = pool(x, adj)
            x = F.relu(embed(x, adj))
            xs.append(self.att(x))
            if i < (len(self.embed_blocks) - 1):
                x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return x

    def __repr__(self):
        return self.__class__.__name__
Example #6
    def __init__(self, hparams: dict):
        super().__init__()
        self.hparams = hparams
        self.aggregation_method = hparams["aggregation_method"]

        if hparams["num_conv_layers"] < 1:
            raise Exception("Invalid number of layers!")

        self.conv_modules = nn.ModuleList()

        self.conv_modules.append(
            GCNConv(hparams["num_node_features"], hparams["conv_size"]))

        for _ in range(hparams["num_conv_layers"] - 1):
            conv = GCNConv(hparams["conv_size"], hparams["conv_size"])
            self.conv_modules.append(conv)

        if self.aggregation_method == 'lstm':
            self.jk = JumpingKnowledge(self.aggregation_method,
                                       num_layers=hparams["num_conv_layers"],
                                       channels=hparams["conv_size"])
        else:
            self.jk = JumpingKnowledge(self.aggregation_method)

        if self.aggregation_method == 'cat':
            self.lin = nn.Linear(
                int(hparams["conv_size"] * hparams["num_conv_layers"]),
                hparams["lin_size"])
        else:
            self.lin = nn.Linear(int(hparams["conv_size"]),
                                 hparams["lin_size"])
        self.output = nn.Linear(hparams["lin_size"], hparams["output_size"])
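The forward pass of this configuration-driven model is not shown; a hedged sketch of how the defined modules would typically be wired together (the argument names x and edge_index are assumptions):

import torch.nn.functional as F

def forward(self, x, edge_index):
    xs = []                  # per-layer activations for jumping knowledge
    for conv in self.conv_modules:
        x = F.relu(conv(x, edge_index))
        xs.append(x)
    x = self.jk(xs)          # 'cat', 'max' or 'lstm' aggregation
    x = F.relu(self.lin(x))
    return self.output(x)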
Example #7
class DiffPool(torch.nn.Module):
    def __init__(self, dataset, num_pools, hidden, ratio=0.25):
        super(DiffPool, self).__init__()

        self.num_pools, self.hidden = num_pools, hidden

        num_nodes = ceil(ratio * dataset[0].num_nodes)
        self.embed_block1 = Block(dataset.num_features, hidden, hidden)
        self.pool_block1 = Block(dataset.num_features, hidden, num_nodes)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range(num_pools - 1):
            num_nodes = ceil(ratio * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes))

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.pool_block1.reset_parameters()
        for embed_block, pool_block in zip(self.embed_blocks,
                                           self.pool_blocks):
            embed_block.reset_parameters()
            pool_block.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        # x:[batch_size,num_nodes,in_channels]
        x, adj, mask = data.x, data.adj, data.mask

        # s: [batch_size, num_nodes, c_num_nodes]
        s = self.pool_block1(x, adj, mask, add_loop=True)
        # x: [batch_size, num_nodes, hidden]
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        # x:[batch_size, c_num_nodes, hidden]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)
        # adj: [batch_size,c_num_nodes, c_num_nodes]
        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            # s: [batch_size,c_num_nodes, cc_num_nodes]
            s = pool_block(x, adj)
            # x: [batch_size,c_num_nodes,hidden]
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks) - 1:
                # x: [batch_size,cc_num_nodes, hidden]
                x, adj, _, _ = dense_diff_pool(x, adj, s)
                # adj: [batch_size,cc_num_nodes,cc_num_nodes]
        x = self.jump(xs)  # x: [batch_size,len(self.embed_blocks)+1)*hidden]
        x = F.relu(self.lin1(x))  # x: [batch_size,hidden]
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)  # x: [batch_size,dataset.num_classes]
        return F.log_softmax(x, dim=-1)
Example #8
 def __init__(self, dataset, num_layers, hidden, mode='cat'):
     super(GIN0WithJK, self).__init__()
     self.conv1 = GINConv(Sequential(
         Linear(dataset.num_features, hidden),
         ReLU(),
         Linear(hidden, hidden),
         ReLU(),
         BN(hidden),
     ),
                          train_eps=False)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(
             GINConv(Sequential(
                 Linear(hidden, hidden),
                 ReLU(),
                 Linear(hidden, hidden),
                 ReLU(),
                 BN(hidden),
             ),
                     train_eps=False))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #9
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
                 dropout=0.5, heads=2, jk_type='max'):
        super(GATJK, self).__init__()

        self.convs = nn.ModuleList()
        self.convs.append(
            GATConv(in_channels, hidden_channels, heads=heads, concat=True))

        self.bns = nn.ModuleList()
        self.bns.append(nn.BatchNorm1d(hidden_channels*heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(hidden_channels * heads, hidden_channels, heads=heads, concat=True))
            self.bns.append(nn.BatchNorm1d(hidden_channels * heads))

        self.convs.append(
            GATConv(hidden_channels*heads, hidden_channels, heads=heads))

        self.dropout = dropout
        self.activation = F.elu # note: uses elu

        self.jump = JumpingKnowledge(jk_type, channels=hidden_channels*heads, num_layers=1)
        if jk_type == 'cat':
            self.final_project = nn.Linear(hidden_channels*heads*num_layers, out_channels)
        else: # max or lstm
            self.final_project = nn.Linear(hidden_channels*heads, out_channels)
Example #10
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
                 dropout=0.5, save_mem=False, jk_type='max'):
        super(GCNJK, self).__init__()

        self.convs = nn.ModuleList()
        self.convs.append(
            GCNConv(in_channels, hidden_channels, cached=not save_mem, normalize=not save_mem))

        self.bns = nn.ModuleList()
        self.bns.append(nn.BatchNorm1d(hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(
                GCNConv(hidden_channels, hidden_channels, cached=not save_mem, normalize=not save_mem))
            self.bns.append(nn.BatchNorm1d(hidden_channels))

        self.convs.append(
            GCNConv(hidden_channels, hidden_channels, cached=not save_mem, normalize=not save_mem))

        self.dropout = dropout
        self.activation = F.relu
        self.jump = JumpingKnowledge(jk_type, channels=hidden_channels, num_layers=1)
        if jk_type == 'cat':
            self.final_project = nn.Linear(hidden_channels * num_layers, out_channels)
        else: # max or lstm
            self.final_project = nn.Linear(hidden_channels, out_channels)
Example #11
class GCNWithJK(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden):
        super(GCNWithJK, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        x = self.jump(xs)
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #12
class Coarsening(torch.nn.Module):
    def __init__(self, dataset, hidden, ratio=0.25): # we only use 1 layer for coarsening
        super(Coarsening, self).__init__()

        # self.embed_block1 = GNNBlock(dataset.num_features, hidden, hidden)
        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)

        self.jump = JumpingKnowledge(mode='cat')

        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.coarse_block1.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data, epsilon=0.01, opt_epochs=100):
        x, adj, mask = data.x, data.adj, data.mask
        batch_num_nodes = data.mask.sum(-1)
        x1 = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        # xs = [x1.mean(dim=1)]
        coarse_x, new_adj, S = self.coarse_block1(x1, adj, batch_num_nodes)
        xs = [coarse_x.mean(dim=1)]
        x2 = torch.tanh(self.embed_block2(coarse_x, new_adj, mask, add_loop=True))
        xs.append(x2.mean(dim=1))


        opt_loss = 0.0
        for i in range(len(x)):
            x3 = self.get_nonzero_rows(x[i])
            x4 = self.get_nonzero_rows(x2[i])
            # if x3.size()[0]==0 or x4.size()[0]==0:
            #     continue
            # opt_loss += sinkhorn_loss_default(x3, x4, epsilon, niter=opt_epochs).float()
            opt_loss += sinkhorn_loss_default(x3, x2[i], epsilon, niter=opt_epochs)
        return xs, new_adj, S, opt_loss

    def predict(self, xs):
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def get_nonzero_rows(self, M):  # M is a matrix
        # row_ind = M.sum(-1).nonzero().squeeze()  # nonzero() has bugs in PyTorch 1.2.0,
        # so we select rows via their sorted row sums instead
        MM, MM_ind = M.sum(-1).sort()
        N = (M.sum(-1) > 0).sum()
        return M[MM_ind[:N]]


    def __repr__(self):
        return self.__class__.__name__
Example #13
class DiffPool(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden, ratio=0.25):
        super(DiffPool, self).__init__()

        num_nodes = ceil(ratio * dataset[0].num_nodes)
        self.embed_block1 = Block(dataset.num_features, hidden, hidden)
        self.pool_block1 = Block(dataset.num_features, hidden, num_nodes)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range((num_layers // 2) - 1):
            num_nodes = ceil(ratio * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes))

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.pool_block1.reset_parameters()
        for embed_block, pool_block in zip(self.embed_blocks,
                                           self.pool_blocks):
            embed_block.reset_parameters()
            pool_block.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask
        link_losses = 0.
        ent_losses = 0.
        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        x, adj, link_loss, ent_loss = dense_diff_pool(x, adj, s, mask)
        link_losses += link_loss
        ent_losses += ent_loss
        for i, (embed_block, pool_block) in enumerate(
                zip(self.embed_blocks, self.pool_blocks)):
            s = pool_block(x, adj)
            x = F.relu(embed_block(x, adj))
            xs.append(x.mean(dim=1))
            if i < len(self.embed_blocks) - 1:
                x, adj, link_loss, ent_loss = dense_diff_pool(x, adj, s)
                link_losses += link_loss
                ent_losses += ent_loss
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)

        return F.log_softmax(x, dim=-1), link_losses + ent_losses

    def __repr__(self):
        return self.__class__.__name__
Example #14
 def __init__(self, dataset, num_layers, hidden):
     super(Graclus, self).__init__()
     self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(GraphConv(hidden, hidden, aggr='mean'))
     self.jump = JumpingKnowledge(mode='cat')
     self.lin1 = Linear(num_layers * hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #15
def test_jumping_knowledge():
    num_nodes, channels, num_layers = 100, 17, 5
    xs = list([torch.randn(num_nodes, channels) for _ in range(num_layers)])

    model = JumpingKnowledge('cat')
    assert model.__repr__() == 'JumpingKnowledge(cat)'

    out = model(xs)
    assert out.size() == (num_nodes, channels * num_layers)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)

    model = JumpingKnowledge('max')
    assert model.__repr__() == 'JumpingKnowledge(max)'

    out = model(xs)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)

    model = JumpingKnowledge('lstm', channels, num_layers)
    assert model.__repr__() == 'JumpingKnowledge(lstm)'

    out = model(xs)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)
Example #16
 def __init__(self, num_input_features, num_layers, hidden, mode='cat'):
     super(GraphSAGEWithJK, self).__init__()
     self.conv1 = SAGEConv(num_input_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(SAGEConv(hidden, hidden))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(3 * num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(3 * hidden, hidden)
     self.lin2 = Linear(hidden, 2)
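The factor of 3 in lin1 is not explained by the constructor alone; one plausible reading (an assumption, since the original forward is not shown) is that three graph-level readouts are concatenated after the jump layer:

import torch
import torch.nn.functional as F
from torch_geometric.nn import global_add_pool, global_max_pool, global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    xs = [x]
    for conv in self.convs:
        x = F.relu(conv(x, edge_index))
        xs.append(x)
    x = self.jump(xs)
    # three readouts per graph, concatenated -> the factor of 3 in lin1
    x = torch.cat([global_mean_pool(x, batch),
                   global_max_pool(x, batch),
                   global_add_pool(x, batch)], dim=-1)
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)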
Example #17
class GATJK(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
                 dropout=0.5, heads=2, jk_type='max'):
        super(GATJK, self).__init__()

        self.convs = nn.ModuleList()
        self.convs.append(
            GATConv(in_channels, hidden_channels, heads=heads, concat=True))

        self.bns = nn.ModuleList()
        self.bns.append(nn.BatchNorm1d(hidden_channels*heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(hidden_channels * heads, hidden_channels, heads=heads, concat=True))
            self.bns.append(nn.BatchNorm1d(hidden_channels * heads))

        self.convs.append(
            GATConv(hidden_channels*heads, hidden_channels, heads=heads))

        self.dropout = dropout
        self.activation = F.elu # note: uses elu

        self.jump = JumpingKnowledge(jk_type, channels=hidden_channels*heads, num_layers=1)
        if jk_type == 'cat':
            self.final_project = nn.Linear(hidden_channels*heads*num_layers, out_channels)
        else: # max or lstm
            self.final_project = nn.Linear(hidden_channels*heads, out_channels)


    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()
        self.jump.reset_parameters()
        self.final_project.reset_parameters()


    def forward(self, data):
        x = data.graph['node_feat']
        xs = []
        for i, conv in enumerate(self.convs[:-1]):
            x = conv(x, data.graph['edge_index'])
            x = self.bns[i](x)
            x = self.activation(x)
            xs.append(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[-1](x, data.graph['edge_index'])
        xs.append(x)
        x = self.jump(xs)
        x = self.final_project(x)
        return x
Example #18
    def __init__(self, dataset, hidden, ratio=0.25): # we only use 1 layer for coarsening
        super(Coarsening, self).__init__()

        # self.embed_block1 = GNNBlock(dataset.num_features, hidden, hidden)
        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)

        self.jump = JumpingKnowledge(mode='cat')

        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Example #19
 def __init__(self, dataset, num_layers, hidden, mode='cat'):
     super(GCNWithJK, self).__init__()
     self.conv1 = GCNConv(dataset.num_features, hidden)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(GCNConv(hidden, hidden))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #20
    def __init__(self, dataset, hidden, num_layers=2, ratio=0.5):
        super(MultiLayerCoarsening, self).__init__()

        self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
        self.coarse_block1 = CoarsenBlock(hidden, ratio)
        self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
        # self.embed_block2 = GNNBlock(hidden, hidden, dataset.num_features)

        self.num_layers = num_layers

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(hidden + dataset.num_features, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Example #21
class JKNet(torch.nn.Module):
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 dropout,
                 mode='cat'):
        super().__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(GCNConv(in_channels, hidden_channels, cached=False))
        self.bns = torch.nn.ModuleList()
        self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        for _ in range(num_layers - 1):
            self.convs.append(
                GCNConv(hidden_channels, hidden_channels, cached=False))
            self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin1 = Linear(num_layers * hidden_channels, hidden_channels)
        else:
            self.lin1 = Linear(hidden_channels, hidden_channels)

        self.lin2 = Linear(hidden_channels, out_channels)
        self.dropout = dropout

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, x, adj_t):
        xs = []
        for i, conv in enumerate(self.convs):
            x = conv(x, adj_t)
            x = self.bns[i](x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            xs += [x]

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
Example #22
class DiffPool(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden):
        super(DiffPool, self).__init__()

        num_nodes = ceil(0.25 * dataset[0].num_nodes)
        self.embed_block1 = Block(dataset.num_features, hidden, hidden)
        self.pool_block1 = Block(dataset.num_features, hidden, num_nodes)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range((num_layers // 2) - 1):
            num_nodes = ceil(0.25 * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes))

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.embed_block1.reset_parameters()
        self.pool_block1.reset_parameters()
        for block1, block2 in zip(self.embed_blocks, self.pool_blocks):
            block1.reset_parameters()
            block2.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, adj, mask = data.x, data.adj, data.mask

        s = self.pool_block1(x, adj, mask, add_loop=True)
        x = F.relu(self.embed_block1(x, adj, mask, add_loop=True))
        xs = [x.mean(dim=1)]
        x, adj, _, _ = dense_diff_pool(x, adj, s, mask)

        for embed, pool in zip(self.embed_blocks, self.pool_blocks):
            s = pool(x, adj)
            x = F.relu(embed(x, adj))
            xs.append(x.mean(dim=1))
            x, adj, _, _ = dense_diff_pool(x, adj, s)

        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #23
class GIN(nn.Module):
    def __init__(self, dataset, num_layers, hidden, train_eps=False, mode='cat'):
        super().__init__()
        self.conv1 = GINConv(nn.Sequential(
            nn.Linear(dataset.num_features, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.BatchNorm1d(hidden),
        ), train_eps=train_eps)
        self.convs = nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(
                GINConv(nn.Sequential(
                    nn.Linear(hidden, hidden),
                    nn.ReLU(),
                    nn.Linear(hidden, hidden),
                    nn.ReLU(),
                    nn.BatchNorm1d(hidden),
                ), train_eps=train_eps))
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin1 = nn.Linear(num_layers * hidden, hidden)
        else:
            self.lin1 = nn.Linear(hidden, hidden)
        self.lin2 = nn.Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = self.conv1(x, edge_index)
        xs = [x]
        for conv in self.convs:
            x = conv(x, edge_index)
            xs += [x]
        x = self.jump(xs)
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return x

    def __repr__(self):
        return self.__class__.__name__
Example #24
class ModelEnsemble(torch.nn.Module):
    def __init__(self, num_features, num_class):
        super(ModelEnsemble, self).__init__()
        self.JK = JumpingKnowledge(mode='lstm',
                                   channels=num_class,
                                   num_layers=1)
        # self.linear = Linear(num_features, num_class)

    def reset_parameters(self):
        self.JK.reset_parameters()

    def forward(self, x):
        x = self.JK(x)
        return log_softmax(x, dim=-1)
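A hypothetical usage sketch (the tensor sizes and the two-model setup are illustrative assumptions) showing ModelEnsemble combining per-model class logits via LSTM-attention jumping knowledge:

import torch

num_nodes, num_class = 32, 7
logits_a = torch.randn(num_nodes, num_class)   # predictions from model A
logits_b = torch.randn(num_nodes, num_class)   # predictions from model B

ensemble = ModelEnsemble(num_features=num_class, num_class=num_class)
out = ensemble([logits_a, logits_b])           # log-probabilities, shape [32, 7]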
Example #25
    def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers, hidden, ratio=0.8, dropout=0, num_class=0):
        super(ASAP, self).__init__()

        self.num_class = num_class
        self.max_seq_len = max_seq_len
        self.node_encoder = node_encoder

        self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')
        self.convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.convs.extend([
            GraphConv(hidden, hidden, aggr='mean')
            for i in range(num_layers - 1)
        ])
        self.pools.extend([
            ASAPooling(hidden, ratio, dropout=dropout)
            for i in range((num_layers) // 2)
        ])
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        # self.lin2 = Linear(hidden, dataset.num_classes)

        if self.num_class > 0:  # classification
            self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)
        else:
            self.graph_pred_linear_list = torch.nn.ModuleList()
            for i in range(max_seq_len):
                self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))
Example #26
 def __init__(self, num_layers, hidden_list, activation, data):
     super(ModelGCN, self).__init__()
     assert len(hidden_list) == num_layers + 1
     self.linear_1 = Linear(data.num_features, hidden_list[0])
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers):
         self.convs.append(GCNConv(hidden_list[i], hidden_list[i + 1]))
     self.JK = JumpingKnowledge(mode='max')
     self.linear_2 = Linear(hidden_list[-1], data.num_class)
     if activation == "relu":
         self.activation = relu
     elif activation == "leaky_relu":
         self.activation = leaky_relu
     self.reg_params = list(self.linear_1.parameters()) + list(
         self.convs.parameters()) + list(self.JK.parameters()) + list(
             self.linear_2.parameters())
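The forward pass of ModelGCN is not included; a hedged sketch of a forward consistent with the layers above (it assumes all entries of hidden_list after the first are equal, since 'max' mode requires matching channel sizes):

import torch.nn.functional as F

def forward(self, data):
    x, edge_index = data.x, data.edge_index
    x = self.activation(self.linear_1(x))
    xs = []                  # per-layer outputs for max-mode jumping knowledge
    for conv in self.convs:
        x = self.activation(conv(x, edge_index))
        xs.append(x)
    x = self.JK(xs)          # element-wise max over layers
    return F.log_softmax(self.linear_2(x), dim=-1)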
Example #27
    def __init__(self, dataset, num_layers, hidden, jpgs=False, jp=False, hop=2, num_patches=5, \
                 ratio=0.25, plot=False, dropout=False, ge=False):
        super(DiffPool, self).__init__()
        
        Block = [Block_1hop, Block_2hop, Block_3hop][hop-1]
        self.num_patches = num_patches
        self.dropout = dropout
        self.plot = plot
        num_nodes = ceil(ratio * dataset[0].num_nodes)
        self.embed_block1 = Block(dataset.num_features, hidden, hidden, jpgs)
        self.pool_block1 = Block(dataset.num_features, hidden, num_nodes, jpgs)

        self.embed_blocks = torch.nn.ModuleList()
        self.pool_blocks = torch.nn.ModuleList()
        for i in range((num_layers // 2) - 1):
            num_nodes = ceil(ratio * num_nodes)
            self.embed_blocks.append(Block(hidden, hidden, hidden, jpgs))
            self.pool_blocks.append(Block(hidden, hidden, num_nodes, jpgs))
        
        self.pool_block_last = Block(hidden, hidden, 1, jpgs)
        self.jp = jp
        self.ge = ge
        if self.jp:
            self.jump = JumpingKnowledge(mode='cat')
            self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
            self.lin2 = Linear(hidden, dataset.num_classes)
        else:
            # self.lin1 = Linear(num_patches*hidden, hidden)
            # !!! Since we switched to averaging graph embeddings, the dimension needs to change accordingly
            self.lin1 = Linear(hidden, dataset.num_classes)
Example #28
    def __init__(self,
                 in_channels,
                 out_channels,
                 depth=3,
                 jk_depth=7,
                 base_conv=TAGConv,
                 att_conv=GATConv,
                 base_conv_settings={"K": 3},
                 att_conv_settings={}):
        super(AttentionBlock, self).__init__()

        assert out_channels % in_channels == 0, "out_channels must be a multiple of in_channels"

        ratio = out_channels // in_channels

        att_conv_settings = dict(**att_conv_settings, heads=ratio)

        self.base_conv_list = torch.nn.ModuleList([])
        self.att_conv_list = torch.nn.ModuleList([])
        self.slc_list = torch.nn.ModuleList([])

        self.jk = JumpingKnowledge("lstm", ratio * in_channels, jk_depth)

        for _ in range(depth):
            self.slc_list.append(SublayerConnection(in_channels))

            self.base_conv_list.append(
                base_conv(in_channels, in_channels, **base_conv_settings))
            self.att_conv_list.append(
                att_conv(in_channels, in_channels, **att_conv_settings))

        self.reset_parameters()
Example #29
    def __init__(self, in_channels, hidden_channels, out_channels, mode="cat"):
        super(Block, self).__init__()

        # self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        # self.conv2 = DenseSAGEConv(hidden_channels, out_channels)

        # self.conv1 = DenseGCNConv(in_channels, hidden_channels)
        # self.conv2 = DenseGCNConv(hidden_channels, out_channels)

        nn1 = torch.nn.Sequential(
            Linear(in_channels, hidden_channels),
            ReLU(),
            Linear(hidden_channels, hidden_channels),
        )

        nn2 = torch.nn.Sequential(
            Linear(hidden_channels, out_channels),
            ReLU(),
            Linear(out_channels, out_channels),
        )

        self.conv1 = DenseGINConv(nn1, train_eps=True)
        self.conv2 = DenseGINConv(nn2, train_eps=True)

        self.jump = JumpingKnowledge(mode)
        if mode == "cat":
            self.lin = Linear(hidden_channels + out_channels, out_channels)
        else:
            self.lin = Linear(out_channels, out_channels)
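A sketch of the dense forward pass such a Block would typically implement (an assumption, modeled on how the DiffPool examples above call their blocks; the add_loop flag is accepted only for compatibility with those calls):

import torch.nn.functional as F

def forward(self, x, adj, mask=None, add_loop=True):
    # two DenseGINConv layers; jumping knowledge concatenates both outputs
    x1 = F.relu(self.conv1(x, adj, mask))
    x2 = F.relu(self.conv2(x1, adj, mask))
    return self.lin(self.jump([x1, x2]))   # 'cat': hidden_channels + out_channels -> out_channels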
Example #30
    def __init__(self,
                 dataset,
                 num_layers,
                 hidden,
                 weight_conv='WeightConv1',
                 multi_channel='False'):
        super(SMG_JK, self).__init__()
        self.lin0 = Linear(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(SparseConv(hidden, hidden))

        self.masks = torch.nn.ModuleList()
        if multi_channel == 'True':
            out_channel = hidden
        else:
            out_channel = 1
        if weight_conv != 'WeightConv2':
            for i in range(num_layers):
                self.masks.append(WeightConv1(hidden, hidden, out_channel))
        else:
            for i in range(num_layers):
                self.masks.append(
                    WeightConv2(
                        Sequential(Linear(hidden * 2, hidden), ReLU(),
                                   Linear(hidden, hidden), ReLU(),
                                   Linear(hidden, out_channel), Sigmoid())))

        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)