def __init__(self, n_feats, device='cpu'):
    super(Net, self).__init__()
    self.atom_embedding = nn.Linear(n_feats, n_feats)
    self.conv1 = GCNConv(n_feats, n_feats)
    self.pool1 = EdgePooling(n_feats)
    self.conv2 = GCNConv(n_feats, n_feats)
    self.pool2 = EdgePooling(n_feats)
    self.linear = torch.nn.Linear(2 * n_feats, 1)
    self.device = device
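# A minimal forward() sketch to accompany the Net above -- an assumption, not
# the original author's code. The 2 * n_feats input of self.linear suggests a
# readout that concatenates global mean and max pooling; the imports below
# are assumed.
import torch
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_max_pool

def forward(self, data):
    data = data.to(self.device)
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.atom_embedding(x))
    x = F.relu(self.conv1(x, edge_index))
    # EdgePooling contracts edges, roughly halving the number of nodes.
    x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, batch, _ = self.pool2(x, edge_index, batch)
    # Graph-level readout: concat of mean- and max-pooled node features.
    x = torch.cat([global_mean_pool(x, batch),
                   global_max_pool(x, batch)], dim=1)
    return self.linear(x)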
def __init__(self, dataset, num_layers, hidden, ratio):
    super(EdgePool, self).__init__()
    self.conv1 = GNN_Block(dataset.num_features, hidden)
    self.pool1 = EdgePooling(hidden)
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend(
        [GNN_Block(hidden, hidden) for _ in range(num_layers - 1)])
    self.pools.extend(
        [EdgePooling(hidden) for _ in range(num_layers - 1)])
    self.embed_final = GNN_Block(hidden, hidden)
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear((num_layers + 1) * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
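# A hedged forward() sketch for the EdgePool model above (the real forward is
# not shown; GNN_Block is assumed to take (x, edge_index)). Each block is
# followed by an EdgePooling step, per-stage mean-pooled summaries are
# concatenated by JumpingKnowledge, and lin1's (num_layers + 1) * hidden
# input matches the num_layers + 1 collected stages.
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.conv1(x, edge_index)
    xs = [global_mean_pool(x, batch)]
    x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
    for conv, pool in zip(self.convs, self.pools):
        x = conv(x, edge_index)
        xs.append(global_mean_pool(x, batch))
        x, edge_index, batch, _ = pool(x, edge_index, batch)
    x = self.embed_final(x, edge_index)
    xs.append(global_mean_pool(x, batch))
    x = self.jump(xs)  # 'cat' mode: (num_layers + 1) * hidden features
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)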
def __init__(self, in_channels, out_channels, beta=0.3):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.beta = beta
    text_feature_size = 220
    # NOTE: `train_dataset` is assumed to be available in the enclosing scope.
    self.conv1_img = GATConv(train_dataset.num_features, 512, heads=3, dropout=0.2)
    self.conv1_text = GATConv(text_feature_size, text_feature_size // 2, heads=3, dropout=0.2)
    self.conv2_img = GATConv(512 * 3, 512, heads=1, dropout=0.2)
    self.conv2_text = GATConv(text_feature_size // 2 * 3, text_feature_size // 2, heads=1, dropout=0.2)
    self.conv3_img = GATConv(512, 20, heads=1, dropout=0.2)
    self.conv3_text = GATConv(text_feature_size // 2, 20, heads=1, dropout=0.2)
    self.conv4 = GATConv(40, 20, heads=2, dropout=0.2)
    self.pool1 = EdgePooling(40)  # alternative considered: global_mean_pool
    self.lin1 = Lin(40, 20)
    self.lin2 = Lin(20, 2)
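# A speculative forward() sketch for the two-branch GAT above (branch wiring,
# a shared edge_index, and the readout are assumptions; beta's role is not
# shown in the source). Image and text node features are embedded by separate
# GAT stacks, concatenated to 40 channels (20 + 20), refined by conv4
# (20 channels x 2 heads = 40), coarsened with EdgePooling, and read out.
import torch
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, x_img, x_text, edge_index, batch):
    x_img = F.elu(self.conv1_img(x_img, edge_index))
    x_img = F.elu(self.conv2_img(x_img, edge_index))
    x_img = F.elu(self.conv3_img(x_img, edge_index))
    x_text = F.elu(self.conv1_text(x_text, edge_index))
    x_text = F.elu(self.conv2_text(x_text, edge_index))
    x_text = F.elu(self.conv3_text(x_text, edge_index))
    x = torch.cat([x_img, x_text], dim=-1)   # 20 + 20 = 40 channels
    x = F.elu(self.conv4(x, edge_index))     # 2 heads x 20 = 40 channels
    x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
    x = global_mean_pool(x, batch)
    x = F.relu(self.lin1(x))
    return self.lin2(x)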
def test_compute_edge_score_sigmoid():
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    raw = torch.randn(edge_index.size(1))

    e = EdgePooling.compute_edge_score_sigmoid(raw, edge_index, 6)
    assert torch.all(e >= 0) and torch.all(e <= 1)
    # The sigmoid is monotonic, so the ordering of the scores is preserved.
    assert torch.all(torch.argsort(raw) == torch.argsort(e))
def __init__(self, sample, multipliers=(4, 4, 4), channels=8, finalnodes=2):
    super(GraphConvPoolNet, self).__init__()
    self.channels = channels
    # Channel widths grow cumulatively with each stage.
    c1 = multipliers[0] * self.channels
    c2 = (multipliers[0] + multipliers[1]) * self.channels
    c3 = (multipliers[0] + multipliers[1] + multipliers[2]) * self.channels

    self.input = GraphConv(sample.num_node_features, self.channels)
    self.conv1 = GraphConv(self.channels, c1)
    self.pool1 = EdgePooling(c1, dropout=0.2)
    self.conv2 = GraphConv(c1, c2)
    self.pool2 = EdgePooling(c2, dropout=0.2)
    self.conv3 = GraphConv(c2, c3)
    self.looppool = EdgePooling(c3, dropout=0.2)
    self.loopconv = GraphConv(c3, c3)
    # Readout layer
    self.readout = max_pool_x
    self.finalnodes = finalnodes
    self.output = nn.Linear(self.finalnodes * c3, 1)
def test_compute_edge_score_softmax():
    from torch_scatter import scatter_add

    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    raw = torch.randn(edge_index.size(1))

    e = EdgePooling.compute_edge_score_softmax(raw, edge_index, 6)
    assert torch.all(e >= 0) and torch.all(e <= 1)

    # Test whether each node's incoming edge scores sum up to one.
    assert torch.allclose(scatter_add(e, edge_index[1]),
                          torch.Tensor([1, 1, 1, 1, 1, 1]))
def poollayer(self, pooltype):
    self.pooltype = pooltype
    if self.pooltype == 'TopKPool':
        self.pool1 = TopKPooling(1024)
        self.pool2 = TopKPooling(1024)
    elif self.pooltype == 'EdgePool':
        self.pool1 = EdgePooling(1024)
        self.pool2 = EdgePooling(1024)
    elif self.pooltype == 'ASAPool':
        self.pool1 = ASAPooling(1024)
        self.pool2 = ASAPooling(1024)
    elif self.pooltype == 'SAGPool':
        self.pool1 = SAGPooling(1024)
        self.pool2 = SAGPooling(1024)
    else:
        # Fail fast instead of returning undefined pooling layers.
        raise NotImplementedError(
            f'Graph pooling method {pooltype!r} is not implemented')
    return self.pool1, self.pool2
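# Hypothetical usage sketch (`model`, `x`, `edge_index`, and `batch` are
# assumed names, not from the source). Note that the four classes do not
# share one forward signature, so the calling code must unpack each variant
# differently:
#   EdgePooling:              x, edge_index, batch, unpool_info
#   TopKPooling / SAGPooling: x, edge_index, edge_attr, batch, perm, score
#   ASAPooling:               x, edge_index, edge_weight, batch, perm
pool1, pool2 = model.poollayer('EdgePool')
x, edge_index, batch, unpool_info = pool1(x, edge_index, batch)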
def __init__(self, k=2, w1=128, w2=128, w3=128):
    super(Net, self).__init__()
    # Frozen layers: used only to score and select nodes, never trained.
    self.conv = CustomGCN(2, 1, cached=False)
    self.conv.weight.requires_grad = False
    self.topk = TopKPooling(1, min_score=0.1)
    self.topk.weight.requires_grad = False

    self.conv1 = ChebConv(2, w1, k)
    self.bn1 = BatchNorm1d(w1)
    self.pool1 = EdgePooling(w1)
    self.conv2 = ChebConv(w1, w2, k)
    self.bn2 = BatchNorm1d(w2)
    self.pool2 = EdgePooling(w2)
    self.conv3 = ChebConv(w2, w3, k)
    self.bn3 = BatchNorm1d(w3)
    self.linear = Linear(w3, 3)
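# A speculative forward() for the ChebConv/EdgePooling net above (not from
# the source). The frozen CustomGCN is assumed to produce a per-node score
# that TopKPooling consumes via its attn argument, so the original 2-dim
# features survive the selection; global_mean_pool is an assumed readout.
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    score = self.conv(x, edge_index)  # frozen 2 -> 1 scoring layer
    x, edge_index, _, batch, _, _ = self.topk(x, edge_index, batch=batch,
                                              attn=score)
    x = F.relu(self.bn1(self.conv1(x, edge_index)))
    x, edge_index, batch, _ = self.pool1(x, edge_index, batch)
    x = F.relu(self.bn2(self.conv2(x, edge_index)))
    x, edge_index, batch, _ = self.pool2(x, edge_index, batch)
    x = F.relu(self.bn3(self.conv3(x, edge_index)))
    return self.linear(global_mean_pool(x, batch))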
def test_edge_pooling():
    x = torch.Tensor([[0], [1], [2], [3], [4], [5], [-1]])
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4, 0]])
    batch = torch.tensor([0, 0, 0, 0, 1, 1, 0])

    op = EdgePooling(in_channels=1)
    assert op.__repr__() == 'EdgePooling(1)'

    # Set the parameters to fixed values so we can test the expected outcome.
    # In-place writes to leaf parameters must not be tracked by autograd.
    with torch.no_grad():
        op.lin.weight[0, 0] = 1
        op.lin.weight[0, 1] = 1
        op.lin.bias[0] = 0

    # Test pooling.
    new_x, new_edge_index, new_batch, unpool_info = op(x, edge_index, batch)
    assert new_x.size(0) == new_batch.size(0) == 4
    assert new_batch.tolist() == [1, 0, 0, 0]
    assert new_edge_index.tolist() == [[0, 1, 1, 2, 2, 3], [0, 1, 2, 1, 2, 2]]

    # Test unpooling.
    unpooled_x, unpooled_edge_index, unpooled_batch = op.unpool(
        new_x, unpool_info)
    assert unpooled_edge_index.tolist() == edge_index.tolist()
    assert unpooled_batch.tolist() == batch.tolist()
    assert x.size() == unpooled_x.size()
    assert unpooled_x.tolist() == [[1], [1], [5], [5], [9], [9], [-1]]

    # Test edge cases.
    x = torch.Tensor([[0], [1], [2], [3], [4], [5]])
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    batch = torch.tensor([0, 0, 0, 0, 1, 1])
    new_x, new_edge_index, new_batch, _ = op(x, edge_index, batch)
    assert new_x.size(0) == new_batch.size(0) == 3
    assert new_batch.tolist() == [1, 0, 0]
    assert new_edge_index.tolist() == [[0, 1, 1, 2, 2], [0, 1, 2, 1, 2]]
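# A small round-trip sketch (assumed usage, not part of the test) showing how
# unpool_info lets EdgePooling restore the original graph resolution, which
# is what makes it usable in U-Net-style encoder/decoder models.
import torch
from torch_geometric.nn import EdgePooling

pool = EdgePooling(in_channels=16)
x = torch.randn(6, 16)
edge_index = torch.tensor([[0, 1, 1, 2, 3, 4],
                           [1, 0, 2, 1, 4, 3]])
batch = torch.zeros(6, dtype=torch.long)

coarse_x, coarse_edge_index, coarse_batch, info = pool(x, edge_index, batch)
fine_x, fine_edge_index, fine_batch = pool.unpool(coarse_x, info)
assert fine_x.size(0) == x.size(0)  # back to the original six nodes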
def __init__(self, dataset, num_layers, hidden):
    super().__init__()
    self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend([
        GraphConv(hidden, hidden, aggr='mean')
        for _ in range(num_layers - 1)
    ])
    self.pools.extend(
        [EdgePooling(hidden) for _ in range(num_layers // 2)])
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
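# A hedged forward() sketch for the model above (an assumption; the source
# does not show it). The num_layers // 2 pooling layers suggest EdgePooling
# is applied after every second conv; JumpingKnowledge in 'cat' mode then
# matches lin1's num_layers * hidden input (one mean-pooled summary per conv).
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    xs = [global_mean_pool(x, batch)]
    for i, conv in enumerate(self.convs):
        x = F.relu(conv(x, edge_index))
        xs.append(global_mean_pool(x, batch))
        if i % 2 == 0 and i // 2 < len(self.pools):
            x, edge_index, batch, _ = self.pools[i // 2](x, edge_index, batch)
    x = self.jump(xs)  # num_layers * hidden features
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)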
def message(self, x_i, x_j, edge_index: Adj, edge_attr: OptTensor,
            size) -> Tensor:
    if self.debug:
        print('a x_j:', x_j.shape, 'x_i:', x_i.shape,
              'edge_attr:', edge_attr.shape)
    if self.step == 0:
        x_i = F.leaky_relu(self.atom_fc(x_i))  # code 3
        # neighbor_feature => neighbor_fc
        x_j = torch.cat([x_j, edge_attr], dim=-1)  # code 8
        if self.debug:
            print('b neighbor_feature i = 0', x_j.shape)
        x_j = F.leaky_relu(self.neighbor_fc(x_j))  # code 9
        if self.debug:
            print('c neighbor_feature i = 0', x_j.shape)

    # Alignment score between the center node and each neighbor.
    evu = F.leaky_relu(self.align(torch.cat([x_i, x_j], dim=-1)))  # code 10
    if self.debug:
        print('d align_score:', evu.shape)

    # Normalize the scores over each node's incoming edges by reusing
    # EdgePooling's softmax-based edge scorer.
    avu = EdgePooling.compute_edge_score_softmax(
        evu, edge_index, edge_index.max().item() + 1)  # code 11
    if self.debug:
        print('e attention_weight:', avu.shape)

    # Downscale the 200-dim fingerprint to 32 before attending ...
    c_i = F.elu(torch.mul(avu, self.attend(self.dropout(x_i))))  # code 12
    if self.debug:
        print('f context', c_i.shape)

    # ... then the GRU cell maps the context back to the 200-dim state.
    x_i = self.rnn(c_i, x_i)
    if self.debug:
        print('g gru', x_i.shape)
    return x_i