Example #1
    def setup_layers(self):
        """
        Creating the layers.
        """
        self.conv1 = ConvolutionModule(self.n, self.f)
        self.conv2 = ConvolutionModule(self.n, self.f)
        self.conv3 = ConvolutionModule(self.n, self.f)
        self.attention = AttentionModule(self.n, self.f)
        self.tensor_network = TensorNetworkModule(self.f + self.cf)
        self.linear = torch.nn.Linear(2 * (self.f + self.cf), 2)
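For orientation, the final linear layer here takes the concatenation of the two pooled graph embeddings, each of length f + cf, and maps it to two outputs. A minimal shape check using only torch and made-up sizes (ConvolutionModule, AttentionModule and TensorNetworkModule are defined elsewhere in this codebase):

import torch

f, cf = 16, 4                              # hypothetical feature_size and count_size
linear = torch.nn.Linear(2 * (f + cf), 2)  # same shape as self.linear above

pooled_1 = torch.randn(f + cf, 1)          # pooled embedding of graph 1 (+ count features)
pooled_2 = torch.randn(f + cf, 1)          # pooled embedding of graph 2 (+ count features)
pooled = torch.cat((pooled_1, pooled_2), dim=0)

print(linear(pooled.view(1, -1)).shape)    # torch.Size([1, 2])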
Example #2
    def setup_layers(self):
        """
        Creating the layers.
        """
        self.calculate_bottleneck_features()
        # three GCN layers
        '''
        GCN init: def __init__(self, in_channels, out_channels)
        def forward(self, x, edge_index):
            x has shape [N, in_channels]
            edge_index has shape [2, E]
        '''
        self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)
        self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)
        self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)
        # produces node features of shape [n, out_channels]

        # attention pooling
        self.attention = AttentionModule(self.args)

        # combines embedding_graph_1 and embedding_graph_2 into a joint vector
        self.tensor_network = TenorNetworkModule(self.args)

        # fully connected layer: feature_count -> bottle_neck_neurons (e.g. 16 -> 16)
        self.fully_connected_first = torch.nn.Linear(
            self.feature_count, self.args.bottle_neck_neurons)
        # scoring layer: bottle_neck_neurons -> 1 (e.g. 16 -> 1)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)
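A minimal sketch of how the three-layer GCN stack above might be driven, assuming torch_geometric's GCNConv and a plain argparse.Namespace standing in for args (all sizes below are made up for illustration):

import torch
from argparse import Namespace
from torch_geometric.nn import GCNConv

# Hypothetical configuration mirroring the attribute names used above.
args = Namespace(filters_1=64, filters_2=32, filters_3=16, bottle_neck_neurons=16)
number_labels = 8

convolution_1 = GCNConv(number_labels, args.filters_1)
convolution_2 = GCNConv(args.filters_1, args.filters_2)
convolution_3 = GCNConv(args.filters_2, args.filters_3)

x = torch.randn(5, number_labels)                         # node features, shape [N, in_channels]
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])   # edges, shape [2, E]

x = torch.relu(convolution_1(x, edge_index))
x = torch.relu(convolution_2(x, edge_index))
x = convolution_3(x, edge_index)
print(x.shape)  # torch.Size([5, 16]) -> [n, out_channels]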
Example #3
    def setup_layers(self):
        """
        Creating the layers.
        """
        self.calculate_bottleneck_features()
        if self.args.gnn_operator == 'gcn':
            self.convolution_1 = GCNConv(self.number_node_labels,
                                         self.args.filters_1)
            self.convolution_2 = GCNConv(self.args.filters_1,
                                         self.args.filters_2)
            self.convolution_3 = GCNConv(self.args.filters_2,
                                         self.args.filters_3)
        elif self.args.gnn_operator == 'gin':
            nn1 = torch.nn.Sequential(
                torch.nn.Linear(self.number_node_labels, self.args.filters_1),
                torch.nn.ReLU(),
                torch.nn.Linear(self.args.filters_1, self.args.filters_1),
                torch.nn.BatchNorm1d(self.args.filters_1))

            nn2 = torch.nn.Sequential(
                torch.nn.Linear(self.args.filters_1, self.args.filters_2),
                torch.nn.ReLU(),
                torch.nn.Linear(self.args.filters_2, self.args.filters_2),
                torch.nn.BatchNorm1d(self.args.filters_2))

            nn3 = torch.nn.Sequential(
                torch.nn.Linear(self.args.filters_2, self.args.filters_3),
                torch.nn.ReLU(),
                torch.nn.Linear(self.args.filters_3, self.args.filters_3),
                torch.nn.BatchNorm1d(self.args.filters_3))

            self.convolution_1 = GINConv(nn1, train_eps=True)
            self.convolution_2 = GINConv(nn2, train_eps=True)
            self.convolution_3 = GINConv(nn3, train_eps=True)
        # elif self.args.gnn_operator == 'gatedgcn':
        #    self.convolution_1 = GatedGCN(self.number_node_labels, self.args.filters_1)
        #    self.convolution_2 = GatedGCN(self.args.filters_1, self.args.filters_2)
        #    self.convolution_3 = GatedGCN(self.args.filters_2, self.args.filters_3)
        elif self.args.gnn_operator == 'gat':
            self.convolution_1 = GATConv(self.number_node_labels,
                                         self.args.filters_1)
            self.convolution_2 = GATConv(self.args.filters_1,
                                         self.args.filters_2)
            self.convolution_3 = GATConv(self.args.filters_2,
                                         self.args.filters_3)
        else:
            raise NotImplementedError('Unknown GNN-Operator.')

        if self.args.diffpool:
            self.attention = DiffPool(self.args)
        else:
            self.attention = AttentionModule(self.args)

        self.tensor_network = TensorNetworkModule(self.args)
        self.fully_connected_first = torch.nn.Linear(
            self.feature_count, self.args.bottle_neck_neurons)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)
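In the 'gin' branch, each GINConv wraps a small MLP (Linear -> ReLU -> Linear -> BatchNorm1d) that is applied to the summed neighbourhood features, and train_eps=True makes the self-loop weight learnable. A minimal sketch of that branch in isolation, with hypothetical sizes and torch_geometric's GINConv:

import torch
from torch_geometric.nn import GINConv

number_node_labels, filters_1 = 8, 64   # made-up sizes for illustration

nn1 = torch.nn.Sequential(
    torch.nn.Linear(number_node_labels, filters_1),
    torch.nn.ReLU(),
    torch.nn.Linear(filters_1, filters_1),
    torch.nn.BatchNorm1d(filters_1))
convolution_1 = GINConv(nn1, train_eps=True)

x = torch.randn(5, number_node_labels)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
print(convolution_1(x, edge_index).shape)  # torch.Size([5, 64])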
Example #4
    def setup_layers(self):
        """
        Creating the layers.
        """
        self.calculate_bottleneck_features()
        self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)
        self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)
        self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)
        self.attention = AttentionModule(self.args)
        self.tensor_network = TenorNetworkModule(self.args)
        self.fully_connected_first = torch.nn.Linear(
            self.feature_count, self.args.bottle_neck_neurons)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)
Example #5
    def setup_layers(self):
        """
        Creating the layers.
        """
        self.calculate_bottleneck_features()
        self.convolution_1 = GCNConv(100352, 128)
        self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)
        self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)
        self.attention = AttentionModule(self.args)
        self.tensor_network = TenorNetworkModule(self.args)
        self.fully_connected_first = torch.nn.Linear(self.feature_count,
                                                     self.args.bottle_neck_neurons)
        self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)
        self.vggg = list(torchvision.models.vgg16(
            pretrained=True).cuda().children())[0][:24]
        self.backbone = torch.nn.Sequential(*self.vggg)
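The hard-coded 100352 input width of convolution_1 corresponds to a flattened VGG16 feature map: the first 24 entries of torchvision's vgg16 feature extractor end at the fourth max-pool, which gives 512 x 14 x 14 = 100352 activations for a 224 x 224 input. A quick check of that shape relationship (CPU-only, untrained weights, recent torchvision API; the code above loads pretrained weights and moves the backbone to CUDA):

import torch
import torchvision

# Same slice of VGG16 feature layers as self.vggg above.
backbone = torch.nn.Sequential(
    *list(torchvision.models.vgg16(weights=None).children())[0][:24])

image = torch.randn(1, 3, 224, 224)
features = backbone(image)
print(features.shape)                       # torch.Size([1, 512, 14, 14])
print(features.flatten(start_dim=1).shape)  # torch.Size([1, 100352]), matching GCNConv(100352, 128)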
Example #6
class SimGNN(torch.nn.Module):
    """
    SimGNN: A Neural Network Approach to Fast Graph Similarity Computation
    https://arxiv.org/abs/1808.05689
    """
    def __init__(self, graph_size, feature_size, count_size):
        """
        :param graph_size: Number of nodes in each input graph.
        :param feature_size: Dimensionality of the node feature embeddings.
        :param count_size: Number of subgraph-count features to append (0 disables them).
        """
        super().__init__()
        self.n = graph_size
        self.f = feature_size
        self.cf = count_size
        self.setup_layers()

    def setup_layers(self):
        """
        Creating the layers.
        """
        self.conv1 = ConvolutionModule(self.n, self.f)
        self.conv2 = ConvolutionModule(self.n, self.f)
        self.conv3 = ConvolutionModule(self.n, self.f)
        self.attention = AttentionModule(self.n, self.f)
        self.tensor_network = TensorNetworkModule(self.f + self.cf)
        self.linear = torch.nn.Linear(2 * (self.f + self.cf), 2)

    def reset_parameters(self):
        self.conv1.init_parameters()
        self.conv2.init_parameters()
        self.conv3.init_parameters()
        self.attention.init_parameters()
        self.tensor_network.init_parameters()
        torch.nn.init.normal_(self.linear.weight, std=1)
        torch.nn.init.normal_(self.linear.bias, std=1)

    def count_subgraph(self, g):
        # Traces of successively higher (rescaled) powers of the adjacency
        # matrix g; for an unweighted graph, trace(g^k) counts closed walks
        # of length k.
        a = g / self.n * 2
        f = torch.zeros([self.cf, 1])
        for i in range(self.cf):
            a = torch.matmul(a, g) / (self.n * 0.3)
            f[i, 0] = torch.trace(a)
        return f

    def forward(self, g1, g2):
        # Node embeddings for each graph via the three shared convolution layers.
        features_1 = self.conv1(g1)
        features_1 = torch.nn.functional.relu(features_1)
        features_1 = torch.nn.functional.dropout(features_1)
        features_1 = self.conv2(features_1)
        features_1 = torch.nn.functional.relu(features_1)
        features_1 = torch.nn.functional.dropout(features_1)
        features_1 = self.conv3(features_1)

        features_2 = self.conv1(g2)
        features_2 = torch.nn.functional.relu(features_2)
        features_2 = torch.nn.functional.dropout(features_2)
        features_2 = self.conv2(features_2)
        features_2 = torch.nn.functional.relu(features_2)
        features_2 = torch.nn.functional.dropout(features_2)
        features_2 = self.conv3(features_2)

        # Attention pooling to graph-level embeddings.
        pooled_features_1 = self.attention(features_1)
        pooled_features_2 = self.attention(features_2)

        # Optionally append the subgraph-count features.
        if self.cf > 0:
            pooled_features_1 = torch.cat(
                (pooled_features_1, self.count_subgraph(g1)), dim=0)
            pooled_features_2 = torch.cat(
                (pooled_features_2, self.count_subgraph(g2)), dim=0)

        # Tensor-network interaction score plus a linear term on the
        # concatenated graph embeddings.
        score = self.tensor_network(pooled_features_1, pooled_features_2)
        pooled_features = torch.cat((pooled_features_1, pooled_features_2),
                                    dim=0)
        score += self.linear(pooled_features.view(1, -1))
        return score
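count_subgraph records the traces of successively higher (rescaled) powers of the adjacency matrix, and for an unweighted graph trace(A^k) counts closed walks of length k; in forward these cf values are appended to each graph's pooled embedding before the tensor network and the final linear layer see it. A self-contained illustration of that interpretation, using only torch and a toy triangle graph:

import torch

# Triangle graph: trace(A^3) = 6 closed walks of length 3
# (3 starting nodes x 2 directions around the triangle).
A = torch.tensor([[0., 1., 1.],
                  [1., 0., 1.],
                  [1., 1., 0.]])
print(torch.trace(A @ A @ A))    # tensor(6.)

# The same quantities as count_subgraph computes them, for n = 3 nodes and
# cf = 2 features: each entry is the trace of the next power of A, rescaled
# by the constants used above (2 / n once, then 1 / (0.3 * n) per step).
n, cf = 3, 2
a = A / n * 2
f = torch.zeros([cf, 1])
for i in range(cf):
    a = torch.matmul(a, A) / (n * 0.3)
    f[i, 0] = torch.trace(a)
print(f)                         # rescaled traces of A^2 and A^3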