Example #1
    def __init__(
            self, input_dimension: int, output_dimension: int,
            _ratio: _typing.Union[float, int], _dropout: _typing.Optional[float],
            _act: _typing.Optional[str], num_graph_features: _typing.Optional[int]
    ):
        super(_DiffPoolDecoder, self).__init__()
        self.input_dimension = input_dimension
        self.output_dimension = output_dimension
        self.ratio: _typing.Union[float, int] = _ratio
        self._act: _typing.Optional[str] = _act
        self.dropout: _typing.Optional[float] = _dropout
        self.num_graph_features: _typing.Optional[int] = num_graph_features

        self.conv1 = GraphConv(self.input_dimension, 128)
        self.pool1 = TopKPooling(128, ratio=self.ratio)
        self.conv2 = GraphConv(128, 128)
        self.pool2 = TopKPooling(128, ratio=self.ratio)
        self.conv3 = GraphConv(128, 128)
        self.pool3 = TopKPooling(128, ratio=self.ratio)

        if (
                isinstance(self.num_graph_features, int)
                and self.num_graph_features > 0
        ):
            self.lin1 = torch.nn.Linear(256 + self.num_graph_features, 128)
        else:
            self.lin1 = torch.nn.Linear(256, 128)
        self.lin2 = torch.nn.Linear(128, 64)
        self.lin3 = torch.nn.Linear(64, self.output_dimension)
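These layers follow the familiar TopK readout pattern: each conv+pool block is summarized by concatenated global max- and mean-pooling (128 + 128 = 256), and the block summaries are summed before the linear head. The snippet omits the forward method, so the following is only a minimal sketch consistent with the layer shapes, assuming num_graph_features is unset:

import torch
import torch.nn.functional as F
from torch_geometric.nn import global_max_pool as gmp, global_mean_pool as gap

def forward(self, x, edge_index, batch):
    # hedged sketch of a method body, not the project's actual forward()
    x = F.relu(self.conv1(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, batch=batch)
    x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)  # 128 + 128 = 256
    x = F.relu(self.conv2(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, batch=batch)
    x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    x = F.relu(self.conv3(x, edge_index))
    x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, batch=batch)
    x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
    x = x1 + x2 + x3                  # 256-wide, matching lin1's input
    x = F.relu(self.lin1(x))          # graph-level features omitted in this sketch
    x = F.relu(self.lin2(x))
    return self.lin3(x)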
Example #2
    def __init__(self,
                 sample=None,
                 out_channels=4,
                 augmented_channels_multiplier=5,
                 convlayers=4):
        super(UnweightedSimplifiedDebruijnGraphNet, self).__init__()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.empty_edges = torch.tensor([[], []],
                                        dtype=torch.long,
                                        device=self.device)
        self.channels = out_channels * augmented_channels_multiplier
        self.augmented_channels_multiplier = augmented_channels_multiplier
        self.dense_input = GraphConv(sample.num_node_features,
                                     augmented_channels_multiplier)
        self.input = GraphConv(augmented_channels_multiplier, self.channels)
        self.conv1 = GraphConv(self.channels, 2 * self.channels)
        self.conv2 = GraphConv(2 * self.channels, 4 * self.channels)
        self.conv3 = GraphConv(4 * self.channels, 8 * self.channels)
        # self.conv4 = GraphConv(8*out_channels, 16*out_channels)
        self.final_nodes = 7
        self.convlayers = convlayers
        self.hidden_channels = 2**(convlayers - 1)
        # self.pool = SortPooling(self.final_nodes)
        self.output = nn.Sequential(
            AdaptiveAvgPool1d(self.final_nodes), Flatten(), Flatten(),
            nn.Linear(self.final_nodes * self.channels * self.hidden_channels,
                      1)
            # nn.Linear(self.final_nodes*8*out_channels, 1)
        )
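The readout width follows from the constructor arithmetic; a quick check with the defaults (out_channels=4, augmented_channels_multiplier=5, convlayers=4, final_nodes=7):

channels = 4 * 5                            # 20
hidden_channels = 2 ** (4 - 1)              # 8, mirroring conv3's 8 * channels output
linear_in = 7 * channels * hidden_channels  # 7 * 20 * 8 = 1120 inputs to nn.Linear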
Example #3
    def __init__(self, sample=None, out_channels=4):
        super(UnweightedDebruijnGraphNet, self).__init__()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.input = GraphConv(1, out_channels=out_channels)
        self.conv1 = GraphConv(out_channels, 2 * out_channels)
        self.conv2 = GraphConv(2 * out_channels, 4 * out_channels)
        self.conv3 = GraphConv(4 * out_channels, 8 * out_channels)
        # self.conv4 = GraphConv(8*out_channels, 16*out_channels)
        self.final_nodes = 41
        # self.pool = SortPooling(self.final_nodes)
        self.output = nn.Sequential(
            AdaptiveAvgPool1d(self.final_nodes), Flatten(), Flatten(),
            nn.Linear(self.final_nodes * out_channels * 8, 1))
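Here the readout factor of 8 is hard-coded to match conv3's 8 * out_channels output, and the input layer is fixed at one node feature. A hypothetical usage line (the class name and defaults come from the snippet; everything else is assumed):

# node features must be 1-dimensional, since the input layer is GraphConv(1, ...)
model = UnweightedDebruijnGraphNet(out_channels=4)
# readout width: final_nodes * out_channels * 8 = 41 * 4 * 8 = 1312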
Example #4
    def __init__(self, sample, multipliers=(4, 4, 4), channels=8, finalnodes=2):
        super(GraphConvPoolNet, self).__init__()
        self.channels = channels
        # cumulative widths: each block widens by the running sum of multipliers
        width1 = multipliers[0] * self.channels
        width2 = (multipliers[0] + multipliers[1]) * self.channels
        width3 = (multipliers[0] + multipliers[1] + multipliers[2]) * self.channels

        self.input = GraphConv(sample.num_node_features, self.channels)
        self.conv1 = GraphConv(self.channels, width1)
        self.pool1 = EdgePooling(width1, dropout=0.2)
        self.conv2 = GraphConv(width1, width2)
        self.pool2 = EdgePooling(width2, dropout=0.2)
        self.conv3 = GraphConv(width2, width3)

        self.looppool = EdgePooling(width3, dropout=0.2)
        self.loopconv = GraphConv(width3, width3)

        # Readout layer
        self.readout = max_pool_x
        self.finalnodes = finalnodes
        self.output = nn.Linear(self.finalnodes * width3, 1)
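With the defaults, the widths work out as follows (a quick arithmetic check, values taken from the signature above):

width1 = 4 * 8            # 32 after conv1 / pool1
width2 = (4 + 4) * 8      # 64 after conv2 / pool2
width3 = (4 + 4 + 4) * 8  # 96 after conv3, looppool, loopconv
out_in = 2 * width3       # 192 inputs to the final nn.Linear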
Example #5
import torch
from torch_geometric.nn import GraphConv
from torch_geometric.nn import global_mean_pool as gap, global_add_pool as gadd, global_max_pool as gmp
# SOM is assumed to be an external self-organizing-map layer; its import is project-specific

class GNN_Conv_SOM(torch.nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 n_class=2,
                 som_grid_dims=(10, 10),
                 dropout=0,
                 device=None):
        super(GNN_Conv_SOM, self).__init__()

        if device is None:
            self.device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_class = n_class
        self.som_grid_dims = som_grid_dims

        self.conv1 = GraphConv(self.in_channels, self.out_channels)
        self.conv2 = GraphConv(self.out_channels, out_channels * 2)
        self.conv3 = GraphConv(self.out_channels * 2, out_channels * 3)

        self.act1 = torch.nn.LeakyReLU()
        self.act2 = torch.nn.LeakyReLU()
        self.act3 = torch.nn.LeakyReLU()

        self.norm1 = torch.nn.BatchNorm1d(self.out_channels)
        self.norm2 = torch.nn.BatchNorm1d(self.out_channels * 2)
        self.norm3 = torch.nn.BatchNorm1d(self.out_channels * 3)

        self.dropout = torch.nn.Dropout(p=dropout)

        # define readout of the GNN-only model
        self.lin_GNN = torch.nn.Linear(
            (out_channels + out_channels * 2 + self.out_channels * 3) * 3,
            n_class)

        # define SOM for the read-out

        self.som1 = SOM(out_channels, out_size=som_grid_dims)
        self.som2 = SOM(out_channels * 2, out_size=som_grid_dims)
        self.som3 = SOM(out_channels * 3, out_size=som_grid_dims)

        # define read-out
        self.out_conv1 = GraphConv(som_grid_dims[0] * som_grid_dims[1],
                                   self.out_channels)
        self.out_conv2 = GraphConv(som_grid_dims[0] * som_grid_dims[1],
                                   self.out_channels)
        self.out_conv3 = GraphConv(som_grid_dims[0] * som_grid_dims[1],
                                   self.out_channels)

        self.out_norm1 = torch.nn.BatchNorm1d(self.out_channels)
        self.out_norm2 = torch.nn.BatchNorm1d(self.out_channels)
        self.out_norm3 = torch.nn.BatchNorm1d(self.out_channels)

        self.out_act = torch.nn.ReLU()

        self.out_norm4 = torch.nn.BatchNorm1d(out_channels * 3 * 3)
        self.lin_out1 = torch.nn.Linear(out_channels * 3 * 3, out_channels)
        self.lin_out2 = torch.nn.Linear(out_channels, out_channels // 2)
        self.lin_out3 = torch.nn.Linear(out_channels // 2, n_class)

        self.out_fun = torch.nn.LogSoftmax(dim=1)

        self.reset_parameters()

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.conv3.reset_parameters()

        self.norm1.reset_parameters()
        self.norm2.reset_parameters()
        self.norm3.reset_parameters()

        self.lin_GNN.reset_parameters()

        self.out_conv1.reset_parameters()
        self.out_conv2.reset_parameters()
        self.out_conv3.reset_parameters()

        self.out_norm1.reset_parameters()
        self.out_norm2.reset_parameters()
        self.out_norm3.reset_parameters()

        self.out_norm4.reset_parameters()

        self.lin_out1.reset_parameters()
        self.lin_out2.reset_parameters()
        self.lin_out3.reset_parameters()

    def get_som_weights(self):
        return self.som1.weight, self.som2.weight, self.som3.weight

    def forward(self, data, conv_train=False):

        x = data.x

        edge_index = data.edge_index
        x1 = self.norm1(self.act1(self.conv1(x, edge_index)))
        x = self.dropout(x1)

        x2 = self.norm2(self.act2(self.conv2(x, edge_index)))
        x = self.dropout(x2)

        x3 = self.norm3(self.act3(self.conv3(x, edge_index)))

        h_conv = torch.cat([x1, x2, x3], dim=1)

        # compute the GNN-only output

        conv_batch_avg = gap(h_conv, data.batch)
        conv_batch_add = gadd(h_conv, data.batch)
        conv_batch_max = gmp(h_conv, data.batch)

        h_GNN = torch.cat([conv_batch_avg, conv_batch_add, conv_batch_max],
                          dim=1)

        gnn_out = self.out_fun(self.lin_GNN(h_GNN))

        if conv_train:
            return None, None, gnn_out

        # SOM
        _, _, som_out_1 = self.som1(x1)
        _, _, som_out_2 = self.som2(x2)
        _, _, som_out_3 = self.som3(x3)

        # READOUT
        h1 = self.out_norm1(self.act1(self.out_conv1(som_out_1, edge_index)))
        h2 = self.out_norm2(self.act2(self.out_conv2(som_out_2, edge_index)))
        h3 = self.out_norm3(self.act3(self.out_conv3(som_out_3, edge_index)))

        som_out_conv = torch.cat([h1, h2, h3], dim=1)

        som_batch_avg = gap(som_out_conv, data.batch)
        som_batch_add = gadd(som_out_conv, data.batch)
        som_batch_max = gmp(som_out_conv, data.batch)

        h = torch.cat([som_batch_avg, som_batch_add, som_batch_max], dim=1)

        h = self.out_norm4(h)

        h = self.out_act(self.lin_out1(h))
        h = self.dropout(h)

        h = self.out_act(self.lin_out2(h))
        h = self.dropout(h)

        h = self.out_fun(self.lin_out3(h))

        return h, h_conv, gnn_out
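The conv_train flag suggests a two-phase regime: pretrain the convolutional stack on gnn_out alone, then train the SOM read-out path on h. A hedged sketch of how the three return values might be consumed; the loss functions and variable names (model, batch) are assumptions, not shown in the source:

import torch.nn.functional as F

# phase 1 (assumed): conv stack only; the SOM branch is skipped entirely
_, _, gnn_out = model(batch, conv_train=True)
loss = F.nll_loss(gnn_out, batch.y)  # LogSoftmax outputs pair with NLL loss

# phase 2 (assumed): full pass through the SOM read-out
h, h_conv, gnn_out = model(batch)
loss = F.nll_loss(h, batch.y)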
Example #6
    def __init__(self, sample=None, pooling_layers=1, pooling_type="TopKPooling",
                 topk_ratio=0.6, convolution_type="GraphConv",
                 final_pooling="avg_pool_x", dense_output=False,
                 channels_optuna=2, final_nodes=2, optuna_multiplier=1):
        super(TopKPoolingNet, self).__init__()
        out_channels = 5
        augmented_channels_multiplier = 5
        # map option strings to layer classes; unknown strings fall back to
        # GraphConv / ASAPooling
        convolution_type = {"GraphConv": GraphConv,
                            "GATConv": GATConv}.get(convolution_type, GraphConv)
        self.pooling_type = {"TopKPooling": TopKPooling,
                             "SAGPooling": SAGPooling,
                             "EdgePooling": EdgePooling}.get(pooling_type, ASAPooling)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.empty_edges = torch.tensor([[], []], dtype=torch.long, device=self.device)
        self.channels = out_channels * augmented_channels_multiplier * channels_optuna
        self.augmented_channels_multiplier = augmented_channels_multiplier
        self.dense_input = GraphConv(sample.num_node_features, augmented_channels_multiplier)
        self.input = convolution_type(augmented_channels_multiplier, self.channels)

        self.conv1 = convolution_type(self.channels, 2 * self.channels)
        # if pooling_type == "EdgePooling":
        #     self.topkpool1 = self.pooling_type(2 * self.channels, dropout=0.3)
        # else:
        #     self.topkpool1 = self.pooling_type(2 * self.channels, ratio=topk_ratio+0.1)
        self.conv2 = convolution_type(2 * self.channels, 4 * self.channels)
        if pooling_type == "EdgePooling":
            self.topkpool2 = self.pooling_type(4 * self.channels)
        else:
            self.topkpool2 = self.pooling_type(4 * self.channels, ratio=topk_ratio)

        self.conv3 = convolution_type(4 * self.channels, 8 * self.channels * optuna_multiplier)
        # if pooling_type == "EdgePooling":
        #     self.topkpool3 = self.pooling_type(8 * self.channels * optuna_multiplier, dropout=0.3)
        # else:
        #     self.topkpool3 = self.pooling_type(8 * self.channels * optuna_multiplier, ratio=topk_ratio)
        # self.conv4 = convolution_type(8 * self.channels * optuna_multiplier, 8 * self.channels * optuna_multiplier)
        # self.conv5 = convolution_type(8 * self.channels * optuna_multiplier, 16 * self.channels * optuna_multiplier)
        self.final_pooling = final_pooling
        self.pooling_layers = pooling_layers
        self.final_nodes = final_nodes
        if self.final_pooling == "topk":
            ratio = 0.5 if pooling_type == "EdgePooling" else topk_ratio
            ratio = ratio**pooling_layers
            current = np.ceil(len(sample.x) * ratio)
            ratio = self.final_nodes / current - 0.01
            self.last_pooling_layer = TopKPooling(8 * self.channels * optuna_multiplier,
                                                  ratio=ratio)
        elif self.final_pooling == "asap":
            ratio = 0.5 if pooling_type == "EdgePooling" else topk_ratio
            ratio = ratio ** pooling_layers
            current = np.ceil(len(sample.x) * ratio)
            ratio = self.final_nodes / current
            self.last_pooling_layer = ASAPooling(8 * self.channels * optuna_multiplier, ratio=ratio)
        elif self.final_pooling == "sag":
            ratio = 0.5 if pooling_type == "EdgePooling" else topk_ratio
            ratio = ratio ** pooling_layers
            current = np.ceil(len(sample.x) * ratio)
            ratio = self.final_nodes / current
            self.last_pooling_layer = SAGPooling(8 * self.channels * optuna_multiplier, ratio=ratio)

        self.input_nodes_output_layer = self.final_nodes * self.channels * 8 * optuna_multiplier
        if dense_output:
            self.output = nn.Sequential(
                nn.Linear(self.input_nodes_output_layer, self.channels),
                nn.GELU(),
                nn.Linear(self.channels, 1)
            )
        else:
            # k = 16
            # l = 8
            # self.phi = SmallPhi(self.input_nodes_output_layer, k)
            # self.rho = SmallRho(k, l)
            # self.output = nn.Sequential(
            #     InvariantModel(self.phi, self.rho).cuda(),
            #     nn.GELU(),
            #     nn.Linear(l, 1)
            # )
            self.output = nn.Linear(self.input_nodes_output_layer, 1)
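All the final-pooling branches use the same estimate: ratio ** pooling_layers approximates the fraction of nodes surviving the earlier pooling, and the last layer's ratio is chosen so roughly final_nodes remain. A worked example with an assumed 100-node sample and the defaults (topk_ratio=0.6, pooling_layers=1, final_nodes=2):

import numpy as np

current = np.ceil(100 * 0.6 ** 1)  # ~60 nodes expected after earlier pooling
ratio = 2 / current - 0.01         # the "topk" branch backs off by 0.01 so TopKPooling keeps ~2 nodes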