Code example #1
    def __init__(self,
                 in_size,
                 kernel_size,
                 G_size=None,
                 bias=False,
                 normalize=False,
                 activation='relu',
                 dropout=0.,
                 b_norm=False,
                 pooling='sum',
                 pack_batch=False):
        # Accepted values for pooling are 'avg', 'max' and 'sum'
        super().__init__()
        self.in_size = in_size
        self.out_size = kernel_size
        self.G_size = G_size
        self._pooling = get_pooling(pooling)
        self.pack_batch = pack_batch
        # Fully connected transform applied to the node features
        self.net = FCLayer(self.in_size,
                           self.out_size,
                           activation=activation,
                           bias=bias,
                           dropout=dropout,
                           b_norm=b_norm)
        self.normalize = normalize
        self.reset_parameters()
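
For reference, a self-contained sketch of what the 'sum'/'avg'/'max' pooling returned by get_pooling could look like; this is an assumption based on the comment above, not the actual helper from this codebase.

import torch

def get_pooling_sketch(name):
    # Collapse node features (N, F) into a graph-level vector (F,).
    if name == 'sum':
        return lambda h: h.sum(dim=0)
    if name == 'avg':
        return lambda h: h.mean(dim=0)
    if name == 'max':
        return lambda h: h.max(dim=0).values
    raise ValueError("unknown pooling: %s" % name)

h = torch.randn(5, 8)  # 5 nodes, 8 features each
print(get_pooling_sketch('sum')(h).shape)  # torch.Size([8])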
Code example #2
    def __init__(self, feat_dim, nedges, out_size=32, depth=2, pooling='max'):
        super(EdgeGraphLayer, self).__init__()
        self.feat_dim = feat_dim
        self.edge_dim = nedges
        self.out_size = out_size
        self.update_layers = nn.ModuleList()
        # Project the raw node features to the hidden size
        self.input_layer = nn.Linear(self.feat_dim, self.out_size)
        self.depth = depth
        # One update layer per step; each consumes the node state
        # concatenated with the edge features
        for _ in range(self.depth):
            self.update_layers.append(
                FCLayer(self.out_size + self.edge_dim, self.out_size))
        self.pooling = get_pooling(pooling)
        self.readout = FCLayer(self.out_size, self.out_size)
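
The constructor suggests a depth-step update scheme in which each layer consumes the current node state concatenated with edge features, as the (out_size + edge_dim) input width implies. A self-contained sketch of that loop, with plain nn.Linear standing in for FCLayer and an assumed per-node edge summary:

import torch
import torch.nn as nn

feat_dim, nedges, out_size, depth, n_nodes = 8, 3, 32, 2, 5

input_layer = nn.Linear(feat_dim, out_size)
update_layers = nn.ModuleList(
    nn.Linear(out_size + nedges, out_size) for _ in range(depth))

x = torch.randn(n_nodes, feat_dim)  # node features
e = torch.randn(n_nodes, nedges)    # per-node edge features (assumed shape)
h = input_layer(x)
for layer in update_layers:
    # Mix the node state with the edge features at every step
    h = torch.relu(layer(torch.cat([h, e], dim=-1)))
print(h.shape)  # torch.Size([5, 32])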
Code example #3
    def __init__(self,
                 in_size,
                 kernel_size,
                 G_size=None,
                 eps=None,
                 net=None,
                 pooling="sum",
                 pack_batch=False,
                 **kwargs):

        super(GINLayer, self).__init__()
        self.in_size = in_size
        self.out_size = kernel_size
        self._pooling = get_pooling(pooling)
        self.pack_batch = pack_batch
        self.G_size = G_size
        # 'normalize' is not an FCLayer argument; drop it before forwarding kwargs
        kwargs.pop("normalize", None)
        self.net = (net or FCLayer)(in_size, kernel_size, **kwargs)
        self.chosen_eps = eps
        if eps is None:
            # No eps given: learn it, as in GIN with train_eps=True
            self.eps = torch.nn.Parameter(torch.Tensor([0]))
        else:
            # Fixed eps: a buffer, so it is saved and moved with the
            # module but never trained
            self.register_buffer('eps', torch.Tensor([eps]))
        self.reset_parameters()
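
The eps handling above follows the GIN update h_v' = MLP((1 + eps) * h_v + sum over neighbors of h_u): eps is learned when not supplied and fixed otherwise. A runnable toy version of that aggregation (the snippet does not show the layer's forward):

import torch

n, f = 4, 8
h = torch.randn(n, f)                        # node features
adj = (torch.rand(n, n) > 0.5).float()       # toy adjacency matrix
eps = torch.nn.Parameter(torch.Tensor([0]))  # learnable, as when eps is None above
mlp = torch.nn.Linear(f, 16)                 # stand-in for self.net / FCLayer

# (1 + eps) * own features + sum of neighbor features, then the MLP
out = mlp((1 + eps) * h + adj @ h)
print(out.shape)  # torch.Size([4, 16])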
Code example #4
    def __init__(self,
                 input_dim,
                 out_dim,
                 feat_dim=0,
                 nedges=1,
                 gather_dim=72,
                 conv_dims=[64, 64],
                 conv_dims_after=[128, 128],
                 linear_dim=[32],
                 dropout=0.2,
                 gather="agg",
                 pool_arch={},
                 pool_loss=False,
                 **kwargs):
        super(Encoder, self).__init__()
        GraphLayer = kwargs.pop(
            "GraphConv",
            GLayer)  # GIN-style convolution is used when none is specified
        activation = kwargs.pop("activation", nn.LeakyReLU())  # default activation
        self.input_dim = input_dim
        self.out_dim = out_dim
        self.feat_dim = feat_dim  # extra per-node features, when available
        self.pool_loss = pool_loss
        self.nedges = nedges
        self.conv_layers1 = nn.ModuleList()
        self.conv_layers2 = nn.ModuleList()
        input_dim += feat_dim
        if nedges > 1:
            self.edge_layer = EdgeGraphLayer(input_dim,
                                             nedges,
                                             method='cat',
                                             b_norm=False,
                                             dropout=0.0,
                                             activation=activation)
            input_dim = self.edge_layer.output_dim + self.feat_dim
        for cdim in conv_dims:
            self.conv_layers1.append(
                GraphLayer(input_dim,
                           kernel_size=cdim,
                           b_norm=False,
                           dropout=dropout,
                           activation=activation))
            input_dim = cdim

        self.pool_layer = get_graph_coarsener(**pool_arch)
        self.pool_arch = pool_arch.get("arch", "")
        if self.pool_layer is not None:
            self.pool_layer = self.pool_layer(input_dim)
            if self.pool_arch == "laplacian":
                input_dim = self.pool_layer.cluster_dim

        for cdim in conv_dims_after:
            self.conv_layers2.append(
                GraphLayer(input_dim,
                           kernel_size=cdim,
                           b_norm=False,
                           dropout=dropout,
                           activation=activation))
            input_dim = cdim

        if gather == "agg":
            self.agg_layer = AggLayer(input_dim, gather_dim, dropout)
        elif gather in ["attn", "gated"]:
            # Both options map to attention-based pooling
            self.agg_layer = get_pooling("attn",
                                         input_dim=input_dim,
                                         output_dim=gather_dim,
                                         dropout=dropout)
        else:
            # Plain pooling ('sum', 'max', ...) keeps the feature dimension
            gather_dim = input_dim
            self.agg_layer = get_pooling(gather)
        # Dense layers applied after the graph-level gathering
        input_dim = gather_dim

        self.linear_layers = nn.ModuleList()
        for ldim in linear_dim:
            self.linear_layers.append(
                FCLayer(input_dim, ldim, activation=activation, **kwargs))
            input_dim = ldim

        self.output_layer = nn.Linear(input_dim, out_dim)
        self.pool_inspect = None  # populated during the forward pass
        self.pooling_loss = []  # auxiliary losses collected from the pooling layer
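
Taken together, the constructor wires: optional edge layer -> first conv stack -> optional coarsening -> second conv stack -> graph-level gathering -> dense stack -> output. A self-contained sketch of the dimension bookkeeping, with plain nn.Linear standing in for the graph convolutions and the gather step (an approximation of the dataflow, not the real forward):

import torch
import torch.nn as nn

input_dim, feat_dim, out_dim, gather_dim = 16, 4, 10, 72
conv_dims, conv_dims_after, linear_dim = [64, 64], [128, 128], [32]

dim = input_dim + feat_dim
convs1, convs2, linears = nn.ModuleList(), nn.ModuleList(), nn.ModuleList()
for cdim in conv_dims:
    convs1.append(nn.Linear(dim, cdim))
    dim = cdim
for cdim in conv_dims_after:
    convs2.append(nn.Linear(dim, cdim))
    dim = cdim
agg = nn.Linear(dim, gather_dim)  # stands in for AggLayer / attention pooling
dim = gather_dim
for ldim in linear_dim:
    linears.append(nn.Linear(dim, ldim))
    dim = ldim
output_layer = nn.Linear(dim, out_dim)

h = torch.randn(5, input_dim + feat_dim)  # 5 nodes with extra features appended
for m in convs1:
    h = torch.relu(m(h))
for m in convs2:
    h = torch.relu(m(h))
g = torch.relu(agg(h)).sum(dim=0)  # collapse nodes into one graph vector
for m in linears:
    g = torch.relu(m(g))
print(output_layer(g).shape)  # torch.Size([10])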