Example #1
    def __init__(self, z_dim, atom_dim, max_vertex, nedges=1, **config):
        super(Decoder, self).__init__()
        self.z_dim = z_dim
        self.graph_size = max_vertex
        self.node_feat_dim = atom_dim
        self.nedges = nedges

        layers = []
        in_dim = z_dim
        for conf in config['layers']:
            layers.append(FCLayer(in_dim, **conf))
            in_dim = conf['out_size']
        self.layers = nn.Sequential(*layers)

        adj_layers = []
        adj_in_dim = in_dim
        for gconf in config['graph_layers']:
            adj_layers.append(FCLayer(adj_in_dim, **gconf))
            adj_in_dim = gconf['out_size']
        nnodes = self.graph_size * (self.graph_size - 1) // 2  # number of upper-triangle entries
        adj_layers.append(nn.Linear(adj_in_dim, nnodes * self.nedges))
        self.adj_layers = nn.Sequential(*adj_layers)

        node_layers = []
        node_in_dim = in_dim
        for nconf in config['node_layers']:
            node_layers.append(FCLayer(node_in_dim, **nconf))
            node_in_dim = nconf['out_size']
        node_layers.append(
            nn.Linear(node_in_dim, self.graph_size * self.node_feat_dim))
        self.node_layers = nn.Sequential(*node_layers)
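As a quick sanity check on the two output heads above (a standalone sketch with made-up sizes, not part of the original code): the adjacency head emits one logit per upper-triangular entry and per edge type, while the node head emits one logit per vertex and per node feature.

# Standalone check of the decoder head sizes, with illustrative example values.
max_vertex, atom_dim, nedges = 9, 5, 3

n_pairs = max_vertex * (max_vertex - 1) // 2   # 9 * 8 // 2 = 36 upper-triangle entries
adj_out = n_pairs * nedges                     # 36 * 3 = 108 adjacency logits
node_out = max_vertex * atom_dim               # 9 * 5 = 45 node-feature logits
print(adj_out, node_out)                       # -> 108 45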
Example #2
 def __init__(self, feat_dim, nedges, out_size=32, depth=2, pooling='max'):
     super(EdgeGraphLayer, self).__init__()
     self.feat_dim = feat_dim
     self.edge_dim = nedges
     self.out_size = out_size
     self.update_layers = nn.ModuleList()
     self.input_layer = nn.Linear(self.feat_dim, self.out_size)
     self.depth = depth
     for d in range(self.depth):
         self.update_layers.append(
             FCLayer(self.out_size + self.edge_dim, self.out_size))
     self.pooling = get_pooling(pooling)
     self.readout = FCLayer(self.out_size, self.out_size)
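Every example on this page builds on an FCLayer helper that is not reproduced here. Inferring only from the keyword arguments passed to it in these snippets (in_size, out_size, activation, dropout, b_norm, bias), a minimal hypothetical stand-in could look like the sketch below; the real implementation may differ.

import torch.nn as nn

# Hypothetical stand-in for the FCLayer used throughout these examples:
# Linear + optional batch norm, activation and dropout.
class FCLayer(nn.Module):
    def __init__(self, in_size, out_size, activation='relu', dropout=0.,
                 b_norm=False, bias=True, **kwargs):
        super().__init__()
        self.out_size = out_size
        # accept either the string 'relu' or an activation module instance
        act = nn.ReLU() if activation == 'relu' else activation
        blocks = [nn.Linear(in_size, out_size, bias=bias)]
        if b_norm:
            blocks.append(nn.BatchNorm1d(out_size))
        blocks.append(act)
        if dropout:
            blocks.append(nn.Dropout(dropout))
        self.net = nn.Sequential(*blocks)

    def forward(self, x):
        return self.net(x)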
Example #3
 def __init__(self,
              in_size,
              kernel_size,
              G_size=None,
              bias=False,
              normalize=False,
              activation='relu',
              dropout=0.,
              b_norm=False,
              pooling='sum',
              pack_batch=False):
     # Accepted methods for pooling are avg, max and sum
     super().__init__()
     self.in_size = in_size
     self.out_size = kernel_size
     self.G_size = G_size
     self._pooling = get_pooling(pooling)
     self.pack_batch = pack_batch
     self.net = FCLayer(self.in_size,
                        self.out_size,
                        activation=activation,
                        bias=bias,
                        dropout=dropout,
                        b_norm=b_norm)
     self.normalize = normalize
     self.reset_parameters()
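The comment above names avg, max and sum as the accepted pooling methods. get_pooling itself is not shown; a rough guess at its behaviour for those three cases (the attention/gated variants used in Example #10 are not covered) is a factory returning a callable that reduces the node dimension:

import torch

# Guessed behaviour of get_pooling for the simple cases named above; illustrative only.
def get_pooling(name, dim=1):
    if name == 'sum':
        return lambda x: torch.sum(x, dim=dim)
    if name == 'avg':
        return lambda x: torch.mean(x, dim=dim)
    if name == 'max':
        return lambda x: torch.max(x, dim=dim)[0]
    raise ValueError("unknown pooling: %s" % name)

pool = get_pooling('max')
x = torch.randn(2, 10, 32)   # (batch, nodes, features)
print(pool(x).shape)         # -> torch.Size([2, 32])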
Example #4
    def __init__(self, g_size, in_size, out_size, config):
        super(Net, self).__init__()
        self.in_size = in_size
        self.out_size = out_size

        self.conv1 = GNNBlock(g_size, in_size, **config["conv_before1"])
        self.conv2 = GNNBlock(g_size, self.conv1.out_size,
                              **config["conv_before2"])
        self.hpool, self.hpool_arch = get_hpool(**config["hpool"])
        hsize = self.conv2.out_size
        if self.hpool_arch is not None:
            self.hpool = self.hpool(input_dim=hsize)
        if self.hpool_arch == 'laplacian':
            hsize = self.hpool.cluster_dim

        self.conv3 = GNNBlock(None, hsize, **config["conv_after1"])

        in_size = self.conv3.out_size
        self.gpooler = get_gpool(input_dim=in_size, **config["gpool"])

        self.fc_layers = nn.ModuleList()
        for conf in config["fclayers"]:
            fc = FCLayer(in_size=in_size, **conf)
            self.fc_layers.append(fc)
            in_size = conf["out_size"]
        self.out_layers = nn.Linear(in_size, self.out_size)
Example #5
    def __init__(self,
                 input_dim,
                 cluster_dim,
                 hidden_dim=None,
                 attn='cos',
                 hop=-1,
                 reg_mode=0,
                 concat=False,
                 strict_leader=True,
                 GLayer=GraphLayer,
                 lap_hop=0,
                 sigma=0.8,
                 **kwargs):
        net = GLayer(input_dim, input_dim, activation='relu')
        super(LaPool, self).__init__(input_dim, -1, net)
        self.cur_S = None
        self.leader_idx = []
        self.cluster_dim = cluster_dim
        self.attn_softmax = Sparsegen(dim=-1, sigma=sigma)
        self.attn_net = cosine_attn  # using cosine attention
        if attn == 'dot':
            self.attn_net = dot_attn

        self.concat = concat
        self.feat_update = FCLayer(in_size=input_dim * int(1 + self.concat),
                                   out_size=self.cluster_dim,
                                   activation='relu',
                                   **kwargs)
        self.k = hidden_dim
        self.reg_mode = reg_mode
        self.hop = hop
        self.lap_hop = lap_hop
        self.strict_leader = strict_leader
Example #6
 def __init__(self, z_dim, layers_dim=[64, 64, 32], dropout=0., **kwargs):
     super(MLPdiscriminator, self).__init__()
     self.out_dim = 1
     layers = []
     for in_dim, out_dim in zip([z_dim] + layers_dim[:-1], layers_dim):
         layers.append(FCLayer(in_dim, out_dim, dropout=dropout, **kwargs))
     self.layers = nn.Sequential(*layers)
     self.output_layer = nn.Sequential(nn.Linear(out_dim, 1), nn.Sigmoid())
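A possible way to exercise this discriminator (hypothetical: the excerpt does not include forward(), so the sketch chains the two sub-modules directly and assumes an FCLayer like the stand-in after Example #2):

import torch

disc = MLPdiscriminator(z_dim=16, layers_dim=[64, 64, 32], dropout=0.1)
z = torch.randn(8, 16)                        # a batch of 8 latent codes
scores = disc.output_layer(disc.layers(z))    # shape (8, 1), values in (0, 1)
print(scores.shape)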
Example #7
    def __init__(self, input_dim, out_dim, nedges, **config):
        super(Encoder, self).__init__()
        self.input_dim = input_dim
        self.out_dim = out_dim
        self.nedges = nedges
        self.conv_layers1 = nn.ModuleList()
        self.conv_layers2 = nn.ModuleList()
        self.pool_loss = 0
        self._embedding_layer = None
        if config.get('embed_dim'):
            self._embedding_layer = nn.Embedding(self.input_dim,
                                                 config['embed_dim'])
            input_dim = config.get('embed_dim', self.input_dim)
        if nedges > 1:
            self.edge_layer = EdgeGraphLayer(self.input_dim, nedges,
                                             **config.get("edge_layer", {}))
            input_dim = self.edge_layer.output_dim

        conv_before_dims = config['conv_before']
        for conf in conv_before_dims:
            self.conv_layers1.append(GraphLayer(input_dim, **conf))
            input_dim = conf['kernel_size']

        self.hpool, self.hpool_arch = get_hpool(**config["hpool"])
        if self.hpool_arch is not None:
            self.hpool = self.hpool(input_dim=input_dim)
        if self.hpool_arch == 'laplacian':
            input_dim = self.hpool.cluster_dim

        conv_after_dims = config['conv_after']
        for conf in conv_after_dims:
            self.conv_layers2.append(GraphLayer(input_dim, **conf))
            input_dim = conf['kernel_size']

        self.gpooler = get_gpool(input_dim=input_dim, **config["gpool"])
        self.linear_layers = nn.ModuleList()
        for conf in (config.get("fc_layers") or []):
            self.linear_layers.append(FCLayer(input_dim, **conf))
            input_dim = conf["out_size"]

        self.output_layer = nn.Linear(input_dim, self.out_dim)
Example #8
 def __init__(self,
              input_dim,
              dropk=None,
              cluster_dim=None,
              strict_path=False,
              attn=1,
              hop=3,
              reg_mode=1,
              concat=False,
              strict_leader=True,
              GLayer=GraphLayer,
              lap_hop=1,
              sigma=0.5,
              **kwargs):
     net = GLayer(input_dim, input_dim, activation='relu')
     super(LaplacianPool, self).__init__(input_dim, -1, net)
     self.cur_S = None
     self.alpha = kwargs.pop("alpha", 1)
     self.leader_idx = []
     self.cluster_dim = cluster_dim or self.input_dim
     self.attn_softmax = Sparsegen(dim=-1, sigma=sigma)
     self.attn_mode = attn
     self.strict_path = strict_path
     if attn == 1:
         self.attn_net = cosine_attn  # using cosine attention
     else:
         self.attn_net = nn.Sequential(
             nn.Linear(input_dim * 2, 1),
             nn.LeakyReLU())  # score predicted from the concatenated node features
     self.lap_hop = lap_hop
     self.concat = concat
     self.feat_update = FCLayer(in_size=input_dim * int(1 + self.concat),
                                out_size=self.cluster_dim,
                                activation='relu',
                                **kwargs)
     self.hop = hop
     self.dropk = dropk
     self.reg_mode = reg_mode
     self.strict_leader = strict_leader
Example #9
    def __init__(self,
                 z_dim,
                 node_feat_dim,
                 nedges=1,
                 max_vertex=50,
                 layers_dim=[256, 512],
                 nodes_dim=[],
                 graph_dim=[],
                 dropout=0.,
                 activation="relu",
                 other_feat_dim=0,
                 **kwargs):
        super(Decoder, self).__init__()
        self.z_dim = z_dim
        self.graph_size = max_vertex
        self.node_feat_dim = node_feat_dim
        self.feat_dim = other_feat_dim
        self.nedges = nedges

        layers = []
        for in_dim, out_dim in zip([z_dim] + layers_dim[:-1], layers_dim):
            layers.append(
                FCLayer(in_dim, out_dim, activation=activation, b_norm=True))
        self.layers = nn.Sequential(*layers)

        adj_layers = []
        in_dim = out_dim
        for adj_dim in graph_dim:
            adj_layers.append(
                FCLayer(in_dim,
                        adj_dim,
                        dropout=dropout,
                        activation=activation,
                        b_norm=True,
                        bias=False))
            in_dim = adj_dim

        nnodes = self.graph_size * (self.graph_size - 1) // 2  # number of upper-triangle entries
        adj_layers += [nn.Linear(in_dim, nnodes * self.nedges)]
        self.adj_layer = nn.Sequential(*adj_layers)

        node_layers = []
        in_dim = layers_dim[-1]
        for n_dim in nodes_dim:
            node_layers.append(
                FCLayer(in_dim,
                        n_dim,
                        dropout=dropout,
                        activation=activation,
                        b_norm=True,
                        bias=False))
            in_dim = n_dim

        node_layers += [
            nn.Linear(in_dim, self.graph_size * self.node_feat_dim)
        ]
        self.nodes_layer = nn.Sequential(*node_layers)

        if self.feat_dim:
            self.feat_layer = nn.Linear(layers_dim[-1],
                                        self.graph_size * self.feat_dim)
        else:
            self.feat_layer = None
        ind = np.ravel_multi_index(np.triu_indices(self.graph_size, 1),
                                   (self.graph_size, self.graph_size))
        # fixed mask selecting the upper-triangular entries of the flattened
        # adjacency matrix, repeated for every edge type
        self.upper_tree = (torch.zeros(self.graph_size**2)
                           .index_fill(0, torch.from_numpy(ind), 1)
                           .contiguous()
                           .unsqueeze(-1)
                           .expand(self.graph_size**2, self.nedges)
                           .byte())
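The last statement precomputes a binary mask over the flattened graph_size x graph_size adjacency matrix that keeps only the upper-triangular cells, repeated for each edge type. A small standalone check of the same construction with illustrative sizes (graph_size=4, nedges=2):

import numpy as np
import torch

graph_size, nedges = 4, 2
ind = np.ravel_multi_index(np.triu_indices(graph_size, 1),
                           (graph_size, graph_size))
mask = (torch.zeros(graph_size ** 2)
        .index_fill(0, torch.from_numpy(ind), 1)
        .unsqueeze(-1)
        .expand(graph_size ** 2, nedges)
        .byte())
print(ind)                # [ 1  2  3  6  7 11] -> the 6 upper-triangle cells of a 4x4 matrix
print(mask.sum().item())  # 12 -> 6 positions x 2 edge types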
Example #10
    def __init__(self,
                 input_dim,
                 out_dim,
                 feat_dim=0,
                 nedges=1,
                 gather_dim=72,
                 conv_dims=[64, 64],
                 conv_dims_after=[128, 128],
                 linear_dim=[32],
                 dropout=0.2,
                 gather="agg",
                 pool_arch={},
                 pool_loss=False,
                 **kwargs):
        super(Encoder, self).__init__()
        # fall back to GLayer (a GIN-style convolution) when no GraphConv is specified
        GraphLayer = kwargs.pop("GraphConv", GLayer)
        activation = kwargs.pop("activation", nn.LeakyReLU())
        self.input_dim = input_dim
        self.out_dim = out_dim
        self.feat_dim = feat_dim  # extra per-node features, when available
        self.pool_loss = pool_loss
        self.nedges = nedges
        self.conv_layers1 = nn.ModuleList()
        self.conv_layers2 = nn.ModuleList()
        input_dim += feat_dim
        if nedges > 1:
            self.edge_layer = EdgeGraphLayer(input_dim,
                                             nedges,
                                             method='cat',
                                             b_norm=False,
                                             dropout=0.0,
                                             activation=activation)
            input_dim = self.edge_layer.output_dim + self.feat_dim
        for cdim in conv_dims:
            self.conv_layers1.append(
                GraphLayer(input_dim,
                           kernel_size=cdim,
                           b_norm=False,
                           dropout=dropout,
                           activation=activation))
            input_dim = cdim

        self.pool_layer = get_graph_coarsener(**pool_arch)
        self.pool_arch = pool_arch.get("arch", "")
        if self.pool_layer is not None:
            self.pool_layer = self.pool_layer(input_dim)
            if self.pool_arch == "laplacian":
                input_dim = self.pool_layer.cluster_dim

        for cdim in conv_dims_after:
            self.conv_layers2.append(
                GraphLayer(input_dim,
                           kernel_size=cdim,
                           b_norm=False,
                           dropout=dropout,
                           activation=activation))
            input_dim = cdim

        if gather == "agg":
            self.agg_layer = AggLayer(input_dim, gather_dim, dropout)
        elif gather in ["attn", "gated"]:
            self.agg_layer = get_pooling("attn",
                                         input_dim=input_dim,
                                         output_dim=gather_dim,
                                         dropout=dropout)
        else:
            gather_dim = input_dim
            self.agg_layer = get_pooling(gather)
        # stacked fully connected layers
        input_dim = gather_dim

        self.linear_layers = nn.ModuleList()
        for ldim in linear_dim:
            self.linear_layers.append(
                FCLayer(input_dim, ldim, activation=activation, **kwargs))
            input_dim = ldim

        self.output_layer = nn.Linear(input_dim, out_dim)
        self.pool_inspect = None
        self.pooling_loss = []
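To summarize the encoder above, the feature dimension flows through the default arguments roughly as sketched below (input and output sizes are made-up illustrative values; the edge layer and hierarchical pooling branches are skipped):

# Dimension flow through Example #10 with its defaults; illustrative numbers only.
input_dim, feat_dim, out_dim = 40, 0, 10      # arbitrary example sizes
dims = [input_dim + feat_dim]                 # 40
dims += [64, 64]                              # conv_dims
dims += [128, 128]                            # conv_dims_after
dims += [72]                                  # gather_dim ("agg" readout)
dims += [32]                                  # linear_dim
dims += [out_dim]                             # final nn.Linear
print(" -> ".join(map(str, dims)))            # 40 -> 64 -> 64 -> 128 -> 128 -> 72 -> 32 -> 10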