Code example #1
average_N = np.ceil(np.mean([a.shape[-1] for a in A_train]))  # Average number of nodes in dataset

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
             sparse=True)
I_in = Input(
    tensor=tf.placeholder(tf.int32, shape=(None, ), name='segment_ids_in'))
target = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))

# Block 1
gc1 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_in, A_in])
X_1, A_1, I_1, M_1 = MinCutPool(
    k=int(average_N // 2),
    h=mincut_H,
    activation=activ,
    kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in])

# Block 2
gc2 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_1, A_1])
X_2, A_2, I_2, M_2 = MinCutPool(
    k=int(average_N // 4),
    h=mincut_H,
    activation=activ,
    kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1])
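
The excerpt ends mid-model. A plausible continuation (an assumption, not the original file's code) adds one more skip-convolution, a global readout over the pooled graphs, and a softmax head:

# Assumed continuation sketch (GlobalAvgPool, Dense and Model imports assumed):
# convolve the twice-pooled graphs, average node features per graph via the
# segment ids, then classify.
gc3 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_2, A_2])
pooled = GlobalAvgPool()([gc3, I_2])
output = Dense(n_out, activation='softmax')(pooled)
model = Model(inputs=[X_in, A_in, I_in], outputs=output)
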
Code example #2
File: BDGC_disjoint.py  Project: yaniv256/spektral
A_train = [normalized_adjacency(a) for a in A_train]
A_val = [normalized_adjacency(a) for a in A_val]
A_test = [normalized_adjacency(a) for a in A_test]

# Parameters
F = X_train[0].shape[-1]  # Dimension of node features
n_out = y_train[0].shape[-1]  # Dimension of the target

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(F, ), name='X_in')
A_in = Input(shape=(None,), sparse=True)
I_in = Input(shape=(), name='segment_ids_in', dtype=tf.int32)

X_1 = GraphConvSkip(32, activation='relu')([X_in, A_in])
X_1, A_1, I_1 = TopKPool(ratio=0.5)([X_1, A_in, I_in])
X_2 = GraphConvSkip(32, activation='relu')([X_1, A_1])
X_2, A_2, I_2 = TopKPool(ratio=0.5)([X_2, A_1, I_1])
X_3 = GraphConvSkip(32, activation='relu')([X_2, A_2])
X_3 = GlobalAvgPool()([X_3, I_2])
output = Dense(n_out, activation='softmax')(X_3)

# Build model
model = Model(inputs=[X_in, A_in, I_in], outputs=output)
opt = Adam(lr=learning_rate)
loss_fn = CategoricalCrossentropy()
acc_fn = CategoricalAccuracy()


@tf.function(
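
The snippet is cut off at the decorator. A typical training step built from the model, loss and optimizer defined above might look like the following sketch; the decorator arguments and the function body are assumptions, not the original code:

# Assumed training-step sketch: forward pass in disjoint mode, categorical
# cross-entropy plus any layer regularization losses, one optimizer update,
# and running accuracy.
@tf.function(experimental_relax_shapes=True)
def train_step(X_, A_, I_, y_):
    with tf.GradientTape() as tape:
        predictions = model([X_, A_, I_], training=True)
        loss = loss_fn(y_, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    acc_fn.update_state(y_, predictions)
    return loss
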
Code example #3
    def __init__(self,
                 X,
                 adj,
                 adj_n,
                 hidden_dim=128,
                 latent_dim=10,
                 dec_dim=None,
                 adj_dim=32,
                 decA="DBL",
                 layer_enc="GAT"):
        super(SCGAE, self).__init__()
        if dec_dim is None:
            dec_dim = [64, 256, 512]
        self.latent_dim = latent_dim
        self.X = X
        self.adj = np.float32(adj)
        self.adj_n = np.float32(adj_n)
        self.n_sample = X.shape[0]
        self.in_dim = X.shape[1]
        self.sparse = False

        initializer = GlorotUniform(seed=7)

        # Encoder
        X_input = Input(shape=self.in_dim)
        h = Dropout(0.2)(X_input)
        if layer_enc == "GAT":
            A_in = Input(shape=self.n_sample)
            h = GraphAttention(channels=hidden_dim,
                               attn_heads=1,
                               kernel_initializer=initializer,
                               activation="relu")([h, A_in])
            z_mean = GraphAttention(channels=latent_dim,
                                    kernel_initializer=initializer,
                                    attn_heads=1)([h, A_in])
        elif layer_enc == "GCN":
            A_in = Input(shape=self.n_sample)
            h = GraphConvSkip(channels=hidden_dim,
                              kernel_initializer=initializer,
                              activation="relu")([h, A_in])
            z_mean = GraphConvSkip(channels=latent_dim,
                                   kernel_initializer=initializer)([h, A_in])
        elif layer_enc == "TAG":
            self.sparse = True
            A_in = Input(shape=self.n_sample, sparse=True)
            h = TAGConv(channels=hidden_dim,
                        kernel_initializer=initializer,
                        activation="relu")([h, A_in])
            z_mean = TAGConv(channels=latent_dim,
                             kernel_initializer=initializer)([h, A_in])

        self.encoder = Model(inputs=[X_input, A_in],
                             outputs=z_mean,
                             name="encoder")
        clustering_layer = ClusteringLayer(name='clustering')(z_mean)
        self.cluster_model = Model(inputs=[X_input, A_in],
                                   outputs=clustering_layer,
                                   name="cluster_encoder")

        # Adjacency matrix decoder
        if decA == "DBL":
            dec_in = Input(shape=latent_dim)
            h = Dense(units=adj_dim, activation=None)(dec_in)
            h = Bilinear()(h)
            dec_out = Lambda(lambda z: tf.nn.sigmoid(z))(h)
            self.decoderA = Model(inputs=dec_in,
                                  outputs=dec_out,
                                  name="decoder1")
        elif decA == "BL":
            dec_in = Input(shape=latent_dim)
            h = Bilinear()(dec_in)
            dec_out = Lambda(lambda z: tf.nn.sigmoid(z))(h)
            self.decoderA = Model(inputs=dec_in,
                                  outputs=dec_out,
                                  name="decoder1")
        elif decA == "IP":
            dec_in = Input(shape=latent_dim)
            dec_out = Lambda(
                lambda z: tf.nn.sigmoid(tf.matmul(z, tf.transpose(z))))(dec_in)
            self.decoderA = Model(inputs=dec_in,
                                  outputs=dec_out,
                                  name="decoder1")
        else:
            self.decoderA = None

        # Expression matrix decoder

        decx_in = Input(shape=latent_dim)
        h = Dense(units=dec_dim[0], activation="relu")(decx_in)
        h = Dense(units=dec_dim[1], activation="relu")(h)
        h = Dense(units=dec_dim[2], activation="relu")(h)
        decx_out = Dense(units=self.in_dim)(h)
        self.decoderX = Model(inputs=decx_in,
                              outputs=decx_out,
                              name="decoderX")
Code example #4
File: GC_main.py  Project: chqlee/decimation-pooling

def decimation_pooling_fn(x_):
    X_, D_, I_ = x_
    X_pooled = K.dot(D_, X_)
    I_pooled = K.cast(
        K.dot(D_,
              K.cast(I_, tf.float32)[..., None])[..., 0], tf.int32)
    return [X_pooled, I_pooled]


decimation_pooling_op = Lambda(decimation_pooling_fn)
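
The Lambda above multiplies both the node features and the integer segment ids by a precomputed decimation matrix. A small standalone illustration of that multiplication (not part of the original project):

# Standalone illustration: a binary decimation matrix selects a subset of node
# rows and carries over their graph-membership ids.
import numpy as np
X = np.arange(12, dtype=np.float32).reshape(4, 3)      # 4 nodes, 3 features
I = np.array([0, 0, 1, 1], dtype=np.int32)             # graph id of each node
D = np.array([[1, 0, 0, 0],
              [0, 0, 1, 0]], dtype=np.float32)          # keep nodes 0 and 2
X_pooled = D @ X                                        # rows 0 and 2 of X
I_pooled = (D @ I[:, None].astype(np.float32))[:, 0].astype(np.int32)  # [0, 1]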

# Block 1
X_1 = GraphConvSkip(P['n_channels'],
                    activation=P['activ'],
                    kernel_regularizer=l2(P['GNN_l2']))([X_in, A_in[0]])
X_1, I_1 = decimation_pooling_op([X_1, D_in[0], I_in])

# Block 2
X_2 = GraphConvSkip(P['n_channels'],
                    activation=P['activ'],
                    kernel_regularizer=l2(P['GNN_l2']))([X_1, A_in[1]])
X_2, I_2 = decimation_pooling_op([X_2, D_in[1], I_1])

# Block 3
X_3 = GraphConvSkip(P['n_channels'],
                    activation=P['activ'],
                    kernel_regularizer=l2(P['GNN_l2']))([X_2, A_in[2]])

# Output block
Code example #5
# MODEL DEFINITION
X_in = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
             name='A_in')
I_in = Input(
    tensor=tf.placeholder(tf.int32, shape=(None, ), name='segment_ids_in'))
X_target = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_target'))
A_target = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
                 name='A_target')
A = normalized_adjacency(A)
n_out = X.shape[-1]
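
The unpooling step below relies on a helper, upsampling_from_matrix_op, that is not included in this excerpt. One plausible sketch, assuming M2 is the (N x K) cluster-assignment matrix returned by MinCutPool, broadcasts the pooled features back onto the original nodes:

# Assumed sketch of the missing helper: X_up = M @ X_pool maps the K pooled
# rows back onto the N original nodes; the pooled adjacency and segment ids
# are passed through unchanged.
def upsampling_from_matrix(x_):
    X_pool, A_pool, I_pool, M_ = x_
    X_up = K.dot(M_, X_pool)
    return [X_up, A_pool, I_pool]

upsampling_from_matrix_op = Lambda(upsampling_from_matrix)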

# encoder
X1 = GraphConvSkip(gnn_channels, activation=ACTIV)([X_in, A_in])
X1 = GraphConvSkip(gnn_channels, activation=ACTIV)([X1, A_in])
# pooling
X2, A2, I2, M2 = MinCutPool(k=n_nodes // 4, h=gnn_channels)([X1, A_in, I_in])
# unpooling
X3, A3, I3 = upsampling_from_matrix_op([X2, A2, I2, M2])
# decoder
X3 = GraphConvSkip(gnn_channels, activation=ACTIV)([X3, A_in])
X3 = GraphConvSkip(gnn_channels, activation=ACTIV)([X3, A_in])
X3 = GraphConvSkip(n_out)([X3, A_in])

model = Model([X_in, A_in, I_in], [X3])
model.compile('adam', 'mse', target_tensors=[X_target])

# TRAINING
sess = K.get_session()
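
The excerpt stops right after grabbing the backend session. With placeholder-backed Inputs, training typically proceeds by minimizing the compiled loss through the session with an explicit feed_dict; the following is a rough continuation sketch in which learning_rate, epochs, X, I and A_sparse_value (the adjacency converted to a tf.SparseTensorValue) are all assumptions:

# Assumed continuation: optimize the compiled MSE loss in the TF1 session.
train_step = tf.train.AdamOptimizer(learning_rate).minimize(model.total_loss)
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
    feed_dict = {X_in: X, A_in: A_sparse_value, I_in: I, X_target: X}
    _, loss_value = sess.run([train_step, model.total_loss], feed_dict=feed_dict)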