################################################################################ A, X, y, _, _, _ = citation.load_data('cora') A_norm = normalized_adjacency(A) X = X.todense() F = X.shape[-1] y = np.argmax(y, axis=-1) n_clusters = y.max() + 1 ################################################################################ # MODEL ################################################################################ X_in = Input(shape=(F, ), name='X_in') A_in = Input(shape=(None, ), name='A_in', sparse=True) X_1 = GraphConvSkip(16, activation='elu')([X_in, A_in]) X_1, A_1, S = MinCutPool(n_clusters, return_mask=True)([X_1, A_in]) model = Model([X_in, A_in], [X_1, S]) ################################################################################ # TRAINING ################################################################################ # Setup inputs = [X, sp_matrix_to_sp_tensor(A_norm)] opt = tf.keras.optimizers.Adam(learning_rate=lr) # Fit model loss_history = [] nmi_history = [] for _ in tqdm(range(epochs)): outs = train_step(inputs)
################################################################################ X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in')) A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)), sparse=True) I_in = Input( tensor=tf.placeholder(tf.int32, shape=(None, ), name='segment_ids_in')) target = Input( tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target')) # Block 1 gc1 = GraphConvSkip(n_channels, activation=activ, kernel_regularizer=l2(GNN_l2))([X_in, A_in]) X_1, A_1, I_1, M_1 = MinCutPool( k=int(average_N // 2), h=mincut_H, activation=activ, kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in]) # Block 2 gc2 = GraphConvSkip(n_channels, activation=activ, kernel_regularizer=l2(GNN_l2))([X_1, A_1]) X_2, A_2, I_2, M_2 = MinCutPool( k=int(average_N // 4), h=mincut_H, activation=activ, kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1]) # Block 3 X_3 = GraphConvSkip(n_channels,
# DATA (Spektral 1.x dataset API)
dataset = Cora()
# Single-graph dataset: pull adjacency, features, and labels from graph 0.
adj, x, y = dataset[0].a, dataset[0].x, dataset[0].y
a_norm = normalized_adjacency(adj)        # normalized adjacency (see helper)
a_norm = sp_matrix_to_sp_tensor(a_norm)   # convert once to a tf SparseTensor
F = dataset.n_node_features
y = np.argmax(y, axis=-1)                 # one-hot labels -> integer class ids
n_clusters = y.max() + 1                  # one cluster per ground-truth class

################################################################################
# MODEL
################################################################################
x_in = Input(shape=(F, ), name="X_in")
a_in = Input(shape=(None, ), name="A_in", sparse=True)

# One skip-connected conv, then MinCut pooling; return_selection=True also
# returns the soft cluster-assignment matrix s_1.
x_1 = GCSConv(16, activation="elu")([x_in, a_in])
x_1, a_1, s_1 = MinCutPool(n_clusters, return_selection=True)([x_1, a_in])

model = Model([x_in, a_in], [x_1, s_1])

################################################################################
# TRAINING
################################################################################
# Setup
inputs = [x, a_norm]
opt = tf.keras.optimizers.Adam(learning_rate=lr)  # `lr` defined elsewhere

# Fit model. `train_step` is defined elsewhere in the file; histories are
# consumed by code outside this chunk.
loss_history = []
nmi_history = []
for _ in tqdm(range(epochs)):
    outs = train_step(inputs)
# DATA (Spektral 1.x dataset API)
dataset = Cora()
# Single-graph dataset: pull adjacency, features, and labels from graph 0.
adj, x, y = dataset[0].a, dataset[0].x, dataset[0].y
a_norm = normalized_adjacency(adj)        # normalized adjacency (see helper)
a_norm = sp_matrix_to_sp_tensor(a_norm)   # convert once to a tf SparseTensor
F = dataset.n_node_features
y = np.argmax(y, axis=-1)                 # one-hot labels -> integer class ids
n_clusters = y.max() + 1                  # one cluster per ground-truth class

################################################################################
# MODEL
################################################################################
x_in = Input(shape=(F, ), name='X_in')
a_in = Input(shape=(None, ), name='A_in', sparse=True)

# One skip-connected conv, then MinCut pooling.
x_1 = GCSConv(16, activation='elu')([x_in, a_in])
# FIX: in the Spektral 1.x API used throughout this block (Cora dataset,
# GCSConv), the MinCutPool keyword is `return_selection`, not the legacy
# `return_mask` — the old name raises TypeError here. `return_selection=True`
# makes the layer also output the soft cluster-assignment matrix s_1.
x_1, a_1, s_1 = MinCutPool(n_clusters, return_selection=True)([x_1, a_in])

model = Model([x_in, a_in], [x_1, s_1])

################################################################################
# TRAINING
################################################################################
# Setup
inputs = [x, a_norm]
opt = tf.keras.optimizers.Adam(learning_rate=lr)  # `lr` defined elsewhere

# Fit model. `train_step` is defined elsewhere in the file; histories are
# consumed by code outside this chunk.
loss_history = []
nmi_history = []
for _ in tqdm(range(epochs)):
    outs = train_step(inputs)
# Labels: one-hot -> integer class ids; cluster count = number of classes.
y = np.argmax(y, axis=-1)
n_clust = y.max() + 1

################################################################################
# MODEL (TF1 style: placeholders wrapped in Keras Input layers)
################################################################################
X_in = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
             name='A_in',
             sparse=True)

X_1 = GraphConvSkip(gnn_channels,
                    kernel_initializer='he_normal',
                    activation=gnn_activ)([X_in, A_in])
# Pool into n_clust clusters; h is the hidden size of the pooling MLP.
pool1, adj1, S = MinCutPool(k=n_clust,
                            h=mlp_channels,
                            activation=mlp_activ)([X_1, A_in])

model = Model([X_in, A_in], [pool1, S])
# No supervised loss: training relies on MinCutPool's internal (unsupervised)
# losses, hence `None` as the loss argument.
model.compile('adam', None)

################################################################################
# TRAINING
################################################################################
# Setup — TF1 session-based training; the session is driven by code outside
# this chunk.
sess = K.get_session()
loss = model.total_loss  # The full unsupervised loss of MinCutPool
mincut_loss = model.losses[0]  # The minCUT loss of MinCutPool
ortho_loss = model.losses[1]  # The orthogonality loss of MinCutPool
opt = tf.train.AdamOptimizer(learning_rate=lr)  # `lr` defined elsewhere
# Features/labels prep: dense features for the Input layer; one-hot labels ->
# integer class ids; cluster count = number of classes.
X = X.todense()
n_feat = X.shape[-1]
y = np.argmax(y, axis=-1)
n_clust = y.max() + 1

################################################################################
# MODEL
################################################################################
X_in = Input(shape=(n_feat, ), name='X_in')
A_in = Input(shape=(None, ), name='A_in', sparse=True)

X_1 = GraphConvSkip(gnn_channels,
                    kernel_initializer='he_normal',
                    activation=gnn_activ)([X_in, A_in])
# return_mask=True makes the layer also output the soft cluster assignments S.
pool1, adj1, S = MinCutPool(n_clust,
                            return_mask=True,
                            activation=mlp_activ)([X_1, A_in])

model = Model([X_in, A_in], [pool1, S])
# No supervised loss: training relies on MinCutPool's internal (unsupervised)
# losses, hence `None` as the loss argument.
model.compile('adam', None)

################################################################################
# TRAINING
################################################################################
# Setup
loss = model.total_loss  # The full unsupervised loss of MinCutPool
mincut_loss = model.losses[0]  # The minCUT loss of MinCutPool
ortho_loss = model.losses[1]  # The orthogonality loss of MinCutPool
# `A_norm` is computed elsewhere in the file (normalized adjacency).
inputs = [X, sp_matrix_to_sp_tensor(A_norm)]
opt = tf.keras.optimizers.Adam(learning_rate=lr)  # `lr` defined elsewhere