Example #1
def get_optimizer(model_str, model, discriminator, placeholders, pos_weight,
                  norm, d_real, num_nodes):
    if model_str == 'arga_ae':
        d_fake = discriminator.construct(model.embeddings, reuse=True)
        opt = OptimizerAE(preds=model.reconstructions,
                          labels=tf.reshape(
                              tf.sparse_tensor_to_dense(
                                  placeholders['adj_orig'],
                                  validate_indices=False), [-1]),
                          pos_weight=pos_weight,
                          norm=norm,
                          d_real=d_real,
                          d_fake=d_fake)
    elif model_str == 'arga_vae':
        opt = OptimizerVAE(preds=model.reconstructions,
                           labels=tf.reshape(
                               tf.sparse_tensor_to_dense(
                                   placeholders['adj_orig'],
                                   validate_indices=False), [-1]),
                           model=model,
                           num_nodes=num_nodes,
                           pos_weight=pos_weight,
                           norm=norm,
                           d_real=d_real,
                           d_fake=discriminator.construct(model.embeddings,
                                                          reuse=True))
    return opt
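The `OptimizerAE` class itself is not shown in these examples. As a rough sketch only (assuming the usual ARGA-style formulation: a `norm`-scaled, `pos_weight`-weighted cross-entropy over the flattened adjacency plus standard GAN losses on `d_real`/`d_fake`; the loss form and learning rate are assumptions), it might look like:

import tensorflow as tf  # TF 1.x

class OptimizerAE(object):
    # Minimal sketch of an adversarial graph-AE optimizer; not the original code.
    def __init__(self, preds, labels, pos_weight, norm, d_real, d_fake,
                 learning_rate=0.001):
        # norm-scaled, pos_weight-weighted reconstruction loss over the
        # flattened adjacency matrix.
        self.cost = norm * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(
                targets=labels, logits=preds, pos_weight=pos_weight))
        # Discriminator loss: real embeddings -> 1, generated embeddings -> 0.
        self.dc_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(d_real), logits=d_real)) + tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(d_fake), logits=d_fake))
        # Generator loss: try to fool the discriminator on generated embeddings.
        self.generator_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(d_fake), logits=d_fake))
        self.opt_op = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)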
Example #2
def get_optimizer(model_str, model, model_z2g, D_Graph, discriminator,
                  placeholders, pos_weight, norm, d_real, num_nodes, GD_real):
    if model_str == 'arga_ae':
        output = model.construct()
        embeddings = output[0]
        reconstructions = output[1]
        d_fake = discriminator.construct(embeddings, reuse=True)
        opt = OptimizerAE(preds=reconstructions,
                          labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                      validate_indices=False), [-1]),
                          pos_weight=pos_weight,
                          norm=norm,
                          d_real=d_real,
                          d_fake=d_fake)
    elif model_str == 'DBGAN':
        z2g = model_z2g.construct()
        g2z = model.construct()
        embeddings = g2z[0]
        reconstructions = g2z[1]
        d_fake = discriminator.construct(embeddings, reuse=True)
        GD_fake = D_Graph.construct(z2g, reuse=True)
        print('----------------------------',
              tf.shape(placeholders['features']),
              '----------------------------')
        opt = OptimizerCycle(preds=reconstructions,
                             labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                         validate_indices=False), [-1]),
                             pos_weight=pos_weight,
                             norm=norm,
                             d_real=d_real,
                             d_fake=d_fake,
                             GD_real=GD_real,
                             GD_fake=GD_fake,
                             preds_z2g=placeholders['real_distribution'],
                             labels_z2g=placeholders['real_distribution'],
                             preds_cycle=model_z2g.construct(inputs=embeddings, hidden=None, reuse=True),
                             labels_cycle=placeholders['features_dense'])
    return opt
Example #3
def get_optimizer(model_str, model, placeholders, num_nodes, alpha):
    opt = OptimizerAE(preds_attribute=model.attribute_reconstructions,
                      labels_attribute=tf.sparse_tensor_to_dense(placeholders['features']),
                      preds_structure=model.structure_reconstructions,
                      labels_structure=tf.sparse_tensor_to_dense(placeholders['adj_orig']),
                      alpha=alpha)

    return opt
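Here `alpha` presumably trades the attribute reconstruction term off against the structure reconstruction term. A plausible sketch of the combined objective (an assumption; the actual class may use cross-entropy rather than squared error):

import tensorflow as tf  # TF 1.x

def combined_ae_cost(preds_attribute, labels_attribute,
                     preds_structure, labels_structure, alpha):
    # Hypothetical alpha-weighted objective; names and loss form assumed.
    attribute_loss = tf.reduce_mean(tf.square(labels_attribute - preds_attribute))
    structure_loss = tf.reduce_mean(tf.square(labels_structure - preds_structure))
    return alpha * attribute_loss + (1.0 - alpha) * structure_loss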
Example #4
def get_hyper_optimizer(model_str, model, discriminator, placeholders,
                        pos_weight, norm, H_orig, d_real):
    if model_str == 'hyper_arga_ae':
        d_fake = discriminator.construct(model.embeddings, reuse=True)
        opt = OptimizerAE(preds=model.reconstructions,
                          labels=tf.reshape(placeholders['H_orig'], [-1]),
                          pos_weight=pos_weight,
                          norm=norm,
                          d_real=d_real,
                          d_fake=d_fake)
    return opt
Example #5
def get_optimizer(model, placeholders, seq_len):
    opt = OptimizerAE(
        struct_preds=model.reconstructions,
        temporal_preds=model.reconstructions_tss,
        struct_labels=placeholders['struct_adj_origs'],
        temporal_labels=placeholders['temporal_adj_origs'],
        struct_pos_weights=placeholders['struct_pos_weights'],
        struct_norms=placeholders['struct_norms'],
        temporal_pos_weights=placeholders['temporal_pos_weights'],
        temporal_norms=placeholders['temporal_norms'],
        seq_len=seq_len)
    return opt
Example #6
def get_optimizer(model_str, model, placeholders, num_nodes, alpha):
    if model_str == 'gcn_ae':
        opt = OptimizerAE(preds_attribute=model.attribute_reconstructions,
                          labels_attribute=tf.sparse_tensor_to_dense(placeholders['features']),
                          preds_structure=model.structure_reconstructions,
                          labels_structure=tf.sparse_tensor_to_dense(placeholders['adj_orig']),
                          alpha=alpha)

    elif model_str == 'gcn_vae':
        opt = OptimizerVAE(preds=model.reconstructions,
                           labels=placeholders['features'],
                           model=model, num_nodes=num_nodes)
    return opt
Example #7
def get_optimizer(model, discriminator, placeholders, pos_weight, norm,
                  d_real, num_nodes, attr_labels_list):

    d_fake = discriminator.construct(model.embeddings, reuse=True)
#         pred_attrs=[model.attr0_logits,model.attr1_logits,model.attr2_logits,
#                     model.attr3_logits,model.attr4_logits]
    pred_attrs = [model.attr_logits, model.pri_logits]

    opt = OptimizerAE(preds=model.reconstructions,
                      labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],validate_indices=False), [-1]),
                      pos_weight=pos_weight,
                      pred_attrs=pred_attrs,
                      norm=norm,
                      d_real=d_real,
                      d_fake=d_fake,
                      attr_labels_list=attr_labels_list,
                      sample_list=model.sample)
    return opt
Example #8
def train_gcn(features, adj_train, train_edges, train_edges_false, test_edges,
              test_edges_false):
    # Settings
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_float('learning_rate', 0.005, 'Initial learning rate.')
    flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.')
    flags.DEFINE_integer('hidden1', 96, 'Number of units in hidden layer 1.')
    flags.DEFINE_integer('hidden2', 48, 'Number of units in hidden layer 2.')
    flags.DEFINE_float('weight_decay', 0.,
                       'Weight for L2 loss on embedding matrix.')
    flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
    flags.DEFINE_string('model', 'gcn_vae', 'Model string.')
    flags.DEFINE_integer('features', 1,
                         'Whether to use features (1) or not (0).')

    model_str = FLAGS.model

    # 1-D index array, used in the cost function to focus only on interactions with high confidence
    mask_index = construct_optimizer_list(features.shape[0], train_edges,
                                          train_edges_false)

    # Store original adjacency matrix (without diagonal entries) for later
    adj_orig = adj_train
    adj_orig = adj_orig - sp.dia_matrix(
        (adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
    adj_orig.eliminate_zeros()

    adj = adj_train

    if FLAGS.features == 0:
        features = sp.identity(features.shape[0])  # featureless

    # Some preprocessing
    adj_norm = preprocess_graph(adj)

    # Define placeholders
    placeholders = {
        'features': tf.sparse_placeholder(tf.float64),
        'adj': tf.sparse_placeholder(tf.float64),
        'adj_orig': tf.sparse_placeholder(tf.float64),
        'dropout': tf.placeholder_with_default(0., shape=())
    }

    num_nodes = adj.shape[0]

    features = sparse_to_tuple(features.tocoo())
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]

    # Create model
    model = None
    if model_str == 'gcn_ae':
        model = GCNModelAE(placeholders, num_features, features_nonzero)
    elif model_str == 'gcn_vae':
        model = GCNModelVAE(placeholders, num_features, num_nodes,
                            features_nonzero)

    pos_weight = 1
    norm = 1
    #pos_weight = train_edges_false.shape[0] / float(train_edges.shape[0])
    #norm = (train_edges.shape[0]+train_edges_false.shape[0]) / float(train_edges_false.shape[0]*train_edges_false.shape[0])

    # Optimizer
    with tf.name_scope('optimizer'):
        if model_str == 'gcn_ae':
            opt = OptimizerAE(preds=model.reconstructions,
                              labels=tf.reshape(
                                  tf.sparse_tensor_to_dense(
                                      placeholders['adj_orig'],
                                      validate_indices=False), [-1]),
                              pos_weight=pos_weight,
                              norm=norm,
                              mask=mask_index)
        elif model_str == 'gcn_vae':
            opt = OptimizerVAE(preds=model.reconstructions,
                               labels=tf.reshape(
                                   tf.sparse_tensor_to_dense(
                                       placeholders['adj_orig'],
                                       validate_indices=False), [-1]),
                               model=model,
                               num_nodes=num_nodes,
                               pos_weight=pos_weight,
                               norm=norm,
                               mask=mask_index)

    # Initialize session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    adj_label = adj_train + sp.eye(adj_train.shape[0])
    adj_label = sparse_to_tuple(adj_label)

    # Train model
    for epoch in range(FLAGS.epochs):

        t = time.time()
        # Construct feed dictionary
        feed_dict = construct_feed_dict(adj_norm, adj_label, features,
                                        placeholders)
        feed_dict.update({placeholders['dropout']: FLAGS.dropout})
        # Run single weight update
        outs = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)

        print("Epoch:", '%04d' % (epoch + 1), "train_loss=",
              "{:.5f}".format(outs[1]))

    print("Optimization Finished!")

    # return the embedding for each protein
    emb = sess.run(model.z_mean, feed_dict=feed_dict)
    return emb
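`construct_optimizer_list` is not shown. Given the comment above, it presumably flattens the high-confidence (row, col) pairs from `train_edges` and `train_edges_false` into 1-D indices of the `num_nodes * num_nodes` adjacency, so the cost can be masked to those entries. A minimal sketch under that assumption:

import numpy as np

def construct_optimizer_list(num_nodes, train_edges, train_edges_false):
    # Assumed behavior: (row, col) edge pairs -> indices into the flattened
    # adjacency, restricting the loss to trusted interactions.
    edges = np.concatenate([train_edges, train_edges_false], axis=0)
    return edges[:, 0] * num_nodes + edges[:, 1]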
Example #9
    def runner(self):
        model_str = FLAGS.model
        placeholders = [{
            'features': tf.sparse_placeholder(tf.float32),
            'adj': tf.sparse_placeholder(tf.float32),
            'adj_orig': tf.sparse_placeholder(tf.float32),
            'dropout': tf.placeholder_with_default(0., shape=()),
            'num_features': tf.placeholder(tf.float32),
            'features_nonzero': tf.placeholder(tf.float32),
            'pos_weight': tf.placeholder(tf.float32),
            'norm': tf.placeholder(tf.float32),
            'reward': tf.placeholder(tf.float32),
            'D_W1': tf.placeholder_with_default(
                tf.zeros([FLAGS.g_hidden2, FLAGS.d_hidden1]),
                shape=[FLAGS.g_hidden2, FLAGS.d_hidden1]),
            'D_W2': tf.placeholder_with_default(
                tf.zeros([FLAGS.d_hidden1, 1]), shape=[FLAGS.d_hidden1, 1]),
            'D_b1': tf.placeholder_with_default(
                tf.zeros([FLAGS.d_hidden1]), shape=[FLAGS.d_hidden1]),
            'D_b2': tf.placeholder_with_default(tf.zeros([1]), shape=[1]),
        }, {
            'features': tf.sparse_placeholder(tf.float32),
            'adj': tf.sparse_placeholder(tf.float32),
            'adj_orig': tf.sparse_placeholder(tf.float32),
            'dropout': tf.placeholder_with_default(0., shape=()),
            'num_features': tf.sparse_placeholder(tf.float32),
            'features_nonzero': tf.placeholder(tf.float32),
            'pos_weight': tf.placeholder(tf.float32),
            'norm': tf.placeholder(tf.float32),
            'reward': tf.placeholder(tf.float32)
        }]
        sess = tf.Session()

        real_X = tf.placeholder(tf.float32, shape=[None, FLAGS.g_hidden2])
        fake_X = tf.placeholder(tf.float32, shape=[None, FLAGS.g_hidden2])

        self.D_W1 = tf.Variable(xavier_init([FLAGS.g_hidden2,
                                             FLAGS.d_hidden1]))
        self.D_b1 = tf.Variable(xavier_init([FLAGS.d_hidden1]))
        self.D_W2 = tf.Variable(xavier_init([FLAGS.d_hidden1, 1]))
        self.D_b2 = tf.Variable(xavier_init([1]))
        d_vars = [self.D_W1, self.D_b1, self.D_W2, self.D_b2]

        print('train for the network embedding...')
        # Load data
        dataset_str1 = 'Douban_offline'  # 1118 nodes
        dataset_str2 = 'Douban_online'  # 3906 nodes
        adj1, features1, fea_num1 = load_data(dataset_str1)
        adj2, features2, fea_num2 = load_data(dataset_str2)
        num_features = [features1.shape[1], features2.shape[1]]

        model = None

        if model_str == 'gcn_ae':
            model = GCNModelAE(placeholders, num_features, sess)
        elif model_str == 'gcn_vae':
            model = GCNModelVAE(placeholders, num_features, num_nodes,
                                features_nonzero)

        # Optimizer

        with tf.name_scope('optimizer'):
            opt = OptimizerAE(
                preds=[model.reconstructions1, model.reconstructions2],
                labels=[
                    tf.reshape(
                        tf.sparse_tensor_to_dense(placeholders[0]['adj_orig'],
                                                  validate_indices=False),
                        [-1]),
                    tf.reshape(
                        tf.sparse_tensor_to_dense(placeholders[1]['adj_orig'],
                                                  validate_indices=False),
                        [-1])
                ],
                preds_attribute=[
                    model.attribute_reconstructions1,
                    model.attribute_reconstructions1
                ],
                labels_attribute=[
                    tf.sparse_tensor_to_dense(placeholders[0]['features']),
                    tf.sparse_tensor_to_dense(placeholders[1]['features'])
                ],
                pos_weight=[
                    placeholders[0]['pos_weight'],
                    placeholders[1]['pos_weight']
                ],
                norm=[placeholders[0]['norm'], placeholders[1]['norm']],
                fake_logits=model.fake_logits,
                alpha=FLAGS.AX_alpha)


        real_logits, fake_logits = self.discriminator(real_X, fake_X)
        real_prob = tf.reduce_mean(real_logits)
        fake_prob = tf.reduce_mean(fake_logits)
        D_loss = -real_prob + fake_prob
        dis_optimizer = tf.train.AdamOptimizer(
            learning_rate=FLAGS.learning_rate_dis)  # Adam Optimizer
        opt_dis = dis_optimizer.minimize(D_loss, var_list=d_vars)

        sess.run(tf.global_variables_initializer())
        final_emb1 = []
        final_emb2 = []
        emb1_id = []
        emb2_id = []
        local_A_1 = adj1
        local_X_1 = features1
        local_A_2 = adj2
        local_X_2 = features2

        adj_norm_1 = preprocess_graph(local_A_1)
        local_X_1 = sparse_to_tuple(local_X_1.tocoo())
        pos_weight_1 = float(local_A_1.shape[0] * local_A_1.shape[0] -
                             local_A_1.sum()) / local_A_1.sum()
        adj_label_1 = local_A_1 + sp.eye(local_A_1.shape[0])
        adj_label_1 = sparse_to_tuple(adj_label_1)
        norm_1 = local_A_1.shape[0] * local_A_1.shape[0] / float(
            (local_A_1.shape[0] * local_A_1.shape[0] - local_A_1.sum()) * 2)

        adj_norm_2 = preprocess_graph(local_A_2)
        local_X_2 = sparse_to_tuple(local_X_2.tocoo())
        pos_weight_2 = float(local_A_2.shape[0] * local_A_2.shape[0] -
                             local_A_2.sum()) / local_A_2.sum()
        adj_label_2 = local_A_2 + sp.eye(local_A_2.shape[0])
        adj_label_2 = sparse_to_tuple(adj_label_2)
        norm_2 = local_A_2.shape[0] * local_A_2.shape[0] / float(
            (local_A_2.shape[0] * local_A_2.shape[0] - local_A_2.sum()) * 2)

        self.tmp_count = {}

        for epoch in range(FLAGS.epoch):
            for circle_epoch in range(FLAGS.circle_epoch):
                for G_epoch in range(FLAGS.g_epoch):
                    # ------------------------------------------------------------------------------------------
                    feed_dict = construct_feed_dict(
                        [adj_norm_2, adj_norm_1], [adj_label_2, adj_label_1],
                        [local_X_2, local_X_1], [pos_weight_2, pos_weight_1],
                        [norm_2, norm_1], placeholders)
                    feed_dict.update(
                        {placeholders[0]['D_W1']: sess.run(self.D_W1)})
                    feed_dict.update(
                        {placeholders[0]['D_W2']: sess.run(self.D_W2)})
                    feed_dict.update(
                        {placeholders[0]['D_b1']: sess.run(self.D_b1)})
                    feed_dict.update(
                        {placeholders[0]['D_b2']: sess.run(self.D_b2)})

                    _, embeddings1_, embeddings2_, gcn_cost, fake_prob_, attr_cost = sess.run(
                        [
                            opt.opt_op, model.embeddings1, model.embeddings2_,
                            opt.cost, model.fake_prob, opt.attribute_cost
                        ],
                        feed_dict=feed_dict)

                for D_epoch in range(FLAGS.d_epoch):
                    feed_dict.update(
                        {placeholders[0]['dropout']: FLAGS.dropout})
                    emb1, emb2 = sess.run(
                        [model.embeddings1, model.embeddings2_],
                        feed_dict=feed_dict)
                    _, real_prob_, fake_prob_ = sess.run(
                        [opt_dis, real_prob, fake_prob],
                        feed_dict={
                            real_X: emb1,
                            fake_X: emb2
                        })

            if epoch % 1 == 0:

                emb1, emb2 = sess.run([model.embeddings1, model.embeddings2_],
                                      feed_dict=feed_dict)
                final_emb1 = np.array(emb1)
                final_emb2 = np.array(emb2)

                similar_matrix = cosine_similarity(final_emb1, final_emb2)

                self.similar_matrix = similar_matrix

                pair = {}
                gnd = np.loadtxt("data/douban_truth.emb")
                count = {}
                topk = [1, 5, 10, 20, 30, 50]
                for i in range(len(topk)):
                    pair[topk[i]] = []
                    count[topk[i]] = 0
                    self.tmp_count[topk[i]] = 0
                for top in topk:
                    for index in range(similar_matrix.shape[0]):
                        top_index = heapq.nlargest(
                            int(top), range(len(similar_matrix[index])),
                            similar_matrix[index].take)
                        top_index = list(map(lambda x: x + 1, top_index))
                        pair[top].append([index + 1, top_index])
                    for ele_1 in gnd:
                        for ele_2 in pair[top]:
                            if ele_1[0] == ele_2[0]:
                                if ele_1[1] in ele_2[1]:
                                    count[top] += 1

                print(
                    f'-----------------------epoch {epoch}------------------------'
                )
                for top in topk:
                    print("top", '%02d' % (top), "count=", '%d' % (count[top]),
                          "precision=", "{:.5f}".format(count[top] / len(gnd)))
                print(
                    f'-----------------------epoch {epoch}------------------------'
                )
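The nested top-k loop above rescans `similar_matrix` once per `top` value and then does a quadratic matching pass against the ground truth. An equivalent vectorized precision@k (a sketch, assuming `gnd` rows are 1-based (source, target) id pairs as in the code above) needs only a single argsort:

import numpy as np

def precision_at_k(similar_matrix, gnd, topk=(1, 5, 10, 20, 30, 50)):
    # Rank candidate columns for every row once, most similar first,
    # then convert back to the 1-based ids used by the ground truth file.
    ranked = np.argsort(-similar_matrix, axis=1) + 1
    results = {}
    for k in topk:
        hits = sum(1 for src, tgt in gnd.astype(int)
                   if tgt in ranked[src - 1, :k])
        results[k] = hits / len(gnd)
    return results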
Example #10
def get_optimizer(model_str, model, model_z2g, D_Graph, discriminator,
                  placeholders, pos_weight, norm, d_real, num_nodes, GD_real):
    if model_str == 'arga_ae':
        output = model.construct()
        embeddings = output[0]
        reconstructions = output[1]
        d_fake = discriminator.construct(embeddings, reuse=True)
        opt = OptimizerAE(preds=reconstructions,
                          labels=tf.reshape(
                              tf.sparse_tensor_to_dense(
                                  placeholders['adj_orig'],
                                  validate_indices=False), [-1]),
                          pos_weight=pos_weight,
                          norm=norm,
                          d_real=d_real,
                          d_fake=d_fake)
    elif model_str == 'DBGAN':

        z2g = model_z2g.construct()
        hidden = z2g[1]
        z2g = z2g[0]
        preds_z2g = model.construct(hidden=hidden, reuse=True)[0]
        g2z = model.construct()

        embeddings = g2z[0]
        reconstructions = g2z[1]
        d_fake = discriminator.construct(embeddings, reuse=True)
        GD_fake = D_Graph.construct(z2g, reuse=True)

        epsilon = tf.random_uniform(shape=[1], minval=0.0, maxval=1.0)
        interpolated_input = epsilon * placeholders['real_distribution'] + (
            1 - epsilon) * embeddings
        gradient = tf.gradients(
            discriminator.construct(interpolated_input, reuse=True),
            [interpolated_input])[0]

        epsilon = tf.random_uniform(shape=[1], minval=0.0, maxval=1.0)
        interpolated_input = epsilon * placeholders['features_dense'] + (
            1 - epsilon) * z2g
        gradient_z = tf.gradients(
            D_Graph.construct(interpolated_input, reuse=True),
            [interpolated_input])[0]

        opt = OptimizerCycle(preds=reconstructions,
                             labels=tf.reshape(
                                 tf.sparse_tensor_to_dense(
                                     placeholders['adj_orig'],
                                     validate_indices=False), [-1]),
                             pos_weight=pos_weight,
                             norm=norm,
                             d_real=d_real,
                             d_fake=d_fake,
                             GD_real=GD_real,
                             GD_fake=GD_fake,
                             preds_z2g=preds_z2g,
                             labels_z2g=placeholders['real_distribution'],
                             preds_cycle=model_z2g.construct(embeddings,
                                                             reuse=True)[0],
                             labels_cycle=placeholders['features_dense'],
                             gradient=gradient,
                             gradient_z=gradient_z)
    return opt
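The `gradient` and `gradient_z` tensors are the ingredients of a WGAN-GP penalty on the two discriminators; presumably `OptimizerCycle` turns each into a term of the form lambda * (||grad||_2 - 1)^2. A sketch of that step (the weight 10.0 is the WGAN-GP paper default, assumed here):

import tensorflow as tf  # TF 1.x

def gradient_penalty(gradient, gp_lambda=10.0):
    # Per-sample L2 norm of the gradient at the interpolated inputs,
    # penalizing any deviation of that norm from 1.
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradient), axis=1))
    return gp_lambda * tf.reduce_mean(tf.square(slopes - 1.0))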
Example #11
pos_weight2 = float(S2.shape[0] * S2.shape[0] - S2.sum()) / S2.sum()
norm2 = S2.shape[0] * S2.shape[0] / float((S2.shape[0] * S2.shape[0] - S2.sum()) * 2)

pos_weight = [pos_weight1, pos_weight2]
norm = [norm1, norm2]

# Optimizer
with tf.name_scope('optimizer'):
    if model_str == 'gcn_ae':
        opt = OptimizerAE(preds=[model.reconstructions1, model.reconstructions2],
                          labels=[tf.reshape(tf.sparse_tensor_to_dense(placeholders[0]['adj_orig'],
                                                                       validate_indices=False), [-1]),
                                  tf.reshape(tf.sparse_tensor_to_dense(placeholders[1]['adj_orig'],
                                                                       validate_indices=False), [-1])],
                          pos_weight=pos_weight,
                          norm=norm,
                          fake_emd=model.output,
                          model=model,
                          real_emd=model.z_mean2,
                          y1=y_train1, y2=y_train2,
                          size1=S1.shape[0], size2=S2.shape[0])
    # elif model_str == 'gcn_vae':
    #     opt = OptimizerVAE(preds=[model.reconstructions1, model.reconstructions2],
    #                        # labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
    #                        labels=[tf.reshape(tf.sparse_tensor_to_dense(placeholders[0]['adj_orig'],
    #                                                                   validate_indices=False),[-1]),\
    #                             tf.reshape(tf.sparse_tensor_to_dense(placeholders[1]['adj_orig'],
    #                                                                   validate_indices=False),[-1])],
    #                        pos_weight=pos_weight,
    #                        norm=norm,
Example #12
'lambd': tf.placeholder(tf.float32, [])
}

# Create model
if args.autoencoder:
    print('Training an Autoencoder')
    model = GCNModelAE(placeholders, num_features, num_nodes, args)
else:
    print('Training a Variational Autoencoder')
    model = GCNModelVAE(placeholders, num_features, num_nodes, args)

# Optimizer
with tf.name_scope('optimizer'):
    if args.autoencoder:
        opt = OptimizerAE(preds=model.reconstructions,
                          labels=tf.reshape(placeholders['adj_orig'], [-1]),
                          model=model, num_nodes=num_nodes,
                          learning_rate=args.learning_rate)
    else:
        opt = OptimizerVAE(preds=model.reconstructions,
                           labels=tf.reshape(placeholders['adj_orig'], [-1]),
                           model=model, num_nodes=num_nodes,
                           learning_rate=args.learning_rate,
                           lambd=placeholders['lambd'], tolerance=0.1)

def get_next_batch(batch_size, adj, adj_norm):
    adj_idx = np.random.randint(adj.shape[0], size=batch_size)
    adj_norm_batch = adj_norm[adj_idx, :, :]
    adj_norm_batch = np.reshape(adj_norm_batch, [batch_size, num_nodes, num_nodes])
    adj_orig_batch = adj[adj_idx, :, :]
    adj_orig_batch = np.reshape(adj_orig_batch, [batch_size, num_nodes, num_nodes])
    return adj_norm_batch, adj_orig_batch, adj_idx
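A usage sketch for `get_next_batch` (assumed: `adj` and `adj_norm` are dense arrays of shape [num_graphs, num_nodes, num_nodes], and the 'adj'/'adj_orig' feed keys match placeholders that are truncated out of the snippet above):

# Hypothetical training step; batch_size, feed keys, and lambd are assumptions.
adj_norm_batch, adj_orig_batch, adj_idx = get_next_batch(
    batch_size=32, adj=adj, adj_norm=adj_norm)
feed_dict = {placeholders['adj']: adj_norm_batch,
             placeholders['adj_orig']: adj_orig_batch,
             placeholders['lambd']: 1.0}
_, loss = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)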
Example #13
def train_gcn(features, adj_train, args, graph_type):
    model_str = args.model

    # Store original adjacency matrix (without diagonal entries) for later
    adj_orig = adj_train
    adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
    adj_orig.eliminate_zeros()

    adj = adj_train

    # Some preprocessing
    adj_norm = preprocess_graph(adj)

    # Define placeholders
    placeholders = {
        'features': tf.sparse_placeholder(tf.float64),
        'adj': tf.sparse_placeholder(tf.float64),
        'adj_orig': tf.sparse_placeholder(tf.float64),
        'dropout': tf.placeholder_with_default(0., shape=())
    }

    num_nodes = adj.shape[0]
    features = sparse_to_tuple(features.tocoo())
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]

    # Create model
    model = None
    if model_str == 'gcn_ae':
        model = GCNModelAE(placeholders, num_features, features_nonzero, args.hidden1, args.hidden2)
    elif model_str == 'gcn_vae':
        model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero, args.hidden1, args.hidden2)

    # Optimizer
    with tf.name_scope('optimizer'):
        if model_str == 'gcn_ae':
            opt = OptimizerAE(preds=model.reconstructions,
                              labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                          validate_indices=False), [-1]),
                              pos_weight=1,
                              norm=1,
                              lr=args.lr)
        elif model_str == 'gcn_vae':
            opt = OptimizerVAE(preds=model.reconstructions,
                               labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                           validate_indices=False), [-1]),
                               model=model, num_nodes=num_nodes,
                               pos_weight=1,
                               norm=1,
                               lr=args.lr)

    # Initialize session
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())


    adj_label = adj_train + sp.eye(adj_train.shape[0])
    adj_label = sparse_to_tuple(adj_label)


    # Train model
    # use different epochs for ppi and similarity network
    if graph_type == "sequence_similarity":
        epochs = args.epochs_simi
    else:
        epochs = args.epochs_ppi

    for epoch in range(epochs):

        t = time.time()
        # Construct feed dictionary
        feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
        feed_dict.update({placeholders['dropout']: args.dropout})
        # Run single weight update
        outs = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)
        
        if epoch % 10 == 0:
            print("Epoch:", '%04d' % (epoch+1), "train_loss=", "{:.5f}".format(outs[1]))


    print("Optimization Finished!")
    
    
    #return embedding for each protein
    emb = sess.run(model.z_mean,feed_dict=feed_dict)
    
    return emb
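`construct_feed_dict` is also not shown in these snippets; presumably it just binds the preprocessed tuples to the placeholders, along these lines (a sketch under that assumption):

def construct_feed_dict(adj_norm, adj_label, features, placeholders):
    # Assumed helper: map the preprocessed sparse tuples onto the graph inputs.
    feed_dict = {placeholders['features']: features,
                 placeholders['adj']: adj_norm,
                 placeholders['adj_orig']: adj_label}
    return feed_dict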
Example #14
if model_str == 'gcn_ae':
    model = GCNModelAE(placeholders, num_features, features_nonzero)
elif model_str == 'gcn_vae':
    model = GCNModelVAE(placeholders, num_features, num_nodes,
                        features_nonzero)

pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
norm = adj.shape[0] * adj.shape[0] / float(
    (adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
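# These are the standard GAE class-balancing terms: pos_weight is the ratio of
# absent to present edges (up-weighting the positive class in the weighted
# cross-entropy) and norm rescales the mean loss. Toy check (numbers assumed,
# not from the code): for a 100-node graph with 500 edges,
#   pos_weight = (100*100 - 500) / 500 = 19.0
#   norm = 100*100 / ((100*100 - 500) * 2) ≈ 0.5263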

# Optimizer
with tf.name_scope('optimizer'):
    if model_str == 'gcn_ae':
        opt = OptimizerAE(preds=model.reconstructions,
                          labels=tf.sparse_tensor_to_dense(
                              placeholders['adj_orig'],
                              validate_indices=False),
                          pos_weight=pos_weight,
                          norm=norm)
    elif model_str == 'gcn_vae':
        opt = OptimizerVAE(
            preds=model.reconstructions,
            labels=tf.sparse_tensor_to_dense(placeholders['features']),
            model=model,
            num_nodes=num_nodes,
            pos_weight=pos_weight,
            norm=norm,
            index_non=tf.constant(features_training_non_zero_index,
                                  dtype='float32'),
            count_non=count_training,
            index_zero=tf.constant(features_training_zero_index,
                                   dtype='float32'),
Example #15
    model = GCNModelAE(placeholders, num_features, features_nonzero)
elif model_str == 'gcn_vae':
    model = GCNModelVAE(placeholders, num_features, num_nodes,
                        features_nonzero)

pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
norm = adj.shape[0] * adj.shape[0] / float(
    (adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
logging.info('optimizer')
# Optimizer
with tf.name_scope('optimizer'):
    if model_str == 'gcn_ae':
        opt = OptimizerAE(preds=model.reconstructions,
                          labels=tf.reshape(
                              tf.sparse_tensor_to_dense(
                                  placeholders['adj_orig'],
                                  validate_indices=False), [-1]),
                          pos_weight=pos_weight,
                          norm=norm)
    elif model_str == 'gcn_vae':
        opt = OptimizerVAE(preds=model.reconstructions,
                           labels=tf.reshape(
                               tf.sparse_tensor_to_dense(
                                   placeholders['adj_orig'],
                                   validate_indices=False), [-1]),
                           model=model,
                           num_nodes=num_nodes,
                           pos_weight=pos_weight,
                           norm=norm)
logging.info('initialize session')
# Initialize session
Example #16
    # Create model

    pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
    norm = adj.shape[0] * adj.shape[0] / float(
        (adj.shape[0] * adj.shape[0] - adj.sum()) * 2)

    gae_model = GCNModelAE(placeholders, num_features, features_nonzero, False,
                           FLAGS.bilinear)
    # Optimizer
    with tf.name_scope('optimizer'):
        opt = OptimizerAE(
            preds=gae_model.reconstructions,
            labels=tf.reshape(
                tf.sparse_tensor_to_dense(
                    placeholders[
                        'adj'],  # adj_orig in the original implementation
                    validate_indices=False),
                [-1]),
            pos_weight=pos_weight,
            norm=norm)

    sess = tf.Session()
    # Initialize session
    with sess.as_default():
        sess.run(tf.global_variables_initializer())

        if FLAGS.write_summary:
            summary_writer = tf.summary.FileWriter(
                './summary/train/' +
                str(datetime.datetime.now()).replace(' ', '_'), sess.graph)
Example #17
# Optimizer; this serves as the baseline, so no changes are needed
with tf.name_scope('optimizer'):
    if model_str == 'gcn_ae':

        n, _, _ = model.logits_output.get_shape().as_list()
        tensor_list = []
        for adj_channel in placeholders['adj']:
            channel_tensor = tf.reshape(
                tf.sparse_tensor_to_dense(adj_channel, validate_indices=False),
                [n, n])
            tensor_list.append(channel_tensor)

        adj_tensor = tf.stack(tensor_list, axis=2)

        opt = OptimizerAE(preds=model.logits_output,
                          labels=adj_tensor,
                          pos_weight=pos_weight,
                          norm=norm)

    elif model_str == 'gcn_vae':
        n, _, _ = model.logits_output.get_shape().as_list()
        tensor_list = []
        for adj_channel in placeholders['adj']:
            channel_tensor = tf.reshape(
                tf.sparse_tensor_to_dense(adj_channel, validate_indices=False),
                [n, n])
            tensor_list.append(channel_tensor)

        adj_tensor = tf.stack(tensor_list, axis=2)

        opt = OptimizerVAE(preds=model.logits_output,
                           labels=adj_tensor,