import numpy as np
import tensorflow as tf

# Cap the fraction of GPU memory TensorFlow may allocate for this process.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=config.GPU_USAGE)

with tf.Graph().as_default():
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    with sess.as_default():
        # Build the DetGP model and an Adam optimizer over its total loss.
        model = DetGP(data.num_vocab, data.num_nodes, data.text, data.train_edges)
        opt = tf.train.AdamOptimizer(config.lr)
        train_op = opt.minimize(model.total_loss)
        sess.run(tf.global_variables_initializer())

        # Initialize the GP inducing points and load them into the model.
        inducing_points = get_initial_inducing(sess, model, config.inducing_num)
        model.load_inducing_points(inducing_points)

        # Training loop.
        log.write('start training : {0}'.format(model_name))
        auc_best = 0.
        for epoch in range(config.num_epoch):
            loss_epoch = 0
            batches = data.generate_batches()
            num_batch = len(batches)
            for i in range(num_batch):
                # Unpack the batch into three node-id arrays and feed them
                # to the model's node placeholders.
                batch = batches[i]
                node1, node2, node3 = zip(*batch)
                node1, node2, node3 = np.array(node1), np.array(node2), np.array(node3)
                feed_dict = {
                    model.node_a_ids: node1,
                    model.node_b_ids: node2,
                    model.node_n_ids: node3
                }