def main(argv=None): print("Loading training data..") if FLAGS.label_flip > 0: train_data = load_data( FLAGS.train_prefix, feats_suf=FLAGS.feats_suffix, corrupt_label=lambda l, g: random_flip(l, g, FLAGS.label_flip)) elif FLAGS.high_freq: train_data = load_data_highfreq() else: train_data = load_data(FLAGS.train_prefix, feats_suf=FLAGS.feats_suffix) print("Done loading training data..") train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, FLAGS.weighted_adjacency_matrix_file, load_walks=True) print("Done loading training data..") train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data( FLAGS.train_prefix, load_walks=True) # G, feats, id_map, walks, class_map print("Done loading training data..") train(train_data)
def main(self): print("Loading training data..") # 下载toy-ppi数据集,该数据集的json文件里有test,val等属性,相当于分好训练集、测试集、验证集了, # 然后通过load_data()处理,就可以使用了 train_data = load_data(FLAGS.train_prefix) print("Done loading training data.. ") train(train_data)
def main(argv=None): #print("Loading training data..") train_data = load_data(FLAGS.train_prefix, n_nodes=FLAGS.num_nodes, load_walks=True, graph_file=FLAGS.name) #print("Done loading training data..") train(train_data)
def main(argv=None):
    start_time = time.time()
    print("Loading training data..")
    train_data = load_data(FLAGS.train_prefix, load_walks=True)
    print("Done loading training data..")
    train(train_data)
    end_time = time.time()
    print('it took {} s'.format(end_time - start_time))
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True, unsupervised=True) print("Training phase..") start_time = time.time() train(train_data) print("--- %s seconds ---" % (time.time() - start_time))
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) print("Done loading training data..") #print(learning_rate) train(train_data) #print(learning_rate) print("cazzo dopo")
def main(argv=None, action=[2, 'leaky_relu', 3, 'leaky_relu']):
    global train_data  # training data is shared across calls
    print("action:", action)
    # if train_data == None:
    print("Loading training data..")
    train_data = load_data(FLAGS.train_prefix, load_walks=True)
    print("Done loading training data..")
    return train(train_data, action)
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, FLAGS.walk_len, FLAGS.n_walks, load_walks=True) print("Time for loading data: ", time.time() - T0) # G, feats (None if no feature file), id_map, walks, class_map print("Done loading training data..") logging.info('start training') train(train_data)
def main(argv=None, action=[4, 'sigmoid', 1, 'tanh']):
    global train_data  # training data is shared across calls
    print(action)
    if train_data is None:
        print("Loading training data..")
        train_data = load_data(FLAGS.train_prefix)
        print("Done loading training data..")
    for _ in range(1):
        train(train_data, action)
def main(argv=None, action=[4, 'relu', 4, 'linear']):
    global train_data  # training data is shared across calls
    # loadArgsForCora()
    loadArgsForCiteseer()
    print(action)
    if train_data is None:
        print("Loading training data..")
        train_data = load_data(FLAGS.train_prefix)
        print("Done loading training data..")
    return train(train_data, action)
def main(argv=None):
    ## train graphsage model
    print("Loading training data..")
    train_data = load_data(FLAGS.train_prefix)
    print("Done loading training data..")

    print("Start 1st phase: training graphsage model w/ uniform sampling...")
    if FLAGS.allhop_rewards:
        dim_2_org = FLAGS.dim_2
        dim_3_org = FLAGS.dim_3
        samples_2_org = FLAGS.samples_2
        samples_3_org = FLAGS.samples_3
        dims = [FLAGS.dim_1, FLAGS.dim_2, FLAGS.dim_3]
        samples = [FLAGS.samples_1, FLAGS.samples_2, FLAGS.samples_3]
        numhop = np.count_nonzero(samples)
        for i in reversed(range(0, numhop)):
            FLAGS.dim_2 = dims[1]
            FLAGS.dim_3 = dims[2]
            FLAGS.samples_2 = samples[1]
            FLAGS.samples_3 = samples[2]
            print('Obtaining %d/%d hop reward' % (i + 1, numhop))
            train(train_data, sampler_name='Uniform')
            dims[i] = 0
            samples[i] = 0
        FLAGS.dim_2 = dim_2_org
        FLAGS.dim_3 = dim_3_org
        FLAGS.samples_2 = samples_2_org
        FLAGS.samples_3 = samples_3_org
    else:
        train(train_data, sampler_name='Uniform')
    print("Done 1st phase: training graphsage model w/ uniform sampling..")

    ## train sampler
    print("Training RL-based regressor...")
    train_sampler(train_data)
    print("Done training RL-based regressor...")

    ## train
    print("Start 2nd phase: training graphsage model w/ data-driven sampler...")
    if FLAGS.fast_ver:
        train(train_data, sampler_name='FastML')
    else:
        train(train_data, sampler_name='ML')
    print("Done 2nd phase: training graphsage model w/ data-driven sampler...")
def main(argv=None): print("Loading training data..") # [z]: train_data # type: tuple of size 2 # train_data[0]: networkx.Graph object # train_data[1]: array of R^{|V|xD} # train_data[2]: dict of size |V|, {id: id}. Vertex renaming, referred to as id2idx # train_data[3]: empty # train_data[4]: array of |V|xD', where D' is number of output classes # ppi graph: # |V|: 14755 # |E|: 228431 train_data = load_data(FLAGS.train_prefix) print("Done loading training data..") train(train_data)
def main(argv=None):
    # Example invocation:
    #   python -m graphsage.unsupervised_train --train_prefix dbis/dbis \
    #       --model graphsage_mean --max_total_steps 5000 --epochs 100 \
    #       --validate_iter 10 --neg_sample_size 5 --batch_size 128 --gpu 0
    # FLAGS.train_prefix = '/Volumes/DATA/AUS/2018/code/git/GraphSAGE/dbis/dbis'
    # FLAGS.model = 'graphsage_mean'
    # FLAGS.max_total_steps = 500
    # FLAGS.epochs = 10
    # FLAGS.validate_iter = 10
    # FLAGS.neg_sample_size = 5
    # FLAGS.batch_size = 128
    # FLAGS.identity_dim = 50
    # FLAGS.gpu = 0
    print("Loading training data..")
    train_data = load_data(FLAGS.train_prefix, load_walks=True)
    print("Done loading training data..")
    train(train_data)
def main(self):
    # print(device_lib.list_local_devices())
    # gpu_device_name = tf.test.gpu_device_name()
    # print(gpu_device_name)
    # if tf.test.is_gpu_available():
    #     print("gpu on loading")
    # else:
    #     print("no available gpu")
    print("Loading training data..")
    # The toy-ppi dataset's JSON file already carries test/val attributes,
    # so the train/validation/test split is predefined; after processing
    # with load_data() it is ready to use.
    train_data = load_data(FLAGS.train_prefix)
    print("Done loading training data..")
    train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) print("Done loading training data..") print("Start training uniform sampling + graphsage model..") train(train_data, sampler_name='Uniform') print("Done training uniform sampling + graphsage model..") print("Start training ML sampler..") train_sampler(train_data) print("Done training ML sampler..") print("Start training ML sampling + graphsage model..") train(train_data, sampler_name='FastML') print("Done training ML sampling + graphsage model..")
def main(pure_filename, out_file_name, aggregator_model, D1, num_epochs,
         theta_exist, flag_normalized, G_data=None, id_map=None, feats=None,
         Theta=None, fixed_neigh_weights=None, neg_sample_weights=1.0,
         brain_similarity_sizes=None, loss_function=None):  # argv=None
    tf.reset_default_graph()
    FLAGS.model = aggregator_model
    FLAGS.dim_1 = D1
    FLAGS.dim_2 = D1
    FLAGS.epochs = num_epochs
    FLAGS.theta_exist = theta_exist
    FLAGS.flag_normalized = flag_normalized
    if brain_similarity_sizes is not None:
        FLAGS.brain_similarity_sizes = brain_similarity_sizes
    if loss_function is not None:
        FLAGS.loss_function = loss_function
    # print("Loading training data..")
    # changed: FLAGS.train_prefix -> pure_filename, load_walks=True -> False
    train_data = load_data(G_data=G_data, id_map=id_map, feats=feats,
                           prefix=pure_filename)
    # print("Done loading training data..")
    if fixed_neigh_weights is not None:
        # print('fixed_neigh_weights shape ', fixed_neigh_weights[0].shape)
        Theta = tf.constant(Theta, dtype=tf.float32)
        fixed_neigh_weights = [
            tf.constant(a, dtype=tf.float32, name='neigh_weights')
            for a in fixed_neigh_weights
        ]
    final_adj_matrix, G, final_theta_1, Z, loss, U = train(
        train_data, log_dir, Theta=Theta,
        fixed_neigh_weights=fixed_neigh_weights,
        neg_sample_weights=neg_sample_weights)  # , learned_vars
    # solve(train_data, log_dir)
    # final_adj_matrix = np.abs(final_adj_matrix)
    # post_processing_clip(final_adj_matrix, G, out_file_name)
    return final_adj_matrix, final_theta_1, Z, loss, U  # , learned_vars
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) print("Done loading training data..") models = [ "graphsage_mean", "gcn", "graphsage_seq", "graphsage_maxpool", "graphsage_meanpool" ] # models = ["graphsage_mean"] G = train_data[0] # features = train_data[1] id_map = train_data[1] ''' if not features is None: # pad with dummy zero vector features = np.vstack([features, np.zeros((features.shape[1],))]) ''' context_pairs = train_data[2] if FLAGS.random_context else None minibatch = EdgeMinibatchIterator(G, id_map, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, neg_sample_size=FLAGS.neg_sample_size, fea_dim=FLAGS.feats_dim, fea_filename=FLAGS.train_prefix + "-feats.npy", data_num=FLAGS.nodes_number, context_pairs=context_pairs) for model in models: # train(train_data, minibatch, model, True) # tf.reset_default_graph() # print("train {} profile done".format(model)) train(train_data, minibatch, model, False) tf.reset_default_graph() print("train {} no profile done".format(model))
import sys, os
from os import path

sys.path.insert(0, os.getcwd())
from graphsage.utils import load_data

if __name__ == "__main__":
    PREFIX = sys.argv[1]
    print("PREFIX {}".format(PREFIX))
    G, feats, id_map, walks, class_map = load_data(PREFIX)
    fp = open("edgelist", 'w')
    for nodeid in G.nodes():
        id_1 = id_map[nodeid]
        for neighbor in G.neighbors(nodeid):
            id_2 = id_map[neighbor]
            fp.write("{}\t{}\n".format(id_1, id_2))
    fp.close()
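# A hedged usage sketch for the export script above: assuming it has been run
# with a data prefix argument and has written an "edgelist" file of
# tab-separated id pairs to the working directory, this reads the pairs back
# for a quick sanity check. The file location is an assumption.
edges = []
with open("edgelist") as fp:
    for line in fp:
        id_1, id_2 = line.strip().split("\t")
        edges.append((int(id_1), int(id_2)))
print("exported edges:", len(edges))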
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix) print("Done loading training data..") predict(train_data)
def main(self): print("Loading training data..") train_data = load_data(FLAGS.train_prefix) print("Done loading training data.. ") train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data(os.environ['TMPDIR'] + '/' + FLAGS.train_prefix, target_scaling=FLAGS.target_scaling) print("Done loading training data..") train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data(FLAGS.train_prefix, load_walks=True) print("Done loading training data..") train(train_data)
def main(argv=None): print("Loading training data..") train_data = load_data(os.environ['TMPDIR'] + '/' + FLAGS.train_prefix, load_walks=True) print("Done loading training data..") train(train_data)