def degree_distribution(trans_red = 'False', N=100, m=10, number_of_trials = 1, filename = 'degree_histogram'): ''' Outputs node label (=integer time value) and in-degree before and after Transitive reduction. n, int = final number of nodes, as usual for BA model. m, int = number of edges to add with addition of each new node. trials, int = number of realisations of model to be averaged over. filename, string = txt file name for output ''' f = open(filename, 'w') f.write("<k_in>" + '\t' + "rank <k_in>"+ '\t' + "<k_out>"+ '\t' + "rank <k_out>"+ '\t' + "prob_of_in_degree"+ '\t' + "prob_of_out_degree"+ '\n') in_freq=[] out_freq = [] for i in range(N): in_freq.append(0) out_freq.append(0) for trial in range(number_of_trials): model = models.barabasi_albert_graph(N, m, None) G = model[0] if trans_red: G = tr.trans_red(G) in_list = degree_histogram(G, 'in') out_list = degree_histogram(G, 'out') for i in range(len(in_list)): in_freq[i] += in_list[i] for i in range(len(out_list)): out_freq[i] += out_list[i] print "done trial " , trial prob_of_in_degree = [] prob_of_out_degree = [] for i in range(len(in_freq)): in_freq[i] = float(in_freq[i])/float(number_of_trials) out_freq[i] = float(out_freq[i])/float(number_of_trials) prob_of_in_degree.append(float(in_freq[i])/float(N)) prob_of_out_degree.append(float(out_freq[i])/float(N)) f.write(str(i) + '\t' + str(in_freq[i])+ '\t' + str(i)+ '\t' + str(in_freq[i])+ '\t' + str(prob_of_in_degree[i])+ '\t' + str(prob_of_out_degree[i])+ '\n') f.close() return G
def number_of_lp_BA(m_range=[1,5,10,50], n_max=50, n_step=5): for m in m_range: n_range=range(m+1,n_max,n_step) # n_range=range(101,201,10) myfile = open(str(m)+'C_for_BA', 'w') myfile.write('n' + '\t' + 'lp_av'+'\t' + 'lp_err'+ '\t' + 'no._av' +'\t' + 'no._err'+ '\n') start_time = time.time() trials = 1000 for n in n_range: lp_length_list = [] j_list = [] for _ in range(trials): # model = models.COd(2, N) model = models.barabasi_albert_graph(n, m) DAG = model[0] extremes = model[1] tr_DAG = tr.trans_red(DAG) lp = dl.lpd(tr_DAG, extremes[1], extremes[0]) length_lp = lp[2] lp_length_list.append(length_lp) j=0 paths_list = list(nx.all_simple_paths(tr_DAG, extremes[1], extremes[0], cutoff=length_lp+1)) for i in range(len(paths_list)): if len(paths_list[i])==length_lp+1: j+=1 j_list.append(j) print j_list lp_stats = stats.list_stats(lp_length_list) lp_av = lp_stats[3] lp_err = lp_stats[1] j_stats = stats.list_stats(j_list) j_av = j_stats[3] j_err = j_stats[1] print j_av print j_err myfile.write(str(n) + '\t' + str(lp_av)+'\t' + str(lp_err)+ '\t' + str(j_av) +'\t' + str(j_err)+ '\n') print 'done', n elapsed = time.time() - start_time print 'finished.',m,'m. Time elapsed = ',elapsed return
def BA_lp(m=50, N_range = range(51,555,50)): number_of_trials = 100 myfile = open('BA_lp', 'w') myfile.write('N' + '\t' + 'average lp length'+ '\t'+'std err'+'\n') for N in N_range: lp_list = [] comparison_lp_list = [] for _ in range(number_of_trials): model = models.barabasi_albert_graph(N,m) # model = models.COd(D,N) G = model[0] extremes = model[1] # average_in_degree = float(m)*float((N-1))/(float(N)) # print average_in_degree average_in_degree = float(nx.number_of_edges(G)) /float(nx.number_of_nodes(G)) comparison_model = oneD_random_comparison(N,average_in_degree) comparison_G = comparison_model[0] comparison_extremes = comparison_model[1] if nx.has_path(comparison_G, comparison_extremes[1], comparison_extremes[0]): comparison_lp = dl.lpd(comparison_G, comparison_extremes[1], comparison_extremes[0]) comparison_lp_length = comparison_lp[2] else: comparison_lp_length=None #tr_DAG = tr.trans_red(G) lp = dl.lpd(G,extremes[1],extremes[0]) lp_length = lp[2] lp_list.append(lp_length) comparison_lp_list.append(comparison_lp_length) statistics = stats.list_stats(lp_list) lp_av = statistics[3] lp_std_err = statistics[0] comparison_stats = stats.list_stats(comparison_lp_list) comparison_lp_av = comparison_stats[3] comparison_lp_std_err = comparison_stats[0] print "done ", N myfile.write(str(N) + '\t' + str(lp_av) + '\t' + str(lp_std_err) +'\t'+ str(comparison_lp_av) + '\t' + str(comparison_lp_std_err) + '\n') #nx.draw_random(G, with_labels=False, node_colour ='k', node_size=50, node_color = 'k', width=0.5) #nx.draw_networkx(G, pos=random, with_labels=False, node_colour ='k', node_size=50, node_color = 'k', width=0.5) #plt.show() # display figure return
def BA_node_degree_pre_post_TR(n=100, m=10, trials = 1000, filename = 'degree_before_after'): ''' Outputs node label (=integer time value) and in-degree before and after Transitive reduction. n, int = final number of nodes, as usual for BA model. m, int = number of edges to add with addition of each new node. trials, int = number of realisations of model to be averaged over. filename, string = txt file name for output ''' in_list = [] in_tr_list = [] node_list = [] for i in range(n): in_list.append(0) in_tr_list.append(0) for trial in range(trials): model = models.barabasi_albert_graph(n, m=10) G = model[0] H= copy.deepcopy(G) G_tr = tr.trans_red(H) f = open(filename, 'w') f.write('node' + '\t' + 'degree'+ '\t'+'node_tr'+ '\t' + 'degree_tr'+'\n') j=0 for node, node_tr in zip(G.nodes(), G_tr.nodes()): j+=1 node_list.append(node) in_degree = G.in_degree(node) in_list[node] += in_degree in_degree_tr = G_tr.in_degree(node_tr) in_tr_list[node] += in_degree_tr print "done trial no. ", trial for i in range(i): in_list[i] = in_list[i] / float(trials) in_tr_list[i] = in_tr_list[i] / float(trials) f.write(str(node_list[i]) + '\t' + str(in_list[i])+ '\t'+str(node_list[i])+ '\t' + str(in_tr_list[i])+'\n') return
def BA_sp(m, N_range): number_of_trials = 100 #D=1 myfile = open('BA_shortest_path', 'w') myfile.write('N' + '\t' + 'average shortest path'+ '\t'+'std err'+'\n') for N in N_range: sp_list = [] comparison_sp_list = [] for trial in range(number_of_trials): model = models.barabasi_albert_graph(N,m) # model = models.box_model(D, N) G = model[0] extremes = model[1] #tr_DAG = tr.trans_red(G) sp_length = nx.astar_path_length(G, extremes[1], extremes[0]) # sp_length = sp_list.append(sp_length) average_in_degree = float(m)*float((N-1))/(float(N)) comparison_model = oneD_random_comparison(N,average_in_degree) comparison_G = comparison_model[0] comparison_extremes = comparison_model[1] if nx.has_path(comparison_G, comparison_extremes[1], comparison_extremes[0]): comparison_sp_length = nx.astar_path_length(comparison_G, comparison_extremes[1], comparison_extremes[0]) # comparison_sp_length = comparison_sp[2] else: comparison_sp_length=0 comparison_sp_list.append(comparison_sp_length) statistics = stats.list_stats(sp_list) sp_av = statistics[3] sp_std_err = statistics[0] comparison_stats = stats.list_stats(comparison_sp_list) comparison_sp_av = comparison_stats[3] comparison_sp_std_err = comparison_stats[0] print "done ", N myfile.write(str(N) + '\t' + str(sp_av) + '\t' + str(sp_std_err) + '\t' + str(N) + '\t' + str(comparison_sp_av) + '\t' + str(comparison_sp_std_err) + '\n') return
def mm_of_BA(N_range = range(1000,1500,50), number_of_trials = 10, data_filename = 'BA_dimension'): myfile = open(data_filename, 'w') myfile.write('N' + '\t' + 'average D_ms'+ '\t'+'std err'+'\n') N_list = [] ms_av_list = [] ms_std_err_list = [] for N in N_range: N_list.append(N) dim_list = [] for _ in range(number_of_trials): model = models.barabasi_albert_graph(N,1) G = model[0] # extremes = model[1] dim = mm.MM_dimension(G) dim_list.append(dim) statistics = stats.list_stats(dim_list) ms_av = statistics[3] ms_std_err = statistics[0] print "done ", N myfile.write(str(N) + '\t' + str(ms_av) + '\t' + str(ms_std_err) + '\n') ms_av_list.append(ms_av) ms_std_err_list.append(ms_std_err) return[N_list, ms_av_list, ms_std_err_list]
# --- Script: average longest-path length vs N for the BA model (m=1) ---
import models as models
import dag_lib as dl
import trans_red as tr
import statistics as stats

m = 1                    # edges added with each new node
number_of_trials = 100   # realisations averaged per N
D = 1                    # NOTE(review): unused in the active code; appears only in the commented-out COd model line
myfile = open("BA_lp", "w")
myfile.write("N" + "\t" + "average lp length" + "\t" + "std err" + "\n")
for N in range(2, 100, 5):
    lp_list = []
    for trial in range(number_of_trials):
        model = models.barabasi_albert_graph(N, m)
        # model = models.COd(D,N)
        G = model[0]
        extremes = model[1]
        # tr_DAG = tr.trans_red(G)
        # longest path between the model's extreme nodes (extremes[1] -> extremes[0])
        lp = dl.lpd(G, extremes[1], extremes[0])
        lp_length = lp[2]
        lp_list.append(lp_length)
    statistics = stats.list_stats(lp_list)
    # per the column headers above: index 3 = average, index 0 = std err
    lp_av = statistics[3]
    lp_std_err = statistics[0]
    print "done ", N
    myfile.write(str(N) + "\t" + str(lp_av) + "\t" + str(lp_std_err) + "\n")
# nx.draw_random(G, with_labels=False, node_colour ='k', node_size=50, node_color = 'k', width=0.5)
# nx.draw_networkx(G, pos=random, with_labels=False, node_colour ='k', node_size=50, node_color = 'k', width=0.5)