def convergence(n0, l, nt, m, numthreads, display=False):
    """Test convergence of qmax_ave.

    Calls ns.stats once to obtain m realizations of the network degree
    matrix, forms the running average of the per-realization maximum
    degree, and fits a power law to estimate the convergence rate.

    Parameters: n0, l, nt are forwarded to ns.stats; m is the number of
    realizations; numthreads is accepted for interface compatibility
    (unused here); display=True produces a log-log plot.

    Returns the fitted log-log slope k.
    """
    # One stats call provides all m realizations; qnetm[:, i] holds the
    # node degrees of realization i.
    qnetm, qmaxm, qvarm = ns.stats(n0, l, nt, m)

    # Maximum degree of each realization, then its cumulative (running)
    # mean over the first j realizations -- O(m) instead of the naive
    # O(m^2) double loop, with the identical left-to-right sum order.
    qmax = qnetm.max(axis=0).astype(float)
    x = np.arange(1, m + 1)
    qmax_ave = np.cumsum(qmax) / x

    # Fit log(qmax_ave) = k*log(x) + c; c renamed from the original's
    # second variable to avoid shadowing the parameter l.
    k, c = np.polyfit(np.log(x), np.log(qmax_ave), 1)

    if display:
        plt.figure()
        # Log-log plot of the running average
        plt.loglog(x, qmax_ave, color="red")
        # Log-log plot of the power-law fit
        plt.loglog(x, np.exp(c) * x**k)
        plt.legend(['Log-log plot'])
        plt.xlabel('M')
        plt.ylabel('Qmax_ave')
        plt.title('Evgeniia Gleizer. Created by convergence.')
        plt.show()

    # Return the estimated rate so callers can use it (the sibling
    # convergence implementations also return their estimate).
    return k
def convergence(n0, l, nt, m, numthreads, display=False):
    """test convergence of qmax_ave"""
    qmax_ave = np.zeros(m)
    sample_sizes = range(1, m + 1)
    # For each sample size i, generate i fresh realizations and record the
    # mean of their maximum degrees.
    for i in sample_sizes:
        qnetm, qmaxm, qvarm = ns.stats(n0, l, nt, i)
        run_maxima = qnetm.max(axis=0)
        qmax_ave[i - 1] = sum(run_maxima) / float(len(run_maxima))
    # Estimate the convergence rate from a straight-line fit in log-log
    # coordinates: log(qmax_ave) ~ a*log(m) + b.
    a, b = np.polyfit(np.log(sample_sizes), np.log(qmax_ave), 1)
    fit = a * np.log(sample_sizes) + b
    k = -a
    if (display == True):
        plt.figure()
        plt.loglog(sample_sizes, qmax_ave, label='qmax_ave')
        plt.loglog(sample_sizes, np.exp(fit),
                   label='fit with convergece rate k=' + str(k))
        plt.xlabel('m')
        plt.legend(loc='best')
        plt.axis([0, 1000, 5, 10.5])
        plt.title('convergence Mathilde Duverger')
        plt.show()
    return k
def degree_average(n0, l, nt, m, display=False):
    """Compute the ensemble-averaged degree distribution.

    Builds the per-realization degree histograms from ns.stats output,
    averages them over the m realizations, and fits a power law
    Pave = a*x**(-k) in log-log space.

    Parameters: n0, l, nt, m are forwarded to ns.stats; display=True
    shows the log-log data and fit.

    Returns the power-law exponent -b (positive k).
    """
    # PD[q, n] = number of nodes with degree q in realization n.
    # (The original also preallocated P and Pave here; both were dead --
    # immediately rebound below -- so they are removed.)
    PD = np.zeros((n0 + nt, m))

    # call stats to obtain qnetm (degrees per node, per realization)
    qnetm, _, _ = ns.stats(n0, l, nt, m)

    # Scatter each realization's degree counts into its column of PD.
    # np.unique gives the distinct degree values and their counts, and
    # fancy indexing replaces the original element-by-element loop.
    for n in range(m):
        degrees, counts = np.unique(qnetm[:, n], return_counts=True)
        PD[degrees, n] = counts

    # Normalize the ensemble average to a probability and drop zeros
    # (log of zero below would be undefined).
    Pave = np.mean(PD, axis=1) / (n0 + nt)
    Pave = Pave[Pave > 0]

    # Domain of 1..size(Pave) for the fit.
    x = np.arange(1, np.size(Pave) + 1)

    # Solve Pave = a*x**(-k) via log(Pave) = c + b*log(x), c = log(a);
    # the sign of b is flipped on return.
    b, c = np.polyfit(np.log(x), np.log(Pave), 1)

    if display:
        plt.figure()
        # Labels added so plt.legend has artists to describe (the
        # original emitted a "no labeled artists" warning).
        plt.plot(np.log(x), np.log(Pave), 'b', label='Pave')
        plt.plot(np.log(x), c + b * np.log(x), 'r', label='power law fit')
        plt.xlabel('log(x) x=1->size(Pave)')
        plt.ylabel('log(Pave)')
        plt.title('log-log plot of x against Pave with power law fit')
        plt.legend(loc='best')
        plt.show()

    return -b
def degree_average(n0, l, nt, m, display=False):
    """Compute the ensemble-averaged degree distribution.

    Averages the empirical degree distributions of m network
    realizations produced by ns.stats and fits a power law to the
    averaged distribution in log-log space.

    Parameters: n0, l, nt, m are forwarded to ns.stats; display=True
    shows a log-log plot of the distribution and the fit.

    Returns the fitted log-log slope k.
    """
    # qnetm[:, j] holds the node degrees of realization j.
    qnetm, qmaxm, qvarm = ns.stats(n0, l, nt, m)
    P = np.zeros(n0 + nt)

    # Accumulate each realization's degree histogram as probabilities.
    # Loop variable renamed from the original's `l`, which shadowed the
    # parameter l.
    for j in range(m):
        counts = np.bincount(qnetm[:, j])
        P[0:counts.size] += (1.0 * counts) / len(qnetm[:, j])

    # Ensemble average over the m realizations.
    P = P / m

    # Degrees with nonzero probability. Using np.nonzero fixes a latent
    # bug in the original: it filtered x with x > 0 but P with P > 0, so
    # a nonzero probability at degree 0 produced arrays of different
    # lengths and crashed polyfit.
    x = np.nonzero(P > 0)[0]
    P = P[P > 0]
    # Degree 0 cannot enter the log-log fit (log(0) undefined); drop it
    # from both arrays consistently.
    if x.size and x[0] == 0:
        x = x[1:]
        P = P[1:]

    # Fit log(P) = k*log(x) + c. (The original's Python-2 `print len(P)`
    # debug statements -- SyntaxErrors under Python 3 -- are removed.)
    k, c = np.polyfit(np.log(x), np.log(P), 1)

    if display:
        plt.figure()
        # Log-log plot of the averaged probabilities
        plt.loglog(x, P, 'o', color="red")
        # Log-log plot of the power-law fit
        plt.loglog(x, np.exp(c) * x**k)
        plt.legend(['Log-log plot'])
        plt.xlabel('X')
        plt.ylabel('P')
        plt.title('Evgeniia Gleizer. Created by degree_average.')
        plt.show()

    return k
def convergence(n0, l, nt, m, numthreads, display=False):
    """Test convergence of qmaxm.

    Computes the running average of the per-realization maximum degree
    over m realizations from a single ns.stats call and estimates the
    rate of convergence with a power-law fit.

    Parameters: n0, l, nt, m are forwarded to ns.stats; numthreads is
    accepted for interface compatibility (unused here); display=True
    shows the log-log data and fit.

    Returns the convergence rate -k (sign-flipped fitted slope).
    """
    # call stats; qnetm[:, n] holds the node degrees of realization n
    qnetm, qmaxm, qvarm = ns.stats(n0, l, nt, m)

    # Maximum degree of each realization, then its running mean over
    # n = 1..m realizations. np.cumsum replaces the original's O(m^2)
    # np.sum over a partially-filled vector with an O(m) scan that
    # accumulates in the same left-to-right order.
    qmax_vec = np.amax(qnetm, axis=0).astype(float)
    x = np.arange(1, m + 1)
    qmax_ave = np.cumsum(qmax_vec) / x

    # Solve qmax_ave = a*m**(-k) via log(qmax_ave) = c + k*log(m),
    # c = log(a); the sign of k is flipped on return.
    k, c = np.polyfit(np.log(x), np.log(qmax_ave), 1)

    # (A stale commented-out plotting block from the original was
    # deleted here.)
    if display:
        plt.figure()
        plt.plot(np.log(x), np.log(qmax_ave), 'b')
        plt.plot(np.log(x), c + k * np.log(x), 'r')
        plt.xlabel('log(x) x=1->m')
        plt.ylabel('log(qmax_ave)')
        plt.title(
            'log-log plot of m against qmax_ave with rate of convergence fit')
        plt.legend(loc='best')
        plt.show()

    return -k