# Finish the two-panel figure 7 (support-edge positions vs alpha).
plt1.set_title('Left edge')
plt2.set_xlabel(r'$\alpha$')
# plt2.set_ylabel(r'$x_{+}$')
plt2.set_title('Right edge')
plt2.legend()
fig.tight_layout()
fig.savefig('./figure/figure7_g_a_edge.png', dpi=300)

# pdf of space-sampled iid Gaussian theory, log scale
g = 0.9
nx = 40000  # number of grid points requested from pdf_g
f_ls = [0.1, 0.2, 0.4]  # sampling fractions; presumably used by the loop that follows this chunk — confirm
fig = plt.figure(figsize=(8, 6))
x0_plot = 0.3  # only plot the density for x >= x0_plot
t0 = timeit.default_timer()  # wall-clock start; elapsed time likely reported later — confirm
# theoretical covariance-eigenvalue density for coupling strength g (project helper)
x, px = pdf_g(g, nx=nx)
x12_ls = []   # collects (left, right) support edges per curve
x12c_ls = []  # NOTE(review): filled later in the file — purpose not visible here
tf_plot = x >= x0_plot  # boolean mask restricting the plotted range
line = plt.loglog(x[tf_plot], px[tf_plot], 'k', linewidth=1.5, label='non-sampled')
# reference power law p(x) = sqrt(3)/(2*pi) * x^(-5/3)
px2 = sqrt(3) / (2 * np.pi) * x**(-5 / 3)
plt.loglog(x[tf_plot], px2[tf_plot], '--', linewidth=1.5, color=line[0].get_color())
x12_ls.append((x[0], x[-1]))  # record the support edges of the non-sampled density
from numpy.random import randn
from numpy.linalg import eigvals, eigvalsh
from numpy import sqrt
import timeit

# created by Yu Hu ([email protected]), Aug 2020
plt.rcParams.update({'font.size': 18})

# different g: theoretical covariance-eigenvalue density for several coupling
# strengths, shown on a linear scale (left) and log-log against the
# sqrt(3)/(2*pi) * x^(-5/3) power law (right).
g_ls = [0.6, 0.7, 0.8]
nx_ls = [4000, 10000, 40000]
ymax_ls = [1.5, 1.5, 1.7]
for g, nx, y_top in zip(g_ls, nx_ls, ymax_ls):
    fig = plt.figure(figsize=(16, 6))
    x, px = pdf_g(g, nx=nx)
    # reference power-law tail
    px2 = sqrt(3) / (2 * np.pi) * x**(-5 / 3)

    # left panel: density on a linear scale, support edges marked with dots
    plt.subplot(121)
    plt.plot(x, px, color='b', label='g=' + str(g))
    plt.plot([x[0], x[-1]], [0, 0], '.', markersize=10, color='b')
    plt.plot(x, px2, 'r--', label='power law')
    plt.ylim([0, y_top])
    plt.xlabel('cov eigenvalue $x$')
    plt.ylabel('probability $p(x)$')
    plt.legend()

    # right panel: same curves on log-log axes
    plt.subplot(122)
    plt.loglog(x, px)
    plt.loglog(x, px2, 'r--', label='power law')
    plt.xlabel(r'$\log(x)$')
    plt.ylabel(r'$\log(p(x))$')
    plt.tight_layout()
# created by Yu Hu ([email protected]), Aug 2020
plt.rcParams.update({'font.size': 18})

# compare simulation and iid Gaussian theory: for each network size N,
# histogram the covariance eigenvalues of a random iid-Gaussian connectivity
# against the theoretical density pdf_g.
g_ls = [0.5]
N_ls = [100, 400, 800]
ifig = 0
for g in g_ls:
    for N in N_ls:
        ifig += 1
        # random connectivity with entry variance g^2/N
        J = g * randn(N, N) / sqrt(N)
        C = J2C(J)  # covariance matrix from connectivity (project helper)
        eig_C = eigvalsh(C)
        # theoretical eigenvalue density for this g
        x, px = pdf_g(g, nx=1000)
        fig = plt.figure(figsize=(8, 6))
        plt.plot(x, px, linewidth=1.5, label='theory')
        plt.hist(eig_C, 40, density=True, label='N=' + str(N))
        # dots mark the edges of the theoretical support
        plt.plot([x[0], x[-1]], [0, 0], '.', markersize=10)
        plt.xlabel('cov eigenvalues')
        plt.ylabel('probability')  # fixed typo: was 'probabilty'
        plt.legend()
        plt.title('g=' + str(g))
        plt.tight_layout()
        fig.savefig('./figure/figure1a_' + str(ifig) + '.png', dpi=600)

# different g
g_ls = [0.3, 0.4, 0.5, 0.6, 0.7]
x12_ls = []   # (left, right) support edges per g, filled later in the file
x12c_ls = []
# r_quantile = 1.0238940717401241 print('0.995 quantile r:', r_quantile) J = randn(N,N)/sqrt(N)*g # kdivh = 0.25 # squared root of the kappa notations in text kdivh = 0.45 # squared root of the kappa notations in text kdiv = sqrt(kdivh**2/(1-kdivh**2)*(g**2/N)) # g is after removing div motifs print('b entry var b/sqrt(N): ', kdiv*sqrt(N)) print('xuv, x=', N*kdiv) b = randn(N) * kdiv # squared root of the kappa notations in text print('check', np.linalg.norm(b)*np.linalg.norm(ones(N))) print('outlier theory:', J_uv_outlier(g,N*kdiv)) print('outlier theory:', J_uv_outlier(g,np.linalg.norm(b)*np.linalg.norm(ones(N)))) Jm = J + np.outer(ones(N),b) x,px = pdf_g(g) C = J2C(J) Cm = J2C(Jm) eig_C = eigvalsh(C) eig_Cm = eigvalsh(Cm) fig = plt.figure(figsize=(8,4)) plt.hist(eig_C, 40, density=True, label='N='+str(N)); plt.plot(x,px, linewidth=1.5, label='g='+str(g)) x_lim = plt.xlim() plt.xlabel('cov eigenvalues') plt.ylabel('probability') plt.legend() plt.title(r'Cov spectrum with $J$') plt.tight_layout() fig.savefig('./figure/figure3a_1_cov_J.png', dpi=600)