def train_model(N, sparsity):
    # Random reservoir scaled to spectral radius RHO, plus uniform input weights.
    W = np.random.normal(0, 1, [N, N])
    W = W * (RHO / np.max(np.abs(np.linalg.eig(W)[0])))
    WI = random.uniform(-TAU, TAU, N)

    # make sparse: zero off-diagonal entries with probability `sparsity`,
    # then rescale back to spectral radius RHO
    bef = np.count_nonzero(W)
    for i in range(N):
        for j in range(N):
            if i != j and random.random() < sparsity:
                W[i, j] = 0.0
    W = W * (RHO / np.max(np.abs(np.linalg.eig(W)[0])))

    mc_max = -inf
    af = np.count_nonzero(W)
    print('sparsity', 1.0 - af / bef)

    # W = learn_orthonormal(W, eta)
    W = learn_orthogonal(W, eta)
    af = np.count_nonzero(W)
    print('sparsity', 1.0 - af / bef)

    # keep applying the orthogonalization step while memory capacity improves;
    # return the best value as soon as it drops
    while True:
        # W = learn_orthonormal(W, eta)
        W = learn_orthogonal(W, eta)
        af = np.count_nonzero(W)
        print('sparsity', 1.0 - af / bef)
        last_mc = mc(WI, W, iters_skip=N, iters_train=10 * N, iters_test=1000)
        print(last_mc)
        if mc_max < last_mc:
            mc_max = last_mc
        if mc_max > last_mc:
            return mc_max
def train_model(N, eta):
    W = np.random.normal(0, 1, [N, N])
    W = W * (RHO / np.max(np.abs(np.linalg.eig(W)[0])))
    WI = random.uniform(-TAU, TAU, N)

    mc_max = -inf
    opt_iters = 0
    W = learn_orthogonal(W, eta)

    # count how many orthogonalization steps it takes until memory capacity stops improving
    while True:
        W = learn_orthogonal(W, eta)
        last_mc = mc(WI, W, iters_skip=N, iters_train=10 * N, iters_test=1000)
        print('N={}, eta={}, mc={}'.format(N, eta, last_mc))
        if mc_max < last_mc:
            mc_max = last_mc
        if mc_max > last_mc:
            print()
            return mc_max, opt_iters
        # W = learn_orthonormal(W, eta)
        opt_iters += 1
def train_model(N, sparsity):
    W = np.random.normal(0, 1, [N, N])
    W = W * (RHO / np.max(np.abs(np.linalg.eig(W)[0])))
    WI = random.uniform(-TAU, TAU, N)

    # make sparse
    for i in range(N):
        for j in range(N):
            if i != j and random.random() < sparsity:
                W[i, j] = 0.0
    W = W * (RHO / np.max(np.abs(np.linalg.eig(W)[0])))

    mc_max = -inf
    W = learn_orthogonal(W, eta)
    while True:
        W = learn_orthogonal(W, eta)
        last_mc = mc(WI, W, iters_skip=N, iters_train=10 * N, iters_test=1000)
        if mc_max < last_mc:
            mc_max = last_mc
        if mc_max > last_mc:
            return mc_max
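# --- The train_model variants above call learn_orthogonal / learn_orthonormal, whose
# definitions are not included in this excerpt. The sketch below is only an illustration,
# under the assumption that a single step nudges a randomly chosen pair of reservoir rows
# towards mutual orthogonality with step size eta (and, for the orthonormal variant,
# additionally pushes row norms towards 1). The names learn_orthogonal_sketch /
# learn_orthonormal_sketch are hypothetical and may differ from the real implementation.
import numpy as np


def learn_orthogonal_sketch(W, eta):
    # Pick two distinct rows and apply a partial Gram-Schmidt correction that
    # shrinks their inner product; eta in (0, 1] controls the step size.
    N = W.shape[0]
    i, j = np.random.choice(N, size=2, replace=False)
    wi, wj = W[i].copy(), W[j].copy()
    W[i] = wi - eta * (wi @ wj) / (wj @ wj) * wj
    W[j] = wj - eta * (wi @ wj) / (wi @ wi) * wi
    return W


def learn_orthonormal_sketch(W, eta):
    # Same orthogonalizing step, followed by nudging each row norm towards 1,
    # so repeated application drives W towards an orthonormal matrix.
    W = learn_orthogonal_sketch(W, eta)
    norms = np.linalg.norm(W, axis=1, keepdims=True)
    return W * (1.0 + eta * (1.0 / norms - 1.0))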
                      iterations=iterations, iterations_coef_measure=1000,
                      use_input=False, target_later=True, calc_lyapunov=False)

# sweep over input scales tau and reservoir sizes N, averaging memory capacity over
# INSTANCES random reservoirs after ORTHOPROCESS_ITERATIONS orthogonalization steps
mc_mean = np.zeros([len(reservoir_sizes), len(taus)])
mc_std = np.zeros([len(reservoir_sizes), len(taus)])
for tau_idx, tau in enumerate(taus):
    print(tau_idx)
    for rsi, N in enumerate(reservoir_sizes):
        mc = np.zeros(INSTANCES)
        for inst in range(INSTANCES):
            W = random.normal(0, 1, [N, N])
            W = W * (rho / np.max(np.abs(np.linalg.eig(W)[0])))
            WI = random.uniform(-tau, tau, N)
            for _ in range(ORTHOPROCESS_ITERATIONS):
                W = learn_orthogonal(W, eta)
            mc[inst], _ = measure_mc(W, WI, iterations=10 * N)
            print(N, mc[inst])
        mc_mean[rsi, tau_idx] = np.average(mc)
        mc_std[rsi, tau_idx] = np.std(mc)
np.save('mcm', mc_mean)
np.save('mcs', mc_std)
                      memory_max=int(1.1*WI.shape[0]), iterations=1200, iterations_coef_measure=1000,
                      use_input=False, target_later=True, calc_lyapunov=False)

mc_mean = np.zeros([len(reservoir_sizes), len(taus)])
mc_std = np.zeros([len(reservoir_sizes), len(taus)])
for tau_idx, tau in enumerate(taus):
    print(tau_idx)
    for rsi, N in enumerate(reservoir_sizes):
        mc = np.zeros(INSTANCES)
        for inst in range(INSTANCES):
            W = random.normal(0, 1, [N, N])
            W = W * (rho / np.max(np.abs(np.linalg.eig(W)[0])))
            WI = random.uniform(-tau, tau, N)
            for _ in range(ORTHOPROCESS_ITERATIONS):
                W = learn_orthogonal(W, eta)
            mc[inst], _ = measure_mc(W, WI)
        mc_mean[rsi, tau_idx] = np.average(mc)
        mc_std[rsi, tau_idx] = np.std(mc)
np.save('mcm', mc_mean)
np.save('mcs', mc_std)
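# --- measure_mc in the sweeps above wraps a memory-capacity routine whose call is only
# partially visible here (memory_max, iterations, iterations_coef_measure, use_input,
# target_later, calc_lyapunov). The function below is a self-contained sketch of the
# standard short-term memory capacity measure MC = sum_k r^2(u(t-k), y_k(t)); the name
# memory_capacity_sketch and all of its defaults are assumptions, not the project's code.
import numpy as np


def memory_capacity_sketch(W, WI, memory_max=None, washout=100,
                           train_len=2000, test_len=1000):
    N = W.shape[0]
    if memory_max is None:
        memory_max = int(1.1 * N)          # mirrors memory_max=int(1.1*WI.shape[0]) above
    T = washout + memory_max + train_len + test_len
    u = np.random.uniform(-1.0, 1.0, T)    # i.i.d. input sequence
    X = np.zeros([T, N])
    x = np.zeros(N)
    for t in range(T):                     # standard tanh echo-state update
        x = np.tanh(W @ x + WI * u[t])
        X[t] = x
    mc = 0.0
    idx = np.arange(washout + memory_max, T)
    tr, te = slice(0, train_len), slice(train_len, train_len + test_len)
    for k in range(1, memory_max + 1):
        target = u[idx - k]                # input delayed by k steps
        states = X[idx]
        # least-squares readout on the training part, squared correlation on the test part
        wout, *_ = np.linalg.lstsq(states[tr], target[tr], rcond=None)
        pred = states[te] @ wout
        r = np.corrcoef(pred, target[te])[0, 1]
        mc += r * r
    return mc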
WI = np.random.uniform(-tau, tau, N)
WG = W

mc_gon = np.zeros(ORTHO_ITERATIONS + 1)
les = []
mc_gon[0], le = measure_mc(WG, WI, True)
les.append(le)

eigen_values = np.zeros([ORTHO_ITERATIONS + 1, N])
singular_values = np.zeros([ORTHO_ITERATIONS + 1, N])
eigen_values[0, :] = np.sort(np.abs(np.linalg.eig(W)[0]))
singular_values[0, :] = np.linalg.svd(W, compute_uv=False)

# track memory capacity, Lyapunov exponent and the eigen-/singular-value spectra
# across ORTHO_ITERATIONS orthogonalization steps
for it in range(ORTHO_ITERATIONS):
    WG = learn_orthogonal(WG, eta)
    mc_gon[it + 1], le = measure_mc(WG, WI, True)
    les.append(le)
    eigen_values[it + 1, :] = np.sort(np.abs(np.linalg.eig(WG)[0]))
    singular_values[it + 1, :] = np.linalg.svd(WG, compute_uv=False)

MCs.append(mc_gon)
EVs.append(eigen_values)
SVs.append(singular_values)
LEs.append(les)
print()
np.save('mc', MCs)
np.save('ev', EVs)
eigenvaluesG = np.zeros([ORTHO_ITERATIONS + 1, N])
singular_valuesG = np.zeros([ORTHO_ITERATIONS + 1, N])
eigenvaluesN = np.zeros([ORTHO_ITERATIONS + 1, N])
singular_valuesN = np.zeros([ORTHO_ITERATIONS + 1, N])

mc_gon[0] = measure_mc(WG, WI)
mc_nor[0] = measure_mc(WN, WI)
eigenvaluesG[0, :] = np.sort(np.abs(np.linalg.eig(W)[0]))
singular_valuesG[0, :] = np.linalg.svd(W, compute_uv=False)
eigenvaluesN[0, :] = np.sort(np.abs(np.linalg.eig(W)[0]))
singular_valuesN[0, :] = np.linalg.svd(W, compute_uv=False)

for it in range(ORTHO_ITERATIONS):
    print('\riteration', it, 'of', ORTHO_ITERATIONS, end='')
    WG = learn_orthogonal(WG, eta)
    WN = learn_orthonormal(WN, eta)
    mc_gon[it + 1] = measure_mc(WG, WI)
    mc_nor[it + 1] = measure_mc(WN, WI)
    eigenvaluesG[it + 1, :] = np.sort(np.abs(np.linalg.eig(WG)[0]))
    singular_valuesG[it + 1, :] = np.linalg.svd(WG, compute_uv=False)
    eigenvaluesN[it + 1, :] = np.sort(np.abs(np.linalg.eig(WN)[0]))
    singular_valuesN[it + 1, :] = np.linalg.svd(WN, compute_uv=False)
print()


def replot():