def test_unbiased_aug_grad():
    # Test using linear 2D system eps.
    N = 100
    z = np.random.normal(0.0, 1.0, (N, 4)).astype(DTYPE)
    log_q_z = np.random.normal(2.0, 3.0, (N,)).astype(DTYPE)
    mu = np.array([0.0, 0.1, 2 * np.pi, 0.1 * np.pi]).astype(DTYPE)

    lb = np.NINF
    ub = np.PINF
    a11 = Parameter("a11", 1, lb, ub)
    a12 = Parameter("a12", 1, lb, ub)
    a21 = Parameter("a21", 1, lb, ub)
    a22 = Parameter("a22", 1, lb, ub)
    params = [a11, a12, a21, a22]
    M = Model("lds", params)
    M.set_eps(linear2D_freq)

    nf = NormalizingFlow(
        arch_type="autoregressive", D=4, num_stages=1, num_layers=2, num_units=15
    )

    with tf.GradientTape(persistent=True) as tape:
        z, log_q_z = nf(N)
        params = nf.trainable_variables
        nparams = len(params)
        tape.watch(params)
        _, _, R1s, R2 = aug_lag_vars(z, log_q_z, M.eps, mu, N)
        aug_grad = unbiased_aug_grad(R1s, R2, params, tape)

        T_x_grads = [
            [[None for _ in range(N // 2)] for _ in range(4)] for _ in range(nparams)
        ]
        T_x = M.eps(z)
        for i in range(N // 2):
            for j in range(4):
                _grads = tape.gradient(T_x[i, j] - mu[j], params)
                for k in range(nparams):
                    T_x_grads[k][j][i] = _grads[k]
    del tape

    # Average across the first half of samples.
    for k in range(nparams):
        T_x_grads[k] = np.mean(np.array(T_x_grads[k]), axis=1)

    R2_np = np.mean(T_x[N // 2:, :], 0) - mu

    aug_grad_np = []
    for k in range(nparams):
        aug_grad_np.append(np.tensordot(T_x_grads[k], R2_np, axes=(0, 0)))

    for i in range(nparams):
        assert np.isclose(aug_grad_np[i], aug_grad[i], rtol=1e-3).all()
    return None
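# --- Illustration (hedged sketch, not part of the test suite) -------------------
# A minimal NumPy sketch of the sample-splitting idea that test_unbiased_aug_grad
# verifies: because the two halves of the batch are independent, the product of
# the half-sample residuals is an unbiased estimate of the product of
# expectations, which is why grad(R1) . R2 yields an unbiased gradient of
# (1/2)||E[T(z)] - mu||^2. The Gaussian stand-in for T_j and mu_j below are
# hypothetical values chosen only for this sketch.
import numpy as np

rng = np.random.default_rng(0)
N = 200000
t = rng.normal(1.0, 2.0, (N,))        # stand-in for one statistic T_j(z)
mu_j = 0.5
R1 = np.mean(t[: N // 2]) - mu_j      # residual from the first half of samples
R2 = np.mean(t[N // 2 :]) - mu_j      # residual from the independent second half
print(R1 * R2, (1.0 - mu_j) ** 2)     # both are close to 0.25 for large N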
def test_aug_lag_vars():
    # Test using linear 2D system eps.
    N = 100
    z = np.random.normal(0.0, 1.0, (N, 4)).astype(DTYPE)
    log_q_z = np.random.normal(2.0, 3.0, (N,)).astype(DTYPE)
    mu = np.array([0.0, 0.1, 2 * np.pi, 0.1 * np.pi]).astype(DTYPE)

    lb = np.NINF
    ub = np.PINF
    a11 = Parameter("a11", 1, lb, ub)
    a12 = Parameter("a12", 1, lb, ub)
    a21 = Parameter("a21", 1, lb, ub)
    a22 = Parameter("a22", 1, lb, ub)
    params = [a11, a12, a21, a22]
    M = Model("lds", params)
    M.set_eps(linear2D_freq)

    H, R, R1s, R2 = aug_lag_vars(z, log_q_z, M.eps, mu, N)

    alphas = np.zeros((N,))
    omegas = np.zeros((N,))
    for i in range(N):
        alphas[i], omegas[i] = linear2D_freq_np(z[i, 0], z[i, 1], z[i, 2], z[i, 3])

    # mean_alphas = np.mean(alphas)
    # mean_omegas = np.mean(omegas)
    mean_alphas = 0.0
    mean_omegas = 2.0 * np.pi

    T_x_np = np.stack(
        (
            alphas,
            np.square(alphas - mean_alphas),
            omegas,
            np.square(omegas - mean_omegas),
        ),
        axis=1,
    )
    H_np = np.mean(-log_q_z)
    R_np = np.mean(T_x_np, 0) - mu
    R1_np = np.mean(T_x_np[: N // 2, :], 0) - mu
    R2_np = np.mean(T_x_np[N // 2 :, :], 0) - mu
    R1s_np = list(R1_np)

    rtol = 1e-3
    assert np.isclose(H, H_np, rtol=rtol)
    assert np.isclose(R, R_np, rtol=rtol).all()
    assert np.isclose(R1s, R1s_np, rtol=rtol).all()
    assert np.isclose(R2, R2_np, rtol=rtol).all()
    return None
def LRRNN_setup(N, g, K):
    D = int(N * RANK)
    lb = -np.ones((D,))
    ub = np.ones((D,))
    U = Parameter("U", D, lb=lb, ub=ub)
    V = Parameter("V", D, lb=lb, ub=ub)
    parameters = [U, V]
    model = Model("Rank2Net_g=%.4f_K=%d" % (g, K), parameters)

    W_eigs = get_W_eigs_tf(g, K)

    def stable_amp(U, V):
        U = tf.reshape(U, (-1, N, RANK))
        V = tf.reshape(V, (-1, N, RANK))
        T_x = W_eigs(U, V)
        return T_x

    model.set_eps(stable_amp)
    return model
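# --- Example usage (hedged sketch, not from the library) ------------------------
# How LRRNN_setup might be used to run EPI on the rank-2 network model. The
# target vector mu and the architecture settings below are hypothetical; the
# call signature of model.epi mirrors the stable_amp/get_W_eigs_tf script
# further below, which targets real/imaginary eigenvalue statistics.
model = LRRNN_setup(N=50, g=0.01, K=1)
mu = np.array([0.5, 1.5, 0.25 ** 2, 0.25 ** 2])  # assumed emergent property values
q_theta, opt_data, save_path, failed = model.epi(
    mu,
    arch_type="coupling",
    num_stages=3,
    num_layers=2,
    num_units=100,
    post_affine=True,
    num_iters=200,
)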
def test_check_convergence():
    N = 500
    nu = 0.1
    M_test = 200
    N_test = int(nu * N)

    mu = np.array([0.0, 0.1, 2 * np.pi, 0.1 * np.pi], dtype=np.float32)

    lb_a12 = 0.0
    ub_a12 = 10.0
    lb_a21 = -10.0
    ub_a21 = 0.0
    a11 = Parameter("a11", 1, 0.0)
    a12 = Parameter("a12", 1, lb_a12, ub_a12)
    a21 = Parameter("a21", 1, lb_a21, ub_a21)
    a22 = Parameter("a22", 1, ub=0.0)
    params = [a11, a12, a21, a22]
    M = Model("lds_2D", params)
    M.set_eps(linear2D_freq)

    q_theta, opt_data, epi_path, failed = M.epi(
        mu,
        num_iters=1000,
        K=10,
        N=N,
        stop_early=True,
        save_movie_data=False,
        random_seed=1,
    )
    assert not failed
    assert (opt_data["converged"] == True).sum() > 0

    epi_df = M.get_epi_df()
    epi_df_row = epi_df[epi_df["iteration"] == epi_df["iteration"].max()].iloc[0]
    init = epi_df_row["init"]
    init_params = {"mu": init["mu"], "Sigma": init["Sigma"]}
    nf = M._df_row_to_nf(epi_df_row)
    aug_lag_hps = M._df_row_to_al_hps(epi_df_row)
    best_k, converged, best_H = M.get_convergence_epoch(
        init_params,
        nf,
        mu,
        aug_lag_hps,
        alpha=0.05,
        nu=0.1,
    )
    assert converged
    return None
def lds_2D_model_fixture():
    # 1. Define the model.
    lb, ub = -10.0, 10.0
    a11 = Parameter("a11", 1, lb=lb, ub=ub)
    a12 = Parameter("a12", 1, lb=lb, ub=ub)
    a21 = Parameter("a21", 1, lb=lb, ub=ub)
    a22 = Parameter("a22", 1, lb=lb, ub=ub)
    name = "lds_2D"
    params = [a11, a12, a21, a22]
    M = Model(name, params)

    # 2. Define the emergent property.
    def linear2D_eig(a11, a12, a21, a22):
        tau = 1.0
        c11 = a11 / tau
        c12 = a12 / tau
        c21 = a21 / tau
        c22 = a22 / tau

        # Quadratic formula.
        real_term = 0.5 * (c11 + c22)
        complex_term = 0.5 * tf.sqrt(
            tf.complex(tf.square(c11 + c22) - 4.0 * (c11 * c22 - c12 * c21), 0.0)
        )
        real_lambda = real_term + tf.math.real(complex_term)
        imag_lambda = tf.math.imag(complex_term)

        T_x = tf.concat(
            (
                real_lambda,
                imag_lambda,
                tf.square(real_lambda - 0.0),
                tf.square(imag_lambda - (2.0 * np.pi)),
            ),
            axis=1,
        )
        return T_x

    M.set_eps(linear2D_eig)
    return M
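# --- Example usage (hedged sketch, not part of the fixture) ---------------------
# A test might consume lds_2D_model_fixture like this, mirroring the EPI calls in
# test_epi below. The target vector mu is ordered to match the statistics that
# linear2D_eig returns (real part, imaginary part, and their squared deviations);
# the exact values are illustrative only.
M = lds_2D_model_fixture()
mu = np.array([0.0, 2.0 * np.pi, 0.1, 0.1 * np.pi])
q_theta, opt_data, epi_path, failed = M.epi(mu, num_iters=100, K=1)
z = q_theta(1000)
log_q_z = q_theta.log_prob(z)
assert np.sum(1 - np.isfinite(z)) == 0
assert np.sum(1 - np.isfinite(log_q_z)) == 0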
print('Running epi on 2D-LDS with hyperparameter random seed %d.' % args.seed)

# Define the 2D LDS model parameters.
# The four entries of the dynamics matrix will be bounded.
lb = -10.
ub = 10.
a11 = Parameter("a11", 1, lb=lb, ub=ub)
a12 = Parameter("a12", 1, lb=lb, ub=ub)
a21 = Parameter("a21", 1, lb=lb, ub=ub)
a22 = Parameter("a22", 1, lb=lb, ub=ub)
params = [a11, a12, a21, a22]
M = Model("lds_2D", params)

# Set the emergent property statistics to frequency.
M.set_eps(linear2D_freq)

# Set the emergent property values.
mu = np.array([0.0, 0.5**2, 2 * np.pi, (0.1 * 2 * np.pi)**2])

np.random.seed(args.seed)
num_stages = 3
num_layers = np.random.randint(1, 3)
num_units = np.random.randint(10, 25)

init_params = {'loc': 0., 'scale': 3.}
q_theta, opt_data, save_path, failed = M.epi(
    mu,
    arch_type='coupling',
    num_stages=num_stages,
    num_layers=num_layers,
elif beta == 'P':
    b = np.array([1., -5., 1., 1.25])
    beta_str += '_P'
elif beta == 'S':
    b = np.array([1., 1., -5., 1.25])
    beta_str += '_S'
else:
    raise NotImplementedError("Error: beta = %s ?" % beta)

name = "V1Circuit_%s%s" % (alpha, beta_str)
model = Model(name, parameters)

# 2. Define the emergent property.
# Emergent property statistics (eps).
dr = V1_dr_eps(alpha, inc_val, b=b)
model.set_eps(dr)

# Emergent property values.
mu = np.array([inc_val, inc_std**2])

# 3. Run EPI.
init_params = {'loc': 0., 'scale': 2.}
q_theta, opt_data, save_path, failed = model.epi(
    mu,
    arch_type='coupling',
    num_stages=num_stages,
    num_layers=2,
    num_units=num_units,
    post_affine=True,
    batch_norm=True,
    init_params=init_params,
def trace_det(A):
    diag_div = tf.expand_dims(tf.eye(d), 0) + 1.
    A_lower = tfp.math.fill_triangular(A)
    A = (A_lower + tf.transpose(A_lower, [0, 2, 1]))
    e, v = tf.linalg.eigh(A)
    trace = tf.reduce_sum(e, axis=1)
    det = tf.reduce_prod(e, axis=1)
    T_x = tf.stack(
        [trace, det, tf.square(trace - mu[0]), tf.square(det - mu[1])], axis=1
    )
    return T_x

M.set_eps(trace_det)

np.random.seed(args.seed)
num_stages = 4  # np.random.randint(2, 6)
num_layers = 2  # np.random.randint(1, 3)
num_units = D  # np.random.randint(15, max(30, D))

init_params = {'loc': 0., 'scale': 1.}
q_theta, opt_data, save_path = M.epi(
    mu,
    arch_type='coupling',
    num_stages=num_stages,
    num_layers=num_layers,
    num_units=num_units,
    post_affine=False,
    batch_norm=False,
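# --- Sanity check (hedged, not part of the script) ------------------------------
# A quick NumPy confirmation of the identities trace_det relies on for a
# symmetric matrix: the trace is the sum of the eigenvalues and the determinant
# is their product. The dimension d below is a hypothetical stand-in.
import numpy as np

d = 4
B = np.random.normal(size=(d, d))
A_sym = 0.5 * (B + B.T)                               # symmetric test matrix
e = np.linalg.eigvalsh(A_sym)
assert np.isclose(e.sum(), np.trace(A_sym))           # trace = sum of eigenvalues
assert np.isclose(e.prod(), np.linalg.det(A_sym))     # det = product of eigenvalues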
init_type = "abc" abc_std = mu_std init_params = { "num_keep": 500, "means": np.array([freq]), "stds": np.array([ abc_std, ]) } dt = 0.025 T = 300 network_freq = NetworkFreq(dt, T, sigma_I, mu) model.set_eps(network_freq) # 3. Run EPI. q_theta, opt_data, epi_path, failed = model.epi( mu, arch_type="coupling", num_stages=2, num_layers=num_layers, num_units=25, post_affine=True, elemwise_fn="affine", batch_norm=False, bn_momentum=0.0, K=6, N=400, num_iters=5000,
lb = -5.0
ub = 5.0
sW = Parameter("sW", 1, lb=lb, ub=ub)
vW = Parameter("vW", 1, lb=lb, ub=ub)
dW = Parameter("dW", 1, lb=lb, ub=ub)
hW = Parameter("hW", 1, lb=lb, ub=ub)
parameters = [sW, vW, dW, hW]

model = Model("SC_Circuit_var", parameters)

# EP values.
mu = np.array([p, 1.0 - p, mu_std**2, mu_std**2])

model.set_eps(SC_acc_var(p))

# 3. Run EPI.
q_theta, opt_data, epi_path, failed = model.epi(
    mu,
    arch_type="coupling",
    num_stages=3,
    num_layers=2,
    num_units=50,
    elemwise_fn=elemwise_fn,
    post_affine=False,
    batch_norm=False,
    bn_momentum=0.0,
    K=15,
    N=M,
    num_iters=2000,
def test_epi():
    mu = np.array([0.0, 0.1, 2 * np.pi, 0.1 * np.pi])

    lb_a12 = 0.0
    ub_a12 = 10.0
    lb_a21 = -10.0
    ub_a21 = 0.0
    a11 = Parameter("a11", 1, 0.0)
    a12 = Parameter("a12", 1, lb_a12, ub_a12)
    a21 = Parameter("a21", 1, lb_a21, ub_a21)
    a22 = Parameter("a22", 1, ub=0.0)
    params = [a11, a12, a21, a22]

    M = Model("lds", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, save_path, _ = M.epi(
        mu, num_iters=100, K=1, save_movie_data=True
    )
    g = q_theta.plot_dist()
    M.epi_opt_movie(save_path)

    params = [a11, a12, a21, a22]
    M = Model("lds_2D", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, save_path, _ = M.epi(
        mu, num_iters=100, K=1, save_movie_data=True
    )
    q_theta = M.load_epi_dist(mu, k=1)
    M.epi_opt_movie(save_path)

    q_theta, opt_data, save_path, _ = M.epi(
        mu, num_units=31, num_iters=100, K=1, save_movie_data=True
    )
    M.plot_epi_hpsearch(mu)

    opt_data_filename = save_path + "opt_data.csv"
    opt_data_cols = ["k", "iteration", "H", "converged"] + [
        "R%d" % i for i in range(1, M.m + 1)
    ]
    for x, y in zip(opt_data.columns, opt_data_cols):
        assert x == y

    # opt_data_df = pd.read_csv(opt_data_filename)
    # opt_data_df['iteration'] = 2 * opt_data_df['iteration']
    # opt_data_df.to_csv(opt_data_filename)
    # with raises(IOError):
    #     M.epi_opt_movie(save_path)
    # os.remove(opt_data_filename)
    # with raises(IOError):
    #     M.epi_opt_movie(save_path)

    assert q_theta is not None
    with raises(ValueError):
        q_theta = M.load_epi_dist(mu, k=20)
    with raises(TypeError):
        q_theta = M.load_epi_dist(mu, k="foo")
    with raises(ValueError):
        q_theta = M.load_epi_dist(mu, k=-1)
    M = Model("foo", params)
    with raises(ValueError):
        q_theta = M.load_epi_dist(mu, k=-1)

    z = q_theta(1000)
    log_q_z = q_theta.log_prob(z)
    assert np.sum(z[:, 0] < 0.0) == 0
    assert np.sum(z[:, 1] < lb_a12) == 0
    assert np.sum(z[:, 1] > ub_a12) == 0
    assert np.sum(z[:, 2] < lb_a21) == 0
    assert np.sum(z[:, 2] > ub_a21) == 0
    assert np.sum(z[:, 3] > 0.0) == 0
    assert np.sum(1 - np.isfinite(z)) == 0
    assert np.sum(1 - np.isfinite(log_q_z)) == 0

    # Intentionally swap order in list to ensure proper handling.
    params = [a22, a21, a12, a11]
    M = Model("lds2", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, save_path, _ = M.epi(
        mu, K=2, num_iters=100, stop_early=True, verbose=True
    )
    with raises(IOError):
        M.epi_opt_movie(save_path)

    z = q_theta(1000)
    log_q_z = q_theta.log_prob(z)
    assert np.sum(z[:, 0] < 0.0) == 0
    assert np.sum(z[:, 1] < lb_a12) == 0
    assert np.sum(z[:, 1] > ub_a12) == 0
    assert np.sum(z[:, 2] < lb_a21) == 0
    assert np.sum(z[:, 2] > ub_a21) == 0
    assert np.sum(z[:, 3] > 0.0) == 0
    assert np.sum(1 - np.isfinite(z)) == 0
    assert np.sum(1 - np.isfinite(log_q_z)) == 0

    for x, y in zip(opt_data.columns, opt_data_cols):
        assert x == y

    with raises(ValueError):

        def bad_f(a11, a12, a21, a22):
            return tf.expand_dims(a11 + a12 + a21 + a22, 0)

        M.set_eps(bad_f)

    params = [a22, a21, a12, a11]
    M = Model("lds2", params)
    nf = NormalizingFlow("autoregressive", 4, 1, 2, 10)
    al_hps = AugLagHPs()
    with raises(AttributeError):
        save_path = M.get_save_path(mu, nf, al_hps, None)
    save_path = M.get_save_path(mu, nf, al_hps, eps_name="foo")
    return None
def test_epi():
    mu = np.array([0.0, 0.1, 2 * np.pi, 0.1 * np.pi])

    lb_a12 = 0.0
    ub_a12 = 10.0
    lb_a21 = -10.0
    ub_a21 = 0.0
    a11 = Parameter("a11", 1, 0.0)
    a12 = Parameter("a12", 1, lb_a12, ub_a12)
    a21 = Parameter("a21", 1, lb_a21, ub_a21)
    a22 = Parameter("a22", 1, ub=0.0)
    params = [a11, a12, a21, a22]

    M = Model("lds_2D", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, epi_path, failed = M.epi(
        mu,
        num_iters=100,
        K=1,
        save_movie_data=True,
        log_rate=10,
    )
    z = q_theta(50)
    g = q_theta.plot_dist(z)
    M.epi_opt_movie(epi_path)

    params = [a11, a12, a21, a22]
    # Should load from previous epi run.
    M = Model("lds_2D", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, epi_path, failed = M.epi(
        mu, num_iters=100, K=1, save_movie_data=True
    )
    print("epi_path", epi_path)
    epi_df = M.get_epi_df()
    epi_df_row = epi_df[epi_df["iteration"] == 100].iloc[0]
    q_theta = M.get_epi_dist(epi_df_row)

    opt_data_filename = os.path.join(epi_path, "opt_data.csv")

    M.set_eps(linear2D_freq)
    q_theta, opt_data, epi_path, failed = M.epi(
        mu,
        num_iters=100,
        K=1,
        save_movie_data=True,
        log_rate=10,
    )

    opt_data_cols = ["k", "iteration", "H", "cost", "converged"] + [
        "R%d" % i for i in range(1, M.m + 1)
    ]
    for x, y in zip(opt_data.columns, opt_data_cols):
        assert x == y

    assert q_theta is not None

    z = q_theta(1000)
    log_q_z = q_theta.log_prob(z)
    assert np.sum(z[:, 0] < 0.0) == 0
    assert np.sum(z[:, 1] < lb_a12) == 0
    assert np.sum(z[:, 1] > ub_a12) == 0
    assert np.sum(z[:, 2] < lb_a21) == 0
    assert np.sum(z[:, 2] > ub_a21) == 0
    assert np.sum(z[:, 3] > 0.0) == 0
    assert np.sum(1 - np.isfinite(z)) == 0

    # Intentionally swap order in list to ensure proper handling.
    params = [a22, a21, a12, a11]
    M = Model("lds", params)
    M.set_eps(linear2D_freq)
    q_theta, opt_data, epi_path, _ = M.epi(
        mu,
        K=2,
        num_iters=100,
        stop_early=True,
        verbose=True,
        save_movie_data=True,
        log_rate=10,
    )
    M.epi_opt_movie(epi_path)

    z = q_theta(1000)
    log_q_z = q_theta.log_prob(z)
    assert np.sum(z[:, 0] < 0.0) == 0
    assert np.sum(z[:, 1] < lb_a12) == 0
    assert np.sum(z[:, 1] > ub_a12) == 0
    assert np.sum(z[:, 2] < lb_a21) == 0
    assert np.sum(z[:, 2] > ub_a21) == 0
    assert np.sum(z[:, 3] > 0.0) == 0
    assert np.sum(1 - np.isfinite(z)) == 0

    print("DOING ABC NOW")
    # Need finite support for ABC.
    a11 = Parameter("a11", 1, -10.0, 10.0)
    a12 = Parameter("a12", 1, -10.0, 10.0)
    a21 = Parameter("a21", 1, -10.0, 10.0)
    a22 = Parameter("a22", 1, -10.0, 10.0)
    params = [a11, a12, a21, a22]
    M = Model("lds_2D", params)
    M.set_eps(linear2D_freq)
    init_type = "abc"
    init_params = {"num_keep": 50, "mean": mu[:2], "std": np.sqrt(mu[2:])}
    q_theta, opt_data, epi_path, failed = M.epi(
        mu,
        num_iters=100,
        K=1,
        init_type=init_type,
        init_params=init_params,
        save_movie_data=True,
        log_rate=10,
    )

    params = [a11, a12, a21, a22]
    M = Model("lds2", params)
    M.set_eps(linear2D_freq)
    # This should cause opt to fail with nan since c0=1e20 is too high.
    q_theta, opt_data, epi_path, _ = M.epi(
        mu,
        K=3,
        num_iters=1000,
        c0=1e20,
        stop_early=True,
        verbose=True,
        save_movie_data=False,
        log_rate=10,
    )
    with raises(IOError):
        M.epi_opt_movie(epi_path)

    for x, y in zip(opt_data.columns, opt_data_cols):
        assert x == y

    with raises(ValueError):

        def bad_f(a11, a12, a21, a22):
            return tf.expand_dims(a11 + a12 + a21 + a22, 0)

        M.set_eps(bad_f)

    params = [a11, a12, a21, a22]
    M = Model("lds2", params)
    init_params = {"mu": 2 * np.zeros((4,)), "Sigma": np.eye(4)}
    nf = NormalizingFlow("autoregressive", 4, 1, 2, 10)
    al_hps = AugLagHPs()
    epi_path, exists = M.get_epi_path(init_params, nf, mu, al_hps, eps_name="foo")
    assert not exists
    return None
        J_shape = tf.shape(J)
        N = J_shape[0]
        J = tf.reshape(J, (N, num_neurons, num_neurons))
        x = tf.tile(x0, (N, 1, 1))
        xs = [x]
        for t in range(T):
            x = x + tf.tanh(tf.matmul(J, x))
            xs.append(x)
        x = tf.concat(xs, axis=2)
        out = tf.reduce_sum(
            tf.square(tf.tensordot(x, w, [[1], [0]]) - targ), axis=1
        )
        T_x = tf.stack((out, tf.square(out)), axis=1)
        return T_x

    M.set_eps(sim)
    mu = np.array([0., 0.1], dtype=DTYPE)

else:

    def sim_end(J):
        J_shape = tf.shape(J)
        N = J_shape[0]
        J = tf.reshape(J, (N, num_neurons, num_neurons))
        x = x0
        for t in range(T):
            x = x + tf.tanh(tf.matmul(J, x))
        out = tf.tensordot(x, w, [[1], [0]])
        T_x = tf.concat((out, tf.square(out)), axis=1)
        return T_x
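# --- Illustration (hedged NumPy sketch, not from the script) ---------------------
# The same discrete-time dynamics used by sim()/sim_end(), x <- x + tanh(J x),
# written in NumPy for a single network. The sizes and the random J below are
# hypothetical; w plays the role of the fixed readout vector in the script above.
import numpy as np

num_neurons, T = 10, 20
x = np.random.normal(size=(num_neurons, 1))           # initial state, like x0
w = np.random.normal(size=(num_neurons,))
J = 0.1 * np.random.normal(size=(num_neurons, num_neurons))
for t in range(T):
    x = x + np.tanh(J @ x)                            # same update as the TF code
out = w @ x[:, 0]                                     # readout after T steps
print(out)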
parameters = [sigma_eps]
model = Model(name, parameters)

dt = 0.0005
T = 150
N = 100
stddev = get_stddev_sigma(
    alpha, W_mat, h, N=N, dt=dt, T=T, T_ss=T - 50, mu=sE_mean
)
model.set_eps(stddev)

# Emergent property values.
mu = np.array([sE_mean, sE_std**2])

# 3. Run EPI.
q_theta, opt_data, epi_path, failed = model.epi(
    mu,
    arch_type="coupling",
    num_stages=3,
    num_layers=2,
    num_units=25,
    post_affine=True,
    batch_norm=False,
    bn_momentum=0.0,
    K=10,
@tf.function
def _det(A):
    N = A.get_shape()[0]
    A = tf.reshape(A, (N, d, d))
    detA = tf.linalg.det(A)
    T_x = tf.stack([detA, tf.square(detA - mu[0])], axis=1)
    return T_x

def det(A):
    return _det(A)

M.set_eps(det)

np.random.seed(args.seed)
num_stages = np.random.randint(2, 6)
num_layers = 2  # np.random.randint(1, 3)
num_units = np.random.randint(15, max(30, D))

init_params = {'loc': 0., 'scale': 5.}
q_theta, opt_data, save_path = M.epi(
    mu,
    arch_type='coupling',
    num_stages=num_stages,
    num_layers=num_layers,
    num_units=num_units,
    post_affine=False,
    batch_norm=False,
Js_eig_max_mean = 1.5
eig_std = 0.25
mu = np.array(
    [J_eig_realmax_mean, Js_eig_max_mean, eig_std**2, eig_std**2], dtype=DTYPE
)

W_eigs = get_W_eigs_tf(g, K)

def stable_amp(U, V):
    U = tf.reshape(U, (-1, N, 2))
    V = tf.reshape(V, (-1, N, 2))
    T_x = W_eigs(U, V)
    return T_x

M.set_eps(stable_amp)

q_theta, opt_data, save_path, failed = M.epi(
    mu,
    arch_type="coupling",
    lr=1e-3,
    N=200,
    num_stages=3,
    num_layers=2,
    num_units=100,
    batch_norm=False,
    bn_momentum=0.0,
    post_affine=True,
    num_iters=20,
    c0=c0,
    beta=4.0,