def gen_bound(n_train, n_test, base_param, k):
    """Build train/test data for the bounded-value task.

    Training inputs are drawn uniformly from [-k, +k]; test inputs are a
    smoothened uniform signal on [-1, +1]. Gate ticks fire independently
    with ~1% probability per step. Returns (base_param, train_data,
    test_data) — the reservoir parameters are passed through unchanged.
    """
    n_gate = 1
    # Training set: raw uniform values over the widened range.
    train_values = np.random.uniform(-k, +k, n_train)
    train_ticks = np.random.uniform(0, 1, (n_train, n_gate)) < 0.01
    train_data = generate_data(train_values, train_ticks)
    # Test set: smooth signal on the canonical [-1, +1] range, seeded with
    # the last training output so the memory content carries over.
    test_values = smoothen(np.random.uniform(-1, +1, n_test))
    test_ticks = np.random.uniform(0, 1, (n_test, n_gate)) < 0.01
    test_data = generate_data(test_values, test_ticks,
                              last=train_data["output"][-1])
    return base_param, train_data, test_data
def gen_discrete(n_train, n_test, base_param, k):
    """Build train/test data where gated training values come from a
    discrete set.

    A palette of k values is drawn uniformly from [-1, 1]; every training
    step whose gate ticks has its input replaced by a random palette
    member. Test data is a smoothened continuous signal. Returns
    (base_param, train_data, test_data) with parameters unchanged.
    """
    n_gate = 1
    train_values = np.random.uniform(-1, +1, n_train)
    train_ticks = np.random.uniform(0, 1, (n_train, n_gate)) < 0.01
    # Discrete palette, then overwrite the input at every tick position.
    palette = np.random.uniform(-1, 1, k)
    tick_rows = np.where(train_ticks == 1)[0]
    train_values[tick_rows] = np.random.choice(palette, len(tick_rows))
    train_data = generate_data(train_values, train_ticks)
    # Continuous smooth test signal, continued from the last training output.
    test_values = smoothen(np.random.uniform(-1, +1, n_test))
    test_ticks = np.random.uniform(0, 1, (n_test, n_gate)) < 0.01
    test_data = generate_data(test_values, test_ticks,
                              last=train_data["output"][-1])
    return base_param, train_data, test_data
def gen_gate(n_train, n_test, base_param, k):
    """Build train/test data for the multi-gate task (k gate channels).

    A single shared value stream drives k independent gates, each ticking
    with ~1% probability per step. The returned parameter dict is a copy
    of base_param with its shape widened to 1 value input + k gate inputs
    and k outputs.
    """
    n_gate = k
    train_values = np.random.uniform(-1, +1, n_train)
    train_ticks = np.random.uniform(0, 1, (n_train, n_gate)) < 0.01
    train_data = generate_data(train_values, train_ticks)
    test_values = smoothen(np.random.uniform(-1, +1, n_test))
    test_ticks = np.random.uniform(0, 1, (n_test, n_gate)) < 0.01
    test_data = generate_data(test_values, test_ticks,
                              last=train_data["output"][-1])
    # Copy before mutating so the caller's base_param stays untouched.
    param = base_param.copy()
    param["shape"] = (1 + n_gate, param["shape"][1], n_gate)
    return param, train_data, test_data
def gen_trigger(n_train, n_test, base_param, k):
    """Build train/test data where training gate ticks fire at random
    intervals of 1..k steps (instead of an i.i.d. per-step probability).

    Returns (base_param, train_data, test_data); reservoir parameters are
    passed through unchanged.
    """
    n_gate = 1
    values = np.random.uniform(-1, +1, n_train)
    # Draw inter-tick intervals in [1, k], then cumsum to absolute times.
    ticks_interval = np.random.randint(1, k + 1, size=(n_train))
    ticks_time = np.cumsum(ticks_interval)
    ticks = np.zeros((n_train, ))
    valid = np.where(ticks_time < n_train)[0]
    # Guard: with no in-range tick time, np.max on an empty array raises.
    if len(valid) > 0:
        i_max = np.max(valid)
        # Off-by-one fix: include index i_max itself — ticks_time[i_max]
        # satisfies ticks_time[i_max] < n_train, so it is a valid trigger
        # (the original slice [:i_max] silently dropped the last one).
        ticks[ticks_time[:i_max + 1]] = 1
    # NOTE(review): `ticks` is 1-D here while the sibling generators pass a
    # (n, n_gate) array — presumably generate_data accepts both; confirm.
    train_data = generate_data(values, ticks)
    # Test set reverts to the standard ~1% random-tick regime, continued
    # from the last training output.
    values = smoothen(np.random.uniform(-1, +1, n_test))
    ticks = np.random.uniform(0, 1, (n_test, n_gate)) < 0.01
    test_data = generate_data(values, ticks, last=train_data["output"][-1])
    return base_param, train_data, test_data
def gen_value(n_train, n_test, base_param, k):
    """Build train/test data for the multi-value task (k value channels).

    k independent value streams share one gate channel. The returned
    parameter dict is a copy of base_param with its input width set to
    k values + 1 gate.
    """
    n_gate = 1
    train_values = np.random.uniform(-1, +1, (n_train, k))
    train_ticks = np.random.uniform(0, 1, (n_train, n_gate)) < 0.01
    train_data = generate_data(train_values, train_ticks)
    # Test set: smoothen each value channel independently.
    test_values = np.empty((n_test, k))
    for channel in range(k):
        test_values[:, channel] = smoothen(np.random.uniform(-1, +1, n_test))
    test_ticks = np.random.uniform(0, 1, (n_test, n_gate)) < 0.01
    test_data = generate_data(test_values, test_ticks,
                              last=train_data["output"][-1])
    # Copy before mutating so the caller's base_param stays untouched.
    param = base_param.copy()
    param["shape"] = (k + n_gate, param["shape"][1], n_gate)
    return param, train_data, test_data
# Random generator initialization np.random.seed(1) # Build memoryticks n_gate = 1 # Training data n = 25000 values = np.random.uniform(-1, +1, n) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 train_data = generate_data(values, ticks) # Testing data n = 2500 values = smoothen(np.random.uniform(-1, +1, n)) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 test_data = generate_data(values, ticks, last = train_data["output"][-1]) base_param = {"shape":(1+n_gate,1000,n_gate), "sparsity":0.5, "radius":0.1, "scaling":(1.0,1.0), "leak":1.0, "noise":(0, 1e-4, 0), "seed":None} n_res = 20 n_sample = 20 noise_values = np.stack([np.zeros(n_sample), np.logspace(-8, 0, n_sample), np.zeros(n_sample)], axis = 1) noise_test_errors = sharedmem.empty((len(noise_values),n_res)) noise_test_errors[...] = np.nan radius_values = np.concatenate([[0], np.logspace(-2, 1, n_sample)]) radius_test_errors = sharedmem.empty((len(radius_values),n_res)) radius_test_errors[...] = np.nan sparsity_values = np.logspace(-2, 0, n_sample)
# NOTE(review): the line below is a whitespace-mangled fragment of an
# if/else — it ends with an `else:` branch (loading cached results from
# `files`) whose matching `if` header is outside this view, presumably a
# cache check such as `if not os.path.exists(files[0]):`. It cannot be
# safely re-indented without that header, so it is left byte-identical.
# The if-branch builds a model, generates train/test data, trains and
# tests it, and saves test data / model output / model state to `files`;
# the else-branch reloads the saved arrays instead.
model = generate_model(shape=(1 + n_gate, 1000, n_gate), sparsity=0.5, radius=0.1, scaling=(1.0, 1.0), leak=1.0, noise=(0.0000, 0.0001, 0.0001)) # Training data n = 25000 # 300000 values = np.random.uniform(-1, +1, n) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 train_data = generate_data(values, ticks) # Testing data n = 2500 values = smoothen(np.random.uniform(-1, +1, n)) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 test_data = generate_data(values, ticks, last=train_data["output"][-1]) error = train_model(model, train_data) print("Training error : {0}".format(error)) error = test_model(model, test_data) print("Testing error : {0}".format(error)) np.save(files[0], test_data) np.save(files[1], model["output"]) np.save(files[2], model["state"]) else: test_data = np.load(files[0]) model = {} model["output"] = np.load(files[1])
# NOTE(review): this chunk begins mid-call — the keyword arguments on the
# next line close a generate_model(...) call whose opening is outside this
# view. `sparsity`, `n_values`, and `n_gates` must be defined earlier in
# the full file; confirm against the original script.
sparsity=0.5, radius=0.1, scaling=1.0, leak=1.0, noise=(0, 1e-4, 0))

# Sparsify W_in & W_fb
# Randomly zero out input/feedback weights so only a `sparsity` fraction
# (in expectation) remains active.
model["W_in"] *= np.random.uniform(0, 1, model["W_in"].shape) < sparsity
model["W_fb"] *= np.random.uniform(0, 1, model["W_fb"].shape) < sparsity

# Training data: n_values smoothened channels, n_gates gate channels
# ticking with ~1% probability per step.
n = 50000
values = np.random.uniform(-1, +1, (n, n_values))
for i in range(n_values):
    values[:, i] = smoothen(values[:, i])
gates = np.random.uniform(0, 1, (n, n_gates)) < 0.01
train_data = generate_data(values, gates)

# Testing data: same construction, continued from the last training output.
n = 2500
values = np.random.uniform(-1, +1, (n, n_values))
for i in range(n_values):
    values[:, i] = smoothen(values[:, i])
gates = np.random.uniform(0, 1, (n, n_gates)) < 0.01
test_data = generate_data(values, gates, last=train_data["output"][-1])

print("Training")
rmse_train = train_model(model, train_data)
print("Training error : {0:.5f}".format(rmse_train))