def generation():
    """Train an ESN one-step-ahead on the first 2000 samples of the global
    series ``y``, then free-run generate the next 2000 samples and report
    MSE/RMSE/NRMSE against the held-out segment (plus a plot)."""
    y_train, y_test = y[:2000], y[2000:4000]
    esn = ESN(n_input=1,
              n_output=1,
              n_reservoir=500,
              noise_level=0.001,
              spectral_radius=0.47,
              leak_rate=0.20,
              random_seed=42,
              sparseness=0.2)
    # One-step-ahead teacher forcing: input y(t), target y(t+1).
    train_acc = esn.fit(inputData=y_train[:-1], outputData=y_train[1:])
    print("training acc: {0:4f}\r\n".format(train_acc))
    # Free-running generation seeded with the last training sample.
    y_test_pred = esn.generate(n=len(y_test), initial_input=y_train[-1])
    # Error metrics evaluated on the first 500 generated steps only.
    squared_errors = (y_test_pred - y_test)[:500]**2
    mse = np.mean(squared_errors)
    rmse = np.sqrt(mse)
    nrmse = rmse / np.var(y_test)
    print("testing mse: {0}".format(mse))
    print("testing rmse: {0:4f}".format(rmse))
    print("testing nrmse: {0:4f}".format(nrmse))
    plt.plot(y_test_pred, "g")
    plt.plot(y_test, "b")
    plt.show()
def train_test_esn(input_idx, output_idx, training_data, test_data, merged_prediction, esn=None, rs=7, lr=0.8, n=200, reg=1e-2, rho=0.2):
    """Fit an ESN that maps the pixels at input_idx to those at output_idx
    and write its clamped test predictions into merged_prediction.

    Parameters
    ----------
    input_idx, output_idx : (k, 2) arrays of (row, col) pixel coordinates.
    training_data, test_data : (time, H, W) arrays.
    merged_prediction : (time, H, W) array updated in place at output_idx.
    esn : reuse an existing ESN instead of building a new one.
    rs, lr, n, reg, rho : seed, leak rate, reservoir size, regression
        parameter and spectral radius for a newly built ESN.

    Returns the (possibly newly created) ESN.
    """
    input_y = input_idx[:, 0]
    input_x = input_idx[:, 1]
    output_y = output_idx[:, 0]
    output_x = output_idx[:, 1]
    # Flatten the selected pixels into (time, n_pixels) design matrices.
    training_data_in = training_data[:, input_y, input_x].reshape(-1, len(input_y))
    training_data_out = training_data[:, output_y, output_x].reshape(-1, len(output_y))
    test_data_in = test_data[:, input_y, input_x].reshape(-1, len(input_y))
    test_data_out = test_data[:, output_y, output_x].reshape(-1, len(output_y))
    # (removed: unused local `generate_new = True`)
    if (esn is None):
        print("setting up...")
        esn = ESN(
            n_input=len(input_y),
            n_output=len(output_y),
            n_reservoir=n,  #was 30, use 1700 for best performance!
            weight_generation="advanced",
            leak_rate=lr,
            spectral_radius=rho,
            random_seed=rs,  # BUG FIX: was hard-coded to 7, silently ignoring the rs argument
            noise_level=0.0001,
            sparseness=.1,
            regression_parameters=[reg],
            solver="lsqr")
    train_error = esn.fit(
        training_data_in,
        training_data_out,
    )
    # Reseed so the prediction path is reproducible across calls.
    np.random.seed(42)
    pred = esn.predict(test_data_in)
    # Clamp predictions to [0, 1].
    pred[pred > 1.0] = 1.0
    pred[pred < 0.0] = 0.0
    merged_prediction[:, output_y, output_x] = pred
    # mse/train_error are kept for the (currently disabled) diagnostic print.
    diff = pred.reshape((-1, len(output_y))) - test_data_out
    mse = np.mean((diff)**2)
    #print("train/test: {0:5f}/ {1:5f}".format(train_error, mse))
    return esn
def train_test_esn(input_idx, output_idx, training_data, test_data, merged_prediction, esn=None):
    """Fit an ESN that maps the pixels at input_idx to those at output_idx
    and write its clamped test predictions into merged_prediction.

    input_idx/output_idx are (k, 2) arrays of (row, col) pixel coordinates;
    training_data/test_data are (time, H, W) arrays. A new ESN with fixed
    hyperparameters is created unless one is passed in; the (possibly new)
    ESN is returned and train/test errors are printed.
    """
    input_y = input_idx[:, 0]
    input_x = input_idx[:, 1]
    output_y = output_idx[:, 0]
    output_x = output_idx[:, 1]
    # Flatten the selected pixels into (time, n_pixels) design matrices.
    training_data_in = training_data[:, input_y, input_x].reshape(-1, len(input_y))
    training_data_out = training_data[:, output_y, output_x].reshape(-1, len(output_y))
    test_data_in = test_data[:, input_y, input_x].reshape(-1, len(input_y))
    test_data_out = test_data[:, output_y, output_x].reshape(-1, len(output_y))
    # (removed: unused local `generate_new = True`)
    if (esn is None):
        print("setting up...")
        esn = ESN(
            n_input=len(input_y),
            n_output=len(output_y),
            n_reservoir=20,  # use 1700 for best performance!
            weight_generation="advanced",
            leak_rate=0.8,
            spectral_radius=0.2,
            random_seed=42,
            noise_level=0.0001,
            sparseness=.1,
            regression_parameters=[6e-1],
            solver="lsqr")
        #out_activation = lambda x: 0.5*(1+np.tanh(x/2)), out_inverse_activation = lambda x:2*np.arctanh(2*x-1))
    train_error = esn.fit(
        training_data_in,
        training_data_out,
    )
    # Reseed so the prediction path is reproducible across calls.
    np.random.seed(42)
    pred = esn.predict(test_data_in)
    # Clamp predictions to [0, 1].
    pred[pred > 1.0] = 1.0
    pred[pred < 0.0] = 0.0
    merged_prediction[:, output_y, output_x] = pred
    diff = pred.reshape((-1, len(output_y))) - test_data_out
    mse = np.mean((diff)**2)
    print("train/test: {0:5f}/ {1:5f}".format(train_error, mse))
    return esn
def prepare_predicter(y, x, training_data_in, training_data_out):
    """Build the predictor for pixel (y, x) according to the global
    prediction_mode ("ESN", "NN" or "RBF").

    For inner ESN pixels, the reservoir weight matrix is shared across
    predictors via the module-level `shared_w` buffer: the first caller
    stores its weights, later callers reuse them (presumably so all worker
    processes use identical reservoirs — TODO confirm against the caller).
    """
    global w_stored
    if prediction_mode == "ESN":
        isInner = False
        if y < patch_radius or y >= N - patch_radius or x < patch_radius or x >= N - patch_radius:
            #frame: use the largest centred square patch that fits in the grid
            min_border_distance = np.min([y, x, N - 1 - y, N - 1 - x])
            input_dimension = int((2 * min_border_distance + 1)**2)
        else:
            #inner pixel: fixed eff_sigma x eff_sigma patch
            isInner = True
            input_dimension = eff_sigma * eff_sigma
        input_scaling = None
        if useInputScaling:
            #approximate the input scaling using the mutual information (MI)
            input_scaling = calculate_esn_mi_input_scaling(
                training_data_in, training_data_out[:, 0])
        predicter = ESN(n_input=input_dimension,
                        n_output=1,
                        n_reservoir=n_units,
                        weight_generation="advanced",
                        leak_rate=leaking_rate,
                        spectral_radius=spectral_radius,
                        random_seed=random_seed,
                        noise_level=noise_level,
                        sparseness=sparseness,
                        input_scaling=input_scaling,
                        regression_parameters=[regression_parameter],
                        solver="lsqr")
        if isInner:
            # Share one reservoir matrix between all inner-pixel ESNs.
            if (not bool(w_stored.value)):
                # First inner predictor: publish its reservoir weights.
                shared_w[:] = predicter._W[:]
                w_stored.value = True
                print("generated new weights...")
            else:
                # Later predictors: overwrite their weights with the shared ones.
                predicter._W[:] = shared_w[:]
                #print("used old weights...")
    elif prediction_mode == "NN":
        predicter = NN(k=k)
    elif prediction_mode == "RBF":
        predicter = RBF(sigma=width, basisPoints=basis_points)
    else:
        raise ValueError(
            "No valid prediction_mode choosen! (Value is now: {0})".format(
                prediction_mode))
    return predicter
def prepare_predicter(y, x):
    """Return the predictor selected by the global prediction_mode:
    an ESN sized to the shared input data, a NN, or an RBF model."""
    if prediction_mode == "ESN":
        n_in = shared_input_data.shape[1]
        return ESN(n_input=n_in,
                   n_output=1,
                   n_reservoir=n_units,
                   weight_generation="advanced",
                   leak_rate=leak_rate,
                   spectral_radius=spectral_radius,
                   random_seed=random_seed,
                   noise_level=noise_level,
                   sparseness=sparseness,
                   regression_parameters=[regression_parameter],
                   solver="lsqr",
                   input_density=input_density / n_in)
    if prediction_mode == "NN":
        return NN(k=k)
    if prediction_mode == "RBF":
        return RBF(sigma=width, basisPoints=basis_points)
    raise ValueError(
        "No valid prediction_mode choosen! (Value is now: {0})".format(
            prediction_mode))
def prepare_predicter(y, x, training_data_in, training_data_out):
    """Build the ESN predictor for pixel (y, x).

    Pixels within patch_radius of the border get an input patch shrunk to
    the largest centred square that still fits in the grid; inner pixels
    use an eff_sigma x eff_sigma patch.
    """
    on_frame = (y < patch_radius or y >= N - patch_radius
                or x < patch_radius or x >= N - patch_radius)
    if on_frame:
        border_dist = np.min([y, x, N - 1 - y, N - 1 - x])
        input_dimension = int((2 * border_dist + 1)**2)
    else:
        input_dimension = eff_sigma * eff_sigma

    input_scaling = None
    if useInputScaling:
        # Approximate per-input scaling from the mutual information.
        input_scaling = hp.calculate_esn_mi_input_scaling(
            training_data_in, training_data_out[:, 0])

    return ESN(n_input=input_dimension,
               n_output=1,
               n_reservoir=n_units,
               weight_generation="advanced",
               leak_rate=leaking_rate,
               spectral_radius=spectral_radius,
               random_seed=random_seed,
               noise_level=noise_level,
               sparseness=sparseness,
               input_scaling=input_scaling,
               regression_parameters=[regression_parameter],
               solver="lsqr")
def evaluate_esn_1d(dataset, params, runs_per_iteration=1, test_snrs=None):
    """Grid-search a single ESN hyperparameter.

    params maps exactly one parameter name to a list of candidate values.
    For each candidate, builds runs_per_iteration ESNs, evaluates each on
    dataset, and appends the mean NRMSE to the returned list. If test_snrs
    is a list, the mean test SNR per candidate is appended to it in place.
    """
    if len(params.keys()) != 1:
        class InvalidParameterDictException(Exception):
            pass
        raise InvalidParameterDictException(
            '1d grid search requires exactly 1 parameter list')

    output = []
    param_grid = ParameterGrid(params)

    # Only the test input series is needed here (for the SNR of u/v).
    _, _, u_test, _ = dataset

    # BUG FIX: the loop variable used to shadow the `params` argument;
    # also removed unused locals `p1` and `v`.
    for grid_params in param_grid:
        print(grid_params)
        nrmses = []
        test_snrs_t = []
        for _ in range(runs_per_iteration):
            esn = ESN(**grid_params)
            nrmses.append(evaluate_esn(dataset, esn))
            if test_snrs is not None:
                test_snrs_t.append(snr(u_test.var(), esn.v.var()))
        output.append(np.mean(nrmses))
        if test_snrs is not None:
            test_snrs.append(np.mean(test_snrs_t))

    return output
def get_prediction(data, def_param=(shared_training_data, shared_test_data, frame_output_weights, output_weights, last_states)):
    """Worker: fit/predict one pixel and push (y, x, prediction) onto the
    queue attached as get_prediction.q.

    data is a (y, x, running_index) tuple. def_param bundles module-level
    shared arrays as a default argument — presumably to keep them referenced
    inside worker processes (TODO confirm: default is never used directly).
    Inner pixels and frame pixels get differently configured ESNs.
    """
    y, x, running_index = data
    pred = None
    if (y >= patch_radius and y < N - patch_radius and x >= patch_radius and x < N - patch_radius):
        #inner point: eff_sigma x eff_sigma input patch
        esn = ESN(n_input=eff_sigma * eff_sigma,
                  n_output=1,
                  n_reservoir=n_units,
                  leak_rate=0.5,
                  spectral_radius=0.9,
                  random_seed=38,
                  noise_level=0.001,
                  sparseness=.1,
                  regression_parameters=[5e-6],
                  output_input_scaling=0.1,
                  solver="lsqr")  #weight_generation = "advanced",
        pred = fit_predict_pixel(y, x, running_index, last_states,
                                 output_weights, shared_training_data,
                                 shared_test_data, esn, True)
    else:
        #frame pixel: single scalar input
        esn = ESN(n_input=1,
                  n_output=1,
                  n_reservoir=n_units,
                  weight_generation="advanced",
                  leak_rate=0.70,
                  spectral_radius=0.8,
                  random_seed=42,
                  noise_level=0.001,
                  sparseness=.1,
                  regression_parameters=[6e-6],
                  solver="lsqr")
        pred = fit_predict_frame_pixel(y, x, running_index, last_states,
                                       frame_output_weights,
                                       shared_training_data,
                                       shared_test_data, esn, True)
    # Results are collected asynchronously via the queue set on the function.
    get_prediction.q.put((y, x, pred))
def get_prediction(data, def_param=(shared_training_data, shared_test_data)): y, x, running_index = data #print(np.max(shared_test_data[1])) pred = None if (y >= patch_radius and y < N - patch_radius and x >= patch_radius and x < N - patch_radius): #inner point esn = ESN(n_input=eff_sigma * eff_sigma, n_output=1, n_reservoir=n_units, weight_generation="advanced", leak_rate=0.70, spectral_radius=0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-7], solver="lsqr") pred = fit_predict_pixel(y, x, running_index, shared_training_data, shared_test_data, esn, True) else: #frame min_border_distance = np.min([y, x, N - 1 - y, N - 1 - x]) esn = ESN(n_input=int((2 * min_border_distance + 1)**2), n_output=1, n_reservoir=n_units, weight_generation="advanced", leak_rate=0.70, spectral_radius=0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-7], solver="lsqr") pred = fit_predict_frame_pixel(y, x, running_index, shared_training_data, shared_test_data, esn, True) get_prediction.q.put((y, x, pred))
def pred(predictionHorizon): print("predicting x(t+{0})".format(predictionHorizon)) #optimized for: predictionHorizon = 48 y_train = y[:2000] y_test = y[2000-predictionHorizon:4000] #manual optimization #esn = ESN(n_input=1, n_output=1, n_reservoir=1000, noise_level=0.001, spectral_radius=.4, leak_rate=0.2, random_seed=42, sparseness=0.2) #gridsearch results esn = ESN(n_input=1, n_output=1, n_reservoir=1000, noise_level=0.0001, spectral_radius=1.35, leak_rate=0.7, random_seed=42, sparseness=0.2, solver="lsqr", regression_parameters=[1e-8]) train_acc = esn.fit(inputData=y_train[:-predictionHorizon], outputData=y_train[predictionHorizon:], transient_quota = 0.2) print("training acc: {0:4f}\r\n".format(train_acc)) y_test_pred = esn.predict(y_test[:-predictionHorizon]) mse = np.mean( (y_test_pred-y_test[predictionHorizon:])[:]**2) rmse = np.sqrt(mse) nrmse = rmse/np.var(y_test) print("testing mse: {0}".format(mse)) print("testing rmse: {0:4f}".format(rmse)) print("testing nrmse: {0:4f}".format(nrmse)) import matplotlib plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 13}) plt.rc('text', usetex=True) plt.rc('text.latex', preamble="\\usepackage{mathtools}") plt.figure(figsize=(8,5)) plt.plot(y_test[predictionHorizon:], 'r', linestyle=":" ) plt.plot(y_test_pred, 'b' , linestyle="--") plt.ylim([0.3, 1.6]) plt.legend(['Signal $x(t)$', 'Vorhersage $x\'(t) \\approx x(t+{0})$'.format(predictionHorizon)], fancybox=True, shadow=True, ncol=2, loc="upper center") plt.xlabel("Zeit t") plt.ylabel("Signal") plt.savefig("mackeyglass_pred.pdf") plt.show() return mse
def sineshit():
    """Toy experiment: fit an ESN one-step-ahead on a sin*cos signal,
    free-run generate the remainder, and plot the generation error."""
    x = np.linspace(1,200*np.pi, 20000)
    # Target signal: 2*sin(x)*cos(x), shaped (20000, 1). The 0*log(x) term
    # is a disabled leftover and contributes nothing.
    y = (0*np.log(x)+np.sin(x)*np.cos(x)).reshape(20000,1)*2
    esn = ESN(n_input=1, n_output=1, n_reservoir=200, random_seed=42, noise_level=0.001, leak_rate=0.7, spectral_radius=1.35, sparseness=0.1, solver="lsqr", regression_parameters=[2e-4], weight_generation='SORM')
    # One-step-ahead teacher forcing on the first 5000 samples.
    train_error = esn.fit(inputData=y[:5000, :], outputData=y[1:5001,:], transient_quota=0.4)
    print("train error: {0:4f}".format(train_error))
    print("<|W_out|> = {0:4f}".format(np.mean(np.abs(esn._W_out))))
    # Free-run the remaining 14999 samples, continuing from the trained state.
    Y = esn.generate(n=14999, continuation=True, initial_input=y[5000,:])
    #plt.plot(esn._X.T)
    #plt.show()
    print("test error: {0:4f}".format(np.mean((Y[:,0]-y[5001:,0])**2)))
    #plt.plot(x,y[:,0], "b", linestyle="--")
    #plt.plot(x[5001:],Y[:, 0], "r", linestyle=":")
    # Plot the pointwise generation error rather than the two curves.
    plt.plot(x[5001:],Y[:,0]-y[5001:,0], linestyle=":")
    plt.show()
def solve_single(merged_prediction):
    """Fit one shared-reservoir ESN per output pixel (refit in a loop) and
    write clamped test predictions into merged_prediction.

    Uses the module-level input_y/input_x/output_y/output_x coordinate
    arrays and training/test data. Returns (merged_prediction, mse_train)
    where mse_train collects the per-pixel training errors.
    """
    esn = ESN(n_input=len(input_y), n_output=1, n_reservoir=n_units, weight_generation="advanced", leak_rate=0.2, spectral_radius=0.1, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-0], solver="lsqr")
    print("fitting and predicting...")
    bar = progressbar.ProgressBar(max_value=len(output_y), redirect_stdout=True)
    bar.update(0)
    mse_train = []
    for i in range(len(output_y)):
        y = output_y[i]
        x = output_x[i]
        # Refit the same ESN on this single output pixel's series.
        train_error = esn.fit(training_data_in, training_data[:, y, x].reshape((-1, 1)), verbose=0)
        mse_train.append(train_error)
        pred = esn.predict(test_data_in, verbose=0)
        # Clamp predictions to [0, 1].
        pred[pred > 1.0] = 1.0
        pred[pred < 0.0] = 0.0
        merged_prediction[:, y, x] = pred.ravel()
        bar.update(i)
    bar.finish()
    return (merged_prediction, mse_train)
def solve_multi(merged_prediction):
    """Fit a single multi-output ESN for all output pixels at once and
    write its clamped test predictions into merged_prediction.

    Returns (merged_prediction, train_error).
    """
    esn = ESN(n_input=len(input_y),
              n_output=len(output_y),
              n_reservoir=n_units,
              weight_generation="advanced",
              leak_rate=0.2,
              spectral_radius=0.1,
              random_seed=42,
              noise_level=0.0001,
              sparseness=.1,
              regression_parameters=[5e+1],
              solver="lsqr")
    print("fitting and predicting...")
    train_error = esn.fit(training_data_in, training_data_out, verbose=1)
    predictions = esn.predict(test_data_in, verbose=1)
    # Clamp predictions to [0, 1].
    predictions = np.clip(predictions, 0.0, 1.0)
    merged_prediction[:, output_y, output_x] = predictions
    return (merged_prediction, train_error)
def prepare_predicter(y, x, training_data_in, training_data_out):
    """Construct the predictor for pixel (y, x) according to the global
    prediction_mode ("ESN", "NN" or "RBF").

    For the ESN, the input dimension depends on the pixel position: pixels
    within patch_radius of the border use the largest centred square patch
    that fits in the grid; inner pixels use an eff_sigma x eff_sigma patch.
    """
    if prediction_mode == "ESN":
        if y < patch_radius or y >= N - patch_radius or x < patch_radius or x >= N - patch_radius:
            #frame: shrink the patch to the largest square fitting the grid
            min_border_distance = np.min([y, x, N - 1 - y, N - 1 - x])
            input_dimension = int((2 * min_border_distance + 1)**2)
        else:
            #inner
            input_dimension = eff_sigma * eff_sigma
        #if a custom input scaling according to the mutual information shall be used, useInputScaling has to be set to True
        input_scaling = None
        if useInputScaling:
            #approximate the input scaling using the MI
            input_scaling = calculate_esn_mi_input_scaling(
                training_data_in, training_data_out[:, 0])
        #setup the ESN
        predicter = ESN(n_input=input_dimension,
                        n_output=1,
                        n_reservoir=n_units,
                        weight_generation="advanced",
                        leak_rate=leaking_rate,
                        spectral_radius=spectral_radius,
                        random_seed=random_seed,
                        noise_level=noise_level,
                        sparseness=sparseness,
                        input_scaling=input_scaling,
                        regression_parameters=[regression_parameter],
                        solver="lsqr")
    elif prediction_mode == "NN":
        #setup the NN approach
        predicter = NN(k=k)
    elif prediction_mode == "RBF":
        #setup the RBF approach
        predicter = RBF(sigma=width, basisPoints=basis_points)
    else:
        raise ValueError(
            "No valid prediction_mode choosen! (Value is now: {0})".format(
                prediction_mode))
    return predicter
def mainFunction():
    """Generate data, build an ESN, and run hyperparameter optimization.

    NOTE(review): the bare exit() below makes everything after it dead
    code — the optimize_scipy call is never reached. Presumably left in
    while debugging; confirm before relying on this function.
    """
    data = generate_data(ndata, 20000, 50, Ngrid=N)
    # Input: centred (2*patch_radius+1)^2 patch, subsampled by sigma_skip,
    # flattened to (time, input_size). Output: the centre pixel's series.
    input_data = data[:-prediction_length, N//2-patch_radius:N//2+patch_radius+1, N//2-patch_radius:N//2+patch_radius+1][:, ::sigma_skip, ::sigma_skip].reshape((-1, input_size))
    output_data = data[prediction_length:, N//2, N//2].reshape((-1, 1))
    predicter = ESN(n_input=input_size, n_output=1, n_reservoir=n_units, weight_generation="naive", leak_rate=leaking_rate, spectral_radius=spectral_radius, random_seed=random_seed, noise_level=noise_level, sparseness=sparseness, regression_parameters=[regression_parameter], solver="lsqr")
    print("start fitting...")
    #plot_errors(predicter, input_data[:trainLength], output_data[:trainLength], input_data[trainLength:trainLength+testLength], output_data[trainLength:trainLength+testLength])
    exit()
    # --- dead code below this point (see note above) ---
    sys.stdout.flush()
    #optimize(predicter, 1e-2, 1e-2, input_data[:trainLength], output_data[:trainLength], input_data[trainLength:trainLength+testLength], output_data[trainLength:trainLength+testLength])
    optimize_scipy(predicter, input_data[:trainLength], output_data[:trainLength], input_data[trainLength:trainLength+testLength], output_data[trainLength:trainLength+testLength])
    # NOTE(review): the next statement is an orphaned tuple expression with
    # no effect — it looks like a leftover fragment of a removed call.
    [(input_data[trainLength:trainLength+testLength], output_data[trainLength:trainLength+testLength])],
def eval_esn_with_params(dataset, params=None):
    """Construct an ESN from the given keyword parameters and evaluate it.

    Returns a tuple (evaluation result, the constructed ESN).
    """
    # BUG FIX: avoid a mutable default argument; None means "no parameters".
    if params is None:
        params = {}
    esn = ESN(**params)
    return evaluate_esn(dataset, esn), esn
"""Train and test an ESN on a Mackey-Glass series (tau=16)."""
# IDIOM FIX: replaced `from numpy import *` wildcard with an explicit import.
import numpy as np

from ESN import ESN
from weights_generator.input_weights.RandomInputWeightsGenerator import RandomInputWeightsGenerator
from weights_generator.reservoir_weights.RandomReservoirWeightsGenerator import RandomReservoirWeightsGenerator
from trainer.LinearRegressionTrainer import LinearRegressionTrainer
from tester.NrmseTester import NrmseTester
from data.MackeyGlassSeries import MackeyGlassSeries

# Generate the series to disk, then load it back.
MackeyGlassSeries().generate_data(tau=16, series_length=10000)
data = np.loadtxt('data/MackeyGlass_t16.txt')

esn = ESN(input_dimension=1, reservoir_dimension=500, output_dimension=1)
esn.configure(leaky_rate=0.3, spectral_radius=0.95, input_scale=1.0)
esn.generate_input_weights(input_weights_generator=RandomInputWeightsGenerator())
esn.generate_reservoir_weights(reservoir_weights_generator=RandomReservoirWeightsGenerator())

# Train on the first 2000 samples, then evaluate generatively.
esn.train(data=data[0: 2000], trainer=LinearRegressionTrainer())
# NOTE(review): the test slice starts at 2001, skipping sample 2000 — looks
# like an off-by-one; confirm whether data[2000: 4000] was intended.
esn.test(data=data[2001: 4000], tester=NrmseTester(test_mode=NrmseTester.TEST_MODE_GENERATIVE))
data = generate_data(ndata, 50000, 5, Ngrid=N) np.save("../cache/raw/{0}_{1}.uv.dat.npy".format(ndata, N), data) print("generating finished") else: print("loading data...") data = np.load("../cache/raw/{0}_{1}.uv.dat.npy".format(ndata, N)) print("loading finished") generate_new = False if (os.path.exists("../cache/esn/uv/cross_pred_patches{0}_{1}_{2}_{3}.dat".format(N, ndata, sigma, n_units)) == False): generate_new = True if (generate_new): print("setting up...") esn = ESN(n_input = sigma*sigma, n_output = sigma*sigma, n_reservoir = n_units, weight_generation = "advanced", leak_rate = 0.70, spectral_radius = 0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-1], solver = "lsqr") last_states = np.empty(((N//sigma)*(N//sigma), n_units, 1)) output_weights = np.empty(((N//sigma)*(N//sigma),sigma*sigma, sigma*sigma+1+n_units)) else: print("loading existing model...") f = open("../cache/esn/uv/cross_pred_patches{0}_{1}_{2}_{3}.dat".format(N, ndata, sigma, n_units), "rb") output_weights = pickle.load(f) last_states = pickle.load(f) esn = pickle.load(f) f.close() training_data = data[:, :ndata-2000] test_data = data[:, ndata-2000:]
"(": np.empty(inst), ")": np.empty(inst), "[": np.empty(inst), "]": np.empty(inst), "@": np.empty(inst), "Other": np.empty(inst) } } Wmem = np.empty((inst, 6, K + N + WM)) # All Wmem values for averages errors_y = np.empty(inst) errors_y_alphabet = np.empty((inst, 65)) for i in range(inst): print("\n\nINSTANCE {}/{}\n".format(i + 1, inst)) esn = ESN(font, seed + 4 * i) # Training Wmem esn.train_Wmem() # Training Wout esn.train_Wout() # Testing the ESN esn.test() bracket_errors["fn"][i] = esn.bracket_errors["fn"] bracket_errors["fn_per_brackets"][i] = esn.bracket_errors[ "fn_per_brackets"] bracket_errors["fn_per_char"][i] = esn.bracket_errors["fn_per_char"] bracket_errors["fn_per_T"][i] = esn.bracket_errors["fn_per_T"]
esn = ESN(n_input = 1, n_output = len(index_y), n_reservoir = 400, weight_generation = "advanced", leak_rate = 1.0, spectral_radius = 1.05, random_seed=42, noise_level=0.0001, sparseness=.1, solver = "lsqr", regression_parameters=[1e-9]) """ """ #T=20 esn = ESN(n_input = 1, n_output = len(index_y), n_reservoir = 500, weight_generation = "advanced", leak_rate = 0.75, spectral_radius = 1.15, random_seed=41, noise_level=0.0001, sparseness=.1, solver = "lsqr", regression_parameters=[1e-6]) """ #T=100 #(0.09311725748279541, 0.29777506387407898, {'random_seed': 41, 'n_reservoir': 500, 'solver': 'lsqr', 'sparseness': 0.05, 'regression_parameters': [3e-08], 'leak_rate': 0.95, 'spectral_radius': 1.2, 'weight_generation': 'advanced'}) esn = ESN(n_input = len(index_y), n_output = 1, n_reservoir = 500, weight_generation = "advanced", leak_rate = 0.95, spectral_radius = 1.2, random_seed=41, noise_level=0.0001, sparseness=.05, solver = "lsqr", regression_parameters=[3e-8]) """ T=10: weight_generation = "naive", leak_rate = 0.9, spectral_radius = 1.18, random_seed=42, noise_level=0.0001, sparseness=.1, solver = "lsqr", regression_parameters=[1e-8]) T=20: weight_generation = "naive", leak_rate = 0.75, spectral_radius = 0.80, random_seed=42, noise_level=0.0001, sparseness=.1, solver = "lsqr", regression_parameters=[1e-8]) """ ###print("fitting...") train_error = esn.fit(xData[:trainLength], yData[:trainLength], transient_quota=0.2) print("train error: {0}".format(train_error))
solution[iteration] = solver.y iteration += 1 return solution data = roessler(20000) data = data[:, :] mode = "cross" if mode == "gen": print("set up") esn = ESN(n_reservoir=2000, n_input=3, n_output=3, leak_rate=0.55, spectral_radius=0.60, random_seed=42, weight_generation='advanced') #0.4 print("fitting...") train_error = esn.fit(inputData=data[:trainLength, :], outputData=data[1:trainLength + 1, :]) print("train error: {0:4f}".format(train_error)) testLength = 5000 print("generating...") Y = esn.generate(n=testLength, initial_input=data[trainLength]) errorLength = 4000 mse = np.sum( np.square(data[trainLength:trainLength + errorLength, 0] -
input_data = input_data[:, indices[0], indices[1]].reshape((ndata, N, N)) output_data = output_data[:, indices[0], indices[1]].reshape((ndata, N, N)) n_units = N * N print("reshaping data...") input_data = input_data[:trainLength + predictionLength] output_data = output_data[:trainLength + predictionLength] input_data_f = input_data.reshape((trainLength + predictionLength, -1)) output_data_f = output_data.reshape((trainLength + predictionLength, -1)) print("setting up...") predicter = ESN(n_input=n_units, n_output=n_units, n_reservoir=n_units, weight_generation="custom", leak_rate=leak_rate, spectral_radius=spectral_radius, random_seed=random_seed, noise_level=noise_level, regression_parameters=[regression_parameter], solver="lsqr") print("custom weights generating...") generate_weight(predicter) print("fitting...") predicter.fit(input_data_f[:trainLength], output_data_f[:trainLength], verbose=1) print("predicting...") prediciton_f = predicter.predict(input_data_f[trainLength:trainLength + predictionLength - testLength],
font = str(sys.argv[1]) else: font = "freemono" dirname = "data/PCA/{}".format(font) esnDirname = "data/test/{}".format(font) files = ("{}/PC_X.npy".format(dirname), "{}/PC_U.npy".format(dirname), "{}/length_x.npy".format(dirname)) reload = False # Change to True to compute again # Generate data only if not already generated. Delete every file in files list to be able to generate new data if not np.all([os.path.exists(file) for file in files]) or reload == True: esn = ESN() esn.Win = np.load("{}/Win.npy".format(esnDirname)) esn.W = np.load("{}/W.npy".format(esnDirname)) esn.Wb = np.load("{}/Wb.npy".format(esnDirname)) esn.Wmem = np.load("{}/Wmem.npy".format(esnDirname)) esn.Wout = np.load("{}/Wout.npy".format(esnDirname)) print("\nGenerating data...") length_x = np.empty(7, dtype=np.int) for i in range(7): print("\n------ Memory: {} ------".format(i)) alphascii = Alphascii("PCA", 6500, seed=esn.seed + i, set_i=i) if i == 0: U, X = esn.test(alphascii)
x_train[i, tale_encoded[i]] = 1.0 x_test = np.zeros((test_encoded.shape[0], 29)) for i in range(x_test.shape[0]): x_test[i, test_encoded[i]] = 1.0 print(x_train) x_train = np.vstack((x_train[:100, :], x_train)) x_test = np.vstack((x_test[:100, :], x_test)) print(x_train.shape) esn = ESN(n_input=29, n_output=29, n_reservoir=500, random_seed=42, noise_level=0.001, leak_rate=0.7, spectral_radius=0.8, sparity=1.0) esn.fit(inputData=x_test, outputData=x_test, transient_quota=0.025) #for i in range(10): testSeq = "she live far off said the wolf oh i say, answered " def reencode(encoded): res = "" for i in range(encoded.shape[0]): res += encodingString[np.argmax(encoded[i])] return res
np.save("../cache/raw/{0}_{1}.uv.dat.npy".format(ndata, N), data) print("generating finished") else: print("loading data...") data = np.load("../cache/raw/{0}_{1}.uv.dat.npy".format(ndata, N)) print("loading finished") generate_new = False if (os.path.exists("../cache/esn/uv/cross_pred_patches_advanced{0}_{1}_{2}_{3}.dat".format(N, ndata, sigma, n_units)) == False): generate_new = True if (generate_new): print("setting up...") esn = ESN(n_input = sigma*sigma, n_output = 1, n_reservoir = n_units, weight_generation = "advanced", leak_rate = 0.70, spectral_radius = 0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-1], solver = "lsqr") frameEsn = ESN(n_input = 4, n_output = 4, n_reservoir = n_units, weight_generation = "advanced", leak_rate = 0.70, spectral_radius = 0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-1], solver = "lsqr") last_states = np.empty(((N-2)*(N-2), n_units, 1)) output_weights = np.empty(((N-2)*(N-2),sigma*sigma, sigma*sigma+1+n_units)) frame_output_weights = np.empty(((N-2)*(N-2),2*2, 2*2+1+n_units)) else: print("loading existing model...") f = open("../cache/esn/uv/cross_pred_patches_advanced{0}_{1}_{2}_{3}.dat".format(N, ndata, sigma, n_units), "rb") output_weights = pickle.load(f)
n_units = 2000 model_save_name = "../cache/esn/uv/cross_pred_" + str(N) + "_" + str( n_units) + ".dat" generate_new = False if (os.path.exists(model_save_name) == False): generate_new = True if (generate_new): print("setting up...") esn = ESN(n_input=N * N, n_output=N * N, n_reservoir=n_units, weight_generation="advanced", leak_rate=0.70, spectral_radius=0.8, random_seed=42, noise_level=0.0001, sparseness=.1, regression_parameters=[5e-1], solver="lsqr") #, #out_activation = lambda x: 0.5*(1+np.tanh(x/2)), out_inverse_activation = lambda x:2*np.arctanh(2*x-1)) print("fitting...") train_error = esn.fit(training_data_in, training_data_out, verbose=1) print("train error: {0}".format(train_error)) print("saving to: " + model_save_name) if not os.path.exists("../cache/esn/uv/"): os.makedirs("../cache/esn/uv/")
# Flatten the selected input/output pixel sets into (time, n_pixels) matrices.
training_data_in = training_data[:, input_y, input_x].reshape(-1, len(input_y))
training_data_out = training_data[:, output_y, output_x].reshape(-1, len(output_y))
test_data_in = test_data[:, input_y, input_x].reshape(-1, len(input_y))
test_data_out = test_data[:, output_y, output_x].reshape(-1, len(output_y))
print("setting up...")
esn = ESN(
    n_input=len(input_y),
    n_output=len(output_y),
    n_reservoir=n_units,  #used to be 1700
    weight_generation="advanced",
    leak_rate=0.2,
    spectral_radius=0.1,
    random_seed=42,
    noise_level=0.0001,
    sparseness=.1,
    regression_parameters=[5e-0],
    solver="lsqr",
)
print("fitting...")
train_error = esn.fit(training_data_in, training_data_out, verbose=1)
print("train error: {0}".format(train_error))
print("predicting...")
pred = esn.predict(test_data_in, verbose=1)
# Clamping is currently disabled here (unlike the sibling scripts).
#pred[pred>1.0] = 1.0
exit() """ #T=50: (0.033947233385081932, 0.071219920423523347, {'sparseness': 0.05, 'solver': 'lsqr', 'spectral_radius': 0.3, 'random_seed': 42, 'regression_parameters': [0.0003], 'n_reservoir': 500, 'leak_rate': 0.2, 'weight_generation': 'naive'}) #T=10: (0.00078495804606547662, {'leak_rate': 0.9, 'n_reservoir': 500, 'spectral_radius': 0.95, 'solver': 'pinv', 'weight_generation': 'naive', 'random_seed': 44, 'sparseness': 0.05}) #T=100: (0.050939652813549598, 0.13093701743229968, {'regression_parameters': [0.003], 'sparseness': 0.05, 'spectral_radius': 0.3, 'n_reservoir': 500, 'leak_rate': 0.2, 'solver': 'lsqr', 'weight_generation': 'naive', 'random_seed': 41}) #T=100, solo: (0.032009262884647588, 0.11699984785782426, {'random_seed': 42, 'sparseness': 0.1, 'n_reservoir': 800, 'solver': 'lsqr', 'weight_generation': 'naive', 'regression_parameters': [0.0003], 'spectral_radius': 1.1, 'leak_rate': 0.6}) print("setting up...") if (generate_new): esn = ESN(n_input=len(index_y), n_output=1, n_reservoir=400, weight_generation="advanced", leak_rate=0.7, spectral_radius=0.7, random_seed=42, noise_level=0.0001, sparseness=.1, solver="lsqr", regression_parameters=[3e-3]) #out_activation = lambda x: 0.5*(1+np.tanh(x/2)), out_inverse_activation = lambda x:2*np.arctanh(2*x-1)) print("fitting...") train_error = esn.fit(training_data_in_flat[:-T], training_data_out[T:]) esn.save("esn" + str(len(index_y)) + ".dat") print("train error: {0}".format(train_error)) else: esn = ESN.load("esn" + str(len(index_y)) + ".dat")