def Bif_Test(test_length=800, train_length=800, N=400, eta=0.4, tau=400,
             bits=np.inf, preload=False, write=False, mask=0.1, activate='mg',
             beta=1.0, t=1, theta=0.2, hayes_p=1, power=1):
    """
    Drive the reservoir with the NARMA input and report the extrema of its
    response, for use in tracing bifurcation diagrams.

    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        eta: oscillation strength
        tau: delay length
        bits: bit precision
        preload: preload mask and time-series data
        write: unused; kept for interface compatibility
        mask: amplitude of mask values
        activate: activation function to be used (sin**2, tanh, mg)
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time
        hayes_p: fudge factor passed to the reservoir
        power: nonlinearity power passed to the reservoir

    Returns:
        (x_min, x_max): extrema of the reservoir response over the training
        input
    """
    # Import u and m
    u, m, _ = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate reservoir and feed in the training data (input gain gamma
    # is fixed at 0 here)
    r1 = DelayReservoir(N=N, eta=eta, gamma=0, theta=theta, beta=beta,
                        tau=tau, fudge=hayes_p, power=power)
    x = r1.calculate(u[:train_length], m, bits, t, activate)

    return np.min(x), np.max(x)
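# A minimal usage sketch for Bif_Test (not part of the original code): sweep
# the oscillation strength eta and scatter the (x_min, x_max) envelope to
# trace a bifurcation diagram. The eta range below is illustrative, not tuned.
def sweep_bifurcation(etas=np.linspace(0.1, 1.5, 30)):
    mins, maxs = [], []
    for eta in etas:
        x_min, x_max = Bif_Test(eta=eta, preload=True)
        mins.append(x_min)
        maxs.append(x_max)
    plt.scatter(etas, mins, s=4, label="x_min")
    plt.scatter(etas, maxs, s=4, label="x_max")
    plt.xlabel("eta")
    plt.ylabel("reservoir response extrema")
    plt.legend()
    plt.show()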
def single_node_calculate():
    """Run the hayes and mg activations over the same single-node input and
    return the raw reservoir responses, keyed by activation name."""
    activation = ["hayes", "mg"]
    calc_results = {}

    # Load in data (N = 1: a single virtual node)
    preload = True
    train_length = 800
    test_length = 800
    N = 1
    mask = 0.1
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)
    # m = np.eye(u.shape[0])  # Actually, we want m to be an identity matrix!

    for activate in activation:
        # Set parameters based on activation used
        if activate == "hayes":
            delay_res = hayes_special_Delay_Res(
                k1=1.15,
                N=400,
                eta=1,
                gamma=0.05,
                theta=0.2,
                beta=1,
                tau=400,  # 1 being 1 theta back; here I'm assuming theta = 0.2
            )
        else:
            delay_res = mod_Delay_Res(
                k1=1.15,
                N=400,
                eta=1,
                gamma=0.05,
                theta=0.2,
                beta=1,
                tau=400,  # 1 being 1 theta back; here I'm assuming theta = 0.2
            )

        # Store results of calculate
        calc_results[activate] = delay_res.calculate(u, m, np.inf, t=1,
                                                     act=activate)

    # Readout training and NRMSE scoring live in the NARMA test functions;
    # here we only return the raw reservoir responses.
    return calc_results
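# Usage sketch for single_node_calculate (added for illustration): plot the
# raw responses it returns to compare the two activations side by side.
# Assumes the reservoir classes above are importable and that calculate
# returns an array-like trace per activation.
def plot_single_node_responses():
    results = single_node_calculate()
    for name, trace in results.items():
        plt.plot(np.asarray(trace).flatten(), label=name)
    plt.title("Single-node response: hayes vs. mg")
    plt.legend()
    plt.show()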
def Values_Test(test_length=800, train_length=800, N=400, eta=0.4, tau=400,
                preload=True, mask=0.1, activate='mg', beta=1.0, t=1,
                theta=0.2, power=1, fudge=1.0, gamma=0.1):
    """
    Compute the raw reservoir response, drive, and lagged state for one or
    more activation functions.

    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        eta: oscillation strength
        tau: delay length
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: activation function (sin**2, tanh, mg), or a list of them
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time
        power: nonlinearity power
        fudge: fudge factor passed to the reservoir
        gamma: input gain

    Returns:
        (M_x, J, X_lag): node states, drive, and lagged states; lists of
        these (one entry per activation) when `activate` is a list
    """
    # Import u and m
    u, m, _ = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate reservoir and feed in the training data
    r1 = ModifiedDelayRC(N=N, eta=eta, gamma=gamma, theta=theta, beta=beta,
                         tau=tau, power=power, fudge=fudge)
    if not isinstance(activate, list):
        M_x, J, X_lag = r1.calculate(u[:train_length], m, t, activate)
    else:
        M_x, J, X_lag = [], [], []
        for activation in activate:
            a, b, c = r1.calculate(u[:train_length], m, t, activation)
            M_x.append(a)
            J.append(b)
            X_lag.append(c)

    # plt.figure(2)
    # plt.scatter(M_x.flatten(), J.flatten(), s=.01)
    return M_x, J, X_lag
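# Usage sketch for Values_Test with a list of activations (illustrative, not
# from the original code): scatter node state against drive for each
# activation, mirroring the commented-out plot inside Values_Test.
def scatter_state_vs_drive(activations=("mg", "hayes")):
    M_x, J, _ = Values_Test(activate=list(activations), preload=True)
    for name, mx, j in zip(activations, M_x, J):
        plt.scatter(np.asarray(mx).flatten(), np.asarray(j).flatten(),
                    s=0.01, label=name)
    plt.xlabel("x(t)")
    plt.ylabel("J(t)")
    plt.legend()
    plt.show()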
import matplotlib.pyplot as plt

# out = run_test()
# plt.scatter(range(len(out[0, :])), out[0, :])
# plt.scatter(range(len(out[0, :])), out[1, :])
# plt.show()

preload = False
train_length, test_length = 800, 800
mask = 0.1
N, tau = 400, 200000
eta, theta, beta = 1.0, 0.2, 0.5
power = 7
activate = 'mg'
t = 1
gamma = 0.5

u, m, _ = load_NARMA(preload, train_length, test_length, mask, N)
r1 = ModifiedDelayRC(N=N, eta=eta, gamma=gamma, theta=theta, beta=beta,
                     tau=tau, power=power)
x = r1.calculate(u[:train_length], m, t, activate)

# Inspect the second half of the response (after transients)
print(np.max(x[len(x) // 2:]))
plt.plot(x[len(x) // 2:])
plt.show()
def optimal_NARMA_Comp(test_length=800, train_length=800, plot=True, N=400,
                       eta=0.4, gamma=0.05, tau=400, bits=np.inf,
                       preload=True, write=False, mask=0.1, activate='mg',
                       cv=False, beta=1.0, t=1, k1=1.15, theta=0.2,
                       no_act_res=False):
    """
    Run the NARMA task with each activation at its own optimal parameters and
    plot the pre- and post-ridge responses side by side.

    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        plot: display calculated time series
        gamma: input gain
        eta: oscillation strength
        bits: bit precision
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: activation function to be used (sin**2, tanh, mg)
        cv: perform leave-one-out cross validation
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time

    Returns:
        (NRMSE, x_test_bot) on early exit when Hayes is unstable; otherwise
        None, after plotting when `plot` is True
    """
    # hayes_sol = np.array(1, u.flatten().size())
    # hayes_sol = np.array()
    # NRMSE_sol_mg = np.array()

    activate_ls = ["mg", "hayes"]
    for activate in activate_ls:
        if activate == "hayes" and (k1 / eta) < 1:
            # Should the parameters be those that put Hayes in unstable
            # territory?
            NRMSE = 1
            x_test_bot = 0
            return NRMSE, x_test_bot

        # Each activation gets its own optimal parameter set
        if activate == "hayes":
            N = 400
            tau = 400
            eta = 0.401
            gamma = 0.531
            theta = 0.2
            beta = 0.7
        if activate == "mg":
            N = 509
            tau = 509
            eta = 0.94
            gamma = 0.28
            theta = 0.834
            beta = 0.74

        # Import u, m, and target
        u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

        # Instantiate reservoir, feed in training and prediction datasets
        r1 = mod_Delay_Res(N=N, k1=k1, eta=eta, gamma=gamma, theta=theta,
                           beta=beta, tau=tau)
        x = r1.calculate(u[:train_length], m, bits, t, activate,
                         no_act_res=no_act_res)
        x_test = r1.calculate(u[train_length:], m, bits, t, activate,
                              no_act_res=no_act_res)
        if no_act_res:
            x_test_bot = r1.calculate(u[train_length:], m, bits, t, activate,
                                      no_act_res=no_act_res)[1]

        # Train using ridge regression with hyperparameter tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(
                alphas=np.logspace(-20, 5, 16), x=x, x_test=x_test,
                target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)
            # Calculate NRMSE of prediction data (50-step washout)
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:]))
                / np.var(target[train_length + 50:]))

        # Store the predictions of mg and hayes
        if activate == "mg":
            pre_ridge_mg = x.flatten()
            NRMSE_sol_mg = y_test.flatten()
        if activate == "hayes":
            pre_ridge_hayes = x.flatten()
            hayes_sol = y_test.flatten()

    if not no_act_res:
        x_test_bot = 0  # If the x(t) - x(t - tau) term isn't wanted, set the
        # flag before plotting

    # Plot predicted time series
    if plot:
        plt.figure(1)
        plt.plot(target.flatten()[train_length:], label="NARMA target")
        plt.plot(NRMSE_sol_mg, label="Mackey-Glass")
        plt.plot(hayes_sol, label="Hayes")
        plt.title("Post-ridge regression: mg vs. hayes")
        plt.legend()

        plt.figure(2)
        plt.plot(pre_ridge_mg, label="Mackey-Glass")
        plt.plot(pre_ridge_hayes, label="Hayes")
        plt.title("Pre-ridge regression: mg vs. hayes")
        plt.legend()
        plt.show()
def mod_NARMA_Test(test_length=800, train_length=800, plot=True, N=400,
                   eta=0.4, gamma=0.05, tau=400, bits=np.inf, preload=False,
                   write=False, mask=0.1, activate='mg', cv=False, beta=1.0,
                   t=1, k1=1, theta=0.2, no_act_res=False):
    """
    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        plot: display calculated time series
        gamma: input gain
        eta: oscillation strength
        bits: bit precision
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: activation function to be used (sin**2, tanh, mg)
        cv: perform leave-one-out cross validation
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time

    Returns:
        NRMSE: Normalized Root Mean Square Error
    """
    if activate == "hayes" and (k1 / eta) < 1:
        # Should the parameters be those that put Hayes in unstable
        # territory?
        NRMSE = 1
        x_test_bot = 0
        return NRMSE, x_test_bot

    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate reservoir, feed in training and prediction datasets
    r1 = mod_Delay_Res(N=N, k1=k1, eta=eta, gamma=gamma, theta=theta,
                       beta=beta, tau=tau)
    x = r1.calculate(u[:train_length], m, bits, t, activate,
                     no_act_res=no_act_res)
    # Is this correct? It looks like x_test and x_test_bot are defined as the
    # same thing
    x_test = r1.calculate(u[train_length:], m, bits, t, activate,
                          no_act_res=no_act_res)
    if no_act_res:
        x_test_bot = r1.calculate(u[train_length:], m, bits, t, activate,
                                  no_act_res=no_act_res)[1]

    # Train using ridge regression with hyperparameter tuning
    if cv:
        NRMSE, y_test, y_input = cross_validate(alphas=np.logspace(-20, 5, 16),
                                                x=x, x_test=x_test,
                                                target=target)
    else:
        clf = Ridge(alpha=0)
        clf.fit(x, target[:train_length])
        y_test = clf.predict(x_test)
        # Calculate NRMSE of prediction data (50-step washout)
        NRMSE = np.sqrt(
            np.mean(np.square(y_test[50:] - target[train_length + 50:]))
            / np.var(target[train_length + 50:]))

    # Write to file
    if write:
        write_func(x, x_test)

    if not no_act_res:
        x_test_bot = 0  # If the x(t) - x(t - tau) term isn't wanted, set the
        # flag before plotting

    # Plot predicted time series
    if plot:
        plot_func(x, x_test_bot, u, y_test, target, NRMSE, train_length, N)

    return NRMSE, x_test_bot, u
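# The NRMSE expression above recurs across the test functions; this helper
# (a suggested refactor sketch, not part of the original code) captures it
# once, including the 50-step washout used throughout. y_pred and y_true are
# assumed to be aligned 1-D arrays of equal length, e.g. y_test and
# target[train_length:].
def nrmse(y_pred, y_true, washout=50):
    """Normalized root mean square error after discarding a washout period."""
    err = y_pred[washout:] - y_true[washout:]
    return np.sqrt(np.mean(np.square(err)) / np.var(y_true[washout:]))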
def Identical_NARMA_Comp(test_length=800, train_length=800, plot=True, N=400,
                         eta=0.4, gamma=0.05, tau=400, bits=np.inf,
                         preload=False, write=False, mask=0.1, activate='mg',
                         cv=True, beta=1.0, t=1, k1=1.15, theta=0.2,
                         no_act_res=False):
    """
    Run the NARMA task with identical parameters for both activations and
    compare their pre- and post-ridge behavior.

    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        plot: display calculated time series
        gamma: input gain
        eta: oscillation strength
        bits: bit precision
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: activation function to be used (sin**2, tanh, mg)
        cv: perform leave-one-out cross validation
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time

    Returns:
        (NRMSE, x_test_bot) on early exit when Hayes is unstable; otherwise
        None, after plotting when `plot` is True
    """
    ### Redefine parameters ###
    gamma = 0.5
    eta = 0.941
    beta = 0.83435
    N = 509
    tau = 509
    theta = 0.20034

    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    # hayes_sol = np.array(1, u.flatten().size())
    # hayes_sol = np.array()
    # mg_sol_0 = np.array()

    activate_ls = ["mg", "hayes"]
    for count, activate in enumerate(activate_ls):
        if activate == "hayes" and (k1 / eta) < 1:
            # Should the parameters be those that put Hayes in unstable
            # territory?
            NRMSE = 1
            x_test_bot = 0
            return NRMSE, x_test_bot

        # Instantiate reservoir, feed in training and prediction datasets
        if activate == "mg":
            r1 = mod_Delay_Res(N=N, k1=k1, eta=eta, gamma=gamma, theta=theta,
                               beta=beta, tau=tau)
        if activate == "hayes":
            r1 = hayes_special_Delay_Res(N=N, k1=k1, eta=eta, gamma=gamma,
                                         theta=theta, beta=beta, tau=tau)
        x = r1.calculate(u[:train_length], m, bits, t, activate,
                         no_act_res=no_act_res)
        # Don't reference [0] unless no_act_res == True
        x_test = r1.calculate(u[train_length:], m, bits, t, activate,
                              no_act_res=no_act_res)

        # Train using ridge regression with hyperparameter tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(
                alphas=np.logspace(-20, 5, 16), x=x, x_test=x_test,
                target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)
            # Calculate NRMSE of prediction data (50-step washout)
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:]))
                / np.var(target[train_length + 50:]))

        # Store the predictions of mg and hayes
        if activate == "mg" and count == 0:
            mg_x = x
            mg_test = x_test
            # Append the testing pre-ridge data to see whether it lines up too
            pre_ridge_mg_0 = np.append(x.flatten(), x_test.flatten())
            mg_0_NRMSE = NRMSE
            mg_sol_0 = y_test.flatten()
        if activate == "hayes":
            hayes_x = x
            hayes_test = x_test
            pre_ridge_hayes = np.append(x.flatten(), x_test.flatten())
            hayes_NRMSE = NRMSE
            hayes_sol = y_test.flatten()
        if activate == "mg" and count == 1:
            # Reached when activate_ls is set to two mg passes; both runs
            # score the same NRMSE regardless of order
            pre_ridge_mg_1 = x.flatten()
            NRMSE_sol_mg_1 = y_test.flatten()

    if not no_act_res:
        x_test_bot = 0  # If the x(t) - x(t - tau) term isn't wanted, set the
        # flag before plotting

    ## Add/subtract the differences between mg (which works) and hayes (which
    ## doesn't) and run it back through: is it just this small difference
    ## that's causing the gap in performance?
    if 'hayes' in activate_ls:
        activate = "hayes"
        x_diff = mg_x - hayes_x
        x_test_diff = mg_test - hayes_test

        # Add the differences back onto hayes' original output (note that
        # hayes_x + x_diff is algebraically identical to mg_x, so the altered
        # run should reproduce the mg features exactly)
        x = hayes_x + x_diff
        x_test = hayes_test + x_test_diff

        # Calculate and train using ridge regression with hyperparameter
        # tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(
                alphas=np.logspace(-20, 5, 16), x=x, x_test=x_test,
                target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)
            # Calculate NRMSE of prediction data (50-step washout)
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:]))
                / np.var(target[train_length + 50:]))

        # Save the resulting NRMSE and the y-solutions of the altered hayes
        altered_hayes_NRMSE = NRMSE
        altered_hayes_sol = y_test.flatten()

    # Plot predicted time series
    if plot:
        if 'hayes' in activate_ls:
            plt.figure(1)
            plt.plot(target.flatten()[train_length:], label="NARMA target")
            plt.plot(mg_sol_0, label="Mackey-Glass")
            plt.plot(hayes_sol, label="Hayes")
            plt.title("Post-ridge regression mg vs. hayes: NRMSE_h = "
                      + str(round(hayes_NRMSE, 3)) + ", NRMSE_mg = "
                      + str(round(mg_0_NRMSE, 3)))
            plt.legend()

            plt.figure(2)
            plt.plot(pre_ridge_mg_0, label="Mackey-Glass")
            plt.plot(pre_ridge_hayes, label="Hayes")
            plt.title("Pre-ridge regression mg vs. hayes")
            plt.legend()

            plt.figure(3)
            plt.plot(altered_hayes_sol, label="altered hayes")
            plt.plot(mg_sol_0, label="Mackey-Glass")
            plt.title("Corrected hayes vs. Mackey-Glass: NRMSE_altH = "
                      + str(round(altered_hayes_NRMSE, 3)) + ", NRMSE_mg = "
                      + str(round(mg_0_NRMSE, 3)))
            plt.legend()

            plt.figure(4)
            plt.plot(np.append(x_diff.flatten(), x_test_diff.flatten()),
                     label="mg - hayes")
            plt.title("Difference between mg and hayes before ridge")
            plt.legend()
            plt.show()
        else:
            plt.figure(1)
            plt.plot(target.flatten()[train_length:], label="NARMA target")
            plt.plot(mg_sol_0, label="Mackey-Glass, 1st pass")
            plt.plot(NRMSE_sol_mg_1, label="Mackey-Glass, 2nd pass")
            plt.title("Post-ridge regression mg vs. mg")
            plt.legend()

            plt.figure(2)
            plt.plot(pre_ridge_mg_0, label="Mackey-Glass, 1st pass")
            plt.plot(pre_ridge_mg_1, label="Mackey-Glass, 2nd pass")
            plt.title("Pre-ridge regression mg vs. mg")
            plt.legend()
            plt.show()
def NARMA_Test(self, test_length=500, train_length=5000, plot=False, N=400,
               eta=0.4, gamma=0.05, tau=400, fudge=1.0, preload=False,
               write=False, mask=0.1, activate='mg', cv=False, beta=1.0, t=1,
               theta=0.2, power=1, mix_p=0.1):
    """
    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        plot: display calculated time series
        gamma: input gain
        eta: oscillation strength
        fudge: fudge factor passed to the reservoir
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: activation function to be used (sin**2, tanh, mg)
        cv: perform leave-one-out cross validation
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time
        power: nonlinearity power
        mix_p: mixing parameter passed to calculate_mix

    Returns:
        NRMSE, plus the reservoir states, target, test states, predictions,
        and the fitted regressor
    """
    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate reservoir, feed in training and prediction datasets
    r1 = ModifiedDelayRC(N=N, eta=eta, gamma=gamma, theta=theta, beta=beta,
                         tau=tau, fudge=fudge, power=power)
    x = r1.calculate_mix(u[:train_length], m, t, mix_p)[0]
    x_test = r1.calculate_mix(u[train_length:], m, t, mix_p)[0]

    # Train using ridge regression with hyperparameter tuning
    if cv:
        alphas = np.logspace(-100, 1, 100)
        NRMSE, y_test, y_input1, clf = cross_validate(alphas=alphas, x=x,
                                                      x_test=x_test,
                                                      target=target)
    else:
        clf = Ridge(alpha=0)
        # clf1 = LinearRegression(n_jobs=-1)
        clf.fit(x, target[:train_length])
        # Predict on the training states: the NRMSE below is an in-sample
        # (training) error
        y_test = clf.predict(x)
        NRMSE = np.sqrt(
            np.mean(np.square(y_test[50:] - target[50:train_length]))
            / np.var(target[50:train_length]))

    return NRMSE, x, target, x_test, y_test, clf
def NARMA_Test(self, test_length=500, train_length=5000, plot=False, N=400,
               eta=0.4, gamma=0.05, tau=400, fudge=1.0, preload=False,
               write=False, mask=0.1, activate='mg', cv=True, beta=1.0, t=1,
               theta=0.2, power=1):
    """
    Args:
        test_length: length of testing data
        train_length: length of training data
        N: number of virtual nodes
        plot: display calculated time series
        gamma: input gain
        eta: oscillation strength
        fudge: fudge factor passed to the reservoir
        preload: preload mask and time-series data
        mask: amplitude of mask values
        activate: unused; both 'mg' and 'hayes' are run
        cv: perform leave-one-out cross validation
        beta: driver gain
        t: timestep used to solve diffeq
        theta: distance between virtual nodes in time
        power: nonlinearity power

    Returns:
        NRMSE1, NRMSE2 for the mg and hayes activations, plus the train/test
        states, target, predictions, residuals, R^2 values, and the fitted
        regressors
    """
    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate reservoir; run both activations on the same data
    r1 = ModifiedDelayRC(N=N, eta=eta, gamma=gamma, theta=theta, beta=beta,
                         tau=tau, fudge=fudge, power=power)
    x1 = r1.calculate(u[:train_length], m, t, 'mg')[0]
    x2 = r1.calculate(u[:train_length], m, t, 'hayes')[0]
    x_test1 = r1.calculate(u[train_length:], m, t, 'mg')[0]
    x_test2 = r1.calculate(u[train_length:], m, t, 'hayes')[0]

    # Train using ridge regression with hyperparameter tuning
    if cv:
        alphas = np.logspace(-100, 1, 100)
        NRMSE1, y_test1, y_input1, clf1 = cross_validate(alphas=alphas, x=x1,
                                                         x_test=x_test1,
                                                         target=target)
        # Test-set alternative, kept for reference:
        # res1 = np.linalg.norm(target[50 + train_length:] - y_test1[50:]) ** 2
        # ssr = np.sum((target[50 + train_length:] - y_test1[50:]) ** 2)
        # sst = np.sum((target[50 + train_length:]
        #               - np.mean(target[50 + train_length:])) ** 2)
        # r2_1 = 1 - (ssr / sst)

        # Residuals and R^2 over the post-washout training window
        res1 = np.linalg.norm(target[50:train_length] - y_input1[50:]) ** 2
        ssr = np.sum((target[50:train_length] - y_input1[50:]) ** 2)
        # Total sum of squares
        sst = np.sum((target[50:train_length]
                      - np.mean(target[50:train_length])) ** 2)
        r2_1 = 1 - (ssr / sst)

        NRMSE2, y_test2, y_input2, clf2 = cross_validate(alphas=alphas, x=x2,
                                                         x_test=x_test2,
                                                         target=target)
        res2 = np.linalg.norm(target[50:train_length] - y_input2[50:]) ** 2
        ssr = np.sum((target[50:train_length] - y_input2[50:]) ** 2)
        r2_2 = 1 - (ssr / sst)
    else:
        clf1 = Ridge(alpha=0)
        # clf1 = LinearRegression(n_jobs=-1)
        clf1.fit(x1, target[:train_length])
        y_test1 = clf1.predict(x1)
        # In-sample (training) NRMSE
        NRMSE1 = np.sqrt(
            np.mean(np.square(y_test1[50:] - target[50:train_length]))
            / np.var(target[50:train_length]))
        res1 = np.linalg.norm(target[50:train_length] - y_test1[50:]) ** 2
        r2_1 = 0

        clf2 = Ridge(alpha=0)
        # clf2 = LinearRegression(n_jobs=-1)
        clf2.fit(x2, target[:train_length])
        y_test2 = clf2.predict(x2)
        NRMSE2 = np.sqrt(
            np.mean(np.square(y_test2[50:] - target[50:train_length]))
            / np.var(target[50:train_length]))
        res2 = np.linalg.norm(target[50:train_length] - y_test2[50:]) ** 2
        r2_2 = 0

    return (NRMSE1, NRMSE2, x1, x2, target, x_test1, x_test2, y_test1,
            y_test2, res1, res2, r2_1, r2_2, clf1, clf2)
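# Self-contained sketch of the R^2 computation used in the cv branch above:
# R^2 = 1 - SSR / SST over the post-washout training window. y_true and y_fit
# stand in for target[50:train_length] and y_input[50:] respectively.
def r_squared(y_true, y_fit):
    ssr = np.sum((y_true - y_fit) ** 2)             # residual sum of squares
    sst = np.sum((y_true - np.mean(y_true)) ** 2)   # total sum of squares
    return 1.0 - ssr / sst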
import numpy as np
from matplotlib import pyplot as plt
from helper_files import load_NARMA

u, m, target = load_NARMA(True, N=1)
u = np.reshape(u, (-1, 1))
m = np.reshape(m, (1, -1))

# Masked input J(t) = u(t) * m, flattened into a single time series
j_t = (u @ m).flatten()

# Plot J(t)
plt.plot(np.linspace(0, 400 * 0.2, 1600), j_t, label="J(t)")
plt.xlabel("seconds (theta = 0.2)")
plt.title("J(t) for a system with N = 1")
axes = plt.gca()
# axes.set_xlim([0, 400 * 0.2])
plt.legend()
plt.show()