def optimal_NARMA_Comp(test_length=800,
                       train_length=800,
                       plot=True,
                       N=400,
                       eta=0.4,
                       gamma=0.05,
                       tau=400,
                       bits=np.inf,
                       preload=True,
                       write=False,
                       mask=0.1,
                       activate='mg',
                       cv=False,
                       beta=1.0,
                       t=1,
                       k1=1.15,
                       theta=0.2,
                       no_act_res=False):
    """
	Args:
		test_length: length of testing data
		train_length: length of training data
		N: number of virtual nodes
		plot: display calculated time series
		gamma: input gain
		eta: oscillation strength
		bits: bit precision
		preload: preload mask and time-series data
		mask: amplitude of mask values
		activate: activation function to be used (sin**2,tanh,mg)
		cv: perform leave-one-out cross validation
		beta: driver gain
		t: timestep used to solve diffeq,
		theta: distance between virtual nodes in time

	Returns:
		None in the normal case (the mg vs. hayes comparison is plotted);
		(NRMSE, x_test_bot) only on the early Hayes-instability exit
	"""

    activate = ["mg", "hayes"]
    for activate in activate:
        if activate == "Hayes" and (k1 / eta) < 1:
            NRSME = 1
            x_test_bot = 0
            return NRSME, x_test_bot  # Should the parameters be those that put Hayes in unstable territory,

        if activate == "hayes":
            N = 400
            tau = 400
            eta = 0.401
            gamma = 0.531
            theta = 0.2
            beta = 0.7

        if activate == "mg":
            N = 509
            tau = 509
            eta = 0.94
            gamma = 0.28
            theta = 0.834
            beta = 0.74

        # Import u, m, and target
        u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

        # Instantiate the reservoir and feed in the training and prediction datasets
        r1 = mod_Delay_Res(N=N,
                           k1=k1,
                           eta=eta,
                           gamma=gamma,
                           theta=theta,
                           beta=beta,
                           tau=tau)
        x = r1.calculate(u[:train_length],
                         m,
                         bits,
                         t,
                         activate,
                         no_act_res=no_act_res)
        # When no_act_res is True, calculate() also returns the x(t) - x(t-tau) term;
        # x_test_bot picks that up from the second return value below.
        x_test = r1.calculate(u[train_length:],
                              m,
                              bits,
                              t,
                              activate,
                              no_act_res=no_act_res)

        if no_act_res:
            x_test_bot = r1.calculate(u[train_length:],
                                      m,
                                      bits,
                                      t,
                                      activate,
                                      no_act_res=no_act_res)[1]

        # Train using Ridge Regression with hyperparameter tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(alphas=np.logspace(
                -20, 5, 16),
                                                    x=x,
                                                    x_test=x_test,
                                                    target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)

            # Calculate NRMSE of prediction data
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:])) /
                np.var(target[train_length + 50:]))

        # Store the NRMSE predictions of mg and hayes
        if activate == "mg":
            pre_ridge_mg = x.flatten()
            NRMSE_sol_mg = y_test.flatten()
        if activate == "hayes":
            pre_ridge_hayes = x.flatten()
            hayes_sol = y_test.flatten()

    if not no_act_res:
        x_test_bot = 0  # The x(t) - x(t-tau) term was not requested, so use a placeholder

    # Plot predicted Time Series
    if plot:
        plt.figure(1)
        plt.plot(target.flatten()[train_length:], label="Target (test segment)")
        plt.plot(NRMSE_sol_mg, label="Mackey-Glass")
        plt.plot(hayes_sol, label="Hayes")
        plt.title("Post-Ridge Regression mg vs. hayes")
        plt.legend()

        plt.figure(2)
        plt.plot(pre_ridge_mg, label="Mackey-Glass")
        plt.plot(pre_ridge_hayes, label="Hayes")
        plt.title("Pre-Ridge Regression mg vs. hayes")
        plt.legend()

        plt.show()
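# The washout-and-NRMSE step above is repeated in every test below; a minimal standalone
# sketch of it, assuming numpy is imported as np (as the defaults above already require)
# and that y_pred / y_true are aligned arrays covering the same segment:
def nrmse(y_pred, y_true, washout=50):
    """Normalized root mean square error after discarding the first `washout` samples."""
    err = y_pred[washout:] - y_true[washout:]
    return np.sqrt(np.mean(np.square(err)) / np.var(y_true[washout:]))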
def mod_NARMA_Test(test_length=800,
                   train_length=800,
                   plot=True,
                   N=400,
                   eta=0.4,
                   gamma=0.05,
                   tau=400,
                   bits=np.inf,
                   preload=False,
                   write=False,
                   mask=0.1,
                   activate='mg',
                   cv=False,
                   beta=1.0,
                   t=1,
                   k1=1,
                   theta=0.2,
                   no_act_res=False):
    """
	Args:
		test_length: length of testing data
		train_length: length of training data
		N: number of virtual nodes
		plot: display calculated time series
		gamma: input gain
		eta: oscillation strength
		bits: bit precision
		preload: preload mask and time-series data
		mask: amplitude of mask values
		activate: activation function to be used (sin**2,tanh,mg)
		cv: perform leave-one-out cross validation
		beta: driver gain
		t: timestep used to solve diffeq,
		theta: distance between virtual nodes in time

	Returns:
		NRMSE: Normalized Root Mean Square Error
		x_test_bot: the x(t) - x(t-tau) term (0 unless no_act_res is True)
		u: the input sequence
	"""

    if activate == "Hayes" and (k1 / eta) < 1:
        NRSME = 1
        x_test_bot = 0
        return NRSME, x_test_bot  # Should the parameters be those that put Hayes in unstable territory,

    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    # Instantiate the reservoir and feed in the training and prediction datasets
    r1 = mod_Delay_Res(N=N,
                       k1=k1,
                       eta=eta,
                       gamma=gamma,
                       theta=theta,
                       beta=beta,
                       tau=tau)
    x = r1.calculate(u[:train_length],
                     m,
                     bits,
                     t,
                     activate,
                     no_act_res=no_act_res)
    # When no_act_res is True, calculate() also returns the x(t) - x(t-tau) term;
    # x_test_bot picks that up from the second return value below.
    x_test = r1.calculate(
        u[train_length:], m, bits, t, activate, no_act_res=no_act_res)

    if no_act_res:
        x_test_bot = r1.calculate(u[train_length:],
                                  m,
                                  bits,
                                  t,
                                  activate,
                                  no_act_res=no_act_res)[1]

    # Train using Ridge Regression with hyperparameter tuning
    if cv:
        NRMSE, y_test, y_input = cross_validate(alphas=np.logspace(-20, 5, 16),
                                                x=x,
                                                x_test=x_test,
                                                target=target)
    else:
        clf = Ridge(alpha=0)
        clf.fit(x, target[:train_length])
        y_test = clf.predict(x_test)

        # Calculate NRMSE of prediction data
        NRMSE = np.sqrt(
            np.mean(np.square(y_test[50:] - target[train_length + 50:])) /
            np.var(target[train_length + 50:]))

    # Write to File
    if write:
        write_func(x, x_test)

    if not no_act_res:
        x_test_bot = 0  # The x(t) - x(t-tau) term was not requested, so use a placeholder before plotting

    # Plot predicted Time Series
    if plot:
        plot_func(x, x_test_bot, u, y_test, target, NRMSE, train_length, N)

    return NRMSE, x_test_bot, u
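# Usage sketch for mod_NARMA_Test (hypothetical parameter choices), assuming the helpers
# referenced above (load_NARMA, mod_Delay_Res, cross_validate, plot_func, write_func)
# are defined elsewhere in this module:
if __name__ == "__main__":
    nrmse_mg, _, _ = mod_NARMA_Test(activate='mg', cv=True, plot=False)
    print("NARMA-10 NRMSE (mg):", round(float(nrmse_mg), 3))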
def Identical_NARMA_Comp(test_length=800,
                         train_length=800,
                         plot=True,
                         N=400,
                         eta=0.4,
                         gamma=0.05,
                         tau=400,
                         bits=np.inf,
                         preload=False,
                         write=False,
                         mask=0.1,
                         activate='mg',
                         cv=True,
                         beta=1.0,
                         t=1,
                         k1=1.15,
                         theta=0.2,
                         no_act_res=False):
    """
	Args:
		test_length: length of testing data
		train_length: length of training data
		N: number of virtual nodes
		plot: display calculated time series
		gamma: input gain
		eta: oscillation strength
		bits: bit precision
		preload: preload mask and time-series data
		mask: amplitude of mask values
		activate: activation function to be used (sin**2,tanh,mg)
		cv: perform leave-one-out cross validation
		beta: driver gain
		t: timestep used to solve diffeq,
		theta: distance between virtual nodes in time

	Returns:
		None in the normal case (the comparison is plotted);
		(NRMSE, x_test_bot) only on the early Hayes-instability exit
	"""
    ### Redefine parameters ###
    gamma = 0.5
    eta = 0.941
    beta = 0.83435
    N = 509
    tau = 509
    theta = 0.20034

    # Import u, m, and target
    u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

    activate_ls = ["mg", "hayes"]
    for count, activate in enumerate(activate_ls):

        if activate == "Hayes" and (k1 / eta) < 1:
            NRSME = 1
            x_test_bot = 0
            return NRSME, x_test_bot  # Should the parameters be those that put Hayes in unstable territory,

        # Instantiate the reservoir and feed in the training and prediction datasets
        if activate == "mg":
            r1 = mod_Delay_Res(N=N,
                               k1=k1,
                               eta=eta,
                               gamma=gamma,
                               theta=theta,
                               beta=beta,
                               tau=tau)
        if activate == "hayes":
            r1 = hayes_special_Delay_Res(N=N,
                                         k1=k1,
                                         eta=eta,
                                         gamma=gamma,
                                         theta=theta,
                                         beta=beta,
                                         tau=tau)
        x = r1.calculate(u[:train_length],
                         m,
                         bits,
                         t,
                         activate,
                         no_act_res=no_act_res)
        # calculate() returns only the reservoir states here; index into the result
        # only when no_act_res == True and a tuple is returned.
        x_test = r1.calculate(
            u[train_length:], m, bits, t, activate, no_act_res=no_act_res)

        # Train using Ridge Regression with hyperparameter tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(alphas=np.logspace(
                -20, 5, 16),
                                                    x=x,
                                                    x_test=x_test,
                                                    target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)

            # Calculate NRMSE of prediction data
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:])) /
                np.var(target[train_length + 50:]))

        # Store the NRMSE predictions of mg and hayes
        if activate == "mg" and count == 0:
            mg_x = x
            mg_test = x_test
            pre_ridge_mg_0 = x.flatten()
            np.append(
                pre_ridge_mg_0, x_test.flatten()
            )  # Add on the end the testing pre-ridge data to see if that lines up also
            mg_0_NRMSE = NRMSE
            mg_sol_0 = y_test.flatten()

        if activate == "hayes":
            hayes_x = x
            hayes_test = x_test
            pre_ridge_hayes = x.flatten()
            np.append(pre_ridge_hayes, x_test.flatten())
            hayes_NRMSE = NRMSE
            hayes_sol = y_test.flatten()

        if activate == "mg" and count == 1:  # This one checks out, regardless of order both mg runs score same NRMSE
            pre_ridge_mg_1 = x.flatten()
            NRMSE_sol_mg_1 = y_test.flatten()

    if not no_act_res:
        x_test_bot = 0  # The x(t) - x(t-tau) term was not requested, so use a placeholder before plotting

    ## Add/subtract the difference between mg (which works) and hayes (which doesn't) and run it
    ## back through: is this small difference alone what causes the gap in performance?
    if 'hayes' in activate_ls:
        activate = "hayes"
        x_diff = mg_x - hayes_x
        x_test_diff = mg_test - hayes_test
        # Add the differences to hayes original output
        x = hayes_x + x_diff
        x_test = hayes_test + x_test_diff

        # Calculate and train
        # Train using Ridge Regression with hyperparameter tuning
        if cv:
            NRMSE, y_test, y_input = cross_validate(alphas=np.logspace(
                -20, 5, 16),
                                                    x=x,
                                                    x_test=x_test,
                                                    target=target)
        else:
            clf = Ridge(alpha=0)
            clf.fit(x, target[:train_length])
            y_test = clf.predict(x_test)

            # Calculate NRMSE of prediction data
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[train_length + 50:])) /
                np.var(target[train_length + 50:]))

        # Save the resulting NRMSE Values and also the y-solutions of the altered hayes
        altered_hayes_NRMSE = NRMSE
        altered_hayes_sol = y_test.flatten()

    # Plot predicted Time Series
    if plot:
        if 'hayes' in activate_ls:
            plt.figure(1)
            plt.plot(target.flatten()[train_length:],
                     label="Target (test segment)")
            plt.plot(mg_sol_0, label="Mackey-Glass")
            plt.plot(hayes_sol, label="Hayes")
            plt.title("Post-Ridge Regression mg vs. hayes: NRMSE_h = " +
                      str(round(hayes_NRMSE, 3)) + ", NRMSE_mg = " +
                      str(round(mg_0_NRMSE, 3)))
            plt.legend()

            plt.figure(2)
            plt.plot(pre_ridge_mg_0, label="Mackey-Glass")
            plt.plot(pre_ridge_hayes, label="Hayes")
            plt.title("Pre-Ridge Regression mg vs. hayes")
            plt.legend()

            plt.figure(3)
            plt.plot(altered_hayes_sol, label='altered Hayes')
            plt.plot(mg_sol_0, label='Mackey-Glass')
            plt.title("Corrected Hayes vs. Mackey-Glass: NRMSE_altH = " +
                      str(round(altered_hayes_NRMSE, 3)) + ", NRMSE_mg = " +
                      str(round(mg_0_NRMSE, 3)))
            plt.legend()

            plt.figure(4)
            plt.plot(np.append(x_diff.flatten(), x_test_diff.flatten()),
                     label='mg - hayes')
            plt.title("difference between mg and hayes before ridge")
            plt.legend()

            plt.show()
        else:
            plt.figure(1)
            plt.plot(target.flatten()[train_length:],
                     label="Target (test segment)")
            plt.plot(mg_sol_0, label="Mackey-Glass 1st pass")
            plt.plot(NRMSE_sol_mg_1, label="Mackey-Glass 2nd pass")
            plt.title("Post-Ridge Regression mg vs. mg")
            plt.legend()

            plt.figure(2)
            plt.plot(pre_ridge_mg_0, label="Mackey-Glass 1st pass")
            plt.plot(pre_ridge_mg_1, label="Mackey-Glass 2nd pass")
            plt.title("Pre-Ridge Regression mg vs. mg")
            plt.legend()

            plt.show()
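# Usage sketch for Identical_NARMA_Comp: run both reservoirs with identical parameters and
# show the comparison figures (the function plots rather than returning results). Assumes
# mod_Delay_Res, hayes_special_Delay_Res, load_NARMA, and cross_validate are available.
if __name__ == "__main__":
    Identical_NARMA_Comp(train_length=800, test_length=800, cv=True, plot=True)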
# Example #4
def NARMA_Test(test_length=800,
               train_length=800,
               N=400,
               eta=0.35,
               gamma=0.05,
               tau=400,
               bits=np.inf,
               preload=False,
               mask=0.1,
               activate='mg',
               cv=False,
               beta=1.0,
               t=1,
               theta=0.2,
               noise_scale=0.001):
    """
	Args:
		test_length: length of testing data
		train_length: length of training data
		num_loops: number of delay loops in reservoir
		a: ridge regression parameter
		N: number of virtual nodes
		plot: display calculated time series
		gamma: input gain
		eta: oscillation strength
		phi: phase of MZN
		r: loop delay length
		bits: bit precision
		preload: preload mask and time-series data
		mask: amplitude of mask values
		activate: activation function to be used (sin**2,tanh,mg)
		cv: perform leave-one-out cross validation
		beta: driver gain
		V_low: ADC lower bound
		V_high: ADC upper bound
		t: timestep used to solve diffeq
		layers: number of hidden layers, ie number of cascaded reservoirs
		sr: splitting ratio
		switching: WDM switching
		w: number of wavelengths
		auto: automatically calculate ADC range
		IA: input to all layers of (deep) network

	Returns:
		NRMSE: Normalized Root Mean Square Error
	"""
    # Import u and m
    if preload:
        file1 = open("Data/Input_sequence.txt",
                     "r")  # Reads input and masking files. stores them in u/m
        file2 = open("Data/mask_2.txt", "r")
        contents = file1.readlines()
        contents2 = file2.readlines()
        u = []
        m = []
        # The preload files are assumed to hold 1000 input samples and a 400-value mask
        for i in range(1000):
            u.append(float(contents[i][0:contents[i].find("\t")]))
            if i < 400:
                m.append(float(contents2[i][0:contents2[i].find("\n")]))
        file1.close()
        file2.close()
        u = np.array(u)
        m = np.array(m)
    # Randomly initialize u and m
    else:
        u = np.random.rand(train_length + test_length) / 2.
        while NARMA_Diverges(u):
            u = np.random.rand(train_length + test_length) / 2.
        m = np.array([random.choice([-mask, mask]) for i in range(N)])

    # Calculate NARMA10 target
    target = NARMA_Generator(len(u), u)
    alphas = np.logspace(-20, 5, 16)

    # Instantiate the reservoir and feed in the training and prediction datasets
    r1 = ModifiedDelayRC(N=N,
                         eta=eta,
                         gamma=gamma,
                         theta=theta,
                         beta=beta,
                         tau=tau)
    x = r1.calculate(u[:train_length], m, t, activate, noise_scale)
    x_test = r1.calculate(u[train_length:], m, t, activate, noise_scale)

    # Train using Ridge Regression with hyperparameter tuning
    if cv:
        NRMSE, y_test, y_input = cross_validate(alphas, x, x_test, target)
    else:
        clf = Ridge(alpha=0)
        clf.fit(x, target[:train_length])
        y_test = clf.predict(x_test)
        y_input = clf.predict(x)

        # Calculate NRMSE of prediction data
        NRMSE = np.sqrt(np.mean(np.square(y_test[50:] - target[train_length + 50:])) / \
                        np.var(target[train_length + 50:]))

    return NRMSE
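# A self-contained toy illustration of the ridge readout used above: fit Ridge(alpha=0) on
# placeholder "reservoir states" (random arrays here, not a real reservoir), predict the test
# segment, and score it with the same washout NRMSE formula. All shapes are hypothetical;
# np and Ridge are assumed imported at module level, as the functions above already require.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    states_train = rng.random((800, 400))   # stand-in for x
    states_test = rng.random((800, 400))    # stand-in for x_test
    target_toy = rng.random(1600) / 2.      # stand-in for the NARMA-10 target
    readout = Ridge(alpha=0)
    readout.fit(states_train, target_toy[:800])
    pred = readout.predict(states_test)
    toy_nrmse = np.sqrt(np.mean(np.square(pred[50:] - target_toy[850:])) /
                        np.var(target_toy[850:]))
    print("toy NRMSE:", round(float(toy_nrmse), 3))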
# Example #5
    def NARMA_Test(self,
                   test_length=500,
                   train_length=5000,
                   plot=False,
                   N=400,
                   eta=0.4,
                   gamma=0.05,
                   tau=400,
                   fudge=1.0,
                   preload=False,
                   write=False,
                   mask=0.1,
                   activate='mg',
                   cv=False,
                   beta=1.0,
                   t=1,
                   theta=0.2,
                   power=1,
                   mix_p=0.1):
        """
		Args:
			test_length: length of testing data
			train_length: length of training data
			N: number of virtual nodes
			plot: display calculated time series
			gamma: input gain
			eta: oscillation strength
			bits: bit precision
			preload: preload mask and time-series data
			mask: amplitude of mask values
			activate: activation function to be used (sin**2,tanh,mg)
			cv: perform leave-one-out cross validation
			beta: driver gain
			t: timestep used to solve diffeq,
			theta: distance between virtual nodes in time

		Returns:
			NRMSE: Normalized Root Mean Square Error
		"""

        # Import u, m, and target
        u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

        # Instantiate the reservoir and feed in the training and prediction datasets
        r1 = ModifiedDelayRC(N=N,
                             eta=eta,
                             gamma=gamma,
                             theta=theta,
                             beta=beta,
                             tau=tau,
                             fudge=fudge,
                             power=power)
        x = r1.calculate_mix(u[:train_length], m, t, mix_p)[0]
        x_test = r1.calculate_mix(u[train_length:], m, t, mix_p)[0]

        # Train using Ridge Regression with hyperparameter tuning
        if cv:
            alphas = np.logspace(-100, 1, 100)
            NRMSE, y_test, y_input1, clf = cross_validate(alphas=alphas,
                                                          x=x,
                                                          x_test=x_test,
                                                          target=target)

        else:
            clf = Ridge(alpha=0)
            # clf1 = LinearRegression(n_jobs=-1)
            clf.fit(x, target[:train_length])
            # Note: this branch evaluates on the training states (x), not x_test,
            # so the NRMSE below is a training error.
            y_test = clf.predict(x)

            # Calculate NRMSE of prediction data
            NRMSE = np.sqrt(
                np.mean(np.square(y_test[50:] - target[50:train_length])) /
                np.var(target[50:train_length]))

        return NRMSE, x, target, x_test, y_test, clf
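# Usage sketch: this NARMA_Test is a method, so it is called on an instance of its owning
# class, which is not shown in this excerpt; `Experiment` below is a hypothetical stand-in.
#
#     exp = Experiment()
#     nrmse, x, target, x_test, y_test, clf = exp.NARMA_Test(cv=True, mix_p=0.1)
#     print("NRMSE:", round(float(nrmse), 3))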
# Example #6
	def NARMA_Test(self, test_length=500, train_length=5000,
	               plot=False, N=400, eta=0.4, gamma=0.05, tau=400, fudge=1.0,
	               preload=False, write=False, mask=0.1, activate='mg',
	               cv=True, beta=1.0, t=1, theta=0.2, power=1):
		"""
		Args:
			test_length: length of testing data
			train_length: length of training data
			N: number of virtual nodes
			plot: display calculated time series
			gamma: input gain
			eta: oscillation strength
			bits: bit precision
			preload: preload mask and time-series data
			mask: amplitude of mask values
			activate: activation function to be used (sin**2,tanh,mg)
			cv: perform leave-one-out cross validation
			beta: driver gain
			t: timestep used to solve diffeq,
			theta: distance between virtual nodes in time

		Returns:
			NRMSE: Normalized Root Mean Square Error
		"""

		# Import u, m, and target
		u, m, target = load_NARMA(preload, train_length, test_length, mask, N)

		# Instantiate the reservoir and feed in the training and prediction datasets
		r1 = ModifiedDelayRC(N=N, eta=eta, gamma=gamma, theta=theta,
		                     beta=beta, tau=tau, fudge=fudge, power=power)
		x1 = r1.calculate(u[:train_length], m, t, 'mg')[0]
		x2 = r1.calculate(u[:train_length], m, t, 'hayes')[0]
		# Reservoir states for the test segment with each activation
		x_test1 = r1.calculate(u[train_length:], m, t, 'mg')[0]
		x_test2 = r1.calculate(u[train_length:], m, t, 'hayes')[0]

		# Train using Ridge Regression with hyperparameter tuning
		if cv:
			alphas = np.logspace(-100, 1, 100)
			NRMSE1, y_test1, y_input1, clf1 = cross_validate(alphas=alphas, x=x1, x_test=x_test1, target=target)
			# res1 = np.linalg.norm(target[50+train_length:] - y_test1[50:]) ** 2
			# ssr = np.sum((target[50+train_length:] - y_test1[50:]) ** 2)
			#
			# #  total sum of squares
			# sst = np.sum((target[50+train_length:] - np.mean(target[50+train_length:])) ** 2)
			#
			# r2_1 = 1 - (ssr / sst)
			# NRMSE2, y_test2, y_input2 = cross_validate(alphas=np.logspace(-50, 5, 500), x=x2, x_test=x_test2,
			# 											target=target)
			# res2 = np.linalg.norm(target[50+train_length:] - y_test2[50:]) ** 2
			# ssr = np.sum((target[50+train_length:] - y_test2[50:]) ** 2)
			#
			# r2_2 = 1 - (ssr / sst)
			res1 = np.linalg.norm(target[50:train_length] - y_input1[50:]) ** 2
			ssr = np.sum((target[50:train_length] - y_input1[50:]) ** 2)

			#  total sum of squares
			sst = np.sum((target[50:train_length] - np.mean(target[50:train_length])) ** 2)

			r2_1 = 1 - (ssr / sst)
			NRMSE2, y_test2, y_input2, clf2 = cross_validate(alphas=alphas, x=x2, x_test=x_test2, target=target)
			res2 = np.linalg.norm(target[50:train_length] - y_input2[50:]) ** 2
			ssr = np.sum((target[50:train_length] - y_input2[50:]) ** 2)

			r2_2 = 1 - (ssr / sst)
		else:
			# Without cv, the predictions below are made on the training states, so these NRMSEs are training errors
			clf1 = Ridge(alpha=0)
			# clf1 = LinearRegression(n_jobs=-1)
			clf1.fit(x1, target[:train_length])
			y_test1 = clf1.predict(x1)

			# Calculate NRMSE of prediction data
			NRMSE1 = np.sqrt(
				np.mean(np.square(y_test1[50:] - target[50:train_length])) / np.var(target[50:train_length]))
			res1 = np.linalg.norm(target[50:train_length] - y_test1[50:]) ** 2
			r2_1 = 0

			clf2 = Ridge(alpha=0)
			# clf2 = LinearRegression(n_jobs=-1)
			clf2.fit(x2, target[:train_length])
			y_test2 = clf2.predict(x2)

			# Calculate NRMSE of prediction data
			NRMSE2 = np.sqrt(
				np.mean(np.square(y_test2[50:] - target[50:train_length])) / np.var(target[50:train_length]))
			res2 = np.linalg.norm(target[50:train_length] - y_test2[50:]) ** 2
			r2_2 = 0

		return NRMSE1, NRMSE2, x1, x2, target, x_test1, x_test2, y_test1, y_test2, res1, res2, r2_1, r2_2, clf1, clf2
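# The R^2 bookkeeping above (ssr / sst) can be factored into a small helper; a minimal
# standalone sketch using the same 50-sample washout convention and numpy as np:
def r_squared(y_pred, y_true, washout=50):
	"""Coefficient of determination over the post-washout samples."""
	ssr = np.sum((y_true[washout:] - y_pred[washout:]) ** 2)
	sst = np.sum((y_true[washout:] - np.mean(y_true[washout:])) ** 2)
	return 1.0 - ssr / sst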