Example no. 1
 def test_randomstate_object(self):
     """two esns with the same randomstate objects should be the same"""
     rstA = np.random.RandomState(1)
     esnA = ESN(N_in, N_out, random_state=rstA)
     rstB = np.random.RandomState(1)
     esnB = ESN(N_in, N_out, random_state=rstB)
     self._compare(esnA, esnB, should_be="same")
Example no. 2
def calculate_ESN(name, rand_seed, nReservoir, spectralRadius, future, futureTotal):
    """Rolling ESN forecast: repeatedly refit on a sliding window of `<name>.txt`
    and predict `future` steps at a time, `futureTotal` steps in total."""
    data = open(name + ".txt").read().split()
    data = np.array(data).astype("float64")
    # Fixed settings; the remaining hyperparameters come from the function arguments.
    sparsity = 0.2
    noise = 0.0005

    esn = ESN(
        n_inputs=1,
        n_outputs=1,
        n_reservoir=nReservoir,
        sparsity=sparsity,
        random_state=rand_seed,
        spectral_radius=spectralRadius,
        noise=noise,
    )

    trainlen = len(data) - futureTotal
    pred_tot = np.zeros(futureTotal)

    for i in range(0, futureTotal, future):
        pred_training = esn.fit(np.ones(trainlen), data[i : trainlen + i])
        prediction = esn.predict(np.ones(future))
        pred_tot[i : i + future] = prediction[:, 0]
    return pred_tot
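A minimal usage sketch for the helper above. The file name and hyperparameter values below are placeholders (not taken from the original project), and numpy and pyESN's ESN are assumed to be imported at module level, as in the surrounding examples.

# Hypothetical call: expects a whitespace-separated price series in "tesla.txt".
predictions = calculate_ESN("tesla", rand_seed=23, nReservoir=500,
                            spectralRadius=1.2, future=1, futureTotal=7)
print(predictions)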
Example no. 3
    def test_nonsense(self):
        """parameter random_state should only accept positive integers"""
        with self.assertRaises(ValueError):
            ESN(N_in, N_out, random_state=-1)

        with self.assertRaises(Exception) as cm:
            ESN(N_in, N_out, random_state=0.5)
        self.assertIn("Invalid seed", str(cm.exception))
Example no. 4
    def test_freqgen(self):
        rng = np.random.RandomState(42)
        def frequency_generator(N,min_period,max_period,n_changepoints):
            """returns a random step function + a sine wave signal that
               changes its frequency at each such step."""
            # vector of random indices < N, padded with 0 and N at the ends:
            changepoints = np.insert(np.sort(rng.randint(0,N,n_changepoints)),[0,n_changepoints],[0,N])
            # list of interval boundaries between which the control sequence should be constant:
            const_intervals = list(zip(changepoints, np.roll(changepoints, -1)))[:-1]
            # populate a control sequence
            frequency_control = np.zeros((N,1))
            for (t0,t1) in const_intervals:
                frequency_control[t0:t1] = rng.rand()
            periods = frequency_control * (max_period - min_period) + max_period

            # run time through a sine, while changing the period length
            frequency_output = np.zeros((N,1))
            z = 0
            for i in range(N):
                z = z + 2 * np.pi / periods[i]
                frequency_output[i] = (np.sin(z) + 1)/2
            return np.hstack([np.ones((N,1)),1-frequency_control]),frequency_output

        N = 15000
        min_period = 2
        max_period = 10
        n_changepoints = N // 200
        frequency_control,frequency_output = frequency_generator(N,min_period,max_period,n_changepoints)

        traintest_cutoff = int(np.ceil(0.7*N))
        train_ctrl,train_output = frequency_control[:traintest_cutoff],frequency_output[:traintest_cutoff]
        test_ctrl, test_output  = frequency_control[traintest_cutoff:],frequency_output[traintest_cutoff:]

        esn = ESN(n_inputs = 2,
                  n_outputs = 1,
                  n_reservoir = 200,
                  spectral_radius = 0.25,
                  sparsity = 0.95,
                  noise = 0.001,
                  input_shift = [0,0],
                  input_scaling = [0.01, 3],
                  teacher_scaling = 1.12,
                  teacher_shift = -0.7,
                  out_activation = np.tanh,
                  inverse_out_activation = np.arctanh,
                  random_state = rng,
                  silent = True)

        pred_train = esn.fit(train_ctrl,train_output)
        #print "test error:"
        pred_test = esn.predict(test_ctrl)
        error = np.sqrt(np.mean((pred_test - test_output)**2))
        self.assertAlmostEqual(error,0.39037112433756577)
Example no. 5
 def test_IODimensions(self):
     """try different combinations of input & output dimensionalities & teacher forcing"""
     tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),
              (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]
     for t in tasks:
         N_in ,N_out, N_samples, tf = t
         X = np.random.randn(N_samples,N_in)  if N_in > 1 else np.random.randn(N_samples)
         y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)
         Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)
         esn = ESN(N_in,N_out,teacher_forcing=tf)
         prediction_tr = esn.fit(X,y)
         prediction_t = esn.predict(Xp)
         self.assertEqual(prediction_tr.shape,(N_samples,N_out))
         self.assertEqual(prediction_t.shape,(N_samples,N_out))
def create_neural_network(input_, output_, reservoir_, spectral_, sparsity_,
                          noise_, input_scale, random_, silent_):
    """Create an Echo State Network.
    :rtype: pyESN.ESN
    :param input_: number of input units to use in ESN
    :param output_: number of output units to use in ESN
    :param reservoir_: number of hidden units to use in the ESN
    :param spectral_: the spectral radius - scale the hidden state as such that the biggest eigenvalue equals this
    :param sparsity_: sparsity of the weight connections (proportion of weights set to 0)
    :param noise_: noise to add to each neuron, helps reduce generalization error
    :param input_scale: scale of the input - smaller will leave more trace of previous timestep input during fitting
    :param random_: use for random_state when initializing network
    :param silent_: if True, suppress progress output during fitting
    :return: pyESN Echo State Network model
    """
    model = ESN(
        n_inputs=input_,            # Inputs: ones
        n_outputs=output_,          # Outputs: predicted daily open
        n_reservoir=reservoir_,     # Size of reservoir
        spectral_radius=spectral_,  # More: longer range interactions and slowed decay of information
        sparsity=sparsity_,         # Just keep this small
        noise=noise_,               # Add noise to better model a dynamic system - reduces generalisation
        input_scaling=[input_scale],  # Scale is important - not too big as to wipe out past information
        random_state=random_,       # Random number generator
        silent=silent_              # silent=False prints which stage the ESN is at
    )
    return model
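A hedged usage sketch for create_neural_network. Every value below is an arbitrary placeholder chosen to mirror settings seen elsewhere in this collection, not the original project's configuration; numpy is assumed to be imported as np.

# Hypothetical call and a toy fit on random data.
model = create_neural_network(input_=1, output_=1, reservoir_=500,
                              spectral_=1.2, sparsity_=0.2, noise_=0.0005,
                              input_scale=0.01, random_=42, silent_=True)
pred_train = model.fit(np.ones(1000), np.random.rand(1000))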
Example no. 7
 def test_IODimensions(self):
     """try different combinations of input & output dimensionalities & teacher forcing"""
     tasks = [(1, 1, 100, True), (10, 1, 100, True), (1, 10, 100, True), (10, 10, 100, True),
              (1, 1, 100, False), (10, 1, 100, False), (1, 10, 100, False), (10, 10, 100, False)]
     for t in tasks:
         N_in, N_out, N_samples, tf = t
         X = np.random.randn(
             N_samples, N_in) if N_in > 1 else np.random.randn(N_samples)
         y = np.random.randn(
             N_samples, N_out) if N_out > 1 else np.random.randn(N_samples)
         Xp = np.random.randn(
             N_samples, N_in) if N_in > 1 else np.random.randn(N_samples)
         esn = ESN(N_in, N_out, teacher_forcing=tf)
         prediction_tr = esn.fit(X, y)
         prediction_t = esn.predict(Xp)
         self.assertEqual(prediction_tr.shape, (N_samples, N_out))
         self.assertEqual(prediction_t.shape, (N_samples, N_out))
Example no. 8
def pso_esn_parameters_for_elasticnet(x):
    ic_s = x[0]
    ic_e = x[1]
    is_s = x[2]
    is_e = x[3]
    teacher_scaling = x[4]
    teacher_shift = x[5]
    alpha = x[6]
    l1_ratio = x[7]

    esn = ESN(n_inputs = 2,
             n_outputs = 1,
             n_reservoir = n_reservoir,
             spectral_radius = spectral_radius, 
             sparsity = sparsity,
             noise = noise,
             input_shift = [is_s,is_e],#[0,0]
             input_scaling =[ic_s,ic_e],# [0.01, 3]
             teacher_scaling = teacher_scaling,#1.12,
             teacher_shift = teacher_shift,#-0.7,
             out_activation = np.tanh,
             inverse_out_activation = np.arctanh,
             random_state = rng,
             silent = False)
    esn.alpha = alpha
    esn.l1_ratio = l1_ratio
    internal_states,transient = esn.train_reservior(train_ctrl,train_output)
    pred_train = esn.train_readout_with_elasticnet(internal_states,train_output,transient)
    pred_test = esn.predict(test_ctrl)
    test_error_rate= np.sqrt(np.mean((pred_test - test_output)**2))
    #get function name as title
    title = inspect.stack()[0][3]
    print "#### {} ## train_error:{},test_error:{}".format(title,esn.train_error_rate,test_error_rate)
    return test_error_rate
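The function name suggests this objective is meant to be minimised with particle swarm optimisation. The sketch below shows one way it might be wired up using the third-party pyswarm package; the bounds and swarm settings are placeholders rather than values from the original experiment, and the globals the objective relies on (n_reservoir, rng, train_ctrl, train_output, test_ctrl, test_output, ...) must already be defined.

# Hypothetical PSO driver (requires `pip install pyswarm`; bounds are assumed):
from pyswarm import pso

lb = [0.0, 0.0, -1.0, -1.0, 0.5, -1.0, 1e-4, 0.0]  # lower bounds for x[0]..x[7]
ub = [1.0, 5.0, 1.0, 1.0, 2.0, 0.0, 1.0, 1.0]      # upper bounds
xopt, fopt = pso(pso_esn_parameters_for_elasticnet, lb, ub,
                 swarmsize=20, maxiter=10)
print("best parameter vector:", xopt, "-> test error:", fopt)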
Example no. 9
    def test_mackey(self):
        try:
            data = np.load('mackey_glass_t17.npy')
        except IOError:
            self.skipTest("missing data")

        esn = ESN(n_inputs = 1,
                  n_outputs = 1,
                  n_reservoir = 500,
                  spectral_radius = 1.5,
                  random_state=42)

        trainlen = 2000
        future = 2000
        esn.fit(np.ones(trainlen),data[:trainlen])
        prediction = esn.predict(np.ones(future))
        error = np.sqrt(np.mean((prediction.flatten() - data[trainlen:trainlen+future])**2))
        self.assertAlmostEqual(error,0.1396039098653574)
Example no. 10
 def test_serialisation(self):
     import pickle
     import io
     esn = ESN(N_in, N_out, random_state=1)
     with io.BytesIO() as buf:
         pickle.dump(esn, buf)
         buf.flush()
         buf.seek(0)
         esn_unpickled = pickle.load(buf)
     self._compare(esn, esn_unpickled, should_be='same')
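The same pickle round trip also works against a file on disk, which is the more common way to persist a trained network. A minimal sketch, assuming pyESN is importable; the file name and training data are arbitrary.

import pickle
import numpy as np
from pyESN import ESN

esn = ESN(n_inputs=1, n_outputs=1, random_state=1)
esn.fit(np.ones(100), np.sin(np.linspace(0, 10, 100)))

# Save the fitted network, then load it back.
with open("esn_model.pkl", "wb") as f:
    pickle.dump(esn, f)
with open("esn_model.pkl", "rb") as f:
    esn_loaded = pickle.load(f)

# Both objects continue from the same internal state, so their predictions match.
assert np.allclose(esn.predict(np.ones(10)), esn_loaded.predict(np.ones(10)))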
Example no. 11
    def test_mackey(self):
        try:
            data = np.load('mackey_glass_t17.npy')
        except IOError:
            self.skipTest("missing data")

        esn = ESN(n_inputs=1,
                  n_outputs=1,
                  n_reservoir=500,
                  spectral_radius=1.5,
                  random_state=42)

        trainlen = 2000
        future = 2000
        esn.fit(np.ones(trainlen), data[:trainlen])
        prediction = esn.predict(np.ones(future))
        error = np.sqrt(
            np.mean((prediction.flatten() - data[trainlen:trainlen + future])**2))
        self.assertAlmostEqual(error, 0.1396039098653574)
def henon(reservoir_size, spectral_radius, train_len, future):

    # Load the Hénon map data
    data_full = hn.henon()
    data = data_full[0]

    #Initialize ESN
    esn = ESN(n_inputs=1,
              n_outputs=1,
              n_reservoir=reservoir_size,
              spectral_radius=spectral_radius,
              random_state=42)

    # Fit the model
    pred_training = esn.fit(np.ones(train_len), data[:train_len])

    # Predict and find the error
    prediction = esn.predict(np.ones(future))
    error = np.sqrt(
        np.mean(
            (prediction.flatten() - data[train_len:train_len + future])**2))

    return error, prediction, reservoir_size, spectral_radius, train_len, future
def mackey(reservoir_size, spectral_radius, train_len, future):

    # Load in mackey-glass numpy array
    data = np.load('mackey_glass_t17.npy'
                   )  #  http://minds.jacobs-university.de/mantas/code

    #Initialize ESN
    esn = ESN(n_inputs=1,
              n_outputs=1,
              n_reservoir=reservoir_size,
              spectral_radius=spectral_radius,
              random_state=42)

    # Fit the model
    pred_training = esn.fit(np.ones(train_len), data[:train_len])

    # Predict and find the error
    prediction = esn.predict(np.ones(future))
    error = np.sqrt(
        np.mean(
            (prediction.flatten() - data[train_len:train_len + future])**2))

    return error, prediction, reservoir_size, spectral_radius, train_len, future
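An illustrative way to call the two helpers above and compare their errors. The hyperparameters are placeholders and may need adjusting to the length of each series; mackey() also expects mackey_glass_t17.npy in the working directory.

# Hypothetical comparison run (assumed hyperparameters).
for fn in (henon, mackey):
    error, prediction, size, rho, train_len, future = fn(500, 1.5, 2000, 2000)
    print("{}: reservoir={}, rho={}, RMSE={:.4f}".format(fn.__name__, size, rho, error))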
Example no. 14
def pso_esn_parameters_for_scad(x):
    # 0: tao, 1: c0, 2: IC_s, 3: IC_e, 4: IS_s, 5: IS_e, 6: teacher scaling, 7: teacher shift
    # 0: IC_s, 1: IC_e, 2: IS_s, 3: IS_e, 4: teacher scaling, 5: teacher shift, 6: tao, 7: c0  (layout used below)
    ic_s = x[0]
    ic_e = x[1]
    is_s = x[2]
    is_e = x[3]
    teacher_scaling = x[4]
    teacher_shift = x[5]
    tao = x[6]
    c0 = x[7]

    esn = ESN(n_inputs = 2,
             n_outputs = 1,
             n_reservoir = n_reservoir,
             spectral_radius = spectral_radius, 
             sparsity = sparsity,
             noise = noise,
             input_shift = [is_s,is_e],#[0,0]
             input_scaling =[ic_s,ic_e],# [0.01, 3]
             teacher_scaling = teacher_scaling,#1.12,
             teacher_shift = teacher_shift,#-0.7,
             out_activation = np.tanh,
             inverse_out_activation = np.arctanh,
             random_state = rng,
             silent = False)
    esn.penal_tao =tao
    esn.penal_c0 = c0
    internal_states,transient = esn.train_reservior(train_ctrl,train_output)
    pred_train = esn.train_readout_with_scad(internal_states,train_output,transient)
    pred_test = esn.predict(test_ctrl)
    #test_error_rate= np.sqrt(np.mean((pred_test - test_output)**2))
    test_error_rate= np.sqrt((pred_test - test_output)**2)
    #get function name as title
    title = inspect.stack()[0][3]
    print "#### {} ## train_error:{},test_error:{}".format(title,esn.train_error_rate,test_error_rate)
    return test_error_rate
    # Plot the original test sequence and the test_output series here.
Example no. 15
#Create Data
lengthTrain = 1000
lengthTest = 1000
parity = 3
input_shift = [0]
(u,y) = generateParity(lengthTrain, parity)

#Create network
esn = ESN(n_inputs = 1, 
          n_outputs = parity - 1,
          n_reservoir = 300,
#          spectral_radius = 0.25,
          sparsity = 0.9,
          noise = 0.01,
#          input_shift = input_shift,
#          input_scaling = [0.01],
#          teacher_scaling = 1.12,
          teacher_shift = -0.7,
          out_activation = np.tanh,
          inverse_out_activation = np.arctanh,
          random_state = rng,
          silent = False)

print('fitting')
pred_train = esn.fit(u, y)

#Assess Training Error     
vec_bounder = np.vectorize(bounder)
bounded = vec_bounder(pred_train)

#print(bounded)
Example no. 16
    def test_freqgen(self):
        rng = np.random.RandomState(42)

        def frequency_generator(N, min_period, max_period, n_changepoints):
            """returns a random step function + a sine wave signal that
               changes its frequency at each such step."""
            # vector of random indices < N, padded with 0 and N at the ends:
            changepoints = np.insert(np.sort(rng.randint(0, N, n_changepoints)),
                                     [0, n_changepoints], [0, N])
            # list of interval boundaries between which the control sequence
            # should be constant:
            const_intervals = list(zip(changepoints, np.roll(changepoints, -1)))[:-1]
            # populate a control sequence
            frequency_control = np.zeros((N, 1))
            for (t0, t1) in const_intervals:
                frequency_control[t0:t1] = rng.rand()
            periods = frequency_control * (max_period - min_period) + max_period

            # run time through a sine, while changing the period length
            frequency_output = np.zeros((N, 1))
            z = 0
            for i in range(N):
                z = z + 2 * np.pi / periods[i]
                frequency_output[i] = (np.sin(z) + 1) / 2
            return np.hstack([np.ones((N, 1)), 1 - frequency_control]), frequency_output

        N = 15000
        min_period = 2
        max_period = 10
        n_changepoints = int(N / 200)
        frequency_control, frequency_output = frequency_generator(
            N, min_period, max_period, n_changepoints)

        traintest_cutoff = int(np.ceil(0.7 * N))
        train_ctrl, train_output = frequency_control[:traintest_cutoff], frequency_output[:traintest_cutoff]
        test_ctrl, test_output = frequency_control[traintest_cutoff:], frequency_output[traintest_cutoff:]

        esn = ESN(n_inputs=2,
                  n_outputs=1,
                  n_reservoir=200,
                  spectral_radius=0.25,
                  sparsity=0.95,
                  noise=0.001,
                  input_shift=[0, 0],
                  input_scaling=[0.01, 3],
                  teacher_scaling=1.12,
                  teacher_shift=-0.7,
                  out_activation=np.tanh,
                  inverse_out_activation=np.arctanh,
                  random_state=rng,
                  silent=True)

        pred_train = esn.fit(train_ctrl, train_output)
        # print "test error:"
        pred_test = esn.predict(test_ctrl)
        error = np.sqrt(np.mean((pred_test - test_output)**2))
        self.assertAlmostEqual(error, 0.30519018985725715)
Example no. 17
def compair_readout():
    esn = ESN(n_inputs = 8,
             n_outputs = 1,
             n_reservoir = n_reservoir,
             spectral_radius = spectral_radius, 
             sparsity = sparsity,
             noise = noise,
             input_shift = [0.51293657,0.51293657,0.51293657,0.51293657,0.03489584,0.03489584,0.03489584,0.03489584],
             input_scaling = [0.18636639,0.18636639,0.18636639,0.18636639, 0.11791364, 0.11791364, 0.11791364, 0.11791364],
             teacher_scaling = 1.45377531,
             teacher_shift = -0.7997228,
             out_activation = np.tanh,
             inverse_out_activation = np.arctanh,
             random_state = rng,
             silent = False)
             
    esn = ESN(n_inputs = 8,
          n_outputs = 1,
          n_reservoir = n_reservoir,
          spectral_radius = spectral_radius, 
          sparsity = sparsity,
          noise = noise,
          input_shift = [0,0,0,0,0,0,0,0],
          input_scaling = [0.01,0.01,0.01,0.01,0.01,0.01,3,3],
          teacher_scaling = 1.12,
          teacher_shift = -0.7,
          out_activation = np.tanh,
          inverse_out_activation = np.arctanh,
          random_state = rng,
          silent = False)
    esn.penal_tao =0.57201544
    esn.penal_c0 = 3.76108161

    esn.penal_tao = 0.1
    esn.penal_c0 = 3.7
    #pred_train = esn.fit(train_ctrl,train_output,inspect=True)
    internal_states,transient = esn.train_reservior(train_ctrl,train_output)
#    esn_Lasso = copy.deepcopy(esn)
#    esn_Ridge = copy.deepcopy(esn)
#    esn_ElasticNet = copy.deepcopy(esn)
    esn_SCAD = copy.deepcopy(esn)
    esn.dump_parameters()
#    print "####pin"
#    pred_train = esn.train_readout_with_pin(internal_states,train_output,transient)
#    test_error("pinv",esn,pred_train)
#    exit()
#
#    print "####ridge"
#    pred_train = esn_Ridge.train_readout_with_ridge(internal_states,train_output,transient)
#    test_error("pinv",esn_Ridge,pred_train)
#
#    print "####Lasso"
#    pred_train = esn_Lasso.train_readout_with_lasso(internal_states,train_output,transient)
#    test_error("pinv",esn_Lasso,pred_train)
#
#    print "####ElasticNet"
#    pred_train = esn_ElasticNet.train_readout_with_elasticnet(internal_states,train_output,transient)
#    test_error("pinv",esn_ElasticNet,pred_train)

    print "####SCAD"
    pred_train = esn_SCAD.train_readout_with_scad(internal_states,train_output,transient)
    test_error("pinv",esn_SCAD,pred_train)
Example no. 18
import pandas as pd
from pyESN import ESN

#from matplotlib.pylab import rcParams
#rcParams['figure.figsize'] = 20,10

n_reservoir = 600
sparsity = 0.4
rand_seed = 20
spectral_radius = 1.2
noise = .0010

esn = ESN(n_inputs=1,
          n_outputs=1,
          n_reservoir=n_reservoir,
          sparsity=sparsity,
          random_state=rand_seed,
          spectral_radius=spectral_radius,
          noise=noise)

df = pd.read_csv("bank_stock_prediction.csv")

dfabbank = df['CLOSEP (ABBANK)']
dfabbank_N = dfabbank.to_numpy()

dfbankasia = df['CLOSEP (BANKASIA)']
dfbankasia_N = dfbankasia.to_numpy()

dfebl = df['CLOSEP (EBL)']
dfebl_N = dfebl.to_numpy()
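A minimal sketch of how the esn defined above might produce a rolling one-step forecast for one of these close-price series, following the same sliding-window pattern used in the other stock examples in this collection. trainlen and futureTotal are arbitrary choices, and the series must contain at least trainlen + futureTotal points.

import numpy as np

trainlen = 1000      # assumed training window length
future = 1           # predict one step at a time
futureTotal = 30     # assumed number of steps to forecast
pred_tot = np.zeros(futureTotal)

for i in range(0, futureTotal, future):
    esn.fit(np.ones(trainlen), dfabbank_N[i:trainlen + i])
    prediction = esn.predict(np.ones(future))
    pred_tot[i:i + future] = prediction[:, 0]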
Example no. 19
File: mackey.py Project: gwli/pyESN
import numpy as np
from pyESN import ESN
from matplotlib import pyplot as plt

data = np.load(
    'mackey_glass_t17.npy')  #  http://minds.jacobs-university.de/mantas/code
esn = ESN(n_inputs=1,
          n_outputs=1,
          n_reservoir=500,
          spectral_radius=1.5,
          random_state=42)

trainlen = 2000
future = 2000
pred_training = esn.fit(np.ones(trainlen), data[:trainlen])

prediction = esn.predict(np.ones(future))
print("test error: \n" + str(
    np.sqrt(
        np.mean((prediction.flatten() -
                 data[trainlen:trainlen + future])**2))))

plt.figure(figsize=(11, 1.5))
plt.plot(range(0, trainlen + future),
         data[0:trainlen + future],
         'k',
         label="target system")
plt.plot(range(trainlen, trainlen + future),
         prediction,
         'r',
         label="free running ESN")
def esn_training(index, data, ntwk_obj):
    '''ESN Training for each MBS'''
    #print(index)
    num = netcfg.num_lte_bs_mobile - netcfg.num_lte_bs

    #print(num)
    index = index - (netcfg.num_lte_bs_mobile - num)
    #print(index)
    col_mbs = 3 * netcfg.num_lte_bs_mobile + 1 + index
    #print('a',col_mbs)
    #print('all data',data)

    data_rate = data[:, col_mbs:col_mbs + 1]
    #print('data_rate',data_rate)

    data = data[:, 0:col_mbs - index]
    #print('intermediate',data)

    data = np.hstack((data, data_rate))
    #print('data',data)

    coord_x = ntwk_obj.grid_x_coord
    #print(coord_x,'\n')
    coord_y = ntwk_obj.grid_y_coord
    #print(coord_y,'\n')

    length = len(coord_x)
    #print(length)
    length1 = len(coord_y)
    #print(length)
    #print(length1)
    #xyz_coord = np.array([1]
    xyz_data = np.array([])
    z = 100
    utility = 0

    for i in range(length):
        xyz_coord = np.array([1])
        for num in range(netcfg.num_lte_bs_mobile):
            #print(i)
            #print(length)
            j = i + num
            #print(j)
            if j < length:
                #print(j)
                x = coord_x[j]
                y = coord_y[j]
                xy = np.hstack((x, y))
                xyz = np.hstack((xy, z))
                #xyz_coord = np.hstack((xyz_coord,xyz))
                #print('a',xyz_coord)
            else:
                #print('j',j)
                j = j - length
                #print('j',j)
                x = coord_x[j]
                y = coord_y[j]
                xy = np.hstack((x, y))
                xyz = np.hstack((xy, z))
                #xyz_coord = np.hstack((xyz_coord,xyz))
                #print('b',xyz_coord)

            #print(xyz)
            if num == 0:
                #print(xyz_coord)
                xyz = np.hstack((1, xyz))
                data_xyz = xyz
                #print(data_xyz)
            else:
                data_xyz = np.hstack((data_xyz, xyz))
                #print(data_xyz)

        data_xyz = np.hstack((data_xyz, utility))
        #print(data_xyz)
        #print(i)
        if i == 0:
            xyz_coord_data = data_xyz
            #print(xyz_coord_data)
        else:
            xyz_coord_data = np.vstack((xyz_coord_data, data_xyz))
            #print(xyz_coord_data)

    #print(data)
    print(xyz_coord_data)
    data = np.vstack((data, xyz_coord_data))
    #print(data)

    #data_rate = np.transpose(data_rate)
    #print(data_rate)
    #print('data',data)
    data_rate = data[:, col_mbs - index]
    #print(data_rate)
    data_rate_max = max(data_rate)
    #print('max',data_rate_max)
    #exit(0)

    data_rate = data_rate / data_rate_max / 2
    #print(data_rate)
    data_rate = np.transpose(np.array([data_rate]))
    #print('data rate',data_rate)

    N = netcfg.total_tick

    rng = np.random.RandomState(42)
    traintest_cutoff = int(np.ceil(N))
    train_data, train_data_rate = data[:N], data_rate[:N]
    #print('train data',train_data)
    #print('train data rate',train_data_rate)
    #print('train data',train_data.shape)
    #print('train data rate',train_data_rate.shape)
    test_data, test_data_rate = data[N:], data_rate[N:]
    #print('test data',test_data)
    #print('test data rate',test_data_rate)
    #print('test data',test_data.shape)
    #print('test data rate',test_data_rate.shape)
    col_mbs = col_mbs + 1 - index
    zeros = np.zeros(col_mbs)
    #print('zeros', zeros)
    ones = np.ones(col_mbs)
    #print('ones', ones)

    # create an echo state network
    esn = ESN(
        n_inputs=col_mbs,
        n_outputs=1,
        n_reservoir=475,
        spectral_radius=0.25,
        sparsity=0.25,
        noise=0.001,
        input_shift=zeros,
        input_scaling=ones,
        #input_scaling=[1, 1],
        teacher_scaling=1.50,
        #teacher_scaling=1,
        teacher_shift=-0.7,
        #teacher_shift=0,
        # teacher_scaling = None,
        # teacher_shift = None,
        out_activation=np.tanh,
        inverse_out_activation=np.arctanh,
        random_state=rng,
        silent=False)

    print('----------------------------------------------------------------')
    print('esn {}'.format(index + 1))
    print('----------------------------------------------------------------')

    print('n_inputs=', esn.n_inputs)
    print('n_outputs=', esn.n_outputs)
    print('n_reservoir=', esn.n_reservoir)
    print('spectral radius=', esn.spectral_radius)
    print('sparsity=', esn.sparsity)
    print('noise=', esn.noise)
    print('input_shift=', esn.input_shift)
    print('input_scaling=', esn.input_scaling)
    print('teacher_forcing=', esn.teacher_forcing)
    # print ('feedback_scaling=' , esn1.feedback_scaling)
    print('teacher_scaling=', esn.teacher_scaling)
    print('teacher_shift=', esn.teacher_shift)
    print('out_activation=', esn.out_activation)
    print('inverse_out=', esn.inverse_out_activation)
    print('random_state=', esn.random_state)
    print('silent=', esn.silent)

    #print('1')
    # fitting
    pred_train = esn.fit(train_data, train_data_rate)

    #print('2')
    # prediction
    pred_test = esn.predict(data)
    #print(pred_test)
    #print('data rate',data_rate)
    #print('pred test',pred_test,'\n')
    #print('3')
    #pp.figure(1)
    axis = (netcfg.num_lte_bs_mobile * 100) + 10 + (index + 1)
    pp.subplot(axis)
    pp.title('MBS {}'.format(index + 1))
    #print(data_rate)
    #print(pred_test)
    pp.plot(range(len(data)), data_rate, range(len(pred_test)), pred_test)
Example no. 21
n_features = len(products)  # Number of variables to predict.

data = np.array(df_mba).astype('float64')

## Initial parameters for the echo state network (ESN), chosen arbitrarily
n_reservoir = 5000
sparsity = 0.5
random_state = 1
spectral_radius = 1.7
noise = 0.004216965034286

esn = ESN(
    n_inputs=n_features,   # Number of inputs (products) used in the network.
    n_outputs=n_features,  # Number of outputs (products) used in the network. In this case they are the same.
    n_reservoir=n_reservoir,
    sparsity=sparsity,
    random_state=1,
    spectral_radius=spectral_radius,
    noise=noise)
##

#Primeira predição:
trainlen = 130  # Number of points (days) to use for training.
future = 30  # Number of points (days) to predict.

pred_training0 = esn.fit(np.ones((trainlen, n_features)),
                         data[:trainlen, :trainlen])
prediction0 = esn.predict(np.ones((future, n_features)))
errorlist0, errormean0 = results(
    data, prediction0, trainlen, future, plotfigures=False)
Example no. 22
if __name__ == '__main__':
    import time
    import datetime

    print(datetime.datetime.today())
    print(f'pickle number: {str(pknum)}')
    print(f'delay: {delay_minutes} min')
    print(f'model_ui: {model_env["model_ui"]}')
    print(f'tasks: {str(tasks)}')
    time.sleep(delay_minutes * 60)
    portal = DataPortal()
    loop = 0
    while loop != end_int:
        if 'new' in tasks:
            gen = portal.iter_get_uids('daily_prices', 'default', model_env['tickers'])
            net = ESN(1, 1, n_reservoir=model_env['n_res_list'][0], sparsity=model_env['sparsity_list'][0], noise=0)
            for i in range(1, len(model_env['n_res_list'])):
                temp_net = ESN(1, 1, n_reservoir=model_env['n_res_list'][i], sparsity=model_env['sparsity_list'][i],
                               noise=0)
                net.merge(temp_net)
            x_train, x_test = np.zeros((0, sum(model_env['n_res_list']) + 1)), np.zeros(
                (0, sum(model_env['n_res_list']) + 1))
            y_train, y_test, y_cv, y_tcv = [], [], [], []
            w_train, w_test = [], []
            prep = Pipeline([('detrend', LinDetrend()), ('scaler', StandardScaler())])
            for ticker in gen:
                log_prices = np.log10(np.array(ticker['adjusted_close']).reshape((len(ticker), 1)))
                if len(log_prices) > model_env['train_len']:
                    prep.fit(log_prices[:model_env['train_len']])
                    log_prices = prep.transform(log_prices)
                    if model_env['vol']:
Example no. 23
import numpy as np

from pyESN import ESN
import driving_utils

x_data, y_data = driving_utils.getData("/train_data/aalborg.csv")
x_data1, y_data1 = driving_utils.getData("/train_data/alpine-1.csv")
x_data2, y_data2 = driving_utils.getData("/train_data/f-speedway.csv")
print(x_data.shape)
print(x_data1.shape)
print(x_data2.shape)
print(y_data.shape)
print(y_data1.shape)
print(y_data2.shape)

x_data = np.concatenate((x_data, x_data1), axis=0)
x_data = np.concatenate((x_data, x_data2), axis=0)
y_data = np.concatenate((y_data, y_data1), axis=0)
y_data = np.concatenate((y_data, y_data2), axis=0)
print(y_data.shape)
print(x_data.shape)
rng = np.random.RandomState(42)
esn = ESN(n_inputs=22,
          n_outputs=3,
          n_reservoir=2000,
          spectral_radius=1.5,
          random_state=42)
esn.fit(x_data, y_data, inspect=True)


def predict(X):
    return esn.predict(X)
Example no. 24
    def test_inputshift(self):
        """input shift factors of different formats should be correctly interpreted or rejected"""
        esn = ESN(N_in,N_out,input_shift=1)
        self.assertTrue(np.all(1+self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X,self.y)
        esn.predict(self.Xp)

        esn = ESN(N_in,N_out,input_shift=[1]*N_in)
        self.assertTrue(np.all(1+self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X,self.y)
        esn.predict(self.Xp)

        esn = ESN(N_in,N_out,input_shift=np.array([1]*N_in))
        self.assertTrue(np.all(1+self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X,self.y)
        esn.predict(self.Xp)

        with self.assertRaises(ValueError):
            esn = ESN(N_in,N_out,input_shift=[1]*(N_in+1))

        with self.assertRaises(ValueError):
            esn = ESN(N_in,N_out,input_shift=np.array([[1]*N_in]))
Example no. 25
max_period = 10
n_changepoints = int(N/200)
frequency_control,frequency_output = frequency_generator(N,min_period,max_period,n_changepoints)

traintest_cutoff = int(np.ceil(0.7*N))

train_ctrl,train_output = frequency_control[:traintest_cutoff],frequency_output[:traintest_cutoff]
test_ctrl, test_output  = frequency_control[traintest_cutoff:],frequency_output[traintest_cutoff:]

esn = ESN(n_inputs = 2,
          n_outputs = 1,
          n_reservoir = 200,
          spectral_radius = 0.25,
          sparsity = 0.95,
          noise = 0.001,
          input_shift = [0,0],
          input_scaling = [0.01, 3],
          teacher_scaling = 1.12,
          teacher_shift = -0.7,
          out_activation = np.tanh,
          inverse_out_activation = np.arctanh,
          random_state = rng,
          silent = False)

pred_train = esn.fit(train_ctrl,train_output)

print("test error:")
pred_test = esn.predict(test_ctrl)
print(np.sqrt(np.mean((pred_test - test_output)**2)))

window_tr = range(int(len(train_output)/4),int(len(train_output)/4+2000))
plt.figure(figsize=(10,1.5))
Example no. 26
 def test_none(self):
     """two esns with no specified seed should be different"""
     esnA = ESN(N_in, N_out, random_state=None)
     esnB = ESN(N_in, N_out, random_state=None)
     self._compare(esnA, esnB, should_be="different")
Example no. 27
import numpy as np
from matplotlib import rc

rc('text', usetex=False)
import import_ipynb
import sklearn
from sklearn.pipeline import Pipeline
import pyESN
import pickle
from pyESN import ESN
#%matplotlib inline

data = open("tesla.txt").read().split()
data = np.array(data).astype('float64')

esn = ESN(n_inputs=1,
          n_outputs=1,
          n_reservoir=43,
          sparsity=0.1,
          random_state=35,
          spectral_radius=0.1,
          noise=0.0007)


def customfunct(img):
    img = open("tesla.txt").read().split()
    img = np.array(img).astype('float64')
    trainlen = 1000
    future = 1
    futureTotal = 200
    pred_tot = np.zeros(futureTotal)

    for i in range(0, futureTotal, future):
        pred_training = esn.fit(np.ones(trainlen), data[i:trainlen + i])
Example no. 28
loss = np.zeros([k_size,radius_set_size, reservoir_set_size])


for v in range(k_size):
    futureTotal=test-(test%k_set[v])
    for l in range(radius_set_size):
        rho = radius_set[l]
        for j in range(reservoir_set_size):
            n_reservoir = reservoir_set[j]
            future = k_set[v]
            pred_tot=np.zeros(futureTotal)

            esn = ESN(n_inputs = 1,
              n_outputs = 1, 
              n_reservoir = n_reservoir,
              sparsity=sparsity,
              random_state=rand_seed,
              spectral_radius = rho,
              noise=noise)

            for i in range(0,futureTotal,future):
                pred_training = esn.fit(np.ones(trainlen),data[i:trainlen+i])
                prediction = esn.predict(np.ones(future))
                pred_tot[i:i+future] = prediction[:,0]
            
            loss[v, l, j] = MSE(pred_tot, data[trainlen-1:trainlen+futureTotal-1])        
            print('window = ', k_set[v], ', rho = ', radius_set[l], ', reservoir_size = ', reservoir_set[j], ', MSE = ', loss[v][l][j])
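Once the grid search above finishes, the best combination can be read straight off the loss array. A small follow-up sketch, assuming k_set, radius_set and reservoir_set from the earlier cells are still in scope.

import numpy as np

best_v, best_l, best_j = np.unravel_index(np.argmin(loss), loss.shape)
print('best window =', k_set[best_v],
      ', best rho =', radius_set[best_l],
      ', best reservoir size =', reservoir_set[best_j],
      ', MSE =', loss[best_v, best_l, best_j])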


Example no. 29
def esn2_training(data, index2):
    #print('a',data)
    N = np.size(data, 0)
    #print(N)
    rng = np.random.RandomState(42)
    traintest_cutoff = int(np.ceil(0.8 * N))
    data_index = data[:, 3 * netcfg.num_lte_bs_mobile + 1]
    #print(np.transpose(np.array([data_index])))
    #print('a',data_index)
    data_index_max = max(data_index)
    data_index = data_index / data_index_max / 2
    data_index = np.transpose(np.array([data_index]))
    #print(data_index)
    train_data, train_data_index = data[:traintest_cutoff], data_index[:traintest_cutoff]
    test_data, test_data_index = data[traintest_cutoff:], data_index[traintest_cutoff:]
    #print(test_data)
    col_mbs = 1 + 3 * netcfg.num_lte_bs_mobile + 1
    zeros = np.zeros(col_mbs)
    #print('zeros', zeros)
    ones = np.ones(col_mbs)

    esn = ESN(
        n_inputs=col_mbs,
        n_outputs=1,
        n_reservoir=14,
        spectral_radius=0.25,
        sparsity=0.25,
        noise=0.001,
        input_shift=zeros,
        input_scaling=ones,
        #input_scaling=[1, 1],
        teacher_scaling=0.5,
        #teacher_scaling=1,
        teacher_shift=-0,
        #teacher_shift=0,
        # teacher_scaling = None,
        # teacher_shift = None,
        out_activation=np.tanh,
        inverse_out_activation=np.arctanh,
        random_state=rng,
        silent=False)

    print('----------------------------------------------------------------')
    print('esn {}'.format(index2 + 1))
    print('----------------------------------------------------------------')

    print('n_inputs=', esn.n_inputs)
    print('n_outputs=', esn.n_outputs)
    print('n_reservoir=', esn.n_reservoir)
    print('spectral radius=', esn.spectral_radius)
    print('sparsity=', esn.sparsity)
    print('noise=', esn.noise)
    print('input_shift=', esn.input_shift)
    print('input_scaling=', esn.input_scaling)
    print('teacher_forcing=', esn.teacher_forcing)
    # print ('feedback_scaling=' , esn1.feedback_scaling)
    print('teacher_scaling=', esn.teacher_scaling)
    print('teacher_shift=', esn.teacher_shift)
    print('out_activation=', esn.out_activation)
    print('inverse_out=', esn.inverse_out_activation)
    print('random_state=', esn.random_state)
    print('silent=', esn.silent)

    pred_train = esn.fit(train_data, train_data_index)
    pred_test = esn.predict(test_data)
    #print('b',pred_test*2*data_index_max)
    predicted_index = pred_test * 2 * data_index_max
    #print(predicted_index)
    pp.figure(2)
    axis = (netcfg.num_lte_bs_mobile * 100) + 10 + (index2 + 1)
    pp.subplot(axis)
    #pp.title('MBS {}'.format (index2+1))
    #pp.title('ESN Performance for Predicting Next Best Location Index')
    pp.xlabel('Number of iterations')
    pp.ylabel('Next Location Index Value')
    pp.ylim(0, 400)
    #pp.plot(range(len(test_data)), test_data_index*2*data_index_max, range(len(pred_test)), pred_test*2*data_index_max)
    pp.plot(range(len(test_data_index)),
            test_data_index * 2 * data_index_max,
            c='r',
            marker='o',
            label='Exhaustive Search')
    pp.plot(range(len(test_data_index)),
            predicted_index,
            c='g',
            linestyle='--',
            label='ESN-Opt')
    pp.xlabel('Network Run Time (slot)')
    pp.ylabel('Location Index')
    pp.grid(True)
    pp.legend()
Example no. 30
train_ctrl,train_output = frequency_control[:traintest_cutoff],frequency_output[:traintest_cutoff]
test_ctrl, test_output  = frequency_control[traintest_cutoff:],frequency_output[traintest_cutoff:]


n_reservoir = 800
spectral_radius = 0.9
sparsity = 0.05
noise = 0.001
#####################
esn = ESN(n_inputs = 8,
          n_outputs = 1,
          n_reservoir = n_reservoir,
          spectral_radius = spectral_radius, 
          sparsity = sparsity,
          noise = noise,
          input_shift = [0,0,0,0,0,0,0,0],
          input_scaling = [0.01,0.01,0.01,0.01,0.01,0.01,3,3],
          teacher_scaling = 1.12,
          teacher_shift = -0.7,
          out_activation = np.tanh,
          inverse_out_activation = np.arctanh,
          random_state = rng,
          silent = False)


def pso_esn_parameters_for_scad(x):
    # 0: tao, 1: c0, 2: IC_s, 3: IC_e, 4: IS_s, 5: IS_e, 6: teacher scaling, 7: teacher shift
    # 0: IC_s, 1: IC_e, 2: IS_s, 3: IS_e, 4: teacher scaling, 5: teacher shift, 6: tao, 7: c0  (layout used below)
    ic_s = x[0]
    ic_e = x[1]
    is_s = x[2]
    is_e = x[3]
Example no. 31
def ESN_train(data):
    #print(data)
    total_col = data.shape[1]

    opt_ang_col = data[:, total_col - 1]
    #print(opt_ang_col)

    max_angle = max(opt_ang_col)
    #print(max_angle)
    opt_ang_col = opt_ang_col / max_angle / 2

    #print(opt_ang_col)

    opt_ang_col = np.transpose(opt_ang_col)

    #print(opt_ang_col)

    N = flytera_cfg.sim_tick

    rng = np.random.RandomState(42)
    traintest_cutoff = int(np.ceil(0.66 * N))
    train_data, train_data_angle = data[:traintest_cutoff], opt_ang_col[:traintest_cutoff]
    test_data, test_data_angle = data[traintest_cutoff:], opt_ang_col[traintest_cutoff:]
    #print('TEST DAtA ANGLE', test_data_angle)
    rows_zeros_ip_shift = np.zeros(total_col)
    #print('zeros', zeros)
    rows_ones_ip_scale = np.ones(total_col)

    esn = ESN(
        n_inputs=total_col,
        n_outputs=1,
        n_reservoir=50,
        spectral_radius=0.25,
        sparsity=0.25,
        noise=0.001,
        input_shift=rows_zeros_ip_shift,
        input_scaling=rows_ones_ip_scale,
        #input_scaling=[1, 1],
        teacher_scaling=0.4,
        #teacher_scaling=1,
        teacher_shift=0,
        #teacher_shift=0,
        # teacher_scaling = None,
        # teacher_shift = None,
        out_activation=np.tanh,
        inverse_out_activation=np.arctanh,
        random_state=rng,
        silent=False)

    print('n_inputs=', esn.n_inputs)
    print('n_outputs=', esn.n_outputs)
    print('n_reservoir=', esn.n_reservoir)
    print('spectral radius=', esn.spectral_radius)
    print('sparsity=', esn.sparsity)
    print('noise=', esn.noise)
    print('input_shift=', esn.input_shift)
    print('input_scaling=', esn.input_scaling)
    print('teacher_forcing=', esn.teacher_forcing)
    # print ('feedback_scaling=' , esn1.feedback_scaling)
    print('teacher_scaling=', esn.teacher_scaling)
    print('teacher_shift=', esn.teacher_shift)
    print('out_activation=', esn.out_activation)
    print('inverse_out=', esn.inverse_out_activation)
    print('random_state=', esn.random_state)
    print('silent=', esn.silent)

    pred_train = esn.fit(train_data, train_data_angle)
    #print(pred_train)
    pred_test = esn.predict(test_data)
    #print('PREDICTED TEST', pred_test)
    pred_max_angle = max(pred_test) * 2 * max_angle
    return_angles = np.ceil(pred_test * 2 * max_angle)
    pred_max_angle = int(np.ceil(pred_max_angle))
    #print(pred_max_angle)
    #exit()
    pp.figure(1)
    font = {'family': 'sans', 'size': 14}
    pp.rc('font', **font)
    pp.xlim(0, 100)
    pp.ylim(0, 20)
    pp.xlabel('Network Run Time (slot)')
    pp.ylabel('Directivity Angle (degree)')
    #print()
    #print()
    a = (test_data_angle * 2 * max_angle)[0]
    b = (pred_test * 2 * max_angle)[0]
    #print(a)
    #print(b)
    acc = 100 - ((a - b) / a) * 100
    #print(acc)
    #exit()
    #print(test_data_angle*2*max_angle)
    #print(pred_test*2*max_angle)
    before = sum(test_data_angle * 2 * max_angle)
    after = sum(pred_test * 2 * max_angle)
    #print(before)
    #print(after)
    error = (abs(after - before) / after) * 100
    #print(accuracy1)
    #accuracy2 = (after/before)*100
    #print(accuracy2)

    pp.plot(range(len(test_data_angle)),
            test_data_angle * 2 * max_angle,
            c='r',
            marker='o',
            label='Optimal Directivity Angle')
    pp.plot(range(len(test_data_angle)),
            pred_test * 2 * max_angle,
            c='k',
            linestyle='--',
            label='Predicted Directivity Angle')
    pp.legend()
    pp.grid(True)
    pp.show()
    exit()
    list_angles = [(pred_max_angle), (pred_max_angle) * 2,
                   (pred_max_angle) * 4, (pred_max_angle) * 6,
                   (pred_max_angle) * 8]
    #print(list_angles)

    return return_angles
def esn_training(index, data, ntwk_obj):
    '''ESN Training for each MBS'''
    #print(index)
    num = netcfg.num_lte_bs_mobile - netcfg.num_lte_bs

    #print(num)
    index = index - (netcfg.num_lte_bs_mobile - num)
    #print(index)
    col_mbs = 3 * netcfg.num_lte_bs_mobile + 1 + index
    #print('a',col_mbs)
    #print('all data',data)

    data_rate = data[:, col_mbs:col_mbs + 1]
    #print('data_rate',data_rate)

    data = data[:, 0:col_mbs - index]
    #print('intermediate',data)

    data = np.hstack((data, data_rate))

    #print('data',data)

    coord_x = ntwk_obj.grid_x_coord
    #print(coord_x,'\n')
    coord_y = ntwk_obj.grid_y_coord
    #print(coord_y,'\n')

    #data_rate = np.transpose(data_rate)
    #print(data_rate)
    #print('data',data)
    data_rate = data[:, col_mbs - index]
    #print(data_rate)
    data_rate_max = max(data_rate)
    #print('max',data_rate_max)
    #exit(0)
    ntwk_obj.max_data = data_rate_max
    #print(ntwk_obj.max_data)
    data_rate = data_rate / data_rate_max / 2
    #print(data_rate)
    data_rate = np.transpose(np.array([data_rate]))
    #print('data rate',data_rate)

    N = netcfg.total_tick

    rng = np.random.RandomState(42)
    traintest_cutoff = int(np.ceil(0.8 * N))
    train_data, train_data_rate = data[:traintest_cutoff], data_rate[:traintest_cutoff]
    #print('train data',train_data)
    #print('train data rate',train_data_rate)
    #print('train data',train_data.shape)
    #print('train data rate',train_data_rate.shape)
    test_data, test_data_rate = data[traintest_cutoff:], data_rate[traintest_cutoff:]
    #print('test data',test_data)
    #print('test data rate',test_data_rate)
    p1 = sum(test_data_rate)
    print('p1', p1)
    #print('test data',test_data.shape)
    #print('test data rate',test_data_rate.shape)
    col_mbs = col_mbs + 1 - index
    zeros_row = np.zeros(col_mbs)
    #print('zeros', zeros)
    ones_row = np.ones(col_mbs)
    #print('ones', ones)
    #print(col_mbs)
    # create an echo state network
    esn = ESN(
        n_inputs=col_mbs,
        n_outputs=1,
        n_reservoir=21,
        spectral_radius=0.25,
        sparsity=0.25,
        noise=0.001,
        input_shift=zeros_row,
        input_scaling=ones_row,
        #input_scaling=[1, 1],
        teacher_scaling=0.5,
        #teacher_scaling=1,
        teacher_shift=-0,
        #teacher_shift=0,
        # teacher_scaling = None,
        # teacher_shift = None,
        out_activation=np.tanh,
        inverse_out_activation=np.arctanh,
        random_state=rng,
        silent=False)

    # print('----------------------------------------------------------------')
    # print ('esn {}'.format(index+1))
    # print('----------------------------------------------------------------')

    # print ('n_inputs=', esn.n_inputs)
    # print ('n_outputs=', esn.n_outputs)
    # print ('n_reservoir=', esn.n_reservoir)
    # print ('spectral radius=', esn.spectral_radius)
    # print ('sparsity=', esn.sparsity)
    # print ('noise=', esn.noise)
    # print ('input_shift=', esn.input_shift)
    # print ('input_scaling=', esn.input_scaling)
    # print ('teacher_forcing=', esn.teacher_forcing)
    # # print ('feedback_scaling=' , esn1.feedback_scaling)
    # print ('teacher_scaling=', esn.teacher_scaling)
    # print ('teacher_shift=', esn.teacher_shift)
    # print ('out_activation=', esn.out_activation)
    # print ('inverse_out=', esn.inverse_out_activation)
    # print ('random_state=', esn.random_state)
    # print ('silent=', esn.silent)

    #print('1')
    # fitting

    pred_train = esn.fit(train_data, train_data_rate)

    #print(pred_train)
    #print('2')
    # prediction
    #print(data.shape)

    pred_test = esn.predict(test_data)

    # print('data rate',data_rate)
    # print('pred test',pred_test,'\n')
    # print('3')
    pp.figure(1)
    font = {'family': 'sans', 'size': 16}
    pp.rc('font', **font)
    axis = (netcfg.num_lte_bs_mobile * 100) + 10 + (index + 1)
    pp.subplot(axis)
    #pp.title('MBS {}'.format (index+1))
    #pp.title('ESN Performance for Rate Prediction')
    pp.xlabel('Network Run Time (slot)')
    pp.ylabel('FBS Rate (Mbps)')
    # print(data_rate)
    # print(pred_test)

    pp.plot(range(len(test_data_rate)),
            test_data_rate * 2 * data_rate_max,
            c='r',
            marker='o',
            label='Measured Rate')
    pp.plot(range(len(test_data_rate)),
            pred_test * 2 * data_rate_max,
            c='g',
            linestyle='--',
            label='ESN-Pdt Prediction')
    pp.legend()
    pp.grid(True)
    return esn
Example no. 33
    def test_inputshift(self):
        """input shift factors of different formats should be correctly interpreted or rejected"""
        esn = ESN(N_in, N_out, input_shift=1)
        self.assertTrue(np.all(1 + self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X, self.y)
        esn.predict(self.Xp)

        esn = ESN(N_in, N_out, input_shift=[1] * N_in)
        self.assertTrue(np.all(1 + self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X, self.y)
        esn.predict(self.Xp)

        esn = ESN(N_in, N_out, input_shift=np.array([1] * N_in))
        self.assertTrue(np.all(1 + self.X == esn._scale_inputs(self.X)))
        esn.fit(self.X, self.y)
        esn.predict(self.Xp)

        with self.assertRaises(ValueError):
            esn = ESN(N_in, N_out, input_shift=[1] * (N_in + 1))

        with self.assertRaises(ValueError):
            esn = ESN(N_in, N_out, input_shift=np.array([[1] * N_in]))
Example no. 34
 def test_integer(self):
     """two esns with the same seed should be the same"""
     esnA = ESN(N_in, N_out, random_state=1)
     esnB = ESN(N_in, N_out, random_state=1)
     self._compare(esnA, esnB, should_be="same")