Example #1
import random

from numpy import empty, hstack

import datagen  # project-local module providing datagen.sine (assumed)
import ESN      # project-local module providing the ESN class (assumed)


def main():
    """Read data, train on data, test on data, BAM"""
    # three concatenated 300-sample sine segments, each scaled by 0.5
    data = datagen.sine(300, 5.0) * 0.5
    data = hstack((data, datagen.sine(300, 10.0) * 0.5))
    data = hstack((data, datagen.sine(300, 20.0) * 0.5))

    # step input (0.0 / 0.5 / 1.0) marking which segment is currently active
    inputs = empty(300)
    inputs.fill(0.0)
    tmp = empty(300)
    tmp.fill(0.5)
    inputs = hstack((inputs, tmp))
    tmp.fill(1.0)
    inputs = hstack((inputs, tmp))

    besttrain = {'val': 1000, 'idx': 0}
    besttest = {'val': 1000, 'idx': 0}

    # brute-force search over random seeds for the best-performing reservoir
    for i in range(1000):
        random.seed(i)
        esn = ESN.ESN(input_size=1, hidden_size=100, output_size=1)
        train_out = esn.train(data, inputs)
        test_out = esn.test(data, inputs)
        trainmse = mse(train_out, data[100:])
        testmse = mse(test_out, data)

        if trainmse < besttrain['val']:
            besttrain['val'] = trainmse
            besttrain['idx'] = i
            print "newbesttrain", trainmse

        if testmse < besttest['val']:
            besttest['val'] = testmse
            besttest['idx'] = i
            print "newbesttest", testmse

    # report the best mean squared errors found over all seeds
    print("Train MSE", besttrain)
    print("Test MSE", besttest)
Example #2
import csv

import numpy as np
import matplotlib.pyplot as plt

f = open('sway.csv', 'r')

reader = csv.reader(f)
header = next(reader)

x = []
y = []

for row in reader:
    x.append(float(row[1]))
    y.append(float(row[2]))
f.close()

import esn

din = np.array(y[0:100], np.float32)
dout = np.array(y[100:200], np.float32)
#yf = fft(din)

plt.plot(y[3:1003], label="y")

echo_state = esn.ESN(1, 200, 1)
echo_state.train(din, dout)
out = echo_state.prop_sequence(y[0:1000])[1]

print(din)

plt.plot(out, label="esn")

plt.legend()
plt.show()
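
There is no quantitative check of the fit in this snippet; a minimal sketch of one, under the assumption that out lines up sample-for-sample with the first 1000 values of y (the same mean-squared-error idea as in Example #1):

target = np.asarray(y[0:1000], np.float32)
prediction = np.asarray(out, np.float32).ravel()[:len(target)]  # alignment is an assumption
print("MSE vs. y[0:1000]:", np.mean((prediction - target) ** 2))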
Example #3
            # inside the transient: no full XOR window exists yet,
            # so emit a placeholder target
            target_signal.append([-1.0])

        else:
            r = 0.0  # identity element of XOR
            # XOR together the `stride` input bits ending `delay` steps back
            for j in range(i - (delay + stride - 1), i - (delay - 1)):
                if input_signal[j] == [1.0]:  # XORing with True
                    r = 1.0 - r  # flip the result
                # else: XORing with False leaves r unchanged

            target_signal.append([r])

    return input_signal, target_signal


# build the ESN, then attach a sparse random reservoir of 1000 units
e = esn.ESN(1, 1)
e.random_reservoir(size=1000, connectivity=0.01, spectral_radius=0.3)
#e.unit_circle_reservoir(size=1000, spectral_radius=0.01, dense=True)

stride = 4
delay = 3
# roughly when stride + delay >= 8,
# the NRMS error goes above 0.1 with 1000 reservoir neurons

print("Stride:", stride, "Delay:", delay)

plotTraining = False
plotTesting = True

# TRAINING
length = 1000
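
The NRMS error mentioned in the comment above is not computed in this fragment; a minimal sketch of one common definition (RMSE normalized by the standard deviation of the target; the exact normalization used here is an assumption):

import numpy as np

def nrmse(predicted, target):
    """Root-mean-square error normalized by the target's standard deviation."""
    predicted = np.asarray(predicted, dtype=float).ravel()
    target = np.asarray(target, dtype=float).ravel()
    rmse = np.sqrt(np.mean((predicted - target) ** 2))
    return rmse / np.std(target)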
Example #4
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft  # fft is assumed to come from numpy.fft

# f and reader (a csv.reader over the price CSV) are assumed to be
# set up above, as in Example #2 (see the sketch after this example)
date = []
opening = []
high = []
low = []
closing = []

for row in reader:
    date.append(str(row[0]))
    opening.append(float(row[1]))
    high.append(float(row[2]))
    low.append(float(row[3]))
    closing.append(float(row[4]))
f.close()

import esn

din = np.array(closing[0:300], np.float32)
dout = np.array([0] * 300, np.float32)
yf = fft(din)

print(dout)

plt.plot(closing, label="close")
plt.plot(opening, label="open")

echo = esn.ESN(1, 300, 1)
echo.train(yf, dout)
out = echo.prop_sequence(yf)[1]  # call the trained instance, not the esn module

plt.plot(out, label="esn")

plt.legend()
plt.show()
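
The file-opening code for this snippet is not shown; a minimal sketch of the assumed setup, following the same pattern as Example #2 (the filename prices.csv is a placeholder, not from the original):

import csv

f = open('prices.csv', 'r')  # hypothetical filename
reader = csv.reader(f)
header = next(reader)        # skip the header row, as in Example #2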
Example #5
        self.saved_actions = []
        self.rewards = []

    def forward(self, x):
        x = F.relu(self.affine1(x))
        action_scores = self.action_head(x)  # actor head
        state_values = self.value_head(x)    # critic head
        return F.softmax(action_scores, dim=-1), state_values


input_dim = 4
n_hidden = 200
w_sparsity = 0.1
reservoir = esn.ESN(input_dim=input_dim,
                   hidden_dim=n_hidden,
                   output_dim=1,
                   w_sparsity=w_sparsity,
                   feedbacks=True,
                   skip_output=True)
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=3e-4)
eps = np.finfo(np.float32).eps.item()


def select_action(state):
    # state = torch.from_numpy(state).float()
    probs, state_value = model(state)
    m = Categorical(probs)
    action = m.sample()
    model.saved_actions.append(
        SavedAction(m.log_prob(action), state_value.view(-1)))
    return action.item()
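
This fragment relies on names defined elsewhere; a minimal sketch of the assumed setup, following the standard PyTorch actor-critic pattern (the nn.Linear layers referenced in forward — affine1, action_head, value_head — are assumed to be defined in the part of __init__ cut off above):

from collections import namedtuple

import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

import esn  # the ESN module used above (project-specific)

# (log_prob, value) pairs recorded by select_action for the policy-gradient update
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])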