Example #1
0
def trainedLSTMNN2():
    """Train an LSTM network on the first-order dataset and export it.

    Builds a simple LSTM network via the project helper
    buildSimpleLSTMNetwork(), trains it with RProp- until the error
    converges, persists it with exportANN(), and returns the trained
    network.

    Returns:
        The trained recurrent network.

    Equivalent manual construction (presumably what
    buildSimpleLSTMNetwork() does internally -- TODO confirm):

        n = RecurrentNetwork()
        inp = LinearLayer(100, name='input')
        hid = LSTMLayer(30, name='hidden')
        out = LinearLayer(1, name='output')
        n.addOutputModule(out)
        n.addInputModule(inp)
        n.addModule(hid)
        n.addConnection(FullConnection(inp, hid))
        n.addConnection(FullConnection(hid, out))
        n.addRecurrentConnection(FullConnection(hid, hid))
        n.sortModules()
    """
    n = buildSimpleLSTMNetwork()
    # print(...) call form works in both Python 2 and 3; the bare
    # `print x` statement form used previously is Python-2-only.
    print("Network created")

    # Load the first-order training dataset (project helper).
    d = load1OrderDataSet()
    print("Data loaded")

    # trainUntilConvergence() internally splits off a validation set and
    # stops when the validation error stops improving.
    t = RPropMinusTrainer(n, dataset=d, verbose=True)
    t.trainUntilConvergence()

    # Persist the trained network (project helper).
    exportANN(n)

    return n
Example #2
0
def trainedLSTMNN2():
    """Train an LSTM network on the first-order dataset and export it.

    Builds a simple LSTM network via the project helper
    buildSimpleLSTMNetwork(), trains it with RProp- until the error
    converges, persists it with exportANN(), and returns the trained
    network.

    Returns:
        The trained recurrent network.

    Equivalent manual construction (presumably what
    buildSimpleLSTMNetwork() does internally -- TODO confirm):

        n = RecurrentNetwork()
        inp = LinearLayer(100, name='input')
        hid = LSTMLayer(30, name='hidden')
        out = LinearLayer(1, name='output')
        n.addOutputModule(out)
        n.addInputModule(inp)
        n.addModule(hid)
        n.addConnection(FullConnection(inp, hid))
        n.addConnection(FullConnection(hid, out))
        n.addRecurrentConnection(FullConnection(hid, hid))
        n.sortModules()
    """
    n = buildSimpleLSTMNetwork()
    # print(...) call form works in both Python 2 and 3; the bare
    # `print x` statement form used previously is Python-2-only.
    print("Network created")

    # Load the first-order training dataset (project helper).
    d = load1OrderDataSet()
    print("Data loaded")

    # trainUntilConvergence() internally splits off a validation set and
    # stops when the validation error stops improving.
    t = RPropMinusTrainer(n, dataset=d, verbose=True)
    t.trainUntilConvergence()

    # Persist the trained network (project helper).
    exportANN(n)

    return n
Example #3
0
train_errors = []  # error recorded after each training cycle, for plotting
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES

# Train in short cycles so the error curve can be sampled between them.
# NOTE: `range` replaces the Python-2-only `xrange` -- the rest of this
# block already uses Python-3-style print() calls; for 100 iterations the
# difference is immaterial under Python 2.
for i in range(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS))
    stdout.flush()

print()
print("final error =", train_errors[-1])

# Compare the network's prediction against the true next sample for each
# held-out test pair.
for sample, target in zip(X_test, y_test):
    print("               sample = ", sample)
    print("predicted next sample = ", net.activate(sample))
    print("   actual next sample = ", target)
    print()

# Plot the learning curve (one point per cycle).
plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
plt.show()

exit()

# NOTE(review): everything below is unreachable dead code -- exit() above
# terminates the process. Kept for reference rather than silently deleted;
# either delete it or move it above exit() if it is still wanted. The
# import has been moved before its first use (it originally appeared AFTER
# BackpropTrainer was called, which could never work), and the Python-2
# print statements have been converted to call form.
from pybrain.supervised.trainers import BackpropTrainer

print(net.activate([2, 1, 1, 2]))

trainer = BackpropTrainer(net, ds)
print(trainer.trainUntilConvergence())