Example #1
File: rnn.py Project: cy94/ml2
from __future__ import print_function

from itertools import cycle
from sys import stdout

import numpy as np
from scipy.io import wavfile

from pybrain.datasets import SequentialDataSet
from pybrain.structure.modules import LSTMLayer
from pybrain.supervised import RPropMinusTrainer
from pybrain.tools.shortcuts import buildNetwork


def main():
    # buffer for the generated audio samples
    generated_data = [0 for i in range(10000)]
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")

    # one-step-ahead pairs: each sample's target is the sample that follows
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    net = buildNetwork(1, 5, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()

    # predict new values by feeding the network its own output
    old_sample = [100]

    # fixed: the original looped to 500000, overrunning the
    # 10000-element generated_data buffer
    for i in xrange(len(generated_data)):
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)

    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
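Note: get_data_from_wav is not shown in this example. A minimal sketch of such a helper, assuming it simply wraps scipy.io.wavfile.read and collapses stereo input to a single channel (the name and behaviour are assumptions, not code from the cy94/ml2 project):

import numpy as np
from scipy.io import wavfile

def get_data_from_wav(path):
    """Hypothetical helper: return (sample_rate, 1-D float sample array)."""
    rate, samples = wavfile.read(path)
    if samples.ndim > 1:  # stereo: average the channels
        samples = samples.mean(axis=1)
    return rate, np.asarray(samples, dtype=float)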
Example #2
def train(self, ds, epochs_per_cycle, cycles):
    trainer = RPropMinusTrainer(self.n, dataset=ds)
    train_errors = []
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * epochs_per_cycle
        print("\r epoch {}/{}".format(epoch, epochs_per_cycle * cycles))
        sys.stdout.flush()
    print("Final Error: " + str(train_errors[-1]))
    return train_errors[-1]
Example #3
def ltsmXY(tin, tout, title='ltsm.png'):

    #datain = zip(tin[:-3], tin[1:-2], tin[2:-1])
    #datain = zip(tin[:-8], tin[1:-7], tin[2:-6], tin[3:-5], tin[4:-4], tin[5:-3],tin[6:-2], tin[7:-1])
    #datain = zip(tin[:-12], tin[1:-11], tin[2:-10], tin[3:-9], tin[4:-8], tin[5:-7],tin[6:-6], tin[7:-5], tin[8:-4], tin[9:-3], tin[10:-2], tin[11:-1])
    datain = zip(tin[:-16], tin[1:-15], tin[2:-14], tin[3:-13], tin[4:-12], tin[5:-11],tin[6:-10], tin[7:-9], tin[8:-8], tin[9:-7], tin[10:-6], tin[11:-5], tin[12:-4], tin[13:-3], tin[14:-2], tin[15:-1])

    #dataout = tout[3:]
    #dataout = tout[8:]
    #dataout = tout[12:]
    dataout = tout[16:]

    #ds = SequentialDataSet(3, 1)
    #ds = SequentialDataSet(8, 1)
    #ds = SequentialDataSet(12, 1)
    ds = SequentialDataSet(16, 1)

    for x, y in zip(datain[:len(datain)/2], dataout[:len(datain)/2]):
        ds.addSample(x, y)


    # add layers until overfitting the training data
    #net = buildNetwork(3,5,1,hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(8, 8, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(12, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    net = buildNetwork(16, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    pred_out = []
    for i in range(len(datain)):
        pred_out.append(net.activate(datain[i]))
    
    fig = plt.figure()
    #tout[16:].plot(ax=ax, title='Occupancy')
    plt.plot(tout[16:].index, tout[16:], 'y', linewidth=1.5)
    plt.plot(tout[16:].index, pred_out, 'b+')
    plt.legend(['Occupancy', 'LTSM'])
    fig.tight_layout()
    plt.savefig(title, bbox_inches='tight')  # fixed: 'inches' is not a savefig argument
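Note: the commented-out variants above differ only in the window size (3, 8, 12 or 16 lagged values). A small helper, sketched here as an assumption rather than code from the original project, builds the same lagged input/target pairs for any window size:

def make_lagged_pairs(tin, tout, window):
    """Lagged inputs from tin, aligned targets from tout."""
    datain = [tuple(tin[i:i + window]) for i in range(len(tin) - window)]
    dataout = tout[window:]
    return datain, dataout

# the window-16 case used above is then:
# datain, dataout = make_lagged_pairs(tin, tout, 16)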
Example #4
def ltsm(data):
    from pybrain.datasets import SequentialDataSet
    from itertools import cycle
    
    datain = zip(data[:-6], data[1:-5], data[2:-4], data[3:-3], data[4:-2], data[5:-1])
    dataout = data[6:]
    ds = SequentialDataSet(6, 1)
    for x, y in zip(datain, dataout):
        ds.addSample(x, y)

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure.modules import LSTMLayer

    net = buildNetwork(6, 7, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    from pybrain.supervised import RPropMinusTrainer
    from sys import stdout
    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    '''
    plt.figure()
    plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
    plt.xlabel('epoch')
    plt.ylabel('error')
    plt.show()
    '''

    test_error = 0.
    cnt = 0
    for sample, target in ds.getSequenceIterator(0):
        #print "sample = ",  sample
        #print "predicted next sample = %4.1f" % net.activate(sample)
        #print "actual next sample = %4.1f" % target
        test_error += abs(net.activate(sample) - target)
        cnt += 1
    test_error /= cnt 
    print "test (train) error =", test_error
    def handle(self, *args, **options):
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(
            symbol=ticker).order_by('-created_on').values_list('price',
                                                               flat=True)
        # a queryset cannot be .reverse()d after slicing (and list.reverse()
        # returns None), so reverse the materialized list instead
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])
        data = [int(x * MULT_FACTOR) for x in data]
        print(data)

        ds = SupervisedDataSet(5, 1)
        try:
            for i, val in enumerate(data):
                # fixed: the original called DS.addSample, but the dataset is
                # named `ds`; the NameError was silently swallowed below
                ds.addSample((data[i], data[i + 1], data[i + 2], data[i + 3],
                              data[i + 4]), (data[i + 5], ))
        except Exception:
            # the loop is ended by the IndexError at the tail of `data`
            pass

        net = buildNetwork(5,
                           40,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = []  # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i + 1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(
                show_sample[0], round(show_pred_sample[0], 3), show_target[0],
                int(round(show_diff_pct[0], 0))))
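Note: relying on the IndexError at the end of `data` to stop the windowing loop also silences every other error. A bounded loop over the same idea, shown as a sketch with placeholder data, avoids the bare except:

from pybrain.datasets import SupervisedDataSet

data = [10, 11, 12, 13, 14, 15, 16, 17]  # placeholder series
ds = SupervisedDataSet(5, 1)
for i in range(len(data) - 5):
    # 5-value window as input, the following value as target
    ds.addSample(tuple(data[i:i + 5]), (data[i + 5],))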
Example #6
def train(d, cycles=100, epochs_per_cycle=7):
    ds = SequentialDataSet(1, 1)
    # note: the other examples pass recurrent=True; with recurrent=False,
    # buildNetwork wraps the LSTM layer in a plain feed-forward network
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=False)

    for sample, next_sample in zip(d, cycle(d[1:])):
        ds.addSample(sample, next_sample)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()

    return net, train_errors
Example #7
def train(context, trainX, trainY):
    ds = SequentialDataSet(4, 1)
    for dataX, dataY in zip(trainX, trainY):
        ds.addSample(dataX, dataY)
    net = buildNetwork(4,
                       1,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
    return net, trainer.testOnData()
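Note: a hedged usage sketch for this helper; the 4-wide input layer suggests sliding windows of four past values, and the series below is made up for illustration:

import numpy as np

series = np.sin(np.linspace(0, 20, 200))  # toy data
trainX = [series[i:i + 4] for i in range(len(series) - 4)]
trainY = series[4:]

net, final_error = train(None, trainX, trainY)  # `context` is unused by the helper
print("final error = {}".format(final_error))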
Example #8
def train(ds, net):
	# Train the network 
	trainer = RPropMinusTrainer(net, dataset=ds)
	train_errors = [] # save errors for plotting later
	EPOCHS_PER_CYCLE = 5
	CYCLES = 100
	EPOCHS = EPOCHS_PER_CYCLE * CYCLES
	for i in xrange(CYCLES):
	    trainer.trainEpochs(EPOCHS_PER_CYCLE)
	    error = trainer.testOnData()
	    train_errors.append(error)
	    epoch = (i+1) * EPOCHS_PER_CYCLE
	    print("\r epoch {}/{}".format(epoch, EPOCHS))
	    stdout.flush()

	# print("final error =", train_errors[-1])

	return train_errors, EPOCHS, EPOCHS_PER_CYCLE
Example #9
def train(ds, net):
    # Train the network
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        error = trainer.testOnData()
        train_errors.append(error)
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        stdout.flush()

    # print("final error =", train_errors[-1])

    return train_errors, EPOCHS, EPOCHS_PER_CYCLE
Example #10
    def handle(self, *args, **options):
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(symbol=ticker).order_by('-created_on').values_list('price',flat=True)
        # a queryset cannot be .reverse()d after slicing (and list.reverse()
        # returns None), so reverse the materialized list instead
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])
        data = [ int(x * MULT_FACTOR) for x in data]
        print(data)

        ds = SupervisedDataSet(5, 1)
        try:
            for i,val in enumerate(data):
                # fixed: the original called DS.addSample, but the dataset is
                # named `ds`; the NameError was silently swallowed below
                ds.addSample((data[i], data[i+1], data[i+2], data[i+3], data[i+4]), (data[i+5],))
        except Exception:
            # the loop is ended by the IndexError at the tail of `data`
            pass

        net = buildNetwork(5, 40, 1, 
                           hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = [] # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i+1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(show_sample[0],round(show_pred_sample[0],3),show_target[0],int(round(show_diff_pct[0],0))))
Example #11
def main():
    generated_data = [0 for i in range(10000)]
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    net = buildNetwork(1,
                       5,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()

    # predict new values by feeding the network its own output
    old_sample = [100]

    # fixed: the original looped to 500000, overrunning the
    # 10000-element generated_data buffer
    for i in xrange(len(generated_data)):
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)

    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
Example #12
def say_hello_text(username = "******",text="You are good"):

    object_data_new = pd.read_csv('/Users/ruiyun_zhou/Documents/cmpe-274/data/data.csv')
    data_area_new = object_data_new[object_data_new.Area==username]
    data_area_new_1=data_area_new[data_area_new.Disease== text]
    data_list_new = data_area_new_1['Count'].values.tolist()
    print len(data_list_new)
    data_list=data_list_new
    ds = SequentialDataSet(1, 1)
    # isZero stays 0 only if every value in the series is zero
    isZero = 0
    for sample, next_sample in zip(data_list, cycle(data_list[1:])):
        ds.addSample(sample, next_sample)
        if sample:
            isZero = 1

    if isZero == 0:
        return '[0, 0]'

    net = buildNetwork(1,5,1,hiddenclass=LSTMLayer,outputbias=False,recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        print "Doing epoch %d" %i
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
#    return '<p>%d</p>\n' % (data_list_new.__len__())
#        print("final error =", train_errors[-1])
#    print "Value for last week is %4.1d" % abs(data_list[-1])
#    print "Value for next week is %4.1d" % abs(net.activate(data_list[-1]))
#    result = (abs(data_list[-1]))
    result = (abs(net.activate(data_list[-1])))
    result_1 = (abs(net.activate(result)))
    return '[%d, %d]' % (result,result_1)
Example #13
def train(data,name):
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    net = buildNetwork(1, 200, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 20
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    store=[]
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        print tm.time()-atm
        stdout.flush() 
    for sample, target in ds.getSequenceIterator(0):
        store.append(net.activate(sample))
    abcd = pd.DataFrame(store)
    # `pwd` (output path prefix) is defined in the enclosing script
    abcd.to_csv(pwd + "lstmdata/" + name + ".csv", encoding='utf-8')
    print "result printed to file"
Example #14
# the start of this snippet is truncated; the network construction
# presumably resembled the other examples (input/hidden sizes assumed):
rnn = buildNetwork(1, 5, 1,
                   hiddenclass=LSTMLayer,
                   outputbias=False,
                   recurrent=True)

# Initialize trainer
trainer = RPropMinusTrainer(rnn, dataset=ds)

# Predefine iterations: epochs & cycles
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES

# Training loop
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    error = trainer.testOnData()
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r Epoch: {}/{} Error: {}".format(epoch, EPOCHS, error), end="")
    stdout.flush()

# Save model
NetworkWriter.writeToFile(rnn, 'rnn3.xml')

# Ad hoc test
for test in test_data:
    for i in xrange(0, len(test) - 6, 5):
        # Get 5 obs; the 6th value is what we wish to predict
        # (note: that would be test[i + 5]; the original uses i + 6, skipping one sample)
        obs, nxt = test[i:i + 5], test[i + 6]

        # Predict all
        prds = map(rnn.activate, obs)
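Note: the network saved with NetworkWriter above can be restored with its reader counterpart, as the rnn() examples further down also do; a minimal sketch (the input value is arbitrary):

from pybrain.tools.customxml import NetworkReader

rnn = NetworkReader.readFrom('rnn3.xml')  # reload the trained network
print(rnn.activate([0.5]))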
# (fragment: `ds`, the networks, and trainer, trainer_2, ... trainer_10 are
# defined earlier in the source file; note there is no trainer_7)
train_errors_2 = []
train_errors_3 = [] 
train_errors_4 = [] 
train_errors_5 = [] 
train_errors_6 = [] 
train_errors_8 = [] 
train_errors_9 = [] 
train_errors_10 = [] 

# Training
EPOCHS_per_CYCLE = 6
NUM_CYCLES = 15
EPOCHS = EPOCHS_per_CYCLE * NUM_CYCLES
for i in xrange(NUM_CYCLES):
    trainer.trainEpochs(EPOCHS_per_CYCLE)
    train_errors.append(trainer.testOnData())
    trainer_2.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_2.append(trainer_2.testOnData()) 
    trainer_3.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_3.append(trainer_3.testOnData())
    trainer_4.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_4.append(trainer_4.testOnData())
    trainer_5.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_5.append(trainer_5.testOnData())
    trainer_6.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_6.append(trainer_6.testOnData())
    trainer_8.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_8.append(trainer_8.testOnData())
    trainer_9.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_9.append(trainer_9.testOnData())
    trainer_10.trainEpochs(EPOCHS_per_CYCLE)
    # (snippet truncated; the matching train_errors_10.append is cut off)
def rnn():
    # load dataframe from csv file
    df = pi.load_data_frame('../../data/NABIL.csv')
    # column name to match with indicator calculating modules
    # TODO: resolve issue with column name
    df.columns = [
        'Transactions', 'Traded_Shares', 'Traded_Amount', 'High', 'Low',
        'Close'
    ]

    data = df.Close.values
    # TODO: write min_max normalization
    # normalization
    # cp = dataframe.pop(' Close Price')
    # x = cp.values
    temp = np.array(data).reshape(len(data), 1)
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(temp)
    # dataframe[' Close Price'] = x_scaled

    # prepare sequential dataset for pyBrain rnn network
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    # build rnn network with LSTM layer
    # if saved network is available
    # fixed: the original tested 'random.xml', which never matches the
    # 'network.xml' file written below, so a saved network was never reused
    if os.path.isfile('network.xml'):
        net = NetworkReader.readFrom('network.xml')
    else:
        net = buildNetwork(1,
                           20,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

    # build trainer
    trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        sys.stdout.flush()
    # save the network
    NetworkWriter.writeToFile(net, 'network.xml')

    print()
    print("final error =", train_errors[-1])

    predicted = []
    for dat in data:
        predicted.append(net.activate(dat)[0])
    # data = min_max_scaler.inverse_transform(data)
    # predicted = min_max_scaler.inverse_transform(predicted)
    predicted_array = min_max_scaler.inverse_transform(
        np.array(predicted).reshape(-1, 1))
    print(predicted_array[-1])
    plt.figure()

    legend_actual, = plt.plot(range(0, len(data)),
                              temp,
                              label='actual',
                              linestyle='--',
                              linewidth=2,
                              c='blue')
    legend_predicted, = plt.plot(range(0, len(data)),
                                 predicted_array,
                                 label='predicted',
                                 linewidth=1.5,
                                 c='red')
    plt.legend(handles=[legend_actual, legend_predicted])
    plt.savefig('error.png')
    plt.show()
def rnn():
    # load dataframe from csv file
    df = pi.load_data_frame('../../data/NABIL.csv')
    # column name to match with indicator calculating modules
    # TODO: resolve issue with column name
    df.columns = [
        'Transactions',
        'Traded_Shares',
        'Traded_Amount',
        'High',
        'Low',
        'Close']
     
    data = df.Close.values
    # TODO: write min_max normalization
    # normalization
    # cp = dataframe.pop(' Close Price')
    # x = cp.values
    temp = np.array(data).reshape(len(data),1)
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(temp)
    # dataframe[' Close Price'] = x_scaled
     
    # prepare sequential dataset for pyBrain rnn network
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
     
    # build rnn network with LSTM layer
    # if saved network is available
    # fixed: the original tested 'random.xml', which never matches the
    # 'network.xml' file written below, so a saved network was never reused
    if os.path.isfile('network.xml'):
        net = NetworkReader.readFrom('network.xml')
    else:
        net = buildNetwork(1, 20, 1, 
                           hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
     
    # build trainer
    trainer = RPropMinusTrainer(net, dataset=ds, verbose = True)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        sys.stdout.flush()
    # save the network
    NetworkWriter.writeToFile(net,'network.xml')
        
    print()
    print("final error =", train_errors[-1])
     
    predicted = []
    for dat in data:
        predicted.append(net.activate(dat)[0])
    # data = min_max_scaler.inverse_transform(data)
    # predicted = min_max_scaler.inverse_transform(predicted)
    predicted_array = min_max_scaler.inverse_transform(np.array(predicted).reshape(-1,1))
    print(predicted_array[-1])
    plt.figure()
     
    legend_actual, = plt.plot(range(0, len(data)),temp, label = 'actual', linestyle = '--', linewidth = 2, c = 'blue')
    legend_predicted, = plt.plot(range(0, len(data)), predicted_array, label = 'predicted', linewidth = 1.5, c='red')
    plt.legend(handles=[legend_actual, legend_predicted])
    plt.savefig('error.png')
    plt.show()
from __future__ import print_function  # the loop below uses print(..., end="") under Python 2

from pybrain.supervised import RPropMinusTrainer
from sys import stdout

trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
#trainer.trainUntilConvergence()

train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 100  # increasing the epochs to 20 decreases accuracy drastically; fewer epochs are desired (epoch 5: err = 0.04)
CYCLES = 10  # vary the epochs, the cycles and the LSTM cells to get more accurate results
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(
        EPOCHS_PER_CYCLE
    )  # train on the given data set for given number of epochs
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    stdout.flush()

print()
print("final error =", train_errors[-1])

## Plot  the data and the training
import matplotlib.pyplot as plt

plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
plt.show()
mape_error = 0
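Note: the snippet breaks off right after initializing mape_error. A plausible continuation, sketched as an assumption (the original computation is not shown), accumulates the mean absolute percentage error over the training sequence:

# hypothetical continuation; assumes non-zero targets
count = 0
for sample, target in ds.getSequenceIterator(0):
    prediction = net.activate(sample)[0]
    mape_error += abs(prediction - target[0]) / abs(target[0])
    count += 1
mape_error = 100.0 * mape_error / count
print("MAPE = {}%".format(mape_error))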
    def learn(self,
              pathdataset=["dstc4_train"],
              Pathdataroot="data",
              numberOfHiddenUnit=20,
              EPOCHS_PER_CYCLE=10,
              CYCLES=40,
              weightdecayw=0.01):
        print "Start learning LSTM, and make dictionary file"
        #Construct dictionary: variable name -> corresponding index of element in i/o vector
        print "Star make dictionary: variable name -> corresponding index of element in i/o vector"
        self.dictOut = {
        }  #"TOPIC_SLOT_VALUE" -> corresponding index of element
        self.dictIn = {
        }  #"SPEAKER_{val}"or"TOPIC_{val}","WORD_{word}" "BIO_{BIO}", "CLASS_{slot,value}", ""{defined label}-> corresponding  index of element
        #-target vector dictionary
        index = 0
        totalNumSlot = 0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                totalNumSlot += 1
                for value in self.tagsets[topic][slot]:
                    self.dictOut[topic + "_" + slot + "_" + value] = index
                    index += 1
        print "totalNumSlot:" + str(totalNumSlot)
        print "outputSize:" + str(len(self.dictOut.keys()))
        #-input dictionary
        dataset = []
        for pathdat in pathdataset:
            dataset.append(
                dataset_walker.dataset_walker(pathdat,
                                              dataroot=Pathdataroot,
                                              labels=False))
        #--(sub input vector 1) Class features i.e., Slot and value ratio (Similar to base line)
        index = 0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                if ("CLASS_" + slot) not in self.dictIn:
                    self.dictIn["CLASS_" + slot] = index
                    index += 1
                for value in self.tagsets[topic][slot]:
                    if ("CLASS_" + value) not in self.dictIn:
                        self.dictIn["CLASS_" + value] = index
                        index += 1
        self.TOTALSIZEOFCLASSFeature = index
        f = open(self.FileNameofNumClassFeature, "wb")
        pickle.dump(self.TOTALSIZEOFCLASSFeature, f)
        f.close()
        #--(sub input vector 2) Sentence features
        if not self.isUseSentenceRepresentationInsteadofBOW:
            index = 0
            for elemDataset in dataset:
                for call in elemDataset:
                    for (uttr, _) in call:
                        #General info1 (CLASS; this feature must be rejistered at first)
                        if ("SPEAKER_" + uttr["speaker"]) not in self.dictIn:
                            self.dictIn["SPEAKER_" + uttr["speaker"]] = index
                            index += 1
                        if ("TOPIC_" + uttr["segment_info"]["topic"]
                            ) not in self.dictIn:
                            self.dictIn["TOPIC_" +
                                        uttr["segment_info"]["topic"]] = index
                            index += 1
                        #General info2
                        #-BIO
                        if ("BIO_" + uttr['segment_info']['target_bio']
                            ) not in self.dictIn:
                            self.dictIn[
                                "BIO_" +
                                uttr['segment_info']['target_bio']] = index
                            index += 1

                        #BOW
                        if LSTMWithBOWTracker.isIgnoreUtterancesNotRelatedToMainTask:
                            if not (uttr['segment_info']['target_bio'] == "O"):
                                #-BOW
                                splitedtrans = self.__getRegurelisedBOW(
                                    uttr["transcript"])
                                for word in splitedtrans:
                                    if ("WORD_" + word) not in self.dictIn:
                                        self.dictIn["WORD_" + word] = index
                                        index += 1
            self.TOTALSIZEOFSENTENCEFeature = index
            f = open(self.FileNameofNumSentenceFeature, "wb")
            pickle.dump(self.TOTALSIZEOFSENTENCEFeature, f)
            f.close()
        elif self.isUseSentenceRepresentationInsteadofBOW:
            index = 0
            for i in range(0, LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index) + "thElemPV"] = index
                index += 1
            index = 0
            for i in range(0, LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index) + "thAvrWord"] = index
                index += 1
            assert self.D2V_VECTORSIZE == LSTMWithBOWTracker.D2V_VECTORSIZE, "D2V_VECTORSIZE is restricted to be the same across the class"
        else:
            assert False, "Unexpected block"
        #--(sub input vector 3) Features M1s defined
        index = 0
        if self.isEnableToUseM1sFeature:
            rejisteredFeatures = self.__rejisterM1sInputFeatureLabel(
                self.tagsets, dataset)
            for rFeature in rejisteredFeatures:
                assert rFeature not in self.dictIn, rFeature + " already registered in input vector. Use different label name. "
                self.dictIn[rFeature] = index
                index += 1
            self.TOTALSIZEOFM1DEFINEDFeature = index
            f = open(self.FileNameofNumM1Feature, "wb")
            pickle.dump(self.TOTALSIZEOFM1DEFINEDFeature, f)
            f.close()

        print "inputSize:" + str(len(self.dictIn.keys()))
        assert self.dictIn[
            "CLASS_INFO"] == 0, "Unexpected index: CLASS_INFO should have value 0"
        assert self.dictIn[
            "CLASS_Fort Siloso"] == 334, "Unexpected index: CLASS_Fort Siloso should have value 334"
        assert self.dictIn[
            "CLASS_Yunnan"] == 1344, "Unexpected index: CLASS_Yunnan should have value 1344"
        #--write
        fileObject = open('dictInput.pic', 'w')
        pickle.dump(self.dictIn, fileObject)
        fileObject.close()
        fileObject = open('dictOutput.pic', 'w')
        pickle.dump(self.dictOut, fileObject)
        fileObject.close()

        #Build RNN frame work
        print "Start learning Network"
        #Capability of network: 30 hidden units can represent 1048576 relations, whereas 10 hidden units can represent 1024
        #Same as Henderson (http://www.aclweb.org/anthology/W13-4073)?
        net = buildNetwork(len(self.dictIn.keys()),
                           numberOfHiddenUnit,
                           len(self.dictOut.keys()),
                           hiddenclass=LSTMLayer,
                           outclass=SigmoidLayer,
                           outputbias=False,
                           recurrent=True)

        #Train network
        #-convert training data into sequence of vector
        convDataset = []  #[call][uttr][input,targetvec]
        iuttr = 0
        convCall = []
        for elemDataset in dataset:
            for call in elemDataset:
                for (uttr, label) in call:
                    if self.isIgnoreUtterancesNotRelatedToMainTask:
                        if uttr['segment_info']['target_bio'] == "O":
                            continue
                    #-input
                    convInput = self._translateUtteranceIntoInputVector(
                        uttr, call)
                    #-output
                    convOutput = [0.0] * len(
                        self.dictOut.keys())  #Occurred: 1, not occurred: 0
                    if "frame_label" in label:
                        for slot in label["frame_label"].keys():
                            for value in label["frame_label"][slot]:
                                convOutput[self.dictOut[
                                    uttr["segment_info"]["topic"] + "_" +
                                    slot + "_" + value]] = 1
                    #-post-process
                    if self.isSeparateDialogIntoSubDialog:
                        if uttr['segment_info']['target_bio'] == "B":
                            if len(convCall) > 0:
                                convDataset.append(convCall)
                            convCall = []
                    convCall.append([convInput, convOutput])
                    #print "Converted utterance" + str(iuttr)
                    iuttr += 1
                if not self.isSeparateDialogIntoSubDialog:
                    if len(convCall) > 0:
                        convDataset.append(convCall)
                    convCall = []
        #Online learning
        trainer = RPropMinusTrainer(net, weightdecay=weightdecayw)
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            #Shuffle order
            ds = SequentialDataSet(len(self.dictIn.keys()),
                                   len(self.dictOut.keys()))
            datInd = range(0, len(convDataset))
            random.shuffle(
                datInd
            )  #The backprop trainer already implements data shuffling, but RpropMinus does not
            for ind in datInd:
                ds.newSequence()
                for convuttr in convDataset[ind]:
                    ds.addSample(convuttr[0], convuttr[1])
            #Evaluation and Train
            epoch = (i + 1) * EPOCHS_PER_CYCLE
            print "\r epoch {}/{} Error={}".format(
                epoch, EPOCHS, trainer.testOnData(dataset=ds))
            stdout.flush()
            trainer.trainOnDataset(dataset=ds, epochs=EPOCHS_PER_CYCLE)
            NetworkWriter.writeToFile(
                trainer.module, "LSTM_" + "Epoche" + str(i + 1) + ".rnnw")
            NetworkWriter.writeToFile(trainer.module, "LSTM.rnnw")
from pybrain.supervised import RPropMinusTrainer
from sys import stdout


print 'Starting to train neural network. . .'
trainer = RPropMinusTrainer(net, dataset=ds)
train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 2
#CYCLES = 200
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
print 'Entering loop. . .'
for i in xrange(CYCLES):
    # Does the training
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print 'i: ', i
    print ('\r epoch {}/{}'.format(epoch, EPOCHS))

    stdout.flush()
print 'Exit loop'
print ''

print 'final error =', train_errors[-1]

# Plot the errors (note that in this simple toy example,
# we are testing and training on the same dataset, which
# is of course not what you'd do for a real project!):

import matplotlib.pyplot as plt
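Note: the snippet ends at the import; the error plot it announces presumably mirrored the earlier examples. An assumed completion:

plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
plt.show()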
    def learn(self,pathdataset=["dstc4_train"], Pathdataroot="data",numberOfHiddenUnit=20, EPOCHS_PER_CYCLE = 10, CYCLES = 40,weightdecayw=0.01):
        print "Start learning LSTM, and make dictionary file"
        #Construct dictionary: variable name -> corresponding index of element in i/o vector
        print "Star make dictionary: variable name -> corresponding index of element in i/o vector"
        self.dictOut={}#"TOPIC_SLOT_VALUE" -> corresponding index of element
        self.dictIn={}#"SPEAKER_{val}"or"TOPIC_{val}","WORD_{word}" "BIO_{BIO}", "CLASS_{slot,value}", ""{defined label}-> corresponding  index of element
        #-target vector dictionary 
        index=0
        totalNumSlot=0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                totalNumSlot+=1
                for value in self.tagsets[topic][slot]:
                    self.dictOut[topic+"_"+slot+"_"+value]=index
                    index+=1
        print "totalNumSlot:" + str(totalNumSlot)
        print "outputSize:"+str(len(self.dictOut.keys()))
        #-input dictionary
        dataset=[]
        for pathdat in pathdataset:
            dataset.append(dataset_walker.dataset_walker(pathdat,dataroot=Pathdataroot,labels=False))
        #--(sub input vector 1) Class features i.e., Slot and value ratio (Similar to base line)
        index=0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                if ("CLASS_"+ slot) not in self.dictIn:
                    self.dictIn["CLASS_"+slot]=index
                    index+=1
                for value in self.tagsets[topic][slot]:
                    if ("CLASS_"+ value) not in self.dictIn:
                        self.dictIn["CLASS_"+value]=index
                        index+=1
        self.TOTALSIZEOFCLASSFeature=index
        f=open(self.FileNameofNumClassFeature,"wb")
        pickle.dump(self.TOTALSIZEOFCLASSFeature,f)
        f.close()
        #--(sub input vector 2) Sentence features
        if not self.isUseSentenceRepresentationInsteadofBOW:
            index=0
            for elemDataset in dataset:
                for call in elemDataset:
                    for (uttr,_) in call:
                        #General info1 (CLASS; this feature must be rejistered at first)
                        if ("SPEAKER_"+uttr["speaker"]) not in self.dictIn:
                            self.dictIn["SPEAKER_"+uttr["speaker"]]=index
                            index+=1 
                        if ("TOPIC_"+uttr["segment_info"]["topic"]) not in self.dictIn:
                            self.dictIn["TOPIC_"+uttr["segment_info"]["topic"]]=index
                            index+=1 
                        #General info2
                        #-BIO
                        if ("BIO_"+uttr['segment_info']['target_bio']) not in self.dictIn:
                            self.dictIn["BIO_"+uttr['segment_info']['target_bio']]=index
                            index+=1
        
                        #BOW
                        if LSTMWithBOWTracker.isIgnoreUtterancesNotRelatedToMainTask:
                            if not (uttr['segment_info']['target_bio'] == "O"):
                                #-BOW
                                splitedtrans=self.__getRegurelisedBOW(uttr["transcript"])
                                for word in splitedtrans:
                                    if ("WORD_"+word) not in self.dictIn:
                                        self.dictIn["WORD_"+word]=index
                                        index+=1
            self.TOTALSIZEOFSENTENCEFeature=index
            f=open(self.FileNameofNumSentenceFeature,"wb")
            pickle.dump(self.TOTALSIZEOFSENTENCEFeature,f)
            f.close()
        elif self.isUseSentenceRepresentationInsteadofBOW:
            index=0
            for i in range(0,LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index)+"thElemPV"]=index
                index+=1
            index=0
            for i in range(0,LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index)+"thAvrWord"]=index
                index+=1
            assert self.D2V_VECTORSIZE == LSTMWithBOWTracker.D2V_VECTORSIZE, "D2V_VECTORSIZE is restricted to be the same across the class"
        else:
            assert False, "Unexpected block" 
        #--(sub input vector 3) Features M1s defined
        index=0
        if self.isEnableToUseM1sFeature:
            rejisteredFeatures=self.__rejisterM1sInputFeatureLabel(self.tagsets,dataset)
            for rFeature in rejisteredFeatures:
                assert rFeature not in self.dictIn, rFeature +" already registered in input vector. Use different label name. "
                self.dictIn[rFeature]=index
                index+=1
            self.TOTALSIZEOFM1DEFINEDFeature=index
            f=open(self.FileNameofNumM1Feature,"wb")
            pickle.dump(self.TOTALSIZEOFM1DEFINEDFeature,f)
            f.close()

        print "inputSize:"+str(len(self.dictIn.keys()))
        assert self.dictIn["CLASS_INFO"] == 0, "Unexpected index CLASS_INFO should has value 0"
        assert self.dictIn["CLASS_Fort Siloso"] == 334, "Unexpected index CLASS_Fort Siloso should has value 334"
        assert self.dictIn["CLASS_Yunnan"] == 1344, "Unexpected index CLASS_Yunnan should has value 1611"
        #--write 
        fileObject = open('dictInput.pic', 'w')
        pickle.dump(self.dictIn, fileObject)
        fileObject.close()
        fileObject = open('dictOutput.pic', 'w')
        pickle.dump(self.dictOut, fileObject)
        fileObject.close()
        
        #Build RNN frame work
        print "Start learning Network"
        #Capability of network: 30 hidden units can represent 1048576 relations, whereas 10 hidden units can represent 1024
        #Same as Henderson (http://www.aclweb.org/anthology/W13-4073)?
        net = buildNetwork(len(self.dictIn.keys()), numberOfHiddenUnit, len(self.dictOut.keys()), hiddenclass=LSTMLayer, outclass=SigmoidLayer, outputbias=False, recurrent=True)
        
        #Train network
        #-convert training data into sequence of vector 
        convDataset=[]#[call][uttr][input,targetvec]
        iuttr=0
        convCall=[]
        for elemDataset in dataset:
            for call in elemDataset:
                for (uttr,label) in call:
                    if self.isIgnoreUtterancesNotRelatedToMainTask:
                        if uttr['segment_info']['target_bio'] == "O":
                            continue
                    #-input
                    convInput=self._translateUtteranceIntoInputVector(uttr,call)
                    #-output
                    convOutput=[0.0]*len(self.dictOut.keys())#Occurred: 1, not occurred: 0
                    if "frame_label" in label:
                        for slot in label["frame_label"].keys():
                            for value in label["frame_label"][slot]:
                                convOutput[self.dictOut[uttr["segment_info"]["topic"]+"_"+slot+"_"+value]]=1
                    #-post-process
                    if self.isSeparateDialogIntoSubDialog:
                        if uttr['segment_info']['target_bio'] == "B":
                            if len(convCall) > 0:
                                convDataset.append(convCall)
                            convCall=[]
                    convCall.append([convInput,convOutput])
                    #print "Converted utterance" + str(iuttr)
                    iuttr+=1
                if not self.isSeparateDialogIntoSubDialog:
                    if len(convCall) > 0:
                        convDataset.append(convCall)
                    convCall=[]
        #Online learning
        trainer = RPropMinusTrainer(net,weightdecay=weightdecayw)
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            #Shuffle order
            ds = SequentialDataSet(len(self.dictIn.keys()),len(self.dictOut.keys()))
            datInd=range(0,len(convDataset))
            random.shuffle(datInd) #The backprop trainer already implements data shuffling, but RpropMinus does not
            for ind in datInd:
                ds.newSequence()
                for convuttr in convDataset[ind]:
                    ds.addSample(convuttr[0],convuttr[1])
            #Evaluation and Train
            epoch = (i+1) * EPOCHS_PER_CYCLE
            print "\r epoch {}/{} Error={}".format(epoch, EPOCHS,trainer.testOnData(dataset=ds))
            stdout.flush()
            trainer.trainOnDataset(dataset=ds,epochs=EPOCHS_PER_CYCLE)
            NetworkWriter.writeToFile(trainer.module, "LSTM_"+"Epoche"+str(i+1)+".rnnw")
            NetworkWriter.writeToFile(trainer.module, "LSTM.rnnw")