Example #1
    def train(self, params):
        """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params:
    :return:
    """
        if params['reset_every_training']:
            n = params['encoding_num']
            self.net = buildNetwork(n,
                                    params['num_cells'],
                                    n,
                                    hiddenclass=LSTMLayer,
                                    bias=True,
                                    outputbias=params['output_bias'],
                                    recurrent=True)
            self.net.reset()

        # prepare training dataset
        ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
        history = self.window(self.history, params)
        resets = self.window(self.resets, params)

        for i in xrange(1, len(history)):
            if not resets[i - 1]:
                ds.addSample(self.encoder.encode(history[i - 1]),
                             self.encoder.encode(history[i]))
            if resets[i]:
                ds.newSequence()

        if params['num_epochs'] > 1:
            trainer = RPropMinusTrainer(self.net,
                                        dataset=ds,
                                        verbose=params['verbosity'] > 0)

            if len(history) > 1:
                trainer.trainEpochs(params['num_epochs'])

            # run network on buffered dataset after training to get the state right
            self.net.reset()
            for i in xrange(len(history) - 1):
                symbol = history[i]
                output = self.net.activate(self.encoder.encode(symbol))
                self.encoder.classify(output, num=params['num_predictions'])

                if resets[i]:
                    self.net.reset()
        else:
            self.trainer.setData(ds)
            self.trainer.train()

            # run network on buffered dataset after training to get the state right
            self.net.reset()
            for i in xrange(len(history) - 1):
                symbol = history[i]
                output = self.net.activate(self.encoder.encode(symbol))
                self.encoder.classify(output, num=params['num_predictions'])

                if resets[i]:
                    self.net.reset()
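Example #1 assumes an encoder object exposing encode() and classify(). A minimal sketch of that interface (a hypothetical stand-in, not part of the original code) could look like:

import numpy as np

class OneHotEncoder(object):
    """Hypothetical encoder matching the encode/classify calls above."""

    def __init__(self, num_symbols):
        self.num_symbols = num_symbols

    def encode(self, symbol):
        # one-hot vector for an integer symbol in [0, num_symbols)
        v = np.zeros(self.num_symbols)
        v[symbol] = 1.0
        return v

    def classify(self, output, num=1):
        # indices of the `num` largest activations, best first
        return list(np.argsort(output)[::-1][:num])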
Example #2
def trainedLSTMNN2():
    """
    n = RecurrentNetwork()

    inp = LinearLayer(100, name = 'input')
    hid = LSTMLayer(30, name='hidden')
    out = LinearLayer(1, name='output')

    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)

    #add connections
    n.addConnection(FullConnection(inp, hid))
    n.addConnection(FullConnection(hid, out))

    n.addRecurrentConnection(FullConnection(hid, hid))
    n.sortModules()
    """
    n = buildSimpleLSTMNetwork()

    print "Network created"
    d = load1OrderDataSet()
    print "Data loaded"
    t = RPropMinusTrainer(n, dataset=d, verbose=True)
    t.trainUntilConvergence()

    exportANN(n)

    return n
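This example calls helpers that are not shown. A plausible minimal version of two of them, assuming the 100-30-1 topology from the docstring and PyBrain's XML serializer (load1OrderDataSet is dataset-specific and left out):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
from pybrain.tools.customxml.networkwriter import NetworkWriter

def buildSimpleLSTMNetwork():
    # 100 inputs, 30 LSTM cells, 1 linear output, as in the docstring above
    return buildNetwork(100, 30, 1,
                        hiddenclass=LSTMLayer,
                        outputbias=False,
                        recurrent=True)

def exportANN(net, filename='lstm.xml'):
    # serialize the trained network to XML for later reuse
    NetworkWriter.writeToFile(net, filename)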
Example #3
def trainLSTMnet(net, numTrainSequence, seedSeq=1):
    np.random.seed(seedSeq)
    for _ in xrange(numTrainSequence):
        (ds, in_seq, out_seq) = getReberDS(maxLength)
        print("train seq", _, sequenceToWord(in_seq))
        trainer = RPropMinusTrainer(net, dataset=ds)
        trainer.trainEpochs(rptPerSeq)

    return net
    def train(self, ds, epochs_per_cycle, cycles):
        trainer = RPropMinusTrainer(self.n, dataset=ds)
        train_errors = []
        for i in xrange(cycles):
            trainer.trainEpochs(epochs_per_cycle)
            train_errors.append(trainer.testOnData())
            epoch = (i + 1) * epochs_per_cycle
            print("\r epoch {}/{}".format(epoch, epochs_per_cycle * cycles))
            sys.stdout.flush()
        print("Final Error: " + str(train_errors[-1]))
        return train_errors[-1]
Example #5
def ltsmXY(tin, tout, title='ltsm.png'):

    #datain = zip(tin[:-3], tin[1:-2], tin[2:-1])
    #datain = zip(tin[:-8], tin[1:-7], tin[2:-6], tin[3:-5], tin[4:-4], tin[5:-3],tin[6:-2], tin[7:-1])
    #datain = zip(tin[:-12], tin[1:-11], tin[2:-10], tin[3:-9], tin[4:-8], tin[5:-7],tin[6:-6], tin[7:-5], tin[8:-4], tin[9:-3], tin[10:-2], tin[11:-1])
    datain = zip(tin[:-16], tin[1:-15], tin[2:-14], tin[3:-13], tin[4:-12], tin[5:-11],tin[6:-10], tin[7:-9], tin[8:-8], tin[9:-7], tin[10:-6], tin[11:-5], tin[12:-4], tin[13:-3], tin[14:-2], tin[15:-1])

    #dataout = tout[3:]
    #dataout = tout[8:]
    #dataout = tout[12:]
    dataout = tout[16:]

    #ds = SequentialDataSet(3, 1)
    #ds = SequentialDataSet(8, 1)
    #ds = SequentialDataSet(12, 1)
    ds = SequentialDataSet(16, 1)

    for x, y in zip(datain[:len(datain)/2], dataout[:len(datain)/2]):
        ds.addSample(x, y)


    # add layers until overfitting the training data
    #net = buildNetwork(3,5,1,hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(8, 8, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(12, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    net = buildNetwork(16, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    pred_out = []
    for i in range(len(datain)):
        pred_out.append(net.activate(datain[i]))
    
    fig = plt.figure()
    #tout[16:].plot(ax=ax, title='Occupancy')
    plt.plot(tout[16:].index, tout[16:], 'y', linewidth=1.5)
    plt.plot(tout[16:].index, pred_out, 'b+')
    plt.legend(['Occupancy', 'LSTM'])
    fig.tight_layout()
    plt.savefig(title, bbox_inches='tight')
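The commented-out zip() variants above all build the same thing: a sliding lag window over the series paired with the next value. When input and target come from the same series, a generic helper (a sketch, not part of the original) replaces the hand-written columns:

from pybrain.datasets import SequentialDataSet

def make_lagged_dataset(series, lag):
    # pair each window series[i:i+lag] with the next value series[i+lag],
    # matching the 16-column zip above when lag == 16
    ds = SequentialDataSet(lag, 1)
    for i in range(len(series) - lag):
        ds.addSample(series[i:i + lag], series[i + lag])
    return ds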
Example #6
    def __init__(self, indim, outdim):
        # construct LSTM network - note the missing output bias
        rnn = buildNetwork(indim,
                           5,
                           outdim,
                           hiddenclass=LSTMLayer,
                           outclass=SoftmaxLayer,
                           outputbias=False,
                           recurrent=True)
        # define a training method
        trainer = RPropMinusTrainer(rnn)
Example #7
    def train(self, params, verbose=False):

        if params['reset_every_training']:
            if verbose:
                print 'create lstm network'

            random.seed(6)
            if params['output_encoding'] is None:
                self.net = buildNetwork(self.nDimInput,
                                        params['num_cells'],
                                        self.nDimOutput,
                                        hiddenclass=LSTMLayer,
                                        bias=True,
                                        outputbias=True,
                                        recurrent=True)
            elif params['output_encoding'] == 'likelihood':
                self.net = buildNetwork(self.nDimInput,
                                        params['num_cells'],
                                        self.nDimOutput,
                                        hiddenclass=LSTMLayer,
                                        bias=True,
                                        outclass=SigmoidLayer,
                                        recurrent=True)

        self.net.reset()

        ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
        networkInput = self.window(self.networkInput, params)
        targetPrediction = self.window(self.targetPrediction, params)

        # prepare a training data-set using the history
        for i in xrange(len(networkInput)):
            ds.addSample(self.inputEncoder.encode(networkInput[i]),
                         self.outputEncoder.encode(targetPrediction[i]))

        if params['num_epochs'] > 1:
            trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)

            if verbose:
                print "train LSTM on", len(ds), "records for", params['num_epochs'], "epochs"

            if len(networkInput) > 1:
                trainer.trainEpochs(params['num_epochs'])

        else:
            self.trainer.setData(ds)
            self.trainer.train()

        # run through the training dataset to get the lstm network state right
        self.net.reset()
        for i in xrange(len(networkInput)):
            self.net.activate(ds.getSample(i)[0])
Example #8
def ltsm(data):
    from pybrain.datasets import SequentialDataSet
    from itertools import cycle
    
    datain = zip(data[:-6], data[1:-5], data[2:-4], data[3:-3], data[4:-2], data[5:-1])
    dataout = data[6:]
    ds = SequentialDataSet(6, 1)
    for x, y in zip(datain, dataout):
        ds.addSample(x, y)

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure.modules import LSTMLayer

    net = buildNetwork(6, 7, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    from pybrain.supervised import RPropMinusTrainer
    from sys import stdout
    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    '''
    plt.figure()
    plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
    plt.xlabel('epoch')
    plt.ylabel('error')
    plt.show()
    '''

    test_error = 0.
    cnt = 0
    for sample, target in ds.getSequenceIterator(0):
        #print "sample = ",  sample
        #print "predicted next sample = %4.1f" % net.activate(sample)
        #print "actual next sample = %4.1f" % target
        test_error += abs(net.activate(sample) - target)
        cnt += 1
    test_error /= cnt 
    print "test (train) error =", test_error
    def handle(self, *args, **options):
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(
            symbol=ticker).order_by('-created_on').values_list('price',
                                                               flat=True)
        # materialize the slice first: QuerySet.reverse() cannot follow a slice
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])
        data = [int(x * MULT_FACTOR) for x in data]
        print(data)

        # a SequentialDataSet (rather than SupervisedDataSet) is required by
        # getSequenceIterator() further down
        ds = SequentialDataSet(5, 1)
        try:
            for i, val in enumerate(data):
                ds.addSample((data[i], data[i + 1], data[i + 2], data[i + 3],
                              data[i + 4]), (data[i + 5], ))
        except IndexError:
            # running off the end of the price list ends the loop
            pass

        net = buildNetwork(5,
                           40,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = []  # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i + 1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(
                show_sample[0], round(show_pred_sample[0], 3), show_target[0],
                int(round(show_diff_pct[0], 0))))
Example #10
def train(context, trainX, trainY):
    ds = SequentialDataSet(4, 1)
    for dataX, dataY in zip(trainX, trainY):
        ds.addSample(dataX, dataY)
    net = buildNetwork(4,
                       1,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
    return net, trainer.testOnData()
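A minimal usage sketch (the shapes of trainX and trainY are assumptions, and context is unused by the function):

trainX = [[0.1, 0.2, 0.3, 0.4],
          [0.2, 0.3, 0.4, 0.5]]  # one 4-feature window per row
trainY = [0.5, 0.6]              # scalar target for each window
net, final_error = train(None, trainX, trainY)
print(net.activate([0.3, 0.4, 0.5, 0.6]))  # one-step-ahead prediction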
Example #11
    def updateValue(self, laststate, state, lastaction, lastreward, lbda=1):
        if self.t < self.horizon:
            return
        qvalues = self.getValues()
        if qvalues is None:
            return
        qvalue = qvalues[lastaction]
        next_qvalues = self.getTargetValues()
        max_q_index = np.argmax(next_qvalues)
        maxnext = next_qvalues[max_q_index]
        if self.nn:
            update = lastreward + (self.gamma * maxnext)
            qvalues[lastaction] = update
            from pybrain.supervised import RPropMinusTrainer
            from pybrain.datasets import SequentialDataSet
            # NOTE: the original called Sequential(), which PyBrain does not
            # provide; a SequentialDataSet sized to the network, filled with
            # (state, updated-Q) samples, is presumably what was intended
            dataset = SequentialDataSet(self.nn.indim, self.nn.outdim)
            trainer = RPropMinusTrainer(self.nn)
            trainer.trainOnDataset(dataset)
        else:
            self.Q[laststate][lastaction] = qvalue + self.alpha * lbda * (
                lastreward + self.gamma * maxnext - qvalue)
Example #12
def train(ds, net):
    # Train the network
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        error = trainer.testOnData()
        train_errors.append(error)
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        stdout.flush()

    # print("final error =", train_errors[-1])

    return train_errors, EPOCHS, EPOCHS_PER_CYCLE
Example #13
def main():
    generated_data = [0 for i in range(500000)]  # sized to match the generation loop below
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    net = buildNetwork(1,
                       5,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()

    # predict new values
    old_sample = [100]

    for i in xrange(500000):
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)

    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
Example #14
    def train(self, params):
        n = params['encoding_num']
        net = buildNetwork(n,
                           params['num_cells'],
                           n,
                           hiddenclass=LSTMLayer,
                           bias=True,
                           outputbias=params['output_bias'],
                           recurrent=True)
        net.reset()

        ds = SequentialDataSet(n, n)
        trainer = RPropMinusTrainer(net, dataset=ds)

        history = self.window(self.history, params)
        resets = self.window(self.resets, params)

        for i in xrange(1, len(history)):
            if not resets[i - 1]:
                ds.addSample(self.encoder.encode(history[i - 1]),
                             self.encoder.encode(history[i]))
            if resets[i]:
                ds.newSequence()

        if len(history) > 1:
            trainer.trainEpochs(params['num_epochs'])
            net.reset()

        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = net.activate(self.encoder.encode(symbol))
            predictions = self.encoder.classify(output,
                                                num=params['num_predictions'])

            if resets[i]:
                net.reset()

        return net
def main():
    config = MU.ConfigReader('configs/%s' % sys.argv[1])
    config.read()

    logDir = '%s-%s' % (__file__, sys.argv[1])
    os.mkdir(logDir)

    with open('%s/config.txt' % logDir, 'w') as outfile:
        json.dump(config.getConfigDict(), outfile, indent=4)

    dr = MU.DataReader(config['input_tsv_path'])
    data = dr.read(config['interested_columns'])

    inLabels = config['input_columns']

    outLabels = config['output_columns']

    tds, vds = seqDataSetPair(data, inLabels, outLabels,
                              config['seq_label_column'], config['test_seqno'],
                              config['validation_seqno'])

    inScale = config.getDataScale(inLabels)
    outScale = config.getDataScale(outLabels)

    normalizeDataSet(tds, ins=inScale, outs=outScale)
    normalizeDataSet(vds, ins=inScale, outs=outScale)

    trainData = tds
    validationData = vds

    fdim = tds.indim / 5 + 5
    xdim = tds.outdim * 2

    rnn = buildNetwork(tds.indim,
                       fdim,
                       fdim,
                       fdim,
                       xdim,
                       tds.outdim,
                       hiddenclass=SigmoidLayer,
                       recurrent=True)

    rnn.addRecurrentConnection(FullConnection(rnn['hidden0'], rnn['hidden0']))
    rnn.addRecurrentConnection(FullConnection(rnn['hidden1'], rnn['hidden1']))
    rnn.addRecurrentConnection(FullConnection(rnn['hidden2'], rnn['hidden2']))
    rnn.sortModules()

    trainer = RPropMinusTrainer(rnn,
                                dataset=trainData,
                                batchlearning=True,
                                verbose=True,
                                weightdecay=0.005)

    errTime = []
    errTrain = []
    errValidation = []
    epochNo = 0
    while True:

        for i in range(config['epochs_per_update']):
            trainer.train()

        epochNo += config['epochs_per_update']
        NetworkWriter.writeToFile(rnn, '%s/Epoch_%d.xml' % (logDir, epochNo))
        NetworkWriter.writeToFile(rnn, '%s/Latest.xml' % logDir)

        tOut = ModuleValidator.calculateModuleOutput(rnn, trainData)
        vOut = ModuleValidator.calculateModuleOutput(rnn, validationData)

        tScaler = config.getDataScale([config['output_scalar_label']])[0][1]
        tAvgErr = NP.sqrt(NP.mean((trainData['target'] - tOut)**2)) * tScaler
        vAvgErr = NP.sqrt(NP.mean(
            (validationData['target'] - vOut)**2)) * tScaler

        tMaxErr = NP.max(NP.abs(trainData['target'] - tOut)) * tScaler
        vMaxErr = NP.max(NP.abs(validationData['target'] - vOut)) * tScaler

        errTrain.append(tAvgErr)
        errValidation.append(vAvgErr)
        errTime.append(epochNo)

        print "Training error:      avg %5.3f       max %5.3f" % (tAvgErr,
                                                                  tMaxErr)
        print "Validation error:    avg %5.3f       max %5.3f" % (vAvgErr,
                                                                  vMaxErr)
        print "------------------------------------------------------------------------------"

        if (config['visualize_on_training'] == 'yes'):

            PL.figure(1)
            PL.ioff()
            visulizeDataSet(rnn, trainData, 0,
                            config['visualized_columns']['input'],
                            config['visualized_columns']['output'])
            PL.ion()
            PL.draw()

            PL.figure(2)
            PL.ioff()
            visulizeDataSet(rnn, validationData, 0,
                            config['visualized_columns']['input'],
                            config['visualized_columns']['output'])
            PL.ion()
            PL.draw()

            p = PL.figure(3)
            PL.ioff()
            p.clear()
            PL.plot(errTime, errTrain, label='Train')
            PL.plot(errTime, errValidation, label='Validation')
            PL.legend()
            PL.ion()
            PL.draw()
def rnn():
    # load dataframe from csv file
    df = pi.load_data_frame('../../data/NABIL.csv')
    # column name to match with indicator calculating modules
    # TODO: resolve issue with column name
    df.columns = [
        'Transactions', 'Traded_Shares', 'Traded_Amount', 'High', 'Low',
        'Close'
    ]

    data = df.Close.values
    # TODO: write min_max normalization
    # normalization
    # cp = dataframe.pop(' Close Price')
    # x = cp.values
    temp = np.array(data).reshape(len(data), 1)
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(temp)
    # dataframe[' Close Price'] = x_scaled

    # prepare a sequential dataset for the pyBrain RNN
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    # build rnn network with LSTM layer
    # if saved network is available
    if (os.path.isfile('network.xml')):
        net = NetworkReader.readFrom('network.xml')
    else:
        net = buildNetwork(1,
                           20,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

    # build trainer
    trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        sys.stdout.flush()
    # save the network
    NetworkWriter.writeToFile(net, 'network.xml')

    print()
    print("final error =", train_errors[-1])

    predicted = []
    for dat in data:
        predicted.append(net.activate(dat)[0])
    # data = min_max_scaler.inverse_transform(data)
    # predicted = min_max_scaler.inverse_transform(predicted)
    predicted_array = min_max_scaler.inverse_transform(
        np.array(predicted).reshape(-1, 1))
    print(predicted_array[-1])
    plt.figure()

    legend_actual, = plt.plot(range(0, len(data)),
                              temp,
                              label='actual',
                              linestyle='--',
                              linewidth=2,
                              c='blue')
    legend_predicted, = plt.plot(range(0, len(data)),
                                 predicted_array,
                                 label='predicted',
                                 linewidth=1.5,
                                 c='red')
    plt.legend(handles=[legend_actual, legend_predicted])
    plt.savefig('error.png')
    plt.show()
Example #17
for ts in train_data:
    ds.newSequence()
    # Add obsv and next
    for t_1, t_2 in zip(ts, ts[1:]):
        ds.addSample(t_1, t_2)

# RNN with 1-5-1 architecture: 1 input, 5 hidden, 1 output layer
rnn = buildNetwork(1,
                   5,
                   1,
                   hiddenclass=LSTMLayer,
                   outputbias=False,
                   recurrent=True)

# Initialize trainer
trainer = RPropMinusTrainer(rnn, dataset=ds)

# Predefine iterations: epochs & cycles
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES

# Training loop
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    error = trainer.testOnData()
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r Epoch: {}/{} Error: {}".format(epoch, EPOCHS, error), end="")
    stdout.flush()

# Save model
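The snippet stops at the "Save model" comment. One way to fill that step in, assuming the same XML writer used elsewhere on this page:

from pybrain.tools.customxml.networkwriter import NetworkWriter

NetworkWriter.writeToFile(rnn, 'rnn.xml')
# and to restore it later:
# from pybrain.tools.customxml.networkreader import NetworkReader
# rnn = NetworkReader.readFrom('rnn.xml')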
Example #18
trndata = generateNoisySines(50, 40)
trndata._convertToOneOfMany(bounds=[0., 1.])
tstdata = generateNoisySines(50, 20)
tstdata._convertToOneOfMany(bounds=[0., 1.])

# construct LSTM network - note the missing output bias
rnn = buildNetwork(trndata.indim,
                   5,
                   trndata.outdim,
                   hiddenclass=LSTMLayer,
                   outclass=SoftmaxLayer,
                   outputbias=False,
                   recurrent=True)

# define a training method
trainer = RPropMinusTrainer(rnn, dataset=trndata, verbose=True)
# instead, you may also try
##trainer = BackpropTrainer( rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.00001 )

# carry out the training
for i in xrange(100):
    trainer.trainEpochs(2)
    trnresult = 100. * (1.0 - testOnSequenceData(rnn, trndata))
    tstresult = 100. * (1.0 - testOnSequenceData(rnn, tstdata))
    print "train error: %5.2f%%" % trnresult, ",  test error: %5.2f%%" % tstresult

# just for reference, plot the first 5 timeseries
plot(trndata['input'][0:250, :], '-o')
hold(True)
plot(trndata['target'][0:250, 0])
show()
    dataset_8.addSample(current_sample, next_sample)
for current_sample, next_sample in zip(training_data_9, cycle(training_data_9[1:])):
    dataset_9.addSample(current_sample, next_sample)
for current_sample, next_sample in zip(training_data_10, cycle(training_data_10[1:])):
    dataset_10.addSample(current_sample, next_sample)

for current_sample, next_sample in zip(testing_data, cycle(testing_data[1:])):
    dataset_bis.addSample(current_sample, next_sample)    



# Initializing the LSTM RNN: 23 nodes in the hidden layer
network = buildNetwork(1, 23, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

# Training data
trainer = RPropMinusTrainer(network, dataset=dataset, delta0 = 0.01)
trainer_2 = RPropMinusTrainer(network, dataset=dataset_2, delta0 = 0.01)
trainer_3 = RPropMinusTrainer(network, dataset=dataset_3, delta0 = 0.01)
trainer_4 = RPropMinusTrainer(network, dataset=dataset_4, delta0 = 0.01)
trainer_5 = RPropMinusTrainer(network, dataset=dataset_5, delta0 = 0.01)
trainer_6 = RPropMinusTrainer(network, dataset=dataset_6, delta0 = 0.01)
trainer_8 = RPropMinusTrainer(network, dataset=dataset_8, delta0 = 0.01)
trainer_9 = RPropMinusTrainer(network, dataset=dataset_9, delta0 = 0.01)
trainer_10 = RPropMinusTrainer(network, dataset=dataset_10, delta0 = 0.01)


# Initializing storage for the error curves
train_errors = [] 
train_errors_2 = [] 
train_errors_3 = [] 
train_errors_4 = [] 
    def learn(self,
              pathdataset=["dstc4_train"],
              Pathdataroot="data",
              numberOfHiddenUnit=20,
              EPOCHS_PER_CYCLE=10,
              CYCLES=40,
              weightdecayw=0.01):
        print "Start learning LSTM, and make dictionary file"
        #Construct dictionary: variable name -> corresponding index of element in i/o vector
        print "Star make dictionary: variable name -> corresponding index of element in i/o vector"
        self.dictOut = {
        }  #"TOPIC_SLOT_VALUE" -> corresponding index of element
        self.dictIn = {
        }  #"SPEAKER_{val}"or"TOPIC_{val}","WORD_{word}" "BIO_{BIO}", "CLASS_{slot,value}", ""{defined label}-> corresponding  index of element
        #-target vector dictionary
        index = 0
        totalNumSlot = 0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                totalNumSlot += 1
                for value in self.tagsets[topic][slot]:
                    self.dictOut[topic + "_" + slot + "_" + value] = index
                    index += 1
        print "totalNumSlot:" + str(totalNumSlot)
        print "outputSize:" + str(len(self.dictOut.keys()))
        #-input dictionary
        dataset = []
        for pathdat in pathdataset:
            dataset.append(
                dataset_walker.dataset_walker(pathdat,
                                              dataroot=Pathdataroot,
                                              labels=False))
        #--(sub input vector 1) Class features i.e., Slot and value ratio (Similar to base line)
        index = 0
        for topic in self.tagsets.keys():
            for slot in self.tagsets[topic].keys():
                if ("CLASS_" + slot) not in self.dictIn:
                    self.dictIn["CLASS_" + slot] = index
                    index += 1
                for value in self.tagsets[topic][slot]:
                    if ("CLASS_" + value) not in self.dictIn:
                        self.dictIn["CLASS_" + value] = index
                        index += 1
        self.TOTALSIZEOFCLASSFeature = index
        f = open(self.FileNameofNumClassFeature, "wb")
        pickle.dump(self.TOTALSIZEOFCLASSFeature, f)
        f.close()
        #--(sub input vector 2) Sentence features
        if not self.isUseSentenceRepresentationInsteadofBOW:
            index = 0
            for elemDataset in dataset:
                for call in elemDataset:
                    for (uttr, _) in call:
                        #General info1 (CLASS; this feature must be registered first)
                        if ("SPEAKER_" + uttr["speaker"]) not in self.dictIn:
                            self.dictIn["SPEAKER_" + uttr["speaker"]] = index
                            index += 1
                        if ("TOPIC_" + uttr["segment_info"]["topic"]
                            ) not in self.dictIn:
                            self.dictIn["TOPIC_" +
                                        uttr["segment_info"]["topic"]] = index
                            index += 1
                        #General info2
                        #-BIO
                        if ("BIO_" + uttr['segment_info']['target_bio']
                            ) not in self.dictIn:
                            self.dictIn[
                                "BIO_" +
                                uttr['segment_info']['target_bio']] = index
                            index += 1

                        #BOW
                        if LSTMWithBOWTracker.isIgnoreUtterancesNotRelatedToMainTask:
                            if not (uttr['segment_info']['target_bio'] == "O"):
                                #-BOW
                                splitedtrans = self.__getRegurelisedBOW(
                                    uttr["transcript"])
                                for word in splitedtrans:
                                    if ("WORD_" + word) not in self.dictIn:
                                        self.dictIn["WORD_" + word] = index
                                        index += 1
            self.TOTALSIZEOFSENTENCEFeature = index
            f = open(self.FileNameofNumSentenceFeature, "wb")
            pickle.dump(self.TOTALSIZEOFSENTENCEFeature, f)
            f.close()
        elif self.isUseSentenceRepresentationInsteadofBOW:
            index = 0
            for i in range(0, LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index) + "thElemPV"] = index
                index += 1
            index = 0
            for i in range(0, LSTMWithBOWTracker.D2V_VECTORSIZE):
                self.dictIn[str(index) + "thAvrWord"] = index
                index += 1
            assert self.D2V_VECTORSIZE == LSTMWithBOWTracker.D2V_VECTORSIZE, "D2V_VECTORSIZE must be the same across the class"
        else:
            assert False, "Unexpected block"
        #--(sub input vector 3) Features M1s defined
        index = 0
        if self.isEnableToUseM1sFeature:
            rejisteredFeatures = self.__rejisterM1sInputFeatureLabel(
                self.tagsets, dataset)
            for rFeature in rejisteredFeatures:
                assert rFeature not in self.dictIn, rFeature + " already registered in input vector. Use different label name. "
                self.dictIn[rFeature] = index
                index += 1
            self.TOTALSIZEOFM1DEFINEDFeature = index
            f = open(self.FileNameofNumM1Feature, "wb")
            pickle.dump(self.TOTALSIZEOFM1DEFINEDFeature, f)
            f.close()

        print "inputSize:" + str(len(self.dictIn.keys()))
        assert self.dictIn[
            "CLASS_INFO"] == 0, "Unexpected index: CLASS_INFO should have value 0"
        assert self.dictIn[
            "CLASS_Fort Siloso"] == 334, "Unexpected index: CLASS_Fort Siloso should have value 334"
        assert self.dictIn[
            "CLASS_Yunnan"] == 1344, "Unexpected index: CLASS_Yunnan should have value 1344"
        #--write
        fileObject = open('dictInput.pic', 'w')
        pickle.dump(self.dictIn, fileObject)
        fileObject.close()
        fileObject = open('dictOutput.pic', 'w')
        pickle.dump(self.dictOut, fileObject)
        fileObject.close()

        #Build RNN frame work
        print "Start learning Network"
        #Network capacity: 30 hidden units can represent 1048576 relations, whereas 10 hidden units can represent 1024
        #Same as Henderson (http://www.aclweb.org/anthology/W13-4073)?
        net = buildNetwork(len(self.dictIn.keys()),
                           numberOfHiddenUnit,
                           len(self.dictOut.keys()),
                           hiddenclass=LSTMLayer,
                           outclass=SigmoidLayer,
                           outputbias=False,
                           recurrent=True)

        #Train network
        #-convert training data into sequence of vector
        convDataset = []  #[call][uttr][input,targetvec]
        iuttr = 0
        convCall = []
        for elemDataset in dataset:
            for call in elemDataset:
                for (uttr, label) in call:
                    if self.isIgnoreUtterancesNotRelatedToMainTask:
                        if uttr['segment_info']['target_bio'] == "O":
                            continue
                    #-input
                    convInput = self._translateUtteranceIntoInputVector(
                        uttr, call)
                    #-output
                    convOutput = [0.0] * len(
                        self.dictOut.keys())  #Occured:+1, Not occured:0
                    if "frame_label" in label:
                        for slot in label["frame_label"].keys():
                            for value in label["frame_label"][slot]:
                                convOutput[self.dictOut[
                                    uttr["segment_info"]["topic"] + "_" +
                                    slot + "_" + value]] = 1
                    #-post proccess
                    if self.isSeparateDialogIntoSubDialog:
                        if uttr['segment_info']['target_bio'] == "B":
                            if len(convCall) > 0:
                                convDataset.append(convCall)
                            convCall = []
                    convCall.append([convInput, convOutput])
                    #print "Converted utterance" + str(iuttr)
                    iuttr += 1
                if not self.isSeparateDialogIntoSubDialog:
                    if len(convCall) > 0:
                        convDataset.append(convCall)
                    convCall = []
        #Online learning
        trainer = RPropMinusTrainer(net, weightdecay=weightdecayw)
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            #Shuffle order
            ds = SequentialDataSet(len(self.dictIn.keys()),
                                   len(self.dictOut.keys()))
            datInd = range(0, len(convDataset))
            # BackpropTrainer shuffles its data internally, but
            # RPropMinusTrainer does not, so shuffle the sequence order here
            random.shuffle(datInd)
            for ind in datInd:
                ds.newSequence()
                for convuttr in convDataset[ind]:
                    ds.addSample(convuttr[0], convuttr[1])
            #Evaluate, then train
            epoch = (i + 1) * EPOCHS_PER_CYCLE
            print "\r epoch {}/{} Error={}".format(
                epoch, EPOCHS, trainer.testOnData(dataset=ds))
            stdout.flush()
            trainer.trainOnDataset(dataset=ds, epochs=EPOCHS_PER_CYCLE)
            NetworkWriter.writeToFile(
                trainer.module, "LSTM_" + "Epoche" + str(i + 1) + ".rnnw")
            NetworkWriter.writeToFile(trainer.module, "LSTM.rnnw")
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
from pybrain.structure.modules import SigmoidLayer

net = buildNetwork(inputSize,
                   inputSize,
                   outputSize,
                   hiddenclass=LSTMLayer,
                   outclass=SigmoidLayer,
                   outputbias=False,
                   recurrent=True)

from pybrain.supervised import RPropMinusTrainer
import sys
from sys import stdout

trainer = RPropMinusTrainer(net, dataset=ds)
train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 5
CYCLES = int(sys.argv[2])
EPOCHS = EPOCHS_PER_CYCLE * CYCLES

import matplotlib.pyplot as plt
plt.xlabel('Training Epoch')
plt.ylabel('Shooting Error')
plt.ion()
plt.show()

for i in range(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
Example #22
    # code to normalize the data in ds
    i = np.array([d[0] for d in ds])
    inorm = np.array([d[0] for d in ds])
    i /= np.max(np.abs(i), axis=0)
    o = np.array([d[1] for d in ds])
    onorm = np.array([d[1] for d in ds])
    o /= np.max(np.abs(o), axis=0)

    print routes_frommeasur_ids["11000602"] / np.max(np.abs(inorm), axis=0)
    #creating new object for normalized data
    nds = SupervisedDataSet(1, 1)
    for ix in range(len(ds)):
        nds.addSample(i[ix], o[ix])
    #print routes_frommeasur

    #creating net
    net = buildNetwork(nds.indim,
                       3,
                       nds.outdim,
                       bias=True,
                       hiddenclass=TanhLayer)

    #training net
    trainer = RPropMinusTrainer(net, verbose=True)
    trainer.trainOnDataset(nds, 10)
    #trainer.testOnData(verbose=True)

    p = net.activate(routes_frommeasur_ids["11000602"] /
                     np.max(np.abs(inorm), axis=0))

    print(p[0] * np.max(np.abs(onorm), axis=0)[0])
# Builds a simple LSTM network with 1 input node, 12 LSTM cells and 1 output node
net = buildNetwork(1,
                   12,
                   1,
                   hiddenclass=LSTMLayer,
                   peepholes=False,
                   outputbias=False,
                   recurrent=True)
# net = buildNetwork(1, 1, 1, hiddenclass=LSTMLayer, peepholes = True, outputbias=False, recurrent=True)
# rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)

from pybrain.supervised import RPropMinusTrainer
from sys import stdout

trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
#trainer.trainUntilConvergence()

train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 100  # increasing the epochs to 20 decreases accuracy drastically; decreasing epochs is desired (epoch #5 err = 0.04)
CYCLES = 10  # vary the epochs, the cycles and the LSTM cells to get more accurate results
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(
        EPOCHS_PER_CYCLE
    )  # train on the given data set for given number of epochs
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    stdout.flush()
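The train_errors collected above are "saved for plotting later" but never plotted. A minimal sketch of that plot, following the commented-out block in Example #8:

import matplotlib.pyplot as plt

plt.figure()
plt.plot(range(EPOCHS_PER_CYCLE, EPOCHS + 1, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
plt.savefig('train_errors.png')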
Example #24
        #   trainer.setData(ds)
        #   import random
        #   random.shuffle(sequences)
        #   concat_sequences = []
        #   for sequence in sequences:
        #     concat_sequences += sequence
        #     concat_sequences.append(random.randrange(100, 1000000))
        #   # concat_sequences = sum(sequences, [])
        #   for j in xrange(len(concat_sequences) - 1):
        #     ds.addSample(num2vec(concat_sequences[j], nDim), num2vec(concat_sequences[j+1], nDim))

        #   trainer.train()
        net = initializeLSTMnet(nDim, nLSTMcells=50)
        net.reset()
        ds = SequentialDataSet(nDim, nDim)
        trainer = RPropMinusTrainer(net)
        trainer.setData(ds)
        for _ in xrange(1000):
            # Batch training mode
            # print "generate a dataset of sequences"
            import random
            random.shuffle(sequences)
            concat_sequences = []
            for sequence in sequences:
                concat_sequences += sequence
                concat_sequences.append(random.randrange(100, 1000000))
            for j in xrange(len(concat_sequences) - 1):
                ds.addSample(num2vec(concat_sequences[j], nDim),
                             num2vec(concat_sequences[j + 1], nDim))

            trainer.trainEpochs(rptNum)