Example #1
File: rnn.py Project: cy94/ml2
def main():
	generated_data = [0 for i in range(10000)]
	rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
	data = data[1000:190000]
	print("Got wav")
	ds = SequentialDataSet(1, 1)
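	# pair each value with its successor to form one-step-ahead training
	# pairs; cycle() wraps around, so the last value is paired with data[1]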
	for sample, next_sample in zip(data, cycle(data[1:])):
	    ds.addSample(sample, next_sample)

	net = buildNetwork(1, 5, 1,
	                   hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

	trainer = RPropMinusTrainer(net, dataset=ds)
	train_errors = [] # save errors for plotting later
	EPOCHS_PER_CYCLE = 5
	CYCLES = 10
	EPOCHS = EPOCHS_PER_CYCLE * CYCLES
	for i in xrange(CYCLES):
	    trainer.trainEpochs(EPOCHS_PER_CYCLE)
	    train_errors.append(trainer.testOnData())
	    epoch = (i+1) * EPOCHS_PER_CYCLE
	    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
	    stdout.flush()

	# predict new values
	old_sample = [100]

	for i in xrange(len(generated_data)):  # stay within the generated_data buffer
		new_sample = net.activate(old_sample)
		old_sample = new_sample
		generated_data[i] = new_sample[0]
		print(new_sample)
	
	wavfile.write("../../output/test.wav", rate, np.array(generated_data))
Example #2
  def train(self, params):

    self.net.reset()

    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=False)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(params['prediction_nstep'], len(history)):
      if not resets[i-1]:
        ds.addSample(self.inputEncoder.encode(history[i-params['prediction_nstep']]),
                     self.outputEncoder.encode(history[i][0]))
      if resets[i]:
        ds.newSequence()
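    # resets marks sequence boundaries: pairs straddling a reset are skipped,
    # and newSequence() keeps state/gradients from flowing across the boundary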

    # print ds.getSample(0)
    # print ds.getSample(1)
    # print ds.getSample(1000)
    # print " training data size", ds.getLength(), " len(history) ", len(history), " self.history ", len(self.history)
    # print ds

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])

    self.net.reset()
    for i in xrange(len(history) - params['prediction_nstep']):
      symbol = history[i]
      output = self.net.activate(ds.getSample(i)[0])

      if resets[i]:
        self.net.reset()
Example #3
    def train(self, params):
        """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params:
    :return:
    """
        if params['reset_every_training']:
            n = params['encoding_num']
            self.net = buildNetwork(n,
                                    params['num_cells'],
                                    n,
                                    hiddenclass=LSTMLayer,
                                    bias=True,
                                    outputbias=params['output_bias'],
                                    recurrent=True)
            self.net.reset()

        # prepare training dataset
        ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
        history = self.window(self.history, params)
        resets = self.window(self.resets, params)

        for i in xrange(1, len(history)):
            if not resets[i - 1]:
                ds.addSample(self.encoder.encode(history[i - 1]),
                             self.encoder.encode(history[i]))
            if resets[i]:
                ds.newSequence()

        if params['num_epochs'] > 1:
            trainer = RPropMinusTrainer(self.net,
                                        dataset=ds,
                                        verbose=params['verbosity'] > 0)

            if len(history) > 1:
                trainer.trainEpochs(params['num_epochs'])

            # run network on buffered dataset after training to get the state right
            self.net.reset()
            for i in xrange(len(history) - 1):
                symbol = history[i]
                output = self.net.activate(self.encoder.encode(symbol))
                self.encoder.classify(output, num=params['num_predictions'])

                if resets[i]:
                    self.net.reset()
        else:
            self.trainer.setData(ds)
            self.trainer.train()

            # run network on buffered dataset after training to get the state right
            self.net.reset()
            for i in xrange(len(history) - 1):
                symbol = history[i]
                output = self.net.activate(self.encoder.encode(symbol))
                self.encoder.classify(output, num=params['num_predictions'])

                if resets[i]:
                    self.net.reset()
Example #4
  def train(self, params):
    n = params['encoding_num']
    net = buildNetwork(n, params['num_cells'], n,
                       hiddenclass=LSTMLayer,
                       bias=True,
                       outputbias=params['output_bias'],
                       recurrent=True)
    net.reset()

    ds = SequentialDataSet(n, n)
    trainer = RPropMinusTrainer(net, dataset=ds)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
      if not resets[i-1]:
        ds.addSample(self.encoder.encode(history[i-1]),
                     self.encoder.encode(history[i]))
      if resets[i]:
        ds.newSequence()

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
      net.reset()

    for i in xrange(len(history) - 1):
      symbol = history[i]
      output = net.activate(self.encoder.encode(symbol))
      predictions = self.encoder.classify(output, num=params['num_predictions'])

      if resets[i]:
        net.reset()

    return net
Example #5
  def train(self, params):
    """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params:
    :return:
    """
    if params['reset_every_training']:
      n = params['encoding_num']
      self.net = buildNetwork(n, params['num_cells'], n,
                               hiddenclass=LSTMLayer,
                               bias=True,
                               outputbias=params['output_bias'],
                               recurrent=True)
      self.net.reset()

    # prepare training dataset
    ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
      if not resets[i - 1]:
        ds.addSample(self.encoder.encode(history[i - 1]),
                     self.encoder.encode(history[i]))
      if resets[i]:
        ds.newSequence()

    print "Train LSTM network on buffered dataset of length ", len(history)
    if params['num_epochs'] > 1:
      trainer = RPropMinusTrainer(self.net,
                                  dataset=ds,
                                  verbose=params['verbosity'] > 0)

      if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])

      # run network on buffered dataset after training to get the state right
      self.net.reset()
      for i in xrange(len(history) - 1):
        symbol = history[i]
        output = self.net.activate(self.encoder.encode(symbol))
        self.encoder.classify(output, num=params['num_predictions'])

        if resets[i]:
          self.net.reset()
    else:
      self.trainer.setData(ds)
      self.trainer.train()

      # run network on buffered dataset after training to get the state right
      self.net.reset()
      for i in xrange(len(history) - 1):
        symbol = history[i]
        output = self.net.activate(self.encoder.encode(symbol))
        self.encoder.classify(output, num=params['num_predictions'])

        if resets[i]:
          self.net.reset()
Example #6
def trainLSTMnet(net, numTrainSequence, seedSeq=1):
    np.random.seed(seedSeq)
    for _ in xrange(numTrainSequence):
        (ds, in_seq, out_seq) = getReberDS(maxLength)
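        # getReberDS(), sequenceToWord(), maxLength and rptPerSeq are
        # helpers/globals presumably defined elsewhere in the original module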
        print("train seq", _, sequenceToWord(in_seq))
        trainer = RPropMinusTrainer(net, dataset=ds)
        trainer.trainEpochs(rptPerSeq)

    return net
Example #8
 def train(self, ds, epochs_per_cycle, cycles):
     trainer = RPropMinusTrainer(self.n, dataset=ds)
     train_errors = []
     for i in xrange(cycles):
         trainer.trainEpochs(epochs_per_cycle)
         train_errors.append(trainer.testOnData())
         epoch = (i + 1) * epochs_per_cycle
         print("\r epoch {}/{}".format(epoch, epochs_per_cycle * cycles))
         sys.stdout.flush()
     print("Final Error: " + str(train_errors[-1]))
     return train_errors[-1]
Example #9
def ltsmXY(tin, tout, title='ltsm.png'):

    #datain = zip(tin[:-3], tin[1:-2], tin[2:-1])
    #datain = zip(tin[:-8], tin[1:-7], tin[2:-6], tin[3:-5], tin[4:-4], tin[5:-3],tin[6:-2], tin[7:-1])
    #datain = zip(tin[:-12], tin[1:-11], tin[2:-10], tin[3:-9], tin[4:-8], tin[5:-7],tin[6:-6], tin[7:-5], tin[8:-4], tin[9:-3], tin[10:-2], tin[11:-1])
    datain = zip(tin[:-16], tin[1:-15], tin[2:-14], tin[3:-13], tin[4:-12], tin[5:-11],tin[6:-10], tin[7:-9], tin[8:-8], tin[9:-7], tin[10:-6], tin[11:-5], tin[12:-4], tin[13:-3], tin[14:-2], tin[15:-1])

    #dataout = tout[3:]
    #dataout = tout[8:]
    #dataout = tout[12:]
    dataout = tout[16:]

    #ds = SequentialDataSet(3, 1)
    #ds = SequentialDataSet(8, 1)
    #ds = SequentialDataSet(12, 1)
    ds = SequentialDataSet(16, 1)

    for x, y in zip(datain[:len(datain)/2], dataout[:len(datain)/2]):
        ds.addSample(x, y)
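    # train on the first half of the series only; the full series is fed
    # through the net below for the prediction plot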


    # add layers until overfitting the training data
    #net = buildNetwork(3,5,1,hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(8, 8, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    #net = buildNetwork(12, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    net = buildNetwork(16, 20, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    pred_out = []
    for i in range(len(datain)):
        pred_out.append(net.activate(datain[i]))
    
    fig = plt.figure()
    #tout[16:].plot(ax=ax, title='Occupancy')
    plt.plot(tout[16:].index, tout[16:], 'y', linewidth=1.5)
    plt.plot(tout[16:].index, pred_out, 'b+')
    plt.legend(['Occupancy', 'LSTM'])
    fig.tight_layout()
    plt.savefig(title, bbox_inches='tight')
Example #10
    def train(self, params, verbose=False):

        if params['reset_every_training']:
            if verbose:
                print 'create lstm network'

            random.seed(6)
            if params['output_encoding'] is None:
                self.net = buildNetwork(self.nDimInput,
                                        params['num_cells'],
                                        self.nDimOutput,
                                        hiddenclass=LSTMLayer,
                                        bias=True,
                                        outputbias=True,
                                        recurrent=True)
            elif params['output_encoding'] == 'likelihood':
                self.net = buildNetwork(self.nDimInput,
                                        params['num_cells'],
                                        self.nDimOutput,
                                        hiddenclass=LSTMLayer,
                                        bias=True,
                                        outclass=SigmoidLayer,
                                        recurrent=True)

        self.net.reset()

        ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
        networkInput = self.window(self.networkInput, params)
        targetPrediction = self.window(self.targetPrediction, params)

        # prepare a training data-set using the history
        for i in xrange(len(networkInput)):
            ds.addSample(self.inputEncoder.encode(networkInput[i]),
                         self.outputEncoder.encode(targetPrediction[i]))

        if params['num_epochs'] > 1:
            trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)

            if verbose:
                print " train LSTM on ", len(
                    ds), " records for ", params['num_epochs'], " epochs "

            if len(networkInput) > 1:
                trainer.trainEpochs(params['num_epochs'])

        else:
            self.trainer.setData(ds)
            self.trainer.train()

        # run through the training dataset to get the lstm network state right
        self.net.reset()
        for i in xrange(len(networkInput)):
            self.net.activate(ds.getSample(i)[0])
Example #11
def ltsm(data):
    from pybrain.datasets import SequentialDataSet
    from itertools import cycle
    
    datain = zip(data[:-6], data[1:-5], data[2:-4], data[3:-3], data[4:-2], data[5:-1])
    dataout = data[6:]
    ds = SequentialDataSet(6, 1)
    for x, y in zip(datain, dataout):
        ds.addSample(x, y)
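    # each input is a sliding window of 6 consecutive values; the target is
    # the value immediately after the window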

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure.modules import LSTMLayer

    net = buildNetwork(6, 7, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    from pybrain.supervised import RPropMinusTrainer
    from sys import stdout
    
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        #print "\r epoch {}/{}".format(epoch, EPOCHS)
        stdout.flush()

    print "final error =", train_errors[-1]

    '''
    plt.figure()
    plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
    plt.xlabel('epoch')
    plt.ylabel('error')
    plt.show()
    '''

    test_error = 0.
    cnt = 0
    for sample, target in ds.getSequenceIterator(0):
        #print "sample = ",  sample
        #print "predicted next sample = %4.1f" % net.activate(sample)
        #print "actual next sample = %4.1f" % target
        test_error += abs(net.activate(sample) - target)
        cnt += 1
    test_error /= cnt 
    print "test (train) error =", test_error
Example #12
    def handle(self, *args, **options):
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(
            symbol=ticker).order_by('-created_on').values_list('price',
                                                               flat=True)
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])  # oldest first
        data = [int(x * MULT_FACTOR) for x in data]
        print(data)

        ds = SupervisedDataSet(5, 1)
        try:
            for i, val in enumerate(data):
                ds.addSample((data[i], data[i + 1], data[i + 2], data[i + 3],
                              data[i + 4]), (data[i + 5], ))
        except IndexError:  # the window simply runs off the end of the list
            pass

        net = buildNetwork(5,
                           40,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = []  # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i + 1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(
                show_sample[0], round(show_pred_sample[0], 3), show_target[0],
                int(round(show_diff_pct[0], 0))))
Example #13
def train(d, cycles=100, epochs_per_cycle=7):
    ds = SequentialDataSet(1, 1)
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False,
                       recurrent=True)  # an LSTM needs a recurrent network

    for sample, next_sample in zip(d, cycle(d[1:])):
        ds.addSample(sample, next_sample)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()

    return net, train_errors
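# Hypothetical usage (names are illustrative, not from the original project):
# net, errors = train(my_series, cycles=50, epochs_per_cycle=7)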
Example #14
def train(context, trainX, trainY):
    ds = SequentialDataSet(4, 1)
    for dataX, dataY in zip(trainX, trainY):
        ds.addSample(dataX, dataY)
    net = buildNetwork(4,
                       1,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
    return net, trainer.testOnData()
Example #16
def train(ds, net):
    # Train the network
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        error = trainer.testOnData()
        train_errors.append(error)
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        stdout.flush()

    # print("final error =", train_errors[-1])

    return train_errors, EPOCHS, EPOCHS_PER_CYCLE
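# A minimal, self-contained driver for the train(ds, net) helper above. This
# is an illustrative sketch only: the sine-wave data and the 1-5-1 topology
# are assumptions, not taken from any of the original projects.
import numpy as np
from pybrain.datasets import SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer

signal = np.sin(np.linspace(0, 20, 400))      # toy 1-D series
ds = SequentialDataSet(1, 1)
for sample, next_sample in zip(signal, signal[1:]):
    ds.addSample(sample, next_sample)         # one-step-ahead pairs

net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer,
                   outputbias=False, recurrent=True)
train_errors, EPOCHS, EPOCHS_PER_CYCLE = train(ds, net)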
Example #17
  def train(self, params, verbose=False):

    if params['reset_every_training']:
      if verbose:
        print 'create lstm network'

      random.seed(6)
      if params['output_encoding'] is None:
        self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                           hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
      elif params['output_encoding'] == 'likelihood':
        self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                           hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)

    self.net.reset()

    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    networkInput = self.window(self.networkInput, params)
    targetPrediction = self.window(self.targetPrediction, params)

    # prepare a training data-set using the history
    for i in xrange(len(networkInput)):
      ds.addSample(self.inputEncoder.encode(networkInput[i]),
                   self.outputEncoder.encode(targetPrediction[i]))

    if params['num_epochs'] > 1:
      trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)

      if verbose:
        print " train LSTM on ", len(ds), " records for ", params['num_epochs'], " epochs "

      if len(networkInput) > 1:
        trainer.trainEpochs(params['num_epochs'])

    else:
      self.trainer.setData(ds)
      self.trainer.train()

    # run through the training dataset to get the lstm network state right
    self.net.reset()
    for i in xrange(len(networkInput)):
      self.net.activate(ds.getSample(i)[0])
Example #18
    def handle(self, *args, **options):
        ticker = args[0]
        print("****** STARTING PREDICTOR " + ticker + " ******* ")
        prices = Price.objects.filter(symbol=ticker).order_by('-created_on').values_list('price',flat=True)
        data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])  # oldest first
        data = [ int(x * MULT_FACTOR) for x in data]
        print(data)

        ds = SupervisedDataSet(5, 1)
        try:
            for i, val in enumerate(data):
                ds.addSample((data[i], data[i+1], data[i+2], data[i+3], data[i+4]), (data[i+5],))
        except IndexError:  # the window simply runs off the end of the list
            pass

        net = buildNetwork(5, 40, 1, 
                           hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

        trainer = RPropMinusTrainer(net, dataset=ds)
        train_errors = [] # save errors for plotting later
        EPOCHS_PER_CYCLE = 5
        CYCLES = 100
        EPOCHS = EPOCHS_PER_CYCLE * CYCLES
        for i in xrange(CYCLES):
            trainer.trainEpochs(EPOCHS_PER_CYCLE)
            train_errors.append(trainer.testOnData())
            epoch = (i+1) * EPOCHS_PER_CYCLE
            print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
            stdout.flush()

        print()
        print("final error =", train_errors[-1])

        for sample, target in ds.getSequenceIterator(0):
            show_pred_sample = net.activate(sample) / MULT_FACTOR
            show_sample = sample / MULT_FACTOR
            show_target = target / MULT_FACTOR
            show_diff = show_pred_sample - show_target
            show_diff_pct = 100 * show_diff / show_pred_sample
            print("{} => {}, act {}. ({}%)".format(show_sample[0],round(show_pred_sample[0],3),show_target[0],int(round(show_diff_pct[0],0))))
Example #19
    def train(self, params):
        n = params['encoding_num']
        net = buildNetwork(n,
                           params['num_cells'],
                           n,
                           hiddenclass=LSTMLayer,
                           bias=True,
                           outputbias=params['output_bias'],
                           recurrent=True)
        net.reset()

        ds = SequentialDataSet(n, n)
        trainer = RPropMinusTrainer(net, dataset=ds)

        history = self.window(self.history, params)
        resets = self.window(self.resets, params)

        for i in xrange(1, len(history)):
            if not resets[i - 1]:
                ds.addSample(self.encoder.encode(history[i - 1]),
                             self.encoder.encode(history[i]))
            if resets[i]:
                ds.newSequence()

        if len(history) > 1:
            trainer.trainEpochs(params['num_epochs'])
            net.reset()

        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = net.activate(self.encoder.encode(symbol))
            predictions = self.encoder.classify(output,
                                                num=params['num_predictions'])

            if resets[i]:
                net.reset()

        return net
Example #20
def main():
    generated_data = [0 for i in range(10000)]
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    net = buildNetwork(1,
                       5,
                       1,
                       hiddenclass=LSTMLayer,
                       outputbias=False,
                       recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()

    # predict new values
    old_sample = [100]

    for i in xrange(len(generated_data)):  # stay within the generated_data buffer
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)

    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
Example #21
def say_hello_text(username="******", text="You are good"):

    object_data_new = pd.read_csv('/Users/ruiyun_zhou/Documents/cmpe-274/data/data.csv')
    data_area_new = object_data_new[object_data_new.Area==username]
    data_area_new_1=data_area_new[data_area_new.Disease== text]
    data_list_new = data_area_new_1['Count'].values.tolist()
    print len(data_list_new)
    data_list=data_list_new
    ds = SequentialDataSet(1, 1)
    isZero = 0  # stays 0 only if every count in the series is zero
    for sample, next_sample in zip(data_list, cycle(data_list[1:])):
        ds.addSample(sample, next_sample)
        if sample:
            isZero = 1

    if isZero == 0:
        return '[0, 0]'

    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        print "Doing epoch %d" %i
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
#    return '<p>%d</p>\n' % (data_list_new.__len__())
#        print("final error =", train_errors[-1])
#    print "Value for last week is %4.1d" % abs(data_list[-1])
#    print "Value for next week is %4.1d" % abs(net.activate(data_list[-1]))
#    result = (abs(data_list[-1]))
    result = abs(net.activate([data_list[-1]]))
    result_1 = abs(net.activate(result))
    return '[%d, %d]' % (result, result_1)
Example #22
    def Train(self, dataset, error_observer, logger, dump_file):
        gradientCheck(self.m_net)

        net_dataset = SequenceClassificationDataSet(4, 2)
        for record in dataset:
            net_dataset.newSequence()

            gl_raises = record.GetGlRises()
            gl_min = record.GetNocturnalMinimum()

            if DayFeatureExpert.IsHypoglycemia(record):
                out_class = [1, 0]
            else:
                out_class = [0, 1]
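            # each gl_raise holds two (time-offset, value) points; dividing
            # total_seconds() by 24*3600 rescales time to days, and /300
            # roughly normalises the glucose values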

            for gl_raise in gl_raises:
                net_dataset.addSample([gl_raise[0][0].total_seconds() / (24 * 3600),
                                       gl_raise[0][1] / 300,
                                       gl_raise[1][0].total_seconds() / (24 * 3600),
                                       gl_raise[1][1] / 300],
                                      out_class)

        train_dataset, test_dataset = net_dataset.splitWithProportion(0.8)

        trainer = RPropMinusTrainer(self.m_net, dataset=train_dataset, momentum=0.8, learningrate=0.3, lrdecay=0.9, weightdecay=0.01, verbose=True)
        validator = ModuleValidator()

        train_error = []
        test_error = []
        for i in range(0, 80):
            trainer.trainEpochs(1)
            train_error.append(validator.MSE(self.m_net, train_dataset))  # validation step; could be parameterised with a custom error function
            test_error.append(validator.MSE(self.m_net, test_dataset))
            print train_error
            print test_error
            error_observer(train_error, test_error)
            gradientCheck(self.m_net)

        dump_file = open(dump_file, 'wb')
        pickle.dump(self.m_net, dump_file)
Example #23
def train(data,name):
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    net = buildNetwork(1, 200, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 20
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    store=[]
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        print tm.time()-atm
        stdout.flush() 
    for sample, target in ds.getSequenceIterator(0):
        store.append(net.activate(sample))
    abcd=pd.DataFrame(store)
    abcd.to_csv(pwd+"lstmdata/"+name+".csv",encoding='utf-8')
    print "result printed to file"
Example #24
#rnn.addOutputModule(SoftmaxLayer(1, name='out'))
#
#rnn.addConnection(FullConnection(rnn['in'], rnn['hidden'], name='c1'))
#rnn.addConnection(FullConnection(rnn['hidden'], rnn['out'], name='c2'))
#
#rnn.addRecurrentConnection(FullConnection(rnn['hidden'], rnn['hidden'], name='c3'))
#rnn.sortModules()

# define a training method
trainer = RPropMinusTrainer(rnn, dataset=trndata, verbose=True )
# instead, you may also try
##trainer = BackpropTrainer( rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.00001 )

# carry out the training
for i in range(100):
    trainer.trainEpochs( 2 )
    trnresult = 100. * (1.0-testOnSequenceData(rnn, trndata))
    tstresult = 100. * (1.0-testOnSequenceData(rnn, tstdata))
    print("train error: %5.2f%%" % trnresult, ",  test error: %5.2f%%" % tstresult)

# just for reference, plot the first 5 timeseries
plot(trndata['input'][0:250,:],'-o')
hold(True)
plot(trndata['target'][0:250,0])
show()
Example #25
net.addRecurrentConnection(FullConnection(h, h, inSliceTo = dim, outSliceTo = 4*dim, name = 'r1'))
net.addRecurrentConnection(IdentityConnection(h, h, inSliceFrom = dim, outSliceFrom = 4*dim, name = 'rstate'))
net.addConnection(FullConnection(h, o, inSliceTo = dim, name = 'f3'))
net.sortModules()

print net

ds = SequentialDataSet(15, 1)
ds.newSequence()

input = open(sys.argv[1], 'r')
for line in input.readlines():
    row = np.array(line.split(','))
    ds.addSample([float(x) for x in row[:15]], float(row[16]))
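# note: columns 0-14 supply the 15 inputs and column 16 the target,
# so column 15 of each row is unused by this snippet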
print ds

if len(sys.argv) > 2:
    test = SequentialDataSet(15, 1)
    test.newSequence()
    input = open(sys.argv[2], 'r')
    for line in input.readlines():
        row = np.array(line.split(','))
        test.addSample([float(x) for x in row[:15]], float(row[16]))
else:
    test = ds
print test

net.reset()
trainer = RPropMinusTrainer( net, dataset=ds, verbose=True)
trainer.trainEpochs(1000)
evalRnnOnSeqDataset(net, test, verbose = True)
Example #26
                   1,
                   hiddenclass=LSTMLayer,
                   outputbias=False,
                   recurrent=True)

# Initialize trainer
trainer = RPropMinusTrainer(rnn, dataset=ds)

# Predefine iterations: epochs & cycles
EPOCHS_PER_CYCLE = 5
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES

# Training loop
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    error = trainer.testOnData()
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r Epoch: {}/{} Error: {}".format(epoch, EPOCHS, error), end="")
    stdout.flush()

# Save model
NetworkWriter.writeToFile(rnn, 'rnn3.xml')

# Ad hoc test
for test in test_data:
    for i in xrange(0, len(test) - 6, 5):
        # Get 5 obs, 6th we wish to predict
        obs, nxt = test[i:i + 5], test[i + 6]

        # Predict all
Example #27
net = buildNetwork(1, 12, 1, hiddenclass=LSTMLayer, peepholes=False, outputbias=False, recurrent=True)
# net = buildNetwork(1, 1, 1, hiddenclass=LSTMLayer, peepholes = True, outputbias=False, recurrent=True)
# rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)

from pybrain.supervised import RPropMinusTrainer
from sys import stdout

trainer = RPropMinusTrainer(net, dataset=ds, verbose = True)
#trainer.trainUntilConvergence()

train_errors = [] # save errors for plotting later
EPOCHS_PER_CYCLE = 100  # increasing the epochs to 20 decreased accuracy drastically; fewer epochs are desired (epoch 5: err = 0.04)
CYCLES = 10             # vary the epochs, the cycles and the LSTM cells to get more accurate results
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE)     # train on the given data set for given number of epochs
    train_errors.append(trainer.testOnData())
    epoch = (i+1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    stdout.flush()

print()
print("final error =", train_errors[-1])


## Plot  the data and the training
import matplotlib.pyplot as plt
plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
plt.show()
Example #28
    ds = SequentialDataSet(nDim, nDim)
    trainer = RPropMinusTrainer(net)
    trainer.setData(ds)
    for _ in xrange(1000):
      # Batch training mode
      # print "generate a dataset of sequences"
      import random
      random.shuffle(sequences)
      concat_sequences = []
      for sequence in sequences:
        concat_sequences += sequence
        concat_sequences.append(random.randrange(100, 1000000))
    for j in xrange(len(concat_sequences) - 1):
      ds.addSample(num2vec(concat_sequences[j], nDim), num2vec(concat_sequences[j+1], nDim))
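    # the large random number appended after each sequence acts as a
    # separator, so consecutive sequences are not learned as one pattern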

    trainer.trainEpochs(rptNum)

    print
    print "test LSTM, repeats =", rptNum
    # test LSTM
    correct = []
    for i in xrange(len(sequences)):
      net.reset()
      sequence = sequences[i]
      sequence = sequence + [random.randrange(100, 1000000)]
      print sequence
      predictedInput = []
      for j in xrange(len(sequence)):
        sample = num2vec(sequence[j], nDim)
        netActivation = net.activate(sample)
        if j+1 < len(sequence) - 1:
Example #29
        ds.newSequence()
        for j in range(length):
            ds.addSample(x[j], target[j])
    return ds


if __name__ == '__main__':
    # Choose network parameters (see pygfnn.tools.shortcuts for more)
    oscParams = gfnn.OSC_CRITICAL
    freqDist = { 'fspac': 'log', 'min': 0.5, 'max': 8 }
    gfnnLearnParams = None
    gfnnDim = 50
    lstmDim = 5

    # Build network
    n = buildGFNNLSTM(gfnnDim, lstmDim, oscParams=oscParams,
                      freqDist=freqDist, learnParams=gfnnLearnParams)

    # Create a dataset - ten 40-second pulse sequences at various tempos
    ds = buildDS(n, 10, 40)

    # Train (hopefully you'll see errors go down!)
    tr = RPropMinusTrainer(n, dataset=ds, verbose=True)

    timer = timeit.default_timer
    start = timer()
    tr.trainEpochs(5)  # note: trainEpochs() returns None; use tr.testOnData() for an error value
    end = timer()
    print('Elapsed time is %f seconds' % (end - start))
Example #30
trndata._convertToOneOfMany(bounds=[0., 1.])
tstdata = generateNoisySines(50, 20)
tstdata._convertToOneOfMany(bounds=[0., 1.])

# construct LSTM network - note the missing output bias
rnn = buildNetwork(trndata.indim,
                   5,
                   trndata.outdim,
                   hiddenclass=LSTMLayer,
                   outclass=SoftmaxLayer,
                   outputbias=False,
                   recurrent=True)

# define a training method
trainer = RPropMinusTrainer(rnn, dataset=trndata, verbose=True)
# instead, you may also try
##trainer = BackpropTrainer( rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.00001 )

# carry out the training
for i in xrange(100):
    trainer.trainEpochs(2)
    trnresult = 100. * (1.0 - testOnSequenceData(rnn, trndata))
    tstresult = 100. * (1.0 - testOnSequenceData(rnn, tstdata))
    print "train error: %5.2f%%" % trnresult, ",  test error: %5.2f%%" % tstresult

# just for reference, plot the first 5 timeseries
plot(trndata['input'][0:250, :], '-o')
hold(True)
plot(trndata['target'][0:250, 0])
show()
Example #31
trainer = RPropMinusTrainer(net, dataset=ds)

epochcount = 0
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1, 17))
    startingduration2 = random.choice(range(1, 17))
    song = [[
        startingnote, startingduration, 1, 1, 0, startingnote2,
        startingduration2, 1, 1, 0
    ]]
    length = 50
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    newsong = []
    for x in song:
        newx = []
        newy = []
        for i in x:
            if len(newx) < 5:
                newx.append(int(i))
            else:
                newy.append(int(i))
        newsong.append(newx)
        newsong.append(newy)

    print newsong
    print "The above song is after " + str(epochcount) + " epochs."
    trainer.trainEpochs(epochs=1)
    epochcount += 1
Example #32
train_errors = [] 
train_errors_2 = [] 
train_errors_3 = [] 
train_errors_4 = [] 
train_errors_5 = [] 
train_errors_6 = [] 
train_errors_8 = [] 
train_errors_9 = [] 
train_errors_10 = [] 

# Training
EPOCHS_per_CYCLE = 6
NUM_CYCLES = 15
EPOCHS = EPOCHS_per_CYCLE * NUM_CYCLES
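# trainer, trainer_2 ... trainer_9 (and their networks/datasets) are assumed
# to be constructed earlier in the original script; this fragment only runs them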
for i in xrange(NUM_CYCLES):
    trainer.trainEpochs(EPOCHS_per_CYCLE)
    train_errors.append(trainer.testOnData())
    trainer_2.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_2.append(trainer_2.testOnData()) 
    trainer_3.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_3.append(trainer_3.testOnData())
    trainer_4.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_4.append(trainer_4.testOnData())
    trainer_5.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_5.append(trainer_5.testOnData())
    trainer_6.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_6.append(trainer_6.testOnData())
    trainer_8.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_8.append(trainer_8.testOnData())
    trainer_9.trainEpochs(EPOCHS_per_CYCLE)
    train_errors_9.append(trainer_9.testOnData())
Example #33
def rnn():
    # load dataframe from csv file
    df = pi.load_data_frame('../../data/NABIL.csv')
    # column name to match with indicator calculating modules
    # TODO: resolve issue with column name
    df.columns = [
        'Transactions', 'Traded_Shares', 'Traded_Amount', 'High', 'Low',
        'Close'
    ]

    data = df.Close.values
    # TODO: write min_max normalization
    # normalization
    # cp = dataframe.pop(' Close Price')
    # x = cp.values
    temp = np.array(data).reshape(len(data), 1)
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(temp)
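    # MinMaxScaler maps the closing prices into [0, 1]; the same scaler is
    # used further down to map predictions back to price space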
    # dataframe[' Close Price'] = x_scaled

    # prepare a sequential dataset for the pyBrain rnn network
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    # build rnn network with LSTM layer
    # if saved network is available
    if os.path.isfile('network.xml'):  # reload the previously saved network if present
        net = NetworkReader.readFrom('network.xml')
    else:
        net = buildNetwork(1,
                           20,
                           1,
                           hiddenclass=LSTMLayer,
                           outputbias=False,
                           recurrent=True)

    # build trainer
    trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 5
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in range(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        sys.stdout.flush()
    # save the network
    NetworkWriter.writeToFile(net, 'network.xml')

    print()
    print("final error =", train_errors[-1])

    predicted = []
    for dat in data:
        predicted.append(net.activate(dat)[0])
    # data = min_max_scaler.inverse_transform(data)
    # predicted = min_max_scaler.inverse_transform(predicted)
    predicted_array = min_max_scaler.inverse_transform(
        np.array(predicted).reshape(-1, 1))
    print(predicted_array[-1])
    plt.figure()

    legend_actual, = plt.plot(range(0, len(data)),
                              temp,
                              label='actual',
                              linestyle='--',
                              linewidth=2,
                              c='blue')
    legend_predicted, = plt.plot(range(0, len(data)),
                                 predicted_array,
                                 label='predicted',
                                 linewidth=1.5,
                                 c='red')
    plt.legend(handles=[legend_actual, legend_predicted])
    plt.savefig('error.png')
    plt.show()
Example #35
net.addConnection(FullConnection(net["input"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["hidden2"], name="c3"))
net.addConnection(FullConnection(net["bias"], net["hidden2"], name="c4"))
net.addConnection(FullConnection(net["hidden2"], net["output"], name="c5"))
net.addRecurrentConnection(FullConnection(net["hidden1"], net["hidden1"], name="c6"))
net.sortModules()
# net = buildNetwork(n_input, 256, n_output, hiddenclass=LSTMLayer, outclass=TanhLayer, outputbias=False, recurrent=True)
# net = NetworkReader.readFrom('signal_weight.xml')

# train network
trainer = RPropMinusTrainer(net, dataset=training_dataset, verbose=True, weightdecay=0.01)
# trainer = BackpropTrainer(net, dataset=training_dataset, learningrate = 0.04, momentum = 0.96, weightdecay = 0.02, verbose = True)

for i in range(100):
  # train the network for 5 epochs
  trainer.trainEpochs(5)

  # evaluate the result on the training and test data
  trnresult = percentError(trainer.testOnClassData(), training_dataset['class'])
  tstresult = percentError(trainer.testOnClassData(dataset=testing_dataset), testing_dataset['class'])

  # print the result
  print("epoch: %4d" % trainer.totalepochs, \
        "  train error: %5.2f%%" % trnresult, \
        "  test error: %5.2f%%" % tstresult)
  if tstresult <= 0.5:
    print('Bingo !!!!!!!!!!!!!!!!!!!!!!')
    break

  # export network
  NetworkWriter.writeToFile(net, 'signal_weight.xml')
Example #36
# net = buildNetwork(1, 1, 1, hiddenclass=LSTMLayer, peepholes = True, outputbias=False, recurrent=True)
# rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)

from pybrain.supervised import RPropMinusTrainer
from sys import stdout

trainer = RPropMinusTrainer(net, dataset=ds, verbose=True)
#trainer.trainUntilConvergence()

train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 100  # increasing the epochs to 20 decreased accuracy drastically; fewer epochs are desired (epoch 5: err = 0.04)
CYCLES = 10  # vary the epochs, the cycles and the LSTM cells to get more accurate results
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(
        EPOCHS_PER_CYCLE
    )  # train on the given data set for given number of epochs
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    stdout.flush()

print()
print("final error =", train_errors[-1])

## Plot  the data and the training
import matplotlib.pyplot as plt

plt.plot(range(0, EPOCHS, EPOCHS_PER_CYCLE), train_errors)
plt.xlabel('epoch')
plt.ylabel('error')
Example #37
net.addConnection(FullConnection(net['hidden' + str(layerCount - 1)], net['out'], name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
trainer = RPropMinusTrainer(net, dataset=ds)

epochcount = 0
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1,17))
    startingduration2 = random.choice(range(1, 17))
    song = [[startingnote, startingduration, 1, 1, 0, startingnote2, startingduration2, 1, 1, 0]]
    length = 50
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    newsong = []
    for x in song:
        newx = []
        newy = []
        for i in x:
            if len(newx) < 5:
                newx.append(int(i))
            else:
                newy.append(int(i))
        newsong.append(newx)
        newsong.append(newy)

    print newsong
    print "The above song is after " + str(epochcount) + " epochs."
    trainer.trainEpochs(epochs=1)
    epochcount += 1
Example #38
from pybrain.supervised import RPropMinusTrainer
from sys import stdout


print 'Starting to train neural network. . .'
trainer = RPropMinusTrainer(net, dataset=ds)
train_errors = []  # save errors for plotting later
EPOCHS_PER_CYCLE = 2
#CYCLES = 200
CYCLES = 100
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
print 'Entering loop. . .'
for i in xrange(CYCLES):
    # Does the training
    trainer.trainEpochs(EPOCHS_PER_CYCLE)
    train_errors.append(trainer.testOnData())
    epoch = (i + 1) * EPOCHS_PER_CYCLE
    print 'i: ', i
    print ('\r epoch {}/{}'.format(epoch, EPOCHS))

    stdout.flush()
print 'Exit loop'
print ''

print 'final error =', train_errors[-1]

# Plot the errors (note that in this simple toy example,
# we are testing and training on the same dataset, which
# is of course not what you'd do for a real project!):
Example #39
        trainer = RPropMinusTrainer(net)
        trainer.setData(ds)
        for _ in xrange(1000):
            # Batch training mode
            # print "generate a dataset of sequences"
            import random
            random.shuffle(sequences)
            concat_sequences = []
            for sequence in sequences:
                concat_sequences += sequence
                concat_sequences.append(random.randrange(100, 1000000))
        for j in xrange(len(concat_sequences) - 1):
            ds.addSample(num2vec(concat_sequences[j], nDim),
                         num2vec(concat_sequences[j + 1], nDim))

        trainer.trainEpochs(rptNum)

        print
        print "test LSTM, repeats =", rptNum
        # test LSTM
        correct = []
        for i in xrange(len(sequences)):
            net.reset()
            sequence = sequences[i]
            sequence = sequence + [random.randrange(100, 1000000)]
            print sequence
            predictedInput = []
            for j in xrange(len(sequence)):
                sample = num2vec(sequence[j], nDim)
                netActivation = net.activate(sample)
                if j + 1 < len(sequence) - 1: