Example #1
def training(d):
    # net = buildNetwork(d.indim, 55, d.outdim, bias=True, recurrent=False, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer)
    net = FeedForwardNetwork()
    inLayer = SigmoidLayer(d.indim)
    hiddenLayer1 = SigmoidLayer(d.outdim)
    hiddenLayer2 = SigmoidLayer(d.outdim)
    outLayer = SigmoidLayer(d.outdim)

    net.addInputModule(inLayer)
    net.addModule(hiddenLayer1)
    net.addModule(hiddenLayer2)
    net.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer1)
    hidden_to_hidden = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden_to_out = FullConnection(hiddenLayer2, outLayer)

    net.addConnection(in_to_hidden)
    net.addConnection(hidden_to_hidden)
    net.addConnection(hidden_to_out)

    net.sortModules()
    print net

    t = BackpropTrainer(net, d, learningrate=0.9, momentum=0.9, weightdecay=0.01, verbose=True)
    t.trainUntilConvergence(continueEpochs=1200, maxEpochs=1000)
    NetworkWriter.writeToFile(net, 'myNetwork'+str(time.time())+'.xml')
    return t
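A minimal sketch of how this training() helper might be driven; the XOR-style dataset is illustrative (not from the original), and the usual PyBrain imports are assumed:

from pybrain.datasets import SupervisedDataSet

d = SupervisedDataSet(2, 1)   # 2 inputs, 1 target per sample
d.addSample((0, 0), (0,))
d.addSample((0, 1), (1,))
d.addSample((1, 0), (1,))
d.addSample((1, 1), (0,))
trainer = training(d)         # trains, prints the net, writes myNetwork<timestamp>.xml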
Example #2
 def train(self, epochs=None):
     trainer = BackpropTrainer(
         self.net,
         self.training_data
     )
     if epochs:
         trainer.trainEpochs(epochs)
     else:
         trainer.trainUntilConvergence()
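For reference, trainUntilConvergence() returns the training- and validation-error curves as two lists, which several fragments below unpack; a one-line sketch (names illustrative):

train_errors, val_errors = trainer.trainUntilConvergence(maxEpochs=100)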
Example #3
def main(f_samples):
    f_reading = open(f_samples, 'r')
    global data
    data = []

    for line in f_reading:
        line = line.split()
        data.append( (float(line[0]), float(line[-1])) )

    #function
    data_module = lambda x: map( lambda z: data[z], filter( lambda y: y% 5 == x, xrange(len(data)) ) )

    global data1
    data1 = [data_module(0), data_module(1), data_module(2), data_module(3), data_module(4)]

    global data_transformed
    data_transformed = take(data, rate = 60)

    global data_transformed_training
    data_transformed_training = map( lambda x: data_transformed[x], filter( lambda x: uniform(0, 1) > 0.3, xrange(len(data_transformed)) ))

    #Learning process-----------------------------------------------------------------

    global net, samples, trainer
    net = FeedForwardNetwork()
    inLayer = LinearLayer(3)
    hiddenLayer0 = SigmoidLayer(1)
    hiddenLayer1 = SigmoidLayer(3)
    outLayer = LinearLayer(1)

    net.addInputModule(inLayer)
#    net.addModule(hiddenLayer0)
#    net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)

#    net.addConnection(FullConnection(inLayer, hiddenLayer0))
    net.addConnection(FullConnection(inLayer, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
#    net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    print net
    ##Net with 3 inputs, 8 hidden neurons in a layer and 8 in another, and 1 out.
    #net = buildNetwork(3,8,8,1)
    ##Set with 3 inputs and one output for each sample
    samples = SupervisedDataSet(3,1)

    for i in data_transformed_training:
        samples.addSample(i['past'], i['next'] - i['average'])
    trainer = BackpropTrainer(net, samples)

    print 'Training'
    trainer.trainUntilConvergence(maxEpochs= 10)

    print 'Comparing'
    compare_net_samples(net, data_transformed)
    print "Number of samples %d for training." %len(data_transformed_training)
Example #4
    def initializeNetwork(self):
        self.net = buildNetwork(26, 15, 5, hiddenclass=TanhLayer, outclass=SoftmaxLayer)  # 15 hidden units: roughly the mean of the 26 inputs and 5 outputs
        ds = ClassificationDataSet(26, nb_classes=5)
        
        for x in self.train:
            ds.addSample(x.frequency, self.encodingDict[x.lang])
        ds._convertToOneOfMany()

        trainer = BackpropTrainer(self.net, dataset=ds, weightdecay=0.01, momentum=0.1, verbose=True)
        trainer.trainUntilConvergence(maxEpochs=100)
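Since _convertToOneOfMany() gives the network one softmax output per class, a prediction has to be decoded back to a class index; a minimal sketch, where net and features are illustrative names:

import numpy as np

probs = net.activate(features)           # one softmax activation per class
predicted_class = int(np.argmax(probs))  # index of the winning class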
Example #5
def training(d):
    """
    Builds a network and trains it.
    """
    n = buildNetwork(d.indim, INPUTS - 3, INPUTS - 4, d.outdim, recurrent=True)
    print n
    t = BackpropTrainer(n, d, learningrate = 0.02, momentum = 0.88)
    #for epoch in range(0,700):
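    # the positional arguments below map to trainUntilConvergence(dataset=d, maxEpochs=1190)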
    t.trainUntilConvergence(d, 1190)
    
    return t
Example #6
    def train(self, **kwargs):

        if "verbose" in kwargs:
            verbose = kwargs["verbose"]
        else:
            verbose = False

        """t = BackpropTrainer(self.rnn, dataset=self.trndata, learningrate = 0.1, momentum = 0.0, verbose = True)
        for i in range(1000):
            t.trainEpochs(5)

        """
        # pdb.set_trace()
        #print self.nn.outdim, " nn | ", self.trndata.outdim, " trndata "
        trainer = BackpropTrainer(self.nn, self.trndata, learningrate = 0.0005, momentum = 0.99)
        assert (self.tstdata is not None)
        assert (self.trndata is not None)
        b1, b2 = trainer.trainUntilConvergence(verbose=verbose,
                              trainingData=self.trndata,
                              validationData=self.tstdata,
                              maxEpochs=10)
        #print b1, b2
        #print "new parameters are: "
        #self.print_connections()

        return b1, b2
Example #7
 def train(training_data):
     training_set = ClassificationDataSet(len(feats), nb_classes=len(classes))
     for inst in training_data:
         training_set.appendLinked(inst.features(), [inst.class_idx()])
     training_set._convertToOneOfMany([0, 1])
     net_placeholder[0] = buildNetwork(
         training_set.indim,
         int((training_set.indim + training_set.outdim)/2),
         training_set.outdim, bias=True,
         hiddenclass=TanhLayer,
         outclass=SoftmaxLayer
     )
     trainer = BackpropTrainer(
         net_placeholder[0], training_set, momentum=0.75, verbose=False, learningrate=0.05
     )
     trainer.trainUntilConvergence(maxEpochs=100, validationProportion=0.1)
Example #8
 def build_net(self):
     if os.path.exists(self.NET_FILE):
         return NetworkReader.readFrom(self.NET_FILE)
     ds = ClassificationDataSet(len(feats), nb_classes=len(classes))
     for c in classes:
         print c
         with codecs.open(os.path.join(self.data_root, c+".txt"), 'r', 'utf8') as f:
             for line in f:
                 r = Record("11", line, c, "")
                 ds.appendLinked(r.features(), [r.class_idx()])
     ds._convertToOneOfMany([0, 1])
     net = buildNetwork(ds.indim, int((ds.indim + ds.outdim)/2), ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
     trainer = BackpropTrainer(net, ds, momentum=0.75, verbose=True)
     trainer.trainUntilConvergence(maxEpochs=300)
     NetworkWriter.writeToFile(net, self.NET_FILE)
     return net
Example #9
 def learn_until_convergence(self, learning_rate, momentum, max_epochs, continue_epochs, verbose=True):
     if verbose:
         print "Training neural network..."
     trainer = BackpropTrainer(self.network, self.learn_data, learningrate=learning_rate, momentum=momentum)
     training_errors, validation_errors = trainer.trainUntilConvergence(continueEpochs=continue_epochs,
                                                                        maxEpochs=max_epochs)
     self.x = range(1, len(training_errors) + 1)
     self.err = training_errors
     return self.network
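The x/err pair stored above looks intended for plotting the learning curve; a minimal sketch, assuming matplotlib is available and learner is an instance of this class (both assumptions):

import matplotlib.pyplot as plt

plt.plot(learner.x, learner.err)   # epoch number vs. training error
plt.xlabel('epoch')
plt.ylabel('training error')
plt.show()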
Example #10
def create_network(timesteps):
    trndata, validdata, tstdata = read_data_MNIST(timesteps)
    rnn = buildNetwork(
        trndata.indim, 20, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=True, recurrent=True
    )
    # 20 is the number of LSTM blocks in the hidden layer
    # we use the BPTT algo to train

    trainer = BackpropTrainer(rnn, dataset=trndata, verbose=True, momentum=0.9, learningrate=0.00001)
    print "Training started ..."
    t1 = time.clock()
    # trainer.trainEpochs(10)
    trainer.trainUntilConvergence(maxEpochs=1000)
    t2 = time.clock()
    print "Training 1000 epochs took :  ", (t2 - t1) / 60.0, "minutes "
    # train for 1000 epochs
    trnresult = 100.0 * (1.0 - testOnSequenceData(rnn, trndata))
    tstresult = 100.0 * (1.0 - testOnSequenceData(rnn, tstdata))
    print "Train Error : %5.2f%%" % trnresult, " , test error :%5.2f%%" % tstresult
Example #11
def neuralNetworkRegression(X,Y):
    print ("NEURAL NETWORK REGRESSION")
    print ("Executing...")

    X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.10, random_state = 5)
    Y_test = Y_test.reshape(-1,1)
    Y_train = Y_train.reshape(-1,1)
    RMSEerror = []

    train = np.vstack((X_train, X_test))  # append both testing and training into one array
    outputTrain = np.vstack((Y_train, Y_test))
    outputTrain = outputTrain.reshape( -1, 1 )

    inputSize = train.shape[1]
    targetSize = outputTrain.shape[1]

    ds = SupervisedDataSet(inputSize, targetSize)
    ds.setField('input', train)
    ds.setField('target', outputTrain)

    hiddenSize = 100
    epochs = 100  # got after parameter tuning

    # neural network training model
    net = buildNetwork( inputSize, hiddenSize, targetSize, bias = True )
    trainer = BackpropTrainer(net, ds)

    # uncomment the block below to plot epoch vs. RMSE
    # (takes time to execute, as it searches for the best epoch value)

    print ("training for {} epochs...".format( epochs ))
    '''
    for i in range(epochs):
        print (i)
        mse = trainer.train()
        rmse = mse ** 0.5
        RMSEerror.append(rmse)

    plt.plot(range(epochs), RMSEerror)
    plt.xlabel("Epochs")
    plt.ylabel("RMSE")
    plt.title("RMSE vs Epochs")
    plt.savefig("../Graphs/Network/Question 2c/RMSE vs Epochs.png")

    plt.show()
    '''
    print ("Model training in process...")
    train_mse, validation_mse = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.15, maxEpochs = epochs, continueEpochs = 10)
    p = net.activateOnDataset(ds)
    
    mse = mean_squared_error(outputTrain, p)
    rmse = mse ** 0.5

    print ("Root Mean Squared Error for Best Parameters : " + str(rmse))
Example #12
def main(args=[__file__]):
    trnDs, tstDs = getSeparateDataSets()
    net = buildNetwork(trnDs.indim, int((trnDs.indim + trnDs.outdim)/2), trnDs.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, trnDs, momentum=0.75, verbose=True, learningrate=0.05)
    trainer.trainUntilConvergence(maxEpochs=100, validationProportion=0.1)
    eval = evaluate(net, tstDs)
    print "accuracy:", eval.getWeightedAccuracy()
    print "recall:", eval.getWeightedRecall()
    print "precision:", eval.getWeightedPrecision()
    print "F-measure:", eval.getWeightedFMeasure()

    if detailed:
        for evalRes in eval.evals:
            print "Class:", evalRes.clazz
            print "Accuracy:", evalRes.getAccuracy()
            print "Recall:", evalRes.getRecall()
            print "Precision:", evalRes.getPrecision()
            print "F-measure:", evalRes.getFMeasure()
            print '-'*35

    print '-'*70
Example #13
    def train(self):
        """t = BackpropTrainer(self.rnn, dataset=self.trndata, learningrate = 0.1, momentum = 0.0, verbose = True)
        for i in range(1000):
            t.trainEpochs(5)

        """
        print self.nn.outdim, " nn | ", self.trndata.outdim, " trndata "
        trainer = BackpropTrainer(self.nn, self.trndata, learningrate = 0.0005, momentum = 0.99)
        b1, b2 = trainer.trainUntilConvergence(verbose=True,
                              trainingData=self.trndata,
                              validationData=self.tstdata,
                              maxEpochs=10)
        print b1, b2
        print "new parameters are: "
        self.print_connections()
Example #14
def get_third_nn(value, good_data, bad_data):
    build_network = FeedForwardNetwork()
    inLayer = LinearLayer(len(good_data[0]))
    hiddenLayer = SigmoidLayer(value)
    outLayer = SigmoidLayer(1)

    build_network.addInputModule(inLayer)
    build_network.addModule(hiddenLayer)
    build_network.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    in_to_out = FullConnection(inLayer, outLayer)

    build_network.addConnection(in_to_hidden)
    build_network.addConnection(hidden_to_out)
    build_network.addConnection(in_to_out)

    build_network.sortModules()
    trainer = BackpropTrainer(build_network,
                              get_supervised_data_set(good_data, bad_data))

    result = trainer.trainUntilConvergence()
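    # trainUntilConvergence returns (training_errors, validation_errors),
    # so result[0][-1] below is the final training error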
    return result[0][-1]
Example #15
 def trainNetworkBackprop(self, dataset, maxIter):
     trainer = BackpropTrainer(self.net, dataset)
     print "\tInitialised backpropagation trainer.  Now execute until convergence:"
     trainer.trainUntilConvergence(verbose=True, maxEpochs=maxIter)
     print "\tConvergence achieved."
Example #16
class RWR(DirectSearchLearner):
    """ Reward-weighted regression.

    The algorithm is currently limited to discrete-action episodic tasks, subclasses of POMDPTasks.
    """

    # parameters
    batchSize = 20

    # feedback settings
    verbose = True
    greedyRuns = 20
    supervisedPlotting = False

    # settings for the supervised training
    learningRate = 0.005
    momentum = 0.9
    maxEpochs = 20
    validationProportion = 0.33
    continueEpochs = 2

    # parameters for the variation that uses a value function
    # TODO: split into 2 classes.
    valueLearningRate = None
    valueMomentum = None
    #valueTrainEpochs = 5
    resetAllWeights = False
    netweights = 0.01

    def __init__(self, net, task, valueNetwork=None, **args):
        self.net = net
        self.task = task
        self.setArgs(**args)
        if self.valueLearningRate == None:
            self.valueLearningRate = self.learningRate
        if self.valueMomentum == None:
            self.valueMomentum = self.momentum
        if self.supervisedPlotting:
            from pylab import ion
            ion()

        # adaptive temperature:
        self.tau = 1.

        # prepare the datasets to be used
        self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
        self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
        self.valueDs = SequentialDataSet(self.task.outdim, 1)

        # prepare the supervised trainers
        self.bp = BackpropTrainer(self.net,
                                  self.weightedDs,
                                  self.learningRate,
                                  self.momentum,
                                  verbose=False,
                                  batchlearning=True)

        # CHECKME: outsource
        self.vnet = valueNetwork
        if valueNetwork != None:
            self.vbp = BackpropTrainer(self.vnet,
                                       self.valueDs,
                                       self.valueLearningRate,
                                       self.valueMomentum,
                                       verbose=self.verbose)

        # keep information:
        self.totalSteps = 0
        self.totalEpisodes = 0

    def shapingFunction(self, R):
        return exp(self.tau * R)

    def updateTau(self, R, U):
        self.tau = sum(U) / dot((R - self.task.minReward), U)

    def reset(self):
        self.weightedDs.clear()
        self.valueDs.clear()
        self.rawDs.clear()
        self.bp.momentumvector *= 0.0
        if self.vnet != None:
            self.vbp.momentumvector *= 0.0
            if self.resetAllWeights:
                self.vnet.params[:] = randn(len(
                    self.vnet.params)) * self.netweights

    def greedyEpisode(self):
        """ run one episode with greedy decisions, return the list of rewards recieved."""
        rewards = []
        self.task.reset()
        self.net.reset()
        while not self.task.isFinished():
            obs = self.task.getObservation()
            act = self.net.activate(obs)
            chosen = argmax(act)
            self.task.performAction(chosen)
            reward = self.task.getReward()
            rewards.append(reward)
        return rewards

    def learn(self, batches):
        self.greedyAvg = []
        self.rewardAvg = []
        self.lengthAvg = []
        self.initr0Avg = []
        for b in range(batches):
            if self.verbose:
                print()
                print(('Batch', b + 1))
            self.reset()
            self.learnOneBatch()
            self.totalEpisodes += self.batchSize

            # greedy measure (avg over some greedy runs)
            rws = 0.
            for dummy in range(self.greedyRuns):
                tmp = self.greedyEpisode()
                rws += (sum(tmp) / float(len(tmp)))
            self.greedyAvg.append(rws / self.greedyRuns)
            if self.verbose:
                print(('::', round(rws / self.greedyRuns, 5), '::'))

    def learnOneBatch(self):
        # collect a batch of runs as experience
        r0s = []
        lens = []
        avgReward = 0.
        for dummy in range(self.batchSize):
            self.rawDs.newSequence()
            self.valueDs.newSequence()
            self.task.reset()
            self.net.reset()
            acts, obss, rewards = [], [], []
            while not self.task.isFinished():
                obs = self.task.getObservation()
                act = self.net.activate(obs)
                chosen = drawIndex(act)
                self.task.performAction(chosen)
                reward = self.task.getReward()
                obss.append(obs)
                y = zeros(len(act))
                y[chosen] = 1
                acts.append(y)
                rewards.append(reward)
            avgReward += sum(rewards) / float(len(rewards))

            # compute the returns from the list of rewards
            current = 0
            returns = []
            for r in reversed(rewards):
                current *= self.task.discount
                current += r
                returns.append(current)
            returns.reverse()
            for i in range(len(obss)):
                self.rawDs.addSample(obss[i], acts[i], returns[i])
                self.valueDs.addSample(obss[i], returns[i])
            r0s.append(returns[0])
            lens.append(len(returns))

        r0s = array(r0s)
        self.totalSteps += sum(lens)
        avgLen = sum(lens) / float(self.batchSize)
        avgR0 = mean(r0s)
        avgReward /= self.batchSize
        if self.verbose:
            print((
                '***',
                round(avgLen, 3),
                '***',
                '(avg init exp. return:',
                round(avgR0, 5),
                ')',
            ))
            print(('avg reward', round(avgReward,
                                       5), '(tau:', round(self.tau, 3), ')'))
            print(lens)
        # storage:
        self.rewardAvg.append(avgReward)
        self.lengthAvg.append(avgLen)
        self.initr0Avg.append(avgR0)

        #        if self.vnet == None:
        #            # case 1: no value estimator:

        # prepare the dataset for training the acting network
        shaped = self.shapingFunction(r0s)
        self.updateTau(r0s, shaped)
        shaped /= max(shaped)
        for i, seq in enumerate(self.rawDs):
            self.weightedDs.newSequence()
            for sample in seq:
                obs, act, dummy = sample
                self.weightedDs.addSample(obs, act, shaped[i])

#        else:
#            # case 2: value estimator:
#
#
#            # train the value estimating network
#            if self.verbose: print('Old value error:  ', self.vbp.testOnData())
#            self.vbp.trainEpochs(self.valueTrainEpochs)
#            if self.verbose: print('New value error:  ', self.vbp.testOnData())
#
#            # produce the values and analyze
#            rminusvs = []
#            sizes = []
#            for i, seq in enumerate(self.valueDs):
#                self.vnet.reset()
#                seq = list(seq)
#                for sample in seq:
#                    obs, ret = sample
#                    val = self.vnet.activate(obs)
#                    rminusvs.append(ret-val)
#                    sizes.append(len(seq))
#
#            rminusvs = array(rminusvs)
#            shapedRminusv = self.shapingFunction(rminusvs)
#            # CHECKME: here?
#            self.updateTau(rminusvs, shapedRminusv)
#            shapedRminusv /= array(sizes)
#            shapedRminusv /= max(shapedRminusv)
#
#            # prepare the dataset for training the acting network
#            rvindex = 0
#            for i, seq in enumerate(self.rawDs):
#                self.weightedDs.newSequence()
#                self.vnet.reset()
#                for sample in seq:
#                    obs, act, ret = sample
#                    self.weightedDs.addSample(obs, act, shapedRminusv[rvindex])
#                    rvindex += 1

        # train the acting network
        tmp1, tmp2 = self.bp.trainUntilConvergence(
            maxEpochs=self.maxEpochs,
            validationProportion=self.validationProportion,
            continueEpochs=self.continueEpochs,
            verbose=self.verbose)
        if self.supervisedPlotting:
            from pylab import plot, legend, figure, clf, draw
            figure(1)
            clf()
            plot(tmp1, label='train')
            plot(tmp2, label='valid')
            legend()
            draw()

        return avgLen, avgR0
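In the notation of the class above: shapingFunction() weights each episode's return R by exp(tau * R), updateTau() re-estimates the temperature as tau = sum(U) / sum((R - minReward) * U) with U the current weights, and the division by max(shaped) normalizes the weights into (0, 1] before they enter the ImportanceDataSet.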
Example #17
def neuralNetworkRegression(X, Y, X_TEST, Y_TEST):
    """
    :param X: data consisting of features (excluding class variable)
    :param Y: column vector consisting of class variable
    :return: models neural network regression with fine-tuning of epochs
    """
    print "NEURAL NETWORK REGRESSION"
    print "Executing..."
    print

    try:
        print "Loading saved model..."
        net = pickle.load(open("Models/neural.sav", 'rb'))
        """ predict new value """
        prediction = net.activate(X_TEST)
        print "Predicted: ",prediction," True: ", Y_TEST#, "Error: ",np.sqrt(mean_squared_error(map(float,Y_test), ridge.predict(X_test)))
        return prediction
    except:
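        # model missing or unreadable, so fall through and train from scratch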

        # can change to model on the entire dataset but by convention splitting the dataset is a better option
        X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.10, random_state = 5)
        Y_test = Y_test.reshape(-1,1)
        Y_train = Y_train.reshape(-1,1)
        RMSEerror = []

        train = np.vstack((X_train, X_test))  # append both testing and training into one array
        outputTrain = np.vstack((Y_train, Y_test))
        outputTrain = [float(s.item()) for s in outputTrain]
        outputTrain = np.asarray(outputTrain, dtype=np.float64)
        # print outputTrain
        outputTrain = outputTrain.reshape( -1, 1 )

        inputSize = train.shape[1]
        targetSize = outputTrain.shape[1]

        ds = SupervisedDataSet(inputSize, targetSize)
        ds.setField('input', train)
        ds.setField('target', outputTrain)

        hiddenSize = 3
        epochs = 10000  # got after parameter tuning

        # neural network training model
        net = buildNetwork( inputSize, hiddenSize, targetSize, hiddenclass=TanhLayer, bias = True )
        trainer = BackpropTrainer(net, ds, learningrate=0.1)

        print "Model training in process..."
        train_mse, validation_mse = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.15, maxEpochs = epochs, continueEpochs = 10)
        p = net.activateOnDataset(ds)

        mse = mean_squared_error(map(float, outputTrain), map(float, p))
        rmse = mse ** 0.5

        print "Root Mean Squared Error for Best Parameters : " + str(rmse)
        
        """ save model """
        # pickle.dump(net, open("Models/neural.sav", 'wb'))

        """ predict new value """
        prediction = net.activate(X_TEST)
        print "Predicted: ",prediction," True: ", Y_TEST#, "Error: ",np.sqrt(mean_squared_error(map(float,Y_test), ridge.predict(X_test)))
        return prediction
Example #18
class RWR(DirectSearchLearner):
    """ Reward-weighted regression.
    
    The algorithm is currently limited to discrete-action episodic tasks, subclasses of POMDPTasks.
    """
    
    # parameters
    batchSize = 20
    
    # feedback settings
    verbose = True
    greedyRuns = 20
    supervisedPlotting = False
    
    # settings for the supervised training
    learningRate = 0.005
    momentum = 0.9
    maxEpochs = 20
    validationProportion = 0.33
    continueEpochs = 2
    
    # parameters for the variation that uses a value function
    # TODO: split into 2 classes.
    valueLearningRate = None
    valueMomentum = None
    #valueTrainEpochs = 5
    resetAllWeights = False
    netweights = 0.01
    
    def __init__(self, net, task, valueNetwork=None, **args):
        self.net = net
        self.task = task
        self.setArgs(**args)
        if self.valueLearningRate == None:
            self.valueLearningRate = self.learningRate
        if self.valueMomentum == None:
            self.valueMomentum = self.momentum        
        if self.supervisedPlotting:
            from pylab import ion
            ion() 
        
        # adaptive temperature:
        self.tau = 1.
        
        # prepare the datasets to be used
        self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
        self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
        self.valueDs = SequentialDataSet(self.task.outdim, 1)
        
        # prepare the supervised trainers
        self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
                                  self.momentum, verbose=False,
                                  batchlearning=True)            
        
        # CHECKME: outsource
        self.vnet = valueNetwork
        if valueNetwork != None:
            self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
                                       self.valueMomentum, verbose=self.verbose)
            
        # keep information:
        self.totalSteps = 0
        self.totalEpisodes = 0
            
    def shapingFunction(self, R):
        return exp(self.tau * R)        
    
    def updateTau(self, R, U):
        self.tau = sum(U) / dot((R - self.task.minReward), U)
        
    def reset(self):
        self.weightedDs.clear()
        self.valueDs.clear()
        self.rawDs.clear()
        self.bp.momentumvector *= 0.0
        if self.vnet != None:
            self.vbp.momentumvector *= 0.0
            if self.resetAllWeights:
                self.vnet.params[:] = randn(len(self.vnet.params)) * self.netweights            
            
    def greedyEpisode(self):
        """ run one episode with greedy decisions, return the list of rewards recieved."""
        rewards = []
        self.task.reset()
        self.net.reset()
        while not self.task.isFinished():
            obs = self.task.getObservation()
            act = self.net.activate(obs)
            chosen = argmax(act)
            self.task.performAction(chosen)
            reward = self.task.getReward()
            rewards.append(reward)
        return rewards
            
    def learn(self, batches):
        self.greedyAvg = []
        self.rewardAvg = []
        self.lengthAvg = []
        self.initr0Avg = []
        for b in range(batches):
            if self.verbose:
                print
                print 'Batch', b + 1
            self.reset()
            self.learnOneBatch()
            self.totalEpisodes += self.batchSize
            
            # greedy measure (avg over some greedy runs)
            rws = 0.
            for dummy in range(self.greedyRuns):
                tmp = self.greedyEpisode()
                rws += (sum(tmp) / float(len(tmp)))
            self.greedyAvg.append(rws / self.greedyRuns)
            if self.verbose:
                print '::', round(rws / self.greedyRuns, 5), '::'
            
    def learnOneBatch(self):
        # collect a batch of runs as experience
        r0s = []
        lens = []
        avgReward = 0.
        for dummy in range(self.batchSize):
            self.rawDs.newSequence()
            self.valueDs.newSequence()
            self.task.reset()
            self.net.reset()
            acts, obss, rewards = [], [], []
            while not self.task.isFinished():
                obs = self.task.getObservation()
                act = self.net.activate(obs)
                chosen = drawIndex(act)
                self.task.performAction(chosen)
                reward = self.task.getReward()
                obss.append(obs)
                y = zeros(len(act))
                y[chosen] = 1
                acts.append(y)
                rewards.append(reward)
            avgReward += sum(rewards) / float(len(rewards))
            
            # compute the returns from the list of rewards
            current = 0        
            returns = []
            for r in reversed(rewards):
                current *= self.task.discount
                current += r
                returns.append(current)
            returns.reverse()
            for i in range(len(obss)):
                self.rawDs.addSample(obss[i], acts[i], returns[i])
                self.valueDs.addSample(obss[i], returns[i])
            r0s.append(returns[0])
            lens.append(len(returns))
            
        r0s = array(r0s)  
        self.totalSteps += sum(lens)
        avgLen = sum(lens) / float(self.batchSize)
        avgR0 = mean(r0s)
        avgReward /= self.batchSize
        if self.verbose:
            print '***', round(avgLen, 3), '***', '(avg init exp. return:', round(avgR0, 5), ')',
            print 'avg reward', round(avgReward, 5), '(tau:', round(self.tau, 3), ')'
            print lens        
        # storage:
        self.rewardAvg.append(avgReward)
        self.lengthAvg.append(avgLen)
        self.initr0Avg.append(avgR0)
        
        
#        if self.vnet == None:
#            # case 1: no value estimator:
            
        # prepare the dataset for training the acting network  
        shaped = self.shapingFunction(r0s)
        self.updateTau(r0s, shaped)
        shaped /= max(shaped)
        for i, seq in enumerate(self.rawDs):
            self.weightedDs.newSequence()
            for sample in seq:
                obs, act, dummy = sample
                self.weightedDs.addSample(obs, act, shaped[i])
                    
#        else:
#            # case 2: value estimator:
#            
#            
#            # train the value estimating network
#            if self.verbose: print 'Old value error:  ', self.vbp.testOnData()
#            self.vbp.trainEpochs(self.valueTrainEpochs)
#            if self.verbose: print 'New value error:  ', self.vbp.testOnData()
#            
#            # produce the values and analyze
#            rminusvs = []
#            sizes = []
#            for i, seq in enumerate(self.valueDs):
#                self.vnet.reset()
#                seq = list(seq)
#                for sample in seq:
#                    obs, ret = sample
#                    val = self.vnet.activate(obs)
#                    rminusvs.append(ret-val)
#                    sizes.append(len(seq))
#                    
#            rminusvs = array(rminusvs)
#            shapedRminusv = self.shapingFunction(rminusvs)
#            # CHECKME: here?
#            self.updateTau(rminusvs, shapedRminusv)
#            shapedRminusv /= array(sizes)
#            shapedRminusv /= max(shapedRminusv)
#            
#            # prepare the dataset for training the acting network    
#            rvindex = 0
#            for i, seq in enumerate(self.rawDs):
#                self.weightedDs.newSequence()
#                self.vnet.reset()
#                for sample in seq:
#                    obs, act, ret = sample
#                    self.weightedDs.addSample(obs, act, shapedRminusv[rvindex])
#                    rvindex += 1
                    
        # train the acting network                
        tmp1, tmp2 = self.bp.trainUntilConvergence(maxEpochs=self.maxEpochs,
                                                   validationProportion=self.validationProportion,
                                                   continueEpochs=self.continueEpochs,
                                                   verbose=self.verbose)
        if self.supervisedPlotting:
            from pylab import plot, legend, figure, clf, draw
            figure(1)
            clf()
            plot(tmp1, label='train')
            plot(tmp2, label='valid')
            legend()
            draw()  
            
        return avgLen, avgR0                        
Example #19
def main(f_samples):
    f_reading = open(f_samples, 'r')
    global data
    data = []

    for line in f_reading:
        line = line.split()
        data.append( (float(line[0]), float(line[-1])) )

    #function
    data_module = lambda x: map( lambda z: data[z], filter( lambda y: y% 5 == x, xrange(len(data)) ) )

    global data1
    data1 = [data_module(0), data_module(1), data_module(2), data_module(3), data_module(4)]

    global data_transformed
    data_transformed = take(data, rate = 60)

    global data_transformed_training
    data_transformed_training = map( lambda x: data_transformed[x], filter( lambda x: uniform(0, 1) > 0.3, xrange(len(data_transformed)) ))

    #Learning process-----------------------------------------------------------------

    global net, samples, trainer
    net = FeedForwardNetwork()
    inLayer = LinearLayer(3)
    hiddenLayer0 = SigmoidLayer(1)
    hiddenLayer1 = SigmoidLayer(3)
    outLayer = LinearLayer(1)

    net.addInputModule(inLayer)
#    net.addModule(hiddenLayer0)
#    net.addModule(hiddenLayer1)
    net.addOutputModule(outLayer)

#    net.addConnection(FullConnection(inLayer, hiddenLayer0))
    net.addConnection(FullConnection(inLayer, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, outLayer))
#    net.addConnection(FullConnection(hiddenLayer0, hiddenLayer1))
#    net.addConnection(FullConnection(hiddenLayer1, outLayer))
    net.sortModules()
    print net
    ##Net with 3 inputs, 8 hidden neurons in a layer and 8 in another, and 1 out.
    #net = buildNetwork(3,8,8,1)
    ##Set with 3 inputs and one output for each sample
    samples = SupervisedDataSet(3,1)

    for i in data_transformed_training:
        samples.addSample(i['past'], i['next'] - i['average'])
    trainer = BackpropTrainer(net, samples)

    print 'Training'
    trainer.trainUntilConvergence(maxEpochs= 10)

    #Comparing step-------------------------------------------------------------------

    print 'Naive1'
    aux = map(lambda y: y['past'], data_transformed)
    aux2 = map(lambda y: y['next']-y['average'], data_transformed)
    compare_forecast_samples(Forecaster(predict_function = lambda x: aux2[aux.index(x)-1]), data_transformed)

    print 'Network'
    compare_forecast_samples(Forecaster(predict_function = net.activate), data_transformed)
    print "Number of samples %d for training." %len(data_transformed_training)
Example #20
            leg = plt.legend([dot1, dot2], ['0','1'])
        ax = plt.gca().add_artist(leg)
        plt.show()
        """
    elif sys.argv[1] == 'nn':
        DS = ClassificationDataSet(165)
        training_data = np.array(training_data)
        training_target = np.vstack(np.array(training_target))
        print len(training_data[0])
        #print len(training_target[0])
        assert(training_data.shape[0] == training_target.shape[0])
        DS.setField('input', training_data)
        DS.setField('target', training_target)
        tstdata, trndata = DS.splitWithProportion(0.15)
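        # splitWithProportion(0.15) returns the 15% slice first, hence test data before training data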
        hidden_layer_neurons = (DS.indim+DS.outdim)/2
        rnn = buildNetwork(DS.indim,hidden_layer_neurons,DS.outdim,hiddenclass=LSTMLayer,outclass=SigmoidLayer,outputbias=False,recurrent=True)
        #print hidden_layer_neurons
        # define a training method
        trainer = BackpropTrainer(rnn,dataset=trndata, verbose=True) 
        trainer.trainUntilConvergence(verbose = True, validationProportion = 0.3, maxEpochs = 1000, continueEpochs = 10)

        print 'Percent Error on Test dataset: ' , percentError(trainer.testOnClassData(tstdata, verbose=True), tstdata['target'] )
        #print 'Percent Error on Training dataset: ' , percentError(trainer.testOnClassData(trndata), trndata['target'] )
    else:
        print("Current classifier algorithms available: svm, knn, dt, kmeans, nn")
        sys.exit(1)

if running_tests:
    print "\nMax Accuracy: " + str(max(accuracy_arr))
Example #21
	outData = tuple([outputData])

	trainingDataset.addSample(inData, outData)
f_Training.close()

network = buildNetwork (trainingDataset.indim,
		12,
		trainingDataset.outdim,
		bias = True,
		hiddenclass = SigmoidLayer,
		outclass = SigmoidLayer)

trainer = BackpropTrainer (network, trainingDataset, learningrate = 0.01, momentum = 0.9, verbose = True, weightdecay = 0.0)
training = trainer.trainUntilConvergence(dataset = trainingDataset,
		maxEpochs = 25,
		continueEpochs = 10,
		verbose = True,
		validationProportion = 0.2)

training_Time = time.time() - trainingTime
print("Training time: %s seconds" % (training_Time))

f_Testing = open('ISCXTesting.txt', 'r')
f_Output = open('ISCXTesting.dat.out', 'w')

testingTime = time.time()

line = f_Testing.readline()
for line in f_Testing.xreadlines():
	allData = line.strip().split(' ')
Example #22
    synergy_dict = read_synergy_data(synergy)
    # dump_drug_dict_as_flat(pca_dict, out)
    training_input, input_len = build_training_input(pca_dict, synergy_dict)
    # input_len = training_input[list(training_input.keys())[0]]['INPUT']
    target_len = 1
    ds = SupervisedDataSet(input_len, target_len)
    for t1 in training_input:
        for t2 in training_input[t1]:
            print("Input Vector", training_input[t1][t2]['INPUT'],
                  training_input[t1][t2]['OUTPUT'])
            ds.addSample(training_input[t1][t2]['INPUT'],
                         training_input[t1][t2]['OUTPUT'])

    n = buildNetwork(ds.indim, 3, ds.outdim, bias=True)
    t = BackpropTrainer(n, learningrate=0.001, momentum=0.05, verbose=True)
    print("Training")
    t.trainUntilConvergence(ds, verbose=True)
    NetworkWriter.writeToFile(n, 'trainedNetwork.xml')

    # n = NetworkReader.readFrom('trainedNetwork_2.xml')

    predictions = {}
    for d1 in pca_dict:
        if not predictions.get(d1, None):
            predictions[d1] = {}
        for d2 in pca_dict:
            predictions[d1][d2] = n.activate(pca_dict[d1] + pca_dict[d2])[0]

    with open('predictions_4.json', 'w') as outfile:
        json.dump(predictions, outfile)
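To reuse the serialized network later, NetworkReader mirrors the NetworkWriter call above; a minimal sketch (the file name matches the one written above):

from pybrain.tools.customxml.networkreader import NetworkReader

n = NetworkReader.readFrom('trainedNetwork.xml')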
Example #23
def train(net, data_set):
    trainer = BackpropTrainer(net, data_set, learningrate=0.04, momentum=0.5, verbose=True)
    trainer.trainUntilConvergence(dataset=data_set, maxEpochs=5000)
Example #24
def show_weights(net):
    for mod in net.modules:
        for conn in net.connections[mod]:
            print(conn)
            for cc in range(len(conn.params)):
                print(conn.whichBuffers(cc), conn.params[cc])
                print('\n')
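# Note: net.connections maps each module to its outgoing connections, and
# conn.whichBuffers(cc) gives the (input buffer, output buffer) index pair
# for parameter cc, which is why it pairs naturally with conn.params[cc].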


#show_weights(net)

trainer = BackpropTrainer(net,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1)
trainer.trainUntilConvergence(maxEpochs=1000)

#show_weights(net)

out_test = net.activateOnDataset(test_data).argmax(axis=1)
print("Erro de teste: %f" % percentError(out_test, test_data['target'][:, 0]))

out_val = net.activateOnDataset(val_data).argmax(axis=1)
print("Erro de validadação: %f" %
      percentError(out_val, val_data['target'][:, 0]))

print("-------------------------------------------\nTeste:")
print("saída da rede:\t", out_test)
print("correto      :\t", test_data['target'][:, 0])
'''
from datetime import datetime, timedelta
Example #25
        trainIn = []
        for x in row[:numberOfInputs]:
            trainIn.append(x)

        trainOut = []
        for x in row[numberOfInputs:]:
            trainOut.append(x)

        d.appendLinked(trainIn, trainOut)

    # build a neural network; the second argument is the number of hidden units (not layers)
    n = buildNetwork(d.indim, 3, d.outdim, recurrent=True)

    # configure the trainer
    t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)

    # split the data randomly into 75% training - 25% testing
    train, test = d.splitWithProportion(0.75)
    print "{} - {}".format(len(train), len(test))

    # train the data with n number of epochs
    t.trainOnDataset(train, 10)

    # test the data with the remaining data
    t.testOnData(test, verbose=True)

    # try the same test but with a different method
    net = buildNetwork(d.indim, 3, d.outdim, bias=True, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, d)
    trainer.trainUntilConvergence(verbose=True)
Example #26
                      [entry['age_id']])

# partitioning the dataset for training
train_data, part_data = dataset.splitWithProportion(0.7)

# partition for testing/validation
test_data, val_data = part_data.splitWithProportion(0.5)

network = buildNetwork(dataset.indim, 4, dataset.outdim)
trainer = BackpropTrainer(network,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

training_errors, val_errors = trainer.trainUntilConvergence(dataset=train_data)

out = network.activateOnDataset(test_data)
for i in range(len(out)):
    print("out: %f, correct: %f" % (out[i], test_data['target'][i]))
'''
while True:
    while True:
        start = datetime.now()
        user_string = "Eu quero fotos do homem-aranha, na minha mesa, agora."
        user_input = input("Digite a frase abaixo:\n\n %s \n\nPara sair digite 'exit'." % (user_string))

        if user_string == user_input:
            tempo_corrido = datetime.now() - start
            our_out = network.activate([len(user_string), tempo_corrido.seconds])
            print("O output é: %f" % (our_out[0]))
Example #27
train = pd.read_hdf('../input/train.h5')
train = train[col]
d_mean = train.median(axis=0)
train = []

ds = SupervisedDataSet(len(col), 1)
ds.setField('input', o.train[col].fillna(d_mean))
ds.setField('target', pd.DataFrame(o.train['y']))
nn = buildNetwork(
    len(col), 2, 1,
    hiddenclass=TanhLayer)  #hiddenclass=SigmoidLayer, outclass=LinearLayer
tr = BackpropTrainer(nn,
                     ds,
                     learningrate=0.001,
                     momentum=0.0000001,
                     verbose=True)
tr.trainUntilConvergence(maxEpochs=5,
                         continueEpochs=2,
                         verbose=True,
                         validationProportion=0.35)

while True:
    test = o.features[col].fillna(d_mean)
    pred = o.target
    pred['y'] = [float(nn.activate(row)) for row in test.values]
    o, reward, done, info = env.step(pred)
    if done:
        print("Info Result: ", info["public_score"])
        break
    if o.features.timestamp[0] % 100 == 0:
        print(reward)
Example #28
def neuralNetworkRegression(X,Y):
    """
    :param X: data consisting of features (excluding class variable)
    :param Y: column vector consisting of class variable
    :return: models neural network regression with fine-tuning of epochs
    """
    print "NEURAL NETWORK REGRESSION"
    print "Executing..."
    print

    # can change to model on the entire dataset but by convention splitting the dataset is a better option
    X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size = 0.10, random_state = 5)
    Y_test = Y_test.reshape(-1,1)
    Y_train = Y_train.reshape(-1,1)
    RMSEerror = []

    train = np.vstack((X_train, X_test))  # append both testing and training into one array
    outputTrain = np.vstack((Y_train, Y_test))
    outputTrain = outputTrain.reshape( -1, 1 )

    inputSize = train.shape[1]
    targetSize = outputTrain.shape[1]

    ds = SupervisedDataSet(inputSize, targetSize)
    ds.setField('input', train)
    ds.setField('target', outputTrain)

    hiddenSize = 100
    epochs = 100  # got after parameter tuning

    # neural network training model
    net = buildNetwork( inputSize, hiddenSize, targetSize, bias = True )
    trainer = BackpropTrainer(net, ds)

    # uncomment the block below to plot epoch vs. RMSE
    # (takes time to execute, as it searches for the best epoch value)

    """
    print "training for {} epochs...".format( epochs )

    for i in range(epochs):
        print i
        mse = trainer.train()
        rmse = mse ** 0.5
        RMSEerror.append(rmse)

    plt.plot(range(epochs), RMSEerror)
    plt.xlabel("Epochs")
    plt.ylabel("RMSE")
    plt.title("RMSE vs Epochs")
    plt.savefig("../Graphs/Network/Question 2c/RMSE vs Epochs.png")

    plt.show()
    """

    print "Model training in process..."
    train_mse, validation_mse = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.15, maxEpochs = epochs, continueEpochs = 10)
    p = net.activateOnDataset(ds)

    mse = mean_squared_error(outputTrain, p)
    rmse = mse ** 0.5

    print "Root Mean Squared Error for Best Parameters : " + str(rmse)
Example #29
import os
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer

classes = ['apple', 'orange', 'peach', 'banana']*10
input = ['ap','or','pea','bana']

data = ClassificationDataSet(len(input), 1, nb_classes=len(classes), class_labels=classes)

data._convertToOneOfMany()                  # recommended by PyBrain

fnn = buildNetwork( data.indim, 5, data.outdim, outclass=SoftmaxLayer ) 

trainer = BackpropTrainer( fnn, dataset=data, momentum=0.99, verbose=True, weightdecay=0.01)

trainer.trainUntilConvergence(maxEpochs=80)

# stop training and start using my trained network here

output = fnn.activate(input)

class_index = max(xrange(len(output)), key=output.__getitem__)
class_name = classes[class_index]
print class_name
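The fragment above never adds samples to its dataset and activates the net on raw strings, so it cannot run as written; a hedged, runnable variant using a toy two-number encoding of each string (the encoding is purely illustrative):

from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer

classes = ['apple', 'orange', 'peach', 'banana']
words = ['ap', 'or', 'pea', 'bana']

data = ClassificationDataSet(2, 1, nb_classes=len(classes), class_labels=classes)
for idx, word in enumerate(words):
    data.addSample([len(word), ord(word[0])], [idx])   # toy numeric features
data._convertToOneOfMany()

fnn = buildNetwork(data.indim, 5, data.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=data, momentum=0.99, verbose=True, weightdecay=0.01)
trainer.trainUntilConvergence(maxEpochs=80)

output = fnn.activate([len('pea'), ord('p')])
print classes[output.argmax()]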
Example #30
iris = datasets.load_iris()
X, y = iris.data, iris.target
dataset = ClassificationDataSet(4, 1, nb_classes=3)

for sample_input, sample_output in zip(X, y):
    dataset.addSample(sample_input, sample_output)

# Partitioning data for training
training_data, partitioned_data = dataset.splitWithProportion(0.6)

# Splitting data for testing and validation
testing_data, validation_data = partitioned_data.splitWithProportion(0.5)

network = buildNetwork(dataset.indim, 2, 2, dataset.outdim)
trainer = BackpropTrainer(network,
                          dataset=training_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

training_errors, validation_errors = trainer.trainUntilConvergence(
    dataset=training_data, maxEpochs=200)
plt.plot(training_errors, 'b', label='training')
plt.plot(validation_errors, 'r', label='validation')
plt.legend()
plt.show()

outputs = network.activateOnDataset(testing_data)
for out, target in zip(outputs, testing_data['target']):
    print(f"output: {out}, target: {target}")
Example #31
    synergy_dict = read_synergy_data(synergy)
    # dump_drug_dict_as_flat(pca_dict, out)
    training_input,input_len = build_training_input(pca_dict, synergy_dict)
    # input_len = training_input[list(training_input.keys())[0]]['INPUT']
    target_len = 1
    ds = SupervisedDataSet(input_len, target_len)
    for t1 in training_input:
        for t2 in training_input[t1]:
            print("Input Vector", training_input[t1][t2]['INPUT'], training_input[t1][t2]['OUTPUT'])
            ds.addSample(training_input[t1][t2]['INPUT'], training_input[t1][t2]['OUTPUT'])


    n = buildNetwork(ds.indim, 3, ds.outdim, bias=True)
    t = BackpropTrainer(n, learningrate=0.001, momentum=0.05, verbose=True)
    print("Training")
    t.trainUntilConvergence(ds,
                            verbose=True)
    NetworkWriter.writeToFile(n, 'trainedNetwork.xml')

    # n = NetworkReader.readFrom('trainedNetwork_2.xml')

    predictions = {}
    for d1 in pca_dict:
        if not predictions.get(d1, None):
            predictions[d1]={}
        for d2 in pca_dict:
            predictions[d1][d2] = n.activate(pca_dict[d1] + pca_dict[d2])[0]

    with open('predictions_4.json', 'w') as outfile:
        json.dump(predictions, outfile)

Example #32
def train(log_filepath, sensor_idxs, action_idxs, max_laps):
    f_log = open(log_filepath, 'rb')

    x = []
    d = []
    l = []
    n_lap = 0
    last_dist = 0

    for i,line in enumerate(f_log):
        sys.stdout.write('Processing log line '+str(i+1)+'\r')
        sys.stdout.flush()

        # if re.match('^LAP\s5.*', line):
        #     print 'lines: ',len(x)

        if re.match('^Received: \(', line):

            current_dist = parse_line(line, [distFromStart_idx])[0]
            if float(current_dist) - float(last_dist) < -1000:
                n_lap += 1
            last_dist = current_dist
            if n_lap > max_laps:
                break

            l.append(max(n_lap,1))

            x.append(parse_line(line, sensor_idxs))
        elif re.match('^Sending: \(', line):
            d.append(parse_line(line, action_idxs))

    sys.stdout.write('\n')

    x = np.array(x, dtype=float)
    d = np.array(d, dtype=float)

    assert x.shape[0] == d.shape[0], 'X and D have different number of samples!'

    n_hidden = 100
    ds = SupervisedDataSet(x.shape[1], d.shape[1])
    ds.setField('input', x)
    ds.setField('target', d)

    net = buildNetwork(x.shape[1], n_hidden, d.shape[1], bias=True, outputbias=True, hiddenclass=TanhLayer, outclass=TanhLayer)
    trainer = BackpropTrainer(net, ds, learningrate=0.01, verbose=True)
    trainer.trainUntilConvergence(validationProportion=0.15, maxEpochs=10, continueEpochs=10)
    # trainer.trainOnDataset(ds, 200, verbose=True)
    params = net.params
    w_in = net.params[:n_hidden*x.shape[1]].reshape((n_hidden,x.shape[1]))
    b_in = net.params[n_hidden*x.shape[1]:n_hidden*x.shape[1]+n_hidden]
    w_out = net.params[n_hidden*x.shape[1]+n_hidden:n_hidden*x.shape[1]+n_hidden+n_hidden*d.shape[1]].reshape((d.shape[1],n_hidden))
    b_out = net.params[n_hidden*x.shape[1]+n_hidden+n_hidden*d.shape[1]:]
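    # Note: the slices above assume one fixed ordering of net.params; the loop
    # below instead recovers each connection's weights by module name, which is
    # what the final comparison relies on.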

    for mod in net.modules:
        if mod._name == 'out':
            continue
        params = net.connections[mod][0].params
        if mod._name == 'in':
            w_in = params.reshape(x.shape[1],n_hidden)
        if mod._name == 'hidden0':
            w_out = params
        if mod._name =='bias':
            b_in = params
        if mod._name =='outputbias':
            b_out = params
    for i in range(10):
        true_val = d[i]
        man_pred = np.tanh(w_out.T.dot(np.tanh(w_in.T.dot(x[i]) + b_in)) + b_out)  # b_in/b_out extracted above; the tanh output layer applies too
        aut_pred = net.activate(x[i])
Example #33
 def trainNetworkBackprop(self, dataset, maxIter):
     trainer = BackpropTrainer(self.net, dataset)
     print "\tInitialised backpropagation trainer.  Now execute until convergence:"
     trainer.trainUntilConvergence(verbose=True, maxEpochs=maxIter)
     print "\tConvergence achieved."