def main():
    images, labels = load_labeled_training(flatten=True)
    images = standardize(images)
    # images, labels = load_pca_proj(K=100)
    shuffle_in_unison(images, labels)
    ds = ClassificationDataSet(images.shape[1], 1, nb_classes=7)
    for i, l in zip(images, labels):
        ds.addSample(i, [l - 1])
    # ds._convertToOneOfMany()
    test, train = ds.splitWithProportion(0.2)
    test._convertToOneOfMany()
    train._convertToOneOfMany()
    net = shortcuts.buildNetwork(train.indim, 1000, train.outdim, outclass=SoftmaxLayer)

    trainer = BackpropTrainer(net, dataset=train, momentum=0.1, learningrate=0.01, weightdecay=0.05)
    # trainer = RPropMinusTrainer(net, dataset=train)
    # cv = validation.CrossValidator(trainer, ds)
    # print cv.validate()
    net.randomize()
    tr_labels_2 = net.activateOnDataset(train).argmax(axis=1)
    trnres = percentError(tr_labels_2, train["class"])
    # trnres = percentError(trainer.testOnClassData(dataset=train), train['class'])
    testres = percentError(trainer.testOnClassData(dataset=test), test["class"])
    print "Training error: %.10f, Test error: %.10f" % (trnres, testres)
    print "Iters: %d" % trainer.totalepochs

    for i in range(100):
        trainer.trainEpochs(10)
        trnres = percentError(trainer.testOnClassData(dataset=train), train["class"])
        testres = percentError(trainer.testOnClassData(dataset=test), test["class"])
        trnmse = trainer.testOnData(dataset=train)
        testmse = trainer.testOnData(dataset=test)
        print "Iteration: %d, Training error: %.5f, Test error: %.5f" % (trainer.totalepochs, trnres, testres)
        print "Training MSE: %.5f, Test MSE: %.5f" % (trnmse, testmse)
Example No. 2
def anntrain(xdata, ydata):  # ,epochs):
    # print len(xdata[0])
    ds = SupervisedDataSet(len(xdata[0]), 1)
    # ds = ClassificationDataSet(len(xdata[0]), 1, nb_classes=2)
    for i, algo in enumerate(xdata):
        ds.addSample(algo, ydata[i])
    # ds._convertToOneOfMany()  # not this
    net = FeedForwardNetwork()
    inp = LinearLayer(len(xdata[0]))
    h1 = SigmoidLayer(1)
    outp = LinearLayer(1)
    net.addOutputModule(outp)
    net.addInputModule(inp)
    net.addModule(h1)
    # net = buildNetwork(len(xdata[0]), 1, 1, hiddenclass=TanhLayer, outclass=SoftmaxLayer)

    net.addConnection(FullConnection(inp, h1))
    net.addConnection(FullConnection(h1, outp))

    net.sortModules()

    trainer = BackpropTrainer(net, ds)  # , verbose=True)
    # trainer.trainEpochs(40)
    trainer.trainOnDataset(ds, 40)
    # trainer.trainUntilConvergence(ds, 20, verbose=True, validationProportion=0.15)
    trainer.testOnData()  # verbose=True)
    # print 'Final weights:', net.params
    return net
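The hand-wired network above (linear input, a single sigmoid hidden unit, linear output, no bias) is what buildNetwork produces in one line; a sketch of the equivalent shortcut, assuming the same xdata:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SigmoidLayer, LinearLayer

net = buildNetwork(len(xdata[0]), 1, 1, hiddenclass=SigmoidLayer,
                   outclass=LinearLayer, bias=False, outputbias=False)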
Example No. 3
    def handle(self, *args, **options):
        better_thans = BetterThan.objects.all() #.filter(pk__lte=50)

        ds = SupervisedDataSet(204960, 1)
        for better_than in better_thans:
            bt = imread(better_than.better_than.image.file)
            wt = imread(better_than.worse_than.image.file)
            better_than.better_than.image.file.close()
            better_than.worse_than.image.file.close()

            bt = filters.sobel(bt)
            wt = filters.sobel(wt)

            bt_input_array = np.reshape(bt, (bt.shape[0] * bt.shape[1]))
            wt_input_array = np.reshape(wt, (wt.shape[0] * wt.shape[1]))
            input_1 = np.append(bt_input_array, wt_input_array)
            input_2 = np.append(wt_input_array, bt_input_array)
            ds.addSample(input_1, [-1])
            ds.addSample(input_2, [1])
        
        net = buildNetwork(204960, 2, 1)

        train_ds, test_ds = ds.splitWithProportion(options['train_test_split'])
        _, test_ds = ds.splitWithProportion(options['test_split'])  # re-carves test_ds from the full set

        trainer = BackpropTrainer(net, ds)  # note: this trains on the full dataset, not train_ds

        avgerr = trainer.testOnData(dataset=test_ds)
        print 'untrained avgerr: {0}'.format(avgerr)

        trainer.train()

        avgerr = trainer.testOnData(dataset=test_ds)
        print 'trained avgerr: {0}'.format(avgerr)
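Note that the trainer above is built on the full ds, so test_ds is not truly held out. If the intent is to evaluate on unseen pairs, a variant might look like this (a sketch, assuming options['test_split'] is the fraction to reserve for testing):

train_ds, test_ds = ds.splitWithProportion(1.0 - options['test_split'])
trainer = BackpropTrainer(net, train_ds)
for epoch in range(10):
    trainer.train()  # one epoch of backprop over train_ds only
    print 'epoch {0}: test avgerr {1}'.format(epoch, trainer.testOnData(dataset=test_ds))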
Example No. 4
    def neuralnetworktrain(self):
        dataset = self.getdata()

        # Construct a multiple-output neural network.
        # Other architectures will also be tried,
        # such as several single-output networks.
        net = FeedForwardNetwork()
        inp = LinearLayer(9)
        h1 = SigmoidLayer(20)
        h2 = TanhLayer(10)
        outp = LinearLayer(3)

        # Adding the modules to the architecture
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(h1)
        net.addModule(h2)

        # Creating the connections
        net.addConnection(FullConnection(inp, h1))
        net.addConnection(FullConnection(h1, h2))
        net.addConnection(FullConnection(h2, outp))
        net.sortModules()

        # Training the neural network using Backpropagation
        t = BackpropTrainer(net, learningrate=0.01, momentum=0.5, verbose=True)
        t.trainOnDataset(dataset, 5)
        t.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(net)
Example No. 5
def main():
    trainingSet = buildDataSet("days", -1)  # build the training set
    net = buildNetwork(5,3,1,bias=True,hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net,trainingSet,verbose=True)
    testSet = buildDataSet("hours", -6)  # build another set for testing/validation
    # In my testing, 4000 epochs have been enough to get close to the lowest error without taking all day.
    # trainUntilConvergence() also works, but it takes all night and does only marginally better.
    trainer.trainEpochs(4000)
    # net.activateOnDataset(testSet)
    trainer.testOnData(testSet, verbose=True)  # evaluate on the held-out set
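As a middle ground between a fixed epoch budget and an open-ended trainUntilConvergence(), the convergence run can be capped with maxEpochs; a sketch, reusing the trainer above:

# Stop when validation error stops improving, but never run past 4000 epochs.
trainer.trainUntilConvergence(dataset=trainingSet, maxEpochs=4000,
                              validationProportion=0.15, verbose=True)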
Example No. 6
def train_callback():
	trainer = BackpropTrainer(net, learningrate=0.001, lrdecay=1, momentum=0.0, verbose=True)
	print 'MSE before', trainer.testOnData(ds, verbose=True)
	epoch_count = 0
	while epoch_count < 1000:
		epoch_count += 10
		trainer.trainUntilConvergence(dataset=ds, maxEpochs=10)
		networkwriter.NetworkWriter.writeToFile(net, 'autosave.network')
	print 'MSE after', trainer.testOnData(ds, verbose=True)
	print '\n'
	print 'Total epochs:', trainer.totalepochs
def estimateNot():
    ds_not = SupervisedDataSet(1, 1)
    ds_not.addSample( (0,) , (1,))
    ds_not.addSample( (1,) , (0,))
    net = buildNetwork(1, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_not, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOT value:'
    print 'NOT 0  = ', net.activate((0,))
    print 'NOT 1  = ', net.activate((1,))
class NNet(object):
    def __init__(self):
        self.net = buildNetwork(2, 4, 2, bias=True)
        self.net.randomize()
        print self.net
        self.ds = SupervisedDataSet(2,2)
        self.trainer = BackpropTrainer(self.net, self.ds, learningrate = 0.1, momentum=0.99)
    def addTrainDS(self, data1, data2, max):
        norm1 = self.normalize(data1, max)
        norm2 = self.normalize(data2, max)
        for x in range(len(norm1)):
            self.ds.addSample(norm1[x], norm2[x])
    def train(self):
        print "Training"
        # print self.trainer.train()
        trndata, tstdata = self.ds.splitWithProportion(.25)
        self.trainer.trainUntilConvergence(verbose=True,
                                           trainingData=trndata,
                                           validationData=tstdata,
                                           validationProportion=.3,
                                           maxEpochs=500)
        # self.trainer.trainOnDataset(trndata,500)
        self.trainer.testOnData(tstdata, verbose= True)

    def activate(self, data):
        for x in data:
            self.net.activate(x)

    def normalize(self, data, max):
        normData = np.zeros((len(data), 2))
        for x in [0,1]:
            for y in range(len(data)):
                val = data[y][x]
                normData[y][x] = (val)/(max[x])
        # print normData
        return normData

    def denormalize(self, data, max):
        deNorm = np.zeros((len(data), 2))
        for x in [0,1]:
            for y in range(len(data)):
                val = data[y][x]
                deNorm[y][x] = val*max[x]
        return deNorm

    def getOutput(self, mat, max):
        norm = self.normalize(mat, max)
        out = []
        for val in norm:
            out.append(self.net.activate(val))
        return self.denormalize(out, max)
Example No. 9
def RunNet(net, dataset, train_epochs):
	"a function to build a neural net and test on it, for testing purposes right now"
	#print net.activate([2, 1])
	#ds = SupervisedDataSet(15, 1)
	#ds.addSample((1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), (100))
	#ds.addSample((0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0))

	#trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99, verbose = True)
	trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.5, verbose = True)
	
	trainer.trainOnDataset(dataset, train_epochs)
	
	trainer.testOnData(verbose = True)
Example No. 10
def buildAndTrain(ds):
  
  net = buildNetwork(2, 4, 1, bias=True)

  # try:
  #   f = open('_learned', 'r')
  #   net = pickle.load(f)
  #   f.close()
  # except:
  trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
  trainer.trainOnDataset(ds, 1000)
  trainer.testOnData()
  return net
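The commented-out block hints at caching the trained net on disk. A working version of that idea, as a sketch (pickle handles PyBrain networks; the '_learned' path is the one from the comments):

import pickle

def buildAndTrainCached(ds, path='_learned'):
  # Reuse a previously trained net if one was saved; otherwise train and save.
  try:
    with open(path, 'rb') as f:
      return pickle.load(f)
  except (IOError, pickle.PickleError):
    net = buildNetwork(2, 4, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds, 1000)
    with open(path, 'wb') as f:
      pickle.dump(net, f)
    return net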
Example No. 11
    def xtrain(self):
        dataset = self.getdata()

        # Constructing a neural network with two hidden layers
        net = buildNetwork(9, 15, 5, 1, recurrent=True)

        # Training using Back Propagation
        trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.75,
                                  weightdecay=0.02, verbose=True)
        trainer.trainOnDataset(dataset, 10)
        trainer.testOnData(verbose=False)

        # Saving the trained neural network information to file
        self.writetrainedinfo(net)
def estimateAnd():
    ds_and = SupervisedDataSet(2, 1)
    ds_and.addSample( (0,0) , (0,))
    ds_and.addSample( (0,1) , (0,))
    ds_and.addSample( (1,0) , (0,))
    ds_and.addSample( (1,1) , (1,))
    net = buildNetwork(2, 4, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData() 
    print '\nthe prediction for AND value:'
    print '1 AND 1 = ', net.activate((1,1))
    print '1 AND 0 = ', net.activate((1,0))
    print '0 AND 1 = ', net.activate((0,1))
    print '0 AND 0 = ', net.activate((0,0))
def estimateNor():
    ds_nor = SupervisedDataSet(2, 1)
    ds_nor.addSample( (0,0) , (1,))
    ds_nor.addSample( (0,1) , (0,))
    ds_nor.addSample( (1,0) , (0,))
    ds_nor.addSample( (1,1) , (0,))
    net = buildNetwork(2, 100, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_nor, 3000)
    trainer.testOnData() 
    print '\nthe prediction for NOR value:'
    print '1 NOR 1 = ', net.activate((1,1))
    print '1 NOR 0 = ', net.activate((1,0))
    print '0 NOR 1 = ', net.activate((0,1))
    print '0 NOR 0 = ', net.activate((0,0))
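The gates above (NOT, AND, NOR) are all linearly separable, so far smaller hidden layers than 100 units would do; XOR is the classic case that genuinely needs hidden units. A sketch in the same style:

def estimateXor():
    ds_xor = SupervisedDataSet(2, 1)
    ds_xor.addSample( (0,0) , (0,))
    ds_xor.addSample( (0,1) , (1,))
    ds_xor.addSample( (1,0) , (1,))
    ds_xor.addSample( (1,1) , (0,))
    net = buildNetwork(2, 4, 1, bias=True)  # XOR is not linearly separable: the hidden layer is essential
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_xor, 3000)
    print '\nthe prediction for XOR value:'
    print '1 XOR 1 = ', net.activate((1,1))
    print '1 XOR 0 = ', net.activate((1,0))
    print '0 XOR 1 = ', net.activate((0,1))
    print '0 XOR 0 = ', net.activate((0,0))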
Example No. 14
	def computeModel(self, path, user):
		# Create a supervised dataset for training.
		trndata = SupervisedDataSet(24, 1)
		tstdata = SupervisedDataSet(24, 1)
		
		#Fill the dataset.
		for number in range(0,10):
			for variation in range(0,7):
				# Pass all the features as inputs.
				trndata.addSample(self.getSample(user, number, variation),(user.key,))
				
			for variation in range(7,10):
				# Pass all the features as inputs.
				tstdata.addSample(self.getSample(user, number, variation),(user.key,))
				
		# Build the LSTM.
		n = buildNetwork(24, 50, 1, hiddenclass=LSTMLayer, recurrent=True, bias=True)

		# define a training method
		trainer = BackpropTrainer(n, dataset = trndata, momentum=0.99, learningrate=0.00002)

		# carry out the training
		trainer.trainOnDataset(trndata, 2000)
		valueA = trainer.testOnData(tstdata)
		print '\tMSE -> {0:.2f}'.format(valueA)
		self.saveModel(n, r'.\NeuralNets\SavedNet_%d' % (user.key))
		
		return n
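One caveat: the LSTM above is fed from plain SupervisedDataSets, so every 24-feature row is treated as an independent sample and the recurrent state carries no meaningful history. PyBrain only exposes temporal structure through a SequentialDataSet; a sketch of that setup, mirroring the sample layout of the method above:

from pybrain.datasets import SequentialDataSet

trndata = SequentialDataSet(24, 1)
for number in range(0, 10):
	trndata.newSequence()  # one sequence per digit, variations in order
	for variation in range(0, 7):
		trndata.addSample(self.getSample(user, number, variation), (user.key,))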
Example No. 15
def _run_training(net, data_set):
    logger.info("Running training...")
    data_set_training, data_set_test = data_set.splitWithProportion(0.9)
    rate = LEARNING_RATE
    trainer = BackpropTrainer(net, data_set_training, learningrate=rate)
    for epoch in xrange(NUM_EPOCHS):
        logger.info("Calculating EPOCH %d", epoch)
        logger.info("Result on training set %f", trainer.train())
        if epoch % 4 == 0:
            logger.info("Result on test set %f", trainer.testOnData(data_set_test, verbose=True))
        if epoch == 0 or epoch % 10 == 9:
            rate /= 10
            trainer = BackpropTrainer(net, data_set_training, learningrate=rate)
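Rebuilding the trainer is one way to anneal the rate; BackpropTrainer also accepts an lrdecay factor that multiplies the learning rate after every weight update, which gives a smoother version of the same schedule. A sketch, reusing the names above:

# learningrate is multiplied by lrdecay after each update, so no rebuild is needed.
trainer = BackpropTrainer(net, data_set_training,
                          learningrate=LEARNING_RATE, lrdecay=0.9999)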
Example No. 16
    def trainingOnDataSet(self):
        trainer = BackpropTrainer(self.n, self.ds)
        result = 200
        while result > 100:
            result = trainer.train()
            print result


        newds = SupervisedDataSet(
            self.pix_size * self.pix_size, len(results))

        newds.addSample(
                self.input_value[0], utils.getPosition('a', results))

        print 'MSE train', trainer.testOnData(newds, verbose = True)
def problemC():
    ds_and = SupervisedDataSet(3, 1)
    ds_and.addSample( (0,0,0) , (1,))
    ds_and.addSample( (0,0,1) , (1,))
    ds_and.addSample( (0,1,0) , (1,))
    ds_and.addSample( (0,1,1) , (0,))
    ds_and.addSample( (1,0,0) , (1,))
    ds_and.addSample( (1,0,1) , (0,))
    ds_and.addSample( (1,1,0) , (1,))
    ds_and.addSample( (1,1,1) , (0,))
    net = buildNetwork(3, 10, 1, bias=True)
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds_and, 3000)
    trainer.testOnData() 
    print '\n3) NOT ( (A OR B) AND C) '
    print '0 0 0  = ', net.activate((0,0,0))
    print '0 0 1  = ', net.activate((0,0,1))
    print '0 1 0  = ', net.activate((0,1,0))
    print '0 1 1  = ', net.activate((0,1,1))
    print '1 0 0  = ', net.activate((1,0,0))
    print '1 0 1  = ', net.activate((1,0,1))
    print '1 1 0  = ', net.activate((1,1,0))
    print '1 1 1  = ', net.activate((1,1,1))
Example No. 18
def main():
	emotion={}
	dataset__generator(emotion)
	print('dataset generated')
	tstdata,trndata=ds.splitWithProportion(0.50)
	print('data split')
	#ds.getLength()
	trndata._convertToOneOfMany( )
	tstdata._convertToOneOfMany( )
	emotion={}
	if os.path.isfile('train.xml'):
		fnn=NetworkReader.readFrom('train.xml')
	else:
		fnn=buildNetwork(1292,3,2,outclass=SoftmaxLayer)
	NetworkWriter.writeToFile(fnn, 'train.xml')
	print('starting training')
	trainer=BackpropTrainer(fnn,dataset=trndata,momentum=0.1,verbose=True,weightdecay=0.01)	
	
	print('epoch level '+str(1000))
	i=10
	j1=range(10,200)
	temp=[]
	t=1
	while t<10:
		t=t+1
		i=random.choice(j1)
		temp.append(i)
		print('starting '+str(i))
		time.sleep(1)
		trainer.trainEpochs(i)
		NetworkWriter.writeToFile(fnn, 'train.xml')
		trnresult=percentError(trainer.testOnClassData(),trndata['class'])
		tstresult=percentError(trainer.testOnClassData(dataset=tstdata),tstdata['class'])
		temp.append([trnresult,tstresult])
		r_server.set('errortest'+str(i),tstresult)
		r_server.set('errortrain'+str(i),trnresult)
		
	for i in temp:
		print(i)
    def classicNeuralNetwork(self,features,labels,autoencoder=False):
        if autoencoder: labels = features
        dataSet = SupervisedDataSet(features.shape[1], labels.shape[1])
        dataSet.setField('input', features)
        dataSet.setField('target', labels)
        tstdata, trndata = dataSet.splitWithProportion( 0.25 )
        print features.shape
        simpleNeuralNetwork = _buildNetwork(
            (LinearLayer(features.shape[1], 'in'),),
            (SigmoidLayer(20, 'hidden0'),),
            (LinearLayer(labels.shape[1], 'out'),),
            bias=True)
        trainer = BackpropTrainer(simpleNeuralNetwork, dataset=trndata, verbose=True)#, momentum=0.1)
        trainer.trainUntilConvergence(maxEpochs=15)
        
        trnresult = trainer.testOnData( dataset=trndata )
        tstresult = trainer.testOnData( dataset=tstdata )

        print "epoch: %4d" % trainer.totalepochs, \
          "  train MSE: %7.4f" % trnresult, \
          "  test MSE: %7.4f" % tstresult

        self.neuralNetwork = simpleNeuralNetwork
Example No. 20
    print i

net = buildNetwork(size[0] * size[1], 3000, 1, bias=True)

st = time()

'''
try:
    f = open('_learned', 'r')
    net = pickle.load(f)
    f.close()
except:
'''

if True:
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds, 10)
    trainer.testOnData()
    print 'Learning time:', time() - st
    #f = open('_learned', 'w')
    #pickle.dump(net, f)
    #f.close()

#print net.activate(FeaturesFromFile('pos/5.jpg'))

for i in xrange(1, 6):
    print i, 'pos', net.activate(FeaturesFromFile('pos/' + str(i) + '.jpg', size))

for i in xrange(1, 6):
    print i, 'neg', net.activate(FeaturesFromFile('neg/' + str(i) + '.jpg', size))
Example No. 21
def train(data_file, vis_matrix, vis_graph, save_file=''):
	load_params()
	#import dataset
	ds = ClassificationDataSet(micro_dim, 1, nb_classes=num_classes)
	extract_data(data_file, ds)
	
	tr, val = ds.splitWithProportion(2/3.)
	#softmax output layer
	tr._convertToOneOfMany()
	val._convertToOneOfMany()
	
	#build network
	layer_sizes = [tr.indim]
	for layer_size in num_hidden:
		layer_sizes.append(layer_size)
	layer_sizes.append(tr.outdim)
	if save_file == '':
		ann = buildNetwork(*layer_sizes, hiddenclass=SigmoidLayer, recurrent=False, outclass=SoftmaxLayer, bias=inc_bias)
		iteration = 0
	else:
		ann = NetworkReader.readFrom(save_file)
		match = re.search('([0-9]+)_(?:[0-9]{1,3}).xml', save_file)
		if match is None:
			print 'Net save files should be named I_E.xml, where I is the iteration and E is the rounded error from 0-100'
			exit(1)
		else:
			iteration = int(match.group(1)) + 1
	
	#training 
	trainer = BackpropTrainer(ann, dataset=tr, momentum=momentum, weightdecay=weight_decay)
	done = False
	errors, variations = [], []
	testing_errors, testing_variations = [], []
	
	while not done:
		trainer.trainEpochs(num_epochs)
		
		# visualize iteration
		if vis_matrix or vis_graph:
			vertices, edges = vertsEdges(ann)
			if vis_matrix:
				matrixVisualizer(edges)
			if vis_graph:
				graphVisualizer(vertices, edges, iteration)
		
		# calculate and print error info
		training_error = trainer.testOnData()
		testing_error = trainer.testOnData(dataset=val)
		training_variation = calcVariation(trainer)
		testing_variation = calcVariation(trainer, dataset=val)
		errors.append(training_error)
		variations.append(training_variation)
		testing_errors.append(testing_error)
		testing_variations.append(testing_variation)
		fig, ax1 = plt.subplots()
		iterations = range(iteration+1)
		ax1.plot(iterations, map(log10, errors), 'r-')
		ax1.plot(iterations, map(log10, testing_errors), 'b-')
		ax1.set_xlabel('iteration')
		ax1.set_ylabel('log mean squared error (red=train, blue=test)')
		for tick in ax1.get_yticklabels():
			tick.set_color('b')
		ax2 = ax1.twinx()
		ax2.plot(iterations, map(log10, variations), 'r--')
		ax2.plot(iterations, map(log10, testing_variations), 'b--')
		ax2.set_ylabel('log variation (L1 error) (red=train, blue=test)')
		for tick in ax2.get_yticklabels():
			tick.set_color('r')
		plt.savefig('error-3layer-48.pdf')
		plt.close()
		print 'iter %d, training error %f, testing error %f, training variation %f, testing variation %f' % (iteration, training_error, testing_error, training_variation, testing_variation)
		
		#save every <snapshot> iterations
		if iteration % snapshot == 0:
			file_data = (iteration, int(errors[-1]*100))
			print 'Saving model %d_%d.xml...' % file_data
			NetworkWriter.writeToFile(ann, '%d_%d.xml' % file_data)
		
		# go to the next iteration if not done
		iteration = iteration + 1
		if iteration >= max_iterations:
			done = True
	
	#testing
	val_errors, val_variations = [], []
	for i in range(5):
		val_error, val_variation = trainer.testOnData(dataset=val), calcVariation(trainer, dataset=val)
		print 'error %f, variation %f' % (val_error, val_variation)
		val_errors.append(val_error)
		val_variations.append(val_variation)
		tr, val = ds.splitWithProportion(0.9)
		val._convertToOneOfMany()
	print 'average error %f, average variation %f' % (np.average(val_errors), np.average(val_variations))
	
	#plotting
	iterations = range(max_iterations)
	fig, ax1 = plt.subplots()
	ax1.plot(iterations, map(log10, errors), 'b-')
	ax1.set_xlabel('iteration')
	ax1.set_ylabel('log mean squared error')
	for tick in ax1.get_yticklabels():
		tick.set_color('b')
	ax1.set_title('error for validation dataset: %f, variation for validation dataset: %f' % (val_error, val_variation))
	ax2 = ax1.twinx()
	ax2.plot(iterations, map(log10, variations), 'r-')
	ax2.set_ylabel('log variation (L1 error)')
	for tick in ax2.get_yticklabels():
		tick.set_color('r')
	plt.savefig('error-4layer-48-96.pdf')
Example No. 22
import numpy as np
import matplotlib.pyplot as plt

import pdb

from pybrain.datasets import ClassificationDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

ds = ClassificationDataSet(11, 1, nb_classes=10)

tf = open('winequality-white.csv', 'r')

for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata = tuple(data[:11])
    outdata = tuple(data[11:])
    ds.addSample(indata, outdata)

tstdata, trndata = ds.splitWithProportion(0.25)

trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()

n = buildNetwork(trndata.indim, 8, 8, trndata.outdim, recurrent=True)
t = BackpropTrainer(n,
                    learningrate=0.01,
                    momentum=0.5,
                    verbose=True,
                    weightdecay=0.01)
t.trainUntilConvergence(trndata, maxEpochs=100, verbose=True)
pdb.set_trace()
t.testOnData(verbose=True)
Example No. 23
                    outclass = SoftmaxLayer)
trainer = BackpropTrainer( fnn, dataset=train_data, momentum=0.2, verbose=True, learningrate=0.05, lrdecay=1.0)
# trainer = RPropMinusTrainer( fnn, dataset=train_data, momentum=0.1, verbose=True, learningrate=0.01, lrdecay=1.0)

# trainer.trainUntilConvergence()

best = fnn.copy()
best_test = 1

for i in range(5):
    print("training")
    trainer.trainEpochs(1)

    print("testing")
    # trnresult = trainer.testOnData()
    tstresult = trainer.testOnData( dataset=test_data )

    if tstresult < best_test:
        best = fnn.copy()
        best_test = tstresult

    print "epoch: %4d" % trainer.totalepochs, \
        "  test error: %.3f" % tstresult
        # "  train error: %.3f" % trnresult, \

    # if tstresult <= 0.14:
    #     break

fnn = best
trainer.module = best
Example No. 24
    # [1 0] corresponding to 0
    # [0 1] corresponding to 1
    # it is considered as classification problem with class 0 and class 1
    # convert output of net to real result using function output above
    net = buildNetwork(2, 5, 2, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
    ds = SupervisedDataSet(2, 2)

    # More data might be needed in general,
    # but for this example a ds with 4 samples is plenty
    for i in xrange(1):
        ds.addSample((0.0, 0.0), (1.0, 0.0))
        ds.addSample((0.0, 1.0), (0.0, 1.0))
        ds.addSample((1.0, 0.0), (0.0, 1.0))
        ds.addSample((1.0, 1.0), (1.0, 0.0))

    print len(ds)

    trainer = BackpropTrainer(net, ds, learningrate=0.01, momentum=0.99)

    # train for 100 epochs
    for i in xrange(100):
        print trainer.train()

    print "test on data", trainer.testOnData()

    # test
    print output(net.activate((0.0, 0.0)))  # 1 0 --> 0
    print output(net.activate((1.0, 0.0)))  # 0 1 --> 1
    print output(net.activate((0.0, 1.0)))  # 0 1 --> 1
    print output(net.activate((1.0, 1.0)))  # 1 0 --> 0
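The output() helper is not shown in this snippet; presumably it maps the softmax pair back to a class index. A minimal hypothetical version:

def output(activation):
    # Hypothetical helper: index of the winning output unit, so [1, 0] -> 0 and [0, 1] -> 1.
    return int(activation.argmax())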
Example No. 25
# Read all info on the csv. 
for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:8])
    outdata = tuple(data[8:])
    ds.addSample(indata,outdata)

'''
Make the actual neural networks
TOPOLOGY: 8 in, 8 hidden-0, 8 hidden-1, 8 out. Hidden layers subject to change
'''
n = buildNetwork(ds.indim,8,8,ds.outdim,recurrent=True)

#change learningrate based on gradient
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)

# 100 iterations
t.trainOnDataset(ds,100)
t.testOnData(verbose=True)

# Our prediction given 8 inputs, will print 8 estimated outputs
guess = n.activate((1,2,3,4,5,6,7,8)) 
print 'Final weights:',n.params

# Print our Guess 
print '\nGUESS???' + str(guess)

#print n['in'], n['out'], n[h0], n['h1']
print (printConnections(n))
#print "Number of training patterns: ", len(trndata)
#print "Input and output dimensions: ", trndata.indim, trndata.outdim
#print "First sample (input, target, class):"
#print trndata['input'][0], trndata['target'][0], trndata['class'][0]


fnn = buildNetwork( trndata.indim, 107, trndata.outdim, outclass=SoftmaxLayer )
trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1,learningrate=0.05 , verbose=True, weightdecay=0.001)





trainer.trainEpochs(1)
trainer.testOnData(tstdata, verbose=True)
alg_score = np.array([fnn.activate(x) for x, _ in tstdata])


print ("Printing Test Data: ")
print (alg_score[:, 0])

#print (tmp_tst_for_validation)

alg_y_test = []
for test_index in range(len(tmp_tst_for_validation)):
    alg_y_test.append(int(tmp_tst_for_validation.getSample(test_index)[1]) )



alg_y = label_binarize(alg_y_test, classes=[0, 1, 2])
    def goClassifer(self, iteration, learningrate, momentum, toFile):
        self.TrainingSetEventList[:] = []
        print "Iteration Count: " + str(iteration)
        #Set up the classification data: 14 inputs, a one-dimensional output, and 7 possible classes
        trndata = ClassificationDataSet(14, nb_classes=7)
        tstdata = ClassificationDataSet(14, nb_classes=7)
        SAMPLE_SIZE = 100
        AmountPerSpecies = 100
        SingleBatIDToAdd = [1, 2, 3, 5, 6] # for single
        MultiBatIDToAdd = [10, 11, 12, 14]# for multi
        AddBatIDToAdd = [1, 2, 3, 5, 6]
        AddSingleMulti = [1, 2, 3, 5, 6,10, 11, 12, 14]
        TraningDataAmount = 5000

        print "Adding Bat Single Species Events"
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target = self.getTrainingSpeciesDistributedData(SingleBatIDToAdd, AmountPerSpecies)

        SAMPLE_SIZE = len(minFreq)
        for i in range (0, SAMPLE_SIZE):
            #trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [1]) #self.convertID(target[i])
            trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [self.convertIDSingle(target[i])]) #self.convertID(target[i])

        #print "Adding Bat Multi Species Events"
        #minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target = self.getTrainingSpeciesDistributedData(MultiBatIDToAdd, AmountPerSpecies)

        #SAMPLE_SIZE = len(minFreq)
        #for i in range (0, SAMPLE_SIZE):
        #    trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [2])


        print "Adding noise events"
        NoiseID = 8
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage = self.getDistributedData(AmountPerSpecies, NoiseID)
        SAMPLE_SIZE = len(minFreq)
        for i in range (0, SAMPLE_SIZE):
            trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [self.convertIDSingle(NoiseID)]) #self.convertID(NoiseID)

        print "Adding something else events"
        SomethingElseID = 9
        SEAmount = 20
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage = self.getDistributedData(SEAmount, SomethingElseID)
        SAMPLE_SIZE = len(minFreq)
        for i in range (0, SAMPLE_SIZE):
            trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [self.convertIDSingle(SomethingElseID)])

        # Try to put all multievent in the something else event
        print "Adding something else events"
        SomethingElseID = 9
        BatIDToAdd2 = [10, 11, 12, 14]
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target = self.getTrainingSpeciesDistributedData(BatIDToAdd2, SEAmount)
        SAMPLE_SIZE = len(minFreq)
        for i in range (0, SAMPLE_SIZE):
            trndata.addSample([ minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i] ], [self.convertIDSingle(SomethingElseID)])


        print "Adding test data"
        minFreq, maxFreq, Durantion, fl1, fl2, fl3, fl4, fl5, fl6, fl7, fl8, fl9, fl10, pixelAverage, target = self.getDistrubedTestData(TraningDataAmount, SingleBatIDToAdd)
        maxSize = len(minFreq)
        for i in range (0, maxSize):
            tstdata.addSample([minFreq[i], maxFreq[i], Durantion[i], fl1[i], fl2[i], fl3[i], fl4[i], fl5[i], fl6[i], fl7[i], fl8[i], fl9[i], fl10[i], pixelAverage[i]], [ self.convertIDSingle (target[i]) ])

        trndata._convertToOneOfMany( )
        tstdata._convertToOneOfMany( )
        print "Number of training patterns: ", len(trndata)
        print "Input and output dimensions: ", trndata.indim, trndata.outdim
        print "Learning Rate: " + str(learningrate)
        print "Momentum: " + str(momentum)
        #print "First sample (input, target, class):"
        #print trndata['input'][0], trndata['target'][0], trndata['class'][0]
        #print "200th sample (input, target, class):"
        #print trndata['input'][100], trndata['target'][100], trndata['class'][100]


        #set up the Feed Forward Network
        HiddenNeurons = 10
        #learningrate = 0.01
        #momentum = 0.1
        weightdecay = 0
        #from datainterface import ModuleWrapper, ClassificationModuleWrapper
        #from sgd import SGD

        net = buildNetwork(trndata.indim, HiddenNeurons, trndata.outdim, bias=True, outclass=SoftmaxLayer)
        #p0 = net.params.copy()

        #provider = ClassificationModuleWrapper(trndata, net, shuffling=False)
        #algo = SGD(provider, net.params.copy(), callback=self.printy, learning_rate=learningrate, momentum=momentum)
        #print '\n' * 2
        #print 'SGD-CE'
        #algo.run(1000)
        trainer = BackpropTrainer(net, dataset=trndata, momentum=momentum, learningrate=learningrate, verbose=False, weightdecay=weightdecay)
        #raw_input("Press Enter to continue...")
        print "Training data"
        if toFile:
            #filename = "InputN" + str(trndata.indim) + "HiddenN" + str(HiddenNeurons) + "OutputN" + str(trndata.outdim) + "Momentum"+ str(momentum) + "LearningRate" + str(learningrate) + "Weightdecay" + str(weightdecay)
            root = "/home/anoch/Dropbox/SDU/10 Semester/MSc Project/Data Results/Master/BinarySpeciesTestMSE/"
            filename = "ClassifierSpeciesTest_" + str(iteration) +"_MSE_LR_"+str(learningrate) + "_M_"+str(momentum)
            folderName = root + "ClassifierSpeciesTest_MSE_LR_"+str(learningrate) + "_M_"+str(momentum)
            if not os.path.exists(folderName):
                os.makedirs(folderName)
            f = open(folderName + "/"+ filename + ".txt", 'w')

            value = "Added Bat Species: " + str(AddBatIDToAdd) + "\n"
            f.write(value)

            value = "Number of bat patterns: " + str(len(trndata)) + "\n"
            f.write(value)

            value = "Number of noise patterns: " + str(AmountPerSpecies) + "\n"
            f.write(value)

            value = "Number of patterns per species: " + str(AmountPerSpecies) + "\n"
            f.write(value)

            value = "Number of test data: " + str(TraningDataAmount) + "\n"
            f.write(value)

            value = "Input, Hidden and output dimensions: " + str(trndata.indim) + ", " + str(HiddenNeurons) + ", " + str(trndata.outdim) + "\n"
            f.write(value)

            value = "Momentum: " + str(momentum) + "\n"
            f.write(value)

            value = "Learning Rate: " + str(learningrate) + "\n"
            f.write(value)

            value = "Weight Decay: " + str(weightdecay) + "\n"
            f.write(value)

            f.write("Input Activation function: Linear function\n")
            f.write("Hidden Activation function: Sigmoid function\n")
            f.write("Output Activation function: Softmax function\n")

        maxEpoch = 100
        for i in range(0,maxEpoch):
            # Train one epoch
            trainer.trainEpochs(10)
            averageError = trainer.testOnData(dataset=tstdata, verbose=False)

            #averageCEE = self.CrossEntropyErrorAveraged(net, tstdata)
            #print "Average Cross Entropy Error: " + str(averageCEE)
            #print "Mean Square Error: " + str(averageError)

            #"""procentError(out, true) return percentage of mismatch between out and target values (lists and arrays accepted) error= ((out - true)/true)*100"""
            trnresult = percentError(trainer.testOnClassData(), trndata['class'])
            tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])

            print("epoch: %4d" % trainer.totalepochs,"  train error: %5.2f%%" % trnresult,"  test error: %5.2f%%" % tstresult)

            if tstresult < 27.0:
                raw_input("Press Enter to continue...")
                break

            if toFile:
                dataString = str(trainer.totalepochs) + ", " + str(averageError) + ", " + str(trnresult) + ", " + str(tstresult) + "\n"
                f.write(dataString)
        NetworkWriter.writeToFile(net, "ThirdStageClassifier.xml")
        if toFile:
            import numpy as np
            f.close()
            ConfusionMatrix, BatTarget = self.CorrectRatio(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
            filename = filename+ "_CR"
            result_file = open(folderName + "/"+ filename + ".txt", 'w')
            result_file.write("[Species]")
            result_file.write(str(BatTarget))
            result_file.write(str(ConfusionMatrix))
            np.savetxt(folderName + "/"+ filename+".csv", ConfusionMatrix, delimiter=",")
            result_file.close()
        self.CorrectRatio(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        print "Done training"
Example No. 28
    # List all the different networks we want to test
    net=buildNetwork(trndata.indim,15,trndata.outdim, outclass=SigmoidLayer, bias=True)
    print net

    # Setup a trainer that will use backpropagation for training
    trainer = BackpropTrainer(net, dataset=trndata, verbose=True, weightdecay=0.01, momentum=.9)
    train_errors = []
    test_errors = []

    for i in range(max_epochs):
        start = time.time()
        error = trainer.train()

        print "Epoch: %d, Error: %7.4f" % (i, error)

        train_errors.append(trainer.testOnData(trndata))
        print train_errors[i]

        test_errors.append(trainer.testOnData(tstdata))
        print test_errors[i]
        print "Elapsed time: {}".format(time.time()-start)

    # Plot training and test error as a function of the number of epochs
    pl.figure()
    pl.title('Neural Networks: Performance vs Epochs')
    pl.plot(range(max_epochs), test_errors, lw=2, label = 'test error')
    pl.plot(range(max_epochs), train_errors, lw=2, label = 'training error')
    pl.legend(loc=0)
    pl.xlabel('epoch')
    pl.ylabel('Error Rate')
    pl.show()
Example No. 29
    header = btc_dataset.readline()
    count = 0
    for line in btc_dataset.readlines():
        line = line.replace('\n', '')
        values = line.split(';')
        if count >= train_begin and count <= train_end:
            ds.addSample(values[:-1], values[-1])
        elif count >= test_begin and count <= test_end:
            test_params.append(values[:-1])
            test_targets.append(values[-1])
        count += 1

n = buildNetwork(ds.indim, 1000, ds.outdim, recurrent=True)
t = BackpropTrainer(n, learningrate=0.001, momentum=0, verbose=True)
t.trainUntilConvergence(ds, 200)
t.testOnData(verbose=False)

fileObject = open('bpnn', 'wb')
pickle.dump(n, fileObject)
fileObject.close()

# fileObject = open('bpnn','r')
# n = pickle.load(fileObject)

predicted = []

for i in range(len(test_params)):
    predicted.append(n.activate(test_params[i]))
    print("Predicted:", predicted[-1])
    print("Real", test_targets[i])
Example No. 30
    def train(self, dataset):
        TrainDS, TestDS = dataset.splitWithProportion(0.8)
        trainer = BackpropTrainer(self.n, TrainDS)
        for i in xrange(6):
            trainer.trainEpochs(1)
        trainer.testOnData(TestDS, True)
Example No. 31
net.addModule(h1)
net.addModule(bias)

# create connections 
net.addConnection(IdentityConnection(inp, h1)) 
net.addConnection(FullConnection(h1, outp))
#net.addConnection(FullConnection(bias, outp))
#net.addConnection(FullConnection(bias, h1))

# finish up 
net.sortModules()

# initialize the backprop trainer and train 
trainer = BackpropTrainer(net, ds, momentum=.99, learningrate=0.01)
trainer.trainOnDataset(ds,10)
trainer.testOnData(verbose=True)

print 'Final weights:', net.params

print net

test_x = test_x[0]
preds_y = []
index_inpoints = size_ls

for i in range(len(xt)):
	pred_y = net.activate(test_x)
	
	test_x = test_x[1:]
	test_x.append(pred_y[0])
	
Example No. 32
def trainNetwork(train_ds, test_ds,
                 train_ds_labels, test_ds_labels,
                 features,
                 learningrate, lrdecay,
                 momentum, weightdecay,
                 hidden_layers,
                 time_limit_seconds):
    fnn = FeedForwardNetwork()
    inLayer = LinearLayer(train_ds.indim)
    fnn.addInputModule(inLayer)
    lastLayer = inLayer
    connection_number = 0 # connection-0 is the connection from the input layer.
    for hidden_layer_size in hidden_layers:
#        hiddenLayer = SigmoidLayer(hidden_layer_size)
        hiddenLayer = TanhLayer(hidden_layer_size)
        fnn.addModule(hiddenLayer)
        fnn.addConnection(
            FullConnection(lastLayer, hiddenLayer,
                           name="connection-%d" % connection_number))
        connection_number = connection_number + 1
        bias = BiasUnit()
        fnn.addModule(bias)
        fnn.addConnection(FullConnection(bias, hiddenLayer))
        lastLayer = hiddenLayer
    outLayer = SigmoidLayer(train_ds.outdim)
    fnn.addOutputModule(outLayer)
    fnn.addConnection(
        FullConnection(lastLayer, outLayer,
                       name="connection-%d" % connection_number))
    bias = BiasUnit()
    fnn.addModule(bias)
    fnn.addConnection(FullConnection(bias, outLayer))
    fnn.sortModules()

    trainer = BackpropTrainer(fnn, dataset=train_ds,
                              learningrate=learningrate,
                              lrdecay=lrdecay,
                              momentum=momentum,
                              verbose=False,
                              weightdecay=weightdecay)

    # Train
    (initial_train_error, initial_train_F1) = percentClassErrorAndF1(fnn, train_ds, train_ds_labels, features)
    train_errors = [initial_train_error]
    train_F1s = [initial_train_F1]
    (initial_test_error, initial_test_F1) = percentClassErrorAndF1(fnn, test_ds, test_ds_labels, features)
    test_errors = [initial_test_error]
    test_F1s = [initial_test_F1]
    train_algo_errors = [trainer.testOnData(train_ds) * 100]
    test_algo_errors = [trainer.testOnData(test_ds) * 100]
    epochs = [0]
    try:
        start_time = time.time()
        for i in range(200):
            for _ in xrange(50):
                train_algo_error = trainer.train() * 100.0
                if math.isnan(train_algo_error):
                    break
            if math.isnan(train_algo_error):
                break
            (trnresult, trnF1) = percentClassErrorAndF1(fnn, train_ds, train_ds_labels, features)
            (tstresult, tstF1) = percentClassErrorAndF1(fnn, test_ds, test_ds_labels, features)
            test_algo_error = trainer.testOnData(test_ds)* 100
            now_time = time.time()
            time_left = time_limit_seconds - (now_time - start_time)
            print("epoch %3d:" % trainer.totalepochs,
                  "  train error: %6.4f%%" % train_algo_error,
                  "  test error: %6.4f%%" % test_algo_error,
                  "  train F1: %s" % ", ".join([("%.2f" % x) for x in trnF1]),
                  "  test F1: %s" % ", ".join([("%.2f" % x) for x in tstF1]),
                  "  %ds left" % int(round(time_left)))

            epochs.append(trainer.totalepochs)
            train_errors.append(trnresult)
            train_F1s.append(trnF1)
            test_errors.append(tstresult)
            test_F1s.append(tstF1)
            train_algo_errors.append(train_algo_error)
            test_algo_errors.append(test_algo_error)
            if time_left <= 0:
                print("Timeout: Time to report the results.")
                break
            # if test_algo_errors[-1] < 4:
            #     print("Good enough? Don't want to overtrain")
            #     break;

    except KeyboardInterrupt:
        # Someone pressed Ctrl-C, try to still plot the data.
        print("Aborted training...")
        pass

    return (fnn, epochs, train_algo_errors, test_algo_errors, train_F1s, test_F1s)
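For the common case of a plain stack of Tanh hidden layers with a Sigmoid output, the manual wiring above collapses to a buildNetwork call; the shortcut connects one shared BiasUnit to every hidden layer and the output, which is functionally equivalent to the one-BiasUnit-per-layer wiring here. A sketch, assuming two hidden layers of 64 and 32 units:

from pybrain.structure import TanhLayer, SigmoidLayer
from pybrain.tools.shortcuts import buildNetwork

fnn = buildNetwork(train_ds.indim, 64, 32, train_ds.outdim,
                   hiddenclass=TanhLayer, outclass=SigmoidLayer, bias=True)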
Example No. 33
def run():
    import scipy
    from scipy import linalg

    f = open("modelfitDatabase1.dat", "rb")
    import pickle

    dd = pickle.load(f)
    node = dd.children[13]

    rfs = node.children[0].data["ReversCorrelationRFs"]

    pred_act = numpy.array(node.children[0].data["ReversCorrelationPredictedActivities"])
    pred_val_act = numpy.array(node.children[0].data["ReversCorrelationPredictedValidationActivities"])

    training_set = node.data["training_set"]
    validation_set = node.data["validation_set"]
    training_inputs = node.data["training_inputs"]
    validation_inputs = node.data["validation_inputs"]

    ofs = contrib.modelfit.fit_sigmoids_to_of(numpy.mat(training_set), numpy.mat(pred_act))
    pred_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_act), ofs)
    pred_val_act_t = contrib.modelfit.apply_sigmoid_output_function(numpy.mat(pred_val_act), ofs)

    (sx, sy) = numpy.shape(rfs[0])
    print sx, sy
    n = FeedForwardNetwork()

    inLayer = LinearLayer(sx * sy)
    hiddenLayer = SigmoidLayer(4)
    outputLayer = SigmoidLayer(1)

    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outputLayer)

    in_to_hidden = RBFConnection(sx, sy, inLayer, hiddenLayer)
    # in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)

    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)
    n.sortModules()
    gradientCheck(n)
    return  # early exit: everything below is skipped while gradient-checking

    from pybrain.datasets import SupervisedDataSet

    ds = SupervisedDataSet(sx * sy, 1)
    val = SupervisedDataSet(sx * sy, 1)

    for i in xrange(0, len(training_inputs)):
        ds.addSample(training_inputs[i], training_set[i, 0])

    for i in xrange(0, len(validation_inputs)):
        val.addSample(validation_inputs[i], validation_set[i, 0])

    tstdata, trndata = ds.splitWithProportion(0.1)

    from pybrain.supervised.trainers import BackpropTrainer

    trainer = BackpropTrainer(n, trndata, momentum=0.1, verbose=True, learningrate=0.002)

    training_set = numpy.array(numpy.mat(training_set)[:, 0])
    validation_set = numpy.array(numpy.mat(validation_set)[:, 0])
    pred_val_act_t = numpy.array(numpy.mat(pred_val_act_t)[:, 0])

    out = n.activateOnDataset(val)
    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    print "Start training"
    for i in range(50):
        trnresult = trainer.testOnData()
        tstresult = trainer.testOnData(dataset=tstdata)

        print "epoch: %4d" % trainer.totalepochs, "  train MSE: %7.5f" % trnresult, "  test MSE: %7.5f" % tstresult
        trainer.trainEpochs(1)

        out = n.activateOnDataset(val)
        (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
        print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
            numpy.power(validation_set - out, 2)
        )

    out = n.activateOnDataset(val)

    print numpy.shape(out)
    print numpy.shape(validation_set)

    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, out)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(numpy.power(validation_set - out, 2))

    (ranks, correct, pred) = contrib.modelfit.performIdentification(validation_set, pred_val_act_t)
    print "Correct:", correct, "Mean rank:", numpy.mean(ranks), "MSE", numpy.mean(
        numpy.power(validation_set - pred_val_act_t, 2)
    )

    return n
Example No. 34
from pybrain.datasets import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle

dataset_and = SupervisedDataSet(2, 1)
dataset_and.addSample( (0,0) , (0,))
dataset_and.addSample( (0,1) , (0,))
dataset_and.addSample( (1,0) , (0,))
dataset_and.addSample( (1,1) , (1,))

net_and = buildNetwork(2, 4, 1, bias=True)

trainer_and = BackpropTrainer(net_and, learningrate = 0.01, momentum = 0.99)
trainer_and.trainOnDataset(dataset_and, 3000)
trainer_and.testOnData(verbose=True)


######################################################
dataset_or = SupervisedDataSet(2, 1)
dataset_or.addSample( (0,0) , (0,))
dataset_or.addSample( (0,1) , (1,))
dataset_or.addSample( (1,0) , (1,))
dataset_or.addSample( (1,1) , (1,))

net_or = buildNetwork(2, 4, 1, bias=True)

trainer_or = BackpropTrainer(net_or, learningrate = 0.01, momentum = 0.99)
trainer_or.trainOnDataset(dataset_or, 3000)
trainer_or.testOnData(verbose=True)
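A quick check of the two trained gates (a usage sketch; the outputs land near 0 or 1 rather than exactly on them):

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print '%d AND %d = %.3f    %d OR %d = %.3f' % (
        a, b, net_and.activate((a, b))[0],
        a, b, net_or.activate((a, b))[0])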
Example No. 35
while(ind<totalSize):
	indx = 0
	while(indx < 14):
		x[indx] =float((ds['input'][ind][indx])*(1.0/MaximoInput[indx]))
		indx+=1
	y = float((ds['target'][ind])*(1.0/MaximoTarget))
	trainSet.addSample(x,y)
	ind+=1


if camada2 == 0:
	net = buildNetwork(trainSet.indim, camada1, trainSet.outdim, recurrent=True)
else:
	net = buildNetwork(trainSet.indim, camada1, camada2, trainSet.outdim, recurrent=True)
trainer = BackpropTrainer(net, dataset=trainSet, learningrate=Learning, momentum=Momentum, verbose=True)
trainer.trainOnDataset(trainSet, Ciclos)

avgErr = trainer.testOnData(trainSet, verbose=True)

#totalError =0
#n =0
#for error in errorList:
	#totalError += error
	#n+=1
#erroMedio = float(totalError/float(n))

outFile = open("outputFileTest.txt","a")
outFile.write(repr(PorcDivTest)+", "+repr(Ciclos)+", "+repr(Learning)+", "+repr(Momentum)+", "+repr(camada1)+", "+repr(camada2)+", "+repr(avgErr)+"\n")
outFile.close()
trainingData = createDataset(X_train, Y_train)
validationData = createDataset(X_valid, Y_valid)
testData = createDataset(X_test, Y_test)

trainer = BackpropTrainer(net, trainingData) #, verbose=True)
#trainer.trainUntilConvergence(verbose=True, trainingData=trainingData, validationData=validationData)

maxEpochs = 100
continueEpochs = 10
convergence_threshold = 10
trainer.ds = trainingData
bestweights = trainer.module.params.copy()
bestverr = trainer.testOnData(validationData)
bestepoch = 0
trainingErrors = []
validationErrors = [bestverr]

print('> Training')

epochs = 0
while True:
  trainingError = trainer.train()
  validationError = trainer.testOnData(validationData)
  
  print('Validation error = %f - Training error = %f' % (validationError, trainingError))

  if isnan(trainingError) or isnan(validationError):
      raise Exception("Training produced NaN results")
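The loop above is cut off before its exit condition; a sketch of how the usual early-stopping bookkeeping might continue, mirroring what trainUntilConvergence does internally (this continuation is an assumption, not the original code):

  # -- hypothetical continuation of the loop body --
  trainingErrors.append(trainingError)
  validationErrors.append(validationError)
  if validationError < bestverr:
      bestverr = validationError
      bestepoch = epochs
      bestweights = trainer.module.params.copy()
  elif epochs - bestepoch >= continueEpochs:
      break  # no improvement for continueEpochs rounds: stop early
  epochs += 1
  if epochs >= maxEpochs:
      break

trainer.module.params[:] = bestweights  # roll back to the best-validation weights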
Example No. 37
dataset = SupervisedDataSet(5, 1)
dataset.setField('input', X)
dataset.setField('target', y)

trainset, testset = dataset.splitWithProportion(0.75)
trainer = BackpropTrainer(net, dataset=trainset, learningrate=0.1, momentum=0.5, verbose=False)

epochs = np.array([])
train_errors = np.array([])
test_errors = np.array([])

for i in range(10):
    print 'round', i + 1
    trainer.trainEpochs(5)

    train_error = trainer.testOnData(trainset)
    test_error = trainer.testOnData(testset)

    epochs = np.append(epochs, trainer.totalepochs)
    train_errors = np.append(train_errors, train_error)
    test_errors = np.append(test_errors, test_error)

    print 'train error', train_error
    print 'test error', test_error


plt.title("Learning rate for 5-20-1 Neural Net")
plt.xlabel("Epoch")
plt.ylabel("MSE")
plt.plot(epochs, train_errors, 'o-', color="g", label="Training error")
plt.plot(epochs, test_errors, 'o-', color="r", label="Testing error")
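The snippet stops before the figure is rendered; presumably a legend and a show (or savefig) call follow, e.g.:

plt.legend(loc='best')
plt.show()  # or plt.savefig(...) in a headless run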
Example No. 38
nn = buildNetwork(48, 20, 1, bias=True, outclass=SigmoidLayer)
nn.reset()

trainer = BackpropTrainer(nn, train)

training_errors, validation_errors = trainer.trainUntilConvergence()
j = 0
print(
    'training errors ------------------------------------------------------------'
)
for value in training_errors:
    #print(training_errors)
    print("%s %s" % (value, j))
    j += 1
print(
    'validation errors ----------------------------------------------------------'
)
k = 0
for val in validation_errors:
    #print(training_errors)
    print("%s %s" % (val, k))
    k += 1

#for i in xrange(3000):
#   print(trainer.train())

print('Test ----------------------------------------------------------')

trainer.testOnData(test, verbose=True)