Example #1
def getModel(inputSize,hiddenSize1,hiddenSize2,trainData,target):
	fnn = FeedForwardNetwork()
	inLayer = LinearLayer(inputSize,name = 'inLayer')
	hiddenLayer0 = SigmoidLayer(hiddenSize1,name='hiddenLayer0')
	hiddenLayer1 = SigmoidLayer(hiddenSize2,name='hiddenLayer1')
	outLayer = LinearLayer(1,name = 'outLayer')

	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer0)
	fnn.addModule(hiddenLayer1)
	fnn.addOutputModule(outLayer)

	inToHidden0 = FullConnection(inLayer,hiddenLayer0)
	hidden0ToHidden1 = FullConnection(hiddenLayer0,hiddenLayer1)
	hidden1ToHiddenOutput = FullConnection(hiddenLayer1,outLayer)

	fnn.addConnection(inToHidden0)
	fnn.addConnection(hidden0ToHidden1)
	fnn.addConnection(hidden1ToHiddenOutput)

	fnn.sortModules()
	Ds = SupervisedDataSet(inputSize,1)
	scaler = preprocessing.StandardScaler().fit(trainData)
	x = scaler.transform(trainData)
	# print(len(target))
	# print(len(x))
	for i in range(len(target)):
		Ds.addSample(x[i],[target[i]])
	trainer = BackpropTrainer(fnn,Ds,learningrate=0.01,verbose=False)
	trainer.trainUntilConvergence(maxEpochs=1000)
	return fnn
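This snippet leaves out its imports. A minimal sketch of what it assumes, plus a call on toy data (the shapes and hyperparameters here are illustrative only):

from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from sklearn import preprocessing
import numpy as np

# toy regression data: 50 samples, 4 features, 1 target each
trainData = np.random.rand(50, 4)
target = np.random.rand(50)
fnn = getModel(4, 8, 4, trainData, target)  # trains for up to 1000 epochs
# note: getModel standardizes its inputs, so new inputs should be scaled the same way
print(fnn.activate(trainData[0]))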
Example #2
def startTrials(ds, maxTrials = 2, maxExperiments = 2):
	"""start and run the trials"""
	hpCount = []
	for i in range(0, maxExperiments):
		for j in range(0, maxTrials):
			enemyTestPos = runExperiments.makeTestDataset()
			net = NetworkReader.readFrom("net.xml")

			netResults = net.activate([val for pair in normalize(enemyTestPos) for val in pair])
			netIter = iter(netResults)
			allyTestPos = zip(netIter, netIter)
			#undo normalization
			allyTestPos = map(lambda p: (abs(p[0]*640), abs(p[1]*720)), allyTestPos)
			print(allyTestPos)
			runExperiments.writeTestData(allyTestPos)
			runExperiments.run()

			with open("exp_results_raw.txt", "r") as resultsFile:
				lines = resultsFile.readlines()
				if "Zerg_Zergling" in lines[1]:
					x = normalize(enemyTestPos)
					y = normalize(allyTestPos)
					x = [val for pair in x for val in pair]
					y = [val for pair in y for val in pair]
					ds.addSample(x, y)
					lineSplit = lines[1].split("Zerg_Zergling")[-1]
					hpCount.append(lineSplit.split(" ")[2])
		trainer = BackpropTrainer(net, ds)
		trainer.trainUntilConvergence()
	return hpCount
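normalize is defined elsewhere in this project. The abs(p[0]*640), abs(p[1]*720) step that undoes it above suggests it simply scales (x, y) map coordinates into [0, 1]; a plausible sketch:

def normalize(positions):
    # scale (x, y) coordinates by the 640x720 map size,
    # mirroring the *640 / *720 denormalization in startTrials
    return [(x / 640.0, y / 720.0) for (x, y) in positions]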
Example #3
def trainNetwork(inData, numOfSamples, numOfPoints, epochs):
    # Build the dataset
    alldata = createRGBdataSet(inData, numOfSamples, numOfPoints)
    # Split into test and training data
    trndata, tstdata = splitData(alldata)

    # Report  stats
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]

    # Build and train the network
    fnn = buildNetwork( trndata.indim, 256, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.001, verbose=True, weightdecay=0.001)
    #trainer.trainEpochs( epochs )
    trainer.trainUntilConvergence(maxEpochs=epochs)

    # Report results
    trnresult = percentError( trainer.testOnClassData(), trndata['class'] )
    tstresult = percentError( trainer.testOnClassData( dataset=tstdata ), tstdata['class'] )
    print "epoch: %4d" % trainer.totalepochs, \
      "  train error: %5.2f%%" % trnresult, \
      "  test error: %5.2f%%" % tstresult

    # Report results of final network
    checkNeuralNet(trainer, alldata, numOfSamples)
    return fnn
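createRGBdataSet and splitData are project helpers not shown here. Judging from the splitWithProportion / _convertToOneOfMany pattern in Example #49 below, splitData is probably close to this sketch:

def splitData(alldata, proportion=0.25):
    # hold out a fraction of the samples for testing
    tstdata, trndata = alldata.splitWithProportion(proportion)
    # expand class labels to one-hot targets to match the Softmax output
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    return trndata, tstdata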
Example #4
def pybrain_high():
	back=[]
	alldate=New_stock.objects.filter().exclude(name='CIHKY')[0:100]
	wholelen=len(alldate)
	test=New_stock.objects.filter(name__contains="CIHKY")
	testlen=len(test)
	# test dataset
	testdata= SupervisedDataSet(5, 1)
	testwhole=newalldate(test,testlen)
	for i in testwhole:
		testdata.addSample((i[0],i[2],i[3],i[4],i[5]), (0,))	
	# training dataset
	data= SupervisedDataSet(5, 1)
	wholedate=newalldate(alldate,wholelen)
	for i in wholedate:
		data.addSample((i[0],i[2],i[3],i[4],i[5]), (i[1],))
	#print testwhole
	# build the BP neural network
	net = buildNetwork(5, 3, 1,bias=True,hiddenclass=TanhLayer, outclass=SoftmaxLayer)
	
	trainer = BackpropTrainer(net,data)
	trainer.trainEpochs(epochs=100)
	# train and test the network
#	print trainer.train()
	trainer.train()
	print 'ok'
	out=net.activateOnDataset(testdata)
	for j in test:
		back.append(j.high)
	print back
	print out
	backout=backnormal(back,out)
	print 'okokokoko'
	print backout  # print the denormalized test set of 22 values
	return out 
Example #5
def createNet():
	"""Create and seed the intial neural network"""
	#CONSTANTS
	nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
	nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]

	allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()

	ds = SupervisedDataSet(nn_input_dim, nn_output_dim)

	#normalizes and adds it to the dataset
	for i in range(0, len(allyTrainingPos)):
		x = normalize(enemyTrainingPos[i])
		y = normalize(allyTrainingPos[i])
		x = [val for pair in x for val in pair]
		y = [val for pair in y for val in pair]
		ds.addSample(x, y)

	for inpt, target in ds:
		print inpt, target

	net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
	trainer = BackpropTrainer(net, ds)
	trainer.trainUntilConvergence()
	NetworkWriter.writeToFile(net, "net.xml")
	enemyTestPos = runExperiments.makeTestDataset()
	print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
	return ds
Example #6
def trainDataSet():
    cases = Case.objects.exclude(geocode__isnull=True, geocode__grid=-1)

    print "Data Representation"
    ds = SupervisedDataSet(5245, 5245)
    for w in xrange(0,52):
        print "Start week w",
        dataset_input = [0 for i in xrange(0,5245)]
        dataset_output = [0 for i in xrange(0,5245)]
        for i in xrange(0,5245):
            dataset_input[i] = cases.filter(geocode__grid=i, morbidity__week=w).count()
            dataset_output[i] = 1 if (cases.filter(geocode__grid=i, morbidity__week=w+1).count() > 0 or cases.filter(geocode__grid=i, morbidity__week=w+2).count() > 0) else 0
        ds.addSample( (dataset_input), (dataset_output))
        print " - done week w"
    # tstdata, trndata = ds.splitWithProportion(0.25)
    print "Train"
    net = buildNetwork( 5245, 1000, 5245, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=0.1, momentum=0.99)

    terrors = trainer.trainUntilConvergence(verbose = None, validationProportion = 0.33, maxEpochs = 1000, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("data.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 5245):
            print math.floor(output[q]), math.floor(expectedOutput[q])
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = float(count) / 5245
        fo.write("{0} ::  {1}\n".format(count, m))
    fo.close()
Example #7
def run_data():
    with open('new_data2.txt') as data_file:
        data = json.load(data_file)
    ds = SupervisedDataSet(1316, 1316)
    for i in xrange(0, 51):
        print "Adding {}th data sample".format(i),
        input = tuple(data[str(i)]['input'])
        output = tuple(data[str(i)]['output'])        
        # print len(input), len(output)
        ds.addSample( input, output)
        print ":: Done"

    print "Train"
    net = buildNetwork( 1316, 100, 1316, bias=True, )
    trainer = BackpropTrainer(net, ds)

    terrors = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.33, maxEpochs = 20, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("results2.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 1316):
            print output[q], expectedOutput[q]
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = float(count)/1316.00
        print "{0} ::  {1}".format(count, m)
        fo.write("{0} ::  {1}\n".format(count, m))
Example #8
    def train(self):
        '''
        Perform batch regression
        '''
        self.getTrainingData2()
        trainer = BackpropTrainer(self.net, self.ds)
        trainer.train()
    def run(self, ds_train, ds_test):
        """
        This function both trains the ANN and evaluates the ANN using a specified training and testing set
        Args:
        :param ds_train (TweetClassificationDatasetFactory): the training dataset the neural network is trained with.
        :param ds_test (TweetClassificationDatasetFactory): the test dataset evaluated.
        :returns: error (float): the percent error of the test dataset, tested on the neural network.
        """
        ds_train._convertToOneOfMany()
        ds_test._convertToOneOfMany()

        trainer = BackpropTrainer(
            self.network,
            dataset=ds_train,
            momentum=0.1,
            verbose=True,
            weightdecay=0.01)

        trainer.trainUntilConvergence(
            dataset=ds_train,
            maxEpochs=self.max_epochs,
            continueEpochs=self.con_epochs)
        result = trainer.testOnClassData(dataset=ds_test)
        error = percentError(result, ds_test['class'])

        return error
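_convertToOneOfMany() rewrites the dataset's single-column class labels as one-hot target vectors and keeps the original labels in the 'class' field, which is what testOnClassData and percentError compare against. A small self-contained illustration:

from pybrain.datasets import ClassificationDataSet

ds = ClassificationDataSet(2, 1, nb_classes=3)
ds.addSample((0.1, 0.2), (0,))
ds.addSample((0.9, 0.8), (2,))
ds._convertToOneOfMany()
print(ds['target'])  # one-hot rows: [[1, 0, 0], [0, 0, 1]]
print(ds['class'])   # original labels: [[0], [2]]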
Example #10
def big_training(np_data, num_nets=1, num_epoch=20, net_builder=net_full, train_size=.1, testing=False):
    sss = cross_validation.StratifiedShuffleSplit(np_data[:,:1].ravel(), n_iter=num_nets , test_size=1-train_size, random_state=3476)
    nets=[None for net_ind in range(num_nets)]
    trainaccu=[[0 for i in range(num_epoch)] for net_ind in range(num_nets)]
    testaccu=[[0 for i in range(num_epoch)] for net_ind in range(num_nets)]
    net_ind=0
    for train_index, test_index in sss:
        print ('%s Building %d. network.' %(time.ctime(), net_ind+1))
        #print("TRAIN:", len(train_index), "TEST:", len(test_index))
        trainset = ClassificationDataSet(np_data.shape[1] - 1, 1)
        trainset.setField('input', np_data[train_index,1:]/100-.6)
        trainset.setField('target', np_data[train_index,:1])
        trainset._convertToOneOfMany( )
        trainlabels = trainset['class'].ravel().tolist()
        if testing:
            testset = ClassificationDataSet(np_data.shape[1] - 1, 1)
            testset.setField('input', np_data[test_index,1:]/100-.6)
            testset.setField('target', np_data[test_index,:1])
            testset._convertToOneOfMany( )
            testlabels = testset['class'].ravel().tolist()
        nets[net_ind] = net_builder()
        trainer = BackpropTrainer(nets[net_ind], trainset)
        for i in range(num_epoch):
            for ii in range(3):
                err = trainer.train()
            print ('%s Epoch %d: Network trained with error %f.' %(time.ctime(), i+1, err))
            trainaccu[net_ind][i]=accuracy_score(trainlabels,trainer.testOnClassData())
            print ('%s Epoch %d: Train accuracy is %f' %(time.ctime(), i+1, trainaccu[net_ind][i]))
            print ([sum([trainaccu[y][i]>tres for y in range(net_ind+1)]) for tres in [0,.1,.2,.3,.4,.5,.6]])
            if testing:
                testaccu[net_ind][i]=accuracy_score(testlabels,trainer.testOnClassData(testset))
                print ('%s Epoch %d: Test accuracy is %f' %(time.ctime(), i+1, testaccu[net_ind][i]))
        NetworkWriter.writeToFile(nets[net_ind], 'nets/'+net_builder.__name__+str(net_ind)+'.xml')
        net_ind +=1
    return [nets, trainaccu, testaccu]
Example #11
	def computeModel(self, path, user):
		# Create a supervised dataset for training.
		trndata = SupervisedDataSet(24, 1)
		tstdata = SupervisedDataSet(24, 1)
		
		#Fill the dataset.
		for number in range(0,10):
			for variation in range(0,7):
				# Pass all the features as inputs.
				trndata.addSample(self.getSample(user, number, variation),(user.key,))
				
			for variation in range(7,10):
				# Pass all the features as inputs.
				tstdata.addSample(self.getSample(user, number, variation),(user.key,))
				
		# Build the LSTM.
		n = buildNetwork(24, 50, 1, hiddenclass=LSTMLayer, recurrent=True, bias=True)

		# define a training method
		trainer = BackpropTrainer(n, dataset = trndata, momentum=0.99, learningrate=0.00002)

		# carry out the training
		trainer.trainOnDataset(trndata, 2000)
		valueA = trainer.testOnData(tstdata)
		print '\tMSE -> {0:.2f}'.format(valueA)
		self.saveModel(n, r'.\NeuralNets\SavedNet_%d' % (user.key))
		
		return n
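This trains the LSTM on a plain SupervisedDataSet, so every sample is treated as an independent pattern. For genuinely sequential training PyBrain also provides SequentialDataSet; a sketch with the same 24/1 dimensions (random data stands in for the real samples):

from pybrain.datasets import SequentialDataSet
import numpy as np

seq = SequentialDataSet(24, 1)
for _ in range(10):
    seq.newSequence()  # start a new, independent sequence
    for _ in range(7):
        # appendLinked adds one (input, target) step to the current sequence
        seq.appendLinked(np.random.rand(24), (0.0,))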
Example #12
class Classifier():
    def __init__(self, testing = False):
        self.training_set, self.test_set = split_samples(0.5 if testing else 1.0)
        self.net = buildNetwork( self.training_set.indim, self.training_set.outdim, outclass=SoftmaxLayer )
        self.trainer = BackpropTrainer( self.net, dataset=self.training_set, momentum=0.1, verbose=True, weightdecay=0.01)
        self.train()

    def train(self):
        self.trainer.trainEpochs( EPOCHS )
        trnresult = percentError( self.trainer.testOnClassData(),
                                  self.training_set['class'] )
        print "  train error: %5.2f%%" % trnresult

    def classify(self, file):
        strengths = self.net.activate(process_sample(*load_sample(file)))
        print strengths
        best_match = None
        strength = 0.0
        for i,s in enumerate(strengths):
            if s > strength:
                best_match = i
                strength = s
        return SOUNDS[best_match]

    def test(self):
        tstresult = percentError( self.trainer.testOnClassData(
               dataset=self.test_set ), self.test_set['class'] )

        print "  test error: %5.2f%%" % tstresult
Example #13
def trainNetwork(epochs, rate, trndata, tstdata, network=None):
    '''
    epochs: number of iterations to run on dataset
    trndata: pybrain ClassificationDataSet
    tstdata: pybrain ClassificationDataSet
    network: filename of saved pybrain network, or None
    '''
    if network is None:
        net = buildNetwork(400, 25, 25, 9, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)
    else:
        net = NetworkReader.readFrom(network)

    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample input:"
    print trndata['input'][0]
    print ""
    print "First sample target:", trndata['target'][0]
    print "First sample class:", trndata.getClass(int(trndata['class'][0]))
    print ""

    trainer = BackpropTrainer(net, dataset=trndata, learningrate=rate)
    for i in range(epochs):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        print "epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult

    return net
Example #14

def train_neural_network():
    start = time.clock()
    ds = get_ds()

    # split main data to train and test parts
    train, test = ds.splitWithProportion(0.75)

    # build nn with 10 inputs, one hidden layer of 3 neurons, 1 output neuron
    net = buildNetwork(10,3,1, bias=True)

    # use backpropagation algorithm
    trainer = BackpropTrainer(net, train, momentum = 0.1, weightdecay = 0.01)

    # plot error
    trnError, valError = trainer.trainUntilConvergence(dataset = train, maxEpochs = 50)

    plot_error(trnError, valError)

    print "train the model..."
    trainer.trainOnDataset(train, 500)
    print "Total epochs: %s" % trainer.totalepochs

    print "activate..."
    out = net.activateOnDataset(test).argmax(axis = 1)
    percent = 100 - percentError(out, test['target'])
    print "%s" % percent

    end = time.clock()
    print "Time: %s" % str(end-start)
Example #15
class Brain:
	def __init__(self, hiddenNodes = 30):
		# construct neural network 
		self.myClassifierNet = buildNetwork(12, hiddenNodes, 1, bias=True, hiddenclass=TanhLayer) #parameters to buildNetwork are inputs, hidden, output
		# set up dataset
		self.myDataset = SupervisedDataSet(12, 1)
		self.myClassifierTrainer = BackpropTrainer(self.myClassifierNet, self.myDataset)

	def addSampleImageFromFile(self, imageFile, groupId):
		"adds a data sample from an image file, including needed processing"
		myImage = Image.open(imageFile)
		self.myDataset.addSample(twelveToneParallel(myImage), (groupId,))

	def train(self):
		#myClassifierTrainer.trainUntilConvergence() #this will take forever (possibly literally in the pathological case)
		for i in range(0, 15):
			self.myClassifierTrainer.train() #this may result in an inferior network, but in practice seems to work fine

	def save(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'wb')
		pickle.dump(self.myClassifierNet, saveFile)
		saveFile.close()

	def load(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'rb')
		self.myClassifierNet = pickle.load(saveFile)
		saveFile.close()

	def classify(self, fileName):
		myImage = Image.open(fileName)
		if self.myClassifierNet.activate(twelveToneParallel(myImage)) < 0.5:
			return 0
		else:
			return 1
Example #16

def trainNetwork(net, ds, epochs, learningrate = 0.01, momentum=0.4, weightdecay = 0.0):
    trainer = BackpropTrainer(net,
                              dataset=ds,
                              learningrate=learningrate,
                              momentum=momentum,
                              weightdecay=weightdecay)
    trainer.trainEpochs(epochs)
Example #17
def main():
    for stock in STOCK_TICKS:
        # Download Data
        get_data(stock)

        # Import Data
        days = extract_data(stock)
        today = days.pop(0)

        # Make DataSet
        data_set = ClassificationDataSet(INPUT_NUM, 1, nb_classes=2)
        for day in days:
            target = 0
            if day.change > 0:
                target = 1
            data_set.addSample(day.return_metrics(), [target])

        # Make Network
        network = buildNetwork(INPUT_NUM, MIDDLE_NUM, MIDDLE_NUM, OUTPUT_NUM)

        # Train Network
        trainer = BackpropTrainer(network)
        trainer.setData(data_set)
        trainer.trainUntilConvergence(maxEpochs=EPOCHS_MAX)

        # Activate Network
        prediction = network.activate(today.return_metrics())
        print prediction
Example #18
 def neuralNetwork_eval_func(self, chromosome):
     node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
     if self.check_log(node_num, learning_rate, window_size):
         return self.get_means_from_log(node_num, learning_rate, window_size)[0]
     folded_dataset = self.create_folded_dataset(window_size)
     indim = 21 * (2 * window_size + 1)
     mean_AUC = 0
     mean_decision_value = 0
     mean_mcc = 0
     sample_size_over_thousand_flag = False
     for test_fold in xrange(self.fold):
         test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
         if len(test_labels) + len(train_labels) > 1000:
             sample_size_over_thousand_flag = True
         ds = SupervisedDataSet(indim, 1)
         for i in xrange(len(train_labels)):
             ds.appendLinked(train_dataset[i], [train_labels[i]])
         net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
         trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
         trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
         decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
         decision_values = map(lambda x: x[0], decision_values)
         AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
         mean_AUC += AUC
         mean_decision_value += decision_value_and_max_mcc[0]
         mean_mcc += decision_value_and_max_mcc[1]
         if sample_size_over_thousand_flag:
             break
     if not sample_size_over_thousand_flag:
         mean_AUC /= self.fold
         mean_decision_value /= self.fold
         mean_mcc /= self.fold
     self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     return mean_AUC
Example #19

def main():
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	test_set = test.keys()
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
Example #20
 def train(self, data, LRATE, MOMENTUM, ITERATIONS):
     trainer = BackpropTrainer(module=self.net, dataset=data, learningrate=LRATE,
                               momentum=MOMENTUM, lrdecay=0.99999, verbose=True)
     # for i in xrange(0, self.initialization_periods):
     #     self.net.activate(data.getSequence(i)[0])
     print "Training..."
     return trainer.trainUntilConvergence(maxEpochs=ITERATIONS)
Example #21
class NNetwork:
	def __init__(self):
		self.ds = ClassificationDataSet(7, 1, nb_classes=8)  #8 since we have 8 gestures, 7 since we have 7 features
		
	def add_data(self, training_data):
		for gesture in training_data:
			self.ds.addSample(gesture[1], gesture[0])  #a method to add all the training data we have
			
	def newData(self, training_data):   #a method for replacing the data already existing and adding data from scratch
		self.ds = ClassificationDataSet(7, 1, nb_classes=8)
		for gesture in training_data:
			self.ds.addSample(gesture[1], gesture[0])
	
	def train(self, shouldPrint):
		tstdata, trndata = self.ds.splitWithProportion(0.2)  #splits the data into training and verification data
		trndata._convertToOneOfMany()
		tstdata._convertToOneOfMany()
		self.fnn = buildNetwork(trndata.indim, 64, trndata.outdim, outclass=SoftmaxLayer) #builds a network with 64 hidden neurons
		self.trainer = BackpropTrainer(self.fnn, dataset=trndata, momentum=0.1, learningrate=0.01, verbose=True, weightdecay=0.1)
		#uses the backpropagation algorithm
		self.trainer.trainUntilConvergence(dataset=trndata, maxEpochs=100, verbose=True, continueEpochs=10, validationProportion=0.20) #early stopping with 20% as testing data
		trnresult = percentError( self.trainer.testOnClassData(), trndata['class'] )
		tstresult = percentError( self.trainer.testOnClassData(dataset=tstdata ), tstdata['class'] )
		
		if shouldPrint:
			print "epoch: %4d" % self.trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult
	def activate(self, data): #tests a particular data point (feature vector)
		return self.fnn.activate(data)
Example #22
def measuredLearning(ds):

    trndata,tstdata = splitData(ds,.025)

    #build network


    ###
    # This network has one hidden layer of 22 units; you might need to add more
    ###
    fnn = buildNetwork( trndata.indim, 22, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, verbose=True,dataset=trndata)
                               
    ####
    #   Alter this to figure out how many runs you want. Best to start small
    #   and be sure that you see learning before you ramp it up.
    ###
    for i in range(150):
        trainer.trainEpochs(5)
   
        
        trnresult = percentError(trainer.testOnClassData(),trndata['class'] )

        
        tstresult = percentError( trainer.testOnClassData(
           dataset=tstdata ), tstdata['class'] )

        print "epoch: %4d" % trainer.totalepochs, \
            "  train error: %5.2f%%" % trnresult, \
            "  test error: %5.2f%%" % tstresult
        if trnresult < .5:
            return
Example #23
def getErrorPercent(training_dataset, eval_dataset_list, num_hidden, num_epochs):
  num_datapoints = len(training_dataset)
  num_inputs = len(training_dataset[0][0])
  num_outputs = len(training_dataset[0][1])

  # print "Num Inputs:", num_inputs
  # print "Num Outputs:", num_outputs
  # print "Num Hidden Nodes:", num_hidden

  NN = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

  dataset = SupervisedDataSet(num_inputs, num_outputs)
  for datapoint in training_dataset:
    dataset.addSample(datapoint[0], datapoint[1])


  trainer = BackpropTrainer(NN, dataset=dataset, momentum=0.0, verbose=False, weightdecay=0.0)

  for epoch in range(0, num_epochs):
    #print epoch 
    trainer.train()

  errors = []
  for eval_set in eval_dataset_list:
    total_percent_errors = [0]*num_outputs
    for jj in range(0, len(eval_set)):
      nn_out = NN.activate(eval_set[jj][0])
      percent_error = computeError(eval_set[jj][1], nn_out)
      #print percent_error
      total_percent_errors = map(operator.add, percent_error, total_percent_errors)
    #print total_percent_errors
    errors.append(map(operator.div, total_percent_errors, [len(eval_set)]*num_outputs))
  #print errors
  return errors
Example #24
def train(data):
	"""
	See http://www.pybrain.org/docs/tutorial/fnn.html

	Returns a neural network trained on the given training data.

	Parameters:
	  data - A ClassificationDataSet for training.
	         Should not include the test data.
	"""
	network = buildNetwork(
		# This is where we specify the architecture of
		# the network.  We can play around with different
		# parameters here.
		# http://www.pybrain.org/docs/api/tools.html
		data.indim, 5, data.outdim,
		hiddenclass=SigmoidLayer,
		outclass=SoftmaxLayer
	)

	# We can fiddle around with this guy's options as well.
	# http://www.pybrain.org/docs/api/supervised/trainers.html
	trainer = BackpropTrainer(network, dataset=data)
	trainer.trainUntilConvergence(maxEpochs=20)

	return network
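A usage sketch for train (the four samples below are made up for illustration; any ClassificationDataSet with one-hot targets works):

from pybrain.datasets import ClassificationDataSet

data = ClassificationDataSet(2, 1, nb_classes=2)
for inp, cls in [((0.2, 0.1), 0), ((0.1, 0.3), 0), ((0.8, 0.9), 1), ((0.9, 0.7), 1)]:
    data.addSample(inp, (cls,))
data._convertToOneOfMany()  # one-hot targets to match the Softmax output

network = train(data)
print(network.activate((0.5, 0.5)))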
Example #25
def NeuralNetwork(tRiver, qRiver, pRiver, TRiver, qnewRiver, pnewRiver, TnewRiver):
    # build neural network with 10 inputs for historic flux data, 3 for the last 3 temperature values,
    # 3 for the last 3 precipitation values, a hidden layer the same size as the input
    # (to hinder over-specialization), and 3 output neurons (flux for the next day,
    # first derivative, second derivative)

    Ndim = 10+3+3
    Nout = 3
    net = buildNetwork(Ndim, Ndim, Nout, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(Ndim, Nout)

    # next big job: find data values to build up library of training set
    for t in range(20, len(tRiver)-3):
        input_flow = qRiver[t-20:t:2]
        input_prec = pRiver[t-3:t]
        input_temp = TRiver[t-3:t]
        input_vec = np.hstack([input_flow, input_prec, input_temp])

        output_flow = np.hstack([qRiver[t:t+3]]) # first approx, split later for long predictions
        ds.addSample(input_vec, output_flow)

    trainer = BackpropTrainer(net, ds)
    #trainer.train()
    trainer.trainUntilConvergence()

    # now call it repeatedly on the second set

    prediction = net.activate(np.hstack([qnewRiver[:20:2], pnewRiver[:3], TnewRiver[:3]]))
    return prediction
Example #26

    def run(self, fold, X_train, y_train, X_test, y_test):
        DS_train, DS_test = ClassificationData.convert_to_DS(
            X_train,
            y_train,
            X_test,
            y_test)

        NHiddenUnits = self.__get_best_hu(DS_train)
        fnn = buildNetwork(
            DS_train.indim,
            NHiddenUnits,
            DS_train.outdim,
            outclass=SoftmaxLayer,
            bias=True)

        trainer = BackpropTrainer(
            fnn,
            dataset=DS_train,
            momentum=0.1,
            verbose=False,
            weightdecay=0.01)

        trainer.trainEpochs(self.epochs)
        tstresult = percentError(
            trainer.testOnClassData(dataset=DS_test),
            DS_test['class'])

        print "NN fold: %4d" % fold, "; test error: %5.2f%%" % tstresult
        return tstresult / 100.0
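ClassificationData.convert_to_DS is a project helper that is not shown. By analogy with the dataset handling elsewhere on this page (e.g. Example #33), it plausibly wraps the arrays like this:

from pybrain.datasets import ClassificationDataSet

def convert_to_DS(X_train, y_train, X_test, y_test):
    # hypothetical reconstruction: wrap the arrays in ClassificationDataSets
    # and one-hot encode the targets for the Softmax output layer
    DS_train = ClassificationDataSet(X_train.shape[1], 1)
    for x, y in zip(X_train, y_train):
        DS_train.appendLinked(x, [y])
    DS_test = ClassificationDataSet(X_test.shape[1], 1)
    for x, y in zip(X_test, y_test):
        DS_test.appendLinked(x, [y])
    DS_train._convertToOneOfMany()
    DS_test._convertToOneOfMany()
    return DS_train, DS_test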
Example #27

def move_function(board):
    global net  
    best_max_move = None 
    max_value = -1000
    best_min_move = None
    min_value = 1000

    #value is the chance of black winning
    for m in board.get_moves():
        nextboard = board.peek_move(m)
        value = net.activate(board_to_input(nextboard))
        if value > max_value: 
            max_value = value
            best_max_move = m 
        if value < min_value:
            min_value = value
            best_min_move = m

    ds = SupervisedDataSet(97, 1)
    best_move = None 

    #active player
    if board.active == BLACK:
        ds.addSample(board_to_input(board), max_value)
        best_move = best_max_move
    elif board.active == WHITE: 
        ds.addSample(board_to_input(board), min_value)
        best_move = best_min_move

    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml') 
    return best_move 
Example #28
    def train(self):
        trainer = BackpropTrainer(self.network, self.data_set)

        trainer.trainUntilConvergence(
            verbose=False, validationProportion=0.15, maxEpochs=1000, continueEpochs=10)

        return trainer
Example #29
def nnTest(tx, ty, rx, ry, iterations):
    print "NN start"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())

    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(16, 16, 1, bias=True)
    ds = ClassificationDataSet(16, 1, class_labels=["1", "0"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.05)
    validator = CrossValidator(trainer, ds, n_folds=10)
    print validator.validate()
    for i in positions:
        print trainer.train()
        resultst.append(sum((np.array([round(network.activate(test)) for test in tx]) - ty)**2)/float(len(ty)))
        resultsr.append(sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry)))
        print i, resultst[i], resultsr[i]
    plt.plot(positions, resultst, 'g-', positions, resultsr, 'r-')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('nn.png', dpi=500)
    print "NN end"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())
Example #30
 def parse_and_train(self):
     f = open(self.file,'r')
     learn_lines = []
     for line in f:
         if line.strip() != '':
             learn_lines.append(line)
     i = 0
     f.close()
     while i < len(learn_lines):
         ins, outs = self.convert_to_tuple(learn_lines[i],learn_lines[i+1])
         i += 2
         self.ds.addSample(ins,outs)
     self.nn = buildNetwork(self.ios,self.hns,25,self.ios)
     #self.train_dat, self.test_dat = self.ds.splitWithProportion(0.75)
     self.train_dat = self.ds
     trnr = BackpropTrainer(self.nn,dataset=self.train_dat,momentum=0.1,verbose=False,weightdecay=0.01)
     i = 150
     trnr.trainEpochs(150)
     while i < self.epochs:
         trnr.trainEpochs(50)
         i += 50
         print 'For epoch ' + str(i)
         print 'For train:'
         self.print_current_error()
         #print 'For test:'
         #self.print_validation()
     self.nn.sortModules()
Example #31
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets import SupervisedDataSet
import csv

header = []
trainingData = []
price = []

with open('../../data/working1.csv') as f:

    f = csv.reader(f)
    header = next(f)

    for row in f:
        price.append(float(row[3]))
        del row[3:5]
        trainingData.append(map(float, row))

ds = SupervisedDataSet(len(trainingData[0]), 1)

for i in range(len(price)):
    ds.addSample(trainingData[i], price[i])

net = buildNetwork(len(trainingData[0]), 20, 1)
trainer = BackpropTrainer(net, ds)

trainer.train()
print net.activate((1763.3538916308, 5753.496311597, 2092.2486098239, 0, 4, 3,
                    0, 3, 0, 450, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0))
Example #32
net.sortModules()

# create a dataset object, make output Y a softmax matrix
allData = SupervisedDataSet(n, numLabels)
Y2 = convertToOneOfMany(Y)

# add data samples to dataset object, both ways are correct
allData.setField('input', X_train)
allData.setField('target', Y2)

#separate training and testing data
dataTrain = allData

# create object for training
train = BackpropTrainer(net,
                        dataset=dataTrain,
                        learningrate=0.03,
                        momentum=0.3)

# evaluate correct output for trainer
#trueTrain = dataTrain['target'].argmax(axis=1)

# train step by step
EPOCHS = 2
#size = EPOCHS
#accTrain = zeros(size)

train.trainEpochs(EPOCHS)
# i = 0
# for i in range(EPOCHS):
#     train.trainEpochs(1)
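convertToOneOfMany(Y) here is the author's own helper, not the dataset method _convertToOneOfMany() used in other examples. It presumably one-hot encodes integer labels, along these lines:

import numpy as np

def convertToOneOfMany(Y, numLabels=None):
    # hypothetical: turn a vector of integer labels into a one-hot matrix
    Y = np.asarray(Y, dtype=int).ravel()
    if numLabels is None:
        numLabels = Y.max() + 1
    out = np.zeros((len(Y), numLabels))
    out[np.arange(len(Y)), Y] = 1
    return out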
Example #33
    def model_net(self, fields, datas=None):
        # Normalize the data first so that large values do not drown out small ones.
        # pandas DataFrame basics: https://www.jianshu.com/p/682c24aef525
        # On normalization: https://www.zhihu.com/question/57509028
        # Standardization vs. normalization: https://www.zhihu.com/question/20467170
        # fit_transform() vs. transform() in sklearn preprocessing: http://blog.csdn.net/quiet_girl/article/details/72517053
        # Worth a closer look at how these are implemented.
        from sklearn.preprocessing import MinMaxScaler
        from pybrain.structure import SoftmaxLayer
        from pybrain.datasets import ClassificationDataSet
        from pybrain.tools.shortcuts import buildNetwork
        from pybrain.supervised.trainers import BackpropTrainer
        from pybrain.utilities import percentError
        from pybrain.structure import TanhLayer

        scaler = MinMaxScaler()
        datas[fields] = scaler.fit_transform(datas[fields])

        tran_data = datas[fields].values
        tran_target = datas['Flag'].values
        tran_label = ['Sell', 'Hold', 'Buy']

        class_datas = ClassificationDataSet(6,
                                            1,
                                            nb_classes=3,
                                            class_labels=tran_label)
        print(type(tran_target))
        print(tran_target)
        for i in range(len(tran_data)):
            class_datas.appendLinked(tran_data[i], tran_target[i])

        tstdata_temp, trndata_temp = class_datas.splitWithProportion(0.25)

        print(len(tstdata_temp), len(trndata_temp))

        tstdata = ClassificationDataSet(6,
                                        1,
                                        nb_classes=3,
                                        class_labels=tran_label)
        trndata = ClassificationDataSet(6,
                                        1,
                                        nb_classes=3,
                                        class_labels=tran_label)

        for n in range(0, trndata_temp.getLength()):
            trndata.appendLinked(
                trndata_temp.getSample(n)[0],
                trndata_temp.getSample(n)[1])

        for n in range(0, tstdata_temp.getLength()):
            tstdata.appendLinked(
                tstdata_temp.getSample(n)[0],
                tstdata_temp.getSample(n)[1])

        tstdata._convertToOneOfMany()
        trndata._convertToOneOfMany()

        tnet = buildNetwork(trndata.indim,
                            5,
                            trndata.outdim,
                            hiddenclass=TanhLayer,
                            outclass=SoftmaxLayer)
        trainer = BackpropTrainer(tnet,
                                  dataset=trndata,
                                  batchlearning=True,
                                  momentum=0.1,
                                  verbose=True,
                                  weightdecay=0.01)

        for i in range(5000):
            trainer.trainEpochs(20)
            trnresult = percentError(trainer.testOnClassData(),
                                     trndata['class'])
            testResult = percentError(trainer.testOnClassData(dataset=tstdata),
                                      tstdata['class'])
            print("epoch: %4d" % trainer.totalepochs, \
                  "  train error: %5.2f%%" % trnresult, \
                  "  test error: %5.2f%%" % testResult)

        return trainer, class_datas
Example #34
#split data into test data and training data
tstdata, trndata = alldata.splitWithProportion(0.25)

#build network
fnn = buildNetwork(trndata.indim,
                   100,
                   trndata.outdim,
                   outclass=SigmoidLayer,
                   fast=True)
#print fnn

#build trainer
trainer = BackpropTrainer(fnn,
                          dataset=trndata,
                          momentum=0.0,
                          verbose=True,
                          weightdecay=0.0)

#start training
trainer.trainUntilConvergence(maxEpochs=1)
#trainer.trainEpochs( 5 )

#save the network using cpickle
cPickle.dump(fnn, fonet)
fonet.close()
out = fnn.activateOnDataset(tstdata)

#print out
print type(out)
out2 = (out > 0.5)
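out2 holds boolean predictions thresholded at 0.5. One way to turn that into an accuracy figure, assuming tstdata carries 0/1 targets:

import numpy as np

correct = (out2 == (tstdata['target'] > 0.5))
print np.mean(correct)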
Example #35
fnn.modules
print("建立数据集")
# 建立数据集
iris = load_iris()
train, test, train_label, test_label = train_test_split(iris.data, iris.target)
ds = SupervisedDataSet(4, 1)
for i in range(len(train)):
    ds.addSample(train[i], train_label[i])
len(ds)
for inpt, target in ds:
    print(inpt, target)
    
print("构造bp")
# 构造bp训练节
trainer = BackpropTrainer(fnn, ds, 
                          momentum = 0.1, 
                          verbose = True, 
                          weightdecay = 0.01)
print("开始训练")
trainer.trainEpochs(epochs = 50)

print("开始返回结果")
out = SupervisedDataSet(4, 1)
for i in range(len(test)):
    temp = 0
    out.addSample(test[i], temp)
pred = fnn.activateOnDataset(out)
pred_p = np.round(pred, 2)
print(test_label)
print(pred_p)
## evaluate the model's predictive performance with mean squared error
mean_squared_error(test_label, pred_p)
Example #36
print(rede['hidden0'])
print(rede['out'])
print(rede['bias'])

# 2 predictor attributes + 1 class
base = SupervisedDataSet(2, 1)

#XOR
base.addSample((0, 0), (0, ))
base.addSample((0, 1), (1, ))
base.addSample((1, 0), (1, ))
base.addSample((1, 1), (0, ))

print(base['input'])  # predictors
print(base['target'])  # class

treinamento = BackpropTrainer(rede,
                              dataset=base,
                              learningrate=0.01,
                              momentum=0.06)

for i in range(1, 1001):
    erro = treinamento.train()
    if i % 1000 == 0:
        print("Error: %s" % erro)

print(rede.activate([0, 0]))
print(rede.activate([0, 1]))
print(rede.activate([1, 0]))
print(rede.activate([1, 1]))
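rede is built before this excerpt. The 'hidden0', 'out' and 'bias' module names are exactly what PyBrain's buildNetwork assigns, so it was most likely created along these lines (the hidden-layer size is a guess):

from pybrain.tools.shortcuts import buildNetwork

rede = buildNetwork(2, 3, 1, bias=True)  # 2 inputs, 3 hidden units, 1 output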
Example #37
def predict_ball(hidden_nodes, is_elman=True, training_data=5000, epoch=-1, parameters={}, predict_count=128):

    # build rnn
    n = construct_network(hidden_nodes, is_elman)

    # make training data
    ep = 1 if epoch < 0 else epoch
    initial_v = ball_data.gen_velocity(BOX_SIZE)
    data_set = ball_data.bounce_ball((training_data + 1) * ep, BOX_SIZE, None, initial_v=initial_v)
    total_avg = np.average(data_set, axis=0)
    total_std = np.std(data_set, axis=0)
    # initial_p = data_set[np.random.choice(range(training_data))][:2]

    training_ds = []
    normalized_d = __normalize(data_set)
    for e_index in range(ep):
        t_ds = SupervisedDataSet(4, 4)
        e_begin = e_index * training_data
        for j in range(e_begin,  e_begin + training_data):
            # from current, predict next
            p_in = normalized_d[j].tolist()
            p_out = normalized_d[j + 1].tolist()
            t_ds.addSample(p_in, p_out)

        training_ds.append(t_ds)

    del data_set  # release memory

    # training network
    err1 = 0
    if epoch < 0:
        trainer = BackpropTrainer(n, training_ds[0], **parameters)
        err1 = trainer.train()
    else:
        trainer = BackpropTrainer(n, **parameters)
        epoch_errs = []
        for ds in training_ds:
            trainer.setData(ds)
            epoch_errs.append(trainer.train())

        err1 = max(epoch_errs)

    del training_ds  # release memory

    # predict
    initial_p = ball_data.gen_position(BOX_SIZE)
    predict = None
    next_pv = np.hstack((initial_p, initial_v))

    n.reset()
    for i in range(predict_count):
        predict = next_pv if predict is None else np.vstack((predict, next_pv))

        p_normalized = (next_pv - total_avg) / total_std
        next_pv = n.activate(p_normalized.tolist())
        restored = np.array(next_pv) * total_std + total_avg
        next_pv = restored

    real = ball_data.bounce_ball(predict_count, BOX_SIZE, initial_p, initial_v)
    err_matrix = (predict - real) ** 2
    err_distance = np.sqrt(np.sum(err_matrix[:, 0:2], axis=1)).reshape((predict_count, 1))
    err_velocity = np.sum(np.sqrt(err_matrix[:, 2:4]), axis=1).reshape((predict_count, 1))
    err2 = np.hstack((err_distance, err_velocity))

    return predict, real, err1, err2
Example #38
#Preparing the Classification dataset
test_data = ClassificationDataSet(784, 1, nb_classes=10)
training_data = ClassificationDataSet(784, 1, nb_classes=10)

for n in range(0, testdata.getLength()):
    test_data.addSample( testdata.getSample(n)[0], testdata.getSample(n)[1])
for n in range(0, traindata.getLength()):
    training_data.addSample( traindata.getSample(n)[0], traindata.getSample(n)[1])

#Converting to One-hot encoding
test_data._convertToOneOfMany()
training_data._convertToOneOfMany()


#Building the network
net = buildNetwork(training_data.indim, 200, training_data.outdim,outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, dataset=training_data, momentum=0.1,learningrate=0.01,verbose=True,weightdecay=0.01)

#For printing the network
print(net)

#Training the network on 20 epochs
trainee,validation = trainer.trainUntilConvergence(dataset=training_data,maxEpochs=20)


#Accuracy using scikit-learn
from sklearn.metrics import accuracy_score
print ("Accuracy on test set: %7.4f" % accuracy_score(trainer.testOnClassData(dataset=test_data), test_data['class'], normalize=True))

Example #39
lmin = label.min()
lmea = label.mean()
label = (label - lmin) * 1.0 / (lmax - lmin)
#label = (label - lmea) * 2.0 / (lmax - lmin)
ds = SupervisedDataSet(16, 1)
#print(type(train[0]))
for i in range(len(train)):
    ds.addSample([
        train[i], train2[i], train3[i], train4[i], train5[i], train6[i],
        train7[i], train8[i], train9[i], train10[i], train11[i], train12[i],
        train13[i], train14[i], train15[i], train16[i]
    ], [label[i]])
x = ds['input']
y = ds['target']
#print(ds)
trainer = BackpropTrainer(fnn, ds, verbose=True, learningrate=0.05)
trainer.trainEpochs(epochs=1000)  # number of iterations
'''
train = np.hstack((train, np.array([0.36])))  # append a value for independent variable 1
print(train)
train2 = np.hstack((train2, np.array([6.75])))  # append a value for independent variable 2
train3 = np.hstack((train3, np.array([23.25])))  # append a value for independent variable 3
train4 = np.hstack((train4, np.array([17.775])))  # append a value for independent variable 4
train5 = np.hstack((train5, np.array([5.85])))  # append a value for independent variable 5
train6 = np.hstack((train6, np.array([77.55])))  # append a value for independent variable 6
label = np.hstack((label, np.array([2.739454094])))  # append the dependent variable (403 = 4*100+3)
'''
sheet2 = read_val()
add = np.array(sheet2.col_values(0))
add2 = np.array(sheet2.col_values(1))
add3 = np.array(sheet2.col_values(2))
Example #40
from pybrain.datasets import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle

if __name__ == "__main__":
    ds = SupervisedDataSet(2, 1)
    ds.addSample((0, 0), (0, ))
    ds.addSample((0, 1), (1, ))
    ds.addSample((1, 0), (1, ))
    ds.addSample((1, 1), (0, ))

    net = buildNetwork(2, 4, 1, bias=True)

    # try:
    #     f = open('_learned', 'r')
    #     net = pickle.load(f)
    #     f.close()
    # except:
    trainer = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
    trainer.trainOnDataset(ds, 3000)
    trainer.testOnData()
    # f = open('_learned', 'w')
    # pickle.dump(net, f)
    # f.close()

    print net.activate((1, 1))
    print net.activate((1, 0))
    print net.activate((0, 1))
    print net.activate((0, 0))
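The four activations above are floats near 0 and 1; rounding them recovers the XOR truth table:

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print a, 'XOR', b, '=', int(round(net.activate((a, b))[0]))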
Example #41

    def __init__(self):
        self.net = buildNetwork(2, 4, 2, bias=True)
        self.net.randomize()
        print self.net
        self.ds = SupervisedDataSet(2, 2)
        self.trainer = BackpropTrainer(self.net, self.ds, learningrate = 0.1, momentum=0.99)
Example #42
        sample_path = r'E:\APC\sample_test\data_test.npz'

        X, y = Get_data(sample_path)

        num_features = X.shape[-1]
        num_classes = len(set(y))

        fnn = fast_fnn(num_features, num_classes, num_hidden_ep)
        data = norm_data(X, y)

        tsd, trd = data.splitWithProportion(0.25)
        print len(trd)
        #trainer = BackpropTrainer(fnn, trd, verbose=True)

        trainer = BackpropTrainer(fnn, trd, verbose=True, weightdecay=0.01, learningrate=0.001)

        trainer.trainUntilConvergence(maxEpochs=100)
        tstresult = percentError(trainer.testOnClassData(dataset=tsd),
                                 tsd['class'])
        print "epoch: %4d" % trainer.totalepochs, \
              "  test error: %5.2f%%" % tstresult

        np.save(model_path, fnn.params)

    if TEST_MODEL:

        test_path = r'E:\APC\sample_test\0'
        vector_stub_path = r'e:\APC\feature\canditate.vec'
        vec_stu = stub.read(vector_stub_path)
        test_files = os.listdir(test_path)
Example #43
from pybrain.datasets import SupervisedDataSet

training = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_train.shape[0]):
    training.addSample(X_train[i], y_train[i])

testing = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_test.shape[0]):
    testing.addSample(X_test[i], y_test[i])

from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(X.shape[1], 100, y.shape[1], bias=True)

from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, training, learningrate=0.01, weightdecay=0.01)
start2 = time.time()
trainer.trainEpochs(epochs=20)
end2 = time.time()

predictions = trainer.testOnClassData(dataset=testing)
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(
    f1_score(predictions, y_test.argmax(axis=1), average="micro")))


def predict_captcha(captcha_image, neural_network):
    subimages = segment_image(captcha_image)
    predicted_word = ""
    for subimage in subimages:
        subimage = resize(subimage, (20, 20))
Example #44
ds.addSample((0, 0), (0))
ds.addSample((0, 1), (1))
ds.addSample((1, 0), (1))
ds.addSample((1, 1), (0))
ds.addSample((0, 0), (0))
ds.addSample((0, 1), (1))
ds.addSample((1, 0), (1))
ds.addSample((1, 1), (0))
ds.addSample((0, 0), (0))
ds.addSample((0, 1), (1))
ds.addSample((1, 0), (1))
ds.addSample((1, 1), (0))
ds.addSample((0, 0), (0))
ds.addSample((0, 1), (1))
ds.addSample((1, 0), (1))
ds.addSample((1, 1), (0))

red_neuronal = buildNetwork(2, 4, 1, bias=True)
entrenador = BackpropTrainer(red_neuronal,
                             ds,
                             learningrate=0.01,
                             momentum=0.99)
print("Training, please wait...")
entrenador.trainUntilConvergence(maxEpochs=10000,
                                 verbose=False,
                                 validationProportion=0.5)
print("Training finished!")
print '(0,0) :', red_neuronal.activate([0, 0])
print '(0,1) :', red_neuronal.activate([0, 1])
print '(1,0) :', red_neuronal.activate([1, 0])
print '(1,1) :', red_neuronal.activate([1, 1])
Example #45
    else:
        Y.append(1)
dataset = pd.DataFrame(zip(X1, X2, Y), columns=["X1", "X2", "Y"])

X_train = dataset[["X1", "X2"]]
Y_train = dataset[["Y"]]
'''Neural network architecture'''
net = buildNetwork(
    2, 4, 1
)  #### neurons in the input layer, the hidden layer, and the output layer
ds = SupervisedDataSet(2, 1)
ds.setField('input', X_train)
ds.setField('target', Y_train)
plt.scatter(ds['input'][:, 0], ds['input'][:, 1], c=ds['target'], linewidths=0)
'''Train the neural network'''
trainer = BackpropTrainer(net, ds)
trainer.trainEpochs(500)
'''Create test data'''
ntest = 1000
X1test = map(random.uniform, [0] * ntest, [1] * ntest)
X2test = map(random.uniform, [0] * ntest, [1] * ntest)
Ytest = []
for i in range(ntest):
    if (X1test[i] > 0.5 and X2test[i] > 0.5) or (X1test[i] <= 0.5
                                                 and X2test[i] <= 0.5):
        Ytest.append(0)
    else:
        Ytest.append(1)
test = pd.DataFrame(zip(X1test, X2test, Ytest), columns=["X1", "X2", "Y"])
'''Plot the test data'''
plt.scatter(test['X1'], test['X2'], c=test['Y'], linewidths=0)
Example #46
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    train_errors = []
    test_errors = []

    # List all the different networks we want to test
    net = (buildNetwork(trndata.indim,
                        8,
                        trndata.outdim,
                        bias=True,
                        hiddenclass=SoftmaxLayer,
                        outclass=SoftmaxLayer))

    # Setup a trainer that will use backpropogation for training
    trainer = BackpropTrainer(net,
                              dataset=trndata,
                              learningrate=0.001,
                              verbose=True)

    for i in range(max_epochs):
        error = trainer.train()
        print "Epoch: %d, Error: %7.4f" % (i, error)
        train_errors.append(
            percentError(trainer.testOnClassData(), trndata['class']))

        test_errors.append(
            percentError(trainer.testOnClassData(dataset=tstdata),
                         tstdata['class']))

    # Plot training and test error as a function of the number of epochs
    pl.figure()
    pl.title('Neural Networks: Performance vs Epochs')
Example #47
with open('matches.txt') as f:
    matches = json.load(f)

teams = []
with open('teams.txt') as f:
    teams = json.load(f)

ds = SupervisedDataSet(len(teams), 2)
for match in matches:
    inputs = [0] * len(teams)
    inputs[teams.index(match['home'])] = 1
    inputs[teams.index(match['away'])] = -1
    if match['home_score'] > factor or match['away_score'] > factor:
        print('skip: ', match['home'], match['away'], match['home_score'],
              match['away_score'])
        continue
    outputs = [normalize(match['home_score']), normalize(match['away_score'])]
    ds.addSample(inputs, outputs)

net = buildNetwork(len(teams), len(teams), 2)
trainer = BackpropTrainer(net, ds)

de = 1.0
error = 0.0
while run:
    tmp = error
    error = trainer.train()
    de = error - tmp
    print(error, de)

NetworkWriter.writeToFile(net, 'net.xml')
Example #48
Created on Fri Sep 08 23:08:48 2017

@author: pedro
"""

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)  # Dataset with 2 inputs and 1 output
ds.addSample((0.8, 0.4), (0.7))  # Studied 8 hours, slept 4 hours, scored 7
ds.addSample((0.5, 0.7), (0.5))  # Studied 5 hours, slept 7 hours, scored 5
ds.addSample((1.0, 0.8), (0.95))  # Studied 10 hours, slept 8 hours, scored 9.5

nn = buildNetwork(2, 4, 1, bias=True)  # 2 inputs, 4 hidden, 1 output

trainer = BackpropTrainer(nn, ds)

for i in xrange(2000):
    print trainer.train()

while True:
    #dormiu = float(raw_input('Hours slept: '))
    #estudou = float(raw_input('Hours studied: '))
    dormiu = input('Hours slept: ')
    estudou = input('Hours studied: ')

    resultado = nn.activate((dormiu * 0.1, estudou * 0.1))
    #resultado = nn.activate((dormiu, estudou))[0] * 10

    print 'Predicted grade: ', str(resultado)
Example #49
tstdata, trndata = alldata.splitWithProportion( 0.25 )
trndata._convertToOneOfMany( )
tstdata._convertToOneOfMany( )

print "____________________________________________________________________________"
print "Number of training patterns: ", len(trndata)
print "Input and output dimensions: ", trndata.indim, trndata.outdim
print "First sample (input, target, class):"
print trndata['input'][0], trndata['target'][0], trndata['class'][0]
print "____________________________________________________________________________\n"



fnn = buildNetwork( trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer )
trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

ticks = arange(-3.,6.,0.2)
X, Y = meshgrid(ticks, ticks)
# need column vectors in dataset, not arrays
griddata = ClassificationDataSet(2,1, nb_classes=3)
for i in xrange(X.size):
    griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])
griddata._convertToOneOfMany()  # this is still needed to make the fnn feel comfy

for i in range(2):
    trainer.trainEpochs( 1 )

for i in xrange(0,len(tstdata)):

    a=int(tstdata['target'][i][0])
Example #50
    i2 = float(separado[i][8])
    j = float(separado[i][9])
    k = float(separado[i][10])
    l = float(separado[i][11])
    # m = float(separado[i][12])
    #n = float(separado[i][13])
    #o = float(separado[i][14])
    #p = float(separado[i][15])
    #r = float(separado[i][16])
    s = float(separado[i][12])

    ds.addSample((a, b, c, d, e, f, g, h, i2, j, k, l), (s))

nn = buildNetwork(12, 4, 1, bias=True)  # 12 inputs, 4 hidden, 1 output

trainer = BackpropTrainer(nn, ds)
curva_treinamento = []
for i in xrange(2000):
    curva_treinamento.append(trainer.train())
    print 'Epoca: ', i, 'Erro: ', curva_treinamento[i]
#plt.plot(curva_treinamento)

while True:
    #dormiu = float(raw_input('Hours slept: '))
    #estudou = float(raw_input('Hours studied: '))
    dormiu = input('Hours slept: ')
    estudou = input('Hours studied: ')

    resultado = nn.activate((dormiu * 0.1, estudou * 0.1))
    #resultado = nn.activate((dormiu, estudou))[0] * 10
Example #51
 def train(self):
     '''
     Perform batch regression
     '''
     trainer = BackpropTrainer(self.net, self.ds)
     trainer.train()
Example #52
train = np.loadtxt(train_file, delimiter=',')
validation = np.loadtxt(validation_file, delimiter=',')
train = np.vstack((train, validation))
x_train = train[:, 0:-1]
y_train = train[:, -1]
y_train = y_train.reshape(-1, 1)

input_size = x_train.shape[1]
target_size = y_train.shape[1]

ds = SDS(input_size, target_size)
ds.setField('input', x_train)
ds.setField('target', y_train)

net = buildNetwork(input_size, hidden_size, target_size, bias=True)
trainer = BackpropTrainer(net, ds, verbose=True)

print "training for {} epochs...".format(epochs)

# for inp, tar in ds:
#     print [net.activate(inp), tar]

# trainer.trainUntilConvergence(maxEpochs=1000, continueEpochs=10, validationProportion=0.1)

for inp, tar in ds:
    print[net.activate(inp), tar]

for i in range(epochs):
    mse = trainer.train()
    rmse = sqrt(mse)
    print "training RMSE, epoch {}: {}".format(i + 1, rmse)
Example #53

from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit
from pybrain.structure import FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

neuralNetwork = buildNetwork(2,3,1, bias=True)

dataset = SupervisedDataSet(2,1)

dataset.addSample((0,0),(0))
dataset.addSample((1,0),(1))
dataset.addSample((0,1),(1))
dataset.addSample((1,1),(0))


trainer = BackpropTrainer(neuralNetwork, dataset=dataset , learningrate=0.01, momentum=0.06)

for i in range(1,10000):
    
    error=trainer.train()
    
    if i%1000 == 0:
        print("Error in iteration ", i, "is" , error)
        print(neuralNetwork.activate([0,0]))
        print(neuralNetwork.activate([0,1]))
        print(neuralNetwork.activate([1,0]))
        print(neuralNetwork.activate([1,1]))
        
        
        
print("\n Final Error and Final Result \n")
Example #54
def train(fn, model):
    series_name = re.search(r'(\(.+\))', fn).group(1)
    pdata = pd.read_csv(fn, parse_dates=['date'])
    series_orig = series_orig_data[series_name]
    series_min = series_orig.min()
    series_max = series_orig.max()


    #print 'Features:'
    #print pdata.columns

    data = pdata.as_matrix()
    

    inputSize = data.shape[1]-2 # minus output and index
    outputSize = 1
    
    print 'fn:',fn
    print 'model:',model
    print 'inputSize:', inputSize

    if len(model) == 1:
        net = buildNetwork(inputSize, model[0], 3, 1)
    elif len(model) == 2:
        net = buildNetwork(inputSize, model[0], model[1], 1)
    else:
        assert len(model) == 3
        net = buildNetwork(inputSize, model[0], model[1], model[2], 1)

    ds = SupervisedDataSet(inputSize, outputSize)
    train_ds = SupervisedDataSet(inputSize, outputSize)
    test_ds = SupervisedDataSet(inputSize, outputSize)
    for i, (input, target) in enumerate(zip(data[:,1:-1], data[:,-1])):
        ds.addSample(input, target)
        if i < .85 * len(data):
            train_ds.addSample(input, target)
        else:
            test_ds.addSample(input, target)
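    # samples stay in file order, so this is a chronological split: the
    # first 85% train, the final 15% form the held-out test tail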
    
    print 'variance of the test data:', np.var(test_ds['target'][:,0])

    print 'starting'

    full_date_range = pd.date_range(date_start, date_end, freq=hare_freq)
    #predicted_date_range = pd.date_range(date_start, date_end, freq=tortoise_freq)
    
    # XXX hardcoded
    #predicted_date_range = predicted_date_range[9:]
    predicted_date_range = pdata['date']

    full_date_range = full_date_range[skip_first:]

    #test = pd.DataFrame([predicted_date_range, series_orig_data['date']],
    #        columns=['mine', 'file'])
    #print test
    print 'predicted_date_range:',predicted_date_range[0], \
        predicted_date_range[len(predicted_date_range)-1], \
        len(predicted_date_range)
    print 'full_date_range:',full_date_range[0], \
        full_date_range[len(full_date_range)-1], \
        len(full_date_range)

    if interpolated:
        test_offset = 4*len(train_ds)
    else:
        test_offset = len(train_ds)


    bprop = BackpropTrainer(net, train_ds, verbose=True)
    for i in xrange(300):
        print 'At epoch',i
        bprop.train()
        if (i+1)%10 == 1:
            actual = ds['target'][:,0]
            activations_test = net.activateOnDataset(test_ds)
            actual_test = test_ds['target']
            errs = 1./len(test_ds) * (activations_test - actual_test)**2
            err = np.sum(errs)
            print 'Average test error:', err
            
            if offerConstantFeedback:
                series = net.activateOnDataset(ds)[:,0]
                series = pd.Series(series, index=predicted_date_range,
                        dtype=np.float64)
            else:
                # feed the net's recent guesses back in as the lagged inputs
                guesses = []
                for input, target in ds:
                    if len(guesses) >= feedbackPeriod:
                        input[-feedbackPeriod-1:-1] = guesses[-feedbackPeriod:]
                    guess = net.activate(input)[0]  # take the scalar output
                    guesses.append(guess)
                series = pd.Series(guesses, index=predicted_date_range,
                        dtype=np.float64)


            if interpolated:
                series = series.reindex(full_date_range)
                series = series.interpolate('linear')

            actual = denormalize(actual, series_min, series_max)
            series = denormalize(series, series_min, series_max)

            #series = series[len(predicted_date_range) - len(series):]
            #predicted = pd.DataFrame({'date': predicted_date_range, 
            #'pred': series,
            #'actual': ds['target'][:,0]},
            #columns=['date', 'actual', 'pred'])

            #print np.hstack( (series_orig[4*9:100:4][:,np.newaxis], 
            #                  actual[:25-9][:,np.newaxis]) )
            #sys.exit(-1)

            if interpolated:
                plt.plot(full_date_range, series, 'g-', label='pred')
                #plt.plot(full_date_range, ds['target'][:,0], label='actual')
                plt.plot(full_date_range, 
                        series_orig_data[series_name][skip_first:skip_first +
                            len(full_date_range)],
                        'b-', label='train')
                plt.plot(full_date_range[test_offset:], 
                        series[test_offset:], 'r-', label='test')
            else:
                plt.plot(predicted_date_range, series, 'g-', label='pred')
                #plt.plot(predicted_date_range, actual, label='train')
                plt.plot(predicted_date_range[test_offset:],
                        series[test_offset:], 'r-', label='test')

            #plt.plot(full_date_range, 
            #        series_orig_data[series_name][4*9:4*9 +
            #            len(full_date_range)],
            #        'b-', label='train')
            plt.title('\nmodel: %s - %s, \nepoch: %d\ntest err: %.4f' % 
                    (model, fn, i+1, err))
            plt.legend(loc=2)
            plt.grid()
            plt.savefig('svg/pred___%s___%s___epoch%d.svg' % (
                os.path.splitext(os.path.basename(fn))[0], model, i+1))
            plt.close()
Ejemplo n.º 55
0
# assumed imports for this fragment
import time
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

start_time = time.time()

#our data set has 4 input parameters and 3 classes
ds = SupervisedDataSet(4,3)

tf=open('IRIS.csv','r')
for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:4])
    outdata = tuple(data[4:])
    ds.addSample(indata,outdata)
print ds.indim
print ds.outdim
# pybrain.tools.shortcuts.buildNetwork(*layers, **options)
#Build arbitrarily deep networks.
#layers should be a list or tuple of integers, that indicate how many neurons the layers should have.
#change the hidden layer neurons to maximise the accuracy
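# e.g. buildNetwork(4, 3, 3) builds a 4-3-3 net; hidden layers default to
# SigmoidLayer and the output layer to LinearLayer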
n = buildNetwork(ds.indim, 3, ds.outdim, recurrent=True)

#bpa
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)

t.trainOnDataset(ds,5)
t.testOnData(verbose=True)


end_time=time.time()

print "time taken is ",end_time-start_time," seconds"
Ejemplo n.º 56
0
from sklearn import datasets
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

iris = datasets.load_iris()

x, y = iris.data, iris.target
print(len(x))

dataset = ClassificationDataSet(4, 1, nb_classes=3)

for i in range(len(x)):
    dataset.addSample(x[i], y[i])

train_data, part_data = dataset.splitWithProportion(0.6)

test_data, val_data = part_data.splitWithProportion(0.5)

net = buildNetwork(dataset.indim, 3, dataset.outdim)
trainer = BackpropTrainer(net,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

train_errors, val_errors = trainer.trainUntilConvergence(dataset=train_data,
                                                         maxEpochs=100)

print(trainer.totalepochs)  # epochs actually run before early stopping
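# hedged evaluation sketch (not in the original): decode the net's single
# linear output by rounding to the nearest class index, then score accuracy
import numpy as np

predictions = net.activateOnDataset(test_data)
accuracy = np.mean(np.round(predictions[:, 0]) == test_data['target'][:, 0])
print('test accuracy: {:.2%}'.format(accuracy))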
Ejemplo n.º 57
0
for f in posFilenames:
    img = cv2.imread(posDir + f, 0).ravel()  # grayscale, flattened
    img = img / 127.5 - 1                    # scale [0, 255] to [-1, 1]
    DS.appendLinked(img, [1])                # positive class

negDir = 'neg/'
negFilenames = listdir(negDir)
for f in negFilenames:
    img = cv2.imread(negDir + f, 0).ravel()
    img = img / 127.5 - 1
    DS.appendLinked(img, [0])  # negative class; dataset setup complete

Momen = 0.0
WeiDecay = 0.003
print 'training...'
net = buildNetwork(896, 100, 10, 1, bias=True, outclass=SigmoidLayer)
trainer = BackpropTrainer(net, DS, momentum=Momen, weightdecay=WeiDecay)
proportion2Cost = trainer.trainUntilConvergence(validationProportion=0.20,
                                                maxEpochs=1000,
                                                continueEpochs=10)
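# note: trainUntilConvergence() returns the per-epoch (training errors,
# validation errors) lists, so proportion2Cost here is a 2-tuple of lists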
raw_input(proportion2Cost)

xmlName = 'trainWithMomen%sWeiDecay%sMaxE1000ExamRefreshedNormalized.xml' % (
    Momen, WeiDecay)

if not exists(xmlName):
    NetworkWriter.writeToFile(net, xmlName)
    print 'output xml'
else:
    print 'xml name already existed'
Ejemplo n.º 58
0
    for n in range(0, len(entries)):
        test_data.addSample( entries[n], [data_test[n][-1]])

    train_data._convertToOneOfMany( )
    test_data._convertToOneOfMany( )
    
    network = FeedForwardNetwork()
    inLayer = LinearLayer(3)    
    outLayer = LinearLayer(3)
    network.addInputModule(inLayer)
    network.addOutputModule(outLayer)
    in_to_out = FullConnection(inLayer, outLayer)
    network.addConnection(in_to_out)
    
    
    network.sortModules()

    trainer = BackpropTrainer( network, dataset=train_data, verbose=False)
    trainer.trainEpochs(1000)
    
    result = trainer.testOnClassData(test_data, return_targets=True)
    result = classification(result[1], result[0])  # classification(targets, predictions)
    print(result)
Ejemplo n.º 59
0
    reportOnCollections(data, 'loaded')
    #this gives a list of (rowSamples, colSamples) where *Samples is a SupervisedDataSet
    data = [convertSampleCollection2SupervisedDataSet(d) for d in data]
    #now we split it into row and column data
    rowData = [d[0] for d in data]
    colData = [d[1] for d in data]

    print 'training row classifiers...'
    inputDataList = rowData
    clfs = []
    for d in inputDataList:
        train, test = d.splitWithProportion(0.9)
        dim = d['input'].shape[1]
        #convert data sets back to ClassificationDataSet
        #this is required due to a bug on the current version
        train = convert2ClassificationDataSet(train)
        test = convert2ClassificationDataSet(test)

        net = buildNetwork(dim, dim, dim / 2, dim / 2, 2)
        clfs.append(net)
        trainer = BackpropTrainer(net, train)
        print 'training classifier for 5 epochs...'
        for i in range(5):
            trainer.train()
        print 'error on training data', percentError(trainer.testOnClassData(),
                                                     train['class'])
        print 'error on test data', percentError(
            trainer.testOnClassData(dataset=test), test['class'])
        print np.max(net.activateOnDataset(train), axis=1)
        quit()
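# a plausible module-level sketch of the convert2ClassificationDataSet helper
# assumed above (the real one is defined outside this fragment); it works
# around splitWithProportion() handing back plain SupervisedDataSets:
from pybrain.datasets.classification import ClassificationDataSet

def convert2ClassificationDataSet(ds, nb_classes=2):
    cds = ClassificationDataSet(ds['input'].shape[1], 1, nb_classes=nb_classes)
    for inp, tgt in ds:
        cds.addSample(inp, tgt)
    cds._convertToOneOfMany()  # one-hot targets; original labels kept in 'class'
    return cds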
Ejemplo n.º 60
0
    error = 0.0
    error2 = 0.0
    local_min_error = 100000
    for x in range(0,35):

        if count == 16 and local_min_error > 5.4:
            break

        error2 = error

        fnn = buildNetwork(DS.indim, 30, DS.outdim, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer)
        target = [16,14,9,16,-7,-2,16,-1,-6,8,7,10,-6,-2,12,2,3]

    # while 1:


        trainer = BackpropTrainer(fnn, DS, verbose=True)  # train the freshly built fnn
        trainer.trainUntilConvergence(dataset=None, maxEpochs=750, continueEpochs=10, validationProportion=0.35)
        # trainer.trainEpochs(50)
        # trainer.trainOnDataset(DS, 1500)
        # trainer.testOnData(verbose = True)

        vals = []


        for inp in prediction_inputs:
            vals.append(float(fnn.activate(inp)))  # inp avoids shadowing the outer loop's x

        error = 0.0
        num = 0.0
        for o, t in zip(vals, prediction_outputs):
            if abs(t - o) < 10: