Example #1
    def fit(self, X, y):
        """
        Train the regressor model.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param y: array-like of shape [n_samples] - target values

        :return: self
        """

        dataset = self._prepare_net_and_dataset(X, y, 'regression')

        trainer = BackpropTrainer(self.net,
                                  dataset,
                                  learningrate=self.learningrate,
                                  lrdecay=self.lrdecay,
                                  momentum=self.momentum,
                                  verbose=self.verbose,
                                  batchlearning=self.batchlearning,
                                  weightdecay=self.weightdecay)
        if self.epochs < 0:
            trainer.trainUntilConvergence(maxEpochs=self.max_epochs,
                                          continueEpochs=self.continue_epochs,
                                          verbose=self.verbose,
                                          validationProportion=self.validation_proportion)
        else:
            for i in range(self.epochs):
                trainer.train()
        self.__fitted = True

        return self
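The pattern above (fixed-epoch loop vs. trainUntilConvergence when epochs is negative) can be used standalone. A minimal sketch, assuming only PyBrain itself (the wrapper class and its hyperparameters are not reproduced here):

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

def fit_regressor(X, y, epochs=-1, max_epochs=100):
    # X: list of feature vectors, y: list of target values
    net = buildNetwork(len(X[0]), 5, 1)
    ds = SupervisedDataSet(len(X[0]), 1)
    for xi, yi in zip(X, y):
        ds.addSample(xi, [yi])
    trainer = BackpropTrainer(net, ds)
    if epochs < 0:
        # early stopping on the default 25% validation split
        trainer.trainUntilConvergence(maxEpochs=max_epochs)
    else:
        for _ in range(epochs):
            trainer.train()
    return net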
Example #2
def main():
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	test_set = test.keys()  # `test` (held-out samples) and `traits` (trait names, used below) are assumed to be defined elsewhere
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
Example #3
class NNetwork:
	def __init__(self):
		self.ds = ClassificationDataSet(7, 1, nb_classes=8)  #8 since we have 8 gestures, 7 since we have 7 features
		
	def add_data(self, training_data):
		for gesture in training_data:
			self.ds.addSample(gesture[1], gesture[0])  #a method to add all the training data we have
			
	def newData(self, training_data):   #a method for replacing the data already existing and adding data from scratch
		self.ds = ClassificationDataSet(7, 1, nb_classes=8)
		for gesture in training_data:
			self.ds.addSample(gesture[1], gesture[0])
	
	def train(self, shouldPrint):
		tstdata, trndata = self.ds.splitWithProportion(0.2)  #splits the data into training and verification data
		trndata._convertToOneOfMany()
		tstdata._convertToOneOfMany()
		self.fnn = buildNetwork(trndata.indim, 64, trndata.outdim, outclass=SoftmaxLayer) #builds a network with 64 hidden neurons
		self.trainer = BackpropTrainer(self.fnn, dataset=trndata, momentum=0.1, learningrate=0.01, verbose=True, weightdecay=0.1)
		#uses the backpropagation algorithm
		self.trainer.trainUntilConvergence(dataset=trndata, maxEpochs=100, verbose=True, continueEpochs=10, validationProportion=0.20) #early stopping with 20% as testing data
		trnresult = percentError( self.trainer.testOnClassData(), trndata['class'] )
		tstresult = percentError( self.trainer.testOnClassData(dataset=tstdata ), tstdata['class'] )
		
		if shouldPrint:
			print "epoch: %4d" % self.trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult

	def activate(self, data):  # tests a particular data point (feature vector)
		return self.fnn.activate(data)
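A note on the split-then-convert pattern above: in recent PyBrain releases (e.g. 0.3.3), splitWithProportion on a ClassificationDataSet returns plain SupervisedDataSet objects, so the _convertToOneOfMany calls can raise AttributeError. A common workaround (a sketch, not part of the original) rebuilds proper ClassificationDataSets from the split halves:

from pybrain.datasets import ClassificationDataSet

def as_classification(ds, nb_classes):
    # copy the samples of a split dataset back into a ClassificationDataSet
    cds = ClassificationDataSet(ds.indim, 1, nb_classes=nb_classes)
    for inp, tgt in ds:
        cds.addSample(inp, tgt)
    return cds

# tstdata = as_classification(tstdata, 8)
# trndata = as_classification(trndata, 8)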
Example #4
def main():
    a = 0
    for i in range(0,100):
        inLayer = SigmoidLayer(2)
        hiddenLayer = SigmoidLayer(3)
        outLayer = SigmoidLayer(1)
        
        net = FeedForwardNetwork()
        net.addInputModule(inLayer)
        net.addModule(hiddenLayer)
        net.addOutputModule(outLayer)
        
        in_to_hidden = FullConnection(inLayer,hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer,outLayer)
        
        net.addConnection(in_to_hidden)
        net.addConnection(hidden_to_out)
        
        net.sortModules()
        
        ds = SupervisedDataSet(2,1)
        ds.addSample((1,1), (0))
        ds.addSample((1,0), (1))
        ds.addSample((0,1), (1))
        ds.addSample((0,0), (0))
        
        trainer = BackpropTrainer(net,ds)
        trainer.trainUntilConvergence()
        
        out = net.activate((1,1))
        if (out < 0.5):
            a = a + 1
    print(str(a) + "/100")
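With only four XOR samples, trainUntilConvergence's default validationProportion=0.25 leaves a single validation sample, which makes the early stopping (and hence the success count above) noisy. One option, mirroring the repeated addSample calls in a later example below, is to enlarge the dataset before training (a sketch, assuming the net built above):

ds = SupervisedDataSet(2, 1)
for _ in range(10):
    for inp, tgt in [((1, 1), (0,)), ((1, 0), (1,)), ((0, 1), (1,)), ((0, 0), (0,))]:
        ds.addSample(inp, tgt)
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence(maxEpochs=200)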
Example #5
    def training_neural_network(self):
        dataTrain,dataTest = self.DS.splitWithProportion(0.7)
        xTrain, yTrain = dataTrain['input'], dataTrain['target']

        xTest, yTest = dataTest['input'], dataTest['target']

        trainer = BackpropTrainer(self.fnn, dataTrain,verbose = True, learningrate=0.03, momentum=0.1)
        trainer.trainUntilConvergence(maxEpochs=20)

        output = self.fnn.activateOnDataset(dataTest)
        count = 0
        countRight = 0
        error = 0
        for i in range(len(output)):
            posReal = yTest[i].argmax()
            posPredict = output[i].argmax()
            #print('o',output[i],posPredict)
            #print('r',yTest[i],posReal)
            error += abs(posReal-posPredict)

            if posReal == posPredict:
                countRight+=1
            count +=1
        error /= float(count)
        print('Correct rate:{:.2f}   Average error:{:.2f}'.format(countRight / float(count), error))
Example #6
    def neuralNetwork_eval_func(self, chromosome):
        node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
        if self.check_log(node_num, learning_rate, window_size):
            return self.get_means_from_log(node_num, learning_rate, window_size)[0]
        folded_dataset = self.create_folded_dataset(window_size)
        indim = 21 * (2 * window_size + 1)
        mean_AUC = 0
        mean_decision_value = 0
        mean_mcc = 0
        sample_size_over_thousand_flag = False
        for test_fold in xrange(self.fold):
            test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
            if len(test_labels) + len(train_labels) > 1000:
                sample_size_over_thousand_flag = True
            ds = SupervisedDataSet(indim, 1)
            for i in xrange(len(train_labels)):
                ds.appendLinked(train_dataset[i], [train_labels[i]])
            net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
            trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
            trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
            decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
            decision_values = map(lambda x: x[0], decision_values)
            AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
            mean_AUC += AUC
            mean_decision_value += decision_value_and_max_mcc[0]
            mean_mcc += decision_value_and_max_mcc[1]
            if sample_size_over_thousand_flag:
                break
        if not sample_size_over_thousand_flag:
            mean_AUC /= self.fold
            mean_decision_value /= self.fold
            mean_mcc /= self.fold
        self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
        self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
        return mean_AUC
Example #7
def NeuralNetwork(tRiver, qRiver, pRiver, TRiver, qnewRiver, pnewRiver, TnewRiver):
    # build neural network with 20 neurons for historic data on flux, 3 for last 3 temp data, 3 for last precipitation,
    # hidden layer with more than input neurons (hinder specification)
    # and 3 output neurons (flux for next day, first derivative, second derivative)

    Ndim = 20 + 3 + 3
    Nout = 3
    net = buildNetwork(Ndim, Ndim, Nout, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(Ndim, Nout)

    # next big job: find data values to build up library of training set
    for t in range(20, len(tRiver)-3):
        input_flow = qRiver[t-20:t]
        input_prec = pRiver[t-3:t]
        input_temp = TRiver[t-3:t]
        input_vec = np.hstack([input_flow, input_prec, input_temp])

        output_flow = np.hstack([qRiver[t:t+3]]) # first approx, split later for long predictions
        ds.addSample(input_vec, output_flow)

    trainer = BackpropTrainer(net, ds)
    #trainer.train()
    trainer.trainUntilConvergence()

    # now call it repeatedly on the second set

    prediction = net.activate(np.hstack([qnewRiver[:20], pnewRiver[:3], TnewRiver[:3]]))
    return prediction
Example #8
def train(data):
	"""
	See http://www.pybrain.org/docs/tutorial/fnn.html

	Returns a neural network trained on the given data.

	Parameters:
	  data - A ClassificationDataSet for training.
	         Should not include the test data.
	"""
	network = buildNetwork(
		# This is where we specify the architecture of
		# the network.  We can play around with different
		# parameters here.
		# http://www.pybrain.org/docs/api/tools.html
		data.indim, 5, data.outdim,
		hiddenclass=SigmoidLayer,
		outclass=SoftmaxLayer
	)

	# We can fiddle around with this guy's options as well.
	# http://www.pybrain.org/docs/api/supervised/trainers.html
	trainer = BackpropTrainer(network, dataset=data)
	trainer.trainUntilConvergence(maxEpochs=20)

	return network
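A hypothetical call to train() above (the toy dataset is an assumption; any ClassificationDataSet with matching dimensions works):

from pybrain.datasets import ClassificationDataSet

data = ClassificationDataSet(2, 1, nb_classes=2)
for inp, cls in [((0.0, 0.0), 0), ((0.0, 1.0), 1), ((1.0, 0.0), 1), ((1.0, 1.0), 0)]:
    data.addSample(inp, [cls])
data._convertToOneOfMany()  # one output neuron per class, as the softmax layer expects
network = train(data)
print(network.activate((1.0, 1.0)))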
Example #9
def trainNetwork(inData, numOfSamples, numOfPoints, epochs):
    # Build the dataset
    alldata = createRGBdataSet(inData, numOfSamples, numOfPoints)
    # Split into test and training data
    trndata, tstdata = splitData(alldata)

    # Report stats
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]

    # Build and train the network
    fnn = buildNetwork( trndata.indim, 256, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.001, verbose=True, weightdecay=0.001)
    #trainer.trainEpochs( epochs )
    trainer.trainUntilConvergence(maxEpochs=epochs)

    # Report results
    trnresult = percentError( trainer.testOnClassData(), trndata['class'] )
    tstresult = percentError( trainer.testOnClassData( dataset=tstdata ), tstdata['class'] )
    print "epoch: %4d" % trainer.totalepochs, \
      "  train error: %5.2f%%" % trnresult, \
      "  test error: %5.2f%%" % tstresult

    # Report results of final network
    checkNeuralNet(trainer, alldata, numOfSamples)
    return fnn
Example #10
def getModel(inputSize,hiddenSize1,hiddenSize2,trainData,target):
	fnn = FeedForwardNetwork()
	inLayer = LinearLayer(inputSize,name = 'inLayer')
	hiddenLayer0 = SigmoidLayer(hiddenSize1,name='hiddenLayer0')
	hiddenLayer1 = SigmoidLayer(hiddenSize2,name='hiddenLayer1')
	outLayer = LinearLayer(1,name = 'outLayer')

	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer0)
	fnn.addModule(hiddenLayer1)
	fnn.addOutputModule(outLayer)

	inToHidden0 = FullConnection(inLayer,hiddenLayer0)
	hidden0ToHidden1 = FullConnection(hiddenLayer0,hiddenLayer1)
	hidden1ToHiddenOutput = FullConnection(hiddenLayer1,outLayer)

	fnn.addConnection(inToHidden0)
	fnn.addConnection(hidden0ToHidden1)
	fnn.addConnection(hidden1ToHiddenOutput)

	fnn.sortModules()
	Ds = SupervisedDataSet(inputSize,1)
	scaler = preprocessing.StandardScaler().fit(trainData)
	x = scaler.transform(trainData)
	# print(len(target))
	# print(len(x))
	for i in range(len(target)):
		Ds.addSample(x[i],[target[i]])
	trainer = BackpropTrainer(fnn,Ds,learningrate=0.01,verbose=False)
	trainer.trainUntilConvergence(maxEpochs=1000)
	return fnn
Example #11
def createNet():
	"""Create and seed the intial neural network"""
	#CONSTANTS
	nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
	nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]

	allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()

	ds = SupervisedDataSet(nn_input_dim, nn_output_dim)

	#normalizes and adds it to the dataset
	for i in range(0, len(allyTrainingPos)):
		x = normalize(enemyTrainingPos[i])
		y = normalize(allyTrainingPos[i])
		x = [val for pair in x for val in pair]
		y = [val for pair in y for val in pair]
		ds.addSample(x, y)

	for inpt, target in ds:
		print inpt, target

	net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
	trainer = BackpropTrainer(net, ds)
	trainer.trainUntilConvergence()
	NetworkWriter.writeToFile(net, "net.xml")
	enemyTestPos = runExperiments.makeTestDataset()
	print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
	return ds
Example #12
	def train(self):
		print "Enter the number of times to train, -1 means train until convergence:"
		t = int(raw_input())
		print "Training the Neural Net"
		print "self.net.indim = "+str(self.net.indim)
		print "self.train_data.indim = "+str(self.train_data.indim)

		trainer = BackpropTrainer(self.net, dataset=self.train_data, momentum=0.1, verbose=True, weightdecay=0.01)
		
		if t == -1:
			trainer.trainUntilConvergence()
		else:
			for i in range(t):
				trainer.trainEpochs(1)
				trnresult = percentError( trainer.testOnClassData(), self.train_data['class'])
				# print self.test_data

				tstresult = percentError( trainer.testOnClassData(dataset=self.test_data), self.test_data['class'] )

				print "epoch: %4d" % trainer.totalepochs, \
					"  train error: %5.2f%%" % trnresult, \
					"  test error: %5.2f%%" % tstresult

				if i % 10 == 0 and i > 1:
					print "Saving Progress... Writing to a file"
					NetworkWriter.writeToFile(self.net, self.path)

		print "Done training... Writing to a file"
		NetworkWriter.writeToFile(self.net, self.path)
		return trainer
Example #13
    def train(self):

        #self.init_iri()
        self.init_image()
        self.ds = ClassificationDataSet(self.IN, 1, nb_classes=128)
        #classifier.init_image()
        self.load_data()
        print "Number of trianing patterns: ", len(self.trndata)
        print "Input and output dimensions: ", self.trndata.indim, self.trndata.outdim
        print "First sample (input, target, class):"
        print self.trndata['input'][0], self.trndata['target'][0], self.trndata['class'][0]
        print self.trndata.indim, self.trndata.outdim
        self.net = buildNetwork(self.trndata.indim, 7, self.trndata.outdim)


        trainer = BackpropTrainer(self.net, dataset=self.trndata, momentum=0.1, verbose=True, weightdecay=0.01)

        """
        for i in range(200):
            trainer.trainEpochs(1)
            trnresult = percentError(trainer.testOnClassData(), self.trndata['class'])
            tstresult = percentError(trainer.testOnClassData(dataset = self.tstdata), self.tstdata["class"])
            print "epch: %4d" %  trainer.totalepochs, \
                " train error: %5.2f%%" % trnresult, \
                " test error: %5.2f%%" % tstresult
        """
        trainer.trainUntilConvergence()
        trnresult = percentError(trainer.testOnClassData(), self.trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset = self.tstdata), self.tstdata["class"])
        print "epch: %4d" %  trainer.totalepochs, \
            " train error: %5.2f%%" % trnresult, \
            " test error: %5.2f%%" % tstresult
Example #14
    def train(self):
        trainer = BackpropTrainer(self.network, self.data_set)

        trainer.trainUntilConvergence(
            verbose=False, validationProportion=0.15, maxEpochs=1000, continueEpochs=10)

        return trainer
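trainUntilConvergence also returns the per-epoch training and validation error histories, which the caller could surface alongside the trainer; a sketch of that variant (names are assumptions):

    def train_with_history(self):
        trainer = BackpropTrainer(self.network, self.data_set)
        err_train, err_valid = trainer.trainUntilConvergence(
            verbose=False, validationProportion=0.15, maxEpochs=1000, continueEpochs=10)
        return trainer, err_train, err_valid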
Example #15
def run_nn_fold(training_data, test_data):
    test_features, ignore, featureMap, labels, labelMap = fs.mutualinfo(training_data)

    input_len = len(test_features[0])
    num_classes = len(labelMap.keys())
    train_ds = ClassificationDataSet(input_len, 1,nb_classes=num_classes)
    for i in range(len(test_features)):
        train_ds.addSample(tuple(test_features[i]), (labels[i]))
    train_ds._convertToOneOfMany()
    net = buildNetwork(train_ds.indim, 2, train_ds.outdim, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, train_ds, verbose=True)
    print "training until convergence..."
    trainer.trainUntilConvergence(maxEpochs=100)
    print "done. testing..."


    test_ds = ClassificationDataSet(input_len, 1,nb_classes=num_classes)  

    labels = []
    for tweetinfo in test_data:
        featuresFound = tweetinfo["Features"]
        label = tweetinfo["Answer"]
        labels.append(label)
        features = [0]*len(featureMap.keys())
        for feat in featuresFound:
            if feat in featureMap:
                features[ featureMap[feat] ] = 1
        test_ds.addSample(tuple(features), (labelMap[label]))

    test_ds._convertToOneOfMany()
    tstresult = percentError( trainer.testOnClassData(
            dataset=test_ds ), test_ds['class'] )
    print tstresult
Example #16
    def run(self, ds_train, ds_test):
        """
        This function both trains the ANN and evaluates the ANN using a specified training and testing set
        Args:
        :param ds_train (TweetClassificationDatasetFactory): the training dataset the neural network is trained with.
        :param ds_test (TweetClassificationDatasetFactory): the test dataset to evaluate on.
        :returns: error (float): the percent error of the test dataset, tested on the neural network.
        """
        ds_train._convertToOneOfMany()
        ds_test._convertToOneOfMany()

        trainer = BackpropTrainer(
            self.network,
            dataset=ds_train,
            momentum=0.1,
            verbose=True,
            weightdecay=0.01)

        trainer.trainUntilConvergence(
            dataset=ds_train,
            maxEpochs=self.max_epochs,
            continueEpochs=self.con_epochs)
        result = trainer.testOnClassData(dataset=ds_test)
        error = percentError(result, ds_test['class'])

        return error
Example #17
def neural_network(data, target, network):
    DS = SupervisedDataSet(len(data[0]), 1)
    nn = buildNetwork(len(data[0]), 7, 1, bias = True)
    kf = KFold(len(target), 10, shuffle = True)
    RMSE_NN = []
    for train_index, test_index in kf:
        data_train, data_test = data[train_index], data[test_index]
        target_train, target_test = target[train_index], target[test_index]
        for d,t in zip(data_train, target_train):
            DS.addSample(d, t)
        bpTrain = BackpropTrainer(nn,DS, verbose = True)
        #bpTrain.train()
        bpTrain.trainUntilConvergence(maxEpochs = 10)
        p = []
        for d_test in data_test:
            p.append(nn.activate(d_test))
        
        rmse_nn = sqrt(np.mean((p - target_test)**2))
        RMSE_NN.append(rmse_nn)
        DS.clear()
    time = range(1,11)
    plt.figure()
    plt.plot(time, RMSE_NN)
    plt.xlabel('cross-validation fold')
    plt.ylabel('RMSE')
    plt.show()
    print(np.mean(RMSE_NN))
Example #18
def entrenarSomnolencia(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096,1)

    """Se crea el dataset, para ello procesamos cada una de las imagenes obteniendo los rostros,
       luego se le asignan los valores deseados del resultado la red neuronal."""

    print "Somnolencia - cara"
    for i,c in enumerate(os.listdir(os.path.dirname('/home/taberu/Imágenes/img_tesis/somnoliento/'))):
        try:
            im = cv2.imread('/home/taberu/Imágenes/img_tesis/somnoliento/'+c)
            pim = pi.procesarImagen(im)
            cara = d.deteccionFacial(pim)
            if cara is None:
                print "No face found"
            else:
                print i
                ds.appendLinked(cara.flatten(),10)
        except:
            pass  # skip images that fail to load or to process

    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')
Example #19
def dynamic_data_network(slept, study):

    train_set = SupervisedDataSet(2, 1)
    for r1 in range(0, 15):
        slept = random.randint(0,10)
        study = random.randint(0,10)
        score = random.randint(60,100)
        train_set.addSample((slept, study), score)
        
	#print train_set['input']

	print 'build net'
	net = buildNetwork(2, 3, 1, bias=True)
	print 'initial guess'
	print net.activate((slept, study))
	print 'trainer'
	trainer = BackpropTrainer(net, train_set)
	print 'training'
	trainer.trainUntilConvergence()
	print 'final guess'
	print net.activate((slept, study))

	while True:

		slept = int(raw_input('hours slept: '))
		if slept < 0:
			break
		study = int(raw_input('hours studied: '))

		print 'new guess :'
		print net.activate((slept, study))
Example #20
def bayesian_net(train, train_label, layers):
    
    inputs = 0
    targets = 0
    print train_label[0]
    if type(train[0]) is list:
        inputs = len(train[0])
    else:
        inputs = 1
    
    if type(train_label[0]) is list:
        targets = len(train_label[0])
    else: targets = 1
        
    
    ds = SupervisedDataSet(inputs,targets)
    
    for i in range(len(train)):
        ds.addSample(train[i], train_label[i])
        
    net = buildNetwork(inputs, layers, targets, bias=True, hiddenclass=SigmoidLayer)
    
    trainer = BackpropTrainer(net, ds)
    
    trainer.trainUntilConvergence()
    
    return trainer
Example #21
def makeAndTrainANN(ls_x, ls_y, window_size, hiddenSize=5, epochs=1):
	
	# Build Pybrain dataset
	ds = SupervisedDataSet(window_size, 1)

	for i, sample in enumerate(ls_x):
		ds.addSample(sample, ls_y[i])

	inLayer = LinearLayer(window_size)
	hiddenLayer = SigmoidLayer(hiddenSize)
	outLayer = LinearLayer(1)
	constant = BiasUnit()

	n = FeedForwardNetwork()

	n.addInputModule(inLayer)
	n.addModule(hiddenLayer)
	n.addOutputModule(outLayer)
	n.addModule(constant)
	
	n.addConnection(FullConnection(inLayer, hiddenLayer))
	n.addConnection(FullConnection(hiddenLayer, outLayer))
	n.addConnection(FullConnection(constant, inLayer))
	n.addConnection(FullConnection(constant, hiddenLayer))
	n.addConnection(FullConnection(constant, outLayer))
	
	n.sortModules()
	n.reset()
	#trainer = BackpropTrainer(n, dataset=ds)
	trainer = BackpropTrainer(n, dataset=ds, momentum=0.2)
	#trainer.trainEpochs(epochs)
	trainer.trainUntilConvergence(maxEpochs=epochs)
	
	return n
Example #22
  def trainprediction(self, data=None, biased=False, maxEpochs = 10000):
    """Trains the neural network with the provided trainings data and returns true, if the training was successful"""
    if not data:
      out.writeDebug('No training data! The net stays initialized with random weights!')
      return False

    #create supervised data set from the training nodes
    ds = SupervisedDataSet(len(self.features), 2)
    reduced_dataset = [set([]),set([])]
    for node, target in data:
      featuresValue = []
      for feature in self.features:
        featuresValue.append(feature(self, node, None, node.querySequence))
        
      if target:
        reduced_dataset[0].add(tuple(featuresValue+[ACCEPTED, NOTACCEPTED]))        
      else:
        reduced_dataset[1].add(tuple(featuresValue+[NOTACCEPTED, ACCEPTED]))

    for posInstance, negInstance in zip(reduced_dataset[0],reduced_dataset[1]):
      ds.addSample(posInstance[:-2],posInstance[-2:])
      ds.addSample(negInstance[:-2],negInstance[-2:])

    if biased:
      ds = SupervisedDataSet(len(self.features), 2)
      for instance in reduced_dataset[0]:
        ds.addSample(instance[:-2],instance[-2:])      
      for instance in reduced_dataset[1]:
        ds.addSample(instance[:-2],instance[-2:])
    out.writeDebug('Start training neural net with %s training examples. Dataset bias is set to %s'%(len(ds), biased ))
    trainer = BackpropTrainer(self.net, ds)
    trainer.trainUntilConvergence(maxEpochs = maxEpochs)
    
    return True      
Example #23
    def train(self, x, y):
        ''' Trains on the given inputs and labels for either a fixed number of epochs or until convergence.
            Normalizes the input with a z-transform'''

        print "training..."
        
        # normalize input
        m = x.mean()
        s = x.std()
        x = self.z_transform(x, m, s)

        ds = SupervisedDataSet(x.shape[1], 1) 
        ds.setField('input', x)
        ds.setField('target', y)
        
        trainer = BackpropTrainer(self.n,ds, learningrate=self.learning_rate, momentum=self.momentum, verbose=True)

        if (self.epochs == 0):
            trainer.trainUntilConvergence()
        else:
            for i in range(0, self.epochs):
                start_time = time.time()
                trainer.train() 
                print "epoch: ", i
                print "time: ", time.time() - start_time, " seconds"
            
        print "finished"
Example #24
def startTrials(ds, maxTrials = 2, maxExperiments = 2):
	"""start and run the trials"""
	hpCount = []
	for i in range(0, maxExperiments):
		for j in range(0, maxTrials):
			enemyTestPos = runExperiments.makeTestDataset()
			net = NetworkReader.readFrom("net.xml")

			netResults = net.activate([val for pair in normalize(enemyTestPos) for val in pair])
			netIter = iter(netResults)
			allyTestPos = zip(netIter, netIter)
			#undo normalization
			allyTestPos = map(lambda p: (abs(p[0]*640), abs(p[1]*720)), allyTestPos)
			print(allyTestPos)
			runExperiments.writeTestData(allyTestPos)
			runExperiments.run()

			with open("exp_results_raw.txt", "r") as resultsFile:
				lines = resultsFile.readlines()
				if "Zerg_Zergling" in lines[1]:
					x = normalize(enemyTestPos)
					y = normalize(allyTestPos)
					x = [val for pair in x for val in pair]
					y = [val for pair in y for val in pair]
					ds.addSample(x, y)
					lineSplit = lines[1].split("Zerg_Zergling")[-1]
					hpCount.append(lineSplit.split(" ")[2])
		trainer = BackpropTrainer(net, ds)
		trainer.trainUntilConvergence()
	return hpCount
Example #25
def classify(Xtrain, Ytrain, n_hidden=5):
    """ Use entirety of provided X, Y to predict

    Arguments
    Xtrain -- Training data
    Ytrain -- Training prediction

    Returns
    classifier -- a classifier fitted to Xtrain and Ytrain
    """

    # PyBrain expects data in its DataSet format
    trndata = ClassificationDataSet(Xtrain.shape[1], nb_classes=2)
    trndata.setField('input', Xtrain)
    # Apparently, arrays don't work here as they try to access second dimension size...
    trndata.setField('target', mat(Ytrain).transpose())

    trndata._convertToOneOfMany() # one output neuron per class

    # build neural net and train it
    net = buildNetwork(trndata.indim, n_hidden, trndata.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

    trainer.trainUntilConvergence()
    #trainer.trainEpochs(5)

    print "trained"
    #trainer.trainEpochs(5)

    # Return a functor that wraps calling predict
    return NeuralNetworkClassifier(trainer)
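NeuralNetworkClassifier is not shown; a plausible functor (an assumption) that wraps the trained network for prediction via the trainer's module attribute:

class NeuralNetworkClassifier(object):
    def __init__(self, trainer):
        self.trainer = trainer

    def predict(self, X):
        # argmax over the softmax outputs picks the predicted class
        return [self.trainer.module.activate(x).argmax() for x in X]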
Example #26
def main():
    for stock in STOCK_TICKS:
        # Download Data
        get_data(stock)

        # Import Data
        days = extract_data(stock)
        today = days.pop(0)

        # Make DataSet
        data_set = ClassificationDataSet(INPUT_NUM, 1, nb_classes=2)
        for day in days:
            target = 0
            if day.change > 0:
                target = 1
            data_set.addSample(day.return_metrics(), [target])

        # Make Network
        network = buildNetwork(INPUT_NUM, MIDDLE_NUM, MIDDLE_NUM, OUTPUT_NUM)

        # Train Network
        trainer = BackpropTrainer(network)
        trainer.setData(data_set)
        trainer.trainUntilConvergence(maxEpochs=EPOCHS_MAX)

        # Activate Network
        prediction = network.activate(today.return_metrics())
        print prediction
Example #27
def encoderdecoder(outersize,innersize,indata,
                   fname):
    # create network
    n = FeedForwardNetwork()

    inLayer = LinearLayer(outersize)
    hiddenLayer = SigmoidLayer(innersize)
    outLayer = LinearLayer(outersize)

    n.addInputModule(inLayer)
    n.addModule(hiddenLayer)
    n.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outLayer)
    n.addConnection(in_to_hidden)
    n.addConnection(hidden_to_out)

    n.sortModules()
    
    # create dataset
    ds = SupervisedDataSet(outersize,outersize)
    for x, y in zip(indata, indata):
        ds.addSample(x,y)

    # train network
    trainer = BackpropTrainer(n,ds)
    trainer.trainUntilConvergence()

    # PyBrain networks are saved via NetworkWriter; Network has no saveNetwork method
    NetworkWriter.writeToFile(n, fname)
    
    return [[in_to_hidden,hidden_to_out],
            [inLayer,hiddenLayer,outLayer],
            n]
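A hypothetical call to encoderdecoder() above, compressing 8-dimensional one-hot vectors through a 3-unit bottleneck (the data and filename are assumptions):

import numpy as np

indata = np.eye(8)  # one-hot identity patterns
connections, layers, net = encoderdecoder(8, 3, indata, 'autoenc.xml')
print(net.activate(indata[0]))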
Example #28
def main():
    #read in pre-processed features
    print('reading preprocessed data')
    bag = read_bag_of_word('features')
    #read in sentimental dictionary
    print('reading dictionary')
    [word_vector, sentiments] = read_dictionary("positive.txt", "negative.txt")
    features,target,features_dict=create_feature_matrix(bag, sentiments)
    # Sort dates in order
    dates=dow_jones_labels.keys()  # `dow_jones_labels` (a date -> label dict) is assumed to be defined elsewhere
    dates = [datetime.datetime.strptime(ts, "%Y-%m-%d") for ts in dates]
    dates.sort()
    dates = [datetime.datetime.strftime(ts, "%Y-%m-%d") for ts in dates]

    ds = SupervisedDataSet(4, 1)
    ds.setField('input', features)
    target=np.array(target).reshape( -1, 1 )
    ds.setField('target', target)
    net = buildNetwork(4, 40, 1, bias=True)
    trainer = BackpropTrainer(net, ds)
    trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=10000, continueEpochs=10)
    count=0
    for i in range(0,len(target)):
        print("predict={0},actual={1}".format(net.activate(features[i]),target[i]))
        if net.activate(features[i])*target[i]>0:
            count+=1
    print("accuracy={0}".format(float(count) / len(dow_jones_labels)))
Example #29
def makeNet(learning_rate):
	ds = SupervisedDataSet(20, 20)
	with open('data/misspellingssmall.csv', 'rbU') as f:
		reader = csv.reader(f)
		for row in reader:
			ds.addSample(convert(row[0]),convert(row[1]))

	testds, trainds = ds.splitWithProportion(0.2)

	net = buildNetwork(20, 20, 20)
	trainer = BackpropTrainer(net, dataset=trainds, learningrate=learning_rate)
	#trainer.train()
	#trainer.trainEpochs(5)
	trainer.trainUntilConvergence()

	score = 0
	for x, y in testds:
		predict = unconvert(net.activate(x))
		score += damerau_levenshtein_distance(predict,unconvert(y))

	global lastNet
	lastNet = net

	global netNum
	netNum += 1

	print "Network " + str(netNum) + " done with score " + str(score)
	
	return score
Example #30
    def xtest_simple(self):
        # Create a network with 2 input nodes, 3 hidden nodes, 1 output node
        net = buildNetwork(2, 3, 1)

        # Create 2 input, 1 output dataset
        ds = SupervisedDataSet(2, 1)
        ds.addSample((0, 0), (0,))
        ds.addSample((0, 1), (1,))
        ds.addSample((1, 0), (1,))
        ds.addSample((1, 1), (0,))
        ds.addSample((0, 0), (0,))
        ds.addSample((0, 1), (1,))
        ds.addSample((1, 0), (1,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        ds.addSample((1, 1), (0,))
        #print ds['input']
        #print ds['target']
        #self.assertEqual(len(ds), 8)
        #self.show_activation(net, ds)

        trainer = BackpropTrainer(net, ds, learningrate=1.0)
        trainer.trainUntilConvergence(verbose=True, validationProportion=0.125)
        self.show_activation(net, ds)
Example #31
train = pandas.read_csv("train.csv")
target = train["Survived"]

m = Massager()
train_array = m.transform(train, True)
input_len = train_array.shape[1]
net = buildNetwork(input_len, 4, 1)
ds = ClassificationDataSet(input_len, 1)
for i in xrange(train_array.shape[0]):
    ds.addSample(train_array[i, :], target[i])

trainer = BackpropTrainer(net,
                          ds,
                          learningrate=0.02,
                          momentum=0.5,
                          verbose=True)
trainer.trainUntilConvergence(maxEpochs=150, verbose=True)

test = pandas.read_csv("test.csv")
answers = pandas.DataFrame(test["PassengerId"])
test_array = m.transform(test)
test_ds = ClassificationDataSet(train_array.shape[1], 1)  # built for symmetry but unused below
predictions = []
for i in xrange(test_array.shape[0]):
    out = net.activate(test_array[i, :])
    survived = 1 if out[0] >= 0.5 else 0
    predictions.append(survived)
answers['Survived'] = pandas.Series(predictions)

answers.to_csv("solution_nn.csv", index=False)
Example #32
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError

n_hidden = 500
bp_nn = buildNetwork(trndata.indim,
                     n_hidden,
                     trndata.outdim,
                     outclass=SoftmaxLayer)
trainer = BackpropTrainer(bp_nn,
                          dataset=trndata,
                          verbose=True,
                          momentum=0.5,
                          learningrate=0.0001,
                          batchlearning=True)
err_train, err_valid = trainer.trainUntilConvergence(maxEpochs=1000,
                                                     validationProportion=0.25)

# Convergence curves of the batch (accumulated) BP training process
import matplotlib.pyplot as plt

f1 = plt.figure(1)
plt.plot(err_train, 'b', err_valid, 'r')
plt.title('BP network classification')
plt.ylabel('error rate')
plt.xlabel('epochs')
plt.show()

# Test
tst_result = percentError(trainer.testOnClassData(tstdata), tstdata['class'])
print("epoch: %4d" % trainer.totalepochs, " test error: %5.2f%%" % tst_result)
Example #33
in_to_hidden0 = FullConnection(inLayer, hiddenLayer0)
hidden0_to_out = FullConnection(hiddenLayer0, outLayer)

# add the links to neural network
fnn.addConnection(in_to_hidden0)
fnn.addConnection(hidden0_to_out)

# make neural network come into effect
fnn.sortModules()

# train the NN
# we use BP Algorithm
# verbose = True means print the total error
trainer = BackpropTrainer(fnn, DS, verbose=True, learningrate=0.01)
# cap the number of epochs so the fit terminates
trainer.trainUntilConvergence(maxEpochs=10)

NetworkWriter.writeToFile(fnn, 'srnNet.xml')

for mod in fnn.modules:
    print("Module:", mod.name)
    if mod.paramdim > 0:
        print("--parameters:", mod.params)
    for conn in fnn.connections[mod]:
        print("-connection to", conn.outmod.name)
        if conn.paramdim > 0:
            print("- parameters", conn.params)
    if hasattr(fnn, "recurrentConns"):
        print("Recurrent connections")
        for conn in fnn.recurrentConns:
            print("-", conn.inmod.name, " to", conn.outmod.name)
Example #34
net.sortModules()
#net = buildNetwork(ds_train.indim, HIDDEN_NEURONS_NUM, ds_train.outdim, bias=True,outclass=SoftmaxLayer)
# ds.indim  -- the number of input-layer neurons, equal to the number of features
# ds.outdim -- the number of output-layer neurons, equal to the number of class labels
# SoftmaxLayer -- an output activation suitable for multi-class classification

init_params = np.random.random(
    (len(net.params)
     ))  # Initialize the network weights so the result is reproducible
net._setParameters(init_params)
#%%
np.random.seed(0)
# PyBrain's parameter fitting uses the random module; fix the seed for a reproducible result
trainer = BackpropTrainer(
    net, dataset=ds_train)  # Initialize the optimizer
err_train, err_val = trainer.trainUntilConvergence(maxEpochs=MAX_EPOCHS)
line_train = plt.plot(err_train, 'b', err_val, 'r')  # Plot the error curves
xlab = plt.xlabel('Iterations')
ylab = plt.ylabel('Error')

#%%
# ROC curves - decision threshold
threshold = 0.5
res_train = net.activateOnDataset(
    ds_train)  # Compute predictions on the training set
res_train_bin = []
for i in res_train:
    if i > threshold:
        res_train_bin.append(1)
    else:
        res_train_bin.append(0)
Example #35
import random

from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

dataModel = [
    [(0, 0), (0, )],
    [(0, 1), (1, )],
    [(1, 0), (1, )],
    [(1, 1), (0, )],
]

ds = SupervisedDataSet(2, 1)
for input, target in dataModel:
    ds.addSample(input, target)

random.seed()
trainingSet = SupervisedDataSet(2, 1)
for ri in range(0, 1000):
    input, target = dataModel[random.getrandbits(2)]
    trainingSet.addSample(input, target)

net = buildNetwork(2, 2, 1, bias=True)
trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum=0.99)
trainer.trainUntilConvergence(verbose=True,
                              trainingData=trainingSet,
                              validationData=ds,
                              maxEpochs=10)

print '0,0->', net.activate([0, 0])
print '0,1->', net.activate([0, 1])
print '1,0->', net.activate([1, 0])
print '1,1->', net.activate([1, 1])
Example #36
    validation_samples.append(ts[i:i + 4])

# Normalize
validation_samples = np.array(validation_samples) / float(max_sample)

inputs_validation = validation_samples[:, 0:3]
target_validation = validation_samples[:, 3]
target_validation = target_validation.reshape(-1, 1)

# dimensions are per-sample (3 input columns, 1 target column), not sample counts
ds = SupervisedDataSet(inputs_training.shape[1], target_training.shape[1])
ds.setField('input', inputs_training)
ds.setField('target', target_training)

trainer = BackpropTrainer(n, ds)
trainer.trainUntilConvergence(verbose=True,
                              validationProportion=0.15,
                              maxEpochs=100,
                              continueEpochs=10)

for i in range(700):
    mse = trainer.train()
    rmse = np.sqrt(mse)
    print "training RSME, epoch {}: {}".format(i + 1, rmse)

#==============================================================================
# trainer = BackpropTrainer(n, ds, learningrate=0.01, momentum=0.1)
#
# for epoch in range(1, 100000000):
#     if epoch % 10000000 == 0:
#         error = trainer.train()
#         print 'Epoch: ', epoch
#         print 'Error: ', error
Example #37
    xlu = [
        xtu[0], xtu[1], xtu[2], xtu[3], xtu[4], xtu[5], xtu[6], xtu[7], xtu[8],
        xtu[9], xtu[10]
    ]
    xvu = [xbu[0], xbu[1]]
    ltrain_input.append(xlu)
    ltrain_output.append(xvu)
    ds.addSample(xlu, xvu)

##for i in x:
##    ds.addSample(i,50*math.sin(i))
##print ds

n = buildNetwork(ds.indim, 40, ds.outdim, recurrent=True)
t = BackpropTrainer(n, learningrate=0.05, momentum=0.5, verbose=True)
t.trainUntilConvergence(ds, 10, continueEpochs=10, validationProportion=0.01)
t.testOnData(verbose=True)

fileObject = open('trained_net', 'wb')  # binary mode for pickle
pickle.dump(n, fileObject)
fileObject.close()

N1 = 3111
L = 10
j = 1
T = 0.24
##N=100
##y=np.empty(100)
x = np.empty(100)
qw = 0
##yn=np.empty(100)
Example #38
net = buildNetwork(trndata.indim,
                   500,
                   tstdata.outdim,
                   hiddenclass=TanhLayer,
                   outclass=SoftmaxLayer,
                   bias=True)
trainer = BackpropTrainer(net,
                          trndata,
                          learningrate=0.01,
                          lrdecay=1,
                          momentum=0.00,
                          verbose=False,
                          batchlearning=False,
                          weightdecay=0.0)
trainer.setData(trndata)
trainer.trainUntilConvergence(verbose=True, trainingData=trndata, maxEpochs=1)

net.offset = 0

m = myo.Myo()
e = []


def proc_emg(emg, moving, times=[]):

    global e, emg_correctmean, emg_filtered, emg_rectified, low_pass, sfreq, emg_envelope
    e = emg
    #emg_correctmean = e - np.mean(e)
    emg_correctmean = scipy.signal.detrend(e)
    high = 20 / (1000 / 2)
    low = 450 / (1000 / 2)
Example #39
def run():
    # Parameters used for program
    HIDDEN_LAYERS = [ 35, 35 ]
    LEARNING_DECAY = 1 # Set in range [0.9, 1]
    LEARNING_RATE = 0.096 # Set in range [0, 1]
    MOMENTUM = 0.1 # Set in range [0, 0.5]
    TRAINING_ITERATIONS = 1500
    BATCH_LEARNING = False
    VALIDATION_PROPORTION = 0.0

    # Import the data for the two spirals Task
    dataset, classes = csv.loadCSV(path.abspath('spirals/SpiralOut.txt'))

    # Set up the network and trainer
    inDimension = dataset.indim
    outDimension = dataset.outdim

    layers = [inDimension] + HIDDEN_LAYERS + [outDimension]
    neuralNet = buildNetwork(*layers)

    print neuralNet

    trainer = BackpropTrainer(neuralNet, dataset, learningrate=LEARNING_RATE, momentum=MOMENTUM, 
    	lrdecay=LEARNING_DECAY, batchlearning=BATCH_LEARNING)

    # Train the network
    trainingErrors = []
    validationErrors = []

    for i in xrange(TRAINING_ITERATIONS):
        print "Training iteration: ", i

        # Check if VALIDATION_PROPORTION is not 0. This will split the input dataset into
        # VALIDATION_PROPORTION % for Validation Data and
        # (1 - VALIDATION_PROPORTION) % for Training Data
        # e.g. 25% ValidationData and 75% Training Data

        if VALIDATION_PROPORTION == 0.0 or VALIDATION_PROPORTION == 0:
            # Cannot split the data set into Training and Validation Data. Train the 
            # Neural Network by standard means. This will not calculate Validation Error

            # The result of training is the proportional error for the number of epochs run
            trainingError = trainer.train()
            trainingErrors.append(trainingError)

            # Display the result of training for the iteration
            print "   Training error:    ", trainingError
        else:
            trainingErrors, validationErrors = trainer.trainUntilConvergence(validationProportion=VALIDATION_PROPORTION)

    # create path if it doesn't exist
    generated_dir = path.abspath(path.join("generated", "TaskA-TrainedNN-{}".format(strftime("%Y-%m-%d_%H-%M-%S"))))
    if not path.exists(generated_dir):
      makedirs(generated_dir)

    # save parameters
    with open(path.normpath(path.join(generated_dir, "params.txt")), "a") as f:
      f.write("HIDDEN_LAYERS = {}\n".format(HIDDEN_LAYERS))
      f.write("LEARNING_DECAY = {}\n".format(LEARNING_DECAY))
      f.write("LEARNING_RATE = {}\n".format(LEARNING_RATE))
      f.write("MOMENTUM = {}\n".format(MOMENTUM))
      f.write("TRAINING_ITERATIONS = {}\n".format(TRAINING_ITERATIONS))
      f.write("BATCH_LEARNING = {}\n".format(BATCH_LEARNING))
      f.write("VALIDATION_PROPORTION = {}\n".format(VALIDATION_PROPORTION))

    # Save the Trained Neural Network
    uniqueFileName = path.normpath(path.join(generated_dir, "data.pkl"))

    writeMode = 'wb' # Write Bytes
    pickle.dump(neuralNet, open(uniqueFileName, writeMode))

    import matplotlib.pyplot as plot

    # Plot the results of training
    plot.plot(trainingErrors, 'b')
    plot.ylabel("Training Error")
    plot.xlabel("Training Steps")
    plot.savefig(path.normpath(path.join(generated_dir, "errors.png")))
    plot.show()
    plot.clf()
    
    plot = NN2D.plotNN(network=neuralNet, lowerBound=-6.0, upperBound=6.0, step=0.1)

    if VALIDATION_PROPORTION != 0.0 and VALIDATION_PROPORTION != 0:
      plot = NN2D.plotBarComparison(trainingErrors, validationErrors)

    plot.savefig(path.normpath(path.join(generated_dir, "result.png")))
    plot.show()
Example #40
        f.write(repr(hidden_to_out))
        f.write('\n')


print 'Create network'
net = buildNetwork(NUM_LINES * NUM_COLUMNS, HIDDEN_LAYER_SIZE, 1, bias=True)

ds = None
trainer = None
step = 100
count = 0
while True:
    print 'Create dataset'
    ds = SupervisedDataSet(NUM_LINES * NUM_COLUMNS, 1)
    set_dataset(ds, count * step, (count + 1) * step)

    print 'Train'
    trainer = BackpropTrainer(net, ds)

    start = time.time()
    trainer.trainUntilConvergence()
    print time.time() - start

    # Export net
    # get_connections(net)
    print 'save network from ' + str(count * step) + ' to ' + str(
        (count + 1) * step)
    get_connections_to_file(net, 'network_' + str(count) + '.data')

    count += 1
Example #41
class ml_attack(object):

    """docstring for ml_attack"""
    def __init__(self, pufsimu, pufSize, numOfchallenges):
        self.pufSize = pufSize
        self.numOfchallenges = numOfchallenges
        self.pufsimu = pufsimu

    def run(self):

        # create challenge list
        self.chall_list = pufsim.genChallengeList(self.pufSize, self.numOfchallenges)

        # build network
        self.net = buildNetwork(self.pufSize, 2**self.pufSize, 1, bias=True)

        # set dataset
        self.ds = SupervisedDataSet(self.pufSize, 1)

        for challenge in self.chall_list:
            # chg = changeChallenge(challenge)
            # chg_res = 1 if self.pufsimu.challengeBit(challenge) == 1 else -1
            # self.ds.addSample(chg, [chg_res,])

            # add challenges multiple times
            self.ds.addSample(challenge, [self.pufsimu.challengeBit(challenge),])
            self.ds.addSample(challenge, [self.pufsimu.challengeBit(challenge),])
            self.ds.addSample(challenge, [self.pufsimu.challengeBit(challenge),])
            self.ds.addSample(challenge, [self.pufsimu.challengeBit(challenge),])


        # create trainer
        self.trainer = BackpropTrainer(self.net,
                                        dataset=self.ds,
                                        learningrate = 0.01,
                                        momentum = 0.,
                                        weightdecay=0.)
        #self.trainer.setData(self.ds)

        # train network
        a = self.trainer.trainUntilConvergence(dataset=self.ds,
                                    #trainingData=self.ds,
                                    #validationData=self.ds,
                                    maxEpochs=100,
                                    verbose=False,
                                    continueEpochs=10,
                                    validationProportion=0.25)

        return 0


    def validate(self):
        complete_chall_list = pufsim.genChallengeList(self.pufSize, 2 ** self.pufSize)
        corr_count = 0

        for challenge in complete_chall_list:
            pufResult = self.pufsimu.challengeBit(challenge)

            # chg = changeChallenge(challenge)
            chg = challenge

            annResult = -1
            if (self.net.activate(chg)[0] >= 0.5):
                annResult = 1
            else:
                annResult = 0

            #print "challenge:", challenge, "with result", pufResult, "and ANN result", annResult, "from", self.net.activate(chg)[0]
            if (pufResult == annResult):
                corr_count = corr_count + 1
        print "Possible challenges:", 2**self.pufSize, "with correct guesses:", corr_count
        print "Ratio", (float(corr_count) / 2 ** self.pufSize)

        return (float(corr_count) / 2 ** self.pufSize)
Example #42
    def trainFromFile(self, filename):
        trainer = BackpropTrainer(self.net,
                                  self.createDataSetFromFile(filename))
        trainer.trainUntilConvergence(maxEpochs=10)
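createDataSetFromFile is not shown; a plausible sketch (an assumption about the file format: comma-separated rows with the targets in the last columns) that would satisfy the call above:

    def createDataSetFromFile(self, filename):
        ds = SupervisedDataSet(self.net.indim, self.net.outdim)
        with open(filename) as f:
            for line in f:
                values = [float(v) for v in line.strip().split(',') if v != '']
                ds.addSample(values[:self.net.indim], values[self.net.indim:])
        return ds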
Example #43
    ds.addSample(input, target)

# create a large random data set
import random
random.seed()
trainingSet = SupervisedDataSet(2, 1)
for ri in range(0,1000):
    input,target = dataModel[random.getrandbits(2)]
    trainingSet.addSample(input, target)

from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(2, 2, 1, bias=True)

from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, ds, learningrate = 0.001, momentum = 0.99)
trainer.trainUntilConvergence(verbose=True, dataset=trainingSet, maxEpochs=10)
'''
print '0,0->', net.activate([0,0])
print '0,1->', net.activate([0,1])
print '1,0->', net.activate([1,0])
print '1,1->', net.activate([1,1])

'''


#learn XOR with a neural network with saving of the learned parameters

import pybrain
from pybrain.datasets import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
Example #44
ds = SupervisedDataSet(48, 1)

for input in lines:
    input = input.split(",")
    input = [float(i) for i in input if i != '']
    ds.addSample(input[1:], input[0])

test, train = ds.splitWithProportion(0.25)  # the first returned set is the 25% slice

nn = buildNetwork(48, 20, 1, bias=True, outclass=SigmoidLayer)
nn.reset()

trainer = BackpropTrainer(nn, train)

training_errors, validation_errors = trainer.trainUntilConvergence()
j = 0
print(
    'training errors ------------------------------------------------------------'
)
for value in training_errors:
    #print(training_errors)
    print("%s %s" % (value, j))
    j += 1
print(
    'validation errors ----------------------------------------------------------'
)
k = 0
for val in validation_errors:
    #print(training_errors)
    print("%s %s" % (val, k))