def run(self, fold, X_train, y_train, X_test, y_test):
        DS_train, DS_test = ClassificationData.convert_to_DS(
            X_train,
            y_train,
            X_test,
            y_test)

        NHiddenUnits = self.__get_best_hu(DS_train)
        fnn = buildNetwork(
            DS_train.indim,
            NHiddenUnits,
            DS_train.outdim,
            outclass=SoftmaxLayer,
            bias=True)

        trainer = BackpropTrainer(
            fnn,
            dataset=DS_train,
            momentum=0.1,
            verbose=False,
            weightdecay=0.01)

        trainer.trainEpochs(self.epochs)
        tstresult = percentError(
            trainer.testOnClassData(dataset=DS_test),
            DS_test['class'])

        print "NN fold: %4d" % fold, "; test error: %5.2f%%" % tstresult
        return tstresult / 100.0
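
# The run() method above assumes a ClassificationData.convert_to_DS helper that
# is not shown here. A minimal sketch of what such a converter might look like
# (the helper's name and behavior are inferred from how run() uses it, not confirmed):
from pybrain.datasets import ClassificationDataSet

def convert_to_DS(X_train, y_train, X_test, y_test):
    # one ClassificationDataSet per split, one column per feature
    n_classes = len(set(int(y) for y in y_train.ravel()))
    DS_train = ClassificationDataSet(X_train.shape[1], 1, nb_classes=n_classes)
    DS_test = ClassificationDataSet(X_test.shape[1], 1, nb_classes=n_classes)
    for x, y in zip(X_train, y_train):
        DS_train.addSample(x, [int(y)])
    for x, y in zip(X_test, y_test):
        DS_test.addSample(x, [int(y)])
    # expand scalar class targets to one-of-many encoding so that
    # DS.outdim equals the number of classes, as run() expects
    DS_train._convertToOneOfMany()
    DS_test._convertToOneOfMany()
    return DS_train, DS_test
Example No. 2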
def trainNetwork(net, ds, epochs, learningrate=0.01, momentum=0.4, weightdecay=0.0):
    trainer = BackpropTrainer(net,
                              dataset=ds,
                              learningrate=learningrate,
                              momentum=momentum,
                              weightdecay=weightdecay)
    trainer.trainEpochs(epochs)
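
# A hypothetical call of the helper above (the net and dataset are illustrative):
#   net = buildNetwork(2, 4, 1, bias=True)
#   ds = SupervisedDataSet(2, 1)
#   trainNetwork(net, ds, epochs=50, learningrate=0.005, momentum=0.2)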
Example No. 3
def pybrain_high():
	back = []
	alldate = New_stock.objects.filter().exclude(name='CIHKY')[0:100]
	wholelen = len(alldate)
	test = New_stock.objects.filter(name__contains="CIHKY")
	testlen = len(test)
	# test dataset
	testdata = SupervisedDataSet(5, 1)
	testwhole = newalldate(test, testlen)
	for i in testwhole:
		testdata.addSample((i[0], i[2], i[3], i[4], i[5]), (0,))
	# training dataset
	data = SupervisedDataSet(5, 1)
	wholedate = newalldate(alldate, wholelen)
	for i in wholedate:
		data.addSample((i[0], i[2], i[3], i[4], i[5]), (i[1],))
	#print testwhole
	# build the BP neural network
	net = buildNetwork(5, 3, 1, bias=True, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
	
	trainer = BackpropTrainer(net,data)
	trainer.trainEpochs(epochs=100)
	# train and test the network
#	print trainer.train()
	trainer.train()
	print 'ok'
	out=net.activateOnDataset(testdata)
	for j in test:
		back.append(j.high)
	print back
	print out
	backout = backnormal(back, out)
	print 'okokokoko'
	print backout  # output the back-normalized results for the 22-sample test set
	return out 
Example No. 4
class Classifier():
    def __init__(self, testing = False):
        self.training_set, self.test_set = split_samples(0.5 if testing else 1.0)
        self.net = buildNetwork( self.training_set.indim, self.training_set.outdim, outclass=SoftmaxLayer )
        self.trainer = BackpropTrainer( self.net, dataset=self.training_set, momentum=0.1, verbose=True, weightdecay=0.01)
        self.train()

    def train(self):
        self.trainer.trainEpochs( EPOCHS )
        trnresult = percentError( self.trainer.testOnClassData(),
                                  self.training_set['class'] )
        print "  train error: %5.2f%%" % trnresult

    def classify(self, file):
        strengths = self.net.activate(process_sample(*load_sample(file)))
        print strengths
        best_match = None
        strength = 0.0
        for i,s in enumerate(strengths):
            if s > strength:
                best_match = i
                strength = s
        return SOUNDS[best_match]

    def test(self):
        tstresult = percentError( self.trainer.testOnClassData(
               dataset=self.test_set ), self.test_set['class'] )

        print "  test error: %5.2f%%" % tstresult
Example No. 5
def train_network():

    start = timeit.default_timer()

    # Read data
    race_data = pd.read_csv('../data/half_ironman_data_v1.csv')
    race_factors = pd.read_csv('../data/half_ironman_race_factors.csv')

    # Prepare input data
    supervised_dataset = get_supervised_dataset(race_data, race_factors)

    # Create network
    network = create_feedforward_network(supervised_dataset)

    train_data, test_data = supervised_dataset.splitWithProportion(0.9)

    trainer = BackpropTrainer(network, dataset=train_data)

    # Train our network
    trainer.trainEpochs(1)

    # check network accuracy
    print _sum_square_error(network.activateOnDataset(dataset=train_data), train_data['target'])
    print _sum_square_error(network.activateOnDataset(dataset=test_data), test_data['target'])

    print 'Execution time =>', timeit.default_timer() - start, 'secs'
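Example No. 6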
class bpNetController(object):
	def __init__(self, *args):
		self.debug = False
		self.setup(*args)

	def setup(self, depth = 4, refLen =5):
		self.inCnt = refLen + 1
		self.net = buildNetwork(self.inCnt, depth, 1, bias=True, hiddenclass=TanhLayer)
		self.ds = SupervisedDataSet(self.inCnt, 1)
		self.trainer = BackpropTrainer(self.net, self.ds)
		self.clear()

	def enableDebug(self):
		self.debug = True

	def sample(self, refs, inp, expectedOut):
		if self.debug: print "added {}".format([refs, inp, expectedOut])
		self.ds.addSample(refs+[inp], expectedOut)

	def train(self, epochs = 100):
		self.trainer.trainEpochs(epochs)

	def clear(self):
		self.ds.clear()

	def act(self, refs, inp):
		return self.net.activate(refs+[inp])

	@property
	def curEpoch(self):
		return self.trainer.epoch
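
# A hypothetical usage sketch for bpNetController; all values are invented
# (with the defaults, refLen=5 plus one extra input gives 6 network inputs):
#   ctrl = bpNetController()
#   ctrl.sample([0.1, 0.2, 0.3, 0.4, 0.5], 0.6, [0.7])
#   ctrl.train(epochs=50)
#   print ctrl.act([0.1, 0.2, 0.3, 0.4, 0.5], 0.6)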
Example No. 7
def parse_and_train(self):
    f = open(self.file, 'r')
    learn_lines = []
    for line in f:
        if line.strip() != '':
            learn_lines.append(line)
    f.close()
    i = 0
    while i < len(learn_lines):
        ins, outs = self.convert_to_tuple(learn_lines[i], learn_lines[i + 1])
        i += 2
        self.ds.addSample(ins, outs)
    self.nn = buildNetwork(self.ios, self.hns, 25, self.ios)
    #self.train_dat, self.test_dat = self.ds.splitWithProportion(0.75)
    self.train_dat = self.ds
    trnr = BackpropTrainer(self.nn, dataset=self.train_dat, momentum=0.1, verbose=False, weightdecay=0.01)
    i = 150
    trnr.trainEpochs(150)
    while i < self.epochs:
        trnr.trainEpochs(50)
        i += 50
        print 'For epoch ' + str(i)
        print 'For train:'
        self.print_current_error()
        #print 'For test:'
        #self.print_validation()
    self.nn.sortModules()
Example No. 8
def trainNetwork(epochs, rate, trndata, tstdata, network=None):
    '''
    epochs: number of iterations to run on dataset
    rate: learning rate for the backprop trainer
    trndata: pybrain ClassificationDataSet
    tstdata: pybrain ClassificationDataSet
    network: filename of saved pybrain network, or None
    '''
    if network is None:
        net = buildNetwork(400, 25, 25, 9, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)
    else:
        net = NetworkReader.readFrom(network)

    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample input:"
    print trndata['input'][0]
    print ""
    print "First sample target:", trndata['target'][0]
    print "First sample class:", trndata.getClass(int(trndata['class'][0]))
    print ""

    trainer = BackpropTrainer(net, dataset=trndata, learningrate=rate)
    for i in range(epochs):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
        print "epoch: %4d" % trainer.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % tstresult

    return net
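
# A net returned by trainNetwork can be saved the same way NetworkReader.readFrom
# loads one above; the file name is illustrative:
#   from pybrain.tools.customxml.networkwriter import NetworkWriter
#   NetworkWriter.writeToFile(net, 'trained_net.xml')
Example No. 9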
def main():
    images, labels = load_labeled_training(flatten=True)
    images = standardize(images)
    # images, labels = load_pca_proj(K=100)
    shuffle_in_unison(images, labels)
    ds = ClassificationDataSet(images.shape[1], 1, nb_classes=7)
    for i, l in zip(images, labels):
        ds.addSample(i, [l - 1])
    # ds._convertToOneOfMany()
    test, train = ds.splitWithProportion(0.2)
    test._convertToOneOfMany()
    train._convertToOneOfMany()
    net = shortcuts.buildNetwork(train.indim, 1000, train.outdim, outclass=SoftmaxLayer)

    trainer = BackpropTrainer(net, dataset=train, momentum=0.1, learningrate=0.01, weightdecay=0.05)
    # trainer = RPropMinusTrainer(net, dataset=train)
    # cv = validation.CrossValidator(trainer, ds)
    # print cv.validate()
    net.randomize()
    tr_labels_2 = net.activateOnDataset(train).argmax(axis=1)
    trnres = percentError(tr_labels_2, train["class"])
    # trnres = percentError(trainer.testOnClassData(dataset=train), train['class'])
    testres = percentError(trainer.testOnClassData(dataset=test), test["class"])
    print "Training error: %.10f, Test error: %.10f" % (trnres, testres)
    print "Iters: %d" % trainer.totalepochs

    for i in range(100):
        trainer.trainEpochs(10)
        trnres = percentError(trainer.testOnClassData(dataset=train), train["class"])
        testres = percentError(trainer.testOnClassData(dataset=test), test["class"])
        trnmse = trainer.testOnData(dataset=train)
        testmse = trainer.testOnData(dataset=test)
        print "Iteration: %d, Training error: %.5f, Test error: %.5f" % (trainer.totalepochs, trnres, testres)
        print "Training MSE: %.5f, Test MSE: %.5f" % (trnmse, testmse)
Example No. 10
def measuredLearning(ds):

    trndata,tstdata = splitData(ds,.025)

    # build network

    ###
    # This network has a single 22-unit hidden layer; you may need to add more.
    ###
    fnn = buildNetwork(trndata.indim, 22, trndata.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fnn, verbose=True, dataset=trndata)

    ####
    #   Alter this to figure out how many runs you want. Best to start small
    #   and be sure that you see learning before you ramp it up.
    ###
    for i in range(150):
        trainer.trainEpochs(5)

        trnresult = percentError(trainer.testOnClassData(), trndata['class'])

        tstresult = percentError(trainer.testOnClassData(
            dataset=tstdata), tstdata['class'])

        print "epoch: %4d" % trainer.totalepochs, \
            "  train error: %5.2f%%" % trnresult, \
            "  test error: %5.2f%%" % tstresult
        if trnresult < .5:
            return
Example No. 11
def train(args):
  inputs, ys, gc = args
  row_length = len(inputs[0])
  d = ds.ClassificationDataSet(
      row_length, nb_classes=2, class_labels=['Poisonous',
                                              'Edible'])
  d.setField('input', inputs)
  d.setField('target', ys)
  test, train = d.splitWithProportion(.25)
  test._convertToOneOfMany()
  train._convertToOneOfMany()

  hidden = row_length // 2
  print "indim:", train.indim
  net = buildNetwork(train.indim,
                     hidden,
                     train.outdim,
                     outclass=SoftmaxLayer)
  trainer = BackpropTrainer(net,
                            dataset=train,
                            momentum=0.0,
                            learningrate=0.1,
                            verbose=True,
                            weightdecay=0.0)
  for i in xrange(20):
      trainer.trainEpochs(1)
      trnresult = percentError(trainer.testOnClassData(),
                                train['class'])
      tstresult = percentError(
              trainer.testOnClassData(dataset=test),
              test['class'])
      print "epoch: %4d" % trainer.totalepochs, \
            "  train error: %5.2f%%" % trnresult, \
            "  test error: %5.2f%%" % tstresult
  return net, gc
Example No. 12
class Network():
  
  network = None
  trainer = None
  hidden_layer = None
  hidden_nodes = None
  data = None

  def __init__(self, data, n_hidden_nodes, layertype="Linear"):
    self.hidden_layer = NH.layers[layertype]
    self.data = data

    train_dim_in = data.train_data.indim
    train_dim_out = data.train_data.outdim

    self.network = buildNetwork(train_dim_in, n_hidden_nodes, train_dim_out, outclass=self.hidden_layer)
    self.hidden_nodes = n_hidden_nodes

  def init_backprop_trainer(self, b_momentum=0.1, b_learningrate=0.01, b_verbose=True, b_weightdecay=0.1):
    train_in = self.data.train_data.indim
    train_out = self.data.train_data.outdim
    self.trainer = BackpropTrainer(self.network, dataset=self.data.train_data, \
                                  momentum=b_momentum, learningrate=b_learningrate, verbose=b_verbose, \
                                  weightdecay=b_weightdecay)
  
  def run_network(self, epoch):
    NetworkWriter.writeToFile(self.network, "test.xml")
    self.trainer.trainEpochs(epoch)
    error = percentError(self.trainer.testOnClassData(dataset=self.data.test_data), \
          self.data.test_data['class'])
    return error

  
  """
    def fit(self, Xtrain, Ytrain):
        """ Use entirety of provided X, Y to predict

        Arguments
        Xtrain -- Training data
        Ytrain -- Training prediction
        n_hidden -- each entry in the list n_hidden tells how many hidden nodes at that layer
        epochs_to_train -- number of iterations to train the NN for

        Returns
        nothing -- the fitted trainer is stored on self.trainer
        """
        
        self.Xt = Xtrain
        self.Yt = Ytrain
        n_hidden = self.params['n_hidden']
        epochs_to_train = self.params['epochs_to_train']

        # PyBrain expects data in its DataSet format
        trndata = convert_to_pybrain_dataset(Xtrain, Ytrain)

        # build neural net and train it
        net = buildNetwork(trndata.indim, *(n_hidden + [trndata.outdim]), outclass=SoftmaxLayer)
        trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)

        with open('nn_progress_report.txt', 'a') as f:
            f.write('training %s for %d epochs\n' % (self.params, epochs_to_train))

        #trainer.trainUntilConvergence()
        trainer.trainEpochs(epochs_to_train)

        # Return a functor that wraps calling predict
        self.trainer = trainer
Example No. 14
def createnetwork(n_hoglist,n_classlist,n_classnum,n_hiddensize=100):
    n_inputdim=len(n_hoglist[0])
    n_alldata = ClassificationDataSet(n_inputdim,1, nb_classes=n_classnum)
    for i in range(len(n_hoglist)):
        n_input = n_hoglist[i]
        n_class = n_classlist[i]
        n_alldata.addSample(n_input, [n_class])
    n_tstdata, n_trndata = n_alldata.splitWithProportion( 0.25 )
    n_trndata._convertToOneOfMany( )
    n_tstdata._convertToOneOfMany( )

    print "Number of training patterns: ", len(n_trndata)
    print "Input and output dimensions: ", n_trndata.indim, n_trndata.outdim
    print "First sample (input, target, class):"
    print n_trndata['input'][0], n_trndata['target'][0], n_trndata['class'][0]

    n_fnn = buildNetwork(n_trndata.indim,n_hiddensize, n_trndata.outdim, outclass=SoftmaxLayer)
    n_trainer = BackpropTrainer(n_fnn, dataset=n_trndata, momentum=0.1, verbose=True, weightdecay=0.01)

    n_result = 1
    while n_result > 0.1:
        print n_result
        n_trainer.trainEpochs(1)
        n_trnresult = percentError(n_trainer.testOnClassData(),
                                 n_trndata['class'])
        n_tstresult = percentError(n_trainer.testOnClassData(
            dataset=n_tstdata), n_tstdata['class'])

        print "epoch: %4d" % n_trainer.totalepochs, \
            "  train error: %5.2f%%" % n_trnresult, \
            "  test error: %5.2f%%" % n_tstresult
        n_result = n_tstresult
Example No. 15
def trainByImg(net, test, target, trainNum):
    h, l = test.shape[:2]
    for i in range(h):
        for j in range(l):
            if target[i, j] != 0:
                target[i, j] = 255

    ds = SupervisedDataSet(data_size, data_size)  # SupervisedDataSet takes (input_dim, target_dim)
    # ds = ClassificationDataSet(block_size, nb_classes=2, class_labels=[0,255])
    """
    for i in range(trainNum):
        x = random.uniform(block_width, data_width-block_width-1)
        y = random.uniform(block_length,data_length-block_length-1)
        if target[y,x] == 255:
            targetValue = 1
        else:
            targetValue = 0
        ds.addSample(np.ravel(test[y-block_length:y+block_length+1,x-block_width:x+block_width+1]), [targetValue])

    for i in range(0,data_length,block_width):
        for j in range(0,data_width,block_length):
            ds.addSample(np.ravel(test[i:i+block_length,j:j+block_width]), np.ravel(target[i:i+block_length,j:j+block_width]))

    #ds._convertToOneOfMany()
    """
    # ds.addSample(np.ravel(test), np.ravel(target))
    trainer = BackpropTrainer(net, ds)
    for i in range(1):
        trainer.trainEpochs(1)
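Example No. 16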
	def test_train(self, epochs=1):
		print("Training...")

		split = int(len(self.samples) * 0.7)
		train_samples = self.samples[0:split]
		train_labels  = self.labels[0:split]

		test_samples = self.samples[split:]
		test_labels  = self.labels[split:]

		net = buildNetwork(300, 300, 1)	
		ds = SupervisedDataSet(300, 1)
		for i in range(len(train_samples)):  
			ds.addSample(tuple(np.array(train_samples[i], dtype='float64')), (train_labels[i],))
		
		trainer = BackpropTrainer(net, ds, verbose=True)
		trainer.trainEpochs(epochs)
		self.totalEpochs = epochs
		
		error = 0
		counter = 0
		for i in range(0, 100):
			output = net.activate(tuple(np.array(test_samples[i], dtype='float64')))
			if round(output[0]) != test_labels[i]:
				counter += 1
				print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
				error += 1
			else:
				counter += 1
				print(counter, " : output : ", output[0], " real answer : ", test_labels[i])
		
		print("Trained with " + str(epochs) + " epochs; Total: " + str(self.totalEpochs) + ";")
		return error
Example No. 17
def networkbuild(x, ds, testds, newpredsds):
    net = buildNetwork(13, 30, 1, hiddenclass=TanhLayer, outclass=LinearLayer)
    trainer = BackpropTrainer(net, ds[x], momentum=0.01, verbose=True, learningrate=.001)
    trainer.trainEpochs(500)
    # collect predictions and targets as plain, ordered lists
    predictions = net.activateOnDataset(ds[x])
    preds = [p[0] for p in predictions]
    sector = [t[0] for t in ds[x]['target']]
    testpredictions = net.activateOnDataset(testds[x])
    testpreds = [p[0] for p in testpredictions]
    testsector = [t[0] for t in testds[x]['target']]
    newnnpredictions = net.activateOnDataset(newpredsds[x])
    newnnpreds = [p[0] for p in newnnpredictions]
    everything = {'sector': x,
    'inputs':ds[x]['input'],
    'network': net,
    'trainer': trainer,
    'results':preds,
    'testresults':testpreds,
    'newpredictions':newnnpreds}
    return everything
Example No. 18
	def train(self):
		print "Enter the number of times to train, -1 means train until convergence:"
		t = int(raw_input())
		print "Training the Neural Net"
		print "self.net.indim = "+str(self.net.indim)
		print "self.train_data.indim = "+str(self.train_data.indim)

		trainer = BackpropTrainer(self.net, dataset=self.train_data, momentum=0.1, verbose=True, weightdecay=0.01)
		
		if t == -1:
			trainer.trainUntilConvergence()
		else:
			for i in range(t):
				trainer.trainEpochs(1)
				trnresult = percentError( trainer.testOnClassData(), self.train_data['class'])
				# print self.test_data

				tstresult = percentError( trainer.testOnClassData(dataset=self.test_data), self.test_data['class'] )

				print "epoch: %4d" % trainer.totalepochs, \
					"  train error: %5.2f%%" % trnresult, \
					"  test error: %5.2f%%" % tstresult

				if i % 10 == 0 and i > 1:
					print "Saving Progress... Writing to a file"
					NetworkWriter.writeToFile(self.net, self.path)

		print "Done training... Writing to a file"
		NetworkWriter.writeToFile(self.net, self.path)
		return trainer
Example No. 19
def main():
  trndata, tstdata = createDS()
  for repeat in xrange(repeats):
    iter_trn_results = []
    iter_tst_results = []
    nn = createNNLong(trndata)
    hiddenAstrocyteLayer, outputAstrocyteLayer = associateAstrocyteLayers(nn)
    trainer = BackpropTrainer(nn, dataset=trndata, learningrate=0.01,
                              momentum=0.1, verbose=False, weightdecay=0.0)
    for grand_iter in xrange(iterations):
      trainer.trainEpochs(1)
      trnresult = percentError(trainer.testOnClassData(), trndata['class'])
      iter_trn_results.append(trnresult)
      tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
      iter_tst_results.append(tstresult)
      
      if not grand_iter%20:
        print 'epoch %4d' %trainer.totalepochs, 'train error %5.2f%%' %trnresult, \
            'test error %5.2f%%' %tstresult
            
      inputs = list(trndata['input'])
      random.shuffle(inputs)
      for inpt in inputs:
        nn.activate(inpt)
        for minor_iter in range(hiddenAstrocyteLayer.astrocyte_processing_iters):
          hiddenAstrocyteLayer.update()
          outputAstrocyteLayer.update()
        hiddenAstrocyteLayer.reset()
        outputAstrocyteLayer.reset()
    all_trn_results.append(iter_trn_results)
    all_tst_results.append(iter_tst_results)
  plotResults(all_trn_results)
  plotResults(all_tst_results)
  plt.show()
Example No. 20
def ann(training_filename , testing_filename,itr,epoch,model_type):
    training_start_time = "The generation of data set and training started at :%s" % datetime.datetime.now()
    training_dataset = np.genfromtxt(training_filename, skip_header=0, dtype="int", delimiter='\t')
    data = ClassificationDataSet(len(training_dataset[0]) - 1, 2, nb_classes=2)
    for aSample in training_dataset:
        data.addSample(aSample[0:len(aSample) - 1], [aSample[len(aSample) - 1]])

    data._convertToOneOfMany()

    fann = buildNetwork(314, 2, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fann, dataset=data, momentum=0.1, verbose=False, weightdecay=0.01)
    counter = 0
    print training_start_time
    while counter < itr:
        trainer.trainEpochs(epoch)
        counter += 1
    
    trnresult = percentError( trainer.testOnClassData(),data['class'] )
    trained_result_log = "epoch: %4d" % trainer.totalepochs, \
          "  train error: %5.2f%%" % trnresult;
    
    
    training_time_end = "The training and result logging ended at %s :" % datetime.datetime.now()
    
    filename = working_dir + "\\models\\" + model_type + ".obj"
    save_trained_model(fann, filename)
    
    log_file.write("\n" + training_start_time+"\n")
    log_file.write(str(trained_result_log)+"\n")
    log_file.write(training_time_end+"\n")
Example No. 21
def process_symbol(net, symbol):
    print "processing ", symbol
    # check train_data first; without training data there is nothing to do
    train_data = load(symbol + '.train')
    if len(train_data) == 0:
        print "--no training data, skip", symbol
        return
    print "-training data loaded"
    data = load_stockdata(symbol)
    if len(data) == 0:
        print "--no data, skip", symbol
        return
    print "-stock data loaded"
    settings = load_settings(symbol, data)
    if len(settings) == 0:
        print "--no settings, skip", symbol
        return
    print "-settings loaded"
    # all data is available now
    ds = build_dataset(data, train_data, settings)
    print "-train"
    trainer = BackpropTrainer(net, ds)
    trainer.trainEpochs(epochs)
    print "-saving network"
    NetworkWriter.writeToFile(net, 'network.xml')
    return net
Example No. 22
class neuralNetwork():

	def __init__( self, n_classes ):
		self.n_classes = n_classes

	def fit( self, X, Y ):
		n_features = X.shape[1]
		self.train_ds = ClassificationDataSet( n_features, 1, nb_classes = self.n_classes )
		for train, target in zip( X, Y ):
			self.train_ds.addSample( train, [target] )

		self.train_ds._convertToOneOfMany( )

		self.net = buildNetwork( self.train_ds.indim, 2*n_features, self.train_ds.outdim, outclass = SoftmaxLayer )
		self.trainer = BackpropTrainer( self.net, self.train_ds )

	def predict( self, X ):
		n_features = X.shape[1]
		self.test_ds = ClassificationDataSet( n_features, 1, nb_classes = self.n_classes )
		for test in X:
			self.test_ds.addSample( test, [1] )

		self.test_ds._convertToOneOfMany( )

		for i in range( 100 ):
			self.trainer.trainEpochs( 5 )
			self.labels = self.net.activateOnDataset( self.test_ds )
			self.labels = self.labels.argmax(axis=1)
		return self.labels
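
# Hypothetical scikit-learn-style usage of the wrapper above, assuming numpy
# arrays X (n_samples x n_features) and integer labels Y:
#   clf = neuralNetwork(n_classes=3)
#   clf.fit(X, Y)
#   predicted = clf.predict(X_new)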
Example No. 23
def trainBackProp(self):
    trainer = BackpropTrainer(self.neuralNet, self.dataSet)
    start = time.time()
    trainer.trainEpochs(EPOCHS)
    end = time.time()
    print("Training time -> " + repr(end - start))
    print(repr(trainer.train()))
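Example No. 24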
  def train(self, inputData, verbose=True):

    # Set of data to classify:
    # - IMG_SIZE input dimensions per data point
    # - 1 dimensional output
    # - 4 clusters of classification
    all_faces = ClassificationDataSet(IMG_SIZE, 1, nb_classes=4)

    for entry in inputData:
      (emotion, data) = entry
      all_faces.addSample(data, [emotion])
     
    # Generate a test and a train set from our data
    test_faces, train_faces = all_faces.splitWithProportion(0.25)

    # Hack to convert a 1-dimensional output into 4 output neurons
    test_faces._convertToOneOfMany()   
    train_faces._convertToOneOfMany()
    
    # Set up the actual network. These are the tunable params
    self.fnn = buildNetwork( 
      train_faces.indim, 
      20, 
      train_faces.outdim, 
      outclass=SoftmaxLayer
    )
    
    # Set up the network trainer. Also nice tunable params
    trainer = BackpropTrainer(
      self.fnn, 
      dataset=train_faces, 
      momentum=0.1, 
      verbose=False,
      weightdecay=0.01
    )
    
    tabledata = []     

    # Train the network.
    if verbose:
      # Report after every epoch if verbose
      for i in range(EPOCHS):
        trainer.trainEpochs(1)

        trnresult = percentError( trainer.testOnClassData(),
                                  train_faces['class'] )
        tstresult = percentError( trainer.testOnClassData(
               dataset=test_faces ), test_faces['class'] )

        tabledata.append((trainer.totalepochs,trnresult,tstresult))
    else:
      trainer.trainEpochs(EPOCHS)

    if verbose:
      print "Epoch\tTrain Error\tTest Error"
      for line in tabledata:
         print "%4d\t" % line[0], \
               "%5.2f%%\t\t" % line[1], \
               "%5.2f%%" % line[2]
Example No. 25
def output_model(ds, model_name, input_len, output_len):
    neural_network = buildNetwork(input_len, 20, output_len, bias=True)
    trainer = BackpropTrainer(neural_network, ds, weightdecay=0.1, learningrate=0.0001)
    trainer.trainEpochs(epochs=100)
    model_str = pickle.dumps(neural_network)
    with open(model_name, "wb") as model_file:
        pickle.dump(model_str, model_file, True)
    return neural_network
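
# Loading a model saved by output_model: the network is pickled to a string and
# that string is pickled into the file, so unpickling happens twice:
#   import pickle
#   with open(model_name, "rb") as model_file:
#       neural_network = pickle.loads(pickle.load(model_file))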
Example No. 26
def fnn_classify(ds_train, ds_test):
    """Train neural network without bagging"""
    net = buildNetwork(24, 18, 16, 8, hiddenclass=TanhLayer, outclass=SoftmaxLayer) # define neural net
    trainer = BackpropTrainer(net, dataset=ds_train, learningrate=0.01, momentum=0.1, verbose=True, weightdecay=0.01)
    trainer.trainEpochs(20) # train
    test_result = percentError(trainer.testOnClassData(
                    dataset=ds_test), ds_test['class'])
    return 100-test_result 
Example No. 27
def nn_classify():
    # train_X,Y = load_svmlight_file('data/train_metrix')
    # rows = pd.read_csv('data/log_test2.csv',index_col=0).sort_index().index.unique()
    # train_X = pd.read_csv('data/train_tfidf.csv',index_col=0)
    # test_X = pd.read_csv('data/test_tfidf.csv',index_col=0)
    # select = SelectPercentile(f_classif, percentile=50)
    # select.fit(train_X,Y)
    # train_X = select.transform(train_X)
    # test_X = select.transform(test_X)
    # print 'dump train...'
    # dump_svmlight_file(train_X,Y,'data/train_last')
    # test_Y = [0]*(test_X.shape[0])
    # print 'dump test...'
    # dump_svmlight_file(test_X,test_Y,'data/test_last')

    train_X,Y = load_svmlight_file('data/train_last')
    test_X,test_Y = load_svmlight_file('data/test_last')
    train_X = train_X.toarray()
    test_X = test_X.toarray()
    Y = [int(y)-1 for y in Y]
    print 'Y:',len(Y)
    rows = pd.read_csv('data/log_test2.csv',index_col=0).sort_index().index.unique()
    train_n = train_X.shape[0]
    m = train_X.shape[1]
    test_n = test_X.shape[0]
    print train_n, m  #, test_n
    train_data = ClassificationDataSet(m,1,nb_classes=12)
    test_data = ClassificationDataSet(m,1,nb_classes=12)
    # test_data = ClassificationDataSet(test_n,m,nb_classes=12)
    for i in range(train_n):
        train_data.addSample(np.ravel(train_X[i]),Y[i])
    for i in range(test_n):
        test_data.addSample(test_X[i],Y[i])
    trndata = train_data
    # tstdata = train_data

    trndata._convertToOneOfMany()
    # tstdata._convertToOneOfMany()
    test_data._convertToOneOfMany()

    # first train all the classifiers on the training set
    print 'train classify...'
    fnn = buildNetwork( trndata.indim, 400 , trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, learningrate=0.01 , verbose=True, weightdecay=0.01)
    trainer.trainEpochs(3)
    # print 'Percent Error on Test dataset: ' , percentError( trainer.testOnClassData (
    #            dataset=tstdata )
    #            , )
    print 'end train classify'
    pre_y = trainer.testOnClassData(dataset=trndata)
    print metrics.classification_report(Y,pre_y)
    pre_y = trainer.testOnClassData(dataset=test_data)
    print 'write result...'
    print 'before:',pre_y[:100]
    pre_y = [int(y)+1 for y in pre_y]
    print 'after:',pre_y[:100]
    DataFrame(pre_y,index=rows).to_csv('data/info_test2.csv', header=False)
    print 'end...'
Example No. 28
    def train_net(self, train_data, tags):
        print 'Training the network... {0} ({1} items)'.format(tags, len(train_data))
        train_data._convertToOneOfMany()

        network = buildNetwork(train_data.indim, 8, train_data.outdim, bias=True, hiddenclass=TanhLayer)  # or SigmoidLayer
        trainer = BackpropTrainer(network, dataset=train_data, momentum=0.1, verbose=False, weightdecay=0.01)
        trainer.trainEpochs(10)

        return network
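Example No. 29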
    def __get_best_hu(self, DS):
        X = DS['input']
        y = DS['class']

        hu_number = self.max_hu-self.min_hu+1
        N, M = X.shape
        inner_errors = np.zeros((self.inner_cross, hu_number))

        cv = cross_validation.KFold(N, self.inner_cross, shuffle=True)

        j = 0
        for train_index, test_index in cv:
            X_train = X[train_index, :]
            y_train = y[train_index, :]
            X_test = X[test_index, :]
            y_test = y[test_index, :]

            DS_train, DS_test = ClassificationData.convert_to_DS(
                X_train,
                y_train,
                X_test,
                y_test)

            for i in range(0, hu_number):
                fnn = buildNetwork(
                    DS_train.indim,
                    self.min_hu+i,
                    DS_train.outdim,
                    outclass=SoftmaxLayer,
                    bias=True)

                trainer = BackpropTrainer(
                    fnn,
                    dataset=DS_train,
                    momentum=0.1,
                    verbose=False,
                    weightdecay=0.01)

                trainer.trainEpochs(10)
                inner_errors[j, i] = percentError(
                    trainer.testOnClassData(dataset=DS_test),
                    DS_test['class'])

                print "Inner errors [", j, "] ", inner_errors[j]
            j += 1

        errors = sum(inner_errors, 0) / float(self.inner_cross)

        print "\nNeural network error rates: ", errors, "\n"
        sys.stdout.flush()

        optimal_hu = self.min_hu + errors.argmin()

        print "\nNN best hidden layer neurons = ", optimal_hu, "\n"
        sys.stdout.flush()

        return optimal_hu
Example No. 30
def fit(self):
    trainds = SupervisedDataSet(self.INPUT_SIZE, 1)
    for i in range(self.str_train, self.end_train):
        trainds.appendLinked(self.data[i - self.INPUT_SIZE:i], self.data[i])

    trainer = BackpropTrainer(self.net, trainds, learningrate=self.eta, weightdecay=self.lmda, momentum=0.1, shuffle=False)
    trainer.trainEpochs(self.epochs)

    trainer = None
Example No. 31
    def train_many_pca_reductions(self,
                                  dataset,
                                  portion=1.00,
                                  iters=20,
                                  pca_reductions=[num for num in xrange(3, 7)],
                                  outlier_cutoff=0):
        if portion >= 1:
            training_data = copy.deepcopy(dataset.trn_data)
            test_data = copy.deepcopy(dataset.tst_data)
        if portion < 1:
            dataset.get_portion(portion)
            training_data = copy.deepcopy(dataset.portion["training"])
            test_data = copy.deepcopy(dataset.portion["test"])
        entro = dataset.entro

        if outlier_cutoff > 0.0:
            low_bound = stats.scoreatpercentile(training_data['target'],
                                                outlier_cutoff)
            up_bound = stats.scoreatpercentile(training_data['target'],
                                               100 - outlier_cutoff)
            training_data = self.keep_data_within_bounds(
                training_data, low_bound, up_bound)

        self.iters = iters
        self.sample_size = dataset.tot_size

        self.create_net(in_size=self.in_dim,
                        hidden_size=self.hidden_dim,
                        out_size=self.out_dim,
                        override=True)
        neural_trainer = BackpropTrainer(self.neural_net,
                                         dataset=training_data,
                                         momentum=0.1,
                                         verbose=True,
                                         weightdecay=0.01)

        for reduction in pca_reductions:
            dataset.create_pca_data(pca_dimension_target=reduction)
            pca_training_data = dataset.pca_training_data

            self.create_pca_net(reduction, override=True)
            pca_trainer = BackpropTrainer(self.pca_net,
                                          dataset=pca_training_data,
                                          momentum=0.1,
                                          verbose=True,
                                          weightdecay=0.01)

            ###################################
            if reduction == 10:
                for i in xrange(len(pca_training_data['input'])):
                    print "____________________________________________"
                    print training_data['input'][i]
                    print pca_training_data['input'][i]
                    print "||||||||||||||||||||||||||||||||||||||||||||"
                    print training_data['target'][i]
                    print pca_training_data['target'][i]
                    ########## USE RMSE AND SEE WHAT HAPPENS
            ###################################

            trn_pca_pair_errors = []
            tst_pca_pair_errors = []

            for i in range(iters):
                print "Reduction: " + str(
                    reduction) + " Dimensions of Data, Iteration " + str(i)

                # old_stdout   = sys.stdout            ### CAPTURE
                # capturer     = StringIO.StringIO()   ### CAPTURE
                # sys.stdout   = capturer              ### CAPTURE

                #print "-------------------------"
                neural_trainer.trainEpochs(1)
                #print "---"
                pca_trainer.trainEpochs(1)

                # sys.stdout   = old_stdout            ### CAPTURE
                # output       = capturer.getvalue()   ### CAPTURE
                # trn_err_pair = self.process_output_error_pair(output)

                trn_err_pair = []
                trn_err_pair.append(
                    self.nrmsd_evaluation(training_data, "full"))
                trn_err_pair.append(
                    self.nrmsd_evaluation(pca_training_data, "pca"))

                trn_pca_pair_errors.append(tuple(trn_err_pair))

            self.trn_error_pairs["pca"].append(trn_pca_pair_errors)
        self.generate_pca_error_comparison(pca_reductions)
Example No. 32
def execute_mlp(n_neurons, data_size, learn_rate, momentum_rate, f):

    dic = {'Iris-setosa\n': 0, 'Iris-versicolor\n': 1, 'Iris-virginica\n': 2}

    filename = "iris.txt"
    file = read_file(filename)
    file = change_class_name(file, dic)
    file = str_to_number(file)
    file_array = np.array(file)
    data = normalize_data(file_array)

    #data = order_data(data)

    data = data[::-1]  # reversing the order of the items

    inputs = data[:, :-1]  # copy all columns except the last
    targets = data[:, -1]  # copy the last column

    train_data_temp, test_data_temp = train_test_data(data, data_size)

    train_data = ClassificationDataSet(
        4, nb_classes=3)  # input size, number of classes
    test_data = ClassificationDataSet(
        4, nb_classes=3)  # input size, number of classes

    cont = 0
    for n in range(0, len(train_data_temp)):
        train_data.addSample(train_data_temp[n][:-1], [train_data_temp[n][-1]])
        #print(train_data.getSample(cont))
        #cont = cont + 1

    for n in range(0, len(test_data_temp)):
        test_data.addSample(test_data_temp[n][:-1], [test_data_temp[n][-1]])

    train_data._convertToOneOfMany()
    test_data._convertToOneOfMany()
    '''
    print ("Number of training patterns: ", len(train_data))
    print ("Input and output dimensions: ", train_data.indim, train_data.outdim)
    print ("First sample (input, target, class):")
    print (test_data['input'][0], test_data['target'][0], test_data['class'][0])
    '''

    network = FeedForwardNetwork()

    inLayer = SigmoidLayer(train_data.indim)
    first_hiddenLayer = SigmoidLayer(n_neurons)
    second_hiddenLayer = SigmoidLayer(n_neurons)
    outLayer = SigmoidLayer(train_data.outdim)

    network.addInputModule(inLayer)
    network.addModule(first_hiddenLayer)
    network.addModule(second_hiddenLayer)
    network.addOutputModule(outLayer)

    in_to_hidden = FullConnection(inLayer, first_hiddenLayer)
    hidden_to_hidden = FullConnection(first_hiddenLayer, second_hiddenLayer)
    hidden_to_out = FullConnection(second_hiddenLayer, outLayer)

    network.addConnection(in_to_hidden)
    network.addConnection(hidden_to_hidden)
    network.addConnection(hidden_to_out)

    network.sortModules()
    #trainer = BackpropTrainer( network, dataset=train_data, momentum=momentum_rate, verbose=False, weightdecay=learn_rate)

    trainer = BackpropTrainer(network, dataset=train_data, verbose=False)

    for i in range(1):
        trainer.trainEpochs(1000)

    result = trainer.testOnClassData(test_data, return_targets=True)
    #result = classification(result[1],result[0])
    print(result)
    f.write(str(result))
    f.flush()
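
# The hand-assembled FeedForwardNetwork above could also be approximated with
# the buildNetwork shortcut used elsewhere on this page; note that buildNetwork
# always uses a linear input layer, so this sketch is close but not identical:
#   network = buildNetwork(train_data.indim, n_neurons, n_neurons,
#                          train_data.outdim, bias=False,
#                          hiddenclass=SigmoidLayer, outclass=SigmoidLayer)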
Example No. 33
def perceptron(hidden_neurons=5, weightdecay=0.01, momentum=0.1):
    INPUT_FEATURES = 2
    CLASSES = 3
    HIDDEN_NEURONS = hidden_neurons
    WEIGHTDECAY = weightdecay
    MOMENTUM = momentum

    # Generate the labeled set
    g = generate_data()
    #g = generate_data2()
    alldata = g['d']
    minX, maxX, minY, maxY = g['minX'], g['maxX'], g['minY'], g['maxY']

    # Split data into test and training dataset
    tstdata, trndata = alldata.splitWithProportion(0.25)

    trndata._convertToOneOfMany()  # This is necessary, but I don't know why
    tstdata._convertToOneOfMany()  # http://stackoverflow.com/q/8154674/562769

    print("Number of training patterns: %i" % len(trndata))
    print("Input and output dimensions: %i, %i" %
          (trndata.indim, trndata.outdim))
    print("Hidden neurons: %i" % HIDDEN_NEURONS)
    print("First sample (input, target, class):")
    print(trndata['input'][0], trndata['target'][0], trndata['class'][0])

    fnn = buildNetwork(trndata.indim,
                       HIDDEN_NEURONS,
                       trndata.outdim,
                       outclass=SoftmaxLayer)

    trainer = BackpropTrainer(fnn,
                              dataset=trndata,
                              momentum=MOMENTUM,
                              verbose=True,
                              weightdecay=WEIGHTDECAY)

    # Visualization
    ticksX = arange(minX - 1, maxX + 1, 0.2)
    ticksY = arange(minY - 1, maxY + 1, 0.2)
    X, Y = meshgrid(ticksX, ticksY)

    # need column vectors in dataset, not arrays
    griddata = ClassificationDataSet(INPUT_FEATURES, 1, nb_classes=CLASSES)
    for i in range(X.size):
        griddata.addSample([X.ravel()[i], Y.ravel()[i]], [0])

    for i in range(20):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['class'])

        print("epoch: %4d" % trainer.totalepochs,
              "  train error: %5.2f%%" % trnresult,
              "  test error: %5.2f%%" % tstresult)
        out = fnn.activateOnDataset(griddata)
        # the highest output activation gives the class
        out = out.argmax(axis=1)
        out = out.reshape(X.shape)

        figure(1)  # always print on the same canvas
        ioff()  # interactive graphics off
        clf()  # clear the plot
        for c in [0, 1, 2]:
            here, _ = where(tstdata['class'] == c)
            plot(tstdata['input'][here, 0], tstdata['input'][here, 1], 'o')
        if out.max() != out.min():  # safety check against flat field
            contourf(X, Y, out)  # plot the contour
        ion()  # interactive graphics on
        draw()  # update the plot

    ioff()
    show()
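Example No. 34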
fnn = buildNetwork(PyBDataTrain_nn.indim,
                   2,
                   PyBDataTrain_nn.outdim,
                   bias=True,
                   outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn,
                          dataset=PyBDataTrain_nn,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)

epochs = 6
trnerr = []
tsterr = []
for i in xrange(epochs):
    # If you set the 'verbose' trainer flag, this will print the total error as it goes.
    trainer.trainEpochs(3)
    trnresult = percentError(trainer.testOnClassData(),
                             PyBDataTrain_nn['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=PyBDataTest_nn),
                             PyBDataTest_nn['class'])
    print("epoch: %4d" % trainer.totalepochs,
          " train error: %5.2f%%" % trnresult,
          " test error: %5.2f%%" % tstresult)
    trnerr.append(trnresult)
    tsterr.append(tstresult)

fig_nn = plt.figure()
ax = fig_nn.add_subplot(1, 1, 1)
ax.set_title("Neural Network Convergence")
ax.set_xlabel('Epoch')
ax.set_ylabel('Error')
Example No. 35
the edge weights are obtained through training). The code is as follows:

"""
from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(X.shape[1], 100, y.shape[1], bias=True)

################################ Train the neural network with backpropagation ########################################
# Backpropagation (backprop) works by penalizing the neurons that contribute to prediction errors.
# Starting from the output layer, it searches upward layer by layer for the neurons that predicted
# wrongly and fine-tunes the weights on their inputs so as to repair the output error.

# PyBrain provides an implementation of the backprop algorithm; just invoke a trainer class on the network
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, training, learningrate=0.01, weightdecay=0.01)

# Run the code for a fixed number of steps (epochs); here we train for 20
trainer.trainEpochs(epochs=20)

predictions = trainer.testOnClassData(dataset=testing)
# Once we have the predictions, we can compute the F1 score with scikit-learn.
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(
    f1_score(predictions, y_test.argmax(axis=1), average='micro')))

from sklearn.metrics import classification_report
print(classification_report(y_test.argmax(axis=1), predictions))

################################## Predict the word #####################################################
# Define a function that takes a captcha image, runs it through the trained neural network,
# and returns the predicted word


def predict_captcha(captcha_image, neural_network):
Example No. 36
class NeuralNetwork:
    def __init__(self,
                 data,
                 learning_rate=0.1,
                 momentum=0.1,
                 n_hidden_units=5):
        self.features = data['features']
        self.weights = data['weights']
        labels = data['labels']
        self.labels = np.array([1 if l == 's' else 0 for l in labels])
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.n_hidden_units = n_hidden_units
        self._prepare_data()
        self._build_network(n_hidden_units)

    def _prepare_data(self):
        self.dataset = split_dataset(self.features, self.weights, self.labels)
        classes = set(self.labels)

        def training_set():
            ds = ClassificationDataSet(
                self.dataset['training']['features'].shape[1],
                1,
                nb_classes=len(classes))
            for i in range(self.dataset['training']['features'].shape[0]):
                ds.addSample(self.dataset['training']['features'][i],
                             self.dataset['training']['labels'][i])
            return ds

        def test_set():
            ds = ClassificationDataSet(self.features.shape[1],
                                       1,
                                       nb_classes=len(classes))
            for i in range(self.dataset['test']['features'].shape[0]):
                ds.addSample(self.dataset['test']['features'][i],
                             self.dataset['test']['labels'][i])
            return ds

        self.trndata = training_set()
        self.tstdata = test_set()
        self.tstdata._convertToOneOfMany()
        self.trndata._convertToOneOfMany()

    def _build_network(self, n_hidden_units):
        self.fnn = buildNetwork(self.trndata.indim,
                                n_hidden_units,
                                self.trndata.outdim,
                                outclass=SoftmaxLayer)

    def _create_trainer(self, learning_rate, momentum):
        self.trainer = BackpropTrainer(self.fnn,
                                       dataset=self.trndata,
                                       momentum=momentum,
                                       verbose=False,
                                       weightdecay=0.01,
                                       learningrate=learning_rate)

    def train(self, train_epoch=5):
        self._create_trainer(self.learning_rate, self.momentum)
        self.trainer.trainEpochs(train_epoch)

    def learn_weights(self, max_evaluations, algorithm):
        alg = algorithm(self.trndata.evaluateModuleMSE,
                       self.fnn,
                       verbose=False,
                       minimize=True,
                       maxEvaluations=max_evaluations)
        for i in range(max_evaluations):
            self.fnn = alg.learn(0)[0]

    def predict(self, dataset=None):
        if dataset is None:
            dataset = self.tstdata
        out = self.fnn.activateOnDataset(dataset)
        out = out.argmax(axis=1)
        return out

    def estimate_error(self):
        trnerror = percentError(
            self.trainer.testOnClassData(dataset=self.trndata),
            self.trndata['class'])
        tsterror = percentError(
            self.trainer.testOnClassData(dataset=self.tstdata),
            self.tstdata['class'])
        return self.trainer.totalepochs, trnerror, tsterror

    def train_accuracy(self):
        return accuracy_score(y_pred=self.predict(self.trndata),
                              y_true=self.trndata['class'])

    def test_accuracy(self):
        return accuracy_score(y_pred=self.predict(self.tstdata),
                              y_true=self.tstdata['class'])
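
# Hypothetical driver for the NeuralNetwork wrapper above; the data dict layout
# (features/weights/labels, with 's' marking the positive class) is inferred
# from __init__:
#   nn = NeuralNetwork({'features': X, 'weights': w, 'labels': labels})
#   nn.train(train_epoch=10)
#   print nn.estimate_error()
Example No. 37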
        Normalize(metadata['maxmins'][attr], game[attr][1])
        for attr in attributelist
    ])
    outputs = [game['points'][0] - game['points'][1]]
    alldata.addSample(inputs, outputs)

testdata, traindata = alldata.splitWithProportion(0.70)

print "IMPORTANT ", traindata.outdim
print "Number of training patterns: ", len(traindata)
trainer = BackpropTrainer(n,
                          dataset=traindata,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
trainer.trainEpochs(200)

totalcount = 0
rightcount = 0
sumerrors = 0.0
for data in testdata:
    # data[0] holds the input vector, data[1] the stored point differential
    predicted = n.activate(data[0])
    print predicted[0]

    sumerrors += abs(data[1][0] - predicted[0])
Example No. 38


for row in data_training_raw:
    tuple_data = []
    output_tmp1 = []
    for i in range(6, 21):
        if i in magic1[:7]:
            output_tmp1.append(float(row[i]))
        else:
            tuple_data.append(float(row[i]))
    ds1.addSample(tuple_data, output_tmp1)

net_1 = buildNetwork(8, 10, 7, bias=True)
trainer_1 = BackpropTrainer(net_1, ds1, learningrate=0.01)
trainer_1.trainEpochs(300)  # trainEpochs returns None; errors are printed only in verbose mode


iterOut = -1
for line in data_fit:
    iterOut += 1
    input_1_example = []
    output_1_example = []
    nullAttr = []
    nullNum = 0
    #get the null num:
    iterIn = 0
    for value in line:
        if value == 'NULL' or value == 'PrivacySuppressed':
            nullAttr.append(iterIn)
            nullNum+=1
Example No. 39
EPOCHS = 20

trainer = BackpropTrainer(net,
                          dataset=train_data,
                          momentum=0.3,
                          learningrate=0.01,
                          verbose=False)

trainResultArr = []
epochs = []
testResultArr = []

for i in range(EPOCHS):
    # set the epochs
    trainer.trainEpochs(1)

    outputTrain = net.activateOnDataset(train_data)
    outputTrain = outputTrain.argmax(axis=1)
    trainResult = percentError(outputTrain, real_train)

    outputTest = net.activateOnDataset(test_data)
    outputTest = outputTest.argmax(axis=1)
    testResult = percentError(outputTest, real_test)

    finalTrainResult = 100 - trainResult
    finalTestResult = 100 - testResult

    print "Epoch: " + str(i + 1) + "\tTraining set accuracy: " + str(
        finalTrainResult) + "\tTest set accuracy: " + str(finalTestResult)
    #getStatistics()
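Example No. 40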
class PHC_NN(PHC_FA):
    '''PHC with neural function approximation. '''
    delta = 0.1
    maxNumberofAverage = 30
    weightdecay = 0.001
    trainingEpochPerUpdateWight = 2

    def __init__(self, num_features, num_actions, indexOfAgent=None):
        PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
        self.linQ = buildNetwork(num_features + num_actions,
                                 (num_features + num_actions),
                                 1,
                                 hiddenclass=SigmoidLayer,
                                 outclass=LinearLayer)
        self.linPolicy = buildNetwork(num_features,
                                      (num_features + num_actions),
                                      num_actions,
                                      hiddenclass=SigmoidLayer,
                                      outclass=SigmoidLayer)
        self.trainer4LinQ = BackpropTrainer(self.linQ,
                                            weightdecay=self.weightdecay)
        self.trainer4LinPolicy = BackpropTrainer(self.linPolicy,
                                                 weightdecay=self.weightdecay)

    def _pi(self, state):
        """Given state, compute probabilities for each action."""
        values = np.array(self.linPolicy.activate(r_[state]))
        z = np.sum(values)
        return (values / z).flatten()

    def _qValues(self, state):
        """ Return vector of q-values for all actions, 
        given the state(-features). """
        values = np.array([
            self.linQ.activate(r_[state, one_to_n(i, self.num_actions)])
            for i in range(self.num_actions)
        ])
        return values.flatten()

    def _updateWeights(self, state, action, reward, next_state):
        """ state and next_state are vectors, action is an integer. """
        #update Q-value function approximator
        target = reward + self.rewardDiscount * max(self._qValues(next_state))
        inp = r_[asarray(state), one_to_n(action, self.num_actions)]
        self.trainer4LinQ = BackpropTrainer(self.linQ,
                                            weightdecay=self.weightdecay)
        ds = SupervisedDataSet(self.num_features + self.num_actions, 1)
        ds.addSample(inp, target)
        self.trainer4LinQ.trainOnDataset(ds)
        #Update policy
        bestAction = r_argmax(self._qValues(state))
        target = one_to_n(bestAction, self.num_actions)
        inp = r_[asarray(state)]
        ds = SupervisedDataSet(self.num_features, self.num_actions)
        ds.addSample(inp, target)
        self.trainer4LinPolicy = BackpropTrainer(self.linPolicy,
                                                 learningrate=self.delta,
                                                 weightdecay=self.weightdecay)
        self.trainer4LinPolicy.setData(ds)
        self.trainer4LinPolicy.trainEpochs(
            epochs=self.trainingEpochPerUpdateWight)
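Example No. 41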
class PHC_WoLF_NN(PHC_FA):
    '''PHC-WoLF with neural function approximation.'''
    deltaW = 0.05
    deltaL = 0.2
    maxNumberofAverage = 30
    weightdecay = 0.001
    trainingEpochPerUpdateWight = 1

    def __init__(self, num_features, num_actions, indexOfAgent=None):
        PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
        self.linQ = buildNetwork(num_features + num_actions,
                                 (num_features + num_actions),
                                 1,
                                 hiddenclass=SigmoidLayer,
                                 outclass=LinearLayer)
        self.linPolicy = buildNetwork(num_features,
                                      (num_features + num_actions),
                                      num_actions,
                                      hiddenclass=SigmoidLayer,
                                      outclass=SigmoidLayer)
        self.averagePolicy = []
        self.trainer4LinQ = BackpropTrainer(self.linQ,
                                            weightdecay=self.weightdecay)
        self.trainer4LinPolicy = BackpropTrainer(self.linPolicy,
                                                 weightdecay=self.weightdecay)

    def _pi(self, state):
        """Given state, compute softmax probability for each action."""
        values = np.array(self.linPolicy.activate(r_[state]))
        z = np.sum(values)
        return (values / z).flatten()

    def _qValues(self, state):
        """ Return vector of q-values for all actions, 
        given the state(-features). """
        values = np.array([
            self.linQ.activate(r_[state, one_to_n(i, self.num_actions)])
            for i in range(self.num_actions)
        ])
        return values.flatten()

    def _piAvr(self, state):
        pi = np.zeros(self.num_actions)
        for elem in self.averagePolicy:
            values = np.array(elem.activate(r_[state]))
            pi = np.add(pi.flatten(), values.flatten())
        z = np.sum(pi)
        pi = pi / z
        return pi.flatten()

    def _updateWeights(self, state, action, reward, next_state):
        """ state and next_state are vectors, action is an integer. """
        #update Q-value function approximator
        target = reward + self.rewardDiscount * max(self._qValues(next_state))
        inp = r_[asarray(state), one_to_n(action, self.num_actions)]
        self.trainer4LinQ = BackpropTrainer(self.linQ,
                                            weightdecay=self.weightdecay)
        ds = SupervisedDataSet(self.num_features + self.num_actions, 1)
        ds.addSample(inp, target)
        self.trainer4LinQ.trainOnDataset(ds)

        #update estimate of average policy
        self.averagePolicy.append(copy.deepcopy(self.linPolicy))
        if len(self.averagePolicy) > self.maxNumberofAverage:
            self.averagePolicy.pop(np.random.randint(len(self.averagePolicy)))

        #update policy function approximator
        delta = None
        cumRewardOfCurrentPolicy = 0.0
        values = self._qValues(state)
        pi = self._pi(state)
        for elem_action in range(self.num_actions):
            cumRewardOfCurrentPolicy += pi[elem_action] * values[elem_action]
        cumRewardOfAveragePolicy = 0.0
        api = self._piAvr(state)
        for elem_action in range(self.num_actions):
            cumRewardOfAveragePolicy += api[elem_action] * values[elem_action]
        if cumRewardOfCurrentPolicy > cumRewardOfAveragePolicy:
            delta = self.deltaW
        else:
            delta = self.deltaL

        #Update policy
        bestAction = r_argmax(self._qValues(state))
        target = one_to_n(bestAction, self.num_actions)
        inp = r_[asarray(state)]
        ds = SupervisedDataSet(self.num_features, self.num_actions)
        ds.addSample(inp, target)
        self.trainer4LinPolicy = BackpropTrainer(self.linPolicy,
                                                 learningrate=delta,
                                                 weightdecay=self.weightdecay)
        self.trainer4LinPolicy.setData(ds)
        self.trainer4LinPolicy.trainEpochs(
            epochs=self.trainingEpochPerUpdateWight)
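As a standalone illustration of the WoLF ("Win or Learn Fast") step-size rule applied in `_updateWeights` above — a minimal sketch, assuming numpy is available; the default step sizes mirror `deltaW` and `deltaL`:

import numpy as np

def wolf_delta(pi, avg_pi, q_values, delta_win=0.05, delta_lose=0.2):
    # Step cautiously (small delta) when the current policy already earns more
    # expected reward than the historical average policy, and fast otherwise.
    expected_current = np.dot(pi, q_values)
    expected_average = np.dot(avg_pi, q_values)
    return delta_win if expected_current > expected_average else delta_lose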
Example #42
#coding=utf-8

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import random
# create a neural network with one input layer, one hidden layer, and one output layer
net = buildNetwork(2, 2, 1, bias=True)
ds = SupervisedDataSet(2, 1)
data = []
data.append([(0, 0), (0, )])
data.append([(0, 1), (1, )])
data.append([(1, 1), (0, )])
data.append([(1, 0), (1, )])
data.append([(0, 0), (0, )])
for i in xrange(10000):
    index = random.randint(0, 4)
    ds.addSample(data[index][0], data[index][1])
trainer = BackpropTrainer(net, ds)
trainer.trainEpochs(1)  # one pass over the 10000 randomly drawn samples
print net.activate((0, 0))
print net.activate((1, 0))
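Assuming the network above has learned the table (which is XOR on the two inputs), a quick check of all four input pairs might look like this, thresholding the single output at 0.5:

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    out = net.activate((a, b))[0]
    print (a, b), '->', round(out, 3), '=> class', 1 if out > 0.5 else 0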
Example #43
    def _train(self):
        hidden_layers = []
        bias_layers = []
        compressed_data = copy.copy(
            self.unsupervised
        )  # it isn't compressed at this point, but will be later on
        compressed_supervised = self.supervised

        mid_layers = self.layers[1:-1]  # all layers except the first and last
        for i, current in enumerate(mid_layers):
            # self.layers[i] is the layer *before* "current", since mid_layers
            # is offset from self.layers by one
            prior = self.layers[i]

            # build the NN with a bottleneck
            bottleneck = FeedForwardNetwork()
            in_layer = LinearLayer(prior)
            hidden_layer = self.hidden_layer(current)
            out_layer = self.hidden_layer(prior)
            bottleneck.addInputModule(in_layer)
            bottleneck.addModule(hidden_layer)
            bottleneck.addOutputModule(out_layer)
            in_to_hidden = FullConnection(in_layer, hidden_layer)
            hidden_to_out = FullConnection(hidden_layer, out_layer)
            bottleneck.addConnection(in_to_hidden)
            bottleneck.addConnection(hidden_to_out)
            if self.bias:
                bias1 = BiasUnit()
                bias2 = BiasUnit()
                bottleneck.addModule(bias1)
                bottleneck.addModule(bias2)
                bias_in = FullConnection(bias1, hidden_layer)
                bias_hidden = FullConnection(bias2, out_layer)
                bottleneck.addConnection(bias_in)
                bottleneck.addConnection(bias_hidden)
            bottleneck.sortModules()

            # train the bottleneck
            print "\n...training for layer ", prior, " to ", current
            ds = SupervisedDataSet(prior, prior)
            if self.dropout_on:
                noisy_data, originals = self.dropout(compressed_data,
                                                     noise=0.2,
                                                     bag=1)
                for j, n in enumerate(noisy_data):  # j avoids shadowing the outer loop index
                    ds.addSample(n, originals[j])
            else:
                for d in (compressed_data):
                    ds.addSample(d, d)
            trainer = BackpropTrainer(bottleneck,
                                      dataset=ds,
                                      learningrate=0.001,
                                      momentum=0.05,
                                      verbose=self.verbose,
                                      weightdecay=0.05)
            trainer.trainEpochs(self.compression_epochs)
            if self.verbose:
                print "...data:\n...", compressed_data[
                    0][:8], "\nreconstructed to:\n...", bottleneck.activate(
                        compressed_data[0])[:8]

            hidden_layers.append(in_to_hidden)
            if self.bias: bias_layers.append(bias_in)

            # use the params from the bottleneck to compress the training data
            compressor = FeedForwardNetwork()
            compressor.addInputModule(in_layer)
            compressor.addOutputModule(
                hidden_layer)  # use the hidden layer from above
            compressor.addConnection(in_to_hidden)
            compressor.sortModules()
            compressed_data = [compressor.activate(d) for d in compressed_data]
            compressed_supervised = [
                compressor.activate(d) for d in compressed_supervised
            ]

            self.nn.append(compressor)

        # Train the softmax layer
        print "\n...training for softmax layer "
        softmax = FeedForwardNetwork()
        in_layer = LinearLayer(self.layers[-2])
        out_layer = self.final_layer(self.layers[-1])
        softmax.addInputModule(in_layer)
        softmax.addOutputModule(out_layer)
        in_to_out = FullConnection(in_layer, out_layer)
        softmax.addConnection(in_to_out)
        if self.bias:
            bias = BiasUnit()
            softmax.addModule(bias)
            bias_in = FullConnection(bias, out_layer)
            softmax.addConnection(bias_in)
        softmax.sortModules()

        # see if it's for classification or regression
        if self.final_layer == SoftmaxLayer:
            print "...training for a softmax network"
            ds = ClassificationDataSet(self.layers[-2], 1)
        else:
            print "...training for a regression network"
            ds = SupervisedDataSet(self.layers[-2], self.layers[-1])
        bag = 1
        noisy_data, _ = self.dropout(compressed_supervised, noise=0.5, bag=bag)
        bagged_targets = []
        for t in self.targets:
            for b in range(bag):
                bagged_targets.append(t)
        for i, d in enumerate(noisy_data):
            target = bagged_targets[i]
            ds.addSample(d, target)

        # see if it's for classification or regression
        if self.final_layer == SoftmaxLayer:
            ds._convertToOneOfMany()

        # TODO make these configurable
        trainer = BackpropTrainer(softmax,
                                  dataset=ds,
                                  learningrate=0.001,
                                  momentum=0.05,
                                  verbose=self.verbose,
                                  weightdecay=0.05)
        trainer.trainEpochs(self.compression_epochs)
        self.nn.append(softmax)
        hidden_layers.append(in_to_out)
        if self.bias: bias_layers.append(bias_in)

        # Recreate the whole thing
        # connect the first two
        autoencoder = FeedForwardNetwork()
        first_layer = hidden_layers[0].inmod
        next_layer = hidden_layers[0].outmod
        autoencoder.addInputModule(first_layer)
        connection = FullConnection(first_layer, next_layer)
        connection.params[:] = hidden_layers[0].params
        autoencoder.addConnection(connection)

        # decide whether this should be the output layer or not
        if self.autoencoding_only and (
                len(self.layers) <= 3
        ):  # TODO change this to 2 when you aren't using the softmax above
            autoencoder.addOutputModule(next_layer)
        else:
            autoencoder.addModule(next_layer)
        if self.bias:
            bias = bias_layers[0]
            bias_unit = bias.inmod
            autoencoder.addModule(bias_unit)
            connection = FullConnection(bias_unit, next_layer)
            connection.params[:] = bias.params
            autoencoder.addConnection(connection)

        # connect the middle layers
        for i, h in enumerate(hidden_layers[1:-1]):
            new_next_layer = h.outmod

            # decide whether this should be the output layer or not
            if self.autoencoding_only and i == (len(hidden_layers) - 3):
                autoencoder.addOutputModule(new_next_layer)
            else:
                autoencoder.addModule(new_next_layer)
            connection = FullConnection(next_layer, new_next_layer)
            connection.params[:] = h.params
            autoencoder.addConnection(connection)
            next_layer = new_next_layer

            if self.bias:
                bias = bias_layers[i + 1]
                bias_unit = bias.inmod
                autoencoder.addModule(bias_unit)
                connection = FullConnection(bias_unit, next_layer)
                connection.params[:] = bias.params
                autoencoder.addConnection(connection)

        return autoencoder, hidden_layers, next_layer, bias_layers
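A hedged usage sketch for the trained stack: assuming the attribute layout above (per-layer compressors appended to `self.nn`, followed by the supervised head), a new sample could be encoded and classified like this:

def predict(self, sample):
    # Run the sample through each greedy layer-wise compressor in turn,
    # then through the final softmax/regression head (self.nn[-1]).
    representation = sample
    for compressor in self.nn[:-1]:
        representation = compressor.activate(representation)
    return self.nn[-1].activate(representation)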
Example #44
    def train_many_k_means_reductions(
            self,
            dataset,
            portion=1.00,
            iters=20,
            k_means_reductions=[float(num) / 10 for num in xrange(1, 10)],
            outlier_cutoff=0):
        if portion >= 1:
            training_data = copy.deepcopy(dataset.trn_data)
            test_data = copy.deepcopy(dataset.tst_data)
        if portion < 1:
            dataset.get_portion(portion)
            training_data = copy.deepcopy(dataset.portion["training"])
            test_data = copy.deepcopy(dataset.portion["test"])
        entro = dataset.entro

        if outlier_cutoff > 0.0:
            low_bound = stats.scoreatpercentile(training_data['target'],
                                                outlier_cutoff)
            up_bound = stats.scoreatpercentile(training_data['target'],
                                               100 - outlier_cutoff)
            training_data = self.keep_data_within_bounds(
                training_data, low_bound, up_bound)

        self.iters = iters
        self.sample_size = dataset.tot_size

        self.create_net(in_size=self.in_dim,
                        hidden_size=self.hidden_dim,
                        out_size=self.out_dim,
                        override=True)
        neural_trainer = BackpropTrainer(self.neural_net,
                                         dataset=training_data,
                                         momentum=0.1,
                                         verbose=True,
                                         weightdecay=0.01)

        for reduction in k_means_reductions:
            dataset.create_k_means_data(k_means_reduction=reduction)
            k_means_training_data = dataset.k_means_training_data

            self.create_k_means_net(override=True)
            k_means_trainer = BackpropTrainer(self.k_means_net,
                                              dataset=k_means_training_data,
                                              momentum=0.1,
                                              verbose=True,
                                              weightdecay=0.01)

            trn_k_means_pair_errors = []
            tst_k_means_pair_errors = []

            for i in range(iters):
                print "Reduction: " + str(
                    float(reduction) * 100) + "% of Data, Iteration " + str(i)

                # old_stdout   = sys.stdout            ### CAPTURE
                # capturer     = StringIO.StringIO()   ### CAPTURE
                # sys.stdout   = capturer              ### CAPTURE

                #print "-------------------------"
                neural_trainer.trainEpochs(1)
                #print "---"
                k_means_trainer.trainEpochs(1)

                # sys.stdout   = old_stdout            ### CAPTURE
                # output       = capturer.getvalue()   ### CAPTURE
                # trn_err_pair = self.process_output_error_pair(output)

                trn_err_pair = []
                # trn_err_pair.append(self.nrmsd_evaluation(training_data,"full"))
                # trn_err_pair.append(self.nrmsd_evaluation(k_means_training_data,"k-means"))

                trn_err_pair.append(self.nrmsd_evaluation(test_data, "full"))
                trn_err_pair.append(self.nrmsd_evaluation(test_data, "k-means"))

                trn_k_means_pair_errors.append(tuple(trn_err_pair))

            self.trn_error_pairs["k-means"].append(trn_k_means_pair_errors)
        self.generate_k_means_error_comparison(k_means_reductions)
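`nrmsd_evaluation` is not shown in this snippet; a plausible implementation — the normalized root-mean-square deviation of a net's predictions over a dataset, with the attribute names assumed from the surrounding code and numpy imported as np — might look like:

def nrmsd_evaluation(self, data, which):
    # Pick the net matching the label used throughout this class.
    net = self.k_means_net if which == "k-means" else self.neural_net
    targets = np.array(data['target'])
    preds = np.array([net.activate(x) for x in data['input']])
    rmsd = np.sqrt(np.mean((preds - targets) ** 2))
    return rmsd / (targets.max() - targets.min())  # normalize by the target range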
Example #45
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SigmoidLayer, LinearLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.xml.networkwriter import NetworkWriter

# NOTE: the opening of this snippet was truncated; net2 is reconstructed here
# on the assumption that it mirrors net4 below with a single hidden layer
# (ds2, nEpochs and net_fold are assumed to be defined earlier in the script)
net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
    5,
    15,  # number of hidden units
    15,  # number of hidden units
    3,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
#initialize the structures
net2.randomize()
net2.sortModules()
net4.randomize()
net4.sortModules()
#create trainers
#train for set amount of epochs
#save networks to disc
trainer2 = BackpropTrainer(net2, ds2, verbose=True)
trainer2.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net2, net_fold + 'network_Type2H1New.xml')
trainer4 = BackpropTrainer(net4, ds2, verbose=True)
trainer4.trainEpochs(nEpochs)
NetworkWriter.writeToFile(net4, net_fold + 'network_Type2H2New.xml')

print 'Work completed. The networks have been saved.'
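The saved networks can later be restored with PyBrain's NetworkReader; for example (the five-element input matches the shape reconstructed above):

from pybrain.tools.xml.networkreader import NetworkReader

net2_restored = NetworkReader.readFrom(net_fold + 'network_Type2H1New.xml')
print net2_restored.activate([0.1, 0.2, 0.3, 0.4, 0.5])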
Example #46
# convert a supervised dataset to a classification dataset
def _convert_supervised_to_classification(supervised_dataset,classes):
    classification_dataset = ClassificationDataSet(supervised_dataset.indim,supervised_dataset.outdim,classes)
    
    for n in xrange(0, supervised_dataset.getLength()):
        classification_dataset.addSample(supervised_dataset.getSample(n)[0], supervised_dataset.getSample(n)[1])

    return classification_dataset

olivetti = datasets.fetch_olivetti_faces()
X, y = olivetti.data, olivetti.target
ds = ClassificationDataSet(4096, 1, nb_classes=40)
for k in xrange(len(X)):
    ds.addSample(np.ravel(X[k]), y[k])
tstdata, trndata = ds.splitWithProportion(0.25)
tstdata = _convert_supervised_to_classification(tstdata, 40)
trndata = _convert_supervised_to_classification(trndata, 40)
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
if os.path.isfile('oliv.xml'):
    fnn = NetworkReader.readFrom('oliv.xml')
else:
    fnn = buildNetwork(trndata.indim, 64, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, learningrate=0.01,
                          verbose=True, weightdecay=0.01)
trainer.trainEpochs(50)
NetworkWriter.writeToFile(fnn, 'oliv.xml')
print 'Percent Error on Test dataset: ', percentError(
    trainer.testOnClassData(dataset=tstdata), tstdata['class'])
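To classify a single face with the trained network, activate it on a flattened 64x64 image and take the argmax over the 40 class outputs:

probs = fnn.activate(np.ravel(X[0]))
print 'predicted subject:', np.argmax(probs), '; true subject:', y[0]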
Example #47
trainingset = collection.createAnnTrainingsets()

# Map trainingsets and test sets to PyBrain
DS = ClassificationDataSet(trainingset['input_dimension'], trainingset['output_dimension'])
for i in range(0, len(trainingset['input_arrays'])):
    DS.appendLinked(trainingset['input_arrays'][i], trainingset['output_arrays'][i])


fnn = buildNetwork( DS.indim, 100, DS.outdim, outclass=SoftmaxLayer, fast=False )

trainer = BackpropTrainer(fnn, dataset=DS, momentum=0.01, verbose=True, weightdecay=0.0001)

#Train network and plot
if not args.l:
    trainer.trainEpochs(args.e)  # trainEpochs returns None; it does not produce an error list

#Load network
if args.l:
    with open(args.l, "r") as f:
        fnn = pickle.load(f)

#Save network
if args.s:
    with open("network.obj", "w") as f:
        pickle.dump(fnn, f)
# Plot heatmap
if args.pt:
        xcategories = list(collection.unique_categories)
        ytestsets = []
        zdata = []
Example #48
    letter = random.choice(numbers)
    print(letter, end='')
    shear = random.choice(shears)
    image = create_captcha(letter, shear)
    segement = tf.resize(segement_image(image)[0], (20, 20))
    y = np.zeros(len(numbers))
    index = numbers.index(letter)
    y[index] = 1
    testing.addSample(segement.flatten(), y)
print('\n')

# Build a feed-forward network (400 input units, one hidden layer, one output
# unit per digit), trained with the BP (backpropagation) algorithm
net = buildNetwork(400, 5, len(numbers), bias=True)
trainer = BackpropTrainer(net, training, learningrate=0.01, weightdecay=0.01)
# set the number of training epochs
trainer.trainEpochs(epochs=50)

# save the trained model
pickle.dump(trainer, open('number——tow_predictor.model', 'wb'), 0)

# test on the held-out samples
predictions = trainer.testOnClassData(dataset=testing)
print("Predicted digits:")
for v in predictions:
    print(numbers[v], end='')
print()

# load the image, invert it, resize it, and convert it to mode 'L'
image = Image.open('1.png')
plt.imshow(image)
image = ImageOps.invert(image)
Example #49
def neural_network(data_model, classes, runs):
    # Python brain
    from pybrain.structure import FullConnection, FeedForwardNetwork, LinearLayer, SigmoidLayer, SoftmaxLayer
    from pybrain.datasets import ClassificationDataSet
    from pybrain.utilities import percentError
    from pybrain.supervised.trainers import BackpropTrainer
    from pybrain.tools.xml.networkwriter import NetworkWriter
    from pybrain.tools.xml.networkreader import NetworkReader
    import csv

    # Build Network
    try:

        n = NetworkReader.readFrom('resources/net.xml')
        print 'Loading previous network'

    except Exception:  # no saved network (or unreadable file): build a new one

        print 'Generating new network'
        # Create a new Network
        n = FeedForwardNetwork()

        # Define the input layer
        inLayer = LinearLayer(len(data_model[0][0]))

        # Define a hidden layer
        hiddenLayer = SigmoidLayer(10)
        hiddenLayer2 = SigmoidLayer(10)

        # Define the output layer
        outLayer = LinearLayer(classes)

        # Add layers to network n
        n.addInputModule(inLayer)
        n.addModule(hiddenLayer)
        n.addModule(hiddenLayer2)
        n.addOutputModule(outLayer)

        # Create connections between layers
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_hidden2 = FullConnection(hiddenLayer, hiddenLayer2)
        hidden2_to_out = FullConnection(hiddenLayer2, outLayer)

        # Add connectors to network n
        n.addConnection(in_to_hidden)
        n.addConnection(hidden_to_hidden2)
        n.addConnection(hidden2_to_out)

        # Finish Network
        n.sortModules()

    # Other Stuff

    ds = ClassificationDataSet(len(data_model[0][0]), 1, nb_classes=classes)
    # di = ClassificationDataSet(2,1,0)
    for o in data_model:
        ds.addSample(o[0], o[1])
    testing_data, training_data = ds.splitWithProportion(0.3)

    training_data._convertToOneOfMany()
    testing_data._convertToOneOfMany()

    print "Number of training patterns: ", len(training_data)
    print "Input and output dimensions: ", training_data.indim, training_data.outdim
    print "First sample (input, target, class):"
    print training_data['input'][0], training_data['target'][0], training_data[
        'class'][0]

    trainer = BackpropTrainer(n, dataset=training_data)
    smart = []
    dumb = []

    with open("resources/minimum_error.csv", 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            smart.append(row)

    smart[0] = float(smart[0][0])
    print 'The minimum error from previous runs =', smart[0]

    for t in range(runs):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(),
                                 training_data['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=testing_data),
                                 testing_data['class'])
        print "epoch: %4d" % trainer.totalepochs, "  train error: %5.5f%%" % trnresult, " test error: %5.5f%%" % tstresult
        smart.append(tstresult)

        if tstresult <= min(smart):
            NetworkWriter.writeToFile(n, 'resources/net.xml')
            print 'Best!'
        else:
            dumb.append('1')
            print 'Not an improvement.'

    minimum_error = []
    minimum_error.append(min(smart))

    with open("resources/minimum_error.csv", 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(minimum_error)

    print 'Minimum error (current state)', min(smart)
    return n
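Note that `resources/minimum_error.csv` must exist before the first run, or the `open(..., 'rb')` above raises IOError; a one-time seed file with a worst-case sentinel can be created like this:

import csv

with open("resources/minimum_error.csv", "wb") as f:
    csv.writer(f).writerow([100.0])  # 100% error as the initial "minimum"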
Example #50
class testLearnedWeights(unittest.TestCase):
    def setUp(self):
        # self.net = buildNetwork(4, 6, 3, bias=False, inclass=TanhLayer,
        # 	hiddenclass=TanhLayer, outclass=LinearLayer)
        # self.net.sortModules()
        self.net = createNN()
        self.trn_d, self.tst_d = pybrainData(0.01)
        self.trainer = BackpropTrainer(self.net,
                                       dataset=self.trn_d,
                                       learningrate=0.01,
                                       momentum=0.1,
                                       verbose=True,
                                       weightdecay=0.0)
        self.trainer.trainEpochs(1)

    def testBPHasLearned(self):
        trnresult = percentError(self.trainer.testOnClassData(),
                                 self.trn_d['class'])
        tstresult = percentError(
            self.trainer.testOnClassData(dataset=self.tst_d),
            self.tst_d['class'])
        print 'trn perc error', trnresult
        print 'tst perc error', tstresult

    def testBPWeightsOnMyNetwork(self):
        pyb_ws = self.net.params.copy()
        pop = createPop()
        for nn in pop:
            nn.wi = pyb_ws[:nn.wi.size].reshape(NN.nh, NN.ni).T
            nn.wo = pyb_ws[nn.wi.size:].reshape(NN.no, NN.nh).T
        pairPop(pop, verbose=20)

    def testWeightsAndActivationsEquivalent(self):
        pyb_ws = self.net.params
        nn = NN()
        nn.wi = pyb_ws[:nn.wi.size].reshape(NN.nh, NN.ni).T
        nn.wo = pyb_ws[nn.wi.size:].reshape(NN.no, NN.nh).T
        for i, x in enumerate(self.trn_d['input']):
            nn.activate(x)
            out = self.net.activate(x)
            npt.assert_array_equal(nn.ai, self.net['in'].outputbuffer[0])
            # self.assertItemsEqual(list(nn.ah), list(self.net['hidden0'].outputbuffer[0]))
            for j, pb_ah in enumerate(self.net['hidden0'].outputbuffer[0]):
                self.assertAlmostEqual(nn.ah[j], pb_ah)
            for k, pb_ao in enumerate(out):
                self.assertAlmostEqual(nn.ao[k], pb_ao)

    def testDataAssignedCorrectly(self):
        NN.pat = zip(self.trn_d['input'], self.trn_d['target'])
        pyb_ws = self.net.params.copy()
        nn = NN()
        nn.wi = pyb_ws[:nn.wi.size].reshape(NN.nh, NN.ni).T
        nn.wo = pyb_ws[nn.wi.size:].reshape(NN.no, NN.nh).T
        correct = 0
        wrong = 0
        all_aos = []
        for i, x in enumerate(self.trn_d['input']):
            nn.activate(x)
            out = self.net.activate(x)
            all_aos.append(nn.ao)
            if not (out - self.trn_d['target'][i]).any():
                correct += 1
            else:
                wrong += 1
        for i in range(len(array(NN.pat)[:, 0])):
            npt.assert_array_equal(self.trn_d['input'][i],
                                   array(NN.pat)[:, 0][i])
            npt.assert_array_equal(self.trn_d['input'][i],
                                   array(nn.pat)[:, 0][i])
            npt.assert_array_equal(self.trn_d['target'][i],
                                   array(NN.pat)[:, 1][i])
            npt.assert_array_equal(self.trn_d['target'][i],
                                   array(nn.pat)[:, 1][i])

    def testPercentErrorIsSame(self):
        NN.pat = zip(self.trn_d['input'], self.trn_d['target'])
        pyb_ws = self.net.params.copy()
        nn = NN()
        nn.wi = pyb_ws[:nn.wi.size].reshape(NN.nh, NN.ni).T
        nn.wo = pyb_ws[nn.wi.size:].reshape(NN.no, NN.nh).T
        correct = 0
        wrong = 0
        argmax_cor = 0
        argmax_wng = 0
        all_aos = []
        for i, x in enumerate(self.trn_d['input']):
            nn.activate(x)
            out = self.net.activate(x)
            # print 'ga bp trg', nn.ao, out, self.trn_d['target'][i], '++++' if not (out - self.trn_d['target'][i]).any() else '-'
            all_aos.append(nn.ao.copy())
            if not (out - self.trn_d['target'][i]).any():
                correct += 1
            else:
                wrong += 1
            if argmax(out) == argmax(self.trn_d['target'][i]):
                argmax_cor += 1
            else:
                argmax_wng += 1
        print 'actual', wrong, 'wrong', correct, 'correct', float(wrong) / (
            wrong + correct) * 100
        print 'using argmax', argmax_wng, 'wrong', argmax_cor, 'correct', float(
            argmax_wng) / (argmax_wng + argmax_cor) * 100
        argmax_perc_err = float(argmax_wng) / (argmax_wng + argmax_cor) * 100
        res = nn.sumErrors()
        nn_perc_err = 100 - res[1]
        pb_nn_perc_err = percentError(self.trainer.testOnClassData(),
                                      self.trn_d['class'])
        self.assertAlmostEqual(nn_perc_err, pb_nn_perc_err)
        self.assertAlmostEqual(nn_perc_err, argmax_perc_err)
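These tests run under the standard unittest entry point (assuming `unittest` is imported at the top of the original module):

if __name__ == '__main__':
    unittest.main()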
Example #51
ds._convertToOneOfMany()
ts._convertToOneOfMany()

n = buildNetwork(ds.indim,
                 14,
                 14,
                 ds.outdim,
                 recurrent=True,
                 outclass=SoftmaxLayer)
t = BackpropTrainer(n,
                    dataset=ds,
                    learningrate=0.01,
                    momentum=0.5,
                    verbose=True)
t.trainEpochs(5)
#t.trainUntilConvergence(dataset=ds, maxEpochs=10000, verbose=True)
trnresult = percentError(t.testOnClassData(), ds['class'])
testresult = percentError(t.testOnClassData(dataset=ts), ts['class'])
print "epoch: %4d" % t.totalepochs, "  train error: %5.2f%%" % trnresult, "  test error: %5.2f%%" % testresult

#guesses = []
#
#def one_or_zero(array):
#    return array[1]>.5
#
#for line in test_final:
#    guesses.append(one_or_zero(n.activate(line)))
#    final_guess = pd.DataFrame(guesses,columns=['requester_received_pizza'],dtype=int).join(test_ids)
#    final_guess.set_index('request_id', inplace=True)
#    final_guess.to_csv('/desktop/submission.csv')
Example #52
for tuple in data_training_raw:
    tuple_data = []
    output_tmp17_19 = []
    for i in range(6, 21):
        # columns 17 and 19 are the targets; the remaining columns are inputs
        if i == 17 or i == 19:
            output_tmp17_19.append(float(tuple[i]))
        else:
            tuple_data.append(float(tuple[i]))
    ds17_19.addSample(tuple_data, output_tmp17_19)


net9_20 = buildNetwork(13, 10, 2, bias=True)
trainer9_20 = BackpropTrainer(net9_20, ds9_20, learningrate=0.01)
trainer9_20.trainEpochs(300)  # trainEpochs returns None, so there is no error value to capture

net13_15 = buildNetwork(13, 10, 2, bias=True)
trainer13_15 = BackpropTrainer(net13_15, ds13_15, learningrate=0.01)
trainer13_15.trainEpochs(300)

net9_16 = buildNetwork(13, 10, 2, bias=True)
trainer9_16 = BackpropTrainer(net9_16, ds9_16, learningrate=0.01)
trainer9_16.trainEpochs(300)

net14_18 = buildNetwork(13, 10, 2, bias=True)
trainer14_18 = BackpropTrainer(net14_18, ds14_18, learningrate=0.01)
trainer14_18.trainEpochs(300)

net19_20 = buildNetwork(13, 10, 2, bias=True)
trainer19_20 = BackpropTrainer(net19_20, ds19_20, learningrate=0.01)
Example #53
    def generate_k_means_baseline_comparisons(self,
                                              dataset,
                                              k_reduction,
                                              iters=20):
        test_data = copy.deepcopy(dataset.tst_data)
        train_size = len(dataset.trn_data['target'])
        sub_portion = int(k_reduction * train_size)
        iterations = [it + 1 for it in range(iters)]

        training_data = copy.deepcopy(dataset.trn_data)
        self.create_net(in_size=self.in_dim,
                        hidden_size=self.hidden_dim,
                        out_size=self.out_dim,
                        override=True)
        full_neural_trainer = BackpropTrainer(self.neural_net,
                                              dataset=training_data,
                                              momentum=0.1,
                                              verbose=True,
                                              weightdecay=0.01)

        full_data_error = [[], []]
        for i in range(iters):
            full_neural_trainer.trainEpochs(1)
            full_data_error[0].append(
                self.nrmsd_evaluation(training_data, "full"))
            full_data_error[1].append(self.nrmsd_evaluation(test_data, "full"))

        dataset.create_k_means_data(k_means_reduction=k_reduction)
        k_means_training_data = dataset.k_means_training_data
        self.create_k_means_net(override=True)
        k_means_trainer = BackpropTrainer(self.k_means_net,
                                          dataset=k_means_training_data,
                                          momentum=0.1,
                                          verbose=True,
                                          weightdecay=0.01)

        k_means_data_error = [[], []]
        for i in range(iters):
            k_means_trainer.trainEpochs(1)
            k_means_data_error[0].append(
                self.nrmsd_evaluation(k_means_training_data, "k-means"))
            k_means_data_error[1].append(
                self.nrmsd_evaluation(test_data, "k-means"))

        random_indices = random.sample(range(train_size), sub_portion)
        random_training_data = SupervisedDataSet(self.in_dim, self.out_dim)
        for ind in random_indices:
            in_datum = training_data['input'][ind]
            out_datum = training_data['target'][ind]
            random_training_data.addSample(in_datum, out_datum)
        self.create_net(in_size=self.in_dim,
                        hidden_size=self.hidden_dim,
                        out_size=self.out_dim,
                        override=True)
        random_neural_trainer = BackpropTrainer(self.neural_net,
                                                dataset=random_training_data,
                                                momentum=0.1,
                                                verbose=True,
                                                weightdecay=0.01)

        random_data_error = [[], []]
        for i in range(iters):
            random_neural_trainer.trainEpochs(1)
            random_data_error[0].append(
                self.nrmsd_evaluation(random_training_data, "random"))
            random_data_error[1].append(
                self.nrmsd_evaluation(test_data, "random"))

        early_indices = range(train_size)[0:sub_portion]
        early_training_data = SupervisedDataSet(self.in_dim, self.out_dim)
        for ind in early_indices:
            in_datum = training_data['input'][ind]
            out_datum = training_data['target'][ind]
            early_training_data.addSample(in_datum, out_datum)
        self.create_net(in_size=self.in_dim,
                        hidden_size=self.hidden_dim,
                        out_size=self.out_dim,
                        override=True)
        early_neural_trainer = BackpropTrainer(self.neural_net,
                                               dataset=early_training_data,
                                               momentum=0.1,
                                               verbose=True,
                                               weightdecay=0.01)

        early_data_error = [[], []]
        for i in range(iters):
            early_neural_trainer.trainEpochs(1)
            early_data_error[0].append(
                self.nrmsd_evaluation(early_training_data, "early"))
            early_data_error[1].append(
                self.nrmsd_evaluation(test_data, "early"))

        plt.hold(True)  # deprecated (and a no-op) in newer matplotlib; safe to drop

        plt.plot(iterations, full_data_error[0], 'k--', label="FULL_TRAIN")
        plt.plot(iterations,
                 k_means_data_error[0],
                 'r--',
                 label="KMEANS_TRAIN")
        plt.plot(iterations, random_data_error[0], 'b--', label="RANDOM_TRAIN")
        plt.plot(iterations, early_data_error[0], 'm--', label="EARLY_TRAIN")

        plt.plot(iterations, full_data_error[1], 'k-', label="FULL_TEST")
        plt.plot(iterations, k_means_data_error[1], 'r-', label="KMEANS_TEST")
        plt.plot(iterations, random_data_error[1], 'b-', label="RANDOM_TEST")
        plt.plot(iterations, early_data_error[1], 'm-', label="EARLY_TEST")

        plt.legend(loc='upper right')
        # plt.ylim()
        # plt.xlim()
        plt.title("Error Comparison for Reduction=" + str(k_reduction))
        plt.xlabel("Iteration")
        plt.ylabel("Normalized Prediction Error")
        plt.show()
Example #54
import sys


data = SupervisedDataSet(3, 1)

data.addSample([0,0,0],[1]) #1
data.addSample([0,0,1],[0])	#2
data.addSample([0,1,0],[1])	#3
data.addSample([0,1,1],[0])	#4
data.addSample([1,0,0],[1])	#5
data.addSample([1,0,1],[1])	#6
data.addSample([1,1,0],[1]) #7
data.addSample([1,1,1],[0])	#8



if sys.argv[1] == "train":


	print sys.argv[2]
	net1 = buildNetwork( data.indim, 3 , data.outdim )
	trainer1 = BackpropTrainer(net1, dataset=data, verbose=True)
	for i in xrange(int(sys.argv[2])):
	    trainer1.trainEpochs(1)
	    print '\tvalue after %d epochs: %.2f'%(i, net1.activate([sys.argv[3],sys.argv[4],sys.argv[5]])[0])
	pickle.dump(net1, open('testNetwork.dump', 'w'))
if len(sys.argv) == 4:
	net1 = pickle.load(open('testNetwork.dump'))
	trainer1 = BackpropTrainer(net1, dataset=data, verbose=True)
	trainer1.trainEpochs(1)
	print 'value: %.2f'%(net1.activate([sys.argv[1],sys.argv[2],sys.argv[3]])[0])
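Hypothetical command-line usage (the script filename is an assumption, not part of the original snippet):

#   python logic_net.py train 500 1 0 1
#       trains a fresh network for 500 epochs, printing the output for input
#       (1, 0, 1) after each epoch, then pickles it to testNetwork.dump
#   python logic_net.py 1 0 1
#       reloads testNetwork.dump, trains one more epoch, and prints the
#       network's output for input (1, 0, 1)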
Example #55
for i in range(len(training['x'])):
    trndata.addSample(ravel(training['x'][i]), [training['y'][i]])

# For neural network classification, it is highly advisable to encode classes
# with one output neuron per class. Note that this operation duplicates the
# original targets and stores them in an (integer) field named 'class'.
trndata._convertToOneOfMany()  # this is still needed to make the fnn feel comfy
tstdata._convertToOneOfMany()

fnn = buildNetwork(trndata.indim, 250, trndata.outdim, outclass=SoftmaxLayer)

trainer = BackpropTrainer(fnn,
                          dataset=trndata,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01,
                          learningrate=0.01,
                          lrdecay=1)
for i in range(30):
    # Usually you would train several epochs per step, but training one epoch
    # at a time makes the per-epoch error curves easy to log and visualize.
    trainer.trainEpochs(1)
    trnresult = percentError(trainer.testOnClassData(), trndata['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                             tstdata['class'])

    print("epoch: %4d" % trainer.totalepochs,
          "  train error: %5.2f%%" % trnresult,
          "  test error: %5.2f%%" % tstresult)
Example #56
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
'''
implementation of BP network
'''
from pybrain.tools.shortcuts import buildNetwork  # for building network raw model
from pybrain.structure import SoftmaxLayer  # for output layer activation function
from pybrain.supervised.trainers import BackpropTrainer  # for model trainer

# network structure
n_h = 5  # hidden layer nodes number
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)

# 1.1 model training, using standard BP algorithm
trainer = BackpropTrainer(net, trndata)
trainer.trainEpochs(1)  # training for once

# 1.2 model training, using accumulative BP algorithm
# trainer = BackpropTrainer(net, trndata, batchlearning=True)
# trainer.trainEpochs(50)
# err_train, err_valid = trainer.trainUntilConvergence(maxEpochs=50)

# convergence curve for accumulative BP algorithm process
# import matplotlib.pyplot as plt
# plt.plot(err_train,'b',err_valid,'r')
# plt.title('BP network classification')
# plt.ylabel('accuracy')
# plt.xlabel('epochs')
# plt.show()

# 1.3 model testing
Example #57
train_data._convertToOneOfMany()
test_data._convertToOneOfMany()
all_data._convertToOneOfMany()

print("building")
fnn = buildNetwork( train_data.indim, 10, train_data.outdim, fast=True,
                    outclass = SoftmaxLayer)
trainer = BackpropTrainer( fnn, dataset=train_data, momentum=0.2, verbose=True, learningrate=0.1, lrdecay=1.0)
# trainer = RPropMinusTrainer( fnn, dataset=train_data, momentum=0.1, verbose=True, learningrate=0.01, lrdecay=1.0)

# trainer.trainUntilConvergence()

for i in range(3):
    print("training")
    trainer.trainEpochs(5)

    print("testing")
    trnresult = trainer.testOnData()
    tstresult = trainer.testOnData( dataset=test_data )

    print "epoch: %4d" % trainer.totalepochs, \
        "  train error: %.3f" % trnresult, \
        "  test error: %.3f" % tstresult

    if tstresult <= 0.14:
        break

print("testing")
trnresult = trainer.testOnData()
tstresult = trainer.testOnData( dataset=test_data )
Example #58
# Build neural network
fnn = buildNetwork(trndata.indim,
                   hiddenlayers[0],
                   hiddenlayers[1],
                   hiddenlayers[2],
                   trndata.outdim,
                   outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn,
                          dataset=trndata,
                          learningrate=0.005,
                          momentum=0.0,
                          verbose=True,
                          weightdecay=0.0)

# Train neural network
trainer.trainEpochs(10)
# trainer.trainUntilConvergence(continueEpochs=5, validationProportion=0.25) # Can take a long time
# trainer.train() # Train on one epoch only

# Training error and testing error
trnresult = percentError(trainer.testOnClassData(), trndata['class'])
tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                         tstdata['class'])

print "The hidden layers are", hiddenlayers[0], hiddenlayers[1], hiddenlayers[
    2]
print "Percentage training error: ", trnresult
print "Percentage testing error: ", tstresult

# Test on a couple pictures:
testout = []
Example #59
def Norm(Slave, position=0, box=4, boxC=7):

    order = box * 2
    order2 = boxC * 2

    stock = Slave.database.keys()[position]
    ticker = Slave.database[stock]

    start2 = len(ticker['CLOSE']) % order2

    close = np.array(ticker['CLOSE'])
    open_ = np.array(ticker['OPEN'])
    high = np.array(ticker['HIGH'])
    low = np.array(ticker['LOW'])

    cI, cO, o, h, l = [], [], [], [], []
    cIC, oC, hC, lC = [], [], [], []
    cOC = []

    for i in range(start2, len(close) - order2):

        cI.append((close[i + (boxC - box) + 1:i + boxC + 1]) / close[i + boxC])
        cIC.append((close[i:i + boxC + 1]) / close[i + boxC])
        print " início: ", i
        print ' primeiro index numerador: ', i + (boxC - box)
        print ' primeiro index denominador: ', i + boxC
        raw_input()

        o.append((open_[i + (boxC - box) + 1:i + boxC + 1]) / close[i + boxC])
        oC.append((open_[i:i + boxC + 1]) / close[i + boxC])

        h.append((high[i + (boxC - box) + 1:i + boxC + 1]) / close[i + boxC])
        hC.append((high[i:i + boxC + 1]) / close[i + boxC])

        l.append((low[i + (boxC - box) + 1:i + boxC + 1]) / close[i + boxC])
        lC.append((low[i:i + boxC + 1]) / close[i + boxC])

        cO.append((close[i + boxC:i + (boxC + box)]) / close[i + boxC])
        cOC.append((close[i:i + (boxC + box)]) / close[i + boxC])

    return close, cI, cIC, cO
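    # NOTE: the early return above makes everything below in this function
    # unreachable; it appears to have been left in for debugging.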

    #==============================================================================
    #                          Building the neural network
    #==============================================================================

    #    FNN = FeedForwardNetwork()
    #
    #    inLayer     = LinearLayer(inputs)
    #    hiddenLayer = SigmoidLayer(3)
    #    outLayer    = SigmoidLayer(len(cO))
    #
    #    in2hidden  = FullConnection(inLayer,hiddenLayer)
    #    hidden2out = FullConnection(hiddenLayer,outLayer)
    #
    #    FNN.addConnection(in2hidden)
    #    FNN.addConnection(hidden2out)

    #==============================================================================
    #                  Shaping the data to be fed into the network
    #==============================================================================

    #    alldata = SupervisedDataSet(inputs,len(cO[0]))
    geocI = [[np.prod(cI[i]**(1. / (box - 1)))] for i in range(len(cI))]
    geocIC = [[np.prod(cIC[i]**(1. / (boxC)))] for i in range(len(cIC))]

    geocO = [[np.prod(cO[i]**(1. / (box - 1)))] for i in range(len(cO))]
    geocOC = [[np.prod(cOC[i]**(1. / (boxC)))] for i in range(len(cOC))]

    alldata = ClassificationDataSet(26, 5, nb_classes=24)

    dataInput = np.hstack((geocIC, cIC, geocI, cI, o, h, l))
    dataOutput = np.hstack((geocO, cO))

    for i in range(len(cI)):
        alldata.addSample(dataInput[i], dataOutput[i])

    tstdata, trndata = alldata.splitWithProportion(0.25)

    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()

    fnn = buildNetwork(trndata.indim,
                       26,
                       trndata.outdim,
                       outclass=SoftmaxLayer)

    trainer = BackpropTrainer(fnn,
                              dataset=trndata,
                              momentum=0.1,
                              verbose=True,
                              weightdecay=0.01)
    #

    trainer.trainEpochs(40)

    return cO, cI, o, h, l