def __init__(self, motion, memory, sonar, posture):
         self.motionProxy = motion
         self.memoryProxy = memory
         self.sonarProxy = sonar
         self.postureProxy = posture
         self.useSensors    = True
         self.inputLength = 26+18
         self.outputLength = 26
         self.sonarProxy.subscribe("Closed-Loop Motor Babbling") # Start the sonar
         self.set_stiffness(0.3)
         self.net = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)

         #Hierarchical Control Networks 
         self.netH1 = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)
         self.netH2 = buildNetwork(INPUTSIZE,HIDDENSIZE,OUTPUTSIZE)
         self.sMemory1 = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.sMemory2 = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.mMemory1 = np.array([0]*OUTPUTSIZE)
         self.mMemory2 = np.array([0]*OUTPUTSIZE)
         

         # Access global joint limits.
         self.Body = motion.getLimits("Body")
         self.bangles =  [1] * 26
         self.othersens = [2] * 18
         self.sMemory = np.array([1]*(INPUTSIZE + PREDICTSIZE))
         self.mMemory = np.array([0]*OUTPUTSIZE)
         self.cl = curiosityLoop()

         self.rand = Random()
         self.rand.seed(int(time()))

         #Initialize a model dictionary
         self.models = dict()
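         # Note: INPUTSIZE, HIDDENSIZE, OUTPUTSIZE and PREDICTSIZE are module-level
         # constants assumed to be defined elsewhere in this file; given the
         # inputLength/outputLength values above, INPUTSIZE = 26 + 18 and
         # OUTPUTSIZE = 26 would be consistent choices.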
Example #2
  def reset(self, params, repetition):
    print params

    self.nDimInput = 3
    self.inputEncoder = PassThroughEncoder()

    if params['output_encoding'] is None:
      self.outputEncoder = PassThroughEncoder()
      self.nDimOutput = 1
    elif params['output_encoding'] == 'likelihood':
      self.outputEncoder = ScalarBucketEncoder()
      self.nDimOutput = self.outputEncoder.encoder.n

    if params['dataset'] == 'nyc_taxi' or params['dataset'] == 'nyc_taxi_perturb_baseline':
      self.dataset = NYCTaxiDataset(params['dataset'])
    else:
      raise Exception("Dataset not found")

    self.testCounter = 0
    self.resets = []
    self.iteration = 0

    # initialize LSTM network
    random.seed(6)
    if params['output_encoding'] is None:
      self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                         hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
    elif params['output_encoding'] == 'likelihood':
      self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                         hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)

    (self.networkInput, self.targetPrediction, self.trueData) = \
      self.dataset.generateSequence(
      prediction_nstep=params['prediction_nstep'],
      output_encoding=params['output_encoding'])
 def __init__(self, hidden, **args):
     self.setArgs(**args)
     if self.useSpecialInfo:
         net = buildNetwork(self.inGridSize**2+2, hidden, self.usedActions, outclass = SigmoidLayer)
     else:
         net = buildNetwork(self.inGridSize**2, hidden, self.usedActions, outclass = SigmoidLayer)
     ModuleMarioAgent.__init__(self, net)
 def buildCustomNetwork(self, hiddenLayers, train_faces):
     myfnn = None     
     print "building network..."
     if len(hiddenLayers) == 1:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     elif len(hiddenLayers) == 2:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           hiddenLayers[1],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     elif len(hiddenLayers) == 3:
         myfnn = buildNetwork( 
           train_faces.indim, 
           hiddenLayers[0],
           hiddenLayers[1],
           hiddenLayers[2],
           train_faces.outdim, 
           outclass=SoftmaxLayer
         )
     return myfnn
 def __init__(self, num_features, num_actions, indexOfAgent=None):    
     PHC_FA.__init__(self, num_features, num_actions, indexOfAgent)
     self.linQ = buildNetwork(num_features + num_actions, (num_features + num_actions), 1, hiddenclass = SigmoidLayer, outclass = LinearLayer)
     self.linPolicy = buildNetwork(num_features, (num_features + num_actions), num_actions, hiddenclass = SigmoidLayer,outclass = SigmoidLayer)
     self.averagePolicy=[]
     self.trainer4LinQ=BackpropTrainer(self.linQ,weightdecay=self.weightdecay)
     self.trainer4LinPolicy=BackpropTrainer(self.linPolicy,weightdecay=self.weightdecay)
Example #6
def buildFNN(testData, trainData):
    '''
    Input: testing data object, training data object
    Output: Prints details of best FNN
    '''
        
    accuracy=0
    model = None
    params = None
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )    
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)        
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)    
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units= (Input+Output)Units/2; Output Layer = SoftmaxLayer]\n'''
    
    fnn = buildNetwork( trainData.indim, trainData.indim, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = Input Units; Output Layer = SoftmaxLayer]\n'''
    
        
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SigmoidLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = (Input+Output)Units/2; Output Layer = SigmoidLayer]\n'''
    
        
    fnn = buildNetwork( trainData.indim, trainData.indim, trainData.outdim, hiddenclass=TanhLayer, outclass=SigmoidLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [Hidden Layer = TanhLayer; Hidden Layer Units = Input Units; Output Layer = SigmoidLayer]\n'''
    
    fnn = buildNetwork( trainData.indim, (trainData.indim + trainData.outdim)/2, (trainData.indim + trainData.outdim)/2, trainData.outdim, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias='true' )
    trainer = BackpropTrainer(fnn, dataset=trainData, momentum=0.1, verbose=False, weightdecay=0.01)
    a=calculateANNaccuracy(fnn, trainData, testData, trainer)
    if a>accuracy:
        model=fnn
        accuracy=a
        params='''network = [TWO (2) Hidden Layers = TanhLayer; Hidden Layer Units = (Input+Output)Units/2; Output Layer = SoftmaxLayer]\n'''
    
        
    print '\nThe best model had '+str(accuracy)+'% accuracy and used the parameters:\n'+params+'\n'
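# calculateANNaccuracy() is called above but not shown on this page. A minimal
# sketch of what it might look like, assuming the datasets are
# ClassificationDataSets and that percentError (pybrain.utilities) is imported;
# the epoch budget is hypothetical:
def calculateANNaccuracy(fnn, trainData, testData, trainer):
    trainer.trainEpochs(50)  # hypothetical training budget
    error = percentError(trainer.testOnClassData(dataset=testData), testData['class'])
    return 100.0 - error  # percent accuracy on the test set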
    def __init__(self, prev=5):
        # timsig beat, timsig denom, prev + curr dur/freq, prev 3 chords, bass note
        self.t_ds = SupervisedDataSet((prev+1) * 2 + 4, 2)
        self.t_net = buildNetwork((prev+1) * 2 + 4, 50, 75, 25, 2)
        self.t_freq_err = []
        self.t_dur_err = []

        self.b_ds = SupervisedDataSet((prev+1) * 2 + 4, 2)
        self.b_net = buildNetwork((prev+1) * 2 + 4, 50, 75, 25, 2)
        self.b_freq_err = []
        self.b_dur_err = []

        self.prev = prev
        self.corpus = []
Example #8
    def __init__(self, array=None):

        if array is None:
            ##self.net  = [Network((18,18,1)) for i in range(9)]
            ##self.theta = [self.net[i].theta for i in range(9)]
            self.net = buildNetwork(18, 18, 9)
            self.theta = self.net.params

        else:
            ##self.theta = array
            ##self.net = [Network((18,18,1),self.theta[i]) for i in range(9)]
            self.theta = array
            self.net = buildNetwork(18, 18, 9)
            self.net._setParameters(self.theta)
def buildDecomposableNetwork():
    """ three hidden neurons, with 2 in- and 2 outconnections each. """
    n = buildNetwork(2, 3, 2, bias = False)
    ndc = NeuronDecomposableNetwork.convertNormalNetwork(n)
    # set all the weights to 1
    ndc._setParameters(ones(12))
    return ndc
Example #10
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate, para_momentum
):  # netStructure is a list [in, hidden1, hidden2, out]; momentum is a parameter in SGD
    sampleNum = trainFeature.shape[0]
    featureNum = trainFeature.shape[1]
    Dataset = SupervisedDataSet(featureNum, 1)
    i = 0
    while i < sampleNum:
        print(i)
        Dataset.addSample(list(trainFeature[i]), [trainLabel[i]])
        i += 1
    Network = buildNetwork(
        netStructure[0],
        netStructure[1],
        netStructure[2],
        netStructure[3],
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )
    T = BackpropTrainer(Network, Dataset, learningrate=para_rate, momentum=para_momentum, verbose=True)
    # print(Dataset['input'])
    errorList = []
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    errorList.append(T.testOnData(Dataset))
    T.trainOnDataset(Dataset)
    while abs(T.testOnData(Dataset) - errorList[-1]) > 0.0001:
        T.trainOnDataset(Dataset)
        errorList.append(T.testOnData(Dataset))
    pass  # this step is for the output of predictedLabel
    print(np.array([Network.activate(x) for x in trainFeature]))
    # print(testLabel)
    print(Network.activate([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    return errorList
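# A hypothetical invocation, consistent with the 24-feature activation above
# (the layer sizes and learning parameters here are illustrative only):
# errorList = ANN(trainFeature, trainLabel, testFeature, testLabel,
#                 [24, 12, 12, 1], 0.05, 0.9)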
def main():
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	test_set = test.keys()
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
 def neuralNetwork_eval_func(self, chromosome):
     node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
     if self.check_log(node_num, learning_rate, window_size):
         return self.get_means_from_log(node_num, learning_rate, window_size)[0]
     folded_dataset = self.create_folded_dataset(window_size)
     indim = 21 * (2 * window_size + 1)
     mean_AUC = 0
     mean_decision_value = 0
     mean_mcc = 0
     sample_size_over_thousand_flag = False
     for test_fold in xrange(self.fold):
         test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
         if len(test_labels) + len(train_labels) > 1000:
             sample_size_over_thousand_flag = True
         ds = SupervisedDataSet(indim, 1)
         for i in xrange(len(train_labels)):
             ds.appendLinked(train_dataset[i], [train_labels[i]])
         net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
         trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
         trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
         decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
         decision_values = map(lambda x: x[0], decision_values)
         AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
         mean_AUC += AUC
         mean_decision_value += decision_value_and_max_mcc[0]
         mean_mcc += decision_value_and_max_mcc[1]
         if sample_size_over_thousand_flag:
             break
     if not sample_size_over_thousand_flag:
         mean_AUC /= self.fold
         mean_decision_value /= self.fold
         mean_mcc /= self.fold
     self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     return mean_AUC
Example #13
def measuredLearning(ds):

    trndata,tstdata = splitData(ds,.025)

    #build network


    ###
    # This network has a single hidden layer of 22 units; you may need to
    # adjust its size or add more layers.
    ###
    fnn = buildNetwork( trndata.indim, 22, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, verbose=True,dataset=trndata)
                               
    ####
    #   Alter this to set how many runs you want. Best to start small and be
    #   sure that you see learning before you ramp it up.
    ###
    for i in range(150):
        trainer.trainEpochs(5)
   
        
        trnresult = percentError(trainer.testOnClassData(),trndata['class'] )

        
        tstresult = percentError( trainer.testOnClassData(
           dataset=tstdata ), tstdata['class'] )

        print "epoch: %4d" % trainer.totalepochs, \
            "  train error: %5.2f%%" % trnresult, \
            "  test error: %5.2f%%" % tstresult
        if(trnresult<.5): 
            return
def createNN(indim, hiddim, outdim):
    nn = buildNetwork(indim, hiddim, outdim,
                    bias=False,
                    hiddenclass=TanhLayer, 
                    outclass=TanhLayer)
    nn.sortModules()
    return nn
Example #15
def getErrorPercent(training_dataset, eval_dataset_list, num_hidden, num_epochs):
  num_datapoints = len(training_dataset)
  num_inputs = len(training_dataset[0][0])
  num_outputs = len(training_dataset[0][1])

  # print "Num Inputs:", num_inputs
  # print "Num Outputs:", num_outputs
  # print "Num Hidden Nodes:", num_hidden

  NN = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

  dataset = SupervisedDataSet(num_inputs, num_outputs)
  for datapoint in training_dataset:
    dataset.addSample(datapoint[0], datapoint[1])


  trainer = BackpropTrainer(NN, dataset=dataset, momentum=0.0, verbose=False, weightdecay=0.0)

  for epoch in range(0, num_epochs):
    #print epoch 
    trainer.train()

  errors = []
  for eval_set in eval_dataset_list:
    total_percent_errors = [0]*num_outputs
    for jj in range(0, len(eval_set)):
      nn_out = NN.activate(eval_set[jj][0])
      percent_error = computeError(eval_set[jj][1], nn_out)
      #print percent_error
      total_percent_errors = map(operator.add, percent_error, total_percent_errors)
    #print total_percent_errors
    errors.append(map(operator.div, total_percent_errors, [len(eval_set)]*num_outputs))  # average over the evaluation set
  #print errors
  return errors
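# computeError() is used above but not shown; a minimal sketch, assuming it
# returns a per-output list of absolute errors:
def computeError(target, output):
    return [abs(t - o) for t, o in zip(target, output)]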
def train(data):
	"""
	See http://www.pybrain.org/docs/tutorial/fnn.html

	Returns a neural network trained on the test data.

	Parameters:
	  data - A ClassificationDataSet for training.
	         Should not include the test data.
	"""
	network = buildNetwork(
		# This is where we specify the architecture of
		# the network.  We can play around with different
		# parameters here.
		# http://www.pybrain.org/docs/api/tools.html
		data.indim, 5, data.outdim,
		hiddenclass=SigmoidLayer,
		outclass=SoftmaxLayer
	)

	# We can fiddle around with this guy's options as well.
	# http://www.pybrain.org/docs/api/supervised/trainers.html
	trainer = BackpropTrainer(network, dataset=data)
	trainer.trainUntilConvergence(maxEpochs=20)

	return network
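# A hedged usage sketch for train(); the dimensions, class count, and samples
# are illustrative, not from the original:
#
# from pybrain.datasets import ClassificationDataSet
# data = ClassificationDataSet(4, 1, nb_classes=3)
# ... add samples with data.addSample(features, [label]) ...
# data._convertToOneOfMany()  # one output unit per class
# network = train(data)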
Example #17
 def trainNetwork(self,proportion = 0):        
     if proportion != 0:
         tstdata, trndata = self.alldata.splitWithProportion( 0.01*proportion )
     else:
         trndata = self.alldata
     trndata._convertToOneOfMany( )
     if proportion != 0:
         tstdata._convertToOneOfMany( )
     print "Number of training patterns: ", len(trndata)
     print "Input and output dimensions: ", trndata.indim, trndata.outdim
     self.fnn = buildNetwork( trndata.indim, self.hidden_layer_size, trndata.outdim, 
                              hiddenclass=SigmoidLayer,outclass=SoftmaxLayer )
     self.trainer = BackpropTrainer( self.fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
     for i in range(self.iterations_number):
         self.trainer.trainEpochs( 1 )
         trnresult = percentError( self.trainer.testOnClassData(),
                                   trndata['class'] )
         if proportion != 0:
             tstresult = percentError( self.trainer.testOnClassData(
                dataset=tstdata ), tstdata['class'] )
     
         if proportion != 0:
             print "epoch: %4d" % self.trainer.totalepochs, \
               "  train error: %5.2f%%" % trnresult, \
               "  test error: %5.2f%%" % tstresult
         else:
             print "epoch: %4d" % self.trainer.totalepochs, \
               "  train error: %5.2f%%" % trnresult
Example #18
    def nn_1(self):
        logging.info('Beginning Neural Network model.')
        
        class ThisNN(): # Used to abstract away fit function
            def __init__(self, nn, kg):
                self.nn = nn
                self.kg = kg
                
            def fit(self, X, Y):
                logging.info('Generating a Pybrain SupervisedDataSet')
                ds = SupervisedDataSet(X,Y)
                trainer = BackpropTrainer(self.nn,ds)
                for i in range(0,10):
                    logging.debug(trainer.train()) # XXX Runs once
                logging.info('Training Neural Network until Convergence')

                cv = SupervisedDataSet(self.kg.X_cv[:,1:],self.kg.Y_cv[:,1:])
                trainer.trainUntilConvergence(verbose=11, validationData=cv, trainingData=ds)
            
            def predict_x(self, X):
                Y = []
                for i in range(0,X.shape[0]):
                    Y.append(self.nn.activate(X[i,:]))
                return np.asarray(Y)

        net = buildNetwork(self.X_train.shape[1] - 1,3,1) # X - 1 to avoid ID
        this_nn = ThisNN(net,self) 
        self.__fit(net,this_nn.fit) 
        self.__score_cv(net,this_nn.predict_x)        
        self.__score_test(net,this_nn.predict_x)
        self.predict_y_submission(this_nn.predict_x)
        self.write_submission('nn.csv')
        self.models['nn'] = net
        logging.info('Completed Neural Network model.')
        return net
 def setUp(self):
   self.nn = buildNetwork(4,6,3, bias=False, hiddenclass=TanhLayer, 
                    outclass=TanhLayer)
   self.nn.sortModules()
   self.in_to_hidden, = self.nn.connections[self.nn['in']]
   self.hiddenAstroLayer = AstrocyteLayer(self.nn['hidden0'], 
                                          self.in_to_hidden)
	def __init__(self, inSize, outSize, LearningRate):

		self.learning_rate = LearningRate
		self.ds = SupervisedDataSet(inSize, outSize)
		self.net = buildNetwork(inSize, 10, outSize, hiddenclass=TanhLayer, bias=True)
		self.trainer = BackpropTrainer(self.net, self.ds, learningrate=self.learning_rate, verbose = False, weightdecay=WEIGHT_DECAY)
		self.prediction = [0] * outSize
		self.mse = 100
		self.age=0

		#Specific to Mai's code. Make input and output masks.  
		self.inputMask = [1 for i in range(inSize)]
		
#		self.outputMask = [random.randint(0, 1) for i in range(outSize)]
		self.outputMask = [0]*outSize
		r = random.randint(0,outSize-1)
		self.outputMask[r] = 1

		self.error = 0
		self.errorHistory = []
		self.dErrorHistory = []
		self.slidingError = 0
		self.dError = 0
		self.fitness = 0
		self.problem=r
		self.previousData=[]
    def _InitNet(self):

        # -----------------------------------------------------------------------
        self._pr_line();
        print("| _InitNet(self): \n");
        start_time = time.time();
        # -----------------------------------------------------------------------
        if self._NET_NAME:
            
            # -----------------------------------------------------------------------
            self._SDS = SupervisedDataSet(900, 52); 

            if self._NET_NEW:

                print('| Building new NET: '+self._NET_NAME)
                self._NET = buildNetwork(self._SDS.indim, self._NET_HIDDEN, self._SDS.outdim, bias=True); #,hiddenclass=TanhLayer)
                self._SaveNET();
            else:

                print('| Reading NET from: '+self._NET_NAME)
                self._NET = NetworkReader.readFrom(self._NET_NAME)
            # -----------------------------------------------------------------------
            print('| Making AutoBAK: '+str(self._MK_AUTO_BAK))
            
            if self._MK_AUTO_BAK:
                NetworkWriter.writeToFile(self._NET, self._NET_NAME+".AUTO_BAK.xml");
            # -----------------------------------------------------------------------
            print("| Done in: "+str(time.time()-start_time)+'sec');
            # -----------------------------------------------------------------------

        else:
            
            print('| Unknown NET name: >|'+self._NET_NAME+'|<')
            exit();
Example #22
def NeuralNetwork(tRiver, qRiver, pRiver, TRiver, qnewRiver, pnewRiver, TnewRiver):
    # build neural network with 10 inputs for historic flux data (every other day
    # over the last 20 days), 3 for the last 3 temperature readings, 3 for the
    # last 3 precipitation readings, a hidden layer as wide as the input
    # (to hinder overspecification), and 3 output neurons (flux for next day,
    # first derivative, second derivative)

    Ndim = 10+3+3
    Nout = 3
    net = buildNetwork(Ndim, Ndim, Nout, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(Ndim, Nout)

    # next big job: find data values to build up library of training set
    for t in range(20, len(tRiver)-3):
        input_flow = qRiver[t-20:t:2]   # 10 flux samples: every other day over the last 20 days
        input_prec = pRiver[t-3:t]
        input_temp = TRiver[t-3:t]
        input_vec = np.hstack([input_flow, input_prec, input_temp])

        output_flow = np.hstack([qRiver[t:t+3]]) # first approx, split later for long predictions
        ds.addSample(input_vec, output_flow)

    trainer = BackpropTrainer(net, ds)
    #trainer.train()
    trainer.trainUntilConvergence()

    # now call it repeatedly on the second set

    prediction = net.activate(np.hstack([qnewRiver[:20:2], pnewRiver[:3], TnewRiver[:3]]))
    return prediction
Example #23
def nnTest(tx, ty, rx, ry, iterations):
    print "NN start"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())

    resultst = []
    resultsr = []
    positions = range(iterations)
    network = buildNetwork(16, 16, 1, bias=True)
    ds = ClassificationDataSet(16, 1, class_labels=["1", "0"])
    for i in xrange(len(tx)):
        ds.addSample(tx[i], [ty[i]])
    trainer = BackpropTrainer(network, ds, learningrate=0.05)
    validator = CrossValidator(trainer, ds, n_folds=10)
    print validator.validate()
    for i in positions:
        print trainer.train()
        resultst.append(sum((np.array([round(network.activate(test)) for test in tx]) - ty)**2)/float(len(ty)))
        resultsr.append(sum((np.array([round(network.activate(test)) for test in rx]) - ry)**2)/float(len(ry)))
        print i, resultst[i], resultsr[i]
    plt.plot(positions, resultst, 'g-', positions, resultsr, 'r-')
    plt.axis([0, iterations, 0, 1])
    plt.ylabel("Percent Error")
    plt.xlabel("Network Epoch")
    plt.title("Neural Network Error")
    plt.savefig('nn.png', dpi=500)
    print "NN end"
    print strftime("%a, %d %b %Y %H:%M:%S", localtime())
		def copyAndMutatePredictor(self, winner, loser,distribution):
			newLoser = deepcopy(self.predictors[winner])
			self.predictors[loser] = newLoser

			self.predictors[loser].learning_rate =  FLAGS.learning_rate
			self.predictors[loser].ds = SupervisedDataSet(10, 8)
			self.predictors[loser].net = buildNetwork(10,10,8, bias=True)
			self.predictors[loser].trainer = BackpropTrainer(self.predictors[loser].net, self.predictors[loser].ds, learningrate=self.predictors[loser].learning_rate, verbose = False, weightdecay=WEIGHT_DECAY)
		
			
			if FLAGS.replication:
				for i in range(len(self.predictors[loser].net.params)):
					if random.uniform(0,1)<FLAGS.replication_prob:
						self.predictors[loser].net.params[i] = self.predictors[winner].net.params[i]
			
			# self.predictors[loser].net._setParameters(self.predictors[loser].net.params) # why?

			if FLAGS.mutate_input:
				for i in range(len(self.predictors[loser].inputMask)):
					if random.uniform(0,1) < FLAGS.input_mutation_prob:
						if self.predictors[loser].inputMask[i] == 0:
							self.predictors[loser].inputMask[i] = 1
						else:
							self.predictors[loser].inputMask[i] = 0

			if random.uniform(0,1) < FLAGS.output_mutation_prob:
				self.predictors[loser].outputMask = [0]*World.state_size
				r = np.random.choice(range(World.state_size),p=distribution)
				self.predictors[loser].outputMask[r] = 1
				self.predictors[loser].problem=r
Example #25
    def reset(self):
        FA.reset(self)

        # self.network = buildNetwork(self.indim, 2*(self.indim+self.outdim), self.outdim)
        self.network = buildNetwork(self.indim, self.outdim, bias=True)
        self.network._setParameters(random.normal(0, 0.1, self.network.params.shape))
        self.pybdataset = SupervisedDataSet(self.indim, self.outdim)
Example #26
    def fit(self, X, y):
        self.nn = buildNetwork(*self.layers, bias=True, hiddenclass=SigmoidLayer)

        self.ds = SupervisedDataSet(self.layers[0], self.layers[-1])
        for i, row in enumerate(X):
            self.ds.addSample(row.tolist(), y[i])
        self.improve()
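    # improve() is called above but not shown; a minimal sketch, assuming it
    # simply backprop-trains the stored dataset for a fixed (hypothetical)
    # number of epochs:
    def improve(self, epochs=100):
        trainer = BackpropTrainer(self.nn, self.ds)
        for _ in range(epochs):
            trainer.train()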
    def run(self, fold, X_train, y_train, X_test, y_test):
        DS_train, DS_test = ClassificationData.convert_to_DS(
            X_train,
            y_train,
            X_test,
            y_test)

        NHiddenUnits = self.__get_best_hu(DS_train)
        fnn = buildNetwork(
            DS_train.indim,
            NHiddenUnits,
            DS_train.outdim,
            outclass=SoftmaxLayer,
            bias=True)

        trainer = BackpropTrainer(
            fnn,
            dataset=DS_train,
            momentum=0.1,
            verbose=False,
            weightdecay=0.01)

        trainer.trainEpochs(self.epochs)
        tstresult = percentError(
            trainer.testOnClassData(dataset=DS_test),
            DS_test['class'])

        print "NN fold: %4d" % fold, "; test error: %5.2f%%" % tstresult
        return tstresult / 100.0
Example #28
    def train_net(self,training_times_input=100,num_neroun=200,learning_rate_input=0.1,weight_decay=0.1,momentum_in = 0,verbose_input=True):
        '''
        The main function to train the network
        '''
        print self.trndata['input'].shape
        raw_input()
        self.network=buildNetwork(self.trndata.indim,
                                  num_neroun,self.trndata.outdim,
                                  bias=True,
                                  hiddenclass=SigmoidLayer,
                                  outclass = LinearLayer)
        self.trainer=BackpropTrainer(self.network,
                                     dataset=self.trndata,
                                     learningrate=learning_rate_input,
                                     momentum=momentum_in,
                                     verbose=True,
                                     weightdecay=weight_decay )

        for iter in range(training_times_input):
            print "Training", iter+1,"times"
            self.trainer.trainEpochs(1)
            trn_error = self._net_performance(self.network, self.trndata)
            tst_error = self._net_performance(self.network, self.tstdata)
            print "the trn error is: ", trn_error
            print "the test error is: ",tst_error

        '''prediction on all data:'''
def trainNetwork(inData, numOfSamples, numOfPoints, epochs):
    # Build the dataset
    alldata = createRGBdataSet(inData, numOfSamples, numOfPoints)
    # Split into test and training data
    trndata, tstdata = splitData(alldata)

    # Report  stats
    print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    print "First sample (input, target, class):"
    print trndata['input'][0], trndata['target'][0], trndata['class'][0]

    # Build and train the network
    fnn = buildNetwork( trndata.indim, 256, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.001, verbose=True, weightdecay=0.001)
    #trainer.trainEpochs( epochs )
    trainer.trainUntilConvergence(maxEpochs=epochs)

    # Report results
    trnresult = percentError( trainer.testOnClassData(), trndata['class'] )
    tstresult = percentError( trainer.testOnClassData( dataset=tstdata ), tstdata['class'] )
    print "epoch: %4d" % trainer.totalepochs, \
      "  train error: %5.2f%%" % trnresult, \
      "  test error: %5.2f%%" % tstresult

    # Report results of final network
    checkNeuralNet(trainer, alldata, numOfSamples)
    return fnn
Example #30
 def parse_and_train(self):
     f = open(self.file,'r')
     learn_lines = []
     for line in f:
         if line.strip() != '':
             learn_lines.append(line)
     i = 0
     f.close()
     while i < len(learn_lines):
         ins, outs = self.convert_to_tuple(learn_lines[i],learn_lines[i+1])
         i += 2
         self.ds.addSample(ins,outs)
     self.nn = buildNetwork(self.ios,self.hns,25,self.ios)
     #self.train_dat, self.test_dat = self.ds.splitWithProportion(0.75)
     self.train_dat = self.ds
     trnr = BackpropTrainer(self.nn,dataset=self.train_dat,momentum=0.1,verbose=False,weightdecay=0.01)
     i = 150
     trnr.trainEpochs(150)
     while i < self.epochs:
         trnr.trainEpochs(50)
         i += 50
         print 'For epoch ' + str(i)
         print 'For train:'
         self.print_current_error()
         #print 'For test:'
         #self.print_validation()
     self.nn.sortModules()
Example #31
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

from librosa import core

from scipy import fft

import csv

import math

import os

# TRAINING SESSION
VERSAO = "48000-100-100-10-5-5"
NETWORK = buildNetwork(48000, 100, 100, 10, 5, 5)

# TEST SESSION
# VERSAO = "48000-100-100-10-5-TESTE-AUDIO-JOAS"
# NETWORK = NetworkReader.readFrom('resultado/48000-100-100-10-5/network.xml');

# OTHER
CAMINHO_RESULTADO = "resultado/" + VERSAO + "/"
EXTENSAO_ARQUIVO_AUDIO = ".ogg"
NOME_DOS_CAMPOS_VALIDACAO = [
    "Nome do arquivo", "Segundo", "Valor esperado 1", "Valor obtido 1",
    "Erro 1", "Valor esperado 2", "Valor obtido 2", "Erro 2",
    "Valor esperado 3", "Valor obtido 3", "Erro 3", "Valor esperado 4",
    "Valor obtido 4", "Erro 4", "Valor esperado 5", "Valor obtido 5", "Erro 5",
    "Erro quadrático médio"
]
Example #32
print train
## DataSet
ds = SupervisedDataSet(11, 1)
for i in train.values:
    #print [i[1],i[6],i[34],i[35],i[36]]
    #ds.addSample(tuple(i[1:-1]), i[-1])
    ds.addSample([
        i[1], i[5], i[6], i[10], i[12], i[15], i[17], i[21], i[25], i[26],
        i[32]
    ], i[-1])

## Build neural net
net = buildNetwork(
    11,
    30,  # number of hidden units
    1,
    bias=True,
    hiddenclass=SigmoidLayer,
    outclass=LinearLayer)
trainer = BackpropTrainer(net,
                          ds,
                          verbose=True,
                          momentum=0.1,
                          weightdecay=0.01)
trainer.trainUntilConvergence(verbose=True,
                              validationProportion=0.2,
                              maxEpochs=200,
                              continueEpochs=10)
# modval = ModuleValidator()
# for i in range(100):
#     trainer.trainEpochs(1)
for i in range(0, 115, 1):
	trainingSet.addSample((normalized_data[i], normalized_data[i+1], normalized_data[i+2], normalized_data[i+3]), (normalized_data[i+4]))#116 tuples

# print "trainginSet"
# for inpt, target in trainingSet:
# 	print inpt, target

ds = SupervisedDataSet(4, 1)
for i in range(119, 162, 1):
	ds.addSample((normalized_data[i], normalized_data[i+1], normalized_data[i+2], normalized_data[i+3]), (normalized_data[i+4]))#44 tuples

# print "dataset"
# for inpt, target in ds:
# 	print inpt, target

net = buildNetwork(4, 2, 1, bias = True, hiddenclass = SigmoidLayer)
# net = buildNetwork(3, 4, 1, bias = True, hiddenclass = TanhLayer)

trainer = BackpropTrainer(net, ds, learningrate = 0.001, momentum = 0.99)
print "entrenando hasta converger..."
print "training until the Convergence: ", trainer.trainUntilConvergence(verbose=True,
															trainingData=trainingSet,
															validationData=ds,
															maxEpochs=100)

print "predicting the next month: 0.28733060532417387"
y = net.activate([0.12897219396266188, 0.10031611360730924, 0.13198268361816343, 0.239516498322237])
print y

print "des normalizando"
#((y - d1) * (x_max - x_min) / (d2 - d1)) + x_min= x
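# A minimal sketch of the de-normalization step implied by the formula above,
# assuming d1/d2 are the bounds of the normalized range and x_min/x_max the
# raw data range (all four defined by the earlier normalization step):
def denormalize(y, d1, d2, x_min, x_max):
    return ((y - d1) * (x_max - x_min) / (d2 - d1)) + x_min

# e.g. with a [0, 1] normalized range (hypothetical bounds):
# print denormalize(y, 0.0, 1.0, x_min, x_max)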
Example #34
    file = open(path, "r")

    data = []

    for linha in file:  # read each line of the file
        linha = linha.rstrip()  # strip control characters, \n
        digitos = linha.split(" ")  # grab the digits
        for numero in digitos:  # for each number on the line
            data.append(numero)  # append it to the data vector

    file.close()
    return data


# configure the artificial neural network and the training dataSet
network = buildNetwork(45, 1000, 1000, 1)  # define network
dataSet = SupervisedDataSet(45, 1)  # define dataSet
'''
arquivos = ['1.txt', '1a.txt', '1b.txt', '1c.txt',
            '1d.txt', '1e.txt', '1f.txt']
'''
arquivos = ['1.txt']
# the expected answer for the digit
resposta = [[1]]
#resposta = [[1], [1], [1], [1], [1], [1], [1]]

i = 0
for arquivo in arquivos:  # for each training file
    data = getData(arquivo)  # get the data from the file
    dataSet.addSample(data, resposta[i])  # add the data to the dataSet
    i = i + 1
Example #35
# Our dataset is divided, with 80% as training data and 
# 20% as test data
tstdata_temp, trndata_temp = alldata.splitWithProportion(0.2)

tstdata = ClassificationDataSet(1600, 1, nb_classes=2)
for n in xrange(0, tstdata_temp.getLength()):
    tstdata.addSample( tstdata_temp.getSample(n)[0], tstdata_temp.getSample(n)[1] )

trndata = ClassificationDataSet(1600, 1, nb_classes=2)
for n in xrange(0, trndata_temp.getLength()):
    trndata.addSample( trndata_temp.getSample(n)[0], trndata_temp.getSample(n)[1] )

trndata._convertToOneOfMany( )
tstdata._convertToOneOfMany( )

fnn = buildNetwork( trndata.indim,120,trndata.outdim, outclass=SoftmaxLayer )
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.00001)
modval = ModuleValidator()

# We define the number of iterations we want to train our model.
for i in range(100):
	trainer.trainEpochs(1)
	trnresult = percentError(trainer.testOnClassData(dataset=trndata),trndata['class'])
	print "epoch : " , trainer.totalepochs," train error: " , trnresult

# We validate our model by applying the n-folds technique and check the Mean Square Error
cv = CrossValidator( trainer, trndata, n_folds=5, valfunc=modval.MSE )
print "MSE %f at loop %i"%(cv.validate(),i)

# Finally we test our data on the model we built
perror = percentError(trainer.testOnClassData(dataset=tstdata),tstdata['class'])
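# report the held-out test error computed above:
print "test error: %5.2f%%" % perror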
def run(args):
    manual_validated_file = args.manual_validated_file # 'JHotDraw54b1_clones.xml.clones2'
    save_target_name = args.save_target_name #'newTrainedModel'



    print 'Training the Model. Please wait ...'

    manual_validation_data = pd.read_csv('manual_validator/input_clone_pairs/'+manual_validated_file)
    inputDim = 6
    alldata = ClassificationDataSet(inputDim, 1, nb_classes=2)
    txlHelper = TXLHelper()
    for i in range(0, len(manual_validation_data)):
        #print manual_validation_data.iloc[i][3], manual_validation_data.iloc[i][4]
        #print manual_validation_data.iloc[i][2]
        cloneFragment_1_path, cloneFragment_1_start, cloneFragment_1_end = manual_validation_data.iloc[i][3].split()[0], \
                                                                           manual_validation_data.iloc[i][3].split()[1], \
                                                                           manual_validation_data.iloc[i][3].split()[2]
        cloneFragment_2_path, cloneFragment_2_start, cloneFragment_2_end = manual_validation_data.iloc[i][4].split()[0], \
                                                                           manual_validation_data.iloc[i][4].split()[1], \
                                                                           manual_validation_data.iloc[i][4].split()[2]
        cloneFragment_1 = read_file_in_line_range(filePath='manual_validator/input_clone_pairs/'+cloneFragment_1_path, \
                                                  startLine=cloneFragment_1_start, endLine=cloneFragment_1_end)
        cloneFragment_2 = read_file_in_line_range(filePath='manual_validator/input_clone_pairs/' + cloneFragment_2_path,
                                                  startLine=cloneFragment_2_start, endLine=cloneFragment_2_end)


        type1sim_by_line, type2sim_by_line, type3sim_by_line = txlHelper.app_code_clone_similaritiesNormalizedByLine(cloneFragment_1,
                                                     cloneFragment_2, 'java')

        type1sim_by_token, type2sim_by_token, type3sim_by_token = txlHelper.app_code_clone_similaritiesNormalizedByToken(cloneFragment_1,
                                                     cloneFragment_2, 'java')

        label = manual_validation_data.iloc[i][2]
        if label == 'true':
            label = 1
        else:
            label = 0



        input = np.array([type1sim_by_line, type2sim_by_line, type3sim_by_line, type1sim_by_token, type2sim_by_token, type3sim_by_token])

        alldata.addSample(input, int(label))



    # # np.nan_to_num(alldata)
    # # alldata = alldata[~np.isnan(alldata)]
    # #alldata.fillna(0)
    # np.set_printoptions(precision=3)
    # print alldata

    #
    # def load_training_dataSet(fileName):
    #     data = pd.read_csv(fileName, sep=',', header=None)
    #     #data.columns = ["state", "outcome"]
    #     return data
    #
    # myclones_data = load_training_dataSet('Datasets/new_dataset_with_new_features.csv')
    # myclones_data = myclones_data.values
    #
    #
    # inputDim = 6
    #
    #
    # means = [(-1,0),(2,4),(3,1)]
    # cov = [diag([1,1]), diag([0.5,1.2]), diag([1.5,0.7])]
    # alldata = ClassificationDataSet(inputDim, 1, nb_classes=2)
    #
    #
    # #input = np.array([ myclones_data[n][16], myclones_data[n][17], myclones_data[n][18], myclones_data[n][15],myclones_data[n][11],myclones_data[n][12],   myclones_data[n][26], myclones_data[n][27]] )
    #
    # for n in xrange(len(myclones_data)):
    #     #for klass in range(3):
    #     input = np.array(
    #         [myclones_data[n][11], myclones_data[n][17], myclones_data[n][12], myclones_data[n][15], myclones_data[n][18],
    #          myclones_data[n][16]])
    #     #print (n, "-->", input)
    #     alldata.addSample(input, int(myclones_data[n][35]))
    #
    #
    tstdata, trndata = alldata.splitWithProportion( 0.25 )

    #print(tstdata)

    tstdata_new = ClassificationDataSet(inputDim, 1, nb_classes=2)
    for n in xrange(0, tstdata.getLength()):
        tstdata_new.addSample( tstdata.getSample(n)[0], tstdata.getSample(n)[1] )

    trndata_new = ClassificationDataSet(inputDim, 1, nb_classes=2)
    for n in xrange(0, trndata.getLength()):
        trndata_new.addSample( trndata.getSample(n)[0], trndata.getSample(n)[1])

    trndata = trndata_new
    tstdata = tstdata_new

    #print("Before --> ", trndata)

    trndata._convertToOneOfMany( )
    tstdata._convertToOneOfMany( )



    fnn = buildNetwork( trndata.indim, 107, trndata.outdim, outclass=SoftmaxLayer )
    trainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1,learningrate=0.05 , verbose=True, weightdecay=0.001)



    #print "Printing Non-Trained Network..."






    """
    ticks = arange(-3.,6.,0.2)
    X, Y = meshgrid(ticks, ticks)
    # need column vectors in dataset, not arrays
    griddata = ClassificationDataSet(7,1, nb_classes=2)
    for i in xrange(X.size):
        griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])
    griddata._convertToOneOfMany()  # this is still needed to make the fnn feel comfy
    
    """



    #trainer.trainEpochs(1)
    #trainer.testOnData(verbose=True)
    #print(np.array([fnn.activate(x) for x, _ in tstdata]))





    for i in range(1):
        trainer.trainEpochs(10)
        trnresult = percentError(trainer.testOnClassData(),
                                 trndata['class'])
        tstresult = percentError(trainer.testOnClassData(
            dataset=tstdata), tstdata['class'])




        #print "epoch: %4d" % trainer.totalepochs, \
        #    "  train error: %5.2f%%" % trnresult, \
         #   "  test error: %5.2f%%" % tstresult


    #print "Printing Trained Network..."
    #print fnn.params


    print "Saving the trined Model at : ", 'pybrain/'+save_target_name
    #saving the trained network...
    import pickle

    fileObject = open('pybrain/'+save_target_name, 'w')

    pickle.dump(fnn, fileObject)
    fileObject.close()

    #
    # fileObject = open('trainedNetwork79', 'r')
    # loaded_fnn = pickle.load(fileObject)
    #
    #
    # print "Printing the result prediction..."
    #
    # print loaded_fnn.activate([0.2,0.5,0.6,0.1,0.3,0.7])
    #
    # print fnn.activate([0.2,0.5,0.6,0.1,0.3,0.7])
    #


        #out = fnn.activateOnDataset(griddata)
        #out = out.argmax(axis=1)  # the highest output activation gives the class
        #out = out.reshape(X.shape)

    """
Example #37
    def predict_class(self, _x, _y, test_file, epochs, steps):
        print("Iniciando funcao predict_class() .............")

        traindata = self.ReadTrainFile(_x, _y)
        #testdata = self.ReadTestFile( test_file, len(_x[0]) )

        print(
            "____________________________________________________________________________"
        )
        print("A matrix de treino tem ", len(traindata), "linhas de dados")
        print("Dimensoes de Input e Output : ", traindata.indim,
              traindata.outdim)
        print(
            "____________________________________________________________________________\n"
        )

        print("convertendo arquivos .................")

        traindata._convertToOneOfMany()
        #testdata._convertToOneOfMany( )

        import os.path
        if os.path.exists('rede_animal.xml'):
            print(
                " Loading the training network from file rede_animal.xml *************** "
            )
            fnn = NetworkReader.readFrom('rede_animal.xml')
        else:
            print(
                " Creating the training network in file rede_animal.xml *************** "
            )
            fnn = buildNetwork(traindata.indim,
                               5,
                               traindata.outdim,
                               outclass=SoftmaxLayer)

        trainer = BackpropTrainer(fnn,
                                  dataset=traindata,
                                  momentum=0.1,
                                  verbose=True,
                                  weightdecay=0.01)

        print("Treinando .............")

        for i in range(epochs):
            print("Treinando epoca ", i)
            trainer.trainEpochs(steps)
            NetworkWriter.writeToFile(fnn, 'rede_animal.xml')
            print(" Rede salva em rede_animal.xml (Ok) ")

        print("Lendo arquivo de teste e classificando ..........")
        print("Gerando resultados em ANIMAL_OUTPUT.CSV ..........")
        output = open('animal_output.csv', 'wb')
        i = 1
        output.write("ID,Adoption,Died,Euthanasia,Return_to_owner,Transfer\n")
        for line in open(test_file, 'r'):
            x = ast.literal_eval(line)
            output.write("{},{},{},{},{},{} \n".format(i,
                                                       fnn.activate(x)[0],
                                                       fnn.activate(x)[1],
                                                       fnn.activate(x)[2],
                                                       fnn.activate(x)[3],
                                                       fnn.activate(x)[4]))
            i = i + 1
        print("Concluido")
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

rede = buildNetwork(2, 3, 1)  # 2 neurons in the input layer;
# 3 in the hidden layer;
# 1 in the output layer.

dados = SupervisedDataSet(2,
                          1)  # (number of attributes, number of classes)
dados.addSample((0, 0), (0, ))
dados.addSample((0, 1), (1, ))
dados.addSample((1, 1), (0, ))
dados.addSample((1, 0), (1, ))

treinamento = BackpropTrainer(rede, dados, momentum=0.06)

for i in range(30000):
    erro = treinamento.train()
    if i % 1000 == 0:
        print("Erro:", erro)

print(rede.activate([0, 0]))  # prediction
print(rede.activate([0, 1]))  # prediction
print(rede.activate([1, 1]))  # prediction
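print(rede.activate([1, 0]))  # prediction (the fourth XOR case)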
    if scale_data == 1:
        scalizer = preprocessing.StandardScaler().fit(dataX)
        dataX = scalizer.transform(dataX)
    if normalize_data == 1:
        normalizer = preprocessing.Normalizer().fit(dataX)
        dataX = normalizer.transform(dataX)
    # / scalarization && normalization

    # training dataset construction
    for i in range(0, len(dataX)):
        ds.addSample(dataX[i], dataY[i])
    # / training dataset construction

    # nn && trainer construction
    net = buildNetwork(ds.indim, (ds.indim + ds.outdim) / 2,
                       ds.outdim,
                       bias=True,
                       outclass=SoftmaxLayer)  # building the net
    trainer = BackpropTrainer(net,
                              ds,
                              learningrate=0.3,
                              momentum=0,
                              verbose=False)  # building the trainer
    # / nn && trainer construction

    # training
    trainer.trainUntilConvergence(maxEpochs=maxEpk)  # Train, until convergence
    # for epoch in range(0,1000):
    #         trainer.train()
    # / training

    # cross validation
Example #40
from pybrain.optimization import FEM
from pybrain.rl.experiments import EpisodicExperiment

batch = 2  #number of samples per learning step
prnts = 100  #number of learning steps after results are printed
epis = int(4000 / batch / prnts)  #number of rollouts
numbExp = 10  #number of experiments
et = ExTools(batch, prnts)  #tool for printing and plotting

for runs in range(numbExp):
    # create environment
    env = CartPoleEnvironment()
    # create task
    task = BalanceTask(env, 200, desiredValue=None)
    # create controller network
    net = buildNetwork(4, 1, bias=False)
    # create agent with controller and learner (and its options)
    agent = OptimizationAgent(net, FEM(storeAllEvaluations=True))
    et.agent = agent
    # create the experiment
    experiment = EpisodicExperiment(task, agent)

    #Do the experiment
    for updates in range(epis):
        for i in range(prnts):
            experiment.doEpisodes(batch)
        #print "Epsilon   : ", agent.learner.sigma
        et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
    et.addExps()
et.showExps()
Example #41
    pl.xlim(np.min(entrada),1.2*np.max(entrada))
    pl.ylim(1.2*np.min(salida),1.2*np.max(salida))
    pl.grid()
    pl.legend(loc='lower right',ncol=2,fontsize=fsize)
    pl.title('Target range = [0,1]',fontsize=fsize)
      

epochs = 2500  # number of training iterations for the network

size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
name = MPI.Get_processor_name()
sys.stdout.write("Hello, World! I am process %d of %d on %s.\n"% (rank, size, name))
comm = MPI.COMM_WORLD
np.random.seed(0)
net = pybrain_tools.buildNetwork(1, 40, 1)  # builds a net with i inputs, j hidden units, k outputs
net.randomize()  # randomly initializes the network parameters
print 'training standard network'
data=init_sin_dataset()
trainer = pybrain_rprop.RPropMinusTrainer(net, dataset=data)  # network, training data
trainer.trainEpochs(epochs)  # number of iterations

entrada = data['input']
salida = data['target']
L = len(entrada)
out = np.zeros(L)
aux3 = np.zeros(L)
aux4 = np.zeros(L)
for c in range(L):
    out[c] = net.activate([entrada[c]])
#chart_original_output(data,net)  # plot the dataset
Example #42
start_time=time.time()

#our data set has 14 input parameters and 50 classes
ds = SupervisedDataSet(14,50)
#tf.csv has been modified so that the csv file has 64 values on each line separated by commas. The first 14 values are inputs and the remaining 50 are outputs.
tf=open('tf.csv','r')
for line in tf.readlines():
    data = [float(x) for x in line.strip().split(',') if x != '']
    indata =  tuple(data[:14])
    outdata = tuple(data[14:])
    ds.addSample(indata,outdata)
print ds.indim
print ds.outdim
# pybrain.tools.shortcuts.buildNetwork(*layers, **options)
#Build arbitrarily deep networks.
#layers should be a list or tuple of integers, that indicate how many neurons the layers should have.
#building the network such that it has 14 input neurons, one hidden layer of 14 neurons and an output layer of 50 neurons.
#change the number of hidden layer neurons to maximise the accuracy
n = buildNetwork(ds.indim,14,ds.outdim,recurrent=True)

# backpropagation trainer
t = BackpropTrainer(n,learningrate=0.01,momentum=0.5,verbose=True)

t.trainOnDataset(ds,5)
t.testOnData(verbose=True)


end_time=time.time()

print "time taken is ",end_time-start_time," seconds"
Example #43

hiddenUnits = 10
loadNet = False
saveNet = False
saveName = "grasp.wgt"
numbExp = 1  #number of experiments
for runs in range(numbExp):
    # create environment
    #Options: XML-Model, Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
    env = CCRLEnvironment()
    # create task
    task = CCRLGlasVarTask(env)
    # create controller network
    net = buildNetwork(len(task.getObservation()),
                       hiddenUnits,
                       env.actLen,
                       outclass=TanhLayer)
    # create agent with controller and learner
    agent = FiniteDifferenceAgent(net, SPLA())
    # learning options
    agent.learner.gd.alpha = 0.2  #step size of \mu adaption
    agent.learner.gdSig.alpha = 0.085  #step size of \sigma adaption
    agent.learner.gd.momentum = 0.0

    #Loading weights
    if loadNet:
        agent.learner.original = loadWeights("grasp.wgt")
        agent.learner.gd.init(agent.learner.original)
        agent.learner.epsilon = 0.2
        agent.learner.initSigmas()
# coding=utf-8
from pybrain.optimization.hillclimber import HillClimber
from pybrain.rl.agents.optimization import OptimizationAgent

from pybrain.rl.environments.cartpole.balancetask import BalanceTask
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork


task = BalanceTask()

net = buildNetwork(task.outdim, 3, task.indim)

agent = OptimizationAgent(net, HillClimber())
exp = EpisodicExperiment(task, agent)
exp.doEpisodes(100)

print(exp)
Example #45
def buildNet_deep_mnist(n_input, n_output):
    return buildNetwork(n_input, 40, 30, 20, n_output, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias=True)
Example #46
for k in xrange(0, test_t.getLength()):
    test.addSample(test_t.getSample(k)[0], test_t.getSample(k)[1])

for k in xrange(0, training_t.getLength()):
    training.addSample(training_t.getSample(k)[0], training_t.getSample(k)[1])

print(training.getLength())
print(test.getLength())

print(test.indim)
print(test.outdim)
print(training.indim)
print(training.outdim)

fnn = buildNetwork(training.indim, 64, training.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn,
                          dataset=training,
                          momentum=0.1,
                          learningrate=0.01,
                          verbose=True,
                          weightdecay=0.01)
trainer.trainEpochs(10)

print(percentError(trainer.testOnClassData(), training['class']))
print(percentError(trainer.testOnClassData(dataset=test), test['class']))

plt.imshow(digits.images[0], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()

for i in range(0, 10):
Example #47
def buildNet_perceptron(n_input, n_output):
    return buildNetwork(n_input, n_output, hiddenclass=TanhLayer, outclass=SoftmaxLayer, bias=True)
# This project depends on freely distributed third-party code:
#
# python-numpy - linear algebra integration
# python-scipy - numerical integration and optimization
# python-matplotlib - plotting
# ipython - parallel and distributed computing (SPMD/MPMD)**
# python-pandas - data analysis and modeling
# python-sympy - algebra**
# pybrain - routines for building and training artificial neural networks

# Creating the neural network
from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(2, 3, 1, bias=True)

# Building the input dataset
from pybrain.datasets import SupervisedDataSet
ds = SupervisedDataSet(2, 1)

#       Adding the values to be learned (XOR)
ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

# Training
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, ds)

# Starting
trainer.train()
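# A single train() call will not learn XOR. A fuller training loop (epoch
# count hypothetical), mirroring the looped XOR example earlier on this page:
for i in range(30000):
    erro = trainer.train()
    if i % 1000 == 0:
        print("Error: " + str(erro))

print(net.activate([0, 1]))  # prediction: should approach 1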
Example #49

#********************NN With GA***************************
def fitFunction(net,
                dataset=training_data,
                targetClass=training_data['class']):
    error = percentError(testOnClassData_custom(net, dataset=training_data),
                         targetClass)
    return error


stepSize = [.05, .5, 1]
for s in stepSize:
    fnn_ga = buildNetwork(training_data.indim,
                          2,
                          training_data.outdim,
                          bias=True,
                          outclass=SoftmaxLayer)

    domain = [(-1, 1)] * len(fnn_ga.params)
    #print domain
    epochs = 20
    epoch_v = []
    trnerr_ga = []
    tsterr_ga = []
    iteration = 5
    for i in xrange(epochs):
        winner = geneticoptimize(iteration,
                                 domain,
                                 fnn_ga,
                                 fitFunction,
from pybrain.tools.shortcuts import buildNetwork
import rospy
net = buildNetwork(2, 3, 1)
print(net)
Example #51
    if y[i] != 0:
        train_data.addSample(X_new[i,], [target[i]])

for i in xrange(N_train+1, N_test_end):
    if y[i] != 0:
        test_data.addSample(X_new[i,], [target[i]])

for i in xrange(X_new.shape[0]):
    all_data.addSample(X_new[i,], [target[i]])

train_data._convertToOneOfMany()
test_data._convertToOneOfMany()
all_data._convertToOneOfMany()

print("building")
fnn = buildNetwork( train_data.indim, 5, train_data.outdim, fast=True,
                    outclass = SoftmaxLayer)
trainer = BackpropTrainer( fnn, dataset=train_data, momentum=0.2, verbose=True, learningrate=0.05, lrdecay=1.0)
# trainer = RPropMinusTrainer( fnn, dataset=train_data, momentum=0.1, verbose=True, learningrate=0.01, lrdecay=1.0)

# trainer.trainUntilConvergence()

best = fnn.copy()
best_test = 1

for i in range(5):
    print("training")
    trainer.trainEpochs(1)

    print("testing")
    trnresult = trainer.testOnData()
    tstresult = trainer.testOnData( dataset=test_data )
    rules[49] = 6
    rules[50] = '3'
    rules[51] = '3'
    rules[52] = '5'
    rules[53] = '2'
    rules[54] = 'killSprite'
    rules[55] = 'killSprite'
    rules[56] = 'killBoth'
    rules[57] = 0.8
    setRule(rules)
    #certainGame


#start
certainInitial()
net = buildNetwork(336, 10, 8, hiddenclass=SigmoidLayer)

from pybrain.optimization import SNES
from pybrain.optimization import OriginalNES
from pybrain.optimization import GA
from numpy import ndarray
best = 0
print "SNES starting......"
algo = SNES(lambda x: evaluate(x), net, verbose=True)
#algo = GA(lambda x: evaluate(x), net, verbose=True)
#algo = OriginalNES(lambda x: evaluate(x), net, verbose=True, desiredEvaluation=0.85)
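# 'evaluate' is defined elsewhere in the original; a hypothetical stand-in
# showing the expected contract (candidate network in, fitness score out):
def evaluate(candidate):
    # 'playGame' is an assumed episode runner, not shown in the original
    return playGame(candidate)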
episodesPerStep = 10
for i in range(99999):
    algo.learn(episodesPerStep)
    print net.params
    if isinstance(algo.bestEvaluable, ndarray):
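        # the original snippet is cut off here; an assumed continuation would
        # load the best parameter vector found so far back into the network
        net._setParameters(algo.bestEvaluable)
        best = max(best, algo.bestEvaluation)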
Example #53
0
def main():
    # the existing lyrics you've filled the text document with
    if word_by_word == 0:
        lyrics = open(filename).read().split("\n")
    elif word_by_word == 1:
        lyrics = markov(filename).split("\n")

    # the (now empty) song the neural network is going to write
    song = []

    # all of the possible rhymes based on the contents of the stuff you fed it
    if training == 1:
        all_possible_rhymes = rhymeindex(lyrics)
    elif training == 0:
        all_possible_rhymes  = opennetwork(epoch)[1]
        rhymes_in_lyrics = rhymeindex(lyrics)
        for rhyme in rhymes_in_lyrics:
            if rhyme not in all_possible_rhymes:
                all_possible_rhymes.append(rhyme)
        print all_possible_rhymes

    if training == 1:
        net = buildNetwork(4, 8, 8, 8, 8, 8, 8, 8, 8, 4, recurrent=True, hiddenclass=TanhLayer)
        t = BackpropTrainer(net, learningrate=0.05, momentum=0.5, verbose=True)

    # This loads a neural network that has already been trained on an actual rap song - so it knows how the rhymes and syllables should fit together
    if training == 0:
        rapdict = []
        rhyme_list_generator(lyrics, rapdict, all_possible_rhymes)
        net = opennetwork(epoch)[0]
    # note: this trainer replaces the one built above when training == 1,
    # so the effective learning rate is 0.01 in both modes
    t = BackpropTrainer(net, learningrate=0.01, momentum=0.5, verbose=True)



    # debug stuff...
    print "\n\nAlright, here are all of the possible rhymes from the lyrics it can draw from."
    print all_possible_rhymes

    if training == 1:
        # rapdict is just the list containing smaller lists as follows;
        # [the text of the line, the number of syllables in the line, the number of the rhyme scheme of the line]

        rapdict = []
        rhyme_list_generator(lyrics, rapdict, all_possible_rhymes)
        print "\n\nAlright, here's the information it will be working with - in the form of lyric, syllables, and rhyming scheme"
        print rapdict

        # makes a dataset
        ds = SupervisedDataSet(4,4)
        # each sample pairs the syllable count and rhyme scheme of two adjacent
        # lines (the input) with those of the following two lines (the target)


        for pos, i in enumerate(rapdict[:-3]):
            # enumerate replaces rapdict.index(i), which returns the first
            # matching line and breaks whenever a lyric repeats in the song
            if i != "" and rapdict[pos + 1] != "" and rapdict[pos + 2] != "" and rapdict[pos + 3] != "":
                # twobars holds the syllable count and rhyme scheme of four consecutive
                # lines: the first two form the input, the next two form the target
                twobars = [i[1], i[2], rapdict[pos + 1][1], rapdict[pos + 1][2], rapdict[pos + 2][1], rapdict[pos + 2][2], rapdict[pos + 3][1], rapdict[pos + 3][2]]

                # twobars gets normalized into floats between 0 and 1 so it can be entered into the dataset
                ds.addSample((twobars[0] / 20.0, int(twobars[1]) / float(len(all_possible_rhymes)), twobars[2] / 20.0, int(twobars[3]) / float(len(all_possible_rhymes))),
                             (twobars[4] / 20.0, int(twobars[5]) / float(len(all_possible_rhymes)), twobars[6] / 20.0, int(twobars[7]) / float(len(all_possible_rhymes))))

        # printing the dataset
        print "\n\nAlright, here is the dataset."
        print ds


    #just to make sure it doesn't keep using the same lyric over and over
    lyricsused = []

    trainingcount = 0

    # The part that actually writes a rap.
    final_song = open("neural_rap.txt", "r+")

    # The divisor 3 below can be tweaked - it just keeps things from getting too
    # repetitive or drawn out. For example, with only 30 lines to draw from, it is
    # better to pick 10 rhyming lines and build a song from those than to try to
    # rearrange all 30 into a 30-line song.
    if training == 0:
        while len(song) < len(lyrics) / 3 and len(song) < 50:
            verse = writearap([random.choice(range(1, 20)) / 20.0,
                               random.choice(range(1, len(all_possible_rhymes))) / float(len(all_possible_rhymes)),
                               random.choice(range(1, 20)) / 20.0,
                               random.choice(range(1, len(all_possible_rhymes))) / float(len(all_possible_rhymes))],
                              net, rapdict, all_possible_rhymes, lyricsused, song)
            if len(verse) > 3: # this number can be adjusted - usually the short verses it generates are low quality.
                for line in lyricsused:
                    final_song.write(str(line) + "\n")

                    # actually write the line to the song
                    song.append(line)
                final_song.write("\n...\n")
                print "Just wrote a verse to the file... - " + str(lyricsused)
                lyricsused = []

    if training == 1:
        while True:
            epochs_per_iteration = 100
            trainingcount += epochs_per_iteration
            t.trainOnDataset(ds, epochs_per_iteration)
            print "just wrote " + str(trainingcount) + "/" + "..."
            savenetwork(net, all_possible_rhymes, trainingcount)
Example #54
0
# assumed imports for this snippet (not shown in the original)
from sklearn import datasets
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import matplotlib.pyplot as plt

iris = datasets.load_iris()
x, y = iris.data, iris.target

dataset = ClassificationDataSet(4, 1, nb_classes=3)

# add the samples
for i in range(len(x)):
    dataset.addSample(x[i], y[i])

# training data
train_data, part_data = dataset.splitWithProportion(0.6)
print("Training samples: %d" % len(train_data))

# test and validation
test_data, val_data = part_data.splitWithProportion(0.5)
print("Test samples: %d" % len(test_data))
print("Validation samples: %d" % len(val_data))

net = buildNetwork(dataset.indim, 3, dataset.outdim)
trainer = BackpropTrainer(net,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

train_errors, val_errors = trainer.trainUntilConvergence(dataset=train_data,
                                                         maxEpochs=1000)

plt.plot(train_errors, 'b', val_errors, 'r')
plt.show()
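# An added sketch (not in the original): score the untouched test split by
# rounding the single regression-style output to the nearest class index.
correct = 0
for inp, tgt in test_data:
    if int(round(net.activate(inp)[0])) == int(tgt[0]):
        correct += 1
print("Test accuracy: %.1f%%" % (100.0 * correct / len(test_data)))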
Example #55
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.structure.modules import SigmoidLayer
'''rede = buildNetwork(2, 3, 1, outclass = SoftmaxLayer,
                    hiddenclass = SigmoidLayer, bias = False)
print(rede['in'])
print(rede['hidden0'])
print(rede['out'])
print(rede['bias'])'''

rede = buildNetwork(2, 3, 1)
base = SupervisedDataSet(2, 1)
base.addSample((0, 0), (0, ))
base.addSample((0, 1), (1, ))
base.addSample((1, 0), (1, ))
base.addSample((1, 1), (0, ))
#print(base['input'])
#print(base['target'])

treinamento = BackpropTrainer(rede,
                              dataset=base,
                              learningrate=0.01,
                              momentum=0.06)

for i in range(1, 30000):
    erro = treinamento.train()
    if i % 1000 == 0:
        print("Erro: %s" % erro)
Example #56
0
def main():
    #prefix     = '/media/wtluo/8A6C28646C284CEF/w2/'
    #pointing   = 'w2m0m0/step1/'
    prefix = '../'
    pointing = 'step1/'
    exposure = '831549'
    chipid = '_01'
    allgalfits = prefix + pointing + 'gal_' + exposure + chipid + '.fits'
    allstarfits = prefix + pointing + 'star_' + exposure + chipid + '.fits'
    galcat = prefix + pointing + 'gal_info' + exposure + chipid + '.dat'
    starcat = prefix + pointing + 'star_info' + exposure + chipid + '.dat'

    #hdr     = pf.open(allgalfits)
    sky = 2653.10250279776
    galim = pf.getdata(allgalfits)
    starim = pf.getdata(allstarfits)
    galdat = np.loadtxt(galcat, unpack=True, skiprows=1)
    stardat = np.loadtxt(starcat, unpack=True, skiprows=1)

    ra_gal = galdat[0][:]
    dec_gal = galdat[1][:]
    e1_lf = galdat[4][:]
    e2_lf = galdat[5][:]
    w_lf = galdat[6][:]
    m = galdat[10][:]
    c = galdat[11][:]
    xgal = galdat[17][:]
    ygal = galdat[18][:]
    snrgal = galdat[19][:]
    ngal = np.size(ra_gal)

    xstar = stardat[1][:]
    ystar = stardat[2][:]
    ra_st = stardat[3][:]
    dec_st = stardat[4][:]
    snrst = stardat[5][:]
    nstar = np.size(ra_st)

    # image dimensions; the original np.size(galim[:][0]) and np.size(galim[0][:])
    # both measure the first row, so xmax and ymax came out identical -
    # shape gives the intended row and column counts
    xmax_gal, ymax_gal = galim.shape
    xmax_str, ymax_str = starim.shape
    #print xmax_str/48.,ymax_str/48.
    #---test image extraction---

    nx = int(xmax_str / 48.0)
    ny = int(ymax_str / 48.0)

    imstar_3d = np.zeros([nstar, 48 * 48])
    imstars = np.zeros([nstar, 48 * 48])
    ellipstar = np.zeros([nstar, 2])
    Rstar = np.zeros(nstar)

    for i in range(nx):
        for j in range(ny):
            i_min = i * 48
            i_max = (i + 1) * 48
            j_min = j * 48
            j_max = (j + 1) * 48
            slc_i = slice(i_min, i_max)
            slc_j = slice(j_min, j_max)
            #galsub= galim[slc_i,slc_j]
            if j + i * nx < nstar:
                starsub = starim[slc_i, slc_j]
                mnstr = moments(starsub)
                cen_star = mnstr['centroid']
                mxx, mxy, myy = mnstr['moments']
                ellipstar[j + i * nx][0] = (mxx - myy) / (mxx + myy)
                ellipstar[j + i * nx][1] = (2. * mxy) / (mxx + myy)
                Rstar[j + i * nx] = mxx + myy
                cen_ini = np.array([24, 24])
                offset       = np.array([int(round(cen_star[0]-cen_ini[0])),\
                                         int(round(cen_star[1]-cen_ini[1]))])
                shift_im = shiftcen(starsub, offset)
                #imstar_3d[j+i*nx][:]    = shift_im.reshape(48*48)/np.sum(starsub)
                imstar_3d[j + i * nx][:] = starsub.reshape(
                    48 * 48) / np.sum(starsub)
                imstars[j + i * nx][:] = starsub.reshape(48 * 48)

    pcstr = pcaimages(imstar_3d)
    coeffs_ini = [1., 1., 1., 1.]

    ind = np.random.randint(low=0, high=100, size=100)
    idx = np.unique(ind)
    ntrain = len(idx)
    comatrix = np.zeros((ntrain, 4))
    trainsample = np.zeros((ntrain, 4))

    for ix in range(len(idx)):
        i = idx[ix]
        chi2 = lambda *args: -lnlike(*args)
        constraints= opt.minimize(chi2,coeffs_ini,\
                  args=(imstars[i][:]/np.sum(imstars[i][:]),pcstr))
        comatrix[ix][:] = constraints["x"]
        trainsample[ix][0] = xstar[i] - np.mean(xstar)
        trainsample[ix][1] = ystar[i] - np.mean(ystar)
        trainsample[ix][2] = ellipstar[i][0]
        trainsample[ix][3] = ellipstar[i][1]

    from mpl_toolkits.mplot3d import Axes3D
    """fig = plt.figure()
   ax = fig.add_subplot(111, projection='3d')
   ax.scatter(xstar[idx]-np.mean(xstar),ystar[idx]-np.mean(ystar),comatrix[:,3], c='r', marker='o')

   ax.set_xlabel('xccd-center')
   ax.set_ylabel('yccd-center')
   ax.set_zlabel('Coeff_4')
   plt.show()"""
    # ANN learning as interpolation----------------------
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.structure import SigmoidLayer, LinearLayer
    from pybrain.datasets import SupervisedDataSet
    from pybrain.supervised.trainers import BackpropTrainer

    net = buildNetwork(2,3,3,4,bias=True,hiddenclass=SigmoidLayer,\
          outclass=LinearLayer)
    ds = SupervisedDataSet(2, 4)

    for iy in range(len(idx)):
        i = idx[iy]
        ds.addSample((xstar[i]-np.mean(xstar),ystar[i]-np.mean(ystar)),\
               (comatrix[iy][0],comatrix[iy][1],comatrix[iy][2],comatrix[iy][3]))

    trainer = BackpropTrainer(net, ds)
    trainer.trainUntilConvergence()

    nout = nstar - len(idx)
    pos = np.zeros([nout, 2])
    for j in range(nout):
        if j not in idx:  # only stars held out of the training subset (j != idx[j] could also index past idx)
            pos[j][0] = xstar[j] - np.mean(xstar)
            pos[j][1] = ystar[j] - np.mean(ystar)
            outmatrix = net.activate(
                [xstar[j] - np.mean(xstar), ystar[j] - np.mean(ystar)])
            psfim = psfmodel(outmatrix, pcstr)
            #print psfim.shape
            mnpsf = moments(psfim.reshape(48, 48))
            mxx, mxy, myy = mnpsf['moments']
            Rpsf = mxx + myy
            e1psf = (mxx - myy) / (mxx + myy)
            e2psf = (2. * mxy) / (mxx + myy)
            print Rstar[j],ellipstar[j][0],ellipstar[j][1],\
                  Rpsf,e1psf,e2psf
    """fig = plt.figure()
Example #57
0
    trndata.appendLinked(
        trndata_temp.getSample(n)[0],
        trndata_temp.getSample(n)[1])

trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
'''
Implementation of a BP (backpropagation) network
'''
from pybrain.tools.shortcuts import buildNetwork  # for building network raw model
from pybrain.structure import SoftmaxLayer  # for output layer activation function
from pybrain.supervised.trainers import BackpropTrainer  # for model trainer

# network structure
n_h = 5  # hidden layer nodes number
net = buildNetwork(19, n_h, 2, outclass=SoftmaxLayer)

# 1.1 model training, using standard BP algorithm
trainer = BackpropTrainer(net, trndata)
trainer.trainEpochs(1)  # train for one epoch
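# an added sketch (not in the original): classification error on both splits,
# assuming percentError from pybrain.utilities and the converted datasets above
from pybrain.utilities import percentError
trnerr = percentError(trainer.testOnClassData(), trndata['class'])
tsterr = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
print('train error: %.2f%%, test error: %.2f%%' % (trnerr, tsterr))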

# 1.2 model training, using accumulative BP algorithm
# trainer = BackpropTrainer(net, trndata, batchlearning=True)
# trainer.trainEpochs(50)
# err_train, err_valid = trainer.trainUntilConvergence(maxEpochs=50)

# convergence curve for accumulative BP algorithm process
# import matplotlib.pyplot as plt
# plt.plot(err_train,'b',err_valid,'r')
# plt.title('BP network classification')
# plt.ylabel('accuracy')
Example #58
0
We now build an equivalent network to the one before, but with a more concise syntax:
"""
# imports for this snippet (the tutorial's earlier lines are not shown)
from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
from pybrain.tools.shortcuts import buildNetwork

n2 = RecurrentNetwork(name='net2')
n2.addInputModule(LinearLayer(2, name='in'))
n2.addModule(SigmoidLayer(3, name='h'))
n2.addOutputModule(LinearLayer(1, name='out'))
n2.addConnection(FullConnection(n2['in'], n2['h'], name='c1'))
n2.addConnection(FullConnection(n2['h'], n2['out'], name='c2'))
n2.sortModules()
""" Printouts look more concise and readable: """
print n2
""" There is an even quicker way to build networks though, as long as their structure is nothing 
more fancy than a stack of fully connected layers: """

n3 = buildNetwork(2, 3, 1, bias=False)
""" Recurrent networks are working in the same way, except that the recurrent connections
need to be explicitly declared upon construction. 

We can modify our existing network 'net2' and add a recurrent connection on the hidden layer: """

n2.addRecurrentConnection(FullConnection(n2['h'], n2['h'], name='rec'))
""" After every structural modification, if we want ot use the network, we call 'sortModules()' again"""

n2.sortModules()
print n2
""" As the network is now recurrent, successive activations produce different outputs: """

print n2.activate([1, 2]),
print n2.activate([1, 2]),
print n2.activate([1, 2])
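""" A small addition to the tutorial text: calling 'reset()' clears the recurrent
layer's history, so the next activation reproduces the very first one: """

n2.reset()
print n2.activate([1, 2])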
Example #59
0
def build_network(self):
    self.neural_network = buildNetwork(
        self.train.indim, 7, self.train.outdim,
        outclass=SoftmaxLayer)  # feed-forward network


def net_class(ustraining_set,
              train_set_labels,
              usvalidation_set=None,
              validation_set_labels=None,
              pre=True):
    # print (validation_set_labels - 1)
    if pre:
        # ltraining_set = gabor_filter(ustraining_set)
        ltraining_set = standard_data(ustraining_set)
    else:
        ltraining_set = ustraining_set
    if usvalidation_set is not None:  # '== None' misbehaves on numpy arrays
        if pre:
            # lvalidation_set = gabor_filter(usvalidation_set)
            lvalidation_set = standard_data(usvalidation_set)
        else:
            lvalidation_set = usvalidation_set
        vds = ClassificationDataSet(1024, 7, nb_classes=7)
        for vd, vt in zip(lvalidation_set, validation_set_labels):
            vtarr = [int(i == vt - 1) for i in range(0, 7)]
            vds.addSample(vd, vtarr)
    # net = buildNetwork(1024, 100, 8,outclass=SoftmaxLayer)

    ds = ClassificationDataSet(1024, 7, nb_classes=7)
    for d, t in zip(ltraining_set, train_set_labels):
        tarr = [int(i == t - 1) for i in range(0, 7)]
        ds.addSample(d, tarr)

    tot_min_err = 100.0
    best_l = 0.0
    best_w = 0.0
    obest_e = 0
    for l in [0.005]:
        for w in [0.01]:
            net = buildNetwork(1024,
                               320,
                               7,
                               outclass=SoftmaxLayer,
                               hiddenclass=SigmoidLayer)
            net.sortModules()
            trainer = BackpropTrainer(net,
                                      ds,
                                      learningrate=l,
                                      momentum=0,
                                      weightdecay=w,
                                      batchlearning=False,
                                      verbose=True)
            cmin_err = 100.0
            flag = True
            best_e = 0
            e = 0

            flag = False
            trnresult = 100.0
            tstresult = 100.0
            for i in range(10):
                e += 1
                trainer.trainEpochs(1)

                trnresult = percentError(trainer.testOnClassData(),
                                         train_set_labels - 1)

                if usvalidation_set is not None:
                    tstresult = percentError(
                        trainer.testOnClassData(dataset=vds),
                        validation_set_labels - 1)
                    if cmin_err >= tstresult:
                        cmin_err = tstresult
                        print "copt err ", tstresult
                        best_e = e
                        flag = True
                    if tot_min_err > cmin_err:

                        tot_min_err = cmin_err

                        best_l = l
                        best_w = w
                        obest_e = best_e
                        print "new opt err:{}, for LR: {}, WD:{}, NE:{} ".format(
                            tot_min_err, best_l, best_w, obest_e)
    net.sorted = False
    net.sortModules()  # force a re-sort so the network pickles cleanly
    res_f = open('net.dump', 'w')
    pickle.dump(net, res_f)
    res_f.close()
    return net
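# A hypothetical usage sketch (not in the original): reload the pickled net
# and classify one 1024-element image vector.
import pickle
with open('net.dump') as f:
    loaded_net = pickle.load(f)
sample_vec = [0.0] * 1024  # placeholder input
print loaded_net.activate(sample_vec).argmax() + 1  # labels above start at 1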