Example #1
 def activate_hebbian(self):
     print "[+] Activating hebbian learning algorithm for the neural network..."
     # Let's start the magic!
     # 0.- The weights have to be initialized
     utils = NeuralUtils(self)   # Pass self so that NeuralUtils has access to all of this network's data.
     # With unsupervised learning the phase must always be 'train'
     if self.__phase != "train":
         print "[-] With unsupervised learning the phase must always be 'train'."
         print "[-] The weights are set as in 'train'. Proceeding..."
     utils.init_weights(self.RANDOM)
     
     # Let's iterate through the dataset
     for _ in range(self.__iterations):
         for instance_number in range(len(self.__dataset)):
             # 1.- The output has to be computed: V = sum(w * input)
             output = utils.hebbian_calc_output(instance_number)
             if not output:
                 print "[-] Output for instance %s could not be computed. [NeuralNetwork]" % str(instance_number)
                 break
             # 2.- The weights have to be updated.
             utils.hebbian_update_weights()
     
     # Once the iterations are finished, write the weights to a file: in unsupervised
     # learning the final weights give us the Principal Component of the dataset.
     utils.write_weights_to_file("", "unsupervised")
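
The example above only shows how NeuralUtils is driven, not what hebbian_calc_output and hebbian_update_weights do internally. As a point of reference, here is a minimal, self-contained NumPy sketch of the kind of Hebbian update the comments describe (output V = sum(w * input), weights converging toward the dataset's principal component). Every name in it is hypothetical, and the choice of Oja's normalization to keep the weights bounded is an assumption for illustration, not part of NeuralUtils.

# Minimal sketch, assuming a single linear unit trained with Oja's rule;
# this is not NeuralUtils' implementation.
import numpy as np

def hebbian_train(dataset, iterations=100, lr=0.01, seed=0):
    rng = np.random.RandomState(seed)
    w = rng.uniform(-0.5, 0.5, dataset.shape[1])   # 0.- initialize the weights at random
    for _ in range(iterations):
        for x in dataset:
            v = np.dot(w, x)                       # 1.- output: V = sum(w * input)
            w += lr * v * (x - v * w)              # 2.- Oja's rule; plain Hebb would be w += lr * v * x
    return w                                       # approximates the first principal component (for centered data)

# Usage with hypothetical data:
# w = hebbian_train(np.asarray(dataset, dtype=float))
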
Example #2
 def activate_backpropagation(self):
     print "[+] Activating backpropagation algorithm for the neural network..."
     
     # This list collects the per-instance errors of every iteration; it is used to
     # plot the average output error once all iterations are finished
     finalErrorLists = []
     oneIterationErrorList = []
     
     # Let's start the magic!
     # 0.- The weights have to be initialized
     utils = NeuralUtils(self)   # Pass self so that NeuralUtils has access to all of this network's data.
     # If the phase is 'test', the weights of the network are loaded from the weights file;
     # if the phase is 'train', they are initialized at random;
     # if the phase is 'debug', the debug initialization is used.
     if self.__phase == "test":
         utils.init_weights(self.PRODUCTION)
     elif self.__phase == "train":
         utils.init_weights(self.RANDOM)
     elif self.__phase == "debug":
         utils.init_weights(self.DEBUG)
     
     if self.__phase == "test":
         self.__iterations = 1
     for it in range(self.__iterations):                 
         oneIterationErrorList = []
         for instance_number in range(len(self.__dataset)):
             # 1.- Compute the output of the neurons. We need to know which instance we are working on (to know its inputs and expected outputs).
             output = utils.backp_feed_forward(instance_number) # output will always be a list
             if not output:
                 print "[-] Feed forward algorithm failed for instance %s. [NeuralNetwork]" % str(instance_number)
                 break
             if self.__phase == "test":
                 # The output should be stored in a file
                 utils.write_output(output, "supervised")
             elif self.__phase == "train" or self.__phase == "debug":
                 # 2.- Compute the error
                 error_list = utils.calc_error(output, instance_number)
                 # 2.2.- We set the deltas (errors) of the output layer
                 utils.set_output_deltas(error_list)
                 # For each error (one per output node), back propagate it and update the weights
                 for error in error_list:
                     # 3.- Back propagate the error through the neural network
                     utils.backp_back_propagate(error)
                     # 4.- Update weights
                     utils.backp_update_weights()
                 # Keep the average error of this instance: if there is more than one
                 # output, the error is averaged over all outputs. This list feeds the
                 # per-iteration average printed below and, when graphics are enabled,
                 # the per-instance error graphic, so it is filled in every case.
                 avError = float(sum(error_list)) / len(error_list) # average error of this instance
                 oneIterationErrorList.append(avError)
         # --> All instances processed <-- #
         
         # Done for each iteration
         if self.__phase == "train" or self.__phase == "debug":
             print "[+] Average error in iteration " + str(it) +":"
             print sum(oneIterationErrorList)/len(oneIterationErrorList)
             # We compute the average of the errors of all instances and add the point to the mean_grapher
             if self.__enabled_graphics:
                 # Each point is the average error of all instances in that iteration
                 self.__mean_grapher.add(sum(oneIterationErrorList)/len(oneIterationErrorList))
                 # finalErrorLists collects the per-instance error list of each iteration
                 finalErrorLists.append(oneIterationErrorList)
         # --> End of Iteration <-- #
         
     # --> End of all Iterations <-- #
     if self.__phase == "train" or self.__phase == "debug":
         # Write the final weights to a file
         utils.write_weights_to_file(str(it), "supervised")
         # Compute and save graphics 
         if self.__enabled_graphics:
             utils.compute_and_save_graphics(finalErrorLists)
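
As with the first example, the heavy lifting happens inside NeuralUtils. For orientation, here is a compact, self-contained sketch of the same train-phase loop (feed forward, compute the error, set the output deltas, back propagate, update the weights, print the per-iteration average error) for a network with a single hidden layer. The sigmoid activation, the element-wise delta rule, the vectorized backpropagation of the whole error vector at once (the example above propagates each output error separately), and every name used here are assumptions for illustration, not NeuralUtils' actual API.

# Minimal sketch of a backpropagation training loop, assuming one hidden layer,
# sigmoid activations and a squared-error-style delta; not NeuralUtils' implementation.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def backprop_train(X, Y, hidden=8, iterations=100, lr=0.1, seed=0):
    rng = np.random.RandomState(seed)
    W1 = rng.uniform(-0.5, 0.5, (X.shape[1], hidden))  # input  -> hidden weights
    W2 = rng.uniform(-0.5, 0.5, (hidden, Y.shape[1]))  # hidden -> output weights
    for it in range(iterations):
        instance_errors = []
        for x, y in zip(X, Y):
            # 1.- feed forward
            h = sigmoid(x.dot(W1))
            out = sigmoid(h.dot(W2))
            # 2.- error and output-layer deltas
            err = y - out
            delta_out = err * out * (1.0 - out)
            # 3.- back propagate the error to the hidden layer
            delta_hid = delta_out.dot(W2.T) * h * (1.0 - h)
            # 4.- update the weights
            W2 += lr * np.outer(h, delta_out)
            W1 += lr * np.outer(x, delta_hid)
            instance_errors.append(float(np.mean(np.abs(err))))
        print("[+] Average error in iteration %d: %f"
              % (it, sum(instance_errors) / len(instance_errors)))
    return W1, W2

# Usage with hypothetical data:
# W1, W2 = backprop_train(np.asarray(X, dtype=float), np.asarray(Y, dtype=float))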