Esempio n. 1
0
 def calc_error(self, next_layer):
     """Back-propagate error terms from *next_layer* into this hidden layer.

     For each neuron ``i`` of this layer, the incoming error is the sum of
     ``weight * out_error`` over every neuron of the next layer (where
     ``node.weights[i]`` is the weight of the connection leaving neuron
     ``i``), scaled by the derivative of the neuron's activation evaluated
     at its output ``node_out``.

     Side effect: sets ``out_error`` on every neuron of this layer.
     Returns None.
     """
     connected_nodes = next_layer.neurons  # neurons of the next layer

     for i in range(len(self.neurons)):  # index of each node in this layer
         current_node = self.neurons[i]

         # BUG FIX: the accumulator must restart at 0 for every neuron of
         # this layer. The original initialised it once before the loop,
         # so each neuron's delta wrongly included the sums already
         # computed for all preceding neurons.
         weighted_sum = 0
         for node in connected_nodes:
             # weights[i] references the connection from the i-th neuron
             # of this layer to `node` in the next layer.
             weighted_sum += node.weights[i] * node.out_error

         if current_node.function == activation.getFunc("sigmoid"):
             # sigmoid'(x) = y * (1 - y), with y = node_out
             current_node.out_error = weighted_sum * current_node.node_out * (1 - current_node.node_out)
         else:
             # tanh'(x) = 1 - y**2
             current_node.out_error = weighted_sum * (1 - current_node.node_out**2)
Esempio n. 2
0
 def calc_error(self, expected_output):
     """Compute the delta for each output neuron and return the network error.

     The delta stored in ``out_error`` is (expected - actual) scaled by the
     derivative of the neuron's activation (sigmoid or tanh) evaluated at
     the actual output. The returned value is the mean of the squared
     deltas over all neurons of this layer.
     """
     network_error = 0
     neuron_count = len(self.neurons)

     for idx, current_node in enumerate(self.neurons):
         expected = expected_output[idx]  # target value for this neuron
         actual = self.act_out[idx]       # value the neuron produced
         diff = expected - actual

         if current_node.function == activation.getFunc("sigmoid"):
             # derivative of sigmoid at the output: y * (1 - y)
             delta = diff * actual * (1 - actual)
         else:
             # derivative of tanh at the output: 1 - y**2
             delta = diff * (1 - actual**2)

         current_node.out_error = delta
         network_error += delta**2 / neuron_count

     return network_error
Esempio n. 3
0
    def calc_error(self, next_layer):
        """Back-propagate error terms from *next_layer* into this hidden layer.

        Each neuron ``i`` receives the weighted sum of the next layer's
        ``out_error`` values over the connections leaving it
        (``node.weights[i]``), multiplied by the derivative of its
        activation function evaluated at ``node_out``.

        Side effect: sets ``out_error`` on every neuron of this layer.
        Returns None.
        """
        connected_nodes = next_layer.neurons  # neurons of the next layer

        for i in range(len(self.neurons)):
            current_node = self.neurons[i]

            # BUG FIX: reset the accumulator for every neuron. It was
            # previously initialised once before the outer loop, so each
            # neuron's delta incorrectly carried the sums accumulated for
            # all neurons processed before it.
            weighted_sum = 0
            for node in connected_nodes:
                # weights[i] is the weight on the connection from the
                # i-th neuron of this layer.
                weighted_sum += node.weights[i] * node.out_error

            if current_node.function == activation.getFunc("sigmoid"):
                # sigmoid'(x) = y * (1 - y), with y = node_out
                current_node.out_error = (
                    weighted_sum * current_node.node_out *
                    (1 - current_node.node_out))
            else:
                # tanh'(x) = 1 - y**2
                current_node.out_error = (
                    weighted_sum * (1 - current_node.node_out**2))
Esempio n. 4
0
    def calc_error(self, expected_output):
        """Compute output-layer deltas and return the mean squared delta.

        Stores in each neuron's ``out_error`` the value
        (expected - actual) times the derivative of the neuron's
        activation (sigmoid or tanh) at the actual output, then returns
        the average of the squared deltas across the layer.
        """
        total = 0
        count = len(self.neurons)

        for idx, node in enumerate(self.neurons):
            target = expected_output[idx]   # expected output for this node
            produced = self.act_out[idx]    # actual output for this node
            gap = target - produced

            if node.function == activation.getFunc("sigmoid"):
                # sigmoid derivative: y * (1 - y)
                node.out_error = gap * produced * (1 - produced)
            else:
                # tanh derivative: 1 - y**2
                node.out_error = gap * (1 - produced**2)

            total += node.out_error**2 / count

        return total
Esempio n. 5
0
 def assignActivation(self, func_name):
     """Return the activation function registered under *func_name*."""
     chosen_function = activation.getFunc(func_name)
     return chosen_function
Esempio n. 6
0
    def assignActivation(self, func_name):
        """Look up the activation function named *func_name* and return it."""
        resolved = activation.getFunc(func_name)
        return resolved