def propagate_backward(nodes):
  # Assumes `nodes` is ordered so that each node's forward neighbors are
  # processed before the node itself (output nodes first), and that `target`
  # (the training target) is available in the enclosing scope.
  for i, node in enumerate(nodes):
    if not node.forward_neighbors:
      # Output node: error is the difference between the target and the prediction.
      node.error = target.values[i] - node.transformed_value
    else:
      # Hidden node: error is the weighted sum of the downstream deltas.
      node.error = sum(
        weight.value * child.delta
        for weight, child in zip(node.forward_weights, node.forward_neighbors)
      )
    node.delta = node.error * NeuralNetwork.SigmoidPrime(node.raw_value)
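
Both examples call NeuralNetwork.SigmoidPrime, which is not shown here. A minimal sketch of what that helper presumably looks like, assuming the standard logistic sigmoid activation (the class name and method names are taken from the calls above; the bodies are an assumption):

import math

class NeuralNetwork:
  @staticmethod
  def Sigmoid(x):
    # Logistic function: squashes the raw (pre-activation) value into (0, 1).
    return 1.0 / (1.0 + math.exp(-x))

  @staticmethod
  def SigmoidPrime(x):
    # Derivative of the logistic function, written in terms of Sigmoid itself.
    s = NeuralNetwork.Sigmoid(x)
    return s * (1.0 - s)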
Example #2
def Backprop(network, input, target, learning_rate):
  """
  Arguments:
  ---------
  network       : a NeuralNetwork instance
  input         : an Input instance
  target        : a Target instance
  learning_rate : the learning rate (a float)

  Returns:
  -------
  Nothing

  Description:
  -----------
  The function first propagates the inputs through the network
  using the FeedForward function, then backtracks and updates the
  weights.

  Notes:
  ------
  The remarks made for *FeedForward* hold here too.

  The *target* argument is an instance of the class *Target* and
  has one attribute, *values*, which has the same length as the
  number of output nodes in the network.

  i.e., len(target.values) == len(network.outputs)

  In the distributed output encoding scenario, the target.values
  list has 10 elements.

  When computing the error of an output node, note that for the i-th
  output node the true output is target.values[i] and the predicted
  output is network.outputs[i].transformed_value. In particular, the
  error should be a function of:

  target.values[i] - network.outputs[i].transformed_value
  
  """
  network.CheckComplete()

  # 1) We first propagate the input through the network
  FeedForward(network, input)

  # 2) Then we compute the errors starting with the last layer
  delta = {}
  for node in network.node_set:
    delta[node] = 0

  for m in range(len(network.outputs)):
    e_m = target.values[m] - network.outputs[m].transformed_value
    delta[network.outputs[m]] = NeuralNetwork.SigmoidPrime(network.outputs[m].raw_value) * e_m

  # 3a) We now propagate the errors back through the hidden layers, walking
  #     the hidden nodes in reverse so downstream deltas are already known

  for node in reversed(network.hidden_nodes):
    e_m = 0
    for weight, child in zip(node.forward_weights, node.forward_neighbors):
      e_m += weight.value * delta[child]
    delta[node] = NeuralNetwork.SigmoidPrime(node.raw_value) * e_m

  # 3b) Propagate errors to the input layer's edges to the first hidden layer
  #     (these deltas are stored but not needed for the weight updates below)

  for node in reversed(network.inputs):
    e_m = 0
    for weight, child in zip(node.forward_weights, node.forward_neighbors):
      e_m += weight.value * delta[child]
    delta[node] = NeuralNetwork.SigmoidPrime(node.raw_value) * e_m

  # 4) Update the weights: for an edge from node i to node j,
  #    w_ij += learning_rate * a_i * delta_j

  for node in network.inputs:
    for weight, child in zip(node.forward_weights, node.forward_neighbors):
      weight.value += learning_rate * node.transformed_value * delta[child]

  for node in network.hidden_nodes:
    for weight, child in zip(node.forward_weights, node.forward_neighbors):
      weight.value += learning_rate * node.transformed_value * delta[child]
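
A brief sketch of how Backprop might be driven over a training set; the Train function, the examples list of (Input, Target) pairs, and the epochs/learning_rate defaults are illustrative assumptions, not part of the original code:

def Train(network, examples, epochs=30, learning_rate=0.1):
  # `examples` is assumed to be a list of (Input, Target) pairs; each call to
  # Backprop performs one stochastic gradient-descent update of the weights.
  for _ in range(epochs):
    for input, target in examples:
      Backprop(network, input, target, learning_rate)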