Example #1
0
 def BackProbGradients(self, output, networkOutput, layerOutputs):
     """Compute the weight gradients for every layer via backpropagation.

     Args:
         output: target values for the training example(s).
         networkOutput: the network's predicted output from the forward pass.
         layerOutputs: outputs recorded during the forward pass;
             layerOutputs[0] is the raw input and layerOutputs[i + 1] is
             hidden layer i's output — assumed shape/orientation matches
             what FeedForward produced (TODO confirm against caller).

     Returns:
         List of length noOfLayers + 1; entry i is the gradient matrix for
         the weights of layer i.
     """
     weightGradients = [None] * (self.noOfLayers + 1)
     if self.outputFunctionName == "SoftMax" and self.lossFunctionName == "CrossEntropy":
         # Fused gradient for the SoftMax + CrossEntropy pairing, taken
         # directly from the combined-gradient table.
         gradientsWRTActivation = self.LossAndOutputGradients[
             'CrossEntropyWithSoftMax'](networkOutput, output)
     else:
         # General case: chain the loss gradient through the output
         # function's gradient.
         gradientsWRTActivation = self.LossGradients[self.lossFunctionName](
             networkOutput, output)
         gradientsWRTActivation = self.OutputGradients[
             self.outputFunctionName](networkOutput, gradientsWRTActivation)
     # This expression was duplicated verbatim in both branches above;
     # compute the output-layer weight gradient once here instead.
     weightGradients[self.noOfLayers] = np.matmul(
         gradientsWRTActivation,
         np.transpose(
             fns.IntergrateBiasAndData(layerOutputs[self.noOfLayers])))
     # Walk the hidden layers from last to first, propagating the gradient
     # backwards through each layer's weights and activation function.
     for i in reversed(range(self.noOfLayers)):
         backProbGradient = np.matmul(
             np.transpose(
                 fns.DisIntergrateBiasFromWeights(self.weights[i + 1])),
             gradientsWRTActivation)
         gradientsWRTActivation = self.ActivationGradients[
             self.activationFunctionNames[i]](layerOutputs[i + 1],
                                              backProbGradient)
         weightGradients[i] = np.matmul(
             gradientsWRTActivation,
             np.transpose(fns.IntergrateBiasAndData(layerOutputs[i])))
     return weightGradients
Example #2
0
    def FeedForward(self, data):
        """Run a forward pass of the network on *data*.

        Returns a tuple (networkOutput, layersOutputs) where layersOutputs[0]
        is the raw input and layersOutputs[i + 1] is hidden layer i's output
        (recorded before the bias column is re-attached, for use during
        backpropagation).
        """
        layersOutputs = [data]
        # The bias term is folded into the data before every layer.
        activated = fns.IntergrateBiasAndData(data)
        for layerIndex in range(self.noOfLayers):
            activationName = self.activationFunctionNames[layerIndex]
            activated = self.Activation[activationName](
                activated, self.weights[layerIndex])
            layersOutputs.append(activated)
            activated = fns.IntergrateBiasAndData(activated)
        networkOutput = self.OutputFunction[self.outputFunctionName](
            activated, self.weights[self.noOfLayers])
        return networkOutput, layersOutputs