Ejemplo n.º 1
0
    def forward(self, input):
        """
        Propagate *input* through the layer and cache the result.

        Parameters
        ----------
        input : ndarray
            a numpy array (1, nIn + 1) holding the layer input

        Returns
        -------
        ndarray :
            a numpy array (nOut, 1) holding the layer output
        """
        # Row 0 of self.input (the bias entry) is left untouched;
        # only the remaining rows are filled from the transposed input.
        self.input[1:, :] = input.T
        net = np.dot(self.weights, self.input)
        self.output = Activation.sigmoid(net)
        return self.output
Ejemplo n.º 2
0
 def _fire(self, inp):
     """Return the sigmoid activation of the weighted input sum."""
     net_input = np.dot(np.array(inp), self.weights)
     return Activation.sigmoid(net_input)
Ejemplo n.º 3
0
 def _fire(self, inp):
     """Return the sigmoid of the dot product of the input with the weights."""
     weighted_sum = np.dot(np.array(inp), self.weight)
     return Activation.sigmoid(weighted_sum)
Ejemplo n.º 4
0
    def _fire(self, inp):
        """Compute the neuron activation: sigmoid of the dot product of
        the input vector with the layer weights.

        Parameters
        ----------
        inp : array-like
            Input values fed to the layer.

        Returns
        -------
        ndarray or scalar
            Sigmoid activation(s) of the weighted input — shape depends
            on self.weights (can't tell from here).
        """
        # Fixed: the original body mixed tab- and space-indented lines
        # (a TabError under Python 3) and carried commented-out debug
        # prints plus an unreachable `pass` after the return.
        return Activation.sigmoid(np.dot(np.array(inp), self.weights))
Ejemplo n.º 5
0
 def _fire(self, inp):
     """Compute the sigmoid activation of every neuron in the layer.

     Parameters
     ----------
     inp : array-like
         Input vector of the layer (without the bias term).

     Returns
     -------
     ndarray
         Array of shape (n_out,) with one sigmoid activation per neuron.
     """
     # Hoisted out of the loop: the bias-augmented input is the same
     # for every neuron, so build it once instead of per iteration.
     biased_input = np.append(1, inp)
     out = np.zeros(self.n_out)
     for i in range(self.n_out):
         out[i] = Activation.sigmoid(np.dot(biased_input, self.weights[:, i]))
     return out
Ejemplo n.º 6
0
 def fire(self, input):
     """Return sigmoid(input · weight) for this layer."""
     net = np.dot(np.array(input), self.weight)
     return Activation.sigmoid(net)
Ejemplo n.º 7
0
 def _fire(self, inp, weightsOfNeuron):
     """Sigmoid activation of a single neuron given its weight vector."""
     input_vec = np.array(inp)
     weight_vec = np.array(weightsOfNeuron)
     return Activation.sigmoid(np.dot(input_vec, weight_vec))
 def fire(self, input):
     """Fire the layer on *input*.

     Unlike the perceptron (which uses Activation.sign), this layer
     squashes the weighted sum with the sigmoid function.
     """
     weighted_sum = np.dot(np.array(input), self.weight)
     return Activation.sigmoid(weighted_sum)
Ejemplo n.º 9
0
 def fire(self, input):
     """Apply the layer weights to *input* (expected shape (n, 1) per the
     original note) and squash the result with the sigmoid function."""
     net = np.dot(self.weight, input)
     return Activation.sigmoid(net)