def forward(self, input):
    """
    Compute forward step over the input using its weights

    Parameters
    ----------
    input : ndarray
        a numpy array (1, nIn) containing the input of the layer.
        NOTE(review): the original docstring said (1, nIn + 1), but the
        code writes input.T into self.input[1:, :], leaving the bias row
        at index 0 untouched — so only nIn values come from `input`;
        confirm the intended shape with callers.

    Returns
    -------
    ndarray :
        a numpy array (nOut, 1) containing the output of the layer
    """
    # Keep the bias unit in self.input[0]; overwrite only the data rows.
    self.input[1:, :] = input.T
    # Weighted sum of the augmented input, squashed through the sigmoid.
    self.output = Activation.sigmoid(np.dot(self.weights, self.input))
    return self.output
def _fire(self, inp):
    """Return the sigmoid activation of the weighted input.

    The input is converted to an ndarray, dotted with the layer's
    weight matrix, and passed element-wise through the sigmoid.
    """
    net_input = np.dot(np.array(inp), self.weights)
    return Activation.sigmoid(net_input)
def _fire(self, inp):
    """Compute the neuron activation: sigmoid of inp · weight."""
    weighted_sum = np.dot(np.array(inp), self.weight)
    return Activation.sigmoid(weighted_sum)
def _fire(self, inp):
    """Return the sigmoid activation of the weighted input.

    Parameters
    ----------
    inp : array_like
        Input vector; converted to an ndarray before the dot product
        with ``self.weights``.

    Returns
    -------
    ndarray
        Element-wise sigmoid of ``inp · weights``.
    """
    # Removed dead code: commented-out debug prints and an unreachable
    # `pass` statement that followed the return.
    return Activation.sigmoid(np.dot(np.array(inp), self.weights))
def _fire(self, inp):
    """Return the sigmoid activations of all ``n_out`` neurons.

    A bias term (1) is prepended to the input once, then each neuron's
    activation is the sigmoid of its weight column dotted with that
    augmented input.

    Parameters
    ----------
    inp : array_like
        Input vector of the layer (without the bias term).

    Returns
    -------
    ndarray
        Array of shape (n_out,) holding one sigmoid activation per neuron.
    """
    # Hoisted out of the loop: the bias-augmented input is loop-invariant,
    # so building it once avoids n_out redundant np.append allocations.
    biased_input = np.append(1, inp)
    ret = np.zeros(self.n_out)
    for i in range(self.n_out):
        ret[i] = Activation.sigmoid(np.dot(biased_input, self.weights[:, i]))
    return ret
def fire(self, input):
    """Fire the neuron: sigmoid of the dot product of input and weight."""
    net = np.dot(np.array(input), self.weight)
    return Activation.sigmoid(net)
def _fire(self, inp, weightsOfNeuron):
    """Compute one neuron's sigmoid activation for the given weights."""
    inp_arr = np.array(inp)
    w_arr = np.array(weightsOfNeuron)
    return Activation.sigmoid(np.dot(inp_arr, w_arr))
def fire(self, input):
    """Return the sigmoid activation for the given input vector."""
    # Look at how we change the activation function here!!!!
    # Not Activation.sign as in the perceptron, but sigmoid
    net_input = np.dot(np.array(input), self.weight)
    return Activation.sigmoid(net_input)
def fire(self, input):
    """Apply the layer to `input` (shape (n, 1) per the original note).

    Computes the weighted net input and squashes it with the sigmoid.
    """
    # input (n,1)
    net = np.dot(self.weight, input)
    return Activation.sigmoid(net)