def _sample_labels(self, x, s):
    """Draw Bernoulli labels for each row of ``x``.

    The success probability is a smooth "bump" over x (rising edge near
    -3, falling edge near +3, scaled by 0.8) plus a logistic tail that
    turns on past x = 5.  When the model carries a bias column
    (``self._bias``), the feature of interest is column 1.
    ``s`` is accepted but unused here.  Returns an (n, 1) array of 0/1
    draws from ``np.random``.
    """
    if self._bias:
        x = x[:, 1]

    rising = sigmoid(0.6 * (x + 3))
    falling = sigmoid(-5 * (x - 3))
    tail = sigmoid(x - 5)
    yprob = 0.8 * rising * falling + tail

    labels = np.random.binomial(1, yprob)
    return np.expand_dims(labels, axis=1)
# Example #2
    def calc_effort(self, inputs):
        """
        One LSTM-style recurrent step.

        The first ``self.n_input`` external inputs are concatenated with
        the previous hidden activations, pushed through the gate weights
        (W, b), and split into forget / input / output gates plus a
        candidate value.  The cell memory is updated in place, the new
        hidden state is exposed through the output gate, and the result
        is squashed by the readout layer (Wo, bo) to a scalar effort.
        """
        stimuli = np.array(inputs[:self.n_input])
        combined = np.concatenate((stimuli, self.hidden_out))
        gates = sigmoid(combined.dot(self.W) + self.b)
        forget, keep, expose, candidate = np.split(gates, 4)

        # In-place update keeps any external references to memory valid.
        self.memory *= forget
        self.memory += keep * candidate
        self.hidden_out = expose * sigmoid(self.memory)

        effort = sigmoid(self.hidden_out.dot(self.Wo) + self.bo)
        return float(effort)
# Example #3
 def calc_effort(self, inputs):
     """
     Forward-propagate *inputs* through the fully-connected network
     described by ``self.weights`` / ``self.biases`` (one pair per
     layer, sigmoid activations throughout) and return the scalar
     activation of the final layer.
     """
     activation = np.array(inputs)
     for weight, bias in zip(self.weights, self.biases):
         activation = sigmoid(activation.dot(weight) + bias)
     return float(activation)
# Example #4
 def forward_pass(self, inputs):
     """
     Forward pass of a single sigmoid layer, caching gradients.

     ``self._weights`` has one row per neuron; the last column is the
     bias, the remaining columns multiply ``inputs``.  Side effects:
     ``self._grad_inputs`` (neurons x inputs) and ``self._grad_weights``
     (written in place, so it must already have the weights' shape) are
     filled with d(output)/d(input) and d(output)/d(weight).
     Returns the (num_neurons,) vector of activations.
     """
     inputs = np.asarray(inputs)
     preact = self._weights[:, :-1].dot(inputs) + self._weights[:, -1]
     outputs = sigmoid(preact)
     # Derivative of the sigmoid, shaped (neurons, 1) so it broadcasts
     # across each neuron's row of weights/inputs.
     grad = (outputs * (1 - outputs))[:, np.newaxis]
     self._grad_inputs = grad * self._weights[:, :-1]
     # Outer product grad x inputs via broadcasting; no ones() scaffolding.
     self._grad_weights[:, :-1] = grad * inputs[np.newaxis, :]
     # Bias gradient is the sigmoid derivative itself.
     self._grad_weights[:, -1] = grad[:, 0]
     return outputs
# Example #5
 def _probability(self, features):
     """Return P(y = 1 | features) as an (n, 1) column vector.

     Lifts the raw features through ``self.feature_map``, applies the
     learned parameters ``self._theta``, and squashes the scores with
     the logistic sigmoid.
     """
     mapped = self.feature_map(features)
     scores = np.matmul(mapped, self._theta)
     return sigmoid(scores).reshape(-1, 1)
 def evaluate(self):
     """Score the strategy in (0, 1): the mean payoff of doing nothing
     versus rebalancing, squashed through ``util.sigmoid``."""
     outcomes = [self.__do_nothing(), self.__rebalance()]
     return util.sigmoid(statistics.mean(outcomes))
# Example #7
 def calc_effort(self, inputs):
     """
     One simple recurrent step: the hidden state is refreshed from the
     first ``self.n_input`` external inputs plus the previous hidden
     state, then mapped by the readout layer (Wo, bo) to a scalar
     effort in (0, 1).
     """
     stimuli = np.array(inputs[:self.n_input])
     recurrent_in = np.concatenate((stimuli, self.hidden_out))
     self.hidden_out = sigmoid(recurrent_in.dot(self.W) + self.b)
     effort = sigmoid(self.hidden_out.dot(self.Wo) + self.bo)
     return float(effort)