Code Example #1
    def test_softmax_3(self):
        # softmax of a 1-D vector: the result should sum to 1, with the
        # largest logit (7) taking most of the probability mass.
        input = np.array([2, 3, 5, 1, 7])
        output = np.array(
            [0.00579425, 0.01575041, 0.11638064, 0.00213159, 0.85994311])
        np.testing.assert_allclose(util_functions.softmax(input),
                                   output,
                                   atol=0.0001)
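The expected values can be checked by hand: e^7 ≈ 1096.63 while e^2 + e^3 + e^5 + e^1 + e^7 ≈ 1275.24, so the last entry is 1096.63 / 1275.24 ≈ 0.85994, within the atol=0.0001 tolerance used above.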
Code Example #2
    def predict_batch(self, x_batch):
        # Forward pass over a whole batch: sigmoid activations on every
        # hidden layer, softmax on the output layer, then the index of
        # the most likely class for each sample.
        a = x_batch
        for l in range(self.num_layers - 2):
            z = self.weights[l] @ a + self.biases[l]
            a = sigmoid(z)

        output = self.weights[-1] @ a + self.biases[-1]
        return np.argmax(softmax(output), axis=1)
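A note on shapes: compute_gradients in Code Example #6 transposes activations with transpose(0, 2, 1), which implies each sample is stored as a column vector, i.e. x_batch presumably has shape (batch_size, n_in, 1). Under that convention, weights[l] @ a broadcasts to (batch_size, n_out, 1) per NumPy's matmul rules, and the final argmax over axis=1 picks one class index per sample.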
Code Example #3
    def predict_batch(self, x_batch):
        # Inference with dropout: scale every layer's activations by its
        # keep probability, 1 - dropout_rate.
        a = x_batch * (1 - self.dropout_rates[0])
        for l in range(self.num_layers - 2):
            z = self.weights[l] @ a + self.biases[l]
            a = sigmoid(z) * (1 - self.dropout_rates[l + 1])

        # No dropout on the last layer, just softmax.
        output = self.weights[-1] @ a + self.biases[-1]
        return np.argmax(softmax(output), axis=1)
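This is the classic (non-inverted) dropout scheme: units are dropped with probability dropout_rates[l] during training, so at prediction time each activation is multiplied by the keep probability 1 - dropout_rates[l] to match its expected value under training. The output layer is left unscaled, since no dropout is applied there.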
Code Example #4
    def test_softmax_1(self):
        # softmax applied to a 2-D array should normalize each row
        # independently.
        input = np.array([[2, 3, 5, 1, 7], [4, 3, 0, 5, 5],
                          [-1, 2, 0, -3, -4]])
        output = np.array(
            [[0.00579425, 0.01575041, 0.11638064, 0.00213159, 0.85994311],
             [0.14656828, 0.05391946, 0.00268449, 0.39841389, 0.39841389],
             [0.04168587, 0.83728318, 0.11331396, 0.00564157, 0.00207542]])
        np.testing.assert_allclose(util_functions.softmax(input),
                                   output,
                                   atol=0.0001)
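Two details of the expected output are worth noticing: each row sums to 1 because the rows are normalized independently, and the tied logits in the second row (5 and 5) map to identical probabilities, 0.39841389.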
Code Example #5
    def test_softmax_4(self):
        # A logit of 300 overflows a naive exp(); this test checks that
        # the implementation stays numerically stable.
        input = np.array([[2, 3, 5, 1, 7], [4, 3, 0, 5, 300],
                          [-1, -6, 0, -3, -4]])
        output = np.array(
            [[0.00579425, 0.01575041, 0.11638064, 0.00213159, 0.85994311],
             [2.81082208e-129, 1.03404366e-129, 5.14820022e-131,
              7.64060659e-129, 1.00000000e+000],
             [0.25574518, 0.0017232, 0.69518747, 0.03461135, 0.0127328]])
        np.testing.assert_allclose(util_functions.softmax(input),
                                   output,
                                   atol=0.0001)
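util_functions.softmax itself is not shown in these examples, but the tests pin down its behavior: row-wise normalization and stability under a logit of 300, which would overflow a naive exp(). A minimal sketch consistent with the tests, assuming the standard max-subtraction trick (the body below is an illustration, not the source implementation):

    import numpy as np

    def softmax(x):
        # Sketch only: subtracting the row-wise max leaves the result
        # unchanged but keeps exp() finite even for a logit of 300.
        shifted = x - np.max(x, axis=-1, keepdims=True)
        exps = np.exp(shifted)
        return exps / np.sum(exps, axis=-1, keepdims=True)

(The batched network code applies softmax to (batch, n, 1) arrays, where the normalization axis would have to be the middle one, so treat the axis choice above as illustrative.)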
Code Example #6
    def compute_gradients(self, y_batch):
        # Backpropagation over the batch. For a cross-entropy loss, the
        # output-layer error simplifies to softmax(z) - y.
        delta = [None] * (self.num_layers - 1)
        delta[-1] = softmax(self.z_values[-1]) - y_batch
        # transpose(0, 2, 1) swaps the last two axes, so each @ below is
        # a per-sample outer product delta @ a^T.
        self.dw[-1] = delta[-1] @ self.activations[-2].transpose(0, 2, 1)

        for l in range(2, self.num_layers):
            delta[-l] = (self.weights[-l + 1].transpose() @ delta[-l + 1]
                         * sigmoid_derivative(self.z_values[-l]))
            self.dw[-l] = delta[-l] @ self.activations[-l - 1].transpose(0, 2, 1)

        self.db = delta
        return self.dw, self.db
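The tidy delta[-1] = softmax(z) - y is the standard simplification for cross-entropy over softmax outputs: the gradient of the loss with respect to the pre-softmax logits collapses to the difference between the predicted and true distributions. Note that the batched outer products keep a leading batch axis, so self.dw holds one gradient per sample; the caller presumably sums or averages them before updating the weights.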
Code Example #7
    def __call__(self, z):
        # Cache the input and output of the softmax.
        self.layer_input = z
        self.layer_output = util_functions.softmax(z)
        return self.layer_output
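Caching layer_input and layer_output in __call__ suggests this is a layer object whose backward pass reuses the stored values instead of recomputing the softmax.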