Example #1
    def feed_forward(self, input: np.ndarray) -> np.ndarray:
        """Pass the input through the network and return it's output.

        It is assumed that the input a is an (n, 1) Numpy ndarray,
        not a (n,) vector
        """
        for b, w in zip(self.biases, self.weights):
            input = sigmoid(np.dot(w, input) + b)
        return input
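
The feed_forward method relies on a sigmoid helper and on self.biases / self.weights holding layer-by-layer NumPy arrays. Below is a minimal sketch of that helper and of a shape-correct call; the layer sizes and random parameters are illustrative assumptions, not taken from the original network.

    import numpy as np

    def sigmoid(z):
        """Standard logistic function, applied element-wise."""
        return 1.0 / (1.0 + np.exp(-z))

    # Hypothetical 2 -> 3 -> 1 network, just to illustrate the expected shapes.
    rng = np.random.default_rng(0)
    biases = [rng.standard_normal((3, 1)), rng.standard_normal((1, 1))]
    weights = [rng.standard_normal((3, 2)), rng.standard_normal((1, 3))]

    x = np.array([[0.5], [-1.2]])  # an (n, 1) column vector, not an (n,) vector
    activation = x
    for b, w in zip(biases, weights):
        activation = sigmoid(np.dot(w, activation) + b)
    print(activation.shape)  # (1, 1)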
Example #2
    def sigmoid_derivative(z):
        """Derivative of the sigmoid function.

        This function computes the gradient (also called the slope) of
        the sigmoid function with respect to its input z.

        It is defined as:
            sigmoid_derivative(z) = sigma'(z) = sigma(z) * (1 - sigma(z))

        :param z: a scalar or numpy array
        :return: the gradient value
        """
        sigmoid_value = sigmoid(z)
        return sigmoid_value * (1 - sigmoid_value)
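
A quick sanity check for this derivative is to compare it against a central finite difference. The sketch below redefines sigmoid as the standard logistic function, which is an assumption consistent with the other examples here.

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_derivative(z):
        s = sigmoid(z)
        return s * (1 - s)

    z = np.linspace(-5.0, 5.0, 11)
    eps = 1e-6
    numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)  # central difference
    print(np.allclose(sigmoid_derivative(z), numeric, atol=1e-6))  # True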
Example #3
    def back_propagate(self, x: np.ndarray,
                       y: float) -> tuple[list[np.ndarray], list[np.ndarray]]:
        """Pass x through the network and back to calculate the gradient.

        :param x: the training example to be classified
        :param y: the true label (as an index of the neuron in the output layer)
        :return: the gradient for the cost function
            as a tuple (g_biases, g_weights), where the elements
            of the tuple are layer-by-layer lists of numpy arrays.
        """
        biases_by_layers = [np.zeros(b.shape) for b in self.biases]
        weights_by_layers = [np.zeros(w.shape) for w in self.weights]

        # 1- feedforward
        # the input, x, is the activation of the first layer
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        # list to store all the z vectors, layer by layer
        z_vectors_by_layer = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            z_vectors_by_layer.append(z)
            activation = sigmoid(z)
            activations.append(activation)

        # 2- backward pass
        delta = self.calculate_delta(activations, z_vectors_by_layer, y)
        biases_by_layers[-1] = delta
        weights_by_layers[-1] = np.dot(delta, activations[-2].transpose())

        # Since Python allows negative indexing, we use it to
        # iterate backwards over the network layers.
        # Note that layer = 1 means the last layer of neurons,
        # layer = 2 is the second-last layer, and so on.
        for layer in range(2, self.num_layers):
            z = z_vectors_by_layer[-layer]
            sp = self.sigmoid_derivative(z)
            delta = np.dot(self.weights[-layer + 1].transpose(), delta) * sp
            biases_by_layers[-layer] = delta
            weights_by_layers[-layer] = np.dot(
                delta, activations[-layer - 1].transpose())

        return biases_by_layers, weights_by_layers
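
The calculate_delta helper called above is not shown in this example. Assuming a quadratic cost C = 1/2 * ||a - y||^2 (the original cost function is not shown), the output-layer error it returns would typically be computed as below, with the label index y expanded to a one-hot column vector.

    def calculate_delta(self, activations, z_vectors_by_layer, y):
        """Output-layer error for an assumed quadratic cost:
        delta = (a_L - y_onehot) * sigmoid'(z_L)."""
        expected = np.zeros(activations[-1].shape)
        expected[int(y)] = 1.0  # one-hot column vector for the true label
        return (activations[-1] - expected) * self.sigmoid_derivative(
            z_vectors_by_layer[-1])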
Example #4
def line_graph(x_values, y_values, x_title, y_title):
    """Plot y_values against x_values as a line graph.

    :param x_values: values for the horizontal axis
    :param y_values: values for the vertical axis
    :param x_title: label for the x-axis
    :param y_title: label for the y-axis
    :return: None
    """
    plt.axhline(0, color="gray")
    plt.axvline(0, color="gray")
    plt.plot(x_values, y_values)
    plt.xlabel(x_title)
    plt.ylabel(y_title)
    plt.show()


if __name__ == "__main__":
    x_data = np.arange(-10, 10, 0.01)
    y_data = [step(z) for z in x_data]
    line_graph(x_data, y_data, "Inputs", "Step Scores")

    y_data = sigmoid(x_data)
    line_graph(x_data, y_data, "Inputs", "Sigmoid Scores")
    pp.pprint(y_data)
    print("----")

    y_data = tanh(x_data)
    line_graph(x_data, y_data, "Inputs", "Hyperbolic Tangent Scores")
    pp.pprint(y_data)
    print("----")

    logits = np.linspace(-1, 10, num=100)
    y_data = softmax(logits)
    pp.pprint(y_data)
    line_graph(logits, y_data, "Inputs", "Softmax Scores")
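
The script above assumes that step, sigmoid, tanh, softmax, plt, and pp already exist in the module. The definitions below are one plausible, minimal version of those helpers (the originals are not shown), using matplotlib's pyplot and the standard pprint module.

    import pprint

    import matplotlib.pyplot as plt
    import numpy as np

    pp = pprint.PrettyPrinter(indent=4)

    def step(z):
        """Heaviside step activation: 1 for non-negative inputs, 0 otherwise."""
        return 1 if z >= 0 else 0

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def tanh(z):
        return np.tanh(z)

    def softmax(logits):
        """Numerically stable softmax over a 1-D array of logits."""
        exps = np.exp(logits - np.max(logits))
        return exps / np.sum(exps)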
Example #5
def line_graph(x, y, x_title, y_title):
    """Plot y against x as a line graph.

    :param x: values for the horizontal axis
    :param y: values for the vertical axis
    :param x_title: label for the x-axis
    :param y_title: label for the y-axis
    :return: None
    """
    plt.axhline(0, color='gray')
    plt.axvline(0, color='gray')
    plt.plot(x, y)
    plt.xlabel(x_title)
    plt.ylabel(y_title)
    plt.show()


input = np.arange(-10, 10, 0.01)
y_value = [step(z) for z in input]
line_graph(input, y_value, "Inputs", "Step Scores")

y_value = sigmoid(input)
line_graph(input, y_value, "Inputs", "Sigmoid Scores")
pp.pprint(y_value)
print('----')

y_value = tanh(input)
line_graph(input, y_value, "Inputs", "Hyperbolic Tangent Scores")
pp.pprint(y_value)
print('----')

logits = np.linspace(-1, 10, num=100)
y_value = softmax(logits)
pp.pprint(y_value)
line_graph(logits, y_value, "Inputs", "Softmax Scores")
Example #6
def sigmoid_prime(z: np.ndarray) -> np.ndarray:
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))
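
This is the same derivative as sigmoid_derivative in Example #2; its maximum value is 0.25, reached at z = 0, which is easy to confirm (assuming the NumPy-based sigmoid from the earlier examples).

    z = np.array([-2.0, 0.0, 2.0])
    print(sigmoid_prime(z))  # roughly [0.105, 0.25, 0.105]; the peak is at z = 0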