    def test_sigmoid(self):
        X = array([[1, 1, 1], [1, 1, 1]])
        theta = array([[1, 1, 1]]).transpose()
        z = X.dot(theta)
        hypothesis = sigmoid(z)

        expected = array([[0.95257], [0.95257]])
        numpy.testing.assert_almost_equal(expected, hypothesis, 5)
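For reference, the sigmoid under test is the standard logistic function; a minimal implementation that would satisfy this test (the project's own version lives in its core module) might look like:

import numpy

def sigmoid(z):
    # Standard logistic function, applied element-wise; for z = 3 this gives ~0.95257,
    # matching the expected values in the test above
    return 1 / (1 + numpy.exp(-z))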
def activation_cost_function(X, Y, weights):
    # n (the number of input features) comes from the enclosing scope
    W, b = utils.vector_to_weights(weights, n, 1)  # expand the flat weight vector into W and b
    z = core.calculate_z(X, W, b)
    a = core.sigmoid(z)
    j, dW, db = core.logistic_cost_function(X, a, Y)
    # Flatten the gradients back into a single vector. They are transposed
    # because the minimization functions were changed to work with NNs.
    gradients = utils.weights_to_vector(dW.T, db.T)
    return j, gradients
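utils.vector_to_weights and utils.weights_to_vector are not shown in these snippets; judging from how they are called, they pack (W, b) into a single flat parameter vector for the minimizer and unpack it again. A minimal sketch of that round trip, with the signatures and the W-first/b-last layout assumed from the calls above:

import numpy

def weights_to_vector(W, b):
    # Concatenate W and b into one flat parameter vector (assumed layout: W first, then b)
    return numpy.concatenate([W.ravel(), b.ravel()])

def vector_to_weights(vector, n_in, n_out):
    # Split the flat vector back into a weight matrix and a bias vector
    # (the exact shape of W is an assumption; the project may store it transposed)
    W = vector[: n_in * n_out].reshape(n_out, n_in)
    b = vector[n_in * n_out:].reshape(n_out, 1)
    return W, b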
    def forward_propagate(self, X, weights):
        """

        :param X: Input features (n x m matrix)
        :param weights: A list of (W, b) tuples, one for each layer
        :return: List of activations, list of zs
        """

        activations = [X]  # The inputs are the 0th activation
        zs = []

        for i in range(1, len(self.layers)):  # Skip the 0th layer, since these are just the inputs
            W = weights[i - 1]["W"]
            b = weights[i - 1]["b"]
            z = core.calculate_z(activations[i - 1], W, b)
            a = core.sigmoid(z)

            zs.append(z)
            activations.append(a)

        return activations, zs
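core.calculate_z is used throughout but never shown here; from the way it is called (previous activation, weights, bias) it presumably computes a layer's affine pre-activation. A minimal sketch, assuming the column-wise convention from the docstring above, where A is (n x m) and W maps the n inputs to the layer's units:

import numpy

def calculate_z(A, W, b):
    # Affine pre-activation for one layer: W.A + b, with b broadcast over the m examples
    return W.dot(A) + b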
def activation_cost_function(X, Y, W, b):
    z = core.calculate_z(X, W, b)
    a = core.sigmoid(z)
    j, dW, db = core.logistic_cost_function(X, a, Y)
    # Transposing the gradients because the minimization functions were changed to work with NNs
    return j, dW.T, db.T
def activate(X, W, b):
    z = core.calculate_z(X, W, b)
    a = core.sigmoid(z)
    return a
    def test_cost(self):
        z = calculate_z(self.X, self.W, self.b)
        a = sigmoid(z)
        j, dW, db = logistic_cost_function(self.X, a, self.Y)
        expected = 0.048587
        numpy.testing.assert_almost_equal(j, expected, 5)
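logistic_cost_function is also defined elsewhere in the project; the value checked here is presumably the mean binary cross-entropy of the predictions. A sketch of that cost and its gradients, assuming column-wise examples (X is n x m, A and Y are 1 x m); the orientation of dW, and whether it needs the transposes applied in the snippets above, depends on how the project stores W:

import numpy

def logistic_cost_function(X, A, Y):
    m = Y.shape[1]
    # Mean binary cross-entropy between predictions A and labels Y
    j = -numpy.sum(Y * numpy.log(A) + (1 - Y) * numpy.log(1 - A)) / m
    # Gradients with respect to the weights and bias of a single sigmoid unit
    dW = X.dot((A - Y).T) / m
    db = numpy.sum(A - Y) / m
    return j, dW, db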