Example #1
import numpy as np
from Sigmoid import sigmoid  # local module used throughout these examples


def identity_function(x):  # identity output activation (same definition as in Example #9 below)
    return x


def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = identity_function(a3)

    return y
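
The forward function above expects a network dict holding three weight matrices and three bias vectors. A minimal sketch of how it could be exercised, assuming the usual 2-3-2-2 layer sizes; the weight values below are illustrative assumptions, not from the original:

# Hypothetical setup for forward(); shapes and values are illustrative assumptions
def init_network():
    network = {}
    network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])    # (2, 3)
    network['b1'] = np.array([0.1, 0.2, 0.3])
    network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])  # (3, 2)
    network['b2'] = np.array([0.1, 0.2])
    network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])              # (2, 2)
    network['b3'] = np.array([0.1, 0.2])
    return network

network = init_network()
x = np.array([1.0, 0.5])
print(forward(network, x))  # a length-2 output vector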

def propagate(w, b, X, Y):
    """
    Compute the cost and gradients for logistic regression.

    :param w: weights, a numpy array of size (num_px * num_px * 3, 1)
    :param b: bias, a scalar
    :param X: data of size (num_px * num_px * 3, number of examples)
    :param Y: true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
    :return: cost -- negative log-likelihood cost for logistic regression
             dw -- gradient of the loss with respect to w, thus same shape as w
             db -- gradient of the loss with respect to b, thus same shape as b
    """

    m = X.shape[1]
    # FORWARD PROPAGATION (FROM X TO COST)
    A = sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = -(1.0 / m) * np.sum(Y * np.log(A) +
                               (1 - Y) * np.log(1 - A))  # compute cost

    # BACKWARD PROPAGATION (TO FIND GRAD)
    dw = (1.0 / m) * np.dot(X, (A - Y).T)
    db = (1.0 / m) * np.sum(A - Y)

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    grads = {
        "dw": dw,
        "db": db,
    }

    return grads, cost
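
As a quick sanity check, propagate can be called on tiny hand-made arrays. The toy values below are an illustrative assumption, not part of the original:

w = np.array([[1.0], [2.0]])
b = 2.0
X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[1, 0]])
grads, cost = propagate(w, b, X, Y)
print(grads["dw"].shape)  # (2, 1), the same shape as w
print(cost)               # a single scalar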
Example #3
def predict(w, b, X):
    """

    :param w:
    :param b:
    :param X: data of size (num_px * num_px * 3, number of examples
    :return: Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X

    """

    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    A = sigmoid(np.dot(w.T, X) + b)

    for i in range(A.shape[1]):
        # Convert the probability A[0, i] into an actual prediction Y_prediction[0, i]
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0

    assert (Y_prediction.shape == (1, m))

    return Y_prediction
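
A hypothetical call, again with illustrative weights rather than anything from the original:

w = np.array([[0.1], [0.2]])
b = -0.3
X = np.array([[1.0, -1.1, -3.2], [1.2, 2.0, 0.1]])
print("predictions = " + str(predict(w, b, X)))  # a (1, 3) array of 0/1 values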
Example #4
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b
    
    Tips:
    - Write your code step by step for the propagation
    """
    m = X.shape[1]  # number of examples: X has shape (features, m), so use the second axis

    # forward propagation
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1 / m) * (np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)))

    # backward propagation
    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    grads = {"dw": dw, "db": db}
    return grads, cost
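
One way to convince yourself the analytic gradients from propagate are right is a central finite-difference check. This sketch is an addition for illustration; the toy inputs are assumptions, and numpy and sigmoid are assumed in scope as elsewhere in this listing:

w = np.array([[1.0], [2.0]])
b = 2.0
X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[1, 0]])

def grad_check_dw(w, b, X, Y, i=0, eps=1e-7):
    # Numerically estimate d(cost)/d(w[i]) and compare it with grads["dw"][i]
    w_plus, w_minus = w.copy(), w.copy()
    w_plus[i, 0] += eps
    w_minus[i, 0] -= eps
    _, cost_plus = propagate(w_plus, b, X, Y)
    _, cost_minus = propagate(w_minus, b, X, Y)
    return (cost_plus - cost_minus) / (2 * eps)

grads, _ = propagate(w, b, X, Y)
print(grads["dw"][0, 0], grad_check_dw(w, b, X, Y))  # the two numbers should agree closely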
Example #5
import numpy as np
from Sigmoid import sigmoid


def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros(shape=(1, m))
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
    assert (Y_prediction.shape == (1, m))
    return Y_prediction
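
Every example here does from Sigmoid import sigmoid, but the module body never appears in this listing. A minimal sketch of what Sigmoid.py presumably contains (an assumption, not the original file):

# Sigmoid.py -- hypothetical contents of the module imported above
import numpy as np

def sigmoid(z):
    """Compute the element-wise sigmoid of z (scalar or numpy array)."""
    return 1 / (1 + np.exp(-z))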
Example #6
print("NN V1.0", "\n")
import numpy as np

import MNIST_read
from Sigmoid import sigmoid




x, y, m, w, b = MNIST_read.read() #Reads x and y from the csv file and returns numpy arrays

print("\n", "x.sh:", x.shape, "y.sh:", y.shape, "w.sh:", w.shape, "b.sh:", b.shape, "\n")

z = np.dot(w, x.T) + b
z = np.sum(z, axis=1)  # compute z = sum(w*x + b) for the weights
A = sigmoid(z)         # apply the sigmoid activation to z
print("z.shape: ", z.shape)
print("z: ", z)


print ("y: ", (y))
ylabel = y[0,0]       #assigns a 1 to the 5th value of the classifier as y= the character 5.
y = np.zeros(shape = (1,10))
y[0,4] = 1

print ("A:", (A))
print("y:", y)

cost = -(1 / m) * np.sum((y * np.log(A).T) + (1 - y) * np.log(1 - A).T)  # cost function
print("\n", "cost: ", cost)
Example #7
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print("test_set_y shape: " + str(test_set_y.shape))
print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

# standardize: scale the pixel values to [0, 1] (the division broadcasts over the array)
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255

# sigmoid function
from Sigmoid import sigmoid

print("sigmoid(0) = " + str(sigmoid(0)))
print("sigmoid(9.2) = " + str(sigmoid(9.2)))
print()

# Initializing parameters
from Initialize_with_zeros import initialize_with_zeros

dim = 2
w, b = initialize_with_zeros(dim)
print("w = " + str(w))
print("b = " + str(b))
print()

# propagate
from Propagate import propagate
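
The script breaks off right after importing propagate; a plausible next step, using toy data that is an illustrative assumption rather than part of the original, would be:

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])  # toy data, illustrative only
Y = np.array([[1, 0]])
grads, cost = propagate(w, b, X, Y)     # w, b are the zero-initialized parameters above
print("dw = " + str(grads["dw"]))
print("db = " + str(grads["db"]))
print("cost = " + str(cost))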
Example #8
from Sigmoid import sigmoid
import numpy as np


def initialize_with_zeros(dim):
    """Create a zero weight vector w of shape (dim, 1) and a scalar bias b = 0."""
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b


print("sigmoid([0, 2]) = " + str(sigmoid(np.array([0, 2]))))
dim = 2
w, b = initialize_with_zeros(dim)
print("w = " + str(w))
print("b = " + str(b))
Example #9
import numpy as np
from Sigmoid import sigmoid

# input -> layer 1
X = np.array([1.0, 0.5])
W1 = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
B1 = np.array([0.1, 0.2, 0.3])  # one bias per neuron in the layer (3 here), not one per layer

print(X.shape)
print(W1.shape)
print(B1.shape)

A1 = np.dot(X, W1) + B1
print(A1)

Z1 = sigmoid(A1)
print(Z1)

# layer 1 -> layer 2
W2 = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
B2 = np.array([0.1, 0.2])

A2 = np.dot(Z1, W2) + B2
Z2 = sigmoid(A2)


# layer 2 -> output layer
# identity function: the same as y = x, but defined so every layer follows the same
# activation-function convention (sigmoid -> identity)
def identity_function(x):
    return x
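
The listing stops after defining identity_function; the natural last step of this 3-layer forward pass, with layer-3 weight values that are an illustrative assumption in the same spirit as W1 and W2, would be:

# layer 2 -> output layer, completed
W3 = np.array([[0.1, 0.3], [0.2, 0.4]])
B3 = np.array([0.1, 0.2])

A3 = np.dot(Z2, W3) + B3
Y = identity_function(A3)
print(Y)  # a length-2 output vector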