Example 1
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
    
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    
    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''

    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture
    ### START CODE HERE ### (≈ 1 line of code)
    A = lr_utils.sigmoid(np.dot(w.T, X) + b)
    ### END CODE HERE ###

    for i in range(A.shape[1]):
        # Convert probabilities a[0,i] to actual predictions p[0,i]
        ### START CODE HERE ### (≈ 4 lines of code)
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
        ### END CODE HERE ###

    assert (Y_prediction.shape == (1, m))

    return Y_prediction
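# The per-example loop above can also be written as a single vectorized comparison on A.
# A quick illustration on made-up probabilities (not part of the original exercise):
import numpy as np

A_demo = np.array([[0.2, 0.9, 0.51, 0.5]])   # made-up probabilities, shape (1, 4)
print((A_demo > 0.5).astype(float))          # [[0. 1. 1. 0.]] -- same decisions as the loop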
Example 2
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b
    
    Tips:
    - Write your code step by step for the propagation
    """

    m = X.shape[1]

    # FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
    A = lr_utils.sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) *
                             (np.log(1 - A)))  # compute cost
    ### END CODE HERE ###

    # BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)
    ### END CODE HERE ###

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    grads = {"dw": dw, "db": db}

    return grads, cost
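# The gradient formulas above are easy to get subtly wrong, so a finite-difference comparison
# is a useful sanity test. A minimal self-contained sketch (the local sigmoid stands in for
# lr_utils.sigmoid, assumed to be the standard logistic function; the data is made up):
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def cost_only(w, b, X, Y):
    # forward pass only, same cost expression as in propagate()
    A = sigmoid(np.dot(w.T, X) + b)
    return float(-np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A)))

w_t = np.array([[1.], [2.]]); b_t = 2.
X_t = np.array([[1., 2., -1.], [3., 4., -3.2]])
Y_t = np.array([[1, 0, 1]])

A_t = sigmoid(np.dot(w_t.T, X_t) + b_t)
dw_t = np.dot(X_t, (A_t - Y_t).T) / X_t.shape[1]   # analytic gradient, same formula as above

eps = 1e-6                                         # central finite difference on w[0, 0]
w_plus, w_minus = w_t.copy(), w_t.copy()
w_plus[0, 0] += eps
w_minus[0, 0] -= eps
dw00_numeric = (cost_only(w_plus, b_t, X_t, Y_t) - cost_only(w_minus, b_t, X_t, Y_t)) / (2 * eps)
print(dw_t[0, 0], dw00_numeric)                    # the two numbers should agree to several decimals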
Example 3
    def propagate(self) -> float:
        """Return cost -- a scalar, negative log-likelihood cost for logistic regression."""
        m_data = self.train_set_x.shape[1]  # number of examples

        # forward propagation
        A = lr_utils.sigmoid(
            np.dot(self.w.T, self.train_set_x) +
            self.b)  # activation: (1, m_train), the same as self.train_set_y's
        cost: float = np.sum(self.train_set_y * np.log(A) +
                             (1 - self.train_set_y) *
                             np.log(1 - A)) / -m_data  # cost: a scalar

        # backward propagation
        self.dw = np.dot(self.train_set_x,
                         (A - self.train_set_y
                          ).T) / m_data  # dw: (m_flat, 1), the same as w's
        self.db = np.sum(A - self.train_set_y) / m_data  # db: scalar

        return cost
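    # A training loop is not shown in this excerpt. A minimal sketch of what one might look
    # like, using the gradients that propagate() stores on the instance (the method name
    # `optimize` and the hyperparameter defaults are assumptions, not taken from the source):
    def optimize(self, num_iterations: int = 2000, learning_rate: float = 0.005) -> list:
        costs = []
        for i in range(num_iterations):
            cost = self.propagate()                    # fills self.dw and self.db
            self.w = self.w - learning_rate * self.dw  # gradient-descent step on the weights
            self.b = self.b - learning_rate * self.db  # gradient-descent step on the bias
            if i % 100 == 0:
                costs.append(cost)                     # track the cost every 100 iterations
        return costs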
Example 4
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T  # flatten each image into one column
test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T
### END CODE HERE ###

print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print("test_set_y shape: " + str(test_set_y.shape))
print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

# One common preprocessing step in machine learning is to center and standardize your dataset:
# subtract the mean of the whole numpy array from each example, then divide each example by the
# standard deviation of the whole numpy array. For picture datasets, it is simpler, more
# convenient, and works almost as well to just divide every row of the dataset by 255
# (the maximum value of a pixel channel).
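# For reference, the full center-and-standardize alternative described above would look
# roughly like this (a sketch only; the variable names with the _standardized suffix are
# illustrative, and the code below keeps the simpler /255. scaling):
train_mean = train_set_x_flatten.mean()
train_std = train_set_x_flatten.std()
train_set_x_standardized = (train_set_x_flatten - train_mean) / train_std
test_set_x_standardized = (test_set_x_flatten - train_mean) / train_std  # reuse training statistics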

train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
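# sigmoid() is not defined in this excerpt; a standard logistic implementation consistent
# with how it is called just below (assumed, not copied from the source):
def sigmoid(z):
    return 1 / (1 + np.exp(-z))  # e.g. sigmoid(np.array([0, 2])) ≈ [0.5, 0.880797]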

print("sigmoid([0, 2]) = " + str(sigmoid(np.array([0, 2]))))

# 4.2 - Initializing parameters
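# initialize_with_zeros() is also not defined in this excerpt; a minimal sketch consistent
# with how it is called below (the exact implementation in the source may differ):
def initialize_with_zeros(dim):
    w = np.zeros((dim, 1))  # one zero weight per input feature
    b = 0.0                 # scalar bias
    return w, b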
w, b = initialize_with_zeros(train_set_x.shape[0])  # one weight per feature (num_px * num_px * 3), not per example
# print("w = " + str(w))
# print("b = " + str(b))


# 4.3 - Forward and Backward propagation
# Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
# Exercise: Implement a function propagate() that computes the cost function and its gradient.

w, b, X, Y = np.array([[1.], [2.]]), 2., np.array([[1., 2., -1.], [3., 4., -3.2]]), np.array([[1, 0, 1]])
grads, cost = propagate(w, b, X, Y)
print("dw = " + str(grads["dw"]))
print("db = " + str(grads["db"]))
Example 5
    def predict(self, X):  # X: (m_flat, m_cur), w: (m_flat, 1)
        A = lr_utils.sigmoid(np.dot(self.w.T, X) +
                             self.b)  # activation: (1, m_cur)
        return np.rint(A)
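# Note: np.rint rounds to the nearest integer with ties going to the even integer, so a
# probability of exactly 0.5 maps to 0.0, which matches the strict "> 0.5" rule used in
# Example 1; for all other probabilities the two rules give the same label. A quick check
# on made-up values:
import numpy as np

A_demo = np.array([[0.2, 0.9, 0.5]])
print(np.rint(A_demo))               # [[0. 1. 0.]]
print((A_demo > 0.5).astype(float))  # [[0. 1. 0.]]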