Example #1
def test_l_model_forward(self):
    """
    Test ml.common.l_model_forward: with all-zero weights and biases,
    every entry of the final activation AL should be sigmoid(0) = 0.5.
    """
    tests = [{
        "x": numpy.array([[1, 2], [3, 4]]),
        "parameters": {
            "W1": numpy.array([[0, 0], [0, 0]]),
            "b1": numpy.array([[0], [0]]),
            "W2": numpy.array([[0, 0], [0, 0]]),
            "b2": numpy.array([[0], [0]])
        },
        "al": numpy.array([[0.5, 0.5], [0.5, 0.5]]),
        # expected per-layer caches; not asserted in this test
        "caches": [
            ((numpy.array([[1, 2], [3, 4]]),
              numpy.array([[0, 0], [0, 0]]),
              numpy.array([[0], [0]])),
             numpy.array([[0, 0], [0, 0]])),
            ((numpy.array([[0., 0.], [0., 0.]]),
              numpy.array([[0, 0], [0, 0]]),
              numpy.array([[0], [0]])),
             numpy.array([[0., 0.], [0., 0.]])),
        ],
    }]
    for test in tests:
        x = test["x"]
        parameters = test["parameters"]
        expected_al = test["al"]
        al, caches = l_model_forward(x, parameters)
        self.assertListEqual(al.tolist(), expected_al.tolist())
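
For reference, a minimal sketch of the forward pass this test exercises, assuming the [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID layout named in the training examples below. The real ml.common.l_model_forward may differ, but with all-zero weights and biases any such pass yields sigmoid(0) = 0.5 everywhere, which is what the test asserts.

import numpy as np


def l_model_forward_sketch(x, parameters):
    """Minimal forward pass: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID."""
    num_layers = len(parameters) // 2           # each layer contributes a W and a b
    a = x
    caches = []
    for layer in range(1, num_layers + 1):
        a_prev = a
        w = parameters["W" + str(layer)]
        b = parameters["b" + str(layer)]
        z = np.dot(w, a_prev) + b               # linear step
        if layer < num_layers:
            a = np.maximum(0, z)                # ReLU for hidden layers
        else:
            a = 1.0 / (1.0 + np.exp(-z))        # sigmoid for the output layer
        caches.append(((a_prev, w, b), z))      # same nesting as the test's "caches"
    return a, caches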
Example #2
def l_layer_model(x,
                  y,
                  layers_dims,
                  learning_rate=0.009,
                  num_iterations=2000,
                  print_cost=False,
                  lambd=0.7):
    """
    training using gradient descent with L2 regularization

    @param x: input X, numpy arrays
    @param y: actual answers Y, numpy arrays
    @param layers_dims: dimensions of layers, lists
    @param learning_rate: hyper-parameter alpha, floats
    @param num_iterations: hyper-parameter number of iterations, ints
    @param print_cost: whether to print the cost during training, booleans
    @param lambd: regularization hyper-parameter lambda, floats
    @return: trained parameters, dictionaries
    """

    costs = []  # keep track of cost

    parameters = Parameters(PWD)
    parameters.initialize_parameters_deep_he(layers_dims)

    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        al, caches = l_model_forward(x, parameters.get())

        # Compute costs
        cost = compute_cost_with_l2_regularization(al, y, parameters.get(),
                                                   lambd)

        # Backward propagation.
        grads = l_model_backward_with_l2(al, y, caches, lambd)

        # Update parameters.
        parameters.update(grads, learning_rate)

        # Record and print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    # plt.show()

    return parameters
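
A hypothetical call to l_layer_model, assuming the column-per-example convention implied by the forward pass above (x of shape (n_features, m), y of shape (1, m)); the data and layer sizes here are made up for illustration.

import numpy as np

np.random.seed(0)
x_train = np.random.randn(4, 100)                          # 4 features, 100 examples (made-up data)
y_train = (x_train.sum(axis=0, keepdims=True) > 0) * 1.0   # (1, 100) binary labels

layers_dims = [4, 5, 3, 1]                                 # input, two hidden layers, sigmoid output
parameters = l_layer_model(x_train, y_train, layers_dims,
                           learning_rate=0.009,
                           num_iterations=2000,
                           print_cost=True,
                           lambd=0.7)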
Example #3
def l_layer_model(x,
                  y,
                  layers_dims,
                  learning_rate=0.009,
                  num_iterations=2000,
                  print_cost=False,
                  lambd=0.7):
    """

    @param x:
    @param y:
    @param layers_dims:
    @param learning_rate:
    @param num_iterations:
    @param print_cost:
    @return:
    """

    np.random.seed(1)
    costs = []  # keep track of cost

    parameters = Parameters(PWD)
    parameters.initialize_parameters_deep_he(layers_dims)

    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        al, caches = l_model_forward(x, parameters.get())

        # Compute costs
        cost = compute_cost_with_l2_regularization(al, y, parameters.get(),
                                                   lambd)

        # Backward propagation.
        grads = l_model_backward_with_l2(al, y, caches, lambd)

        # Update parameters.
        parameters.update(grads, learning_rate)

        # Record and print the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    # plt.show()

    return parameters
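
compute_cost_with_l2_regularization is not shown in these snippets. Under the usual definition it is the cross-entropy cost plus an L2 penalty of lambd / (2m) times the sum of squared weight entries; a minimal sketch under that assumption follows (the project's real helper may differ in naming and in how it reads the parameters dict).

import numpy as np


def compute_cost_with_l2_regularization_sketch(al, y, parameters, lambd):
    """Cross-entropy cost plus an L2 penalty of lambd / (2 * m) over all weights."""
    m = y.shape[1]
    cross_entropy = -np.sum(y * np.log(al) + (1 - y) * np.log(1 - al)) / m
    l2_penalty = sum(np.sum(np.square(w))
                     for name, w in parameters.items()
                     if name.startswith("W"))
    return cross_entropy + (lambd / (2 * m)) * l2_penalty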
Example #4
def predict(x, y, parameters):
    """
    prediction over a dataset using a 0.5 threshold on the sigmoid output

    @param x: input X
    @param y: true labels Y (only used by the commented-out accuracy check)
    @param parameters: parameters obtained from training
    @return: array of 0/1 predictions
    """
    m = x.shape[1]
    # n = len(parameters) // 2  # number of layers in the neural network
    p = np.zeros((1, m))

    # Forward propagation
    probas, caches = l_model_forward(x, parameters)

    # convert probas to 0/1 predictions
    for i in range(0, probas.shape[1]):
        if probas[0, i] > 0.5:
            p[0, i] = 1
        else:
            p[0, i] = 0

    # print(results)
    # print("predictions: " + str(p))
    # print("true labels: " + str(y))
    # print("Accuracy: " + str(np.sum((p == y) / m)))

    return p
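
A hypothetical usage of this binary predict, assuming placeholder test data shaped like the training data above and the Parameters wrapper returned by l_layer_model exposing the raw dict via .get(), as in the training loop.

import numpy as np

np.random.seed(2)
x_test = np.random.randn(4, 20)                            # placeholder test set, 4 features
y_test = (x_test.sum(axis=0, keepdims=True) > 0) * 1.0     # (1, 20) binary labels

predictions = predict(x_test, y_test, parameters.get())    # parameters returned by l_layer_model
print("Accuracy: " + str(np.mean(predictions == y_test)))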
Example #5
def predict(x, y, parameters):
    """
    prediction over a dataset

    @param x: input X
    @param y: true labels Y (only used by the commented-out accuracy check)
    @param parameters: parameters obtained from training
    @return: array of predictions
    """
    # m = x.shape[1]
    # n = len(parameters) // 2  # number of layers in the neural network

    # Forward propagation
    probas, caches = l_model_forward(x, parameters)

    # convert probabilities to predictions using the one-vs-all method
    prediction = one_vs_all_prediction(probas)

    # print (results)
    # print ("predictions: " + str(prediction))
    # print ("true labels: " + str(y))
    # print("Accuracy: " + str(np.sum((prediction == y) / m)))

    return prediction
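
one_vs_all_prediction is not defined in these snippets. A common convention is to pick, for each example (column), the class with the highest probability and emit a one-hot column; a minimal sketch under that assumption follows, and the project's real helper may differ.

import numpy as np


def one_vs_all_prediction_sketch(probas):
    """Mark, for each example (column), the class (row) with the highest probability."""
    prediction = np.zeros_like(probas)
    winners = np.argmax(probas, axis=0)                    # best class index per column
    prediction[winners, np.arange(probas.shape[1])] = 1
    return prediction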