Example #1
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """

    grads = {}
    costs = []  # to keep track of the loss
    m = X.shape[1]  # number of examples
    layers_dims = [X.shape[0], 10, 5, 1]

    # Initialize parameters dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)

        cost = compute_loss(a3, Y)

        grads = backward_propagation(X, Y, cache)

        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
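A minimal usage sketch for Example #1 (not part of the scraped example): it assumes the training data come from a load_dataset helper and accuracy from a predict helper in the course's init_utils module, so both names are assumptions here rather than verified API.

# Hypothetical driver code; load_dataset and predict are assumed helpers
# from the course's init_utils module, not shown on this page.
from init_utils import load_dataset, predict

train_X, train_Y, test_X, test_Y = load_dataset()

# Compare the three initialization schemes on the same data.
for method in ("zeros", "random", "he"):
    parameters = model(train_X, train_Y, initialization=method)
    print("Accuracy on the training set with", method, "initialization:")
    predictions_train = predict(train_X, train_Y, parameters)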
Example #2
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="zeros"):
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    for i in range(0, num_iterations):
        a3, cache = forward_propagation(X, parameters)

        cost = compute_loss(a3, Y)

        grads = backward_propagation(X, Y, cache)

        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
Example #3
def model(X, Y, alpha=0.005, loops=5000, init_method='he'):
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]
    # initialize parameters
    if init_method == 'zeros':
        params = init_zeros(layers_dims)
    elif init_method == 'random':
        params = init_random(layers_dims)
    elif init_method == 'he':
        params = init_he(layers_dims)
    else:
        raise ValueError('unexpected init_method!')
    # start training
    for i in range(loops):
        a3, cache = init_utils.forward_propagation(X, params)
        cost = init_utils.compute_loss(a3, Y)
        costs.append(cost)
        grads = init_utils.backward_propagation(X, Y, cache)
        params = init_utils.update_parameters(params, grads, alpha)
        if (i + 1) % 100 == 0:
            print(f'loss after iteration {i + 1}: {cost}')
    plt.plot(costs)
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.title('loss curve')
    plt.show()
    return params
Example #4
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
    
    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent 
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")
    
    Returns:
    parameters -- parameters learnt by the model
    """
        
    grads = {}
    costs = [] # to keep track of the loss
    m = X.shape[1] # number of examples
    layers_dims = [X.shape[0], 10, 5, 1]
    
    # Initialize parameters dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)
        
        # Loss
        cost = compute_loss(a3, Y)

        # Backward propagation.
        grads = backward_propagation(X, Y, cache)
        
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        
        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)
            
    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    
    return parameters
Example #5
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization='he',
          is_plot=True):
    """
        实现一个三层网络:LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID
    参数:
        X -输入的数据,维度(2,训练/测试的数量)
        Y -标签 [0|1],维度(1,训练/测试的数量)
        learning_rate -学习速率
        num_iterations -迭代次数
        print_cost -是否打印成本值,每迭代1000次打印一次
        initialization -字符串类型,[zeros|random|he]
        is_plot -是否绘制梯度下降的曲线图
    返回:
        parameters -学习后的参数
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        print("Invalid initialization argument! Exiting.")
        exit()

    for i in range(0, num_iterations):
        a3, cache = init_utils.forward_propagation(X, parameters)
        cost = init_utils.compute_loss(a3, Y)
        grads = init_utils.backward_propagation(X, Y, cache)
        parameters = init_utils.update_parameters(parameters, grads,
                                                  learning_rate)

        if i % 1000 == 0:
            costs.append(cost)
            if print_cost:
                print("number times" + str(i) + " cost is " + str(cost))

    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title('learning_rate: ' + str(learning_rate))
        plt.show()

    return parameters
Example #6
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    实现一个 3 层的神经网络: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
    :param X: 输入数据, of shape (2, number of examples)
    :param Y: 正确的标签向量 (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    :param learning_rate: 梯度下降的学习率 
    :param num_iterations: 梯度下降的迭代次数
    :param print_cost: if True, print the cost every 1000 iterations
    :param initialization: string, 决定要使用的初始化方法 ("zeros","random" or "he")
    
    :return parameters: 神经网络学习到的参数
    """
        
    grads = {}
    costs = [] # to keep track of the loss
    m = X.shape[1] # number of examples
    layers_dims = [X.shape[0], 10, 5, 1]
    
    # Initialize the parameters dictionary
    if initialization == "zeros":       # zero initialization
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":    # random initialization with large weights
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":        # random initialization with smaller weights, scaled as in He et al., 2015
        parameters = initialize_parameters_he(layers_dims)

    # Gradient-descent loop
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)

        # Compute the loss
        cost = compute_loss(a3, Y)

        # Backward propagation
        grads = backward_propagation(X, Y, cache)

        # Update parameters
        parameters = update_parameters(parameters, grads, learning_rate)
        
        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)
            
    # Plot the cost curve (cost vs. iterations)
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    
    return parameters
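Example #6's comments name the three initializer helpers without showing their bodies. Below is a minimal sketch of plausible implementations under those comments' description (zeros everywhere; large random weights, where the *10 scale is an assumption; He scaling by sqrt(2/fan-in) per He et al., 2015); the course's actual helpers may differ in detail.

import numpy as np

def initialize_parameters_zeros(layers_dims):
    # All-zero weights: every unit in a layer computes the same function,
    # so gradient descent never breaks symmetry.
    parameters = {}
    for l in range(1, len(layers_dims)):
        parameters["W" + str(l)] = np.zeros((layers_dims[l], layers_dims[l - 1]))
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters

def initialize_parameters_random(layers_dims):
    # Large random weights (the *10 scale is an assumption): the sigmoid
    # output saturates early and the initial cost is very high.
    parameters = {}
    for l in range(1, len(layers_dims)):
        parameters["W" + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters

def initialize_parameters_he(layers_dims):
    # He initialization: scale by sqrt(2 / fan-in), suited to ReLU layers.
    parameters = {}
    for l in range(1, len(layers_dims)):
        parameters["W" + str(l)] = (np.random.randn(layers_dims[l], layers_dims[l - 1])
                                    * np.sqrt(2.0 / layers_dims[l - 1]))
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters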
Example #7
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization='he'):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """

    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    if initialization == 'zeros':
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == 'random':
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == 'he':
        parameters = initialize_parameters_he(layers_dims)

    for i in range(num_iterations):
        A3, cache = forward_propagation(X, parameters)

        cost = compute_loss(A3, Y)
        costs.append(cost)
        if i % 100 == 0 and print_cost:
            print('Cost at iteration {} is {}'.format(i, cost))

        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)

    # plot the cost
    plt.plot(costs)
    plt.xlabel('iterations')
    plt.ylabel('cost')

    plt.title('learning rate =' + str(learning_rate))
    plt.show()
    return parameters
Example #8
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he", is_plot = True):
    """
    实现一个三层的神经网络:ReLU->ReLU->Sigmoid
    :param X:输入数据,维度(2,要训练/测试的数量)
    :param Y:标签,维度(1,对应输入数据的数量)
    :param learning_rate:学习率
    :param num_iterations:迭代次数
    :param print_cost:是否打印成本值
    :param initialization:权重矩阵初始化方法
    :param is_plot:是否绘制梯度下降的曲线图
    :return:
        parameters:更新之后的参数
    """

    parameters = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the initialization method
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    for i in range(num_iterations):
        # Forward propagation
        A3, cache = init_utils.forward_propagation(X, parameters)

        # Compute the cost
        cost = init_utils.compute_loss(A3, Y)
        if i % 1000 == 0:
            costs.append(cost)
            if print_cost:
                print(f'Cost after iteration {i}: {cost}')

        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)

        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads, learning_rate)

    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

    return parameters
Example #9
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he",
          is_plot=True):
    grads = {}  # dictionary of gradients
    costs = []  # list to record the cost
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        print("Invalid initialization argument! Exiting.")
        exit()

    for i in range(0, num_iterations):
        a3, cache = init_utils.forward_propagation(X, parameters)

        # Compute the cost
        cost = init_utils.compute_loss(a3, Y)

        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)

        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads,
                                                  learning_rate)

        # Record the cost
        if i % 1000 == 0:
            costs.append(cost)
            # Print the cost
            if print_cost:
                print("Cost after iteration " + str(i) + ": " + str(cost))

    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title("learning rate =" + str(learning_rate))
        plt.show()

    return parameters
Example #10
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he",
          is_plot=True):
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the initialization method
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        print("Invalid initialization argument! Exiting.")
        exit()

    # Start learning
    for i in range(0, num_iterations):
        # Forward propagation
        a3, cache = init_utils.forward_propagation(X, parameters)
        # Compute the loss
        cost = init_utils.compute_loss(a3, Y)
        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)
        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads,
                                                  learning_rate)
        # Record the cost
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration " + str(i) + ": " + str(cost))

    # Plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.xlabel('iterations (per hundreds)')
        plt.ylabel('cost J')
        plt.title('learning_rate=' + str(learning_rate))
        plt.show()

    return parameters
Example #11
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he"):

    grads = {}
    costs = []  # To keep track of the loss
    m = X.shape[1]  # Number of examples
    layers_dims = [X.shape[0], 10, 5, 1]

    # Initialize parameters dictionary
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
        a3, cache = forward_propagation(X, parameters)

        # Loss
        cost = compute_loss(a3, Y)

        # Backward propagation
        grads = backward_propagation(X, Y, cache)

        # Update parameters
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # Plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
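Every example on this page delegates the forward pass to forward_propagation (or init_utils.forward_propagation) without showing it. Below is a minimal sketch of what that function computes for the LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID architecture the docstrings describe; the exact cache layout is an assumption, chosen to match the backward-pass sketch later on this page.

import numpy as np

def forward_propagation(X, parameters):
    # Three-layer forward pass: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    z1 = np.dot(W1, X) + b1
    a1 = np.maximum(0, z1)          # ReLU
    z2 = np.dot(W2, a1) + b2
    a2 = np.maximum(0, z2)          # ReLU
    z3 = np.dot(W3, a2) + b3
    a3 = 1.0 / (1.0 + np.exp(-z3))  # sigmoid

    # Cache everything the backward pass will need (layout is an assumption).
    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)
    return a3, cache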
Example #12
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he",
          is_plot=True):
    losses = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the initialization method
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    for i in range(num_iterations):
        # Forward propagation
        a3, cache = init_utils.forward_propagation(X, parameters)
        # Compute the loss
        loss = init_utils.compute_loss(a3, Y)

        # Backward propagation
        gradients = init_utils.backward_propagation(X, Y, cache)
        # Update the weights
        parameters = init_utils.update_parameters(parameters, gradients,
                                                  learning_rate)
        if i % 1000 == 0:
            losses.append(loss)
            # Print the loss
            if print_cost:
                print("Loss after iteration " + str(i) + ": " + str(loss))
    # Training finished; plot the loss curve
    if is_plot:
        plt.plot(losses)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

    # Return the learnt parameters
    return parameters
Example #13
def ini_model(X,
              Y,
              layer_dimension,
              initialize_method,
              learning_rate,
              num_iteration=15000):
    if initialize_method == 'zero':
        parameters = zero_initialization(layer_dimension)
    elif initialize_method == 'random':
        parameters = random_initialization(layer_dimension)
    elif initialize_method == 'he':
        parameters = he_initialization(layer_dimension)
    for i in range(0, num_iteration):
        A3, cache = forward_propagation(X, parameters)
        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)
        cost = compute_loss(A3, Y)
        if i % 1000 == 0:
            print(f'cost after iteration {i}: {cost}')
    return parameters
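Example #13, like the others, relies on a compute_loss helper that never appears on this page. A minimal sketch of a binary cross-entropy loss consistent with these examples follows; the np.nansum guard against log(0) is an assumption, not verified against the course utility.

import numpy as np

def compute_loss(a3, Y):
    # Binary cross-entropy averaged over the m examples (columns).
    m = Y.shape[1]
    logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
    # nansum skips terms where a saturated activation produced log(0) * 0.
    return 1.0 / m * np.nansum(logprobs)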
Example #14
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he"):
    costs = []  # to keep track of the loss
    layers_dims = [X.shape[0], 10, 5, 1]

    parameters = {}
    if initialization == "random":
        parameters = bad_initialization_relu_act_fn(layers_dims)
    elif initialization == "he":
        parameters = good_initialization_relu_act_fn(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):
        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)
        # Loss
        cost = compute_loss(a3, Y)
        # Backward propagation.
        grads = backward_propagation(X, Y, cache)
        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
Example #15
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          init="he",
          is_plot=True):
    grads = {}
    costs = []
    m = X.shape[1]
    layer_dims = [X.shape[0], 10, 5, 1]
    if init == "zeros":
        params = init_params_zeros(layer_dims)
    elif init == "random":
        params = init_params_random(layer_dims)
    elif init == "he":
        params = init_params_he(layer_dims)
    else:
        print("Wrong init option!")
        exit()

    for i in range(0, num_iterations):
        a3, cache = init_utils.forward_propagation(X, params)
        cost = init_utils.compute_loss(a3, Y)
        grads = init_utils.backward_propagation(X, Y, cache)
        params = init_utils.update_parameters(params, grads, learning_rate)

        if i % 1000 == 0:
            costs.append(cost)
            if print_cost:
                print("Iteration " + str(i) + " Cost:" + str(cost))
    if is_plot:
        plt.plot(costs)
        plt.ylabel("cost")
        plt.xlabel("#iterations")
        plt.title("Learning rate = " + str(learning_rate))
        plt.show()
    return params
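The gradients these examples consume come from backward_propagation, which none of them show. Here is a sketch for the three-layer net, assuming the cache layout of the forward-pass sketch above and folding the 1/m factor into dz3; the course's init_utils version may differ.

import numpy as np

def backward_propagation(X, Y, cache):
    # Gradients for LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID with
    # cross-entropy loss; cache layout matches the forward-pass sketch.
    m = X.shape[1]
    (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache

    dz3 = 1.0 / m * (a3 - Y)
    dW3 = np.dot(dz3, a2.T)
    db3 = np.sum(dz3, axis=1, keepdims=True)

    da2 = np.dot(W3.T, dz3)
    dz2 = np.multiply(da2, np.int64(a2 > 0))   # ReLU derivative
    dW2 = np.dot(dz2, a1.T)
    db2 = np.sum(dz2, axis=1, keepdims=True)

    da1 = np.dot(W2.T, dz2)
    dz1 = np.multiply(da1, np.int64(a1 > 0))   # ReLU derivative
    dW1 = np.dot(dz1, X.T)
    db1 = np.sum(dz1, axis=1, keepdims=True)

    return {"dz3": dz3, "dW3": dW3, "db3": db3,
            "dz2": dz2, "dW2": dW2, "db2": db2,
            "dz1": dz1, "dW1": dW1, "db1": db1}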
Example #16
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he"):
    """
    implements a three-layer neural network:
        linear -> relu -> linear -> relu -> linear -> sigmoid
    
    arguments:
        X -- input data, shape(2,number of examples)
        Y -- true label vector
        learning_rate -- learning rate for gradient descent
        num_iterations -- number of iterations to run gradient descent
        print_cost -- true,print the cost every 1000 iterations
        
    returns:
        parameters -- parameters learnt by the model
    """

    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # initialize parameters dictionary
    if initialization == 'zeros':
        print('init zeros')
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == 'random':
        print('init random')
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == 'he':
        print('init he')
        parameters = initialize_parameters_he(layers_dims)

    print("w1 = ", parameters["W1"])

    # loop (gradient descent)
    for i in range(0, num_iterations):
        # forward propagation
        a3, cache = forward_propagation(X, parameters)

        # loss
        cost = compute_loss(a3, Y)

        # backward propagation
        grads = backward_propagation(X, Y, cache)

        # update parameters
        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and i % 1000 == 0:
            print('cost after iteration {}: {}'.format(i, cost))
            costs.append(cost)

    plt.figure("2")
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title('Learning rate = ' + str(learning_rate))
    plt.show()

    return parameters
Example #17
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)

        # Loss
        cost = compute_loss(a3, Y)

        # Backward propagation.
        grads = backward_propagation(X, Y, cache)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # plot the loss
    plt.plot(costs)
    plt.ylabel('cost')
Example #18
def model(X, Y, learning_rate=0.01, num_iterations=15000, print_cost=True, initialization='he', is_plot=True):
    '''
    Implements a three-layer neural network: linear->relu->linear->relu->linear->sigmoid.
    :param X: input data, of shape (2, number of train/test examples)
    :param Y: labels, 0 or 1, of shape (1, number of corresponding input examples)
    :param learning_rate: learning rate
    :param num_iterations: number of iterations
    :param print_cost: whether to print the cost, once every 1000 iterations
    :param initialization: string, the initialization type ['zeros' | 'random' | 'he']
    :param is_plot: whether to plot the gradient-descent curve
    :return:
    parameters - the parameters learnt by the model
    '''

    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0],10,5,1]

    # Choose the initialization method
    if initialization == 'zeros':
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == 'random':
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == 'he':
        parameters = initialize_parameters_he(layers_dims)
    else:
        print('Invalid initialization argument! Exiting.')
        exit()

    # Start learning
    for i in range(0, num_iterations):
        # Forward propagation
        a3, cache = init_utils.forward_propagation(X, parameters)

        # Compute the cost
        cost = init_utils.compute_loss(a3, Y)

        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)

        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads, learning_rate)

        # Record the cost
        if i % 1000 == 0:
            costs.append(cost)
            # Print the cost
            if print_cost:
                print('Cost after iteration ' + str(i) + ': ' + str(cost))

    # Training finished; plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title('Learning rate =' + str(learning_rate))
        plt.show()

    # Return the learnt parameters
    return parameters
Example #19
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization="he",
          is_plot=True):
    """
    实现一个三层的神经网络:LINEAR ->RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID

    参数:
        X - 输入的数据,维度为(2, 要训练/测试的数量)
        Y - 标签,【0 | 1】,维度为(1,对应的是输入的数据的标签)
        learning_rate - 学习速率
        num_iterations - 迭代的次数
        print_cost - 是否打印成本值,每迭代1000次打印一次
        initialization - 字符串类型,初始化的类型【"zeros" | "random" | "he"】
        is_polt - 是否绘制梯度下降的曲线图
    返回
        parameters - 学习后的参数
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the initialization method
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)
    else:
        print("Invalid initialization argument! Exiting.")
        exit()

    # Start learning
    for i in range(0, num_iterations):
        # Forward propagation
        a3, cache = init_utils.forward_propagation(X, parameters)

        # Compute the cost
        cost = init_utils.compute_loss(a3, Y)

        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)

        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads,
                                                  learning_rate)

        # Record the cost
        if i % 1000 == 0:
            costs.append(cost)
            # Print the cost
            if print_cost:
                print("Cost after iteration " + str(i) + ": " + str(cost))

    # Training finished; plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

    # Return the learnt parameters
    return parameters
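Finally, every example ends each iteration with update_parameters. A minimal sketch of the plain gradient-descent step these examples assume (theta = theta - learning_rate * dtheta for every W and b):

def update_parameters(parameters, grads, learning_rate):
    # One vanilla gradient-descent step over all W/b pairs.
    L = len(parameters) // 2  # number of layers with a W/b pair
    for l in range(1, L + 1):
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    return parameters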
Example #20
def model(X,
          Y,
          learning_rate=0.01,
          num_iterations=15000,
          print_cost=True,
          initialization='he',
          is_plot=True):
    '''
    Implements a three-layer neural network: linear->relu->linear->relu->linear->sigmoid.
    :param X: input data, of shape (2, number of train/test examples)
    :param Y: labels, 0 or 1, of shape (1, number of corresponding input examples)
    :param learning_rate: learning rate
    :param num_iterations: number of iterations
    :param print_cost: whether to print the cost, once every 1000 iterations
    :param initialization: string, the initialization type ['zeros' | 'random' | 'he']
    :param is_plot: whether to plot the gradient-descent curve
    :return:
    parameters - the parameters learnt by the model
    '''

    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 10, 5, 1]

    # Choose the initialization method
    if initialization == 'zeros':
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == 'random':
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == 'he':
        parameters = initialize_parameters_he(layers_dims)
    else:
        print('Invalid initialization argument! Exiting.')
        exit()

    # Start learning
    for i in range(0, num_iterations):
        # Forward propagation
        a3, cache = init_utils.forward_propagation(X, parameters)

        # Compute the cost
        cost = init_utils.compute_loss(a3, Y)

        # Backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)

        # Update parameters
        parameters = init_utils.update_parameters(parameters, grads,
                                                  learning_rate)

        # Record the cost
        if i % 1000 == 0:
            costs.append(cost)
            # Print the cost
            if print_cost:
                print('Cost after iteration ' + str(i) + ': ' + str(cost))

    # Training finished; plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per thousands)')
        plt.title('Learning rate =' + str(learning_rate))
        plt.show()

    # Return the learnt parameters
    return parameters