# Example #1
# In[42]:

# Train a 3-layer model (input -> 5 -> 2 -> 1) with plain mini-batch
# gradient descent.  `train_X` has shape (n_features, n_examples), so
# train_X.shape[0] is the input-layer size.
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict on the training set to report accuracy.
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary over the fixed viewing window of the dataset.
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                       train_Y)

# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.

# In[43]:

# Train the same 3-layer model, now with momentum (beta is the
# exponential-average decay rate for the velocity terms).
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X,
                   train_Y,
                   layers_dims,
                   beta=0.9,
                   optimizer="momentum")
# Example #2
# In[16]:

# Train a 3-layer model (input -> 5 -> 2 -> 1) with plain mini-batch
# gradient descent.  `train_X` has shape (n_features, n_examples), so
# train_X.shape[0] is the input-layer size.
# Style fix: PEP 8 forbids spaces around '=' in keyword arguments (E251),
# and spacing now matches the identical calls elsewhere in this file.
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict on the training set to report accuracy.
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary over the fixed viewing window of the dataset.
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                       train_Y)


# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.

# In[17]:

# Train the same 3-layer model, now with momentum (beta is the
# exponential-average decay rate for the velocity terms).
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta=0.9,
                   optimizer="momentum")

# Predict on the training set to report accuracy.
predictions = predict(train_X, train_Y, parameters)
# Example #3
        plt.show()

    return parameters

if __name__ == '__main__':
    # Load the moons-style dataset; is_plot=True also displays a scatter plot.
    train_X, train_Y = opt_utils.load_dataset(is_plot=True)
    # 3-layer model: input size (n_features) -> 5 -> 2 -> 1.
    layers_dims = [train_X.shape[0], 5, 2, 1]
    # # Plain (batch) gradient descent:
    # parameters = model(train_X, train_Y, layers_dims, optimizer="gd",is_plot=True)

    # # Gradient descent with momentum:
    # parameters = model(train_X, train_Y, layers_dims, beta=0.9,optimizer="momentum",is_plot=True)

    # Gradient descent with Adam optimization.
    parameters = model(train_X,
                       train_Y,
                       layers_dims,
                       optimizer="adam",
                       is_plot=True)

    # Predict on the training set (bug fix: variable was misspelled
    # "preditions").
    predictions = opt_utils.predict(train_X, train_Y, parameters)

    # Plot the decision boundary over the fixed viewing window of the dataset.
    plt.title("Model")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    opt_utils.plot_decision_boundary(
        lambda x: opt_utils.predict_dec(parameters, x.T), train_X, train_Y)