# Example #1
def m_adam():
    # train the 3-layer model with the Adam optimizer
    layers_dims = [train_X.shape[0], 5, 2, 1]
    parameters = model(train_X, train_Y, layers_dims, optimizer="adam")

    # Predict
    predictions = predict(train_X, train_Y, parameters)
    return parameters, predictions

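# A minimal sketch of the Adam update that `optimizer="adam"` performs for a
# single parameter `w` on each mini-batch step `t`. This is an illustrative
# re-statement, not the notebook's exact implementation; the function name and
# arguments are assumptions, and beta1/beta2/epsilon use the standard Adam
# defaults.

import numpy as np

def adam_update_sketch(w, dw, m, v, t, learning_rate,
                       beta1=0.9, beta2=0.999, epsilon=1e-8):
    m = beta1 * m + (1 - beta1) * dw       # moving average of the gradients
    v = beta2 * v + (1 - beta2) * dw ** 2  # moving average of the squared gradients
    m_hat = m / (1 - beta1 ** t)           # bias-corrected first moment
    v_hat = v / (1 - beta2 ** t)           # bias-corrected second moment
    w = w - learning_rate * m_hat / (np.sqrt(v_hat) + epsilon)
    return w, m, v
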
# You will now run this 3-layer neural network with each of the 3 optimization methods.
#
# ### 5.1 - Mini-batch Gradient descent
#
# Run the following code to see how the model does with mini-batch gradient descent.

# In[16]:

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                       train_Y)
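
# For comparison with the optimizers that follow, here is a minimal sketch of
# the plain gradient-descent step applied to each mini-batch inside
# `model(..., optimizer="gd")`. This is an illustrative re-statement, not the
# notebook's exact implementation; it assumes the "W1"/"b1"/"dW1"/"db1"
# dictionary key convention used elsewhere in this notebook.

def gd_update_sketch(parameters, grads, learning_rate):
    L = len(parameters) // 2  # number of layers in the network
    for l in range(1, L + 1):
        # step each weight matrix and bias vector against its gradient
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    return parameters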

# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small, but for more complex problems you might see bigger gains.

# In[17]:

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta=0.9, optimizer="momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                       train_Y)
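
# A minimal sketch of the momentum update performed on each mini-batch inside
# `model(..., beta=0.9, optimizer="momentum")`: `v` holds an exponentially
# weighted average of past gradients, and the parameters move along `v` rather
# than the raw gradient. Illustrative only, assuming the same "W1"/"dW1" key
# convention as above; not the notebook's exact implementation.

def momentum_update_sketch(parameters, grads, v, beta, learning_rate):
    L = len(parameters) // 2  # number of layers in the network
    for l in range(1, L + 1):
        # moving average of the gradients (the "velocity")
        v["dW" + str(l)] = beta * v["dW" + str(l)] + (1 - beta) * grads["dW" + str(l)]
        v["db" + str(l)] = beta * v["db" + str(l)] + (1 - beta) * grads["db" + str(l)]
        # update parameters using the velocity
        parameters["W" + str(l)] -= learning_rate * v["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * v["db" + str(l)]
    return parameters, v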