Example #1
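All of these snippets are variants of the same exercise: train a 3-layer network on a small 2-D dataset with mini-batch gradient descent, momentum, or Adam, then plot its decision boundary. They rely on helper functions (load_dataset, predict, predict_dec, plot_decision_boundary) and a locally defined model(...) training function, none of which are shown here. A minimal setup they assume might look like this (an assumption; several examples below take the helpers from an opt_utils module):

import matplotlib.pyplot as plt
import numpy as np

# Helpers assumed available; adjust to wherever they live in your project.
from opt_utils import (load_dataset, predict, predict_dec,
                       plot_decision_boundary)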
def main():
    plt.rcParams['figure.figsize'] = (7.0, 4.0)  # set default size of plots
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['image.cmap'] = 'gray'

    train_X, train_Y = load_dataset()

    layers_dims = [train_X.shape[0], 5, 2, 1]
    #parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
    #parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
    parameters = model(train_X, train_Y, layers_dims, optimizer="adam")

    # Predict
    predictions = predict(train_X, train_Y, parameters)

    # Plot decision boundary
    #plt.title("Model with Gradient Descent optimization")
    #plt.title("Model with Momentum optimization")
    plt.title("Model with Adam optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                           train_Y)

    plt.show()
Example #2
def main(optimizer='gd'):
    # load and plot the data points
    train_X, train_Y = load_dataset()
    plt.show()

    layers_dims = [train_X.shape[0], 5, 2, 1]

    # try different optimization methods
    if optimizer == 'gd':
        # mini-batch gradient descent
        parameters = model(train_X, train_Y, layers_dims, optimizer="gd")
        plt.title("Model with Gradient Descent optimization")
    elif optimizer == 'momentum':
        # mini-batch gradient descent with momentum
        parameters = model(train_X, train_Y, layers_dims, optimizer="momentum")
        plt.title("Model with Momentum optimization")
    elif optimizer == 'adam':
        # mini-batch gradient descent with Adam
        parameters = model(train_X, train_Y, layers_dims, optimizer="adam")
        plt.title("Model with Adam optimization")
    else:
        print("No such optimization method named " + optimizer)
        return

    # Predict
    predictions = predict(train_X, train_Y, parameters)

    # Plot decision boundary
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                           train_Y)
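Every branch above trains with mini-batches. The shuffle-and-partition step this implies is not shown in these snippets; a sketch under the usual conventions (X of shape (n_x, m), Y of shape (1, m); the helper name random_mini_batches and the batch size of 64 are assumptions) is:

import numpy as np

def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    # Illustrative sketch, not the assignment's exact code.
    np.random.seed(seed)
    m = X.shape[1]                      # number of training examples
    permutation = np.random.permutation(m)
    shuffled_X = X[:, permutation]      # shuffle columns consistently
    shuffled_Y = Y[:, permutation]
    mini_batches = []
    for k in range(0, m, mini_batch_size):
        mini_batches.append((shuffled_X[:, k:k + mini_batch_size],
                             shuffled_Y[:, k:k + mini_batch_size]))
    return mini_batches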
Example #3
def train_adam():
    # train 3-layer model (train_X, train_Y are assumed to be loaded globally)
    layers_dims = [train_X.shape[0], 5, 2, 1]
    parameters = model(train_X, train_Y, layers_dims, optimizer="adam")

    # Predict
    predictions = predict(train_X, train_Y, parameters)

    # Plot decision boundary
    plt.title("Model with Adam optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                           train_Y)
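For reference, optimizer="adam" performs the standard Adam update inside model. A sketch for a single parameter array (the function name and the 0.0007 learning rate are assumptions; the moment estimates v and s persist across steps):

import numpy as np

def adam_step(W, dW, v, s, t, learning_rate=0.0007,
              beta1=0.9, beta2=0.999, epsilon=1e-8):
    # One illustrative Adam update; t counts steps for bias correction.
    v = beta1 * v + (1 - beta1) * dW            # first-moment estimate
    s = beta2 * s + (1 - beta2) * dW ** 2       # second-moment estimate
    v_hat = v / (1 - beta1 ** t)                # bias-corrected moments
    s_hat = s / (1 - beta2 ** t)
    W = W - learning_rate * v_hat / (np.sqrt(s_hat) + epsilon)
    return W, v, s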
Example #4
def mini_batch_with_gradient():
    train_X, train_Y = load_dataset()
    # train 3-layer model
    layers_dims = [train_X.shape[0], 5, 2, 1]
    parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

    # Predict
    predictions = predict(train_X, train_Y, parameters)

    # Plot decision boundary
    plt.title("Model with Gradient Descent optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                           train_Y)
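By contrast, optimizer="gd" reduces to the plain update below for every parameter (a sketch; the learning-rate default is an assumption):

def gd_step(W, dW, learning_rate=0.0007):
    # Vanilla (mini-batch) gradient descent: step directly along -dW.
    return W - learning_rate * dW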
Example #5
def main():
    train_X, train_Y = load_dataset()
    layers_dims = [train_X.shape[0], 5, 2, 1]
    print("training with mini-batch gd optimizer")
    learned_parameters_gd = model(train_X,
                                  train_Y,
                                  layers_dims,
                                  optimizer="gd")
    predict(train_X, train_Y, learned_parameters_gd)
    # Plot decision boundary
    plt.title("model with Gradient Descent optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(learned_parameters_gd, x.T),
                           train_X, train_Y)

    print("training with momentum optimizer")
    learned_parameters_momentum = model(train_X,
                                        train_Y,
                                        layers_dims,
                                        beta=0.9,
                                        optimizer="momentum")
    predict(train_X, train_Y, learned_parameters_momentum)
    # Plot decision boundary
    plt.title("model with Momentum optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(
        lambda x: predict_dec(learned_parameters_momentum, x.T), train_X,
        train_Y)

    print("training with adam optimizer")
    learned_parameters_adam = model(train_X,
                                    train_Y,
                                    layers_dims,
                                    optimizer="adam")
    predict(train_X, train_Y, learned_parameters_adam)
    # Plot decision boundary
    plt.title("model with Adam optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    plot_decision_boundary(lambda x: predict_dec(learned_parameters_adam, x.T),
                           train_X, train_Y)

Example #6
    return parameters


# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X,
                       train_Y)

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X,
                   train_Y,
                   layers_dims,
                   num_epochs=30000,
                   beta=0.9,
                   optimizer="momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
Example #7
# In[16]:

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)


# ### 5.2 - Mini-batch gradient descent with momentum
# 
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; for more complex problems you might see bigger gains. (A sketch of the momentum update follows this example.)

# In[17]:

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta=0.9, optimizer="momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)
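The momentum update referred to above, sketched for one parameter array (the velocity v persists across steps; beta=0.9 matches the call above, while the function name and learning-rate default are assumptions):

def momentum_step(W, dW, v, beta=0.9, learning_rate=0.0007):
    # Exponentially weighted average of past gradients, then step along it.
    v = beta * v + (1 - beta) * dW
    W = W - learning_rate * v
    return W, v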
Example #8
                t = t + 1  # Adam timestep, used for bias correction
                self.Update_Parameters_Adam(t, learning_rate, beta1, beta2, epsilon)
            if print_cost and i % (10 * self.iteration_unit) == 0:
                print("Cost after iteration %i: %f" % (i, cost))
                self.Query(X, Y)
            if i % self.iteration_unit == 0:
                self.costs.append(cost)
        print('finished!')
        self.PlotCosts()
        return self.costs
        
    pass

train_X, train_Y = load_dataset()
plt.show()

a = DeepNeuralNetwork("Model without optimization", [train_X.shape[0], 10, 3, 1])

for i in range(1500):
    a.Forward_Propagation(train_X)
    a.Backward_Propagation(train_Y)
    a.Update_Parameters(0.35)
    if i % 50 == 0:
        plt.title("Model without optimization")
        axes = plt.gca()
        axes.set_xlim([-1.5, 2.5])
        axes.set_ylim([-1, 1.5])
        plot_decision_boundary(lambda x: a.Predict(x.T), train_X, np.squeeze(train_Y))
        plt.show()
        a.Query(train_X, train_Y)
        
Example #9
# Test the model using **plain mini-batch gradient descent**

# In[27]:

layer_dims = [train_X.shape[0], 5, 2, 1]
params = model(train_X, train_Y, layer_dims, optimizer="gd", is_plot=True)

# In[28]:

predictions = opt_utils.predict(train_X, train_Y, params)

plt.title("Gradient Descent")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
opt_utils.plot_decision_boundary(lambda x: opt_utils.predict_dec(params, x.T),
                                 train_X, np.squeeze(train_Y))

# Test the model using **gradient descent with momentum**

# In[29]:

layer_dims = [train_X.shape[0], 5, 2, 1]

params = model(train_X,
               train_Y,
               layer_dims,
               beta=0.9,
               optimizer="momentum",
               is_plot=True)

# In[30]:
Example #10
    layers_dims = [train_X.shape[0], 5, 2, 1]
    parameters = model(train_X, train_Y, layers_dims, optimizer="momentum", is_plot=True)

    predictions = opt_utils.predict(train_X, train_Y, parameters)

    # Plot the decision boundary
    plt.title("Model with Momentum optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    opt_utils.plot_decision_boundary(lambda x: opt_utils.predict_dec(parameters, x.T), train_X, train_Y)
    '''
    # Test the Adam optimizer
    train_X, train_Y = opt_utils.load_dataset(is_plot=False)
    layers_dims = [train_X.shape[0], 5, 2, 1]
    parameters = model(train_X,
                       train_Y,
                       layers_dims,
                       optimizer="adam",
                       is_plot=True)

    predictions = opt_utils.predict(train_X, train_Y, parameters)

    # Plot the decision boundary
    plt.title("Model with Adam optimization")
    axes = plt.gca()
    axes.set_xlim([-1.5, 2.5])
    axes.set_ylim([-1, 1.5])
    opt_utils.plot_decision_boundary(
        lambda x: opt_utils.predict_dec(parameters, x.T), train_X, train_Y)
Example #11
sep("Mini-batch gradient descent")

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.reshape(train_Y, -1))

sep("with momentum")
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta=0.9, optimizer="momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.reshape(train_Y, -1))
Example #12
    return parameters

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer="gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.squeeze(train_Y))
plt.show()

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta=0.9, optimizer="momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, np.squeeze(train_Y))