Example #1
def model(X,
          Y,
          hidden_layers_dims,
          learning_rate=1.2,
          num_iter=5000,
          hidden_activation='relu',
          print_cost=True):

    # Build the full layer-size list: input size, hidden layer sizes, output size.
    layers_dims = []
    layers_dims.append(X.shape[0])
    layers_dims.extend(hidden_layers_dims)
    layers_dims.append(Y.shape[0])

    parameters = initialize_parameters(layers_dims)

    costs = []

    for i in range(num_iter):

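        # One training iteration: forward pass, cost, gradients, parameter update.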
        AL, caches = forward_propagation(X, parameters, hidden_activation)

        cost = compute_cost(AL, Y)

        dAL = compute_cost_derivative(AL, Y)

        grads = backward_propagation(dAL, caches, hidden_activation)

        parameters = update_parameters(parameters, grads, learning_rate)

        if print_cost and i % 100 == 0:
            print('Cost after iteration %d: %f' % (i, cost))
            costs.append(cost)

    # Plot Learning curve
    plt.plot(costs)
    plt.ylabel('Cost')
    plt.xlabel('# of iterations (per 100)')
    plt.title('Learning rate = ' + str(learning_rate))
    plt.show()

    # Plot Decision Boundary
    print('decision boundary')
    plot_decision_boundary(
        lambda x: predict(x.T, parameters, hidden_activation), X, Y)

    return parameters
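
A minimal usage sketch for the function above (not in the original file), assuming X and Y are NumPy arrays shaped (n_x, m) and (n_y, m) and that the helpers (initialize_parameters, forward_propagation, etc.) are defined elsewhere in the same project:

# Hypothetical call: a network with two hidden layers of 5 and 3 units.
trained_parameters = model(X, Y,
                           hidden_layers_dims=[5, 3],
                           learning_rate=1.2,
                           num_iter=5000,
                           hidden_activation='relu',
                           print_cost=True)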
Example #2
File: ex1.py  Project: phrayezzen/COMP540
# TODO: calculate the probability of a student being admitted with score of 45,85
#       replace pred_prob = 0 with pred_prob = expression for that probability

# Logistic regression probability: sigmoid of the linear score theta^T x.
pred_prob = 1.0 / (1.0 + np.exp(-theta_opt.dot(np.array([1, 45, 85]))))
print("For a student with 45 on exam 1 and 85 on exam 2, the probability of admission =", pred_prob)

# compute accuracy on the training set

predy = log_reg1.predict(XX)

# TODO: calculate the accuracy of predictions on training set (hint: compare predy and y)

# Fraction of training examples where the predicted label matches the true label.
accuracy = np.mean(predy == y)
print("Accuracy on the training set =", accuracy)

# plot the decision surface

plot_utils.plot_decision_boundary(X, y, theta_opt, 'Exam 1 score', 'Exam 2 score',
                                  ['Not Admitted', 'Admitted'])
plt.savefig('fig2.pdf')

# Compare with sklearn logistic regression
# note the parameters fed into the LogisticRegression call

from sklearn import linear_model
sk_logreg = linear_model.LogisticRegression(C=1e5, solver='lbfgs', fit_intercept=False)
sk_logreg.fit(XX, y)
print("Theta found by sklearn: ", sk_logreg.coef_)

plot_utils.plot_decision_boundary_sklearn(X, y, sk_logreg, 'Exam 1 score', 'Exam 2 score',
                                          ['Not Admitted', 'Admitted'])
plt.savefig('fig2_sk.pdf')
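
As a sanity check (not part of the original file), the fitted sklearn model can report the same admission probability via predict_proba; since XX carries an explicit intercept column and fit_intercept=False, the query row keeps the leading 1:

prob_sk = sk_logreg.predict_proba(np.array([[1, 45, 85]]))[0, 1]
print("sklearn admission probability for scores (45, 85):", prob_sk)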
Example #3
File: dnn.py  Project: gaurishchaudhari/DNN
def model(X,
          Y,
          hidden_layers_dims,
          learning_rate,
          num_epochs,
          minibatch_size,
          hidden_activation='relu',
          lambd=0.0,
          keep_prob=1.0,
          optimizer='gd',
          beta1=0.9,
          beta2=0.999,
          epsilon=1e-8,
          print_cost=False,
          show_plot=False):

    # Build the full layer-size list: input size, hidden layer sizes, output size.
    layers_dims = []
    layers_dims.append(X.shape[0])
    layers_dims.extend(hidden_layers_dims)
    layers_dims.append(Y.shape[0])

    parameters = initialize_parameters(layers_dims)

    # Initialize optimizer state: velocity for momentum, first/second moment estimates for Adam.
    if optimizer == 'momentum':
        v = initialize_momentum(parameters)
    elif optimizer == 'adam':
        v, s = initialize_adam(parameters)

    costs = []
    t = 0
    seed = 10

    for i in range(num_epochs):

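        # Reshuffle the training set into random mini-batches each epoch.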
        seed = seed + 1
        minibatches = hp.random_minibatches(X, Y, minibatch_size, seed)

        for minibatch in minibatches:

            (minibatch_X, minibatch_Y) = minibatch

            AL, caches = forward_propagation(minibatch_X, parameters,
                                             hidden_activation, keep_prob)

            cost = compute_cost(AL, minibatch_Y, parameters, lambd)

            dAL = compute_cost_derivative(AL, minibatch_Y)

            grads = backward_propagation(dAL, caches, hidden_activation, lambd,
                                         keep_prob)

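            # Occasionally compare analytic gradients with numerical estimates
            # (only meaningful without dropout, i.e. keep_prob == 1.0).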
            if print_cost and keep_prob == 1.0 and i > 0 and i % 1000 == 0:
                hp.grad_check(
                    lambda params: forward_prop_and_compute_cost(
                        minibatch_X, minibatch_Y, params, hidden_activation,
                        lambd, keep_prob), parameters, grads)

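            # Parameter update: plain gradient descent, momentum, or Adam
            # (bias-corrected first/second moment estimates).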
            if optimizer == 'gd':
                parameters = update_parameters(parameters, grads,
                                               learning_rate)
            elif optimizer == 'momentum':
                parameters, v = update_parameters_momentum(
                    parameters, grads, v, learning_rate, beta1)
            elif optimizer == 'adam':
                t = t + 1
                parameters, v, s = update_parameters_adam(
                    parameters, grads, v, s, t, learning_rate, beta1, beta2,
                    epsilon)

        if print_cost and i % 100 == 0:
            print('Cost after epoch %d: %f' % (i, cost))
            costs.append(cost)

    if show_plot:
        # Plot Learning curve
        plt.plot(costs)
        plt.ylabel('Cost')
        plt.xlabel('# of epochs (per 100)')
        plt.title('Learning rate = ' + str(learning_rate))
        plt.show()

        # Plot Decision Boundary
        print('Decision Boundary')
        plot_decision_boundary(
            lambda x: predict(x.T, parameters, hidden_activation), X, Y)

    return parameters
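
A hedged usage sketch (not in the original file), assuming the same (n_x, m) / (n_y, m) array shapes and that the helper module hp and the remaining helpers are importable from the project:

# Hypothetical call: mini-batch training with the Adam optimizer and L2 regularization.
trained_parameters = model(X, Y,
                           hidden_layers_dims=[20, 7, 5],
                           learning_rate=0.0007,
                           num_epochs=1000,
                           minibatch_size=64,
                           hidden_activation='relu',
                           lambd=0.1,
                           optimizer='adam',
                           print_cost=True,
                           show_plot=True)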