def otherDatasets():
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    for item in datasets:
        X, Y = datasets[item]
        X, Y = X.T, Y.reshape(1, Y.shape[0])

        # make blobs binary
        if item == "blobs":
            Y = Y % 2

        parameters = nn_model(X, Y, n_h=20, num_iterations=10000, print_cost=True)
        plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y[0, :])
        plt.savefig(item)
        plt.cla()
def load():
    X, Y = load_planar_dataset()
    name = 'flower'
    X = X.T
    Y = Y[0]
    # plt.scatter(X[:, 0], X[:, 1], c=Y, s=40, cmap=plt.cm.Spectral)
    # plt.title(name + '_original')
    # plt.savefig(os.path.join('pic', name + '_original'))
    # plt.show()

    # Load the extra datasets
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    datas = [(name, X, Y)]
    for name, dataset in datasets.items():
        X, Y = dataset
        datas.append((name, X, Y))
        # print(X.shape)
        # print(Y.shape)
        # Visualize the data
        # plt.scatter(X[:, 0], X[:, 1], c=Y, s=40, cmap=plt.cm.Spectral)
        # plt.title(name + '_original')
        # plt.savefig(os.path.join('pic', name + '_original'))
        # plt.show()
    return datas
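# A minimal consumer sketch for load() (demo_load is a hypothetical helper;
# it assumes nn_model, predict, and plot_decision_boundary, defined elsewhere
# in this file, are in scope). load() returns every dataset in sklearn layout
# -- X of shape (m, 2), Y of shape (m,) -- so each one is transposed and
# reshaped here into the (n_x, m) / (1, m) layout that nn_model expects.
def demo_load():
    for name, X, Y in load():
        X, Y = X.T, Y.reshape(1, -1)
        if name == "blobs":
            Y = Y % 2  # make blobs binary
        parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=False)
        plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y.ravel())
        plt.title(name)
        plt.show()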
def try_different_dataset():
    # Datasets
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    ### START CODE HERE ### (choose your dataset)
    dataset = "noisy_moons"
    ### END CODE HERE ###

    X, Y = datasets[dataset]
    X, Y = X.T, Y.reshape(1, Y.shape[0])

    # make blobs binary
    if dataset == "blobs":
        Y = Y % 2

    # Visualize the data
    plt.scatter(X[0, :], X[1, :], c=Y[0], s=40, cmap=plt.cm.Spectral)
def Test2():
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {'noisy_circles': noisy_circles,
                'noisy_moons': noisy_moons,
                'blobs': blobs,
                'gaussian_quantiles': gaussian_quantiles}

    # choose one dataset
    dataset = 'gaussian_quantiles'
    X, Y = datasets[dataset]
    X, Y = X.T, Y.reshape(1, Y.shape[0])
    print(X.shape, Y.shape)

    # make blobs binary
    if dataset == 'blobs':
        Y = Y % 2

    # Visualize the data
    plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral)
    plt.show()

    # 5 hidden units, learning rate 0.2 (constructor semantics assumed)
    model = SingleHiddenLayer(5, 0.2)
    model.initializeData(X, Y)

    list_iterations = [500, 1000, 1500, 2000, 2500, 3000]
    for iterations in list_iterations:
        model.train_model(num_iterations=iterations)
        predictions = model.predict(X)
        print('Accuracy: {0}%'.format(
            float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)))
        plot_decision_boundary(lambda x: model.predict(x.T), X, Y.ravel())
        plt.title('Decision boundary for 5 hidden units for Iteration: {0}'.format(iterations))
        plt.show()
def init_dataset():
    # Datasets
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    ### START CODE HERE ### (choose your dataset)
    dataset = "gaussian_quantiles"
    ### END CODE HERE ###

    X, Y = datasets[dataset]
    X, Y = X.T, Y.reshape(1, Y.shape[0])

    # make blobs binary
    if dataset == "blobs":
        Y = Y % 2

    return X, Y
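# A minimal usage sketch for init_dataset() (demo_init_dataset is a
# hypothetical helper; it assumes nn_model and predict, defined elsewhere
# in this file, are in scope):
def demo_init_dataset():
    X, Y = init_dataset()
    parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
    print('Accuracy: %d%%' % accuracy)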
def main():
    # default dataset
    # X, Y = load_planar_dataset()

    # extra datasets
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    dataset = "blobs"
    X, Y = datasets[dataset]
    X, Y = X.T, Y.reshape(1, Y.shape[0])

    # make blobs binary
    if dataset == "blobs":
        Y = Y % 2

    # Visualize the data
    plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral)
    plt.show()

    # Define the network architecture
    n1 = X.shape[0]
    n2 = 5
    n3 = Y.shape[0]
    assert n3 == 1 and n1 == 2

    net = NN(n1, n2, n3, 'leaky Relu')
    costs = []
    iterations = 10000  # training iterations (assumed value; set to taste)
    for i in range(iterations):
        A2, cache = net.forward(X)
        cost = net.loss(A2, Y)
        costs.append(cost)
        grads = net.backward(cache, X, Y)
        net.optimize(grads)

    plt.plot(costs)
    plt.show()

    plot_decision_boundary(lambda x: net.predict(x.T), X, Y.ravel())
    plt.show()

    predictions = net.predict(X)
    print('Neural network accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
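# The NN class used in main() is constructed with activation 'leaky Relu',
# but its internals are not shown in this file. A minimal sketch of that
# hidden-layer activation and its derivative, assuming the conventional
# slope of 0.01 (the helper names leaky_relu / leaky_relu_grad are
# hypothetical, not part of the original code):
def leaky_relu(Z, alpha=0.01):
    # Elementwise max(alpha * Z, Z)
    return np.where(Z > 0, Z, alpha * Z)


def leaky_relu_grad(Z, alpha=0.01):
    # Derivative g'(Z1), used when backpropagating through the hidden layer
    return np.where(Z > 0, 1.0, alpha)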
# **You've learnt to:**
# - Build a complete neural network with a hidden layer
# - Make good use of a non-linear unit
# - Implement forward propagation and backpropagation, and train a neural network
# - See the impact of varying the hidden layer size, including overfitting

# Nice work!

# ## 5) Performance on other datasets

# If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.

# In[ ]:

# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###

X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
def update_parameters(parameters, grads, learning_rate=1.2):
    """
    Updates parameters using the gradient descent update rule given above

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    # Retrieve each parameter from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']

    # Retrieve each gradient from the dictionary "grads"
    dW1 = grads['dW1']
    db1 = grads['db1']
    dW2 = grads['dW2']
    db2 = grads['db2']

    # Update rule for each parameter
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters


parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)

print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))


# GRADED FUNCTION: predict

def predict(parameters, X):
    """
    Using the learned parameters, predicts a class for each example in X

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (n_x, m)

    Returns:
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
    A2, cache = forward_propagation(X, parameters)
    predictions = A2 > 0.5

    return predictions


parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))

# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))

# Print accuracy
predictions = predict(parameters, X)
print('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')

# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5, 2, i + 1)
    plt.title('Hidden Layer of size %d' % n_h)
    parameters = nn_model(X, Y, n_h, num_iterations=5000)
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
    print("Accuracy for {} hidden units: {} %".format(n_h, accuracy))

# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

dataset = "noisy_moons"
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])

# make blobs binary
if dataset == "blobs":
    Y = Y % 2

# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral)
Y_flat = Y.ravel()  # 1-D labels for plotting (Y has shape (1, m))

plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
    plt.subplot(5, 2, i + 1)
    # plt.title('Hidden Layer of size %d' % n_h)
    title = 'Hidden Layer of size: ' + str(n_h)
    parameters = nn_model(X, Y, n_h, num_iterations=5000)
    # Note: this variant assumes a plot_decision_boundary modified to accept a title.
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y_flat, title)
    predictions = predict(parameters, X)
    accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
    print("Accuracy for {} hidden units: {} %".format(n_h, accuracy))

# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

dataset = "noisy_moons"
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])

# make blobs binary
if dataset == "blobs":
    Y = Y % 2

# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y[0], s=40, cmap=plt.cm.Spectral)
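# The loop above passes a title into plot_decision_boundary, which the stock
# planar_utils helper does not accept. A sketch of what the extended helper
# presumably looks like (plot_decision_boundary_titled is hypothetical; the
# grid/contour body mirrors the standard planar_utils implementation):
def plot_decision_boundary_titled(model, X, y, title):
    # Grid over the input space (X has shape (2, m))
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict a class for every grid point and draw the regions
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[0, :], X[1, :], c=y, s=40, cmap=plt.cm.Spectral)
    plt.title(title)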
def do_something():
    X, Y = load_planar_dataset()

    # Visualize the data:
    plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral)

    ### Shapes
    ### START CODE HERE ### (≈ 3 lines of code)
    shape_X = X.shape
    shape_Y = Y.shape
    m = Y.size  # training set size
    ### END CODE HERE ###

    print('The shape of X is: ' + str(shape_X))
    print('The shape of Y is: ' + str(shape_Y))
    print('I have m = %d training examples!' % m)

    ### Logistic Regression
    # Train the logistic regression classifier
    clf = sklearn.linear_model.LogisticRegressionCV()
    clf.fit(X.T, Y.T)

    # Plot the decision boundary for logistic regression
    plot_decision_boundary(lambda x: clf.predict(x), X, Y)
    plt.title("Logistic Regression")

    # Print accuracy
    LR_predictions = clf.predict(X.T)
    print('Accuracy of logistic regression: %d ' % float((np.dot(Y, LR_predictions) +
          np.dot(1 - Y, 1 - LR_predictions)) / float(Y.size) * 100) +
          '% ' + "(percentage of correctly labelled datapoints)")
    # plt.show()

    ### Layer sizes
    X_assess, Y_assess = layer_sizes_test_case()
    (n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
    print("The size of the input layer is: n_x = " + str(n_x))
    print("The size of the hidden layer is: n_h = " + str(n_h))
    print("The size of the output layer is: n_y = " + str(n_y))

    ### Initialize parameters
    n_x, n_h, n_y = initialize_parameters_test_case()
    parameters = initialize_parameters(n_x, n_h, n_y)
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))

    ## The Loop
    ### Forward propagation
    X_assess, parameters = forward_propagation_test_case()
    A2, cache = forward_propagation(X_assess, parameters)
    # Note: we use the mean here just to make sure that your output matches ours.
    print(np.mean(cache['Z1']), np.mean(cache['A1']), np.mean(cache['Z2']), np.mean(cache['A2']))

    ### Cost function
    A2, Y_assess, parameters = compute_cost_test_case()
    print("cost = " + str(compute_cost(A2, Y_assess, parameters)))

    ### Backward propagation
    parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
    grads = backward_propagation(parameters, cache, X_assess, Y_assess)
    print("dW1 = " + str(grads["dW1"]))
    print("db1 = " + str(grads["db1"]))
    print("dW2 = " + str(grads["dW2"]))
    print("db2 = " + str(grads["db2"]))

    ### Update parameters
    parameters, grads = update_parameters_test_case()
    parameters = update_parameters(parameters, grads)
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))
    ## End of Loop

    ## Integrate parts 4.1, 4.2 and 4.3 in nn_model()
    X_assess, Y_assess = nn_model_test_case()
    parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=False)
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))

    ## Predictions
    parameters, X_assess = predict_test_case()
    predictions = predict(parameters, X_assess)
    print("predictions mean = " + str(np.mean(predictions)))

    ## Neural network
    # Build a model with a n_h-dimensional hidden layer
    parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)

    # Plot the decision boundary
    plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
    plt.title("Decision Boundary for hidden layer size " + str(4))

    # Print accuracy
    predictions = predict(parameters, X)
    print('Accuracy: %d' % float((np.dot(Y, predictions.T) +
          np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
    ## Tuning hidden layer size (optional)
    # This may take about 2 minutes to run
    plt.figure(figsize=(16, 32))
    hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
    for i, n_h in enumerate(hidden_layer_sizes):
        plt.subplot(5, 2, i + 1)
        plt.title('Hidden Layer of size %d' % n_h)
        parameters = nn_model(X, Y, n_h, num_iterations=5000)
        plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
        predictions = predict(parameters, X)
        accuracy = float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100)
        print("Accuracy for {} hidden units: {} %".format(n_h, accuracy))

    ## Performance on other datasets
    # Datasets
    noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()

    datasets = {"noisy_circles": noisy_circles,
                "noisy_moons": noisy_moons,
                "blobs": blobs,
                "gaussian_quantiles": gaussian_quantiles}

    ### START CODE HERE ### (choose your dataset)
    dataset = "noisy_moons"
    ### END CODE HERE ###

    X, Y = datasets[dataset]
    X, Y = X.T, Y.reshape(1, Y.shape[0])

    # make blobs binary
    if dataset == "blobs":
        Y = Y % 2

    # Visualize the data
    plt.scatter(X[0, :], X[1, :], c=Y.ravel(), s=40, cmap=plt.cm.Spectral)
    plt.show()