import numpy as np
import scipy.io as sio

# nn_model, plot_sgd_results and compare_sgd_result are assumed to be
# project-local helpers imported elsewhere.


def testGMMData(iterations, learning_rate, batch_size, layers_count):
    print(
        f'Neural network is running on the GMM dataset with iterations={iterations}, '
        f'learning_rate={learning_rate}, batch_size={batch_size}, layers_count={layers_count}'
    )
    data = sio.loadmat('resources/GMMData.mat')
    trainX = data['Yt']
    trainY = data['Ct']
    testX = data['Yv']
    testY = data['Cv']

    labels_count = trainY.shape[0]
    sample_size = trainX.shape[0]

    freq = 1  # metric-recording frequency (presumably: record every iteration)

    # Per hidden layer: an n x n weight matrix plus an n-vector bias, where
    # n is the feature dimension; the loss layer adds a labels_count x (n + 1)
    # weight/bias block. (Same value as the original n + n**2, written to
    # mirror loss_layer_size.)
    theta_layer_size = sample_size * (sample_size + 1)
    loss_layer_size = labels_count * (sample_size + 1)

    theta = np.random.randn(layers_count * theta_layer_size + loss_layer_size,
                            1)

    model = nn_model(theta, layers_count, batch_size, learning_rate,
                     iterations, freq)
    model.train(trainX, trainY, testX, testY)

    plot_sgd_results(
        model, 'SGD results (GMM Data)',
        f'q5_gmm_data_{iterations}_{learning_rate}_{batch_size}_{layers_count}',
        False)

    # gamma=0.5 switches on momentum (gamma is the coefficient on the
    # accumulated velocity); everything else matches the plain SGD run.
    model_momentum = nn_model(theta,
                              layers_count,
                              batch_size,
                              learning_rate,
                              iterations,
                              freq,
                              gamma=0.5)
    model_momentum.train(trainX, trainY, testX, testY)
    compare_sgd_result(
        [model, model_momentum], 'SGD with / without momentum (GMM Data)',
        f'q5_gmm_data_momentum_{iterations}_{learning_rate}_{batch_size}_{layers_count}',
        False)

    recorded_iterations, train_loss, train_accuracy, test_loss, test_accuracy = zip(
        *model.training_records)
    return train_loss[-1], train_accuracy[-1], test_loss[-1], test_accuracy[-1]
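# Hypothetical usage sketch (values illustrative, not from the original
# source): sweep a small hyperparameter grid and collect the final metrics
# that testGMMData returns.
for lr in (0.1, 0.01):
    for bs in (32, 64):
        final_metrics = testGMMData(iterations=200, learning_rate=lr,
                                    batch_size=bs, layers_count=3)
        print(f'lr={lr}, batch_size={bs} -> final train/test accuracy: '
              f'{final_metrics[1]:.3f} / {final_metrics[3]:.3f}')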
# --- Separate snippet: xlrd-based Excel loading (the workbook-opening lines
# and the matching loop that fills X from the first sheet are not shown) ---
for i in range(1, sheet2.nrows):
    Y.append(sheet2.row_values(i))

X = np.array(X).transpose()
Y = np.array(Y).transpose()

X_train = X[:, 0:700]
Y_train = Y[:, 0:700]

X_test = X[:, 700:962]
Y_test = Y[:, 700:962]

layer_dims = [3, 20, 10, 7, 5, 2]

parameters = nn_model(X_train, Y_train, layer_dims, learning_rate=0.001, num_iterations=27000, beta1=0.9, beta2=0.999,
                      epsilon=1e-8, print_cost=True)
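# For context: beta1, beta2 and epsilon above are the standard Adam
# hyperparameters. A minimal sketch of a single Adam update step (t is the
# 1-based step count; illustration only, the real optimizer lives inside
# nn_model):
def adam_step(w, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    m = beta1 * m + (1 - beta1) * grad           # first-moment (mean) estimate
    v = beta2 * v + (1 - beta2) * grad ** 2      # second-moment estimate
    m_hat = m / (1 - beta1 ** t)                 # bias-corrected estimates
    v_hat = v / (1 - beta2 ** t)
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v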


def predict(a, b, parameter):
    # Forward-propagate, then score classification accuracy by comparing the
    # predicted class (argmax over the 2-unit output) with the one-hot labels.
    AL, _ = nn_model_forward(a, parameter)
    return np.mean(np.argmax(AL, axis=0) == np.argmax(b, axis=0))


accuracy_train = predict(X_train, Y_train, parameters)
accuracy_test = predict(X_test, Y_test, parameters)
print('Train accuracy:', accuracy_train)
print('Test accuracy:', accuracy_test)


def signalstrength():
    # Note: despite the name, this disables the Wi-Fi adapter via netsh
    # (requires an elevated prompt on Windows).
    subprocess.run(['netsh', 'interface', 'set', 'interface',
                    'name=Wi-Fi', 'admin=disabled'])
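# A sketch of actually reading Wi-Fi signal strength on Windows by parsing
# 'netsh wlan show interfaces' (the 'Signal' field name is locale-dependent
# and assumed here):
import subprocess

def wifi_signal_percent():
    out = subprocess.run(['netsh', 'wlan', 'show', 'interfaces'],
                         capture_output=True, text=True).stdout
    for line in out.splitlines():
        if line.strip().startswith('Signal'):
            return line.split(':', 1)[1].strip()  # e.g. '87%'
    return None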
import sklearn.linear_model
import matplotlib.pyplot as plt

# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, Y.T)

# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")

# Print accuracy
LR_predictions = clf.predict(X.T)
accuracy = float((np.dot(Y, LR_predictions) + np.dot(1 - Y, 1 - LR_predictions)) / float(Y.size) * 100)
print('Accuracy of logistic regression: %d%% (percentage of correctly labelled datapoints)' % accuracy)
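# Equivalent check: since Y and LR_predictions are 0/1 arrays, the dot-product
# count above reduces to an elementwise comparison (shapes assumed to broadcast):
print('Accuracy (elementwise check): %.1f%%' % (100 * np.mean(LR_predictions == Y)))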

# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))

'''
    Accuracy is far higher than with logistic regression: the model has
    learned the leaf pattern of the flower. Unlike logistic regression,
    neural networks can learn highly non-linear decision boundaries.
'''

# Tuning hidden layer size
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):