def plot_boundaries():
    """
    Plots the decision boundary updates while training
    """
    n_data = 200 # Number of input vectors
    m = 2 # Number of features (input nodes)
    n = 1 # Number of output nodes
    l_rate = 0.006 # Learning rate
    nb_epochs = 20 # Max number of iterations

    # Generate input data
    classA, classB = generate_data(int(n_data/2))
    input_vectors = np.concatenate((classA, classB), axis=1)
    # Adding the bias
    input_vectors = np.concatenate((np.ones((1, n_data)), input_vectors))
    # Transposing matrix
    input_vectors = np.transpose(input_vectors) # Matrix n_data by m+1
    # Targets: neurons should fire 1 for classA points (first half of the data)
    targets = np.zeros((n_data, n))
    targets[:n_data // 2, :] = 1
    # Init the weights
    weights = np.random.rand(m + 1, n) * 0.1 - 0.05
    weights, _ = seq_train(input_vectors, targets, weights, nb_epochs, l_rate,
                           verbose=True, do_plot=True, classA=classA, classB=classB)
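
# `seq_train` itself is defined elsewhere in the repository. A minimal sketch of
# a compatible implementation, assuming the sequential perceptron update with a
# 0/1 step activation (matching the 0/1 targets built above) and early stopping
# once an epoch produces no errors; the plotting arguments are accepted but the
# plotting itself is omitted here.
def seq_train_sketch(X, T, W, nb_epochs, eta,
                     verbose=False, do_plot=False, classA=None, classB=None):
    for epoch in range(nb_epochs):
        n_errors = 0
        for x, t in zip(X, T):                 # one pattern at a time
            y = (x @ W > 0).astype(float)      # step activation
            if not np.array_equal(y, t):
                W += eta * np.outer(x, t - y)  # sequential update
                n_errors += 1
        if n_errors == 0:                      # converged: stop early
            break
    return W, epoch + 1
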
def batch_train_display():
    """
    Trains with batch_train and plots the resulting boundary and accuracy curve.
    """
    epochs = 100
    classA, classB = generate_data(100)
    X = process_inputs(classA, classB)
    W, accuracy, error = batch_train(0.001, X, epochs)
    plot_classes(classA, classB, W)
    plot_accuracy(accuracy, len(accuracy))
    #plot_error(error, len(accuracy))
    print("final accuracy is", accuracy[len(accuracy) - 1])
    print("number of epochs:", len(accuracy))
def sensibility():
    """
    Shows how sensitive training is to the random weight initialisation:
    the data seed is fixed, then a different seed is set before training.
    """
    epochs = 100
    np.random.seed(0)
    classA, classB = generate_data(100)
    np.random.seed(4)  # try e.g. seeds 1 and 4 for contrasting initialisations
    X = process_inputs(classA, classB)
    W, accuracy, error = batch_train(0.001, X, epochs)
    plot_classes(classA, classB, W)
    plot_accuracy(accuracy, len(accuracy))
    print("final accuracy is", accuracy[len(accuracy) - 1])
    print("number of epochs:", len(accuracy))
def perceptron_train_display():
    """
    sequential training using delta rule
    :return:
    """
    epochs = 50
    classA, classB = generate_data(100)
    X = process_inputs(classA, classB)
    W, accuracy, error = perceptron_train(0.006, X, epochs)
    plot_classes(classA, classB, W)
    plot_accuracy(accuracy, len(accuracy))
    plot_error(error, len(accuracy))
    print("final accuracy is", accuracy[len(accuracy) - 1])
    print("number of epochs:", len(accuracy))
def no_bias_batch_train():
    """
    Batch training without the bias term, on data shifted away from the origin.
    """
    epochs = 200
    np.random.seed(0)
    classA, classB = generate_data(100,
                                   mA=np.array([2.0, 4]),
                                   mB=np.array([-5, -2]),
                                   sigmaA=1.5,
                                   sigmaB=1.5)
    #classA, classB = generate_data(100)
    np.random.seed(1)  # try e.g. seeds 1 and 2 for contrasting initialisations
    X = process_inputs(classA, classB, False)
    W, accuracy, error = batch_train(0.0001, X, epochs, 1, False)
    plot_classes(classA, classB, W, False)
    plot_accuracy(accuracy, len(accuracy))
    print("final accuracy is", accuracy[len(accuracy) - 1])
    print("number of epochs:", len(accuracy))
def seq_pcn_stats():
    """
    Shows how the efficiency of the sequential perceptron varies with the learning rate eta.
    """
    n_data = 200 # Number of input vectors
    m = 2 # Number of features (input nodes)
    n = 1 # Number of output nodes
    nb_epochs = 10 # Max number of iterations

    # Generate input data
    classA, classB = generate_data(int(n_data/2))
    input_vectors = np.concatenate((classA, classB), axis=1)
    # Adding the bias
    input_vectors = np.concatenate((np.ones((1, n_data)), input_vectors))
    # Transposing matrix
    input_vectors = np.transpose(input_vectors) # Matrix n_data by m+1
    # Targets: neurons should fire 1 for classA points (first half of the data)
    targets = np.zeros((n_data, n))
    targets[:n_data // 2, :] = 1

    eta = np.arange(0.001, 0.011, 0.0001)
    stats_epoch = []
    stats_accuracy = []
    for k in range(len(eta)):
        epochs = []
        accuracies = []
        for i in range(20):
            # Init the weights
            weights = np.random.normal(0, 1, (m + 1, n))
            weights, epoch = seq_train(input_vectors, targets, weights, nb_epochs, eta[k])
            acc = accuracy(input_vectors, targets, weights)
            epochs.append(epoch)
            accuracies.append(acc)
        stats_epoch.append(np.mean(epochs))
        stats_accuracy.append(np.mean(accuracies))
    plt.plot(eta, stats_epoch)
    plt.xlabel('Learning rate (eta)')
    plt.ylabel('Epochs')
    plt.title('Required number of epochs depending on learning rate')
    plt.show()
    plt.plot(eta, stats_accuracy)
    plt.xlabel('Learning rate (eta)')
    plt.ylabel('Accuracy')
    plt.title('NN Accuracy depending on learning rate')
    plt.show()
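
# `accuracy`, used in seq_pcn_stats() above, is not shown either. A short
# sketch consistent with the 0/1 targets and the step activation assumed in
# seq_train_sketch():
def accuracy_sketch(X, T, W):
    Y = (X @ W > 0).astype(float)             # forward pass, step threshold
    return np.mean(np.all(Y == T, axis=1))    # fraction of correct patterns
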
def adjust_batch():
    """
    Shows how the final accuracy depends on the number of batches.
    """
    epochs = 50
    nb_batches = np.arange(1, 11, 1)
    classA, classB = generate_data(100)
    X = process_inputs(classA, classB)
    eta = 0.01
    stats = []
    for k in range(len(nb_batches)):
        acc = []
        for i in range(50):
            W, accuracy, error = batch_train(eta, X, epochs, nb_batches[k])
            acc.append(accuracy[-1])
        stats.append(np.mean(acc))
    plt.plot(nb_batches, stats)
    plt.xlabel('Number of batches')
    plt.ylabel('Accuracy')
    plt.title('Accuracy depending on number of batches')
    plt.show()
def adjust_eta(train_function):
    """
    This functions shows how training is the most efficient when learning rate eta = 0.002
    :param train_function: sequential or batch learning
    :return:
    """
    epochs = 50
    classA, classB = generate_data(100)
    X = process_inputs(classA, classB)
    eta = np.arange(0.001, 0.01, 0.0005)
    stats = []
    for k in range(len(eta)):
        acc = []
        for i in range(50):
            W, accuracy, error = train_function(eta[k], X, epochs)
            acc.append(accuracy[-1])
        stats.append(np.mean(acc))
    plt.plot(eta, stats)
    plt.xlabel('Learning rate (eta)')
    plt.ylabel('Accuracy')
    plt.title('Accuracy depending on learning rate')
    plt.show()
def adjust_epochs(train_function):
    """
    This functions shows how training is the most efficient when epochs >= 40 for small learning rates
    :return:
    """
    epochs = np.arange(1, 10, 1)
    classA, classB = generate_data(100)
    X = process_inputs(classA, classB)
    eta = 0.01
    stats = []
    for k in range(len(epochs)):
        acc = []
        for i in range(100):
            W, accuracy, error = train_function(eta, X, epochs[k])
            acc.append(accuracy[-1])
        stats.append(np.mean(acc))
    plt.plot(epochs, stats)
    plt.xlabel('Number of epochs')
    plt.ylabel('Accuracy')
    plt.title('Accuracy depending on number of epochs')
    plt.show()
    print(epochs, stats)
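
# Both sweep helpers take the training function itself as a parameter, so the
# same experiment can be run for either rule, e.g.:
# adjust_eta(batch_train)
# adjust_epochs(perceptron_train)
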
Example #10
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from datagen import generate_data, normalize
# from keras.models import load_model

data = generate_data(3000)
# input_dim = 6
# layer_1_dim = 4
# encode_dim = 3

# min_max_scalar = MinMaxScaler()
# ohe = OneHotEncoder()
# normal_list = []

# for i in range(6):
# 	d = data[:, i]
# 	if i == 3:
# 		ohe.fit(np.array([[0, 1, 2, 3]]).T)
# 		print('------------------------------')
# 		print(d.reshape(1, -1).T.shape)
# 		print('------------------------------')
# 		temp = ohe.transform(d.reshape(1, -1).T).toarray()
# 		print(temp)
# 	else:
# 		min_max_scalar.fit(d)
# 		temp = min_max_scalar.transform(d).reshape(-1, 1)
# 	normal_list.append(temp)

# normalized = np.hstack(normal_list)
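
# The commented-out block above was attempting a per-column normalisation. A
# working sketch of the same idea with the current scikit-learn API (fit and
# transform expect 2-D arrays, hence the reshape(-1, 1) calls); the category
# list [0, 1, 2, 3] for column 3 is taken from the commented code, and the
# assumption that `data` has six columns comes from `range(6)`:
min_max_scaler = MinMaxScaler()
ohe = OneHotEncoder(categories=[[0, 1, 2, 3]])
normal_list = []
for i in range(6):
    col = data[:, i].reshape(-1, 1)       # scikit-learn expects 2-D input
    if i == 3:                            # categorical column: one-hot encode
        temp = ohe.fit_transform(col).toarray()
    else:                                 # numeric column: scale to [0, 1]
        temp = min_max_scaler.fit_transform(col)
    normal_list.append(temp)
normalized = np.hstack(normal_list)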