Example #1
def make_layer(neurons: int) -> Layer:
    """Make a layer with some amount of neurons.

    Neurons will have a bias of zero and weights of 0.5, and will use the
    sigmoid activation function. They will have two inputs.
    """
    return Layer(make_neuron() for _ in range(neurons))
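The make_neuron helper is not shown here; a minimal sketch consistent with the docstring (two inputs, weights of 0.5, zero bias, sigmoid activation), assuming a Neuron(weights, bias, activation) constructor and a sigmoid function exist in this codebase:

def make_neuron() -> Neuron:
    # Hypothetical helper matching the docstring above: two inputs,
    # each weighted 0.5, zero bias, sigmoid activation.
    return Neuron(weights=[0.5, 0.5], bias=0.0, activation=sigmoid)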
Example #2
    def fit(self, X: np.ndarray, y: np.ndarray):
        X_ = check_array(X)

        y_ = check_array(y, ensure_2d=False)
        if len(y_.shape) == 1:
            y_ = y_.reshape(-1, 1)

        if X_.shape[0] != y_.shape[0]:
            raise ValueError('inconsistent number of samples')

        in_size = X_.shape[1]
        out_size = 1  # binary classification only!

        ### build neural network
        if self.hidden_size is not None:
            hidden_size = self.hidden_size
        else:
            hidden_size = out_size

        random_state = copy(self.random_state)

        layers = []
        layer_input = in_size
        # hidden layers
        for _ in range(self.n_hidden):
            layers.append(Layer(
                input_size=layer_input,
                output_size=hidden_size,
                activation=self.hidden_activation,
                random_state=random_state,
            ))
            layer_input = hidden_size  # to make sure layers fit together

        # output layer
        layers.append(
            Layer(layer_input, out_size, self.output_activation,
                  random_state=random_state)
        )

        self.net_ = NeuralNetwork(layers)

        # train
        self.net_, _ = train(self.net_, X_, y_, self.loss, self.lr,
                             self.n_epochs)

        return self
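A hedged usage sketch for this fit method, assuming it belongs to a scikit-learn-style estimator (called NNClassifier here, a hypothetical name) whose constructor sets the attributes the method reads (n_hidden, hidden_size, activations, loss, lr, n_epochs, random_state):

import numpy as np

# NNClassifier is a hypothetical wrapper class around this fit method.
X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = np.array([0, 1, 1, 0])
clf = NNClassifier(n_hidden=1, hidden_size=2,
                   hidden_activation='ReLU', output_activation='sigmoid',
                   loss=BCE_Loss(), lr=0.01, n_epochs=1000,
                   random_state=None)
clf.fit(X, y)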
Example #3
    def test_and(self):
        # Create the layers
        input_layer = Layer(2, 1)
        hidden_layer = Layer(1, 1)

        # Initialize the network
        neural_network = NeuralNetwork([input_layer, hidden_layer], .1)

        training_features = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        training_classes = numpy.array([[0, 0, 0, 1]]).T
        neural_network.train(training_features, training_classes, 5000)

        result_1_1 = round(neural_network.check([1, 1])[0])
        result_1_0 = round(neural_network.check([1, 0])[0])
        result_0_1 = round(neural_network.check([0, 1])[0])
        result_0_0 = round(neural_network.check([0, 0])[0])

        result = (result_1_1, result_1_0, result_0_1, result_0_0)
        self.assertEqual(result, (1, 0, 0, 0))
Example #4
    def test_xor(self):
        # Create the layers
        input_layer = Layer(2, 2)
        hidden_layer = Layer(2, 1)

        # Initialize the network
        neural_network = NeuralNetwork([input_layer, hidden_layer], .1)

        training_features = numpy.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        training_classes = numpy.array([[0, 1, 1, 0]]).T
        # This one would sometimes fail with few epochs, so a large number was used to avoid flaky tests
        neural_network.train(training_features, training_classes, 30000)

        result_1_1 = round(neural_network.check([1, 1])[0])
        result_1_0 = round(neural_network.check([1, 0])[0])
        result_0_1 = round(neural_network.check([0, 1])[0])
        result_0_0 = round(neural_network.check([0, 0])[0])

        result = (result_1_1, result_1_0, result_0_1, result_0_0)
        self.assertEqual(result, (0, 1, 1, 0))
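Unlike AND, XOR is not linearly separable, so this test needs the two-unit first layer (Layer(2, 2)) to remap the inputs before the output unit; that is also why it converges less reliably and uses 30000 epochs instead of the 5000 that sufficed for AND.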
Example #5
    def __init__(self, numIn=0, numEx=0):
        super().__init__()

        self.shape('arrow')
        self.color('red')
        self.pendown()
        self.showturtle()

        # inputLayer = Layer(3, 2)
        # inputLayer = Layer(3, 0)
        # inputLayer = Layer(2, 3)
        # inputLayer = Layer(1, 2)
        inputLayer = Layer(0, 3)
        outputLayer = Layer(2, 0)

        self.net = Network(numIn, numEx, inputLayer, outputLayer)

        self.motorAccum = numpy.zeros((Agent.MOTORWINDOW, 2))
        self.motorFrequency = numpy.zeros(2)
        self.motorClock = 0
Example #6
def test_lr(network, x, lrate, momentum=0.9, iterations=100):
    """
    Arguments:
    - network must be an object of class Network; its layers are used as a template
    - x must be the data tuple returned by data_read_classification
    - lrate must be a np array of learning rates to test
    """
    errors = np.zeros(len(lrate))
    for i in range(len(lrate)):
        brain = Network(learning_rate=lrate[i],
                        momentum_rate=momentum,
                        iterations=iterations)
        for j in range(len(network.layers)):
            brain.add(
                Layer(network.layers[j].inputs_neurons,
                      network.layers[j].output_neurons,
                      network.layers[j].activation_func_name))
        all_errors = brain.train_and_evaluate(x[0], x[1], x[2], x[3])
        errors[i] = all_errors[0][iterations - 1]
    plt.plot(sorted(lrate), errors)
    plt.xlabel('Learning Rate')
    plt.ylabel('Error of network after ' + str(iterations) + ' iterations')
    plt.show()
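A hedged usage sketch, assuming the data tuple comes from data_read_classification and the network passed in is only a template for layer shapes (as the copying loop above suggests); the Network and Layer signatures match Example #9 below:

import numpy as np

x = data_read_classification('simple', 100)
template = Network(learning_rate=0.01, momentum_rate=0.9, iterations=100)
template.add(Layer(2, 10, 'sigmoid'))
template.add(Layer(10, 2, 'sigmoid'))
# sweep learning rates spanning several orders of magnitude
test_lr(template, x, np.logspace(-4, -1, num=10), iterations=100)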
Example #7

def train(model, epochs=10, batch_size=32):
    x_train, x_test, y_train, y_test = load_data()
    for e in range(epochs):
        for batch_x, batch_y in create_batches(x_train, y_train, batch_size):
            model.zero_grad()
            for x, y in zip(batch_x, batch_y):
                model.forward(x, y)
            model.backward()
            model.step()
        print(f"Finished epoch {e}")
        test(model, x_test, y_test)


def test(model, x_test, y_test):
    predictions = []
    for x, y in zip(x_test, y_test):
        output = model.forward(x, y)
        predictions.append(np.argmax(output) == np.argmax(y))
    accuracy = np.mean(predictions)
    print(f"Accuracy: {round(accuracy, 4)}")


if __name__ == "__main__":
    np.random.seed(42)

    model = MLP([Layer(28 * 28, 128), Layer(128, 64), Layer(64, 10)])

    train(model)
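The create_batches helper is not defined in this snippet; a minimal sketch, assuming NumPy arrays and a fresh shuffle per epoch:

import numpy as np

def create_batches(x, y, batch_size):
    # Shuffle the indices, then yield consecutive (inputs, targets) slices.
    idx = np.random.permutation(len(x))
    for start in range(0, len(x), batch_size):
        batch = idx[start:start + batch_size]
        yield x[batch], y[batch]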
Example #8
def main():
    # Depending on the initial weights, between 1000 and 1500 epochs are needed. Around 2000 epochs it reaches over
    # 95% precision on the test cases, although depending on the initial weights we can hit overfitting at 1000 epochs :(
    try:
        EPOCHS = int(sys.argv[1])
        learning_rate = float(sys.argv[2])
    except (IndexError, ValueError):
        EPOCHS = 1000
        learning_rate = .2
    '''
    Create the layers
    '''
    # Create the layers the network needs
    # Input layer (inputs, neurons)
    input_layer = Layer(5, 8)
    # Each neuron is an input to the next layer
    # Hidden layer (inputs, neurons)
    hidden_layer = Layer(8, 3)
    # This layer exists only for experimenting; in general, using 2 hidden layers gives poor results
    hidden_layer2 = Layer(3, 1)
    '''
    Initialize the network
    '''
    # Neural network (we can add hidden_layer2 to experiment)
    neural_network = NeuralNetwork([input_layer, hidden_layer], learning_rate)
    '''
    Load the data from a csv file and train
    '''
    d = Data('sin_normalizar.csv')
    # First normalize, then extract the training and test sets
    d.normalize()
    d.run_split()
    training_features = d.train_features
    training_classes = np.array([d.train_classes]).T

    start_time = time.time()
    neural_network.train(training_features, training_classes, EPOCHS)
    print("Tiempo entrenamiento red: %s seconds" % (time.time() - start_time))
    '''
    Evaluate on the test data
    '''
    def test_data(features, classes):
        precision_count = 0
        for i in range(len(features)):
            output = neural_network.check(features[i])
            # print(output," ", classes[i])
            output = np.mean(output)
            output = neural_network.filter_out(output)
            precision_count = (precision_count +
                               1) if output == classes[i] else precision_count
        return (precision_count / len(classes))

    print("Precision test data")
    print(test_data(d.test_features, d.test_classes))

    print("Precision training data")
    print(test_data(d.train_features, d.train_classes))
    '''
    Plot
    '''

    # plotting errors
    plt.figure(0)
    plt.title("Error")
    plt.xlabel("Epochs")
    plt.plot(neural_network.errors)

    # plotting precision
    plt.figure(1)
    plt.title("Precision")
    plt.xlabel("Epochs")
    plt.plot(neural_network.precision)
    plt.show()  # display both figures
Example #9
import os
# machine-specific path from the original project; adjust to your environment
os.chdir('C:/Users/tomas/OneDrive/Documents/Studies/PW-IAD/MGU/projekt1-implementacja_backpropagation/MGUProjekt1')
from network import Layer
from network import Network
from program_functions import data_read_classification
from program_functions import data_read_regression
from program_functions import plot_classification
from program_functions import plot_regression
from program_functions import plot_errors

### Regression
x = data_read_regression('linear', 100)
brain = Network(learning_rate=0.0001, momentum_rate=0.8, iterations=100)
brain.add(Layer(1, 5, 'sigmoid'))
brain.add(Layer(5, 50, 'sigmoid'))
brain.add(Layer(50, 1, 'linear'))
errors = brain.train_and_evaluate(x[0], x[1], x[2], x[3])
# note: this second call trains the same network further and overwrites the previous errors
errors = brain.train_mini_batch_and_evaluate(x[0], x[1], x[2], x[3], 10)
plot_errors(errors)
plot_regression(brain, x)


#### Classification
x = data_read_classification('simple', 100)
brain = Network(learning_rate=0.01, momentum_rate=0.8, iterations=1000)
brain.add(Layer(2, 10, 'sigmoid'))
brain.add(Layer(10, 100, 'sigmoid'))
brain.add(Layer(100, 50, 'sigmoid'))
brain.add(Layer(50, 2, 'sigmoid'))
errors = brain.train_and_evaluate(x[0], x[1], x[2], x[3])
plot_errors(errors)
Example #10
import matplotlib.pyplot as plt
import numpy as np

from network import NeuralNetwork, Layer
from train import BCE_Loss, train_cv
from utils import plot_decision_boundaries, plot_learning_curves

if __name__ == '__main__':
    data = np.loadtxt('classification2.txt', delimiter=',')
    X = data[:, :2]
    y = data[:, -1:]

    net = NeuralNetwork([
        Layer(2, 2, activation='ReLU'),
        Layer(2, 1, activation='sigmoid'),
    ])

    train_losses, test_losses, accus, nets = train_cv(
        net,
        X,
        y,
        loss=BCE_Loss(),
        lr=0.01,
        n_epochs=1000,
        k=5,
        reset_weights=True,  # new initial (random) weights for each fold
    )

    plot_learning_curves(train_losses, test_losses, accus)

    plot_decision_boundaries(nets, X, y)
Example #11

mnist = MNIST("./mnist")
mnist.gz = True
images, labels = mnist.load_training()
images_test, labels_test = mnist.load_testing()

images = array(images) / 255.
images_test = array(images_test) / 255.
labels = one_hot(labels)
labels_test = one_hot(labels_test)

colours = ["b", "g", "r", "c", "m"]

l2 = L2Reg(5.0)
net1 = Network([Layer(784, 100, sigmoid, l2), Layer(100, 10, softmax, l2)])
net1.save('./weights/tmp.pkl')

net2 = Network.load('./weights/tmp.pkl')
net2.layers[-1].act = sigmoid  # swap the loaded copy's output activation for comparison

nets = [net1, net2]
for i in range(2):
    trainer = Trainer(nets[i], CrossEntropy, 0.5, 10, images, labels,
                      images_test, labels_test)
    data = trainer.SGD(100, tr_acc=True, tr_loss=True)
    y_points = [point["epoch"] for point in data["test"]]
    x_points = [point["acc"] for point in data["test"]]
    pyplot.plot(y_points, x_points, colours[i])

pyplot.show()
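The one_hot helper used above is not shown; a minimal sketch, assuming integer labels 0-9 as in MNIST:

from numpy import array, eye

def one_hot(labels, n_classes=10):
    # Turn each integer label into a one-hot row vector.
    return eye(n_classes)[array(labels)]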
Example #12
            np.log(y_pred + epsilon, where=y_true == 1, out=out) +
            np.log(1 - y_pred + epsilon, where=y_true == 0, out=out))

    @staticmethod
    def grad(y_pred: np.ndarray, y_true: np.ndarray) -> np.ndarray:
        return -np.where(y_true == 1, 1 / (y_pred + epsilon), -1 /
                         (1 - y_pred + epsilon))


if __name__ == '__main__':
    from network import Layer
    # XOR (Goodfellow et al., Deep Learning)
    net = NeuralNetwork([
        Layer(2, 2, activation='ReLU',
              W=np.array([[1, 1], [1, 1]]), b=np.array([0, -1])),
        Layer(2, 1, activation=None,
              W=np.array([1, -2]).reshape(-1, 1), b=np.zeros(1)),
    ])

    X = np.array([
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
    ])
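These are the hand-constructed XOR weights from Goodfellow et al.; a quick NumPy check of the forward pass they encode (ReLU hidden layer, then a linear output unit) reproduces XOR on the four inputs:

    # h = ReLU(X @ W1 + b1), output = h @ w2
    h = np.maximum(X @ np.array([[1, 1], [1, 1]]) + np.array([0, -1]), 0)
    print(h @ np.array([1, -2]).reshape(-1, 1))  # -> [[0], [1], [1], [0]]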