Example #1
    def setUp(self):
        """Prepares a Halfadder Neuron Network"""
        # Layer 1 Neurons:
        n1 = Neuron([12, 12], Sigmoid().activate, bias=-18)
        n2 = Neuron([-12, -12], Sigmoid().activate, bias=6)
        n3 = Neuron([12, 12], Sigmoid().activate, bias=-18)
        # Layer 2 Neurons:
        n4 = Neuron([-12, -12, 0], Sigmoid().activate, bias=6)
        n5 = Neuron([0, 0, 12], Sigmoid().activate, bias=-6)
        # Layers
        l1 = NeuronLayer([n1, n2, n3])
        l2 = NeuronLayer([n4, n5])

        self.ntwrk = NeuronNetwork([l1, l2])
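A minimal sanity check for this fixture (a hypothetical test method, assuming the feed_forward API that NeuronNetwork exposes in Examples #15 and #18, and that the outputs are ordered [sum, carry]):

    def test_half_adder_truth_table(self):
        """Hypothetical check: outputs should approximate [sum, carry]."""
        expected = {(0, 0): [0, 0], (0, 1): [1, 0], (1, 0): [1, 0], (1, 1): [0, 1]}
        for (a, b), (s, c) in expected.items():
            out = self.ntwrk.feed_forward([a, b])
            self.assertAlmostEqual(s, out[0], places=1)
            self.assertAlmostEqual(c, out[1], places=1)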
Example #2
    def test_set_activation(self):
        self.assertEqual(Sigmoid().__class__,
                         self.nn.layers[0].get_activation().__class__)
        self.assertEqual(Sigmoid().__class__,
                         self.nn.layers[1].get_activation().__class__)
        self.assertEqual(Sigmoid().__class__,
                         self.nn.layers[2].get_activation().__class__)
        self.nn.set_activation([Tanh(), Step(), Tanh()])
        self.assertEqual(Tanh().__class__,
                         self.nn.layers[0].get_activation().__class__)
        self.assertEqual(Step().__class__,
                         self.nn.layers[1].get_activation().__class__)
        self.assertEqual(Tanh().__class__,
                         self.nn.layers[2].get_activation().__class__)
Example #3
    def test_feed_sigmoid(self):
        act = Sigmoid()
        p1 = Perceptron(0., [1., 2., 3.], activation=act)
        self.assertEqual(1., act.to_bin(p1.feed([.5, .5, .5])))
        self.assertEqual(1., act.to_bin(p1.feed([0, 0, .1])))
        self.assertEqual(1., act.to_bin(p1.feed([0, 0, 0])))
        self.assertEqual(0., act.to_bin(p1.feed([-0.5, -0.5, -0.5])))
        self.assertEqual(0., act.to_bin(p1.feed([0, 0, -0.1])))
        self.assertEqual(0., act.to_bin(p1.feed([-0.1, 0, 0])))
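These assertions pin down to_bin's threshold: the all-zero input yields sigmoid(0) = 0.5 and is still asserted as 1, so 0.5 must round up. A minimal sketch of such a helper, assuming that reading (the actual implementation is not shown in this listing):

    def to_bin(self, x):
        # Hypothetical Sigmoid method: activations of 0.5 and above
        # count as the high bit, everything below as the low bit.
        return 1. if x >= 0.5 else 0.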
Example #4
    def __init__(self,
                 n,
                 ni=0,
                 is_output=False,
                 next_layer=None,
                 prev_layer=None):
        """
        Class constructor.
        :param n: number of neurons in this layer
        :param ni: number of neurons in the previous layer
        :param is_output: boolean, True if it's the output layer
        :param next_layer: next layer in the neural network
        :param prev_layer: previous layer in the neural network
        """
        self.n = n
        self.is_output = is_output
        # neurons
        self.neurons = np.array([
            Perceptron(activation=Sigmoid(), n_inputs=ni, learning_rate=0.5)
            for _ in range(n)
        ])
        # layers
        self.next_layer = next_layer
        self.prev_layer = prev_layer
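Since each layer holds next_layer/prev_layer references, a network is effectively a doubly linked list of layers. A minimal wiring sketch, assuming this constructor belongs to a class named Layer (the class name is not visible in the snippet):

# Hypothetical: 2 inputs -> 3-neuron hidden layer -> 1-neuron output layer.
hidden = Layer(3, ni=2)
output = Layer(1, ni=3, is_output=True, prev_layer=hidden)
hidden.next_layer = output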
Example #5
def twohl(xTrain, xTest, yTrain, yTest, hots):
    print('EXPERIMENT WITH TWO HIDDEN LAYERS')
    # First neural network
    nn = NeuralNetwork(7, [5, 4], 2, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    # Second neural network
    nn = NeuralNetwork(7, [5, 4], 2, 3)
    nn.set_activation([Tanh(), Tanh(), Tanh()])
    nn.set_learning_rate([0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    # Third neural network
    nn = NeuralNetwork(7, [5, 4], 2, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.3, 0.3, 0.3])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    # Fourth neural network
    nn = NeuralNetwork(7, [10, 10], 2, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    # Fifth neural network
    nn = NeuralNetwork(7, [1, 2], 2, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)
Example #6
def fourhl(xTrain, xTest, yTrain, yTest, hots):
    print('EXPERIMENT WITH FOUR HIDDEN LAYERS')
    nn = NeuralNetwork(7, [5, 5, 4, 4], 4, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    nn = NeuralNetwork(7, [2, 2, 2, 2], 4, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)

    nn = NeuralNetwork(7, [10, 10, 9, 8], 4, 3)
    nn.set_activation([Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid(), Sigmoid()])
    nn.set_learning_rate([0.6, 0.6, 0.6, 0.6, 0.6])

    train_plot_cm(nn, xTrain, xTest, yTrain, yTest,
                  epochs=50, encoding_dict=hots)
Example #7
    def setUp(self):
        """Prepares an OR-type neuron."""
        self.OR_Neuron = Neuron([12, 12], Sigmoid().activate, bias=-6)
Example #8
from Activation import Sigmoid
from Neuron import Neuron

AND_Neuron_wrong = Neuron([0.5, 0.5], Sigmoid().activate, bias=-1)

print("Should be high (1):")
outcome = AND_Neuron_wrong.activate([1, 1])
print("Input [1,1] gives {}".format(outcome))
print("")
print("Should be low (0):")
outcome = AND_Neuron_wrong.activate([1, 0])
print("Input [1,0] gives {}".format(outcome))
outcome = AND_Neuron_wrong.activate([0, 1])
print("Input [0,1] gives {}".format(outcome))
outcome = AND_Neuron_wrong.activate([0, 0])
print("Input [0,0] gives {}".format(outcome))
Example #9
from Activation import Sigmoid
from Neuron import Neuron

INVERTER_Neuron_wrong = Neuron([-1], Sigmoid().activate)

print("Should be high (1):")
outcome = INVERTER_Neuron_wrong.activate([0])
print("Input [0] gives {}".format(outcome))
print("")
print("Should be low (0):")
outcome = INVERTER_Neuron_wrong.activate([1])
print("Input [1] gives {}".format(outcome))
Example #10
    def test_get_activation(self):
        layer = NeuronLayer(n=4, ni=5)
        self.assertEqual(Sigmoid().__class__,
                         layer.get_activation().__class__)
Example #11
    def setUp(self):
        """Prepares an AND-type neuron."""
        self.AND_Neuron = Neuron([12, 12], Sigmoid().activate, bias=-18)
Example #12
l18 = FullyConnected(size=[2048, 2048],
                     num_classes=num_classes,
                     init_weights=args.init,
                     alpha=ALPHA,
                     activation=Relu(),
                     last_layer=False)
l19 = FeedbackFC(size=[2048, 2048],
                 num_classes=num_classes,
                 sparse=sparse,
                 rank=rank)

l20 = FullyConnected(size=[2048, num_classes],
                     num_classes=num_classes,
                     init_weights=args.init,
                     alpha=ALPHA,
                     activation=Sigmoid(),
                     last_layer=True)

model = Model(layers=[
    l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16,
    l17, l18, l19, l20
])

predict = model.predict(X=features)

if args.dfa:
    grads_and_vars = model.dfa(X=features, Y=labels)
else:
    grads_and_vars = model.train(X=features, Y=labels)

if args.opt == "adam":
Example #13
    def setUp(self):
        """Prepares an INVERT-type neuron."""
        self.INVERT_Neuron = Neuron([-12], Sigmoid().activate, bias=6)
Example #14
    :param learning_rate: learning rate of the perceptron
    :return:
    """
    plt.plot(precision, '-b')
    plt.ylim(0, 1)
    plt.title(f'Number of training vs precision\n Learning rate = {learning_rate}')
    plt.show()


if __name__ == "__main__":
    q = 1000
    trainings = 10
    op = 'AND'
    op_data = {'AND': and_data,
               'OR': or_data,
               'NAND': nand_data}
    for key in op_data.keys():
        for lr in np.arange(0.1, 0.92, 0.1):
            p = Perceptron(learning_rate=lr, activation=Sigmoid())
            points, classification = op_data[key]()
            precision_training = np.array([])
            # train the perceptron, recording the fraction of correctly
            # classified points after each pass over the data
            for i in range(trainings):
                local_precision, perceptron_out = p.train_all(points, classification)
                precision_training = np.append(
                    precision_training,
                    (q - np.count_nonzero(local_precision != 0)) / q)
            plot_precision(precision_training, learning_rate=lr)
            print(f'{key} for 1 and 0 = {p.activation.to_bin(p.feed([1, 0]))}')
            print(f'{key} for 0 and 0 = {p.activation.to_bin(p.feed([0, 0]))}')
            print(f'{key} for 1 and 1 = {p.activation.to_bin(p.feed([1, 1]))}')
            print(f'{key} for 0 and 1 = {p.activation.to_bin(p.feed([0, 1]))}')
Example #15
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

n1 = Neuron([-0.5, 0.5], Sigmoid().activate, bias=1.5)

l1 = NeuronLayer([n1])

ntwrk = NeuronNetwork([l1], 1)

x = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [0], [0], [1]]

ntwrk.train(x, y, 1000, 0.0000001)
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Should be as close as possible to high (1)")
print("[1,1] gives:")
print(ntwrk.feed_forward([1, 1]))
print("Should be as close as possible to low (0)")
print("[0,1] gives:")
print(ntwrk.feed_forward([0, 1]))
print("[1,0] gives:")
print(ntwrk.feed_forward([1, 0]))
print("[0,0] gives:")
print(ntwrk.feed_forward([0, 0]))
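The third and fourth arguments to train appear to be an iteration budget and an error target, after which the script reports the network's mean squared error. A reference sketch of that metric, assuming error averages squared differences across the dataset (a hypothetical helper, not the class's actual method):

def mse(network, xs, ys):
    # Mean of the squared output errors over all training samples.
    total = 0.0
    for inputs, targets in zip(xs, ys):
        outputs = network.feed_forward(inputs)
        total += sum((t - o) ** 2 for t, o in zip(targets, outputs))
    return total / len(xs)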
Example #16
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

n1 = Neuron([0.0, 0.1], Sigmoid().activate)
n2 = Neuron([0.2, 0.3], Sigmoid().activate)
n3 = Neuron([0.4, 0.5], Sigmoid().activate)

n4 = Neuron([0.6, 0.7, 0.8], Sigmoid().activate)
n5 = Neuron([0.9, 1.0, 1.1], Sigmoid().activate)

l1 = NeuronLayer([n1, n2, n3])
l2 = NeuronLayer([n4, n5])

ntwrk = NeuronNetwork([l1, l2], 0.5)

x = [[0, 0], [1, 0], [0, 1], [1, 1]]
y = [[0, 0], [1, 0], [1, 0], [0, 1]]

ntwrk.train(x, y, 80000, 0.001)
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Output should be close to [0,0]")
print(ntwrk.feed_forward([0, 0]))
print("Output should be close to [1,0]")
print(ntwrk.feed_forward([1, 0]))
print(ntwrk.feed_forward([0, 1]))
Example #17
    def setUp(self):
        """Prepares a NOR-type neuron."""
        self.NOR_Neuron = Neuron([-12, -12, -12], Sigmoid().activate, bias=6)
Example #18
from Neuron import Neuron
from NeuronLayer import NeuronLayer
from NeuronNetwork import NeuronNetwork
from Activation import Sigmoid

# Layer 1
n1 = Neuron([0.2, -0.4], Sigmoid().activate)
n2 = Neuron([0.7, 0.1], Sigmoid().activate)
# Layer 2
n3 = Neuron([0.6, 0.9], Sigmoid().activate)

l1 = NeuronLayer([n1, n2])
l2 = NeuronLayer([n3])

ntwrk = NeuronNetwork([l1, l2], 1)

x = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [1], [1], [0]]

ntwrk.train(x, y, 40000, 0.001)
print(ntwrk)

print("MSE network:")
print(ntwrk.error(x, y))

print("Should be as close as possible to high (1)")
print("[0,1] gives:")
print(ntwrk.feed_forward([0, 1]))
print("[1,0] gives:")
print(ntwrk.feed_forward([1, 0]))
print("Should be as close as possible to low (0)")
Example #19
    def test_get_last_activation(self):
        self.assertEqual(Sigmoid().__class__,
                         self.nn.get_last_activation().__class__)
Example #20
def run_all_model(train_input,
                  train_target,
                  test_input,
                  test_target,
                  Sample_number,
                  save_plot=False):

    # Define constants along the test
    hidden_nb = 25
    std = 0.1
    eta = 3e-1
    batch_size = 200
    epochs_number = 1000

    # Model 1. No dropout; constant learning rate (SGD)
    print('\nModel 1: Optimizer: SGD; No dropout; ReLU; CrossEntropy')

    # Define model name for plots
    mname = 'Model1'

    # Define structure of the network
    linear_1 = Linear(2, hidden_nb)
    relu_1 = Relu()
    linear_2 = Linear(hidden_nb, hidden_nb)
    relu_2 = Relu()
    linear_3 = Linear(hidden_nb, hidden_nb)
    relu_3 = Relu()
    linear_4 = Linear(hidden_nb, 2)

    model_1 = Sequential(linear_1,
                         relu_1,
                         linear_2,
                         relu_2,
                         linear_3,
                         relu_3,
                         linear_4,
                         loss=CrossEntropy())

    # Initialize weights
    model_1.normalize_parameters(mean=0, std=std)
    # Define optimizer
    optimizer = Sgd(eta)

    # Train model
    my_loss_1 = train_model(model_1, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_1_perf = evaluate_model(model_1,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_1,
                                  save_plot,
                                  mname=mname)

    # Model 2. No dropout; decreasing learning rate (DecreaseSGD)
    print('\nModel 2: Optimizer: DecreaseSGD; No dropout; ReLU; CrossEntropy')

    # Define model name for plots
    mname = 'Model2'

    # Define structure of the network
    linear_1 = Linear(2, hidden_nb)
    relu_1 = Relu()
    linear_2 = Linear(hidden_nb, hidden_nb)
    relu_2 = Relu()
    linear_3 = Linear(hidden_nb, hidden_nb)
    relu_3 = Relu()
    linear_4 = Linear(hidden_nb, 2)

    model_2 = Sequential(linear_1,
                         relu_1,
                         linear_2,
                         relu_2,
                         linear_3,
                         relu_3,
                         linear_4,
                         loss=CrossEntropy())

    # Initialize weights
    model_2.normalize_parameters(mean=0, std=std)
    # Define optimizer
    optimizer = DecreaseSGD(eta)

    # Train model
    my_loss_2 = train_model(model_2, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)
    # Evaluate model and produce plots
    model_2_perf = evaluate_model(model_2,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_2,
                                  save_plot,
                                  mname=mname)

    # Model 3. No dropout; Adam Optimizer
    print('\nModel 3: Optimizer: Adam; No dropout; ReLU; CrossEntropy')

    # Define model name for plots
    mname = 'Model3'

    # Custom hyperparameters
    eta_adam = 1e-3
    epochs_number_adam = 500

    # Define structure of the network
    linear_1 = Linear(2, hidden_nb)
    relu_1 = Relu()
    linear_2 = Linear(hidden_nb, hidden_nb)
    relu_2 = Relu()
    linear_3 = Linear(hidden_nb, hidden_nb)
    relu_3 = Relu()
    linear_4 = Linear(hidden_nb, 2)

    model_3 = Sequential(linear_1,
                         relu_1,
                         linear_2,
                         relu_2,
                         linear_3,
                         relu_3,
                         linear_4,
                         loss=CrossEntropy())

    # Initialize weights
    model_3.normalize_parameters(mean=0, std=std)
    # Define optimizer
    optimizer = Adam(eta_adam, 0.9, 0.99, 1e-8)

    # Train model
    my_loss_3 = train_model(model_3, train_input, train_target, optimizer,
                            epochs_number_adam, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_3_perf = evaluate_model(model_3,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_3,
                                  save_plot,
                                  mname=mname)

    # PLOT TO COMPARE OPTIMIZERS
    if save_plot:
        fig = plt.figure(figsize=(10, 4))
        plt.plot(range(0, epochs_number), my_loss_1, linewidth=1)
        plt.plot(range(0, epochs_number), my_loss_2, linewidth=1)
        plt.plot(range(0, epochs_number_adam), my_loss_3, linewidth=1)
        plt.legend(["SGD", "Decreasing SGD", "Adam"])
        plt.title("Loss")
        plt.xlabel("Epochs")
        plt.savefig('output/compare_optimizers.pdf', bbox_inches='tight')
        plt.close(fig)

    # Model 4. Dropout; SGD
    print('\nModel 4: Optimizer: SGD; Dropout; ReLU; CrossEntropy')

    # Define model name for plots
    mname = 'Model4'

    # Define structure of the network
    dropout = 0.15

    linear_1 = Linear(2, hidden_nb)
    relu_1 = Relu()
    linear_2 = Linear(hidden_nb, hidden_nb, dropout=dropout)
    relu_2 = Relu()
    linear_3 = Linear(hidden_nb, hidden_nb, dropout=dropout)
    relu_3 = Relu()
    linear_4 = Linear(hidden_nb, 2)

    model_4 = Sequential(linear_1,
                         relu_1,
                         linear_2,
                         relu_2,
                         linear_3,
                         relu_3,
                         linear_4,
                         loss=CrossEntropy())

    # Initialize weights
    model_4.normalize_parameters(mean=0, std=std)
    # Define optimizer
    optimizer = Sgd(eta)

    # Train model
    my_loss_4 = train_model(model_4, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_4_perf = evaluate_model(model_4,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_4,
                                  save_plot,
                                  mname=mname)

    # PLOT TO COMPARE DROPOUT AND NO DROPOUT
    if save_plot:
        fig = plt.figure(figsize=(10, 4))
        plt.plot(range(0, epochs_number), my_loss_1, linewidth=1)
        plt.plot(range(0, epochs_number), my_loss_4, linewidth=1)
        plt.legend(["Without Dropout", "With Dropout"])
        plt.title("Loss")
        plt.xlabel("Epochs")
        plt.savefig('output/compare_dropout.pdf', bbox_inches='tight')
        plt.close(fig)

    print('\nEvaluation of different activation functions\n')

    # Model 5. No Dropout; SGD; Tanh
    print('\nModel 5: Optimizer: SGD; No dropout; Tanh; CrossEntropy')

    # Define model name for plots
    mname = 'Model5'

    # Define structure of the network
    linear_1 = Linear(2, hidden_nb)
    tanh_1 = Tanh()
    linear_2 = Linear(hidden_nb, hidden_nb)
    tanh_2 = Tanh()
    linear_3 = Linear(hidden_nb, hidden_nb)
    tanh_3 = Tanh()
    linear_4 = Linear(hidden_nb, 2)

    model_5 = Sequential(linear_1,
                         tanh_1,
                         linear_2,
                         tanh_2,
                         linear_3,
                         tanh_3,
                         linear_4,
                         loss=CrossEntropy())

    # Initialize weights
    model_5.normalize_parameters(mean=0, std=std)
    # Define optimizer
    optimizer = Sgd(eta)

    # Train model
    my_loss_5 = train_model(model_5, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_5_perf = evaluate_model(model_5,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_5,
                                  save_plot,
                                  mname=mname)

    # Model 6. Xavier Initialization
    print(
        '\nModel 6: Optimizer: SGD; No dropout; Tanh; Xavier initialization; CrossEntropy'
    )

    # Define model name for plots
    mname = 'Model6'

    # Define network structure
    linear_1 = Linear(2, hidden_nb)
    tanh_1 = Tanh()
    linear_2 = Linear(hidden_nb, hidden_nb)
    tanh_2 = Tanh()
    linear_3 = Linear(hidden_nb, hidden_nb)
    tanh_3 = Tanh()
    linear_4 = Linear(hidden_nb, 2)

    model_6 = Sequential(linear_1,
                         tanh_1,
                         linear_2,
                         tanh_2,
                         linear_3,
                         tanh_3,
                         linear_4,
                         loss=CrossEntropy())

    model_6.xavier_parameters()
    optimizer = Sgd()

    # Train model
    my_loss_6 = train_model(model_6, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_6_perf = evaluate_model(model_6,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_6,
                                  save_plot,
                                  mname=mname)

    # Model 7. Sigmoid
    print('\nModel 7: Optimizer: SGD; No dropout; Sigmoid; CrossEntropy')

    # Define model name for plots
    mname = 'Model7'

    # Define parameter for sigmoid activation
    p_lambda = 0.1

    # Define network structure
    linear_1 = Linear(2, hidden_nb)
    sigmoid_1 = Sigmoid(p_lambda)
    linear_2 = Linear(hidden_nb, hidden_nb)
    sigmoid_2 = Sigmoid(p_lambda)
    linear_3 = Linear(hidden_nb, hidden_nb)
    sigmoid_3 = Sigmoid(p_lambda)
    linear_4 = Linear(hidden_nb, 2)

    model_7 = Sequential(linear_1,
                         sigmoid_1,
                         linear_2,
                         sigmoid_2,
                         linear_3,
                         sigmoid_3,
                         linear_4,
                         loss=CrossEntropy())

    model_7.normalize_parameters(mean=0.5, std=1)
    optimizer = Sgd(eta=0.5)

    # Train model
    my_loss_7 = train_model(model_7, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_7_perf = evaluate_model(model_7,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_7,
                                  save_plot,
                                  mname=mname)

    # PLOT TO COMPARE EFFECT OF DIFFERENT ACTIVATIONS
    if save_plot:
        fig = plt.figure(figsize=(10, 4))
        plt.plot(range(0, epochs_number), my_loss_1, linewidth=0.5)
        plt.plot(range(0, epochs_number), my_loss_5, linewidth=0.5, alpha=0.8)
        plt.plot(range(0, epochs_number), my_loss_6, linewidth=0.5, alpha=0.8)
        plt.plot(range(0, epochs_number), my_loss_7, linewidth=0.5)
        plt.legend(["Relu", "Tanh", "Tanh (Xavier)", "Sigmoid"])
        plt.title("Loss")
        plt.xlabel("Epochs")
        plt.savefig('output/compare_activations.pdf', bbox_inches='tight')
        plt.close(fig)

    print('\nEvaluation of base model with MSE loss\n')

    # Model 8. MSE loss
    print('\nModel 8: Optimizer: SGD; No dropout; Relu; MSE')

    # Define model name for plots
    mname = 'Model8'
    linear_1 = Linear(2, hidden_nb)
    relu_1 = Relu()
    linear_2 = Linear(hidden_nb, hidden_nb)
    relu_2 = Relu()
    linear_3 = Linear(hidden_nb, hidden_nb)
    relu_3 = Relu()
    linear_4 = Linear(hidden_nb, 2)
    loss = LossMSE()

    model_8 = Sequential(linear_1,
                         relu_1,
                         linear_2,
                         relu_2,
                         linear_3,
                         relu_3,
                         linear_4,
                         loss=loss)

    model_8.normalize_parameters(mean=0, std=std)
    optimizer = Sgd(eta)

    # Train model
    my_loss_8 = train_model(model_8, train_input, train_target, optimizer,
                            epochs_number, Sample_number, batch_size)

    # Evaluate model and produce plots
    model_8_perf = evaluate_model(model_8,
                                  train_input,
                                  train_target,
                                  test_input,
                                  test_target,
                                  my_loss_8,
                                  save_plot,
                                  mname=mname)

    print('Evaluation done!')

    train_loss = torch.tensor([
        model_1_perf[0], model_2_perf[0], model_3_perf[0], model_4_perf[0],
        model_5_perf[0], model_6_perf[0], model_7_perf[0], model_8_perf[0]
    ])
    train_error = torch.tensor([
        model_1_perf[1], model_2_perf[1], model_3_perf[1], model_4_perf[1],
        model_5_perf[1], model_6_perf[1], model_7_perf[1], model_8_perf[1]
    ])
    test_loss = torch.tensor([
        model_1_perf[2], model_2_perf[2], model_3_perf[2], model_4_perf[2],
        model_5_perf[2], model_6_perf[2], model_7_perf[2], model_8_perf[2]
    ])
    test_error = torch.tensor([
        model_1_perf[3], model_2_perf[3], model_3_perf[3], model_4_perf[3],
        model_5_perf[3], model_6_perf[3], model_7_perf[3], model_8_perf[3]
    ])

    return train_loss, train_error, test_loss, test_error
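A hypothetical driver for this benchmark, assuming the 2-D, two-class toy data used throughout is supplied as torch tensors (no data-generation helper appears in this listing, so the random data below is purely illustrative):

import torch

n = 1000
train_input, test_input = torch.rand(n, 2), torch.rand(n, 2)
train_target = (train_input.sum(dim=1) > 1).long()  # toy class labels
test_target = (test_input.sum(dim=1) > 1).long()

train_loss, train_error, test_loss, test_error = run_all_model(
    train_input, train_target, test_input, test_target,
    Sample_number=n, save_plot=True)
print(test_error)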
Example #21
    def test_set_activation(self):
        self.assertEqual(Sigmoid().__class__,
                         self.layer1.get_activation().__class__)
        self.layer1.set_activation(Tanh())
        self.assertEqual(Tanh().__class__,
                         self.layer1.get_activation().__class__)