예제 #1
0
    def fit(self,   x_train=None, y_train=None, 
                    x_test=None , y_test=None ,
                    batch_size=4,
                    epochs=200  ,
                    opt=optimizer.SGD(lr=0.005),
                    loss_function=loss.MSE(),
                    acc_function =metric.accuracy):
        """Train the network with mini-batch gradient descent.

        Parameters
        ----------
        x_train, y_train : arrays of training inputs / targets (2-D; the
            last layer's output size is taken from ``y_train.shape[1]``).
        x_test, y_test : optional validation set; when both are given,
            validation loss/accuracy are tracked and printed per epoch.
        batch_size : number of samples per parameter update.
        epochs : number of full passes over the training data.
        opt : optimizer callable; ``opt(x_batch, y_batch, self)`` must
            return ``(output, reg_sum)``.
        loss_function, acc_function : metrics applied to (output, target).
        """
        self.loss_function = loss_function
        self.acc_function = acc_function

        # Per-epoch history (lists so epochs can be changed without resizing).
        self.acc_vect = []   # training accuracy (%)
        self.loss_vect = []  # training loss
        self.pres_vect = []  # validation accuracy (%)
        self.loss_test = []  # validation loss

        # Number of whole batches per epoch (trailing remainder is dropped).
        self.iter_batch = int(x_train.shape[0] / batch_size)

        # Size the output layer from the targets, then initialize it.
        capa_anterior = self.capas[-1]
        capa_anterior.output_size = y_train.shape[1]
        capa_anterior.ini_weights()

        for it in range(epochs):
            epoch_loss, epoch_acc = 0, 0
            for it_ba in range(self.iter_batch):
                # BUGFIX: the start index must also be scaled by batch_size;
                # the original sliced x_train[it_ba:(it_ba+1)*batch_size],
                # producing overlapping, wrongly-sized batches.
                start = it_ba * batch_size
                end = start + batch_size
                x_batch = x_train[start:end]
                y_batch = y_train[start:end]

                output, reg_sum = opt(x_batch, y_batch, self)

                epoch_loss += self.loss_function(output, y_batch) + reg_sum
                epoch_acc += self.acc_function(output, y_batch)

            self.loss_vect.append(epoch_loss / self.iter_batch)
            self.acc_vect.append(100 * epoch_acc / self.iter_batch)

            if x_test is not None and y_test is not None:
                # NOTE(review): the sibling fit() implementation in this file
                # calls self.forwprop(x_test); confirm the optimizer really
                # exposes forwprop before relying on this path.
                output, reg_sum = opt.forwprop(x_test)
                val_loss = self.loss_function(output, y_test) + reg_sum
                val_acc = self.acc_function(output, y_test)
                self.loss_test.append(val_loss)
                self.pres_vect.append(100 * val_acc)

                print("Epoca {}/{} - loss: {} - acc:{} - acc_test:{}".format(
                        it, epochs, 
                        self.loss_vect[-1],
                        self.acc_vect[-1] ,
                        self.pres_vect[-1]))
            else:    
                print("Epoca {}/{} - loss: {} - acc:{}".format(
                    it, epochs, 
                    self.loss_vect[-1],
                    self.acc_vect[-1] ))
예제 #2
0
def ejer6_211(x_train, y_train):
    """Train a 2-1-1 XOR-style network (ConcatInput skip connection) and
    plot/save its training accuracy and normalized loss curves.

    Parameters
    ----------
    x_train, y_train : training inputs and targets (2-D arrays).
    """
    # Regularizers with weight 0.0 — effectively disabled.
    reg1 = regularizador.L1(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas    =input_size, 
                         act         =activation.Tanh(), 
                         reg         = reg2,
                         name        ="Layer 1"  ,
                         bias        = True  )

    Layer2 = layer.Dense(neuronas    =1, 
                         act         =activation.Tanh(), 
                         reg         =reg1,
                         name        ="Layer 2",
                         bias        = True)

    red_densa.add(Layer1)

    # Feed the raw input alongside Layer1's output into Layer2.
    layer_aux = layer.ConcatInput(input_size, Layer2)

    red_densa.add(layer_aux)

    red_densa.fit(  
                x_train=x_train,    y_train=y_train, 
                batch_size=1,
                epochs=300,
                opt=optimizer.SGD(lr=0.1),
                loss_function=loss.MSE(),
                acc_function=metric.accuracy_xor)

    plt.figure(1)
    plt.ylabel("Precisión [%]")
    plt.plot(red_densa.acc_vect, label="211", alpha=0.6)
    plt.legend(loc=0)
    #plt.savefig("ejer6_acc_211.pdf")

    plt.figure(2)
    plt.ylabel("Pérdida Normalizada")
    # BUGFIX: loss_vect is a plain Python list (built via append), so
    # dividing it by a scalar raises TypeError; convert to an array first.
    loss_arr = np.asarray(red_densa.loss_vect)
    plt.plot(loss_arr / np.max(loss_arr), label="211", alpha=0.6)
    plt.legend(loc=0)

    #plt.savefig("ejer6_loss_211.pdf")
    #plt.show()

    np.savetxt("ejer6_211.txt",  np.array([
                         red_densa.acc_vect,
                         red_densa.loss_vect]).T)
예제 #3
0
def ejer6_221(x_train, y_train):
    """Train a 2-2-1 fully-connected network on the XOR-style task and
    plot/save its training accuracy and normalized loss curves.

    Parameters
    ----------
    x_train, y_train : training inputs and targets (2-D arrays).
    """
    # Regularizers with weight 0.0 — effectively disabled.
    reg1 = regularizador.L2(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas    = input_size, 
                         act         = activation.Tanh(), 
                         reg         = reg1,
                         name        ="Layer 1"  ,
                         bias        = True  )

    Layer2 = layer.Dense(neuronas    = 2, 
                         act         = activation.Tanh(), 
                         reg         = reg2,
                         name        = "Layer 2",
                         bias        = True)

    red_densa.add(Layer1)
    red_densa.add(Layer2)

    red_densa.fit(  
                x_train=x_train,    y_train=y_train, 
                batch_size=1,
                epochs=300,
                opt=optimizer.SGD(lr=0.1),
                loss_function=loss.MSE(),
                acc_function=metric.accuracy_xor)

    plt.figure(1)
    plt.ylabel("Accuracy [%]")
    plt.plot(red_densa.acc_vect, label="221", c='red', alpha=0.6)
    #plt.plot(red_densa.pres_vect, label="Validación", c='blue', alpha=0.6)
    plt.legend(loc=0)
    #plt.savefig("ejer6_acc.pdf")

    plt.figure(2)
    plt.ylabel("Pérdida")
    # BUGFIX: loss_vect is a plain Python list (built via append), so
    # dividing it by a scalar raises TypeError; convert to an array first.
    loss_arr = np.asarray(red_densa.loss_vect)
    plt.plot(loss_arr / np.max(loss_arr), label="221", c='red', alpha=0.6)
    #plt.plot(red_densa.loss_test, label="Validación", c='blue', alpha=0.6)
    plt.legend(loc=0)
    #plt.savefig("ejer6_loss.pdf")
    #plt.show()
    np.savetxt("ejer6_221.txt",  np.array([
                         red_densa.acc_vect,
                         red_densa.loss_vect]).T)
예제 #4
0
    def fit(self,
            x_train=None,
            y_train=None,
            x_test=None,
            y_test=None,
            batch_size=4,
            epochs=200,
            opt=optimizer.SGD(lr=0.1),
            loss_function=loss.MSE(),
            acc_function=metric.accuracy):
        """Train the network, delegating each epoch's updates to ``opt``.

        Parameters
        ----------
        x_train, y_train : training inputs / targets (2-D; the last
            layer's output dimension is set from ``y_train.shape[1]``).
        x_test, y_test : optional validation set; when both are given,
            validation loss/accuracy are tracked and printed per epoch.
        batch_size : stored on the model for the optimizer to consume.
        epochs : number of training epochs.
        opt : optimizer callable invoked as ``opt(x_train, y_train, self)``.
        loss_function, acc_function : metrics applied to (output, target).
        """
        self.loss_function = loss_function
        self.acc_function = acc_function
        self.opt = opt
        self.batch_size = batch_size

        # Per-epoch history lists.
        self.acc_vect = []   # training accuracy (%)
        self.loss_vect = []  # training loss
        self.pres_vect = []  # validation accuracy (%)
        self.loss_test = []  # validation loss

        self.iter_batch = int(x_train.shape[0] / batch_size)

        # Size the output layer from the targets, then initialize it.
        ultima_capa = self.capas[-1]
        ultima_capa.set_ydim(y_train.shape[1])
        ultima_capa.ini_weights()

        for it in range(epochs):

            # NOTE(review): the prints below read loss_vect[-1]/acc_vect[-1],
            # so opt(...) is assumed to append the epoch's training metrics
            # to those lists — confirm against the optimizer implementation.
            opt(x_train, y_train, self)

            # Idiom fix: compare against None with `is not`, not np.any(!=),
            # which does an elementwise comparison when arrays are passed.
            if x_test is not None and y_test is not None:
                output, reg_sum = self.forwprop(x_test)

                val_loss = self.loss_function(output, y_test) + reg_sum
                val_acc = self.acc_function(output, y_test)
                self.loss_test.append(val_loss)
                self.pres_vect.append(100 * val_acc)

                print(
                    "-Epoca {}/{} - loss:{:.4} - loss_test: {:.4} - acc:{:.4} - acc_test:{:.4}"
                    .format(it, epochs, self.loss_vect[-1], self.loss_test[-1],
                            self.acc_vect[-1], self.pres_vect[-1]))
            else:
                print("Epoca {}/{} - loss: {:.4} - acc:{:.4}".format(
                    it, epochs, self.loss_vect[-1], self.acc_vect[-1]))
예제 #5
0
def ejer7_NN1(x_train, y_train, x_test, y_test, N, NN, ejemplos, i):
    """Train an (N, NN, ejemplos)-configured dense network and add its
    accuracy and normalized-loss curves to the current figures.

    Parameters
    ----------
    x_train, y_train : training inputs and targets (2-D arrays).
    x_test, y_test : validation set forwarded to fit().
    N, NN : architecture labels; NN is the second layer's neuron count.
    ejemplos : batch size used for training (and part of the plot label).
    i : color index passed to the module-level ``cmap``.
    """
    # Regularizers with weight 0.0 — effectively disabled.
    reg1 = regularizador.L1(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas=input_size,
                         act=activation.Tanh(),
                         reg=reg2,
                         name="Layer 1",
                         bias=True)

    Layer2 = layer.Dense(neuronas=NN,
                         act=activation.Tanh(),
                         reg=reg1,
                         name="Layer 2",
                         bias=True)

    red_densa.add(Layer1)
    red_densa.add(Layer2)

    red_densa.fit(x_train=x_train,
                  y_train=y_train,
                  x_test=x_test,
                  y_test=y_test,
                  batch_size=ejemplos,
                  epochs=500,
                  opt=optimizer.SGD(lr=0.1),
                  loss_function=loss.MSE(),
                  acc_function=metric.accuracy_xor)
    plt.figure(1)

    plt.ylabel("Accuracy [%]")
    plt.plot(red_densa.acc_vect,
             c=cmap(i),
             label="({},{},{})".format(N, NN, ejemplos))
    plt.legend(loc=0)

    plt.figure(2)
    plt.ylabel("Pérdida Normalizada")
    # BUGFIX: loss_vect is a plain Python list (built via append), so
    # dividing it by a scalar raises TypeError; convert to an array first.
    loss_arr = np.asarray(red_densa.loss_vect)
    plt.plot(loss_arr / np.max(loss_arr),
             c=cmap(i),
             label="({},{},{})".format(N, NN, ejemplos))
    plt.legend(loc=0)