Example #1
# Shared imports assumed for these examples; clasificador, regularizador,
# act (activations), los (losses), model, layer, optimizer and metric are
# project-local modules, and the CIFAR-10 loader is assumed to come from Keras.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import datasets


def ejer3():
    reg1 = regularizador.L2(0.0001)
    reg2 = regularizador.L2(0.0001)

    proto = clasificador.Classifier(epochs=400, batch_size=50, eta=0.003)

    outputfile = 'ejer3_v2_mse.dat'

    (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()

    mean_train = x_train.mean()

    n_clasifi = 10
    X, Y = clasificador.flattening(x_train, y_train, n_clasifi, mean_train)
    X_test, Y_test = clasificador.flattening(x_test, y_test, n_clasifi,
                                             mean_train)

    proto.fit(X,
              Y,
              X_test,
              Y_test,
              act_function1=act.sigmoid(),
              reg1=reg1,
              act_function2=act.Linear(),
              reg2=reg2,
              loss_function=los.MSE())

    # plt.figure(1)
    # plt.ylabel("Accuracy [%]")
    # plt.plot(proto.acc_vect, label="Training", c='red', alpha=0.6, ls='--')
    # plt.plot(proto.pres_vect, label="Validation", c='blue', alpha=0.6)
    # plt.legend(loc=0)
    # plt.savefig("ejer3_acc.pdf")

    # plt.figure(2)
    # plt.ylabel("Loss")
    # plt.plot(proto.loss_vect, label="Training", c='red', alpha=0.6, ls='--')
    # plt.plot(proto.loss_test, label="Validation", c='blue', alpha=0.6)
    # plt.legend(loc=0)
    # plt.savefig("ejer3_loss.pdf")

    # plt.close()

    # np.save would silently append ".npy" to the .dat name;
    # np.savetxt matches the extension (as in the later examples).
    np.savetxt(
        outputfile,
        np.array([
            proto.acc_vect, proto.pres_vect, proto.loss_vect, proto.loss_test
        ]).T)

    # plt.show()
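Both CIFAR-10 exercises lean on clasificador.flattening. Judging from the call sites, it flattens each image, centers it with the training-set mean, and one-hot encodes the labels; a minimal sketch under that assumption (the real implementation lives in the clasificador module):

import numpy as np

def flattening(x, y, n_classes, mean):
    # Flatten each image to a 1-D vector and center with the training mean.
    X = x.reshape(x.shape[0], -1).astype(float) - mean
    # One-hot encode the integer class labels.
    Y = np.zeros((len(y), n_classes))
    Y[np.arange(len(y)), y.flatten()] = 1.0
    return X, Y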
Example #2
def ejer5_loss(loss_fun, act_fun_last, label, nfig1, nfig2):
    print(label)
    reg1 = regularizador.L2(0.1)
    reg2 = regularizador.L2(0.1)

    proto = clasificador.Classifier(epochs=300, batch_size=50, eta=0.001)

    outputfile = "ejer5_" + label + "_v3.dat"

    (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()

    mean_train = x_train.mean()

    n_clasifi = 10
    X, Y = clasificador.flattening(x_train, y_train, n_clasifi, mean_train)
    X_test, Y_test = clasificador.flattening(x_test, y_test, n_clasifi,
                                             mean_train)

    proto.fit(X,
              Y,
              X_test,
              Y_test,
              act_function1=act.ReLU(0),
              reg1=reg1,
              loss_function=loss_fun,
              act_function2=act_fun_last,
              reg2=reg2)

    # plt.figure(nfig1)
    # plt.ylabel("Accuracy [%]")
    # plt.plot(proto.acc_vect, label="Training", c='red', alpha=0.6, ls='--')
    # plt.plot(proto.pres_vect, label="Validation", c='blue', alpha=0.6)
    # plt.legend(loc=0)
    # plt.savefig("ejer5_acc_" + label + ".pdf")

    # plt.figure(nfig2)
    # plt.ylabel("Loss")
    # plt.plot(proto.loss_vect, label="Training", c='red', alpha=0.6, ls='--')
    # plt.plot(proto.loss_test, label="Validation", c='blue', alpha=0.6)
    # plt.legend(loc=0)
    # plt.savefig("ejer5_loss_" + label + ".pdf")
    # plt.show()
    # plt.close()
    # plt.clf()

    np.savetxt(
        outputfile,
        np.array([
            proto.acc_vect, proto.pres_vect, proto.loss_vect, proto.loss_test
        ]).T)
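A hypothetical driver for ejer5_loss, pairing a loss with a final activation. los.MSE(), act.sigmoid() and act.Linear() appear in Example #1; the labels and figure numbers here are illustrative:

# Compare a linear versus a sigmoid output layer under MSE on CIFAR-10.
ejer5_loss(los.MSE(), act.Linear(), "mse_linear", 1, 2)
ejer5_loss(los.MSE(), act.sigmoid(), "mse_sigmoid", 3, 4)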
Example #3
def ejer6_221(x_train, y_train):
    reg1 = regularizador.L2(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas=input_size,
                         act=activation.Tanh(),
                         reg=reg1,
                         name="Layer 1",
                         bias=True)

    Layer2 = layer.Dense(neuronas=2,
                         act=activation.Tanh(),
                         reg=reg2,
                         name="Layer 2",
                         bias=True)

    red_densa.add(Layer1)
    red_densa.add(Layer2)

    red_densa.fit(x_train=x_train,
                  y_train=y_train,
                  batch_size=1,
                  epochs=300,
                  opt=optimizer.SGD(lr=0.1),
                  loss_function=loss.MSE(),
                  acc_function=metric.accuracy_xor)

    plt.figure(1)
    plt.ylabel("Accuracy [%]")
    plt.plot(red_densa.acc_vect, label="221", c='red', alpha=0.6)
    # plt.plot(red_densa.pres_vect, label="Validation", c='blue', alpha=0.6)
    plt.legend(loc=0)
    # plt.savefig("ejer6_acc.pdf")

    plt.figure(2)
    plt.ylabel("Normalized Loss")
    plt.plot(red_densa.loss_vect / np.max(red_densa.loss_vect), label="221", c='red', alpha=0.6)
    # plt.plot(red_densa.loss_test, label="Validation", c='blue', alpha=0.6)
    plt.legend(loc=0)
    # plt.savefig("ejer6_loss.pdf")
    # plt.show()

    np.savetxt("ejer6_221.txt",
               np.array([red_densa.acc_vect,
                         red_densa.loss_vect]).T)
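The ejer6_* exercises train on XOR. A minimal sketch of the data they might be called with, assuming a ±1 encoding to match the Tanh output range and metric.accuracy_xor:

import numpy as np

# XOR truth table in +/-1 encoding (an assumption; a 0/1 encoding would
# call for a different output activation).
x_train = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]], dtype=float)
y_train = np.array([[-1], [1], [1], [-1]], dtype=float)

ejer6_221(x_train, y_train)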
Example #4
def __init__(self,
             neuronas=1,
             act=1,
             reg=regularizador.L2(0.0),
             name="No Name",
             bias=False,
             isCon=False):
    self.neuronas = neuronas  # number of neurons in the layer
    self.act = act            # activation object (the default of 1 is a placeholder)
    self.reg = reg            # weight regularizer (default: an inert L2(0.0))
    self.name = name          # label used for identification
    self.bias = bias          # whether the layer carries a bias term
    self.isCon = isCon        # presumably marks a concat/skip-connection layer
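A typical instantiation, as exercised in Examples #3 and #5. Note that the default reg=regularizador.L2(0.0) is evaluated once at definition time, so every layer that omits the argument shares the same regularizer instance (harmless here only because its coefficient is zero):

hidden = layer.Dense(neuronas=2,
                     act=activation.Tanh(),
                     reg=regularizador.L2(0.0),
                     name="Hidden",
                     bias=True)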
Example #5
def ejer6_211(x_train, y_train):
    reg1 = regularizador.L1(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas=input_size,
                         act=activation.Tanh(),
                         reg=reg2,
                         name="Layer 1",
                         bias=True)

    Layer2 = layer.Dense(neuronas=1,
                         act=activation.Tanh(),
                         reg=reg1,
                         name="Layer 2",
                         bias=True)

    red_densa.add(Layer1)

    # Wrap Layer2 so the raw input is concatenated with Layer1's output:
    # the skip connection of the 2-1-1 architecture.
    layer_aux = layer.ConcatInput(input_size, Layer2)

    red_densa.add(layer_aux)

    red_densa.fit(x_train=x_train,
                  y_train=y_train,
                  batch_size=1,
                  epochs=300,
                  opt=optimizer.SGD(lr=0.1),
                  loss_function=loss.MSE(),
                  acc_function=metric.accuracy_xor)

    plt.figure(1)
    plt.ylabel("Accuracy [%]")
    plt.plot(red_densa.acc_vect, label="211", alpha=0.6)
    plt.legend(loc=0)
    # plt.savefig("ejer6_acc_211.pdf")

    plt.figure(2)
    plt.ylabel("Normalized Loss")
    plt.plot(red_densa.loss_vect / np.max(red_densa.loss_vect), label="211", alpha=0.6)
    plt.legend(loc=0)
    # plt.savefig("ejer6_loss_211.pdf")
    # plt.show()

    np.savetxt("ejer6_211.txt",
               np.array([red_densa.acc_vect,
                         red_densa.loss_vect]).T)
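layer.ConcatInput(input_size, Layer2) evidently feeds the raw input alongside the previous layer's output into the wrapped layer, which is what lets the 2-1-1 net solve XOR. A minimal sketch of that forward pass, with assumed method names (the real class lives in the layer module):

import numpy as np

class ConcatInput:
    # Hypothetical sketch: concatenate the network input with the previous
    # layer's activation, then apply the wrapped Dense layer.
    def __init__(self, input_size, inner):
        self.input_size = input_size
        self.inner = inner

    def forward(self, x, h):
        # x: raw input batch, h: previous layer's output batch.
        return self.inner.forward(np.concatenate([x, h], axis=1))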
Example #6
def ejer7_NN1(x_train, y_train, x_test, y_test, N, NN, ejemplos, i):
    reg1 = regularizador.L1(0.0)
    reg2 = regularizador.L2(0.0)

    red_densa = model.Red()
    input_size = x_train.shape[1]

    Layer1 = layer.Dense(neuronas=input_size,
                         act=activation.Tanh(),
                         reg=reg2,
                         name="Layer 1",
                         bias=True)

    Layer2 = layer.Dense(neuronas=NN,
                         act=activation.Tanh(),
                         reg=reg1,
                         name="Layer 2",
                         bias=True)

    red_densa.add(Layer1)
    red_densa.add(Layer2)

    red_densa.fit(x_train=x_train,
                  y_train=y_train,
                  x_test=x_test,
                  y_test=y_test,
                  batch_size=ejemplos,
                  epochs=500,
                  opt=optimizer.SGD(lr=0.1),
                  loss_function=loss.MSE(),
                  acc_function=metric.accuracy_xor)
    plt.figure(1)
    plt.ylabel("Accuracy [%]")
    plt.plot(red_densa.acc_vect,
             c=cmap(i),
             label="({},{},{})".format(N, NN, ejemplos))
    plt.legend(loc=0)

    plt.figure(2)
    plt.ylabel("Normalized Loss")
    plt.plot(red_densa.loss_vect / np.max(red_densa.loss_vect),
             c=cmap(i),
             label="({},{},{})".format(N, NN, ejemplos))
    plt.legend(loc=0)
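cmap is used above but never defined in the snippet; it is presumably a module-level colormap. A hypothetical driver, reusing the XOR data sketched after Example #3 (also as the test set) and sweeping illustrative (NN, ejemplos) values:

import matplotlib.pyplot as plt

cmap = plt.get_cmap("tab10")  # assumed module-level colormap

# N=2 inputs; NN hidden neurons; 'ejemplos' doubles as the batch size.
for i, (NN, ejemplos) in enumerate([(1, 4), (2, 4), (4, 4)]):
    ejer7_NN1(x_train, y_train, x_train, y_train, 2, NN, ejemplos, i)

plt.show()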