Example #1
def test_linear():
    coef1 = 1002
    coef2 = 13
    bias = 4

    # linear function to be learned
    def f(x1, x2):
        return x1 * coef1 + coef2 * x2 + bias

    # training data with noise
    def f_bruit(x1, x2):
        bruit = np.random.normal(0, 1, len(x1)).reshape((-1, 1))
        return f(x1, x2) + bruit

    nb_data = 100
    x1 = np.random.uniform(-10, 10, nb_data)
    x2 = np.random.uniform(-10, 10, nb_data)
    x1 = x1.reshape((-1, 1))
    x2 = x2.reshape((-1, 1))
    datay = f_bruit(x1, x2)

    datax = np.concatenate((x1, x2), axis=1)
    # Input and Output size of our NN
    input_size = len(datax[0])
    output_size = 1

    # Initialize modules with respective size
    iteration = 300
    gradient_step = 1e-4
    m_mse = MSE()
    m_linear = Linear(input_size, output_size)

    for _ in range(iteration):
        hidden_l = m_linear.forward(datax)
        loss = m_mse.forward(datay, hidden_l)
        print("max loss:", np.max(loss))
        loss_back = m_mse.backward(datay, hidden_l)

        m_linear.backward_update_gradient(datax, loss_back)
        m_linear.update_parameters(gradient_step=gradient_step)
        m_linear.zero_grad()

    x1 = np.random.uniform(-10, 10, nb_data)
    x2 = np.random.uniform(-10, 10, nb_data)
    x1 = x1.reshape((-1, 1))
    x2 = x2.reshape((-1, 1))
    testy = f(x1, x2)
    testx = np.concatenate((x1, x2), axis=1)
    hidden_l = m_linear.forward(testx)
    print("max différence res:", np.max(hidden_l - testy))
    print("parameters:", str(m_linear._parameters))
    print("valeurs voulues:", str([[coef1], [coef2]]))
    print("biais:", str(m_linear._bias_parameters))
    print("valeur voulue:", str([bias]))
Example #2
def test_auto_encodeur():
    uspsdatatrain = "../data/USPS_train.txt"
    uspsdatatest = "../data/USPS_test.txt"
    alltrainx, alltrainy = load_usps(uspsdatatrain)
    alltestx, alltesty = load_usps(uspsdatatest)
    alltrainx /= 2
    alltestx /= 2

    # t-SNE visualization of the raw test data before training
    TNSE(alltesty, alltestx)

    # Initialize modules with respective size
    iteration = 100
    gradient_step = 1e-3
    batch_size = 50  # len(alltrainx)

    input_size = alltrainx.shape[1]
    hidden_size = 100
    compression_size = 2

    m_linear = Linear(input_size, hidden_size)
    m_tanh = TanH()
    m_linear2 = Linear(hidden_size, compression_size)

    # decoder layers: weights tied to the transposed encoder weights
    m_linear3 = Linear(compression_size, hidden_size)
    m_linear3._parameters = m_linear2._parameters.T

    m_linear4 = Linear(hidden_size, input_size)
    m_linear4._parameters = m_linear._parameters.T

    m_sigmoid = Sigmoid()

    m_loss = BCE()

    seq = Sequential([
        m_linear, m_tanh, m_linear2, m_tanh, m_linear3, m_tanh, m_linear4,
        m_sigmoid
    ])

    opt = Optim(seq, loss=m_loss, eps=gradient_step)
    opt.SGD(alltrainx, alltrainx, batch_size, maxiter=iteration, verbose=2)

    predict = opt.predict(alltestx)
    compression_train = seq.forward(alltrainx)[-5]
    compression = seq.forward(alltestx)[-5]
    """
    # print 
    for i in range(6):
        plt.imshow(alltestx[i].reshape((16,16)))
        plt.show()
        plt.imshow(predict[i].reshape((16,16)))
        plt.show()
        plt.imshow(compression[i].reshape((compression_size,1)))
        plt.show()
    """
    # if compression_size == 2:
    #    plot_2D(alltesty, compression)

    TNSE(alltesty, alltestx)
Example #3
def test_non_linear_SGD():
    # data generation
    datax, datay = tools.gen_arti(centerx=1,
                                  centery=1,
                                  sigma=0.1,
                                  nbex=1000,
                                  data_type=1,
                                  epsilon=0.1)
    testx, testy = tools.gen_arti(centerx=1,
                                  centery=1,
                                  sigma=0.1,
                                  nbex=1000,
                                  data_type=1,
                                  epsilon=0.1)
    datay = datay[..., None]
    testy = testy[..., None]
    datay = np.where(datay == -1, 0, 1)
    testy = np.where(testy == -1, 0, 1)

    # NN parameters
    batch_size = len(datax)
    input_size = len(datax[0])
    hidden_size = 2
    final_size = 1
    iteration = 100
    gradient_step = 10e-3

    # Setup NN
    net = Sequential([
        Linear(input_size, hidden_size, bias=True),
        TanH(),
        Linear(hidden_size, final_size, bias=True),
        Sigmoid()
    ])

    opt = Optim(net=net, loss=MSE(), eps=gradient_step)
    opt.SGD(datax, datay, batch_size, maxiter=iteration, verbose=2)

    def yhat(x):
        yhat = opt.predict(x)
        return np.where(yhat >= 0.5, 1, -1)

    tools.plot_frontiere(testx, yhat, step=100)
    tools.plot_data(testx, testy.reshape(-1))
    plt.plot()
    plt.show()
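The `Optim` wrapper and its `SGD` method drive most of the remaining examples. Below is a hypothetical sketch of that wrapper, inferred from the calls `Optim(net, loss, eps)`, `opt.SGD(X, Y, batch_size, maxiter, verbose)` and `opt.predict(X)`; it assumes the network exposes `forward`, `backward`, `update_parameters` and `zero_grad`, and the project's real class may differ.

import numpy as np

class Optim:
    # Hypothetical sketch of the optimizer wrapper used in these tests.
    def __init__(self, net, loss, eps=1e-3):
        self.net = net
        self.loss = loss
        self.eps = eps

    def step(self, batch_x, batch_y):
        yhat = self.net.forward(batch_x)             # forward pass
        delta = self.loss.backward(batch_y, yhat)    # gradient of the loss w.r.t. yhat
        self.net.backward(batch_x, delta)            # backpropagate and accumulate gradients
        self.net.update_parameters(gradient_step=self.eps)
        self.net.zero_grad()
        return np.mean(self.loss.forward(batch_y, yhat))

    def SGD(self, X, Y, batch_size, maxiter=100, verbose=0):
        for it in range(maxiter):
            idx = np.random.permutation(len(X))
            for start in range(0, len(X), batch_size):
                batch = idx[start:start + batch_size]
                loss = self.step(X[batch], Y[batch])
            if verbose:
                print("iter", it, "loss", loss)

    def predict(self, X):
        return self.net.forward(X)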
Example #4
def test_auto_encodeur():
    mnist = fetch_openml('mnist_784')
    alltrainx, alltrainy = mnist.data.to_numpy()[:10000, :], np.intc(
        mnist.target.to_numpy()[:10000])
    alltestx, alltesty = mnist.data.to_numpy()[10000:20000, :], np.intc(
        mnist.target.to_numpy()[10000:20000])
    alltestx, alltrainx = alltestx / 255, alltrainx / 255

    # Initialize modules with respective size
    iteration = 100
    gradient_step = 1e-3
    batch_size = 50  # len(alltrainx)
    input_size = alltrainx.shape[1]
    hidden_size = 100
    compression_size = 10

    m_linear = Linear(input_size, hidden_size)
    m_tanh = TanH()
    m_linear2 = Linear(hidden_size, compression_size)

    # decoder layers: weights tied to the transposed encoder weights
    m_linear3 = Linear(compression_size, hidden_size)
    m_linear3._parameters = m_linear2._parameters.T

    m_linear4 = Linear(hidden_size, input_size)
    m_linear4._parameters = m_linear._parameters.T

    m_sigmoid = Sigmoid()

    m_loss = BCE()

    seq = Sequential([
        m_linear, m_tanh, m_linear2, m_tanh, m_linear3, m_tanh, m_linear4,
        m_sigmoid
    ])

    opt = Optim(seq, loss=m_loss, eps=gradient_step)
    opt.SGD(alltrainx, alltrainx, batch_size, maxiter=iteration, verbose=True)

    predict = opt.predict(alltestx)
    compression_train = seq.forward(alltrainx)[-5]
    compression = seq.forward(alltestx)[-5]

    # print
    size = int(np.sqrt(alltestx.shape[1]))
    for i in range(6):
        plt.imshow(alltestx[i].reshape((size, size)))
        plt.show()
        plt.imshow(predict[i].reshape((size, size)))
        plt.show()
        plt.imshow(compression[i].reshape(-1, 1))
        plt.show()

    # TNSE(alltesty,compression)
    return cluster(compression_train, compression, alltrainy, alltesty)
Example #5
def test_linear_SGD():
    coef1 = 1002
    coef2 = 13
    bias = 4

    # linear function to be learned
    def f(x1, x2):
        return x1 * coef1 + coef2 * x2 + bias

    # training data with noise
    def f_bruit(x1, x2):
        bruit = np.random.normal(0, 1, len(x1)).reshape((-1, 1))
        return f(x1, x2) + bruit

    nb_data = 100
    x1 = np.random.uniform(-10, 10, nb_data)
    x2 = np.random.uniform(-10, 10, nb_data)
    x1 = x1.reshape((-1, 1))
    x2 = x2.reshape((-1, 1))
    datay = f_bruit(x1, x2)

    datax = np.concatenate((x1, x2), axis=1)
    # Input and Output size of our NN
    input_size = len(datax[0])
    output_size = 1

    # Initialize modules with respective size
    iteration = 50
    gradient_step = 1e-4

    m_linear = Linear(input_size, output_size)
    m_mse = MSE()

    seq = Sequential([m_linear])
    opt = Optim(seq, loss=m_mse, eps=gradient_step)
    opt.SGD(datax, datay, nb_data, maxiter=iteration, verbose=2)

    x1 = np.random.uniform(-10, 10, nb_data)
    x2 = np.random.uniform(-10, 10, nb_data)
    x1 = x1.reshape((-1, 1))
    x2 = x2.reshape((-1, 1))
    testy = f(x1, x2)
    testx = np.concatenate((x1, x2), axis=1)
    hidden_l = opt.predict(testx)

    print("max différence res:", np.max(hidden_l - testy))
    print("parameters:", str(m_linear._parameters))
    print("valeurs voulues:", str([[coef1], [coef2]]))
    print("biais:", str(m_linear._bias_parameters))
    print("valeur voulue:", str([bias]))
Example #6
                                  data_type=0,
                                  epsilon=0.1)
    datay_r = np.zeros((len(datay), 2))
    # Re-arranging data to compute a probability
    for y in range(len(datay)):
        if datay[y] == -1:
            datay_r[y][0] = 1
        elif datay[y] == 1:
            datay_r[y][1] = 1

    # Input and Output size of our NN
    input_size = len(datax[0])
    output_size = len(np.unique(datay))
    # Initialize modules with respective size
    m_mse = MSE()
    m_linear = Linear(input_size, output_size)
    m_sigmoid = Sigmoid()
    # Forward pass
    hidden_l = m_linear.forward(datax)
    assert (hidden_l.shape == (len(datax), output_size))
    act_l = m_sigmoid.forward(hidden_l)
    assert (act_l.shape == hidden_l.shape)
    loss = m_mse.forward(hidden_l, datay_r)
    assert (loss.shape == (len(datax), ))
    # Backward pass
    loss_back = m_mse.backward(datay_r, hidden_l)
    assert (loss_back.shape == (len(datax), output_size))
    delta_funcact = m_sigmoid.backward_delta(hidden_l, loss_back)
    assert (loss_back.shape == delta_funcact.shape)
    delta_linear = m_linear.backward_delta(datax, delta_funcact)
    # output of size (number of inputs, number of data points)
Example #7
    chan_input = 1
    chan_output = 32
    stride = 1

    max_pool_stride = 2
    max_pool_kernel = 2

    # loss function
    sftmax = CESoftMax()

    # Network parameters
    net = Sequential([
        Conv1D(kernel_size, chan_input, chan_output, stride=stride),
        MaxPool1D(max_pool_kernel, max_pool_stride),
        Flatten(),
        Linear(4064, 100),
        ReLU(),
        Linear(100, 10)
    ])

    # Train networks
    opt = Optim(net=net, loss=sftmax, eps=gradient_step)
    opt.SGD(alltrainx,
            alltrainy_proba,
            batch_size,
            X_val=allvalx,
            Y_val=allvaly,
            f_val=lambda x: np.argmax(Softmax().forward(x), axis=1),
            maxiter=iterations,
            verbose=2)
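The 4064 input size of the first `Linear` layer above is consistent with 16x16 USPS digits flattened to length-256 sequences; `kernel_size` is defined above this excerpt, so its value (3) in the check below is an assumption.

# Sanity check for Linear(4064, 100), assuming a length-256 input and kernel_size = 3
length = 256
conv_out = (length - 3) // 1 + 1      # stride 1 -> 254 positions, 32 output channels
pool_out = (conv_out - 2) // 2 + 1    # 2/2 max pooling -> 127 positions
flat_size = pool_out * 32             # 127 * 32 = 4064, the Flatten output fed to Linear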
Example #8
def test_non_linear():
    # data generation
    datax, datay = tools.gen_arti(centerx=1,
                                  centery=1,
                                  sigma=0.1,
                                  nbex=1000,
                                  data_type=1,
                                  epsilon=0.1)
    testx, testy = tools.gen_arti(centerx=1,
                                  centery=1,
                                  sigma=0.1,
                                  nbex=1000,
                                  data_type=1,
                                  epsilon=0.1)

    datay = datay[..., None]
    testy = testy[..., None]

    datay = np.where(datay == -1, 0, 1)
    testy = np.where(testy == -1, 0, 1)
    nb_data = len(datax)

    # Input and Output size of our NN
    input_size = len(datax[0])
    hidden_size = 3
    final_size = 1

    # Initialize modules with respective size
    iteration = 100
    gradient_step = 10e-3

    m_linear_first = Linear(input_size, hidden_size, bias=True)
    m_linear_second = Linear(hidden_size, final_size, bias=True)
    m_sig = Sigmoid()
    m_tanh = TanH()
    m_mse = MSE()

    for _ in range(iteration):
        hidden_l = m_linear_first.forward(datax)
        hidden_l_tanh = m_tanh.forward(hidden_l)
        hidden_l2 = m_linear_second.forward(hidden_l_tanh)
        hidden_l2_sigmoid = m_sig.forward(hidden_l2)
        loss = m_mse.forward(datay, hidden_l2_sigmoid)

        print("max loss:", np.max(loss))

        loss_back = m_mse.backward(datay, hidden_l2_sigmoid)
        delta_sigmoid = m_sig.backward_delta(hidden_l2, loss_back)
        delta_linear_2 = m_linear_second.backward_delta(
            hidden_l_tanh, delta_sigmoid)
        delta_tanh = m_tanh.backward_delta(hidden_l, delta_linear_2)

        m_linear_second.backward_update_gradient(hidden_l_tanh, delta_sigmoid)
        m_linear_first.backward_update_gradient(datax, delta_tanh)

        m_linear_second.update_parameters(gradient_step=gradient_step)
        m_linear_first.update_parameters(gradient_step=gradient_step)

        m_linear_first.zero_grad()
        m_linear_second.zero_grad()

    def yhat(x):
        hidden_l = m_linear_first.forward(x)
        hidden_l_tanh = m_tanh.forward(hidden_l)
        hidden_l2 = m_linear_second.forward(hidden_l_tanh)
        yhat = m_sig.forward(hidden_l2)
        return np.where(yhat >= 0.5, 1, -1)

    tools.plot_frontiere(testx, yhat, step=100)
    tools.plot_data(testx, testy.reshape(-1))
    plt.show()
Example #9
def test_multiclass():
    """
    testx, testy = get_usps([neg, pos], alltestx, alltesty)
    testy = np.where(testy == neg, -1, 1)
    :return:
    """
    uspsdatatrain = "../data/USPS_train.txt"
    uspsdatatest = "../data/USPS_test.txt"
    alltrainx, alltrainy = load_usps(uspsdatatrain)
    alltestx, alltesty = load_usps(uspsdatatest)
    input_size = len(alltrainx[0])
    output_size = len(np.unique(alltesty))
    alltrainy_proba = transform_numbers(alltrainy, output_size)

    # Initialize modules with respective size
    iteration = 100
    gradient_step = 10e-5
    arbitrary_neural = 128
    m_linear = Linear(input_size, arbitrary_neural)
    m_act1 = TanH()
    m_linear2 = Linear(arbitrary_neural, output_size)
    m_act2 = Softmax()
    # m_loss = BCE()
    m_loss = CESoftMax()
    for _ in range(iteration):
        # Forward pass
        hidden_l1 = m_linear.forward(alltrainx)
        act1 = m_act1.forward(hidden_l1)
        hidden_l2 = m_linear2.forward(act1)
        loss = m_loss.forward(alltrainy_proba, hidden_l2)
        # print("max loss:", np.mean(loss, axis=0))

        print("max loss:", np.mean(loss))
        # Backward pass

        loss_back = m_loss.backward(alltrainy_proba, hidden_l2)
        hidden_l2_back = m_linear2.backward_delta(act1, loss_back)

        act1_back = m_act1.backward_delta(hidden_l1, hidden_l2_back)

        # update gradient
        m_linear2.backward_update_gradient(act1, loss_back)
        m_linear.backward_update_gradient(alltrainx, act1_back)

        # update parameters
        m_linear2.update_parameters(gradient_step=gradient_step)
        m_linear.update_parameters(gradient_step=gradient_step)

        m_linear.zero_grad()
        m_linear2.zero_grad()

    hidden_l1 = m_linear.forward(alltestx)
    act1 = m_act1.forward(hidden_l1)
    hidden_l2 = m_linear2.forward(act1)
    act2 = m_act2.forward(hidden_l2)
    predict = np.argmax(act2, axis=1)

    res = skt.confusion_matrix(predict, alltesty)
    print(np.sum(np.where(predict == alltesty, 1, 0)) / len(predict))
    plt.imshow(res)
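`transform_numbers` is not shown in this excerpt; a plausible, hypothetical implementation consistent with how it is called above would one-hot encode the integer labels:

import numpy as np

def transform_numbers(y, nb_classes):
    # Hypothetical sketch: one-hot encode integer labels into shape (n, nb_classes).
    onehot = np.zeros((len(y), nb_classes))
    onehot[np.arange(len(y)), np.asarray(y, dtype=int)] = 1
    return onehot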
Example #10
import numpy as np

# Sigmoid and ReLU are used below; their import paths are assumed to mirror the
# other activation modules.
from src.Activation.relu import ReLU
from src.Activation.sigmoid import Sigmoid
from src.Activation.softmax import Softmax
from src.Activation.tanH import TanH
from src.Loss.BCE import BCE
from src.Loss.CESoftMax import CE
from src.Loss.MSE import MSE
from src.Module.conv1D import Conv1D
from src.Module.flatten import Flatten
from src.Module.linear import Linear
from src.Pooling.maxPool1D import MaxPool1D

np.random.seed(1)

datax = np.random.randn(20, 10)
datay = np.random.choice([-1, 1], 20, replace=True)
dataymulti = np.random.choice(range(10), 20, replace=True)
linear = Linear(10, 1)
sigmoide = Sigmoid()
softmax = Softmax()
tanh = TanH()
relu = ReLU()
conv1D = Conv1D(k_size=3, chan_in=1, chan_out=5, stride=2)
maxpool1D = MaxPool1D(k_size=2, stride=2)
flatten = Flatten()

mse = MSE()
bce = BCE()
crossentr = CE()  # cross entropy with log softmax

## Linear and MSE
linear.zero_grad()
res_lin = linear.forward(datax)