# ===== Esempio n. 1 =====
# 0
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD()) -> None:
    """Run the training loop, printing the summed loss after every epoch."""
    for epoch in range(num_epochs):
        running_loss = 0.0
        for batch in iterator(inputs, targets):
            # Forward pass and loss accumulation for this mini-batch.
            outputs = net.forward(batch.inputs)
            running_loss += loss.loss(outputs, batch.targets)
            # Backpropagate, then let the optimizer update parameters in place.
            net.backward(loss.grad(outputs, batch.targets))
            optimizer.step(net)
        print(epoch, running_loss)
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD(0.001),
          eps: float = -1) -> "tuple[list, list]":
    """Train *net* and record per-epoch statistics.

    Args:
        net: network to train; per-iteration context (n_iter, curr_batch,
            loss_f) is attached to it for the optimizer's use.
        inputs, targets: full training data, batched by *iterator*.
        num_epochs: maximum number of passes over the data.
        iterator: yields batches with .inputs / .targets attributes.
        loss: loss object providing .loss() and .grad().
        optimizer: updates the net in place via .step(net).
        eps: early-stopping threshold on the summed epoch loss; a
            non-positive value (the default) disables early stopping.

    Returns:
        (loss_list, eval_list): summed loss per epoch, and the value of
        net.n_eval recorded at the end of each epoch. (The original
        annotation ``[float]`` was wrong — two lists are returned.)
    """
    loss_list = []
    eval_list = []
    net.n_eval = 0  # evaluation counter, presumably incremented by the net — TODO confirm
    for epoch in range(num_epochs):
        n_iter = 0
        epoch_loss = 0.0

        print(f'================   EPOCH NUMBER {epoch + 1}   ================')
        for batch in iterator(inputs, targets):
            # Expose per-iteration context on the net; presumably read by
            # optimizers such as LM_cond during step() — TODO confirm.
            net.n_iter = n_iter
            net.curr_batch = batch
            net.loss_f = loss

            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
            n_iter += 1

        eval_list.append(net.n_eval)
        print(epoch, epoch_loss)
        loss_list.append(epoch_loss)

        # Early stopping: only active when a positive eps was supplied.
        if eps > 0 and epoch_loss < eps:
            print('precisão atingida')
            break

    return loss_list, eval_list

# Tiny 1-2-1 ReLU network with hand-picked initial weights.
net = NeuralNet([
    Linear(input_size=1, output_size=2, weights=np.array([[1.0, 2.0]]), biases=np.array([0.0, 0.0])),
    reLu(),
    Linear(input_size=2, output_size=1, weights=np.array([[3.0], [4.0]]), biases=np.array([0.0])),
    reLu()
])

n_epochs = 1000

start_time = time.time()
# BUG FIX: train() (defined above) returns (loss_list, eval_list); binding
# the pair to a single name made loss_list a 2-tuple, so any later indexing
# of per-epoch losses would silently read the wrong data.
loss_list, eval_list = train(net, inputs, targets, loss=MSE(),
                             optimizer=SGD(1e-5),
                             iterator=BatchIterator(batch_size=5),
                             num_epochs=n_epochs, eps=2000)
end_time = time.time()
print(f'Tempo gasto no treinamento: {end_time - start_time}s')

# Regular grid on which the trained net will be sampled for plotting.
ex = np.linspace(0, 20, 200)
# ===== Esempio n. 4 =====
# 0
           output_size=1,
           weights=np.array([[3.0], [4.0]]),
           biases=np.array([0.0]))
])

n_epochs = 2
eps = 1e-4  # early-stopping threshold handed to train()

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
# Full-batch Levenberg-Marquardt run; the two LM_cond parameters come
# straight from the command line — assumed to be floats, no argv
# validation is done here (TODO confirm expected CLI usage).
loss_list, eval_list = train(net,
                             inputs,
                             targets,
                             loss=MSE(),
                             optimizer=LM_cond(float(sys.argv[1]),
                                               float(sys.argv[2])),
                             iterator=BatchIterator(batch_size=len(inputs)),
                             num_epochs=n_epochs,
                             eps=eps)

# for x, y in zip(inputs, targets)
#     predicted = net.forward(x)
#     print(x, predicted, y)

# Sample the trained net on a grid; test_loss is declared but never
# filled in this visible chunk.
ex = np.arange(0, 5, 0.2)
ey = []
test_loss = []
for val in ex:
    predicted = net.forward([val])
    ey.append(predicted)

#plt.scatter(range(0,len(loss_list)), loss_list)
# ===== Esempio n. 5 =====
# 0
    Linear(input_size=30, output_size=24),
    Tanh(),
    Linear(input_size=24, output_size=30),
    Tanh(),
    Linear(input_size=30, output_size=35),
    Tanh(),
    Linear(input_size=35, output_size=1),
    Sigmoid()
])

n_epochs = 200
# Adam training with mini-batches of 128.
loss_list = train(net,
                  inputs,
                  targets,
                  optimizer=Adam(lr=1e-2, gamma1=0.3, gamma2=0.4),
                  iterator=BatchIterator(128),
                  num_epochs=n_epochs)

# Score the first 1000 test rows with the trained net.
y_pred = []
for x in X_test[0:1000]:
    y_pred.append(net.forward(x))
y_pred = np.array(y_pred)

# NOTE(review): this scans the *features* (X_test) for zeros and prints the
# hits as 'fraudes'; it looks like it should inspect the labels instead —
# confirm against how X_test/y_test are built upstream.
aux = X_test[0:1000]
indices_1 = np.where(aux == 0)
print('fraudes:', indices_1[0])

plt.title("Erro quadrático x Tempo")
plt.xlabel("número de iterações")
plt.ylabel("erro quadrático")
# NOTE(review): assumes train() returned a flat list of exactly n_epochs
# losses; if this snippet's train() returns (losses, evals) or stops early,
# this scatter will fail on a length mismatch — confirm.
plt.scatter(list(range(0, n_epochs)), loss_list)
           output_size=1,
           weights=np.array([[3.0], [4.0]]),
           biases=np.array([0.0]))
])

n_epochs = 1000
eps = 1e-3  # early-stopping threshold handed to train()

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
# Levenberg-Marquardt run with batch size 2; the two LM_cond parameters
# come from the command line — assumed floats, argv length not validated.
loss_list, eval_list = train(net,
                             inputs,
                             targets,
                             loss=MSE(),
                             optimizer=LM_cond(float(sys.argv[1]),
                                               float(sys.argv[2])),
                             iterator=BatchIterator(batch_size=2),
                             num_epochs=n_epochs,
                             eps=eps)

# for x, y in zip(inputs, targets)
#     predicted = net.forward(x)
#     print(x, predicted, y)

# Sample the trained net on a grid; test_loss is declared but never
# filled in this visible chunk.
ex = np.arange(0, 5, 0.2)
ey = []
test_loss = []
for val in ex:
    predicted = net.forward([val])
    ey.append(predicted)

#plt.scatter(range(0,len(loss_list)), loss_list)
           biases=np.array([0.0])),
    reLu()
])

n_epochs = 500
# Hoisted: the LM damping factor was hard-coded as 1e3 in two prints, the
# optimizer, and the plot label — keep it in one place.
alpha = 1e3
print('========= Método LM com busca linear =========')
print('treinando com 500 epochs')
print(f'alpha: {alpha:.1E}')
start_time = time.time()
loss_list = train(net,
                  inputs,
                  targets,
                  loss=MSE(),
                  optimizer=LM_cond(alpha),
                  iterator=BatchIterator(batch_size=5),
                  num_epochs=n_epochs)
end_time = time.time()
print(f'Tempo gasto no treinamento: {end_time - start_time}s')

# Sample the trained net on a dense grid for plotting.
ex = np.linspace(0, 20, 200)
ey = []
test_loss = []
for val in ex:
    predicted = net.forward([val])
    ey.append(predicted)

# NOTE(review): assumes train() returns a flat list of losses here —
# confirm this snippet's train() does not return a (losses, evals) pair.
plt.plot(
    ex,
    ey,
    label=f'alpha = {alpha:.1E}\nloss = {loss_list[-1]:.02f}')
# net = NeuralNet([
#     Linear(input_size=30, output_size=2),
#     Tanh(),
#     Linear(input_size=2, output_size=1),
#     Sigmoid()
# ])

n_epochs = 20

# BUG FIX: if train() raised before returning, loss_list was never bound
# and plt.scatter below crashed with NameError; start from an empty list.
loss_list = []
try:
    # Full-batch LM; a singular normal-equations matrix aborts training.
    loss_list = train(net,
                      inputs,
                      targets,
                      loss=MSE(),
                      optimizer=LM_cond(1e4, 1e2),
                      iterator=BatchIterator(len(inputs)),
                      num_epochs=n_epochs)
except np.linalg.LinAlgError:
    print('Interrompido por matriz singular')

# Score the test set with the trained net.
y_pred = np.array([net.forward(x) for x in X_test])

plt.title("Erro quadrático x Tempo")
plt.xlabel("número de iterações")
plt.ylabel("erro quadrático")
# Plot against the number of *recorded* epochs, not n_epochs — the two
# differ whenever training was interrupted.
plt.scatter(list(range(0, len(loss_list))), loss_list)
plt.savefig('Figuras/Fraud/Adam_IMP.png', format='png')
plt.show()
    Sigmoid()
])

# net = NeuralNet([
#     Linear(input_size=30, output_size=2),
#     Tanh(),
#     Linear(input_size=2, output_size=1), 
#     Sigmoid()
# ])


n_epochs = 10

# BUG FIX: if train() raised before returning, loss_list was never bound
# and plt.scatter below crashed with NameError; start from an empty list.
# Also normalized the tab-indented try-body to 4-space indentation and
# dropped the unused exception binding.
loss_list = []
try:
    # Full-batch LM; a singular normal-equations matrix aborts training.
    loss_list = train(net, inputs, targets, loss=MSE(),
                      optimizer=LM_cond(1e3, 1e2),
                      iterator=BatchIterator(len(inputs)),
                      num_epochs=n_epochs)
except np.linalg.LinAlgError:
    print('Interrompido por matriz singular')

# Score the test set with the trained net.
y_pred = np.array([net.forward(x) for x in X_test])

plt.title("Erro quadrático x Tempo")
plt.xlabel("número de iterações")
plt.ylabel("erro quadrático")
plt.scatter(list(range(0, len(loss_list))), loss_list)
# NOTE(review): the y-axis upper bound is len(loss_list) — an epoch count,
# not a loss magnitude. That is unlikely to be the intended scale; confirm
# whether max(loss_list) was meant.
plt.axis([0, len(loss_list), 0, len(loss_list)])
# ===== Esempio n. 10 =====
# 0
# Targets for the four training samples (presumably the XOR truth table —
# inputs are defined outside this chunk, TODO confirm).
targets = np.array([[0], [1], [1], [0]])

# 2-4-4-1 network, sigmoid activation throughout.
net = NeuralNet([
    Linear(input_size=2, output_size=4), Sigmoid(),
    Linear(input_size=4, output_size=4), Sigmoid(),
    Linear(input_size=4, output_size=1), Sigmoid(),
])

n_epochs = 10000
# Log-loss SGD over full batches of 4.
loss_list = train(net, inputs, targets,
                  loss=Log_loss(),
                  optimizer=SGD(lr=1e-5),
                  iterator=BatchIterator(4),
                  num_epochs=n_epochs)

# Show each input next to the net's prediction and the true label.
for sample, label in zip(inputs, targets):
    print(sample, net.forward(sample), label)

plt.title("Erro quadrático x Tempo")
plt.xlabel("número de iterações")
plt.ylabel("erro quadrático")
plt.scatter(list(range(0, n_epochs)), loss_list)
plt.show()