Code example #1
File: train.py  Project: xiaopantt/joelnet
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD()) -> None:
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            # forward pass and accumulate the batch loss into the epoch total
            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            # backpropagate the loss gradient and let the optimizer update the parameters
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
        print(epoch, epoch_loss)
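For reference, a minimal sketch of how this training loop is typically driven is the XOR toy problem; the module paths below (joelnet.train, joelnet.nn, joelnet.layers) are assumed from the joelnet package layout:

import numpy as np

# assumed joelnet module layout
from joelnet.train import train
from joelnet.nn import NeuralNet
from joelnet.layers import Linear, Tanh

# XOR, which a purely linear model cannot fit
inputs = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

net = NeuralNet([
    Linear(input_size=2, output_size=2),
    Tanh(),
    Linear(input_size=2, output_size=2)
])

train(net, inputs, targets)  # defaults: 5000 epochs, MSE loss, SGD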
Code example #2
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD(0.001),
          eps: float = -1) -> Tuple[List[float], List[int]]:

    loss_list = []   # total loss per epoch
    eval_list = []   # function-evaluation counter kept on the net, recorded per epoch
    net.n_eval = 0
    for epoch in range(num_epochs):
        n_iter = 0
        epoch_loss = 0.0

        print(f'================   EPOCH NUMBER {epoch + 1}   ================')
        for batch in iterator(inputs, targets):
            # expose the current iteration, batch and loss function to the network,
            # so the optimizer can reuse them (e.g. for extra function evaluations)
            net.n_iter = n_iter
            net.curr_batch = batch
            net.loss_f = loss

            predicted = net.forward(batch.inputs)
            curr_loss = loss.loss(predicted, batch.targets)
            epoch_loss += curr_loss
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
            n_iter = n_iter + 1

        eval_list.append(net.n_eval)

        print(epoch, epoch_loss)
        loss_list.append(epoch_loss)

        # early stopping once the epoch loss falls below the tolerance eps
        if eps > 0 and epoch_loss < eps:
            print('target precision reached')
            break

    return loss_list, eval_list
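Unlike the stock version, this variant returns two lists and accepts an eps tolerance, so callers have to unpack both values. A minimal sketch of a call, assuming net, inputs and targets are already built and using placeholder step size and tolerance, would be:

# hypothetical call; the SGD step size and eps are placeholder values
loss_hist, eval_hist = train(net, inputs, targets,
                             optimizer=SGD(1e-3),
                             num_epochs=100,
                             eps=1e-4)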
Code example #3

net = NeuralNet([
    Linear(input_size=1, output_size=2,
           weights=np.array([[1.0, 2.0]]),
           biases=np.array([0.0, 0.0])),
    reLu(),
    Linear(input_size=2, output_size=1,
           weights=np.array([[3.0], [4.0]]),
           biases=np.array([0.0])),
    reLu()
])



n_epochs = 1000

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
start_time = time.time()
loss_list, eval_list = train(net, inputs, targets, loss=MSE(),
                             optimizer=SGD(1e-5),
                             iterator=BatchIterator(batch_size=5),
                             num_epochs=n_epochs, eps=2000)
end_time = time.time()
print(f'Training time: {end_time - start_time}s')




# for x, y in zip(inputs, targets):
#     predicted = net.forward(x)
#     print(x, predicted, y)



#print(f'Levenberg-Marquardt with line search\nloss = {loss_list[len(loss_list) - 1]:.2f}')

ex = np.linspace(0,20,200)
Code example #4
net = NeuralNet([
    Linear(input_size=1, output_size=2,
           weights=np.array([[1.0, 2.0]]),
           biases=np.array([0.0, 0.0])),
    Linear(input_size=2,
           output_size=1,
           weights=np.array([[3.0], [4.0]]),
           biases=np.array([0.0]))
])

n_epochs = 2
eps = 1e-4

#loss_list = train(net, inputs,targets, optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3),iterator = BatchIterator(batch_size = 5), num_epochs = 1000)
loss_list, eval_list = train(net,
                             inputs,
                             targets,
                             loss=MSE(),
                             optimizer=LM_cond(float(sys.argv[1]),
                                               float(sys.argv[2])),
                             iterator=BatchIterator(batch_size=len(inputs)),
                             num_epochs=n_epochs,
                             eps=eps)

# for x, y in zip(inputs, targets):
#     predicted = net.forward(x)
#     print(x, predicted, y)

ex = np.arange(0, 5, 0.2)
ey = []
test_loss = []
for val in ex:
    predicted = net.forward([val])
Code example #5
File: fizzbuzz.py  Project: andrewKyres/nnlib
def binary_encode(x: int) -> List[int]:
    """
    10 digit binary encoding of x
    """
    return [x >> i & 1 for i in range(10)]


def binary_decode(bitlist: List) -> int:
    # inverse of binary_encode: bit i carries weight 2**i
    return sum(bit << i for i, bit in enumerate(bitlist))


inputs = np.array([binary_encode(x) for x in range(101, 1024)])

targets = np.array([fizz_buzz_encode(x) for x in range(101, 1024)])

net = NeuralNet([
    Linear(input_size=10, output_size=50),
    Tanh(),
    Linear(input_size=50, output_size=4)
])

train(net, inputs, targets, num_epochs=20, loss=MSE(), optimizer=SGD(lr=0.001))

inputs = np.array([binary_encode(x) for x in range(1, 101)])

targets = np.array([fizz_buzz_encode(x) for x in range(1, 101)])

labels = ["x", "fizz", "buzz", "fizzbuzz"]

test(net, inputs, targets, labels, binary_decode)
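fizz_buzz_encode is called above but not shown in the snippet. A minimal sketch of the usual 4-class one-hot encoding, assumed here to match the labels ["x", "fizz", "buzz", "fizzbuzz"], is:

def fizz_buzz_encode(x: int) -> List[int]:
    # one-hot target over [x, fizz, buzz, fizzbuzz]
    if x % 15 == 0:
        return [0, 0, 0, 1]
    elif x % 5 == 0:
        return [0, 0, 1, 0]
    elif x % 3 == 0:
        return [0, 1, 0, 0]
    else:
        return [1, 0, 0, 0]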
Code example #6
    Sigmoid()
])

# net = NeuralNet([
#     Linear(input_size=30, output_size=2),
#     Tanh(),
#     Linear(input_size=2, output_size=1), 
#     Sigmoid()
# ])


n_epochs = 10

#loss_list = train(net, inputs,targets, loss = MSE() ,optimizer = Adam(lr = 1e-2, gamma1 = 0.3, gamma2 = 0.3), iterator = BatchIterator(1024), num_epochs = n_epochs)
loss_list, eval_list = [], []
try:
    loss_list, eval_list = train(net, inputs, targets, loss=MSE(),
                                 optimizer=LM_cond(1e3, 1e2),
                                 iterator=BatchIterator(len(inputs)),
                                 num_epochs=n_epochs)
except np.linalg.LinAlgError:
    # raised by the linear algebra inside the optimizer if the system becomes singular
    print('Stopped: singular matrix')

y_pred = []
for x in X_test:
    y_pred.append(net.forward(x))
y_pred = np.array(y_pred)


plt.title("Squared error vs. time")
plt.xlabel("number of iterations")
plt.ylabel("squared error")
plt.scatter(list(range(0, len(loss_list))), loss_list)
plt.axis([0, len(loss_list), 0, len(loss_list)])