training_accuracy = []
loss_at_print = []  # losses recorded every print_step epochs

### Training of the model
for e in range(nb_epochs + 1):
    for b in range(0, train_input.size(0), mini_batch_size):

        ##Forward propagation
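        # (.t() feeds the mini-batch with one sample per column, which the custom modules appear to expect)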
        output = model.forward(train_input.narrow(0, b, mini_batch_size).t())

        #Calculate loss
        loss = criterion.forward(
            output,
            train_target_hot.narrow(0, b, mini_batch_size).t())

        # Reset accumulated gradients before backpropagation
        model.zero_grad()

        ##Backpropagation
        #Calculate grad of loss
        loss_grad = criterion.backward()

        #Propagate the gradient through the model
        model.backward(loss_grad)

        #Update parameters
        model.grad_step(lr=lr)

    if e % print_step == 0:
        print(f'Epoch: {e}, Loss: {loss}')
        #Record the loss for later inspection
        loss_at_print.append(float(loss))
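
# The loop above assumes a small hand-written framework in which every module
# exposes forward/backward/zero_grad/grad_step and the criterion exposes
# forward/backward. The classes below are a minimal sketch of that interface;
# the class names and method bodies are illustrative assumptions, only the
# method names come from the loop itself.

import torch

class SketchLinear:
    """Fully connected layer exposing the interface used by the loop."""
    def __init__(self, in_dim, out_dim):
        self.w = torch.randn(out_dim, in_dim) / in_dim ** 0.5
        self.b = torch.zeros(out_dim, 1)
        self.dw = torch.zeros_like(self.w)
        self.db = torch.zeros_like(self.b)

    def forward(self, x):
        # x has one sample per column, shape (in_dim, batch)
        self.x = x
        return self.w @ x + self.b

    def backward(self, grad_out):
        # Accumulate parameter gradients, return gradient w.r.t. the input
        self.dw += grad_out @ self.x.t()
        self.db += grad_out.sum(dim=1, keepdim=True)
        return self.w.t() @ grad_out

    def zero_grad(self):
        # Reset accumulated gradients (not the weights themselves)
        self.dw.zero_()
        self.db.zero_()

    def grad_step(self, lr):
        # Plain SGD update
        self.w -= lr * self.dw
        self.b -= lr * self.db

class SketchMSE:
    """Criterion with the forward/backward pair called in the loop."""
    def forward(self, pred, target):
        self.diff = pred - target
        return (self.diff ** 2).mean()

    def backward(self):
        # Gradient of the mean squared error w.r.t. the predictions
        return 2 * self.diff / self.diff.numel()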
### Example 2

pytorch_training_accuracy = []

### Training of the framework_model
for e in range(nb_epochs + 1):
    for b in range(0, train_input.size(0), mini_batch_size):

        #Train the mini deep learning framework
        
        ##Forward propagation
        framework_output = framework_model.forward(train_input.narrow(0, b, mini_batch_size).t())

        #Calculate loss
        framework_loss = framework_criterion.forward(
            framework_output,
            train_target_hot.narrow(0, b, mini_batch_size).t())
        
        # Reset accumulated gradients before backpropagation
        framework_model.zero_grad()
        
        ##Backpropagation
        #Calculate grad of loss
        loss_grad = framework_criterion.backward()

        #Propagate the gradient through the framework_model
        framework_model.backward(loss_grad)

        #Update parameters
        framework_model.grad_step(lr=lr)

        #Train the same architecture with the PyTorch nn package, for comparison
        ##Forward propagation (nn modules take inputs with one sample per row, so no .t() here)
        pytorch_output = pytorch_model(train_input.narrow(0, b, mini_batch_size))
        #Calculate loss
        pytorch_loss = pytorch_criterion(
            pytorch_output,
            train_target_hot.narrow(0, b, mini_batch_size))
        # Reset accumulated gradients
        pytorch_model.zero_grad()
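        # Note (assumption): the usual continuation of this PyTorch branch is
        #   pytorch_loss.backward()
        # followed by a parameter update, e.g. plain SGD inside torch.no_grad():
        #   for p in pytorch_model.parameters(): p -= lr * p.grad
        # which mirrors criterion.backward() and grad_step() in the custom loop.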