loss = criterion.forward(
            output,
            train_target_hot.narrow(0, b, mini_batch_size).t())

        # Reset accumulated gradients to zero before backprop.
        # NOTE(review): zero_grad clears the *gradients*, not the weights and
        # bias themselves, despite what the original comment claimed.
        model.zero_grad()

        ## Backpropagation
        # Gradient of the loss w.r.t. the model's output
        loss_grad = criterion.backward()

        # Propagate that gradient back through the model
        model.backward(loss_grad)

        # Update parameters with a plain gradient step (vanilla SGD)
        model.grad_step(lr=lr)

    # Periodically log the current loss and the test/train accuracies.
    if e % print_step == 0:
        # NOTE(review): "Epoc" is a typo for "Epoch" in the printed message.
        print(f'Epoc : {e}, Loss: {loss}')
        #Save loss in vector
        loss_at_print.append(float(loss))
        # NOTE(review): test_prediction is never used within this excerpt —
        # compute_nb_errors below presumably re-runs the forward pass itself;
        # confirm and consider dropping this line.
        test_prediction = model.forward(test_input.t())
        # Appends nb_errors / nb_samples * 100 — presumably an error rate in
        # percent, hence the `100 - ...` when printed as accuracy below. The
        # list name `test_accuracy` is misleading; verify against
        # compute_nb_errors before renaming.
        test_accuracy.append(
            float(compute_nb_errors(model, test_input, test_target)) /
            test_target.size(0) * 100)
        print(f'\tTest Accuracy : {100-test_accuracy[-1]}%')
        # NOTE(review): training_prediction is likewise unused in this excerpt.
        training_prediction = model.forward(train_input.t())
        training_accuracy.append(
            float(compute_nb_errors(model, train_input, train_target)) /
            train_target.size(0) * 100)
        print(f'\tTraining Accuracy : {100-training_accuracy[-1]}%')
# ----- Example #2 (second snippet scraped from the same source page) -----
        # Loss of the mini deep-learning framework model on this mini-batch
        framework_loss = framework_criterion.forward(framework_output,train_target_hot.narrow(0,b,mini_batch_size).t())
        
        # Reset accumulated gradients to zero before backprop.
        # NOTE(review): zero_grad clears the *gradients*, not the weights and
        # bias themselves, despite what the original comment claimed.
        framework_model.zero_grad()
        
        ## Backpropagation
        # Gradient of the loss w.r.t. the framework model's output
        loss_grad = framework_criterion.backward()

        # Propagate that gradient back through the framework_model
        framework_model.backward(loss_grad)

        # Update parameters with a plain gradient step
        framework_model.grad_step(lr=lr)

        # Train the reference model with PyTorch autograd for comparison
        pytorch_output = pytorch_model(train_input.narrow(0, b, mini_batch_size))
        # NOTE(review): unlike the framework criterion above, the target is
        # NOT transposed here — confirm the two criteria really expect
        # different target layouts.
        pytorch_loss = pytorch_criterion(pytorch_output, train_target_hot.narrow(0, b, mini_batch_size))
        pytorch_model.zero_grad()
        pytorch_loss.backward()
        pytorch_optimizer.step()

        
    # Periodically log both losses and the framework model's test accuracy.
    if e % print_step ==0 :
        # NOTE(review): "Epoc" is a typo for "Epoch" in the printed message.
        print(f'Epoc : {e}, Mini Deeplearning Framework Loss: {framework_loss}, Pytorch Loss:{pytorch_loss}')
        #Save loss in vector
        framework_loss_at_print.append(float(framework_loss))
        # Appends nb_errors / nb_samples * 100 — presumably an error rate in
        # percent, hence the `100 - ...` when printed as accuracy below.
        framework_test_accuracy.append(float(compute_nb_errors(framework_model,test_input, test_target)) / test_target.size(0) * 100)
        # NOTE(review): "tTest" looks like a broken "\t" escape in the
        # original message — confirm the intended output before changing it.
        print(f'\tMini Deeplearning Framework: tTest Accuracy : {100-framework_test_accuracy[-1]}%')