Example #1
import numpy as np
import torch

# CNNModel (the network definition) and unpickle (the CIFAR-10 batch loader)
# are assumed to be defined elsewhere in the project.

def main():
    trained_model = './trained_model.pth'
    test_batch_dir = './cifar-10/test_batch'

    # Load the trained weights, move the model to the GPU, and switch to eval mode.
    classifier = CNNModel()
    classifier.load_state_dict(torch.load(trained_model))
    classifier.cuda()
    classifier.eval()

    # Load the CIFAR-10 test batch and reshape the flat rows into (N, 3, 32, 32).
    test_x, test_y = unpickle(test_batch_dir)
    test_x = torch.tensor(
        np.reshape(test_x, (len(test_x), 3, 32, 32))).to('cuda', dtype=torch.float)
    test_y = torch.tensor(test_y).cuda()

    classes = [
        'Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog',
        'Horse', 'Ship', 'Truck'
    ]

    # Calculate the classifier's accuracy on the test batch.
    print("Calculating accuracy...")
    correct = 0
    total = len(test_x)

    with torch.no_grad():
        out = classifier(test_x)
        _, predicted = torch.max(out, 1)

        # Count correct predictions and report the overall accuracy.
        correct += (predicted == test_y).sum().item()
        print('Accuracy: %.2f %%' % (correct / total * 100))
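
Example #1 relies on a project-specific unpickle helper (and a CNNModel class) that are not shown. A minimal sketch of such a helper, assuming the standard CIFAR-10 python batch format (a byte-keyed dict with b'data' and b'labels'), might look like this:

import pickle
import numpy as np

def unpickle(path):
    # Hypothetical helper: load one CIFAR-10 python batch and return
    # (data, labels) to match the call in the example above.
    with open(path, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    data = np.array(batch[b'data'])       # shape (10000, 3072), uint8 pixel rows
    labels = np.array(batch[b'labels'])   # shape (10000,), class ids 0-9
    return data, labels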
Example #2
            # (Fragment from inside a training loop.) Accumulate the batch loss
            # and the number of correct, non-padding predictions; label 0 is
            # padding, so matches at padded positions are subtracted back out.
            total_loss += loss.item()
            total_acc += ((y_pred == y_batch).sum().item()
                          - (y_batch == 0).sum().item())

            # Update progressbar
            widgets[0] = FormatLabel('{}/{}'.format(
                (i * batch_size + x_batch.size(0)), x_train.shape[0]))
            widgets[-1] = FormatLabel(
                'train_loss:{:.4f}, train_acc:{:.4f}'.format(
                    total_loss / (i + 1), total_acc / nonzeros))

            pbar.update(i * batch_size + x_batch.size(0))
        pbar.finish()

        # Switch to evaluation mode and run a validation pass.
        model.eval()
        total_loss, total_acc, total_ed, nonzeros = 0, 0, 0, 0
        for i, (x_batch, y_batch) in enumerate(valid_loader):
            # Variable wrappers are deprecated; just move the tensors to the GPU.
            x_batch = x_batch.cuda()
            y_batch = y_batch.cuda()

            output = model(x_batch)
            # Flatten the sequence dimension so the loss is computed per token.
            loss = loss_func(output.view(-1, output.size(-1)),
                             y_batch.view(-1))

            # Take the arg-max prediction at each position, zero out predictions
            # at padding positions (label 0), and count the non-padding targets.
            y_pred = torch.max(output, -1)[1]
            y_pred = y_pred.masked_fill_(y_batch == 0, 0)
            nonzeros += (y_batch != 0).sum().item()

            total_loss += loss.item()
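
Example #2 is cut off before the validation metrics are reported. Assuming the loop mirrors the training-side formulas shown above (mean loss per batch, accuracy over non-padding positions), a plausible continuation would be:

            # Hypothetical continuation: accumulate masked accuracy as in training.
            total_acc += ((y_pred == y_batch).sum().item()
                          - (y_batch == 0).sum().item())

        # Hypothetical summary, reusing the same formulas as the training widgets.
        print('valid_loss:{:.4f}, valid_acc:{:.4f}'.format(
            total_loss / (i + 1), total_acc / nonzeros))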