Example #1
import torch
import numpy as np

import regularization  # project-local module providing L1_Loss_calc


def test(model, device, test_loader, criterion, L1_loss_enable=False):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            #test_loss += F.nll_loss(output, target, reduction='sum').item()  # alternative: summed batch loss
            test_loss += criterion(output, target).item()  # criterion with default 'mean' reduction averages over the batch

            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

        #test_loss /= len(test_loader.dataset)  # for F.nll_loss with reduction='sum'
        test_loss /= len(test_loader)  # average of per-batch means for a 'mean'-reduction criterion such as nn.CrossEntropyLoss()

        if L1_loss_enable:
            regloss = regularization.L1_Loss_calc(model, 0.0005)
            regloss /= len(test_loader.dataset)  # normalize by the number of samples (the whole test set here)
            test_loss += regloss

    print(
        '\nTest set: Average loss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))

    acc = 100. * correct / len(test_loader.dataset)
    return np.round(acc, 2), test_loss
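
This example (and the next) calls regularization.L1_Loss_calc(model, 0.0005), a project-local helper whose source is not shown. A minimal sketch of what such a helper might look like, assuming it returns factor * sum(|w|) over all model parameters (the usual L1 penalty); the body below is a guess matching the call signature, not the original implementation:

import torch


def L1_Loss_calc(model, factor=0.0005):
    # Hypothetical sketch: L1 penalty as factor * sum of absolute parameter values.
    # Returned as a tensor so that, in train(), adding it to the loss keeps it
    # inside the autograd graph.
    l1 = torch.zeros((), device=next(model.parameters()).device)
    for param in model.parameters():
        l1 = l1 + param.abs().sum()
    return factor * l1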
Example #2
import numpy as np
from tqdm import tqdm

import regularization  # project-local module providing L1_Loss_calc


def train(model,
          device,
          train_loader,
          optimizer,
          scheduler,
          criterion,
          L1_loss_enable=False):
    model.train()
    pbar = tqdm(train_loader)
    train_loss = 0
    correct = 0
    processed = 0
    for batch_idx, (data, target) in enumerate(pbar):
        # get samples
        data, target = data.to(device), target.to(device)

        # Init
        optimizer.zero_grad()
        y_pred = model(data)
        loss = criterion(y_pred, target)

        if L1_loss_enable:
            regloss = regularization.L1_Loss_calc(model, 0.0005)
            regloss /= len(data)  # normalize by the batch size
            loss += regloss  # penalty stays in the graph, so backward() includes it

        train_loss += loss.item()

        # Backpropagation
        loss.backward()
        optimizer.step()
        scheduler.step()

        # Update pbar-tqdm
        pred = y_pred.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)

        pbar.set_description(
            f'Loss={train_loss / (batch_idx + 1):0.6f} '
            f'Batch_id={batch_idx} Accuracy={100 * correct / processed:0.2f}')

    train_loss /= len(train_loader)
    acc = 100. * correct / len(train_loader.dataset)  # processed == dataset size after a full epoch
    return np.round(acc, 2), np.round(train_loss, 6)
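
A sketch of how the two functions might be wired together for a run. The toy model, random tensors, and OneCycleLR settings below are illustrative assumptions, not part of the original source; only the call signatures of train() and test() come from the examples above:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins (assumptions): a linear classifier on fake 28x28 inputs.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
train_loader = DataLoader(
    TensorDataset(torch.randn(256, 1, 28, 28), torch.randint(0, 10, (256,))),
    batch_size=64, shuffle=True)
test_loader = DataLoader(
    TensorDataset(torch.randn(64, 1, 28, 28), torch.randint(0, 10, (64,))),
    batch_size=64)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
epochs = 2
# train() calls scheduler.step() once per batch, so a per-batch scheduler
# such as OneCycleLR fits the loop above.
scheduler = optim.lr_scheduler.OneCycleLR(
    optimizer, max_lr=0.1, epochs=epochs, steps_per_epoch=len(train_loader))

for epoch in range(epochs):
    train_acc, train_loss = train(model, device, train_loader, optimizer,
                                  scheduler, criterion, L1_loss_enable=True)
    test_acc, test_loss = test(model, device, test_loader, criterion,
                               L1_loss_enable=True)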