Example #1
# Imports shared by the examples in this section; `regularization` is a
# project-local module that provides L1_Loss_calc.
import numpy as np
import torch
from tqdm import tqdm

import regularization


def test(model, device, test_loader, criterion, L1_loss_enable=False):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)

            #test_loss += criterion(output, target, reduction='sum').item()  # sum up batch loss # criterion = F.nll_loss
            test_loss += criterion(output, target).item()  # accumulate the per-batch mean loss # criterion = nn.CrossEntropyLoss()

            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

        #test_loss /= len(test_loader.dataset)  # criterion = F.nll_loss
        test_loss /= len(test_loader)  # criterion = nn.CrossEntropyLoss()

        if L1_loss_enable:
            regloss = regularization.L1_Loss_calc(model, 0.0005)
            regloss /= len(test_loader.dataset)  # normalise by the total test set size
            test_loss += regloss

    print(
        '\nTest set: Average loss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))

    acc = 100. * correct / len(test_loader.dataset)
    return np.round(acc, 2), test_loss
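
The commented-out lines above show why test_loss is divided by len(test_loader) rather than len(test_loader.dataset): nn.CrossEntropyLoss() returns a per-batch mean by default, whereas a criterion called with reduction='sum' returns a per-batch sum. A minimal sketch of the two normalisation conventions; the toy tensors below are illustrative only and not part of the original code.

import torch
import torch.nn as nn

# Toy logits/targets just to illustrate the two reduction conventions.
logits = torch.randn(8, 10)           # batch of 8 samples, 10 classes
targets = torch.randint(0, 10, (8,))

mean_loss = nn.CrossEntropyLoss()(logits, targets)                # per-batch mean
sum_loss = nn.CrossEntropyLoss(reduction='sum')(logits, targets)  # per-batch sum

# Accumulating mean_loss over batches -> divide the total by len(test_loader).
# Accumulating sum_loss over batches  -> divide the total by len(test_loader.dataset).
assert torch.allclose(sum_loss / 8, mean_loss)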
Example #2
def train(model,
          device,
          train_loader,
          criterion,
          optimizer,
          lr_scheduler,
          L1_loss_enable=False):
    model.train()
    pbar = tqdm(train_loader)
    train_loss = 0
    correct = 0
    processed = 0
    for batch_idx, (data, target) in enumerate(pbar):
        # get samples
        data, target = data.to(device), target.to(device)

        # Init
        # PyTorch accumulates gradients across backward passes, so the gradients
        # must be zeroed before each batch for the parameter update to be correct.
        optimizer.zero_grad()

        # Predict
        y_pred = model(data)

        # Calculate loss
        loss = criterion(y_pred, target)

        if L1_loss_enable:
            regloss = regularization.L1_Loss_calc(model, 0.0005)
            regloss /= len(data)  # normalise by the batch size
            loss += regloss

        train_loss += loss.item()

        # Backpropagation
        loss.backward()
        optimizer.step()

        # Update the tqdm progress bar
        pred = y_pred.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)

        #pbar.set_description(desc=f'Loss={loss.item():0.6f} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
        pbar.set_description(
            desc=f'Loss={train_loss/(batch_idx+1):0.6f} Batch_id={batch_idx+1} Accuracy={100*correct/processed:0.2f}')

    train_loss /= len(train_loader)  # average loss per batch
    acc = 100. * correct / len(train_loader.dataset)  # equivalently: 100. * correct / processed
    return np.round(acc, 2), np.round(train_loss, 6)
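
Both test() and train() call regularization.L1_Loss_calc, a project-local helper whose implementation is not shown in these examples. Below is a minimal sketch of what such a helper could look like, assuming it returns the L1 norm of all model parameters scaled by the given coefficient; this is an assumption about the module, not its actual code.

import torch


def L1_Loss_calc(model, l1_lambda=0.0005):
    # Assumed behaviour: l1_lambda * sum of absolute values of all parameters.
    l1_penalty = torch.zeros((), device=next(model.parameters()).device)
    for param in model.parameters():
        l1_penalty = l1_penalty + param.abs().sum()
    return l1_lambda * l1_penalty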
Example #3
def train(model,
          device,
          train_loader,
          optimizer,
          criterion,
          L1_loss_enable=False):
    model.train()
    pbar = tqdm(train_loader)
    train_loss = 0
    correct = 0
    processed = 0
    for batch_idx, (data, target) in enumerate(pbar):
        # get samples
        data, target = data.to(device), target.to(device)

        # Init
        optimizer.zero_grad()
        y_pred = model(data)
        loss = criterion(y_pred, target)

        if L1_loss_enable:
            regloss = regularization.L1_Loss_calc(model, 0.0005)
            regloss /= len(data)  # normalise by the batch size
            loss += regloss

        train_loss += loss.item()

        # Backpropagation
        loss.backward()
        optimizer.step()

        # Update the tqdm progress bar
        pred = y_pred.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)

        pbar.set_description(
            desc=f'Loss={train_loss/(batch_idx+1):0.6f} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')

    train_loss /= len(train_loader)  # average loss per batch
    acc = 100. * correct / len(train_loader.dataset)  # equivalently: 100. * correct / processed
    return np.round(acc, 2), np.round(train_loss, 6)
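
A hedged sketch of how these functions might be wired together for a full run. The model, synthetic data, and hyperparameters below are placeholders so the sketch is self-contained; they are not part of the original examples. Note that the train() in Example #2 accepts an lr_scheduler but never steps it inside the batch loop, so here the caller steps the scheduler once per epoch and uses the Example #3 signature (optimizer before criterion).

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Placeholder model and synthetic data; swap in the real network and DataLoaders.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).to(device)
train_loader = DataLoader(
    TensorDataset(torch.randn(512, 1, 28, 28), torch.randint(0, 10, (512,))),
    batch_size=64, shuffle=True)
test_loader = DataLoader(
    TensorDataset(torch.randn(128, 1, 28, 28), torch.randint(0, 10, (128,))),
    batch_size=64)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

EPOCHS = 10
for epoch in range(1, EPOCHS + 1):
    print(f'Epoch {epoch}')
    train_acc, train_loss = train(model, device, train_loader, optimizer,
                                  criterion, L1_loss_enable=False)
    test_acc, test_loss = test(model, device, test_loader, criterion)
    scheduler.step()  # the train() functions never step the scheduler themselves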