Example #1
import torch.nn.functional as F


def train(model, optimizer, train_loader, val_loader, epochs=10):
    global device
    for e in range(epochs):
        model.train()
        train_loss = 0  # reset the running statistics at the start of every epoch
        train_correct = 0
        for batch_idx, (data, labels) in enumerate(train_loader):
            data, labels = data.to(device), labels.to(device)

            optimizer.zero_grad()
            output = model(data)

            loss = F.nll_loss(input=output, target=labels)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            pred = output.max(
                dim=1,
                keepdim=True)[1]  # get the index of the max log-probability
            train_correct += pred.eq(labels.view_as(pred)).cpu().sum().item()

        train_loss /= len(train_loader)  # loss.item() is already a per-batch mean
        train_correct /= len(train_loader.dataset)
        if val_loader:
            val_loss, val_acc = test(model=model, loader=val_loader)
        else:
            val_loss, val_acc = float('nan'), float('nan')  # keeps the format strings below valid

        print(
            f'Epoch [{e + 1}/{epochs}] Train Loss: {train_loss:.3f}, Val Loss: {val_loss:.3f}'
        )
        print(
            f'Epoch [{e + 1}/{epochs}] Train ACC:  {train_correct:.3f},  Val ACC:  {val_acc:.3f}'
        )
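
A minimal usage sketch for train: it relies on the module-level device global and on the test helper from Example #8 for validation. The tiny MLP and MNIST pipeline below are illustrative stand-ins, not part of the original code.

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# any module whose forward returns log-probabilities works with F.nll_loss
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10),
                      nn.LogSoftmax(dim=1)).to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=64, shuffle=True)
val_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.ToTensor()),
    batch_size=1000)

train(model, optimizer, train_loader, val_loader, epochs=2)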
Example #2
def model_testing(model,
                  device,
                  test_dataloader,
                  test_acc,
                  test_losses,
                  misclassified=None):
    if misclassified is None:  # avoid the mutable-default-argument pitfall
        misclassified = []

    model.load_state_dict(torch.load(model_dir))  # load the saved weights; model_dir must exist at module level
    model.eval()
    test_loss = 0
    correct = 0

    with torch.no_grad():

        for index, (data, target) in enumerate(test_dataloader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)

            for d, i, j in zip(data, pred, target):
                if i != j:
                    misclassified.append([d.cpu(), i[0].cpu(), j.cpu()])

            test_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_dataloader.dataset)
    test_losses.append(test_loss)

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_dataloader.dataset),
            100. * correct / len(test_dataloader.dataset)))

    test_acc.append(100. * correct / len(test_dataloader.dataset))
    return misclassified
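
model_testing mutates the test_acc and test_losses lists in place and returns the accumulated misclassified samples; a sketch of a typical call, assuming model_dir points at a previously saved state_dict:

test_acc, test_losses = [], []
misclassified = model_testing(model, device, test_dataloader,
                              test_acc, test_losses)
print(f'{len(misclassified)} misclassified, accuracy {test_acc[-1]:.2f}%')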
Example #3
    def validation_step(self, batch, batch_num):
        """Lightning validation hook: compute and log the NLL loss for one batch."""
        x, target = batch

        output = self.forward(x)
        loss = F.nll_loss(output, target)

        logger = {'val_loss': loss}
        return {'val_loss': loss, 'log': logger}
Example #4
    def training_step(self, batch, batch_num):
        """Lightning training hook: the 'loss' key is what the trainer backpropagates."""
        x, target = batch

        output = self.forward(x)
        loss = F.nll_loss(output, target)

        logger = {'train_loss': loss}
        return {'loss': loss, 'log': logger}
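
Both hooks follow the pre-1.0 PyTorch Lightning convention where a 'log' dict in the return value drives the logger. A minimal surrounding module, with the layer size and optimizer as illustrative assumptions; the two hooks above slot in unchanged:

import torch
import torch.nn.functional as F
import pytorch_lightning as pl


class LitClassifier(pl.LightningModule):
    # paste validation_step (Example #3) and training_step (Example #4) here

    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        return F.log_softmax(self.fc(x.view(x.size(0), -1)), dim=1)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)

A pl.Trainer(max_epochs=3).fit(LitClassifier(), train_loader, val_loader) call then drives both hooks.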
Example #5
    def training_step(self, batch: Tuple[Tensor, Tensor]) -> Tensor:
        """Run one forward and backward pass and return the batch loss."""
        features, target = batch
        features, target = features.to(config.device), target.to(config.device)
        self.optimizer.zero_grad()

        pred = self.model(features)

        loss: Tensor = F.nll_loss(pred, target)

        loss.backward()
        self.optimizer.step()
        return loss
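
This training_step belongs to a hand-rolled trainer class that owns self.model and self.optimizer and reads the target device from a config module. A sketch of the driving loop, where trainer, train_loader and config.epochs are all assumptions about that surrounding code:

for epoch in range(config.epochs):      # config.epochs is hypothetical
    for batch in train_loader:
        trainer.training_step(batch)    # one optimizer step per batch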
Example #6
def model_testing(model,
                  device,
                  test_dataloader,
                  test_acc,
                  test_losses,
                  misclassified=None):
    if misclassified is None:  # avoid the mutable-default-argument pitfall
        misclassified = []

    # model.load_state_dict(torch.load(model_dir))  # optionally reload saved weights
    model.eval()
    test_loss = 0
    correct = 0
    class_correct = [0.0] * 10
    class_total = [0.0] * 10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    with torch.no_grad():

        for index, (data, target) in enumerate(test_dataloader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)
            c = pred.squeeze(1).eq(target)  # per-sample correctness, shape [batch]

            for d, i, j in zip(data, pred, target):
                if i != j:
                    misclassified.append([d.cpu(), i[0].cpu(), j.cpu()])

            # per-class bookkeeping (CIFAR-10 labels)
            for t, hit in zip(target, c):
                class_correct[t.item()] += hit.item()
                class_total[t.item()] += 1

            test_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_dataloader.dataset)
    test_losses.append(test_loss)

    for i in range(10):  # per-class accuracy over the whole test set
        if class_total[i] > 0:
            print('Accuracy of %5s : %2d %%' %
                  (classes[i], 100 * class_correct[i] / class_total[i]))

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_dataloader.dataset),
            100. * correct / len(test_dataloader.dataset)))

    test_acc.append(100. * correct / len(test_dataloader.dataset))
    return misclassified
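
Unlike Example #2, this variant also tracks per-class statistics, which makes systematic confusions between the ten CIFAR-10 categories easy to spot.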
Example #7
def model_training(model,
                   device,
                   train_dataloader,
                   optimizer,
                   train_acc,
                   train_losses,
                   l1_loss=False):

    model.train()
    pbar = tqdm(train_dataloader)
    correct = 0
    processed = 0

    for batch_idx, (data, target) in enumerate(pbar):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        y_pred = model(data)
        loss = F.nll_loss(y_pred, target)

        # Optional L1 regularisation
        if l1_loss:
            lambda_l1 = 0.0001
            l1 = 0
            for p in model.parameters():
                l1 = l1 + p.abs().sum()
            loss = loss + lambda_l1 * l1  # apply the penalty once, after the sum over all parameters

        train_losses.append(loss.item())  # store a float, not the graph-attached tensor
        loss.backward()
        optimizer.step()

        pred = y_pred.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)
        pbar.set_description(
            f'Loss={loss.item():.4f} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}'
        )
        train_acc.append(100 * correct / processed)
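
Because one loss and one accuracy reading are appended per batch, the train_losses and train_acc histories grow by len(train_dataloader) entries per epoch. A sketch of the driving loop, with epochs and the other objects assumed to exist:

train_acc, train_losses = [], []
for epoch in range(1, epochs + 1):
    model_training(model, device, train_dataloader, optimizer,
                   train_acc, train_losses, l1_loss=True)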
Example #8
def test(model, loader):
    global device
    model.eval()
    loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)

            output = model(data)
            loss += F.nll_loss(output, target,
                               reduction='sum').item()  # sum up batch loss
            pred = output.max(
                dim=1,
                keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).cpu().sum().item()

    loss /= len(loader.dataset)
    return loss, correct / len(loader.dataset)
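
Accumulating the batch losses with reduction='sum' and dividing by len(loader.dataset) yields a true per-sample average regardless of batch size; this is the helper that Example #1 calls for validation.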
Example #9
def model_training(model,
                   device,
                   train_dataloader,
                   optimizer,
                   train_acc,
                   train_losses,
                   l1_loss=False):

    model.train()
    pbar = tqdm(train_dataloader)
    correct = 0
    processed = 0

    for batch_idx, (data, target) in enumerate(pbar):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        y_pred = model(data)
        loss = F.nll_loss(y_pred, target)

        # Optional L1 regularisation
        if l1_loss:
            lambda_l1 = 0.0001
            l1 = 0
            for p in model.parameters():
                l1 = l1 + p.abs().sum()
            loss = loss + lambda_l1 * l1  # apply the penalty once, after the sum over all parameters

        train_losses.append(loss.item())  # store a float, not the graph-attached tensor
        loss.backward()
        optimizer.step()

        pred = y_pred.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        processed += len(data)

        pbar.set_description(
            f'Loss={loss.item():.4f} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}'
        )
        train_acc.append(100 * correct / processed)

    # checkpoint once per epoch rather than after every batch; model_dir must exist at module level
    torch.save(model.state_dict(), model_dir)
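
This variant of Example #7 also checkpoints the weights; model_dir must be defined at module level before the function runs.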
Example #10
def test(network, testloader, writer, epoch, i):
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in testloader:
            # a single .to() suffices; the chained .cuda() call was redundant
            data, target = data.to(network.device), target.to(network.device)
            output = network(data)
            # size_average is deprecated; reduction='sum' is the modern equivalent
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(testloader.dataset)
        writer.add_scalar('Test loss', test_loss, epoch * len(testloader) + i)
        print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
              format(test_loss, correct, len(testloader.dataset),
                     100. * correct / len(testloader.dataset)))
    return
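
The writer argument is expected to be a TensorBoard SummaryWriter; a minimal setup, with the log directory as an illustrative choice:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='runs/mnist')
test(network, testloader, writer, epoch=0, i=0)
writer.close()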