Example #1
from unittest import mock

import torch
from torchmetrics import Accuracy, AveragePrecision


# The snippet originally began mid-class; the class name and the nn.Module
# base are assumptions made to keep it self-contained.
class ModelWithMetrics(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 1)

        # One Accuracy/AveragePrecision pair per stage; wrapping `reset` in a
        # Mock (real method as side effect) keeps the metrics functional while
        # letting tests assert that reset was called.
        for stage in ["train", "val", "test"]:
            acc = Accuracy()
            acc.reset = mock.Mock(side_effect=acc.reset)
            ap = AveragePrecision(num_classes=1, pos_label=1)
            ap.reset = mock.Mock(side_effect=ap.reset)
            self.add_module(f"acc_{stage}", acc)
            self.add_module(f"ap_{stage}", ap)
Example #2
import torch.nn.functional as F
from pytorch_lightning.loops import Loop
from torchmetrics import Accuracy


class TestLoop(Loop):
    def __init__(self, lite, args, model, dataloader):
        super().__init__()
        self.lite = lite
        self.args = args
        self.model = model
        self.dataloader = dataloader
        self.dataloader_iter = None
        self.accuracy = Accuracy().to(lite.device)
        self.test_loss = 0

    @property
    def done(self) -> bool:
        return False

    def reset(self):
        self.dataloader_iter = enumerate(self.dataloader)
        self.test_loss = 0
        self.accuracy.reset()

    def advance(self) -> None:
        _, (data, target) = next(self.dataloader_iter)
        output = self.model(data)
        # Use reduction="sum" so that dividing by the dataset size in
        # on_run_end yields a true per-sample average.
        self.test_loss += F.nll_loss(output, target, reduction="sum")
        self.accuracy(output, target)

        if self.args.dry_run:
            raise StopIteration

    def on_run_end(self):
        test_loss = self.lite.all_gather(self.test_loss).sum() / len(
            self.dataloader.dataset)

        if self.lite.is_global_zero:
            # Accuracy.compute() returns a fraction in [0, 1]; scale to percent.
            print(
                f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: ({100 * self.accuracy.compute():.0f}%)\n"
            )
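
A minimal sketch of how this loop might be driven, assuming a LightningLite subclass as the entry point (the Runner name and its argument plumbing are illustrative, not part of the original):

from pytorch_lightning.lite import LightningLite


class Runner(LightningLite):
    def run(self, args, model, dataloader):
        model = self.setup(model)  # moves the model to self.device and wraps it
        dataloader = self.setup_dataloaders(dataloader)
        # Loop.run() calls reset(), then advance() repeatedly, stopping on
        # StopIteration (from the exhausted iterator or the dry-run branch).
        TestLoop(self, args, model, dataloader).run()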
Example #3
import torch
import torch.nn.functional as F
from pytorch_lightning import seed_everything
from pytorch_lightning.lite import LightningLite
from torch import optim
from torch.optim.lr_scheduler import StepLR
from torchmetrics import Accuracy
from torchvision import transforms as T
from torchvision.datasets import MNIST

# `Net` (the MNIST CNN) is assumed to be defined elsewhere in the example.


class Lite(LightningLite):
    """Lite is starting to look like a LightningModule."""
    def run(self, hparams):
        self.hparams = hparams
        seed_everything(hparams.seed)  # instead of torch.manual_seed(...)

        self.model = Net()
        [optimizer], [scheduler] = self.configure_optimizers()
        model, optimizer = self.setup(self.model, optimizer)

        if self.is_global_zero:
            # In multi-device training, this code will only run on the first process / GPU
            self.prepare_data()

        train_loader, test_loader = self.setup_dataloaders(
            self.train_dataloader(), self.test_dataloader())

        self.test_acc = Accuracy().to(self.device)

        # EPOCH LOOP
        for epoch in range(1, hparams.epochs + 1):

            # TRAINING LOOP
            self.model.train()
            for batch_idx, batch in enumerate(train_loader):
                optimizer.zero_grad()
                loss = self.training_step(batch, batch_idx)
                self.backward(loss)
                optimizer.step()

                if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
                    print(
                        f"Train Epoch: {epoch} "
                        f"[{(batch_idx + 1) * self.hparams.batch_size}/{len(train_loader.dataset)} "
                        f"({100.0 * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}"
                    )
                    if hparams.dry_run:
                        break

            scheduler.step()

            # TESTING LOOP
            self.model.eval()
            test_loss = 0
            with torch.no_grad():
                for batch_idx, batch in enumerate(test_loader):
                    test_loss += self.test_step(batch, batch_idx)
                    if hparams.dry_run:
                        break

            test_loss = self.all_gather(test_loss).sum() / len(
                test_loader.dataset)

            # test_acc.compute() returns a fraction in [0, 1]; scale to percent.
            print(
                f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: ({100 * self.test_acc.compute():.0f}%)\n"
            )
            self.test_acc.reset()

            if hparams.dry_run:
                break

        if hparams.save_model:
            self.save(model.state_dict(), "mnist_cnn.pt")

    # Methods for the `LightningModule` conversion

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        """Here you compute and return the training loss and compute extra training metrics."""
        x, y = batch
        logits = self.forward(x)
        loss = F.nll_loss(logits, y.long())
        return loss

    def test_step(self, batch, batch_idx):
        """Here you compute and return the testing loss and compute extra testing metrics."""
        x, y = batch
        logits = self.forward(x)
        # Sum (not mean) so dividing by the dataset size in `run` yields a
        # true per-sample average.
        loss = F.nll_loss(logits, y.long(), reduction="sum")
        self.test_acc(logits, y.long())
        return loss

    def configure_optimizers(self):
        optimizer = optim.Adadelta(self.model.parameters(), lr=self.hparams.lr)
        scheduler = StepLR(optimizer, step_size=1, gamma=self.hparams.gamma)
        return [optimizer], [scheduler]

    # Methods for the `LightningDataModule` conversion

    @property
    def transform(self):
        return T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])

    def prepare_data(self) -> None:
        MNIST("./data", download=True)

    def train_dataloader(self):
        train_dataset = MNIST("./data",
                              train=True,
                              download=False,
                              transform=self.transform)
        return torch.utils.data.DataLoader(train_dataset,
                                           batch_size=self.hparams.batch_size)

    def test_dataloader(self):
        test_dataset = MNIST("./data",
                             train=False,
                             download=False,
                             transform=self.transform)
        return torch.utils.data.DataLoader(test_dataset,
                                           batch_size=self.hparams.batch_size)
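
A minimal entry point for this class might look like the following; the hyperparameter values and the accelerator choice are assumptions (the original example presumably read them from the command line):

from argparse import Namespace

if __name__ == "__main__":
    hparams = Namespace(
        seed=1, epochs=2, lr=1.0, gamma=0.7, batch_size=64,
        log_interval=10, dry_run=False, save_model=False,
    )
    Lite(accelerator="cpu", devices=1).run(hparams)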
# Helper method, apparently refactored out of a class like Example #1's
# (note the `self` parameter): builds one mock-wrapped metric pair so tests
# can assert on `reset` calls.
def _create_metrics(self):
    acc = Accuracy()
    acc.reset = mock.Mock(side_effect=acc.reset)
    ap = AveragePrecision(num_classes=1, pos_label=1)
    ap.reset = mock.Mock(side_effect=ap.reset)
    return acc, ap
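
Called from an `__init__` like Example #1's, the helper would collapse the per-stage setup to (sketch):

for stage in ["train", "val", "test"]:
    acc, ap = self._create_metrics()
    self.add_module(f"acc_{stage}", acc)
    self.add_module(f"ap_{stage}", ap)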