Example #1
    def run(self, epochs):
        """
        Run the recipe for :code:`epochs` epochs.

        Args:
            epochs (int): number of epochs

        Returns:
            the callbacks state after the last epoch
        """
        self.to(self.device)
        for epoch in range(epochs):
            self.callbacks('on_epoch_start')
            for batch in self.loader:
                self.callbacks.update_state({'batch': batch})
                batch = tu.send_to_device(batch,
                                          self.device,
                                          non_blocking=True)
                self.callbacks.update_state({'batch_gpu': batch})

                self.callbacks('on_batch_start')
                out = self.call_fun(batch)
                out = tu.send_to_device(out, 'cpu', non_blocking=False)
                self.callbacks.update_state(out)
                self.callbacks('on_batch_end')
            self.callbacks('on_epoch_end')
        return self.callbacks.state
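
The loop above assumes a callbacks runner that can be called with an event name, exposes an update_state method, and keeps a shared state dict. Below is a minimal sketch of such an object under those assumptions; the name CallbacksRunner and its internals are illustrative, not the library's actual implementation.

class CallbacksRunner:
    """Minimal callbacks dispatcher matching the protocol used above (illustrative sketch)."""

    def __init__(self, callbacks):
        # callbacks: objects that may define on_epoch_start, on_batch_start, on_batch_end, ...
        self.callbacks = list(callbacks)
        self.state = {}

    def update_state(self, updates):
        # merge new values (batch, model outputs, metrics, ...) into the shared state
        self.state.update(updates)

    def __call__(self, event_name):
        # dispatch the event to every callback that implements it, passing the shared state
        for cb in self.callbacks:
            handler = getattr(cb, event_name, None)
            if handler is not None:
                handler(self.state)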
Example #2
    def __call__(self, train_loader, test_loader, epochs=5):
        """
        Runs the recipe.

        Args:
            train_loader (DataLoader): Training set dataloader
            test_loader (DataLoader): Testing set dataloader
            epochs (int): number of epochs

        Returns:
            the trained model and the latest test metrics
        """
        self.state['iters'] = 0
        for epoch in range(epochs):
            self.state['epoch'] = epoch
            self.train_callbacks('on_epoch_start', self.state)
            for i, batch in enumerate(train_loader):
                self.state['epoch_batch'] = i
                self.state['batch'] = batch
                batch = tu.send_to_device(batch,
                                          self.device,
                                          non_blocking=True)
                self.state['batch_gpu'] = batch

                self.train_callbacks('on_batch_start', self.state)

                out = self.model.train_step(batch, self.opt)
                out = tu.send_to_device(out, 'cpu', non_blocking=True)
                self.state.update(out)

                self.train_callbacks('on_batch_end', self.state)

                if self.state['iters'] % self.test_every == 0:
                    self.model.eval()
                    metrics = self.evaluate(test_loader)
                    self.state['test_metrics'] = metrics
                    self.model.train()
                self.state['iters'] += 1
            self.train_callbacks('on_epoch_end', self.state)

        self.model.eval()
        return self.model, self.state['test_metrics']
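
Examples #2 and #3 assume the model exposes train_step(batch, opt) and validation_step(batch) methods that return a dict of outputs which is moved to the CPU and merged into the recipe state. A minimal sketch of a model with that interface, assuming (input, target) batches; the class and its fields are illustrative only.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyClassifier(nn.Module):
    """Minimal model exposing the train_step / validation_step interface assumed above (sketch)."""

    def __init__(self, in_features, n_classes):
        super().__init__()
        self.fc = nn.Linear(in_features, n_classes)

    def forward(self, x):
        return self.fc(x)

    def train_step(self, batch, opt):
        x, y = batch
        opt.zero_grad()
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        loss.backward()
        opt.step()
        # the returned dict is what the recipe sends back to CPU and merges into its state
        return {'loss': loss.detach(), 'pred': logits.detach()}

    def validation_step(self, batch):
        x, y = batch
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        return {'loss': loss, 'pred': logits}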
Example #3
    def evaluate(self, test_loader):
        """
        Evaluate the model on :code:`test_loader` with gradients disabled,
        running the test callbacks for each batch.

        Returns:
            a deep copy of the test metrics
        """
        with torch.no_grad():
            self.test_state = self.state
            self.test_state['metrics'] = {}
            self.test_callbacks('on_epoch_start', self.test_state)
            for batch in test_loader:
                self.test_state['batch'] = batch
                batch = tu.send_to_device(batch,
                                          self.device,
                                          non_blocking=True)
                self.test_state['batch_gpu'] = batch

                self.test_callbacks('on_batch_start', self.test_state)
                out = self.model.validation_step(batch)
                out = tu.send_to_device(out, 'cpu', non_blocking=True)
                self.test_state.update(out)
                self.test_callbacks('on_batch_end', self.test_state)

            self.test_callbacks('on_epoch_end', self.test_state)
            return copy.deepcopy(self.test_state['metrics'])
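
evaluate only returns what the test callbacks put into state['metrics']. A hypothetical accuracy callback filling that dict could look like the sketch below; it assumes the dispatcher forwards events to methods of registered callbacks, that batches are (x, y) pairs, and that validation_step outputs include a 'pred' tensor as in the model sketch above. It is not the library's built-in metric.

class AccuracyCallback:
    """Illustrative callback that accumulates accuracy into state['metrics'] (sketch)."""

    def on_epoch_start(self, state):
        self.correct = 0
        self.total = 0

    def on_batch_end(self, state):
        # state['batch'] is the CPU-side batch, state['pred'] the CPU-side model output
        _, y = state['batch']
        pred = state['pred'].argmax(dim=1)
        self.correct += (pred == y).sum().item()
        self.total += y.numel()

    def on_epoch_end(self, state):
        state['metrics']['accuracy'] = self.correct / max(self.total, 1)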
Example #4
    def after_train(self):
        """
        Run the model's :code:`after_train` hook with gradients disabled and
        let the test callbacks collect the resulting metrics.

        Returns:
            a deep copy of the test metrics
        """
        with torch.no_grad():
            self.test_state = self.state
            self.test_state['metrics'] = {}
            self.test_callbacks('on_epoch_start', self.test_state)

            out = self.model.after_train()
            out = tu.send_to_device(out, 'cpu', non_blocking=True)
            self.test_state.update(out)

            self.test_callbacks('on_epoch_end', self.test_state)
            return copy.deepcopy(self.test_state['metrics'])
Example #5
    def __getitem__(self, i):
        """
        Returns:
            The ith element of the underlying dataset or its cached value if
            available
        """
        if self.cache[i] is None:
            self.cache[i] = tu.send_to_device(self.ds[i],
                                              self.device,
                                              non_blocking=True)

        x, *y = self.cache[i]

        if self.transform is not None:
            x = self.transform(x)

        return [x] + y
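
This __getitem__ belongs to a caching dataset wrapper with ds, device, transform and cache attributes, where each cached item is an (input, *targets) tuple already moved to the device. A minimal sketch of a class that could host it, under those assumptions; the name and constructor are illustrative, not the library's actual class.

import torch
from torch.utils.data import Dataset

class DeviceCachedDataset(Dataset):
    """Illustrative wrapper that lazily caches items of an underlying dataset on a device (sketch)."""

    def __init__(self, ds, device='cpu', transform=None):
        self.ds = ds
        self.device = device
        self.transform = transform
        # one slot per item; filled on first access by a __getitem__ like the one above
        self.cache = [None] * len(ds)

    def __len__(self):
        return len(self.ds)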