Esempio n. 1
0
def test_tfds_to_numpy():
    """Smoke test: tfds_to_numpy wraps a tf.data.Dataset into an iterable."""
    inputs = np.random.randn(32, 128).astype(np.float32)
    labels = np.random.randint(0, 9, (32, )).astype(np.int64)

    dataset = tf.data.Dataset.from_tensor_slices((inputs, labels))
    wrapped = modules.tfds_to_numpy(dataset)
    assert hasattr(wrapped, "__iter__")
Esempio n. 2
0
    def train_cycle(self, epoch: int, loader: DataLoader) -> None:
        """
        Run one training epoch over ``loader``.

        For each batch: zeroes gradients, moves the batch to the target
        device, runs ``train_step``, backpropagates the loss, steps the
        optimizer, and logs scalar outputs. After the epoch, aggregates
        step outputs via ``train_end`` and logs the epoch-level metrics.

        Args:
            epoch (int): Current epoch index, used for metric logging.
            loader (torch.utils.data.DataLoader): Training data loader.
                A TensorFlow dataset is also accepted and converted with
                ``tfds_to_numpy``.
        """
        results = []
        # NOTE(review): len() is taken before the tfds conversion; this
        # assumes a tf dataset supports len() (finite, TF >= 2.3) -- confirm.
        loader_size = len(loader)

        if is_tfds(loader):
            loader = tfds_to_numpy(loader)

        self.model.train()
        with self.experiment.train():
            for step, batch in enumerate(loader):
                self.optimizer.zero_grad()
                batch = send(batch, self.device, self.non_blocking)
                # on_step_start()
                self.global_step += 1
                outputs = self.train_step(batch)
                self.backward(outputs["loss"])
                self.update_optimizer()

                if hasattr(self.pbar, "set_postfix"):
                    per = "{:1.0%}".format(step / loader_size)
                    self.pbar.set_postfix(OrderedDict(train_batch=per),
                                          refresh=True)  # type: ignore

                # Detach tensors so stored results do not retain the graph.
                outputs = {
                    key: val.detach().cpu() if isinstance(val, Tensor) else val
                    for key, val in outputs.items()
                }
                scalars = {k: v for k, v in outputs.items() if is_scalar(v)}
                self.experiment.log_metrics(scalars,
                                            step=self.global_step,
                                            epoch=epoch)
                results.append(outputs)
                self.manager.on_train_step_end(outputs)

            dic = self.train_end(results)  # pylint: disable=E1111
            # train_end may return None (see E1111 suppression above);
            # truthiness handles both None and an empty dict safely,
            # where len(dic) would raise TypeError on None.
            if dic:
                self.metrics["train"].update(dic)
                self.experiment.log_metrics(dic, step=epoch, epoch=epoch)
Esempio n. 3
0
    def val_cycle(self, epoch: int, loader: DataLoader) -> None:
        """
        Run one validation epoch over ``loader``.

        Evaluates the model under ``torch.no_grad()``: for each batch,
        moves it to the target device, runs ``val_step``, and logs scalar
        outputs. After the epoch, aggregates step outputs via ``val_end``
        and logs the epoch-level metrics.

        Args:
            epoch (int): Current epoch index, used for metric logging.
            loader (torch.utils.data.DataLoader): Validation data loader.
                A TensorFlow dataset is also accepted and converted with
                ``tfds_to_numpy``.
        """
        results = []
        # NOTE(review): len() is taken before the tfds conversion; this
        # assumes a tf dataset supports len() (finite, TF >= 2.3) -- confirm.
        loader_size = len(loader)

        if is_tfds(loader):
            loader = tfds_to_numpy(loader)

        self.model.eval()
        with self.experiment.validate(), torch.no_grad():
            for step, batch in enumerate(loader):
                batch = send(batch, self.device, self.non_blocking)
                self.global_step += 1
                # on_step_start()
                outputs = self.val_step(batch)  # pylint: disable=E1111

                if hasattr(self.pbar, "set_postfix"):
                    per = "{:1.0%}".format(step / loader_size)
                    self.pbar.set_postfix(OrderedDict(val_batch=per),
                                          refresh=True)  # type: ignore

                # No detach needed: no_grad() already prevents graph capture.
                outputs = {
                    key: val.cpu() if isinstance(val, Tensor) else val
                    for key, val in outputs.items()
                }
                scalars = {k: v for k, v in outputs.items() if is_scalar(v)}
                self.experiment.log_metrics(scalars,
                                            step=self.global_step,
                                            epoch=epoch)
                results.append(outputs)
                self.manager.on_validation_step_end(outputs)

            dic = self.val_end(results)  # pylint: disable=E1111
            # val_end may return None (see E1111 suppression above);
            # truthiness handles both None and an empty dict safely,
            # where len(dic) would raise TypeError on None.
            if dic:
                self.metrics["val"].update(dic)
                self.experiment.log_metrics(dic, step=epoch, epoch=epoch)
Esempio n. 4
0
    def _generate_train_features(self) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Run the model over the train loader and collect features/targets.

        Returns:
            A pair of concatenated arrays: model outputs for every batch
            and the corresponding targets, both moved to CPU and converted
            with ``.numpy()``.

        NOTE(review): the annotation says Tuple[torch.Tensor, torch.Tensor]
        but the return values are numpy arrays -- confirm and fix upstream.
        """
        feature_chunks = []
        target_chunks = []

        train_loader = self.loaders["train"]
        if modules.is_tfds(train_loader):
            train_loader = modules.tfds_to_numpy(train_loader)

        self.model.eval()
        use_amp = isinstance(self.scaler, amp.GradScaler)
        with torch.no_grad(), amp.autocast(enabled=use_amp):
            for inputs, labels in train_loader:
                feature_chunks.append(self.model(inputs.to(self.device)))
                target_chunks.append(labels)

        return (torch.cat(feature_chunks).cpu().numpy(),
                torch.cat(target_chunks).cpu().numpy())