Example #1
import torch
import ignite.metrics
from tqdm import tqdm


def test_epoch(model, dataloader, device, criterion, n_predictions):

    metrics = dict(mae=ignite.metrics.MeanAbsoluteError(),
                   mse=ignite.metrics.MeanSquaredError(),
                   rmse=ignite.metrics.RootMeanSquaredError())

    losses = list()

    with torch.no_grad():
        iterator = tqdm(enumerate(dataloader), total=len(dataloader))
        for idx, batch in iterator:
            x_data, y_true = batch

            x_data = x_data.to(device)
            y_true = y_true.to(device)

            if y_true.shape[2] == 2:
                doy = x_data[:, :, 1]
                y_true = y_true[:, :, 0].unsqueeze(2)
            else:
                doy = None

            # make single forward pass to get test loss
            y_pred, log_variances = model(x_data, date=doy)
            loss = criterion(y_pred, y_true, log_variances)
            losses.append(loss.cpu())

            # make multiple MC dropout inferences for further metrics
            y_pred, epi_var, ale_var = model.predict(x_data,
                                                     n_predictions,
                                                     date=doy)
            for name, metric in metrics.items():
                metric.update((y_pred.view(-1), y_true.view(-1)))

    return metrics, torch.stack(losses).mean()
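A minimal usage sketch (hypothetical: model, test_loader, and criterion are placeholders, and model.predict is assumed to perform the MC-dropout inference used above); each ignite metric exposes its accumulated value through compute():

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
metrics, test_loss = test_epoch(model, test_loader, device, criterion, n_predictions=20)
print(f'test loss: {test_loss:.4f}')
for name, metric in metrics.items():
    print(f'{name}: {metric.compute():.4f}')  # accumulated value over the whole epoch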
Example #2
    def log_metrics(engine):
        evaluator.run(dataloader)
        metrics = evaluator.state.metrics
        message = ''
        for metric_name, metric_value in metrics.items():
            message += f'{metric_name}: {metric_value} '

        logger.info(message)
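In ignite, a closure like this is typically registered as an event handler; a sketch assuming a trainer engine exists alongside the evaluator, dataloader, and logger used above:

from ignite.engine import Events

# Hypothetical wiring: run the evaluator and log its metrics after each training epoch.
trainer.add_event_handler(Events.EPOCH_COMPLETED, log_metrics)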
Example #3
    def log_metrics(engine):
        y_pred, y = engine.state.output

        metrics = engine.state.metrics
        message = ''
        for metric_name, metric_value in metrics.items():
            message += f'{metric_name}: {metric_value} '

        logger.info(message)
Example #4
        def _to_message(metrics):
            message = ''

            for metric_name, metric_value in metrics.items():
                if isinstance(metric_value, dict):
                    message += _to_message(metric_value)
                else:
                    writer.add_scalar(f'{data_name}/mean_{metric_name}', metric_value, engine.state.epoch)
                    message += f'{metric_name}: {metric_value:.3f} '

            return message
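The closure relies on writer, data_name, and engine from its enclosing scope; a hypothetical stand-in scope to show the recursion over nested metric dicts:

from types import SimpleNamespace
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/example')                    # placeholder log directory
data_name = 'validation'                                  # placeholder tag prefix
engine = SimpleNamespace(state=SimpleNamespace(epoch=3))  # stand-in for the real engine

# Scalar leaves are written to TensorBoard and appended to the message;
# nested dicts are flattened recursively.
print(_to_message({'loss': 0.412, 'iou': {'car': 0.91, 'bike': 0.78}}))
# -> 'loss: 0.412 car: 0.910 bike: 0.780 '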
Example #5
    def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: str):
        if not isinstance(logger, TensorboardLogger):
            raise RuntimeError(f'Error: Handler `{type(self).__name__}` works only with `TensorboardLogger` logger')

        metrics = self._setup_output_metrics(engine)

        prefixed_metrics = dict()
        for key, value in metrics.items():
            if isinstance(value, numbers.Number) or (isinstance(value, torch.Tensor) and value.ndimension() == 0):
                prefixed_metrics[f'{self.tag}/{key}'] = value
            elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
                for i, v in enumerate(value):
                    prefixed_metrics[f'{self.tag}/{key}/{i}'] = v.item()
            else:
                warnings.warn(f'Warning: TensorboardLogger {type(self).__name__} can not log metrics value type {type(value)}')
        logger.writer.add_hparams(self.hp, metric_dict={f'hpparam/{m}': v for m, v in metrics.items()})
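The handler is meant to be attached through ignite's TensorboardLogger; a sketch where HparamsHandler is a hypothetical name for the class this __call__ belongs to, assumed to take tag and hp in its constructor:

from ignite.engine import Events
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger

tb_logger = TensorboardLogger(log_dir='runs/example')
tb_logger.attach(evaluator,  # evaluator is a placeholder ignite engine
                 log_handler=HparamsHandler(tag='validation', hp={'lr': 1e-3, 'batch_size': 32}),
                 event_name=Events.COMPLETED)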
Example #6
import torch
from torch import nn
from ignite import engine


def create_mask_rcnn_evaluator(model: nn.Module, metrics, device=None, non_blocking: bool = False):
    if device:
        model.to(device)

    fn_prepare_batch = lambda batch: engine._prepare_batch(batch, device=device, non_blocking=non_blocking)

    def _update(engine, batch):
        # warning(will.brennan) - not putting model in eval mode because we want the losses!
        with torch.no_grad():
            image, targets = fn_prepare_batch(batch)
            losses = model(image, targets)

            losses = {k: v.item() for k, v in losses.items()}
            losses['loss'] = sum(losses.values())

        # note(will.brennan) - an ugly hack for metrics...
        return (losses, len(image))

    evaluator = engine.Engine(_update)

    for name, metric in metrics.items():
        metric.attach(evaluator, name)

    return evaluator
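A hypothetical usage, where each metric pulls what it needs out of the (losses, batch_size) output tuple through output_transform:

from ignite.metrics import Average

# model and val_loader are placeholders for a Mask R-CNN and its validation DataLoader.
metrics = {'loss': Average(output_transform=lambda output: output[0]['loss'])}
evaluator = create_mask_rcnn_evaluator(model, metrics, device=torch.device('cuda'))
state = evaluator.run(val_loader)
print(state.metrics['loss'])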
Example #7
def _metrics(prefix):
    return {**{f'{prefix}_{n}': m for n, m in metrics.items()},
            **{f'{prefix}_{n}': loss for n, loss in losses.items()}}
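For illustration, a hypothetical enclosing scope (in the original, the metrics and losses dicts come from the surrounding training code):

metrics = {'accuracy': 0.91}  # placeholder metric values
losses = {'bce': 0.23}        # placeholder loss values
print(_metrics('train'))
# -> {'train_accuracy': 0.91, 'train_bce': 0.23}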
Example #8
    def __init__(
        self,
        device: torch.device,
        max_epochs: int,
        amp: bool,
        data_loader: DataLoader,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        post_transform: Optional[Callable] = None,
        key_metric: Optional[Dict[str, "ignite.metrics.Metric"]] = None,
        additional_metrics: Optional[Dict[str, "ignite.metrics.Metric"]] = None,
        handlers=None,
    ) -> None:
        # pytype: disable=invalid-directive
        # pytype: disable=wrong-arg-count
        super().__init__(iteration_update
                         if iteration_update is not None else self._iteration)
        # pytype: enable=invalid-directive
        # pytype: enable=wrong-arg-count
        # FIXME:
        if amp:
            self.logger.info(
                "Will add AMP support when PyTorch v1.6 released.")
        if not isinstance(device, torch.device):
            raise ValueError("device must be PyTorch device object.")
        if not isinstance(data_loader, DataLoader):
            raise ValueError("data_loader must be PyTorch DataLoader.")

        # set all sharable data for the workflow based on Ignite engine.state
        self.state = State(
            seed=0,
            iteration=0,
            epoch=0,
            max_epochs=max_epochs,
            epoch_length=-1,
            output=None,
            batch=None,
            metrics={},
            dataloader=None,
            device=device,
            amp=amp,
            # many metrics can be set; only key_metric is used to compare and save the best model
            key_metric_name=None,
            best_metric=-1,
            best_metric_epoch=-1,
        )
        self.data_loader = data_loader
        self.prepare_batch = prepare_batch

        if post_transform is not None:

            @self.on(Events.ITERATION_COMPLETED)
            def run_post_transform(engine: "ignite.engine.Engine"):
                assert post_transform is not None
                engine.state.output = apply_transform(post_transform,
                                                      engine.state.output)

        if key_metric is not None:

            if not isinstance(key_metric, dict):
                raise ValueError("key_metric must be a dict object.")
            self.state.key_metric_name = list(key_metric.keys())[0]
            metrics = key_metric
            if additional_metrics is not None and len(additional_metrics) > 0:
                if not isinstance(additional_metrics, dict):
                    raise ValueError(
                        "additional_metrics must be a dict object.")
                metrics.update(additional_metrics)
            for name, metric in metrics.items():
                metric.attach(self, name)

            @self.on(Events.EPOCH_COMPLETED)
            def _compare_metrics(engine: "ignite.engine.Engine"):
                if engine.state.key_metric_name is not None:
                    current_val_metric = engine.state.metrics[
                        engine.state.key_metric_name]
                    if current_val_metric > engine.state.best_metric:
                        self.logger.info(
                            f"Got new best metric of {engine.state.key_metric_name}: {current_val_metric}"
                        )
                        engine.state.best_metric = current_val_metric
                        engine.state.best_metric_epoch = engine.state.epoch

        if handlers is not None:
            handlers = ensure_tuple(handlers)
            for handler in handlers:
                handler.attach(self)
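A hypothetical instantiation; Evaluator stands for whatever subclass defines this __init__, and val_loader is a placeholder DataLoader. The first key of key_metric becomes state.key_metric_name and drives best-model tracking:

from ignite.metrics import Accuracy

workflow = Evaluator(
    device=torch.device('cuda'),
    max_epochs=1,
    amp=False,
    data_loader=val_loader,
    key_metric={'val_acc': Accuracy()},  # first key is compared against best_metric each epoch
)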
Example #9
    def __init__(
        self,
        select_action: Callable[[Engine, Observation], Action],
        metrics: Optional[Dict[str, ignite.metrics.Metric]] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        """Initialize an ignite engine to explore the environment.

        Parameters
        ----------
        select_action:
            A function used to select an action. Has access to the engine and
            the iteration number. The current observation is stored under
            `engine.state.observation`. Takes as input the engine and the
            iteration number, and returns the action passed to the environment,
            along with a dictionary (possibly empty) of other variables to
            remember.
        metrics:
            Ignite metrics to attach to the engine, keyed by name.
        dtype:
            Type to cast observations in.
        device:
            Device to move observations to before passing it to the
            `select_action` function.

        """
        def _process_func(engine, episode_timestep):
            """Take action on each iteration."""
            self.state.episode_timestep = episode_timestep
            # Select action.
            action = select_action(engine, engine.state.observation_dev)

            # Make action.
            next_observation, reward, done, infos = engine.state.env.step(
                action)
            next_observation = next_observation.to(dtype=dtype)

            # We create the transition object and store it.
            required_fields = [
                name for name, attrib in attr.fields_dict(
                    engine.state.TransitionClass).items() if attrib.init
            ]
            engine.state.transition = engine.state.TransitionClass(
                observation=engine.state.observation,
                action=action,
                next_observation=next_observation,
                reward=reward,
                done=done,
                **getattr(engine.state, "extra_transition_members", {}),
                **{k: v
                   for k, v in infos.items() if k in required_fields},
            )
            # Clean up to avoid exposing unnecessary information
            if hasattr(engine.state, "extra_transition_members"):
                del engine.state.extra_transition_members

            # Store info for user
            engine.state.environment_info = infos

            # Save for next move
            # Observation on cpu (untouched)
            engine.state.observation = next_observation
            # Observation on device, for passing to `select_action`
            engine.state.observation_dev = self._maybe_pin(
                next_observation, device)

            if done:  # Iteration events still fired.
                engine.terminate_epoch()

            return engine.state.transition, engine.state.environment_info

        super().__init__(_process_func)

        @self.on(Events.STARTED)
        def _store_TransitionClass(engine):
            engine.state.TransitionClass = Transition

        @self.on(Events.ITERATION_STARTED)
        def _move_to_device(engine):
            engine.state.observation_dev = engine.state.observation_dev.to(
                device=device, non_blocking=True)

        @self.on(Events.EPOCH_STARTED)
        def _init_episode(engine):
            obs = engine.state.env.reset().to(dtype=dtype)
            engine.state.observation = obs
            engine.state.observation_dev = obs.to(device=device,
                                                  non_blocking=True)

        if metrics is not None:
            for name, metric in metrics.items():
                metric.attach(self, name)
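A hypothetical usage sketch; ExplorationEngine stands for the class defining this __init__, and policy_network is a placeholder model:

import torch
from ignite.metrics import Average

def select_action(engine, observation):
    # Greedy placeholder policy: pick the argmax of the network's output.
    with torch.no_grad():
        return policy_network(observation).argmax().item()

explorer = ExplorationEngine(
    select_action,
    metrics={'mean_reward': Average(output_transform=lambda out: out[0].reward)},
    dtype=torch.float32,
    device=torch.device('cpu'),
)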