Example #1
    def _init_summary(self):
        # Start a fresh DictSummary to accumulate reported observations.
        self._summary = reporter.DictSummary()
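
A minimal sketch of the accumulate-then-average pattern behind DictSummary, assuming the Chainer-style reporter API (the values are illustrative):

from chainer import reporter

summary = reporter.DictSummary()
summary.add({'loss': 0.9, 'accuracy': 0.50})   # observation from batch 1
summary.add({'loss': 0.7, 'accuracy': 0.75})   # observation from batch 2
print(summary.compute_mean())  # {'loss': 0.8, 'accuracy': 0.625}
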
Example #2
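    # Excerpt from an Evaluator class; module-level imports such as copy,
    # warnings, reporter_module and convert are defined elsewhere in the file.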
    def evaluate(self):
        """Evaluates the model and returns a result dictionary.

        This method runs the evaluation loop over the validation dataset. It
        accumulates the reported values to :class:`~pytorch_trainer.DictSummary` and
        returns a dictionary whose values are means computed by the summary.

        Note that this function assumes that the main iterator eventually
        raises ``StopIteration`` or that code in the evaluation loop raises
        an exception. If this assumption does not hold, the function may get
        stuck in an infinite loop.

        Users can override this method to customize the evaluation routine.

        .. note::

            This method encloses :attr:`eval_func` calls within the
            :func:`function.no_backprop_mode` context, so all calculations
            using :class:`~pytorch_trainer.FunctionNode`\\s inside
            :attr:`eval_func` do not build computational graphs. This
            reduces memory consumption.

        Returns:
            dict: Result dictionary. This dictionary is further reported via
            :func:`~pytorch_trainer.report` without specifying any observer.

        """
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        for target in self._targets.values():
            target.eval()

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            warnings.warn(
                'This iterator does not have the reset method. Evaluator '
                'copies the iterator instead of resetting. This behavior is '
                'deprecated. Please implement the reset method.',
                DeprecationWarning)
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        if self._progress_bar:
            pbar = _IteratorProgressBar(iterator=it)

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = convert._call_converter(self.converter, batch,
                                                    self.device)
                if isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                elif isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                else:
                    eval_func(in_arrays)

            summary.add(observation)

            if self._progress_bar:
                pbar.update()

        if self._progress_bar:
            pbar.close()

        return summary.compute_mean()
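
The docstring above notes that users can override this method to customize the evaluation routine. A minimal, hypothetical sketch of such an override (the subclass name and the result key are illustrative; Chainer's Evaluator extension stands in for the base class, which the snippet does not show):

from chainer.training.extensions import Evaluator

class MyEvaluator(Evaluator):
    def evaluate(self):
        result = super().evaluate()  # run the standard evaluation loop
        # Post-process the averaged metrics, e.g. log a single value.
        print('mean loss:', result.get('main/loss'))
        return result
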
Example #3
    def evaluate(self, trainer):
        """Evaluates the model and returns a result dictionary.

        This method runs the evaluation loop over the validation dataset. It
        accumulates the reported values to :class:`~chainer.DictSummary` and
        returns a dictionary whose values are means computed by the summary.

        Note that this function assumes that the main iterator eventually
        raises ``StopIteration`` or that code in the evaluation loop raises
        an exception. If this assumption does not hold, the function may get
        stuck in an infinite loop.

        Users can override this method to customize the evaluation routine.

        .. note::

            This method encloses :attr:`eval_func` calls within the
            :func:`function.no_backprop_mode` context, so all calculations
            using :class:`~chainer.FunctionNode`\\s inside
            :attr:`eval_func` do not build computational graphs. This
            reduces memory consumption.

        Returns:
            dict: Result dictionary. This dictionary is further reported via
            :func:`~chainer.report` without specifying any observer.

        """
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        for target in self._targets.values():
            target.eval()

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
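            # Iterator has no reset(); fall back to evaluating a shallow copy
            # (the deprecated path warned about in the previous example).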
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        self.visualizer.reset()

        desc = 'valid (iter=%08d)' % trainer.updater.iteration
        total = len(it.dataset) // it.batch_size  # floor division: a final partial batch is not counted

        for batch in tqdm.tqdm(it, total=total, desc=desc, ncols=80, leave=False):
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)

                with torch.no_grad():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays)
                    else:
                        eval_func(in_arrays)

            if self.visualizer.n_examples < self.n_vis:
                if hasattr(eval_func, 'x') \
                        and hasattr(eval_func, 'y') \
                        and hasattr(eval_func, 't'):

                    self.visualizer.add_batch(eval_func.x,
                                              eval_func.y,
                                              eval_func.t)
                else:
                    warnings.warn('`eval_func` should have attributes '
                                  '`x`, `y` and `t` for visualization.')

            summary.add(observation)

        # save
        filename = self.filename
        if callable(filename):
            filename = filename(trainer)
        else:
            filename = filename.format(trainer)

        out = os.path.join(trainer.out, filename)
        self.visualizer.save(out)

        return summary.compute_mean()
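
For reference, the two filename forms accepted by the save step above could look like this (a hedged sketch; the concrete filenames are illustrative, while trainer.updater.iteration is the standard attribute path):

# str form, expanded via filename.format(trainer)
filename = 'validation_iter_{.updater.iteration:08d}.png'

# callable form, invoked as filename(trainer)
def filename(trainer):
    return 'validation_iter_%08d.png' % trainer.updater.iteration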