Code example #1
class Dataset(metaclass=abc.ABCMeta):
  """The abstract class representing a dataset."""

  @abc.abstractproperty
  def info(self) -> DatasetInfo:
    """The properties of the dataset."""

  @abc.abstractmethod
  def load(self,
           preprocess_fn: Optional[Callable[[types.Features], types.Features]]
           ) -> tf.data.Dataset:
    """Loads the dataset.

    Note: The provided `preprocess_fn` is *always* run in graph mode.

    Args:
      preprocess_fn: The function used to preprocess the dataset before
        batching. Set to `None` for the per-dataset default.

    Returns:
      The pre-processed and batched dataset. Each element passed to
      `preprocess_fn` is a dictionary with the following fields:
         * "element_id": A unique integer assinged to each example.
         * "image": A (H, W, C)-tensor of type tf.uint8 holding the image.
         * "label": An int64 with the label.
         * "metadata": Additional data about an instance.
    """

registry = registry.Registry(Dataset)
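
The interface above can be exercised with a small toy implementation. The following is a minimal sketch, not part of the library: `DummyDataset` and its synthetic tensors are hypothetical, and `info` returns `None` in place of a real `DatasetInfo` object.

import tensorflow as tf


class DummyDataset(Dataset):
  """Serves ten synthetic 8x8 RGB images with integer labels."""

  @property
  def info(self):
    return None  # A real implementation would build and return a DatasetInfo.

  def load(self, preprocess_fn=None):
    if preprocess_fn is None:
      preprocess_fn = lambda features: features  # Per-dataset default: identity.
    features = {
        "element_id": tf.range(10, dtype=tf.int64),
        "image": tf.zeros([10, 8, 8, 3], dtype=tf.uint8),
        "label": tf.zeros([10], dtype=tf.int64),
        "metadata": tf.zeros([10], dtype=tf.int64),  # Placeholder metadata.
    }
    # `map` traces `preprocess_fn` into the graph, matching the note above.
    return tf.data.Dataset.from_tensor_slices(features).map(preprocess_fn).batch(2)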
Code example #2
for 3D images (height x width x channels). The functors output a dictionary in
which the "image" field has been modified; other fields can potentially be
modified or added as well.
"""
import abc
import collections
from robustness_metrics.common import registry
import tensorflow as tf2
import tensorflow.compat.v1 as tf


class PreprocessingOp(metaclass=abc.ABCMeta):
    """The abstract class representing a preprocessing operation."""


registry = registry.Registry(PreprocessingOp)
get = registry.get


def tf_apply_to_image_or_images(fn, image_or_images, **map_kw):
    """Applies a function to a single image or each image in a batch of them.

  Args:
    fn: the function to apply, receives an image, returns an image.
    image_or_images: Either a single image, or a batch of images.
    **map_kw: Arguments passed through to tf.map_fn if called.

  Returns:
    The result of applying the function to the image or batch of images.

  Raises:
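
Although the excerpt is cut off in the middle of the docstring, the documented contract is enough for a hedged usage sketch. This is illustrative only: `flip` is a hypothetical per-image function, the module's `tf` import is reused, and the batched call is assumed to be routed through `tf.map_fn` as the `**map_kw` argument suggests.

def flip(image):
  return tf.image.flip_left_right(image)  # Receives one image, returns one image.


single_image = tf.zeros([8, 8, 3], dtype=tf.uint8)     # One (H, W, C) image.
image_batch = tf.zeros([4, 8, 8, 3], dtype=tf.uint8)   # A batch of four images.

flipped_single = tf_apply_to_image_or_images(flip, single_image)
flipped_batch = tf_apply_to_image_or_images(flip, image_batch)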
Code example #3
      dataset_name: The identifier of the dataset on which the metric was
        computed.
      metric_name: The name of the metric.
      metric_results: The result from the metric computation.
    """

  @abc.abstractmethod
  def result(self) -> Dict[Text, float]:
    """Computes the results from the given measurements.

    Returns:
      A dictionary mapping the name of each score to its value.
    """


registry = registry.Registry(Report)
get = registry.get


class UnionReport(Report):
    """Concatenates the required measurements in a single dictionary.

  Specifically, if the metric `metric_name` computed a value of `value` with
  key `key` on dataset `dataset_name`, the report will report a value of
  `value` under the name `dataset_name/metric_name/key`.
  """
  def __init__(self):
    self._metric_measurements = collections.defaultdict()
    self._metrics_seen = set()

  def add_measurement(self, dataset_name: Text, metric_name: Text,
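
The excerpt stops mid-signature, but the naming convention from the class docstring can be illustrated on its own. The strings below ("cifar10", "brier", "mse") are hypothetical placeholders, not entries taken from the library's registries.

dataset_name, metric_name, key = "cifar10", "brier", "mse"
report_key = "/".join([dataset_name, metric_name, key])  # -> "cifar10/brier/mse"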
Code example #4
      **metadata: The batch metadata, possibly including `labels` which is the
        batch of labels, one for each example in the batch.
    """
    raise NotImplementedError(
        "Batched predictions not implemented for this metric.")

  @abc.abstractmethod
  def result(self) -> Dict[Text, float]:
    """Computes the results from all the predictions it has seen so far.

    Returns:
      A dictionary mapping the name of each computed metric to its value.
    """


registry = registry.Registry(Metric)
get = registry.get


def _map_labelset(predictions, label, appearing_classes):
  """Restricts `predictions` to `appearing_classes` and remaps `label`.

  The selected prediction columns are renormalized to sum to one, and the label
  is converted to its index within `appearing_classes`. If `appearing_classes`
  is empty or None, the inputs are returned unchanged.
  """
  assert len(predictions.shape) == 2
  if appearing_classes:
    predictions = predictions[:, appearing_classes]
    predictions /= np.sum(predictions, axis=-1, keepdims=True)
    label = appearing_classes.index(label)
  return predictions, label
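

# Worked example (comment only, not part of the module): with a single
# prediction row [[0.2, 0.5, 0.3]] and appearing_classes=[0, 2], the slice
# keeps [[0.2, 0.3]], renormalization gives [[0.4, 0.6]], and an original
# label of 2 is remapped to 1, its index within appearing_classes. With an
# empty or None appearing_classes the inputs pass through unchanged.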


class FullBatchMetric(Metric):
    """Base class for metrics that operate on the full dataset (not streaming)."""
    def __init__(self, dataset_info=None, use_dataset_labelset=False):