Example #1
def _get_and_validate_dtype(dtype, encoding_format):
  """Update the dtype."""
  dtype = tf.as_dtype(dtype)
  acceptable_dtypes = ACCEPTABLE_DTYPES.get(encoding_format)
  if acceptable_dtypes and dtype not in acceptable_dtypes:
    raise ValueError(
        f'Acceptable `dtype` for {encoding_format}: '
        f'{acceptable_dtypes} (was {dtype})'
    )
  return dtype
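A minimal usage sketch. The real `ACCEPTABLE_DTYPES` table lives in the enclosing module; the mapping below is invented for illustration:

import tensorflow as tf

ACCEPTABLE_DTYPES = {'png': (tf.uint8, tf.uint16)}   # hypothetical table
_get_and_validate_dtype(tf.uint8, 'png')             # returns tf.uint8
_get_and_validate_dtype(tf.float32, 'jpeg')          # no table entry: passes through
# _get_and_validate_dtype(tf.float32, 'png')         # raises ValueError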
Example #2
def to_feature(value):
  """Convert the given value to Feature if necessary."""
  if isinstance(value, feature_lib.FeatureConnector):
    return value
  elif utils.is_dtype(value):  # tf.int32, tf.string,...
    return feature_lib.Tensor(shape=(), dtype=tf.as_dtype(value))
  elif isinstance(value, dict):
    return FeaturesDict(value)
  else:
    raise ValueError('Feature not supported: {}'.format(value))
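A hedged sketch of how this dispatcher behaves, assuming the tfds `feature_lib` and `FeaturesDict` referenced above are in scope:

import tensorflow as tf

to_feature(tf.int32)             # -> feature_lib.Tensor(shape=(), dtype=tf.int32)
to_feature({'label': tf.int64})  # -> FeaturesDict wrapping a scalar int64 Tensor
to_feature(feature_lib.Tensor(shape=(3,), dtype=tf.float32))  # returned unchanged
# to_feature(42)                 # raises ValueError: Feature not supported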
Example #3
 def as_signature_def(self, receiver_tensors):
   if len(receiver_tensors) != 1:
     raise ValueError('Classification input must be a single string Tensor; '
                      'got {}'.format(receiver_tensors))
   (_, examples), = receiver_tensors.items()
   if tf.as_dtype(examples.dtype) != tf.string:
     raise ValueError('Classification input must be a single string Tensor; '
                      'got {}'.format(receiver_tensors))
   return tf.compat.v1.saved_model.classification_signature_def(
       examples, self.classes, self.scores)
Example #4
def convert_to_ndarray(x, dtype=None):
    """Convert 'x' to a numpy array."""
    array = np.array(x) if isinstance(x, (list, tuple)) else x
    if dtype not in (None, tf.string):
        # Cast only when it can be done safely: e.g. int32 data may be widened
        # to int64 when `dtype` is int64, but lossy casts are skipped.
        np_dtype = tf.as_dtype(dtype).as_numpy_dtype
        if np.can_cast(array.dtype, np_dtype):
            array = array.astype(np_dtype, casting="safe")
    return array
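A short usage sketch under the stated semantics (only safe casts are applied; string dtypes pass through untouched). Note the default integer dtype of `np.array` is platform dependent:

import numpy as np
import tensorflow as tf

a = convert_to_ndarray([1, 2, 3], dtype=tf.int64)
print(a.dtype)   # int64 on most platforms (safe widening cast)
b = convert_to_ndarray([1, 2, 3], dtype=tf.float64)
print(b.dtype)   # float64: np.can_cast(int64, float64) is True under "safe"
c = convert_to_ndarray(['x', 'y'], dtype=tf.string)
print(c.dtype)   # left as a numpy unicode array; tf.string skips the cast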
Example #5
def _to_tf_type(dtype):
    """Converts a native python or numpy type to TF DType.

    Args:
      dtype: Could be a python type, a numpy type or a TF DType.

    Returns:
      A tensorflow `DType`.
    """
    return tf.as_dtype(dtype)
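A few conversions this helper performs, sketched with plain `tf.as_dtype` semantics (it accepts python types, numpy dtypes, strings and existing `DType`s):

import numpy as np
import tensorflow as tf

assert _to_tf_type(np.int64) == tf.int64
assert _to_tf_type('float32') == tf.float32
assert _to_tf_type(float) == tf.float64   # python float maps to a 64-bit dtype
assert _to_tf_type(tf.bool) == tf.bool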
Example #6
def set_policy(policy):
  """Sets the global dtype policy.

  The global policy is the default `tf.keras.mixed_precision.Policy` used for
  layers, if no policy is passed to the layer constructor.

  >>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
  >>> tf.keras.mixed_precision.global_policy()
  <Policy "mixed_float16">
  >>> tf.keras.layers.Dense(10).dtype_policy
  <Policy "mixed_float16">
  >>> # Global policy is not used if a policy is directly passed to constructor
  >>> tf.keras.layers.Dense(10, dtype='float64').dtype_policy
  <Policy "float64">
  >>> tf.keras.mixed_precision.set_global_policy('float32')

  If no global policy is set, layers will instead default to a Policy
  constructed from `tf.keras.backend.floatx()`.

  To use mixed precision, the global policy should be set to `'mixed_float16'`
  or `'mixed_bfloat16'`, so that every layer uses a 16-bit compute dtype and
  float32 variable dtype by default.

  Only floating point policies can be set as the global policy, such as
  `'float32'` and `'mixed_float16'`. Non-floating point policies such as
  `'int32'` and `'complex64'` cannot be set as the global policy because most
  layers do not support such policies.

  See `tf.keras.mixed_precision.Policy` for more information.

  Args:
    policy: A Policy, or a string that will be converted to a Policy. Can also
      be None, in which case the global policy will be constructed from
      `tf.keras.backend.floatx()`.
  """
  global _global_policy
  if not base_layer_utils.v2_dtype_behavior_enabled():
    raise ValueError('The global policy can only be set in TensorFlow 2 or if '
                     'V2 dtype behavior has been set. To enable V2 dtype '
                     'behavior, call '
                     '"tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"')
  if policy is not None and not isinstance(policy, Policy):
    policy = Policy(policy)
  is_mixed_policy = (policy is not None and
                     policy.compute_dtype != policy.variable_dtype)
  if is_mixed_policy:
    _check_if_mixed_precision_graph_rewrite_is_enabled(policy)
  if (policy is not None and policy.compute_dtype is not None and
      not tf.as_dtype(policy.compute_dtype).is_floating):
    raise ValueError('set_policy can only be used to set the global policy to '
                     'floating-point policies, such as "float32" and '
                     '"mixed_float16", but got policy: %s'
                     % (policy.name,))
  _global_policy = policy
  mixed_precision_global_state.using_mixed_precision_policy = is_mixed_policy
Example #7
def estimate_tails(func, target, shape, dtype):
    """Estimates approximate tail quantiles.

  This runs a simple Adam iteration to determine tail quantiles. The
  objective is to find an `x` such that:
  ```
  func(x) == target
  ```
  For instance, if `func` is a CDF and the target is a quantile value, this
  would find the approximate location of that quantile. Note that `func` is
  assumed to be monotonic. When each tail estimate has passed the optimal value
  of `x`, the algorithm does 10 additional iterations and then stops.

  This operation is vectorized. The tensor shape of `x` is given by `shape`, and
  `target` must have a shape that is broadcastable to the output of `func(x)`.

  Arguments:
    func: A callable that computes cumulative distribution function, survival
      function, or similar.
    target: The desired target value.
    shape: The shape of the `tf.Tensor` representing `x`.
    dtype: The `tf.dtypes.Dtype` of the computation (and the return value).

  Returns:
    A `tf.Tensor` representing the solution (`x`).
  """
    with tf.name_scope("estimate_tails"):
        dtype = tf.as_dtype(dtype)
        shape = tf.convert_to_tensor(shape, tf.int32)
        target = tf.convert_to_tensor(target, dtype)

        def loop_cond(tails, m, v, count):
            del tails, m, v  # unused
            return tf.reduce_min(count) < 10

        def loop_body(tails, m, v, count):
            with tf.GradientTape(watch_accessed_variables=False) as tape:
                tape.watch(tails)
                loss = abs(func(tails) - target)
            grad = tape.gradient(loss, tails)
            m = .5 * m + .5 * grad  # Adam mean estimate.
            v = .9 * v + .1 * tf.square(grad)  # Adam variance estimate.
            tails -= .5 * m / (tf.sqrt(v) + 1e-7)
            # Start counting when the gradient flips sign (note that this assumes
            # `tails` is initialized to zero).
            count = tf.where(tf.math.logical_or(count > 0, tails * grad > 0),
                             count + 1, count)
            return tails, m, v, count

        init_tails = tf.zeros(shape, dtype=dtype)
        init_m = tf.zeros(shape, dtype=dtype)
        init_v = tf.ones(shape, dtype=dtype)
        init_count = tf.zeros(shape, dtype=tf.int32)
        return tf.while_loop(loop_cond, loop_body,
                             (init_tails, init_m, init_v, init_count))[0]
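A hedged usage sketch: locating an approximate lower-tail quantile of a standard normal CDF. The `tensorflow_probability` import is an assumption; any monotonic `func` works:

import tensorflow as tf
import tensorflow_probability as tfp

dist = tfp.distributions.Normal(loc=0., scale=1.)
x = estimate_tails(dist.cdf, target=2.**-8, shape=[1], dtype=tf.float32)
# x should land near dist.quantile(2.**-8) ~= -2.66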
Example #8
    def __call__(self, shape, dtype=None):
        """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not provided will return tensor
       of `tf.float32`.
    """
        dtype = tf.as_dtype(dtype or tf.keras.backend.floatx())
        if isinstance(shape, tf.TensorShape):
            shape_dtype = tf.int32
            shape_ = np.int32(shape)
        else:
            if not tf.is_tensor(shape):
                shape = tf.convert_to_tensor(value=shape,
                                             dtype_hint=tf.int32,
                                             name='shape')
            shape_dtype = shape.dtype.base_dtype
            shape_ = tf.get_static_value(shape, partial=True)

        sizes_ = tf.get_static_value(self.sizes)
        if sizes_ is not None:
            sizes_ = np.array(sizes_, shape_dtype.as_numpy_dtype)

        assertions = []
        message = 'Rightmost dimension of shape must equal `sum(sizes)`.'
        n = shape[-1] if shape_ is None or shape_[-1] is None else shape_[-1]
        if sizes_ is not None and not tf.is_tensor(n):
            if sum(sizes_) != n:
                raise ValueError(message)
        elif self.validate_args:
            assertions.append(
                tf.debugging.assert_equal(
                    shape[-1],
                    tf.reduce_sum(input_tensor=self.sizes),
                    message=message))

        s = (shape[:-1] if shape_ is None or any(
            s is None for s in shape_[:-1]) else shape_[:-1])
        if sizes_ is not None and isinstance(s, (np.ndarray, np.generic)):
            return tf.concat([
                tf.keras.initializers.get(init)(np.concatenate(
                    [s, np.array([e], shape_dtype.as_numpy_dtype)], axis=-1),
                                                dtype)
                for init, e in zip(self.initializers, sizes_.tolist())
            ],
                             axis=-1)

        sizes = tf.split(self.sizes, len(self.initializers))
        return tf.concat([
            tf.keras.initializers.get(init)(tf.concat([s, e], axis=-1), dtype)
            for init, e in zip(self.initializers, sizes)
        ],
                         axis=-1)
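This `__call__` belongs to a blockwise initializer that stitches per-block initializers together along the last axis. A hedged usage sketch, assuming the class is `tfp.layers.BlockwiseInitializer` (whose `initializers` and `sizes` attributes match the code above):

import tensorflow_probability as tfp

init = tfp.layers.BlockwiseInitializer(['glorot_uniform', 'zeros'], sizes=[3, 4])
kernel = init(shape=(2, 7))  # last dim must equal sum(sizes) == 7
# kernel[:, :3] is glorot-initialized; kernel[:, 3:] is all zeros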
Example #9
    def merge_dtypes(self, dt1, dt2):
        """Merges two dtypes, returning a compatible dtype.

    In practice, TF implementation asserts that the two dtypes are identical.

    Args:
      dt1: A numpy dtype, or None.
      dt2: A numpy dtype, or None.

    Returns:
      dtype: The common numpy dtype.

    Raises:
      ValueError: If dt1 and dt2 are not equal and both are non-`None`.
    """
        if None in (dt1, dt2):
            return dt1 or dt2
        if tf.as_dtype(dt1) == tf.as_dtype(dt2):
            return dtype_util.as_numpy_dtype(tf.as_dtype(dt1))
        raise ValueError('Mismatched dtypes {} vs {}'.format(dt1, dt2))
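The merge rules in a nutshell. `merge_dtypes` is a method, so the sketch assumes a hypothetical instance named `backend`:

import numpy as np

backend.merge_dtypes(np.float32, None)        # -> np.float32 (None defers)
backend.merge_dtypes(np.float32, np.float32)  # -> the common numpy dtype
# backend.merge_dtypes(np.float32, np.int32)  # raises ValueError: mismatched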
Example #10
    def _parse_name(self, name):
        """Parses a Policy name into a compute and variable dtype.

    Args:
      name: The name of the policy:

    Returns:
      The (compute_dtype, variable_dtype) pair.
    """
        if name.endswith('_float32_vars'):
            error_msg = (
                'Policies ending in \'_float32_vars\' have been removed '
                'from TensorFlow.')
            if name in ('infer_float32_vars', 'infer_with_float32_vars'):
                error_msg += (
                    ' Please use the \'mixed_float16\' or \'mixed_bfloat16\' '
                    'policy instead.')
            elif name == 'float16_with_float32_vars':
                error_msg += (
                    ' Please use the \'mixed_float16\' policy instead.')
            elif name == 'bfloat16_with_float32_vars':
                error_msg += (
                    ' Please use the \'mixed_bfloat16\' policy instead.')
            error_msg += ' Got policy name: \'%s\'' % name
            raise ValueError(error_msg)

        if name == 'mixed_float16':
            return 'float16', 'float32'
        elif name == 'mixed_bfloat16':
            return 'bfloat16', 'float32'
        elif name == '_infer':
            # The "_infer" policy exists only for compatibility with TF 1, where
            # "_infer" is the default. The behavior matches the behavior of TF 1's
            # behavior before policies were introduced. With "_infer", the computation
            # and variable dtype are inferred from the first input the first time the
            # layer is called. Once the layer is called for the first time, the
            # layer's policy will change to the dtype of the first input, and it will
            # no longer have the "_infer" policy.
            #
            # The infer policy should be considered an implementation detail and may
            # be removed in the future.
            return None, None

        try:
            dtype = tf.as_dtype(name).name
        except TypeError:
            error = (
                "Cannot convert value %s to a mixed precision Policy. "
                "Valid policies include 'mixed_float16', 'mixed_bfloat16', "
                "and the name of any dtype such as 'float32'." % (name, ))
            # six.raise_from suppresses the original TypeError from being raised
            six.raise_from(ValueError(error), None)
        return dtype, dtype
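The resulting (compute_dtype, variable_dtype) pairs, sketched as comments; these follow directly from the branches above:

# self._parse_name('mixed_float16')   -> ('float16',  'float32')
# self._parse_name('mixed_bfloat16')  -> ('bfloat16', 'float32')
# self._parse_name('float64')         -> ('float64',  'float64')
# self._parse_name('_infer')          -> (None, None)
# self._parse_name('not_a_dtype')     -> raises ValueError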
Example #11
def to_tf_type(dtype):
  """Converts a native python or numpy type to TF DType.

  Args:
    dtype: Could be a python type, a numpy type or a TF DType.

  Returns:
    A tensorflow `DType`.
  """
  if isinstance(dtype, tf.DType):
    return dtype
  return tf.as_dtype(dtypes.canonicalize_dtype(np.dtype(dtype)))
Example #12
    def _parse_name(self, name):
        """Parses a Policy name into a compute and variable dtype.

        Args:
          name: The name of the policy.

        Returns:
          The (compute_dtype, variable_dtype) pair.
        """
        if name.endswith("_float32_vars"):
            error_msg = (
                "Policies ending in '_float32_vars' have been removed "
                "from TensorFlow."
            )
            if name in ("infer_float32_vars", "infer_with_float32_vars"):
                error_msg += (
                    " Please use the 'mixed_float16' or 'mixed_bfloat16' "
                    "policy instead."
                )
            elif name == "float16_with_float32_vars":
                error_msg += " Please use the 'mixed_float16' policy instead."
            elif name == "bfloat16_with_float32_vars":
                error_msg += " Please use the 'mixed_bfloat16' policy instead."
            error_msg += " Got policy name: '%s'" % name
            raise ValueError(error_msg)

        if name == "mixed_float16":
            return "float16", "float32"
        elif name == "mixed_bfloat16":
            return "bfloat16", "float32"
        elif name == "_infer":
            # The "_infer" policy exists only for compatibility with TF 1, where
            # "_infer" is the default. The behavior matches the behavior of TF 1's
            # behavior before policies were introduced. With "_infer", the computation
            # and variable dtype are inferred from the first input the first time the
            # layer is called. Once the layer is called for the first time, the
            # layer's policy will change to the dtype of the first input, and it will
            # no longer have the "_infer" policy.
            #
            # The infer policy should be considered an implementation detail and may
            # be removed in the future.
            return None, None

        try:
            dtype = tf.as_dtype(name).name
        except TypeError:
            error = (
                "Cannot convert value %s to a mixed precision Policy. "
                "Valid policies include 'mixed_float16', 'mixed_bfloat16', "
                "and the name of any dtype such as 'float32'." % (name,)
            )
            raise ValueError(error)
        return dtype, dtype
Example #13
    def test_functional_api(self):
        # Create a trainable distribution using the functional API.
        dummy_input = tf.keras.Input(shape=())
        x = tfp.layers.VariableLayer(
            shape=[2, 3, 4],
            dtype=tf.float64,
            trainable=False,  # You'd probably never want this in real life.
        )(dummy_input)
        # The Dense serves no real purpose; it will change the event_shape.
        x = tf.keras.layers.Dense(5, use_bias=False, dtype=tf.float64)(x)
        x = tfp.layers.DistributionLambda(
            lambda t: tfd.Independent(
                tfd.Normal(loc=t[0], scale=t[1]),  # pylint: disable=g-long-lambda
                reinterpreted_batch_ndims=1),
            dtype=tf.float64)(x)
        model = tf.keras.Model(dummy_input, x)

        # Instantiate the model (as a TFP distribution).
        dist = model(tf.zeros([]))

        # Check the weights.
        self.assertEqual(2, len(model.weights))

        # Check the VariableLayer layer.
        self.assertIs(tf.float64, tf.as_dtype(model.weights[0].dtype))
        self.assertEqual((2, 3, 4), model.layers[1].weights[0].shape)
        self.assertFalse(model.layers[1].trainable)
        self.assertFalse(model.layers[1].weights[0].trainable)

        # Check the Dense layer.
        self.assertIs(tf.float64, tf.as_dtype(model.weights[1].dtype))
        self.assertEqual((4, 5), model.layers[2].weights[0].shape)
        self.assertTrue(model.layers[2].trainable)
        self.assertTrue(model.layers[2].weights[0].trainable)

        # Check the distribution.
        self.assertIsInstance(dist, tfd.Independent)
        self.assertIs(tf.float64, dist.dtype)
        self.assertEqual((3, ), dist.batch_shape)
        self.assertEqual((5, ), dist.event_shape)
Example #14
    def __init__(self, shape, dtype=float, buffer=None):  # pylint: disable=redefined-builtin
        """Initializes an ndarray.

    This is a low level interface for building ndarrays and should be avoided.
    Users should instead use methods in array_creation.py.

    This class provides a numpy.ndarray like interface for a TF Tensor with a
    fully-defined shape. Note that, unlike the backing buffer of np.ndarray,
    Tensors are immutable. So, operations like `__setitem__` are performed by
    replacing the Tensor. This restricts the ability to implement NumPy `view`
    semantics.

    Compared to numpy.ndarray, this does not support `offset`, `strides`
    and `order` arguments.

    Args:
      shape: The shape of the array. Must be a scalar, an iterable of integers
        or a `TensorShape` object.
      dtype: Optional. The dtype of the array. Must be a python type, a numpy
        type or a tensorflow `DType` object.
      buffer: Optional. The backing buffer of the array. Must have shape
        `shape`. Must be a `ndarray`, `np.ndarray` or a `Tensor`.

    Raises:
      ValueError: If `buffer` is specified and its shape does not match
       `shape`.
    """
        if dtype and not isinstance(dtype, tf.DType):
            dtype = tf.as_dtype(np.dtype(dtype))
        if buffer is None:
            buffer = tf.zeros(shape, dtype=dtype)
        else:
            if isinstance(buffer, ndarray):
                buffer = buffer.data
            elif isinstance(buffer, np.ndarray):
                # If `buffer` is a np.ndarray, the Tensor will share the underlying
                # storage of the array.
                buffer = convert_to_tensor(value=buffer, dtype=dtype)
            elif not isinstance(buffer, tf.Tensor):
                raise ValueError(
                    'Unexpected type for `buffer` {}. Must be an ndarray,'
                    ' Tensor or np.ndarray.'.format(type(buffer)))

            if shape is not None and tuple(shape) != buffer._shape_tuple():  # pylint: disable=protected-access
                # TODO(srbs): NumPy allows this. Investigate if/how to support this.
                raise ValueError('shape arg must match buffer.shape.')

        assert isinstance(buffer, tf.Tensor)
        if dtype and dtype != buffer.dtype:
            buffer = tf.bitcast(buffer, dtype)
        self._data = buffer
        self.base = None
Example #15
 def build(self, input_shape):
     input_shape = tf.TensorShape(input_shape)
     if self.data_format == "channels_first":
         channel_axis = 1
     else:
         channel_axis = -1
     input_dim = tf.compat.dimension_value(input_shape[channel_axis])
     if input_dim is None:
         raise ValueError("The channel dimension of inputs Found `None`.")
     kernel_shape = self.kernel_size + (input_dim, self.filters)
     # If self.dtype is None, build weights using the default dtype.
     dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())
     # Must have a posterior kernel.
     self.kernel_posterior = self.kernel_posterior_fn(
         dtype, kernel_shape, "kernel_posterior", self.trainable, self.add_variable
     )
     if self.kernel_prior_fn is None:
         self.kernel_prior = None
     else:
         self.kernel_prior = self.kernel_prior_fn(
             dtype, kernel_shape, "kernel_prior", self.trainable, self.add_variable
         )
     if self.bias_posterior_fn is None:
         self.bias_posterior = None
     else:
         self.bias_posterior = self.bias_posterior_fn(
             dtype,
             (self.filters,),
             "bias_posterior",
             self.trainable,
             self.add_variable,
         )
     if self.bias_prior_fn is None:
         self.bias_prior = None
     else:
         self.bias_prior = self.bias_prior_fn(
             dtype, (self.filters,), "bias_prior", self.trainable, self.add_variable
         )
     self.input_spec = tf.keras.layers.InputSpec(
         ndim=self.rank + 2, axes={channel_axis: input_dim}
     )
     self._convolution_op = nn_ops.Convolution(
         input_shape,
         filter_shape=tf.TensorShape(kernel_shape),
         dilation_rate=self.dilation_rate,
         strides=self.strides,
         padding=self.padding.upper(),
         data_format=tf_layers_util.convert_data_format(
             self.data_format, self.rank + 2
         ),
     )
     self.built = True
Example #16
    def __init__(self,
                 prior,
                 coding_rank,
                 compression=False,
                 likelihood_bound=1e-9,
                 tail_mass=2**-8,
                 range_coder_precision=12,
                 no_variables=False):
        """Initializer.

    Arguments:
      prior: A `tfp.distributions.Distribution` object. A density model fitting
        the marginal distribution of the bottleneck data with additive uniform
        noise, which is shared a priori between the sender and the receiver. For
        best results, the distribution should be flexible enough to have a
        unit-width uniform distribution as a special case, since this is the
        marginal distribution for bottleneck dimensions that are constant.
      coding_rank: Integer. Number of innermost dimensions considered a coding
        unit. Each coding unit is compressed to its own bit string, and the
        `bits()` method sums over each coding unit.
      compression: Boolean. If set to `True`, the range coding tables used by
        `compress()` and `decompress()` will be built on instantiation. If set
        to `False`, these two methods will not be accessible.
      likelihood_bound: Float. Lower bound for likelihood values, to prevent
        training instabilities.
      tail_mass: Float. Approximate probability mass which is range encoded with
        less precision, by using a Golomb-like code.
      range_coder_precision: Integer. Precision passed to the range coding op.
      no_variables: Boolean. If True, creates range coding tables as `Tensor`s
        rather than `Variable`s.

    Raises:
      RuntimeError: when attempting to instantiate an entropy model with
        `compression=True` and not in eager execution mode.
    """
        if prior.event_shape.rank:
            raise ValueError(
                "`prior` must be a (batch of) scalar distribution(s).")
        super().__init__()
        with self.name_scope:
            self._prior = prior
            self._dtype = tf.as_dtype(prior.dtype)
            self._prior_shape = tuple(int(s) for s in prior.batch_shape)
            self._coding_rank = int(coding_rank)
            self._compression = bool(compression)
            self._likelihood_bound = float(likelihood_bound)
            self._tail_mass = float(tail_mass)
            self._range_coder_precision = int(range_coder_precision)
            self._no_variables = bool(no_variables)
            if self.compression:
                self._build_tables(prior)
Example #17
def common_dtype(args_list, preferred_dtype=None):
    """Returns explict dtype from `args_list` if there is one."""
    dtype = None
    for a in tf.nest.flatten(args_list):
        if hasattr(a, 'dtype'):
            dt = as_numpy_dtype(a.dtype)
        else:
            continue
        if dtype is None:
            dtype = dt
        elif dtype != dt:
            raise TypeError('Found incompatible dtypes, {} and {}.'.format(
                dtype, dt))
    return preferred_dtype if dtype is None else tf.as_dtype(dtype)
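A hedged usage sketch, assuming `as_numpy_dtype` from the same dtype utility module is in scope:

import numpy as np
import tensorflow as tf

common_dtype([tf.ones([2], tf.float64), np.float64(3.)])        # -> tf.float64
common_dtype([1, 'no dtype here'], preferred_dtype=tf.float32)  # -> tf.float32 (fallback)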
Example #18
def common_dtype(args_list, dtype_hint=None):
    """Returns explict dtype from `args_list` if there is one."""
    dtype = None
    for a in tf.nest.flatten(args_list):
        if hasattr(a, 'dtype') and a.dtype:
            dt = as_numpy_dtype(a.dtype)
        else:
            continue
        if dtype is None:
            dtype = dt
        elif dtype != dt:
            if SKIP_DTYPE_CHECKS:
                dtype = (np.ones([2], dtype) + np.ones([2], dt)).dtype
            else:
                raise TypeError('Found incompatible dtypes, {} and {}.'.format(
                    dtype, dt))
    return dtype_hint if dtype is None else tf.as_dtype(dtype)
Example #19
    def __init__(
        self,
        dtype=None,
        shape=None,
        ndim=None,
        max_ndim=None,
        min_ndim=None,
        axes=None,
        allow_last_axis_squeeze=False,
        name=None,
    ):
        self.dtype = tf.as_dtype(dtype).name if dtype is not None else None
        shape = tf.TensorShape(shape)
        if shape.rank is None:
            shape = None
        else:
            shape = tuple(shape.as_list())
        if shape is not None:
            self.ndim = len(shape)
            self.shape = shape
        else:
            self.ndim = ndim
            self.shape = None
        self.max_ndim = max_ndim
        self.min_ndim = min_ndim
        self.name = name
        self.allow_last_axis_squeeze = allow_last_axis_squeeze
        try:
            axes = axes or {}
            self.axes = {int(k): axes[k] for k in axes}
        except (ValueError, TypeError):
            raise TypeError(
                "Argument `axes` must be a dict with integer keys. "
                f"Received: axes={axes}"
            )

        if self.axes and (self.ndim is not None or self.max_ndim is not None):
            max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
            max_axis = max(self.axes)
            if max_axis > max_dim:
                raise ValueError(
                    "Axis {} is greater than the maximum allowed value: {}".format(
                        max_axis, max_dim
                    )
                )
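The signature matches Keras's `tf.keras.layers.InputSpec`; a brief sketch of the constraints it encodes:

import tensorflow as tf

spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: 64})
print(spec.dtype)             # None: no dtype constraint was requested
spec = tf.keras.layers.InputSpec(dtype='float32', shape=(None, 28, 28, 1))
print(spec.ndim, spec.shape)  # 4 (None, 28, 28, 1)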
Example #20
    def from_config(cls, config):
        """Instantiates an entropy model from a configuration dictionary.

    Args:
      config: A `dict`, typically the output of `get_config`.

    Returns:
      An entropy model.
    """
        # Instantiate a new object without calling initializers, and call the
        # superclass (tf.Module) initializer manually. Note: `cls` is a child
        # class of this one.
        self = cls.__new__(cls)  # pylint:disable=no-value-for-parameter
        super().__init__(self)

        # What follows is the alternative initializer.
        with self.name_scope:
            # pylint:disable=protected-access
            self._dtype = tf.as_dtype(config["dtype"])
            self._prior_shape = tuple(map(int, config["prior_shape"]))
            self._coding_rank = int(config["coding_rank"])
            self._compression = True
            self._laplace_tail_mass = float(config["laplace_tail_mass"])
            if self._laplace_tail_mass:
                self._laplace_prior = tfp.distributions.Laplace(loc=0.0,
                                                                scale=1.0)
            self._expected_grads = bool(config["expected_grads"])
            self._tail_mass = float(config["tail_mass"])
            self._range_coder_precision = int(config["range_coder_precision"])
            self._no_variables = False

            # TODO(relational): Switch to math.prod when we switch to Python 3.8
            context_size = functools.reduce(lambda x, y: x * y,
                                            self.context_shape, 1)
            cdf_width = int(config["cdf_width"])
            zeros = tf.zeros([context_size, cdf_width], dtype=tf.int32)
            self._cdf = tf.Variable(zeros, trainable=False, name="cdf")
            self._cdf_offset = tf.Variable(zeros[:, 0],
                                           trainable=False,
                                           name="cdf_offset")
            self._cdf_length = tf.Variable(zeros[:, 0],
                                           trainable=False,
                                           name="cdf_length")
            # pylint:enable=protected-access

        return self
Example #21
def _assert_float_dtype(dtype):
    """Validate and return floating point type based on `dtype`.

    `dtype` must be a floating point type.

    Args:
      dtype: The data type to validate.

    Returns:
      Validated type.

    Raises:
      ValueError: if `dtype` is not a floating point type.
    """
    dtype = tf.as_dtype(dtype)
    if not dtype.is_floating:
        raise ValueError(f"Expected floating point type, got {dtype}.")
    return dtype
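A quick sketch of the contract:

import tensorflow as tf

assert _assert_float_dtype('float16') == tf.float16
assert _assert_float_dtype(tf.float64) == tf.float64
# _assert_float_dtype(tf.int32)  # raises ValueError: not a floating point type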
Example #22
def estimate_tail(func, target, shape, dtype):
    """Estimates approximate tail quantiles."""
    dtype = tf.as_dtype(dtype)
    shape = tf.convert_to_tensor(shape, tf.int32)
    target = tf.convert_to_tensor(target, dtype)
    opt = tf.keras.optimizers.Adam(learning_rate=.1)
    tails = tf.Variable(tf.zeros(shape, dtype=dtype),
                        trainable=False,
                        name="tails")
    loss = best_loss = tf.fill(shape, tf.constant(float("inf"), dtype=dtype))
    while tf.reduce_any(loss == best_loss):
        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(tails)
            loss = abs(func(tails) - target)
        best_loss = tf.minimum(best_loss, loss)
        gradient = tape.gradient(loss, tails)
        opt.apply_gradients([(gradient, tails)])
    return tails.value()
Example #23
    def testHutchinsonsNormalEstimator(self, dtype):
        seed = 42
        tf_dtype = tf.as_dtype(dtype)
        num_dims = 10
        np.random.seed(seed=seed)
        matrix_diagonal = np.random.uniform(size=[num_dims]).astype(dtype)
        scaling_matrix = np.diag(matrix_diagonal)
        one_time_scale_matrix = np.diag(np.exp(matrix_diagonal))
        scale_ode_fn = lambda t, z: tf.linalg.matvec(scaling_matrix, z)

        def trace_augmentation_fn(ode_fn, z_shape, dtype):
            return tfb.ffjord.trace_jacobian_hutchinson(ode_fn,
                                                        z_shape,
                                                        dtype,
                                                        num_samples=128,
                                                        seed=seed)

        bijector = tfb.FFJORD(trace_augmentation_fn=trace_augmentation_fn,
                              state_time_derivative_fn=scale_ode_fn,
                              dtype=tf_dtype)
        x = np.random.uniform(size=[1, num_dims]).astype(dtype)
        y = np.matmul(x, one_time_scale_matrix)
        expected_forward_log_det_jacobian_value = np.log(
            np.prod(np.exp(matrix_diagonal)))
        expected_fldj = np.array([expected_forward_log_det_jacobian_value])
        expected_ildj = np.array([-expected_forward_log_det_jacobian_value])

        self.assertAllClose(y,
                            self.evaluate(bijector.forward(x)),
                            rtol=0.0,
                            atol=1e-3)
        self.assertAllClose(x,
                            self.evaluate(bijector.inverse(y)),
                            rtol=0.0,
                            atol=1e-3)
        self.assertAllClose(
            expected_ildj,
            self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)),
            atol=7e-1)
        self.assertAllClose(
            expected_fldj,
            self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)),
            atol=7e-1)
Example #24
    def testBijectorConditionKwargs(self, dtype):
        if not tf2.enabled():
            self.skipTest('b/152464477')

        tf_dtype = tf.as_dtype(dtype)

        def conditional_ode_fn(t, z, c):
            del t  # unused.
            return tf.ones_like(z) * c**2

        trace_augmentation_fn = tfb.ffjord.trace_jacobian_exact
        bijector = tfb.FFJORD(trace_augmentation_fn=trace_augmentation_fn,
                              state_time_derivative_fn=conditional_ode_fn,
                              dtype=tf_dtype)
        x = tf.zeros((2, 5), dtype=tf_dtype)
        y = tf.ones((2, 5), dtype=tf_dtype) * 4
        c = tf.ones((2, 5), dtype=tf_dtype) * 2
        expected_log_det_jacobian = np.zeros(2, dtype=dtype)
        expected_dy_dc = np.ones((2, 5), dtype=dtype) * 4

        def grad_fn(c):
            y = bijector.forward(x, c=c)
            return y

        dy_dc = self.evaluate(tfp_gradient.value_and_gradient(grad_fn, c)[1])

        self.assertStartsWith(bijector.name, 'ffjord')
        self.assertAllClose(self.evaluate(y),
                            self.evaluate(bijector.forward(x, c=c)),
                            atol=1e-5)
        self.assertAllClose(self.evaluate(x),
                            self.evaluate(bijector.inverse(y, c=c)),
                            atol=1e-5)
        self.assertAllClose(
            expected_log_det_jacobian,
            self.evaluate(
                bijector.inverse_log_det_jacobian(y, event_ndims=1, c=c)))
        self.assertAllClose(
            expected_log_det_jacobian,
            self.evaluate(
                bijector.forward_log_det_jacobian(x, event_ndims=1, c=c)))
        self.assertAllClose(expected_dy_dc, dy_dc)
Example #25
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        in_size = tf.compat.dimension_value(
            input_shape.with_rank_at_least(2)[-1])
        if in_size is None:
            raise ValueError('The last dimension of the inputs to `Dense` '
                             'should be defined. Found `None`.')
        self._input_spec = tf.keras.layers.InputSpec(min_ndim=2,
                                                     axes={-1: in_size})

        # If self.dtype is None, build weights using the default dtype.
        dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())

        # Must have a posterior kernel.
        self.kernel_posterior = self.kernel_posterior_fn(
            dtype, [in_size, self.units], 'kernel_posterior', self.trainable,
            self.add_variable)

        if self.kernel_prior_fn is None:
            self.kernel_prior = None
        else:
            self.kernel_prior = self.kernel_prior_fn(dtype,
                                                     [in_size, self.units],
                                                     'kernel_prior',
                                                     self.trainable,
                                                     self.add_variable)

        if self.bias_posterior_fn is None:
            self.bias_posterior = None
        else:
            self.bias_posterior = self.bias_posterior_fn(
                dtype, [self.units], 'bias_posterior', self.trainable,
                self.add_variable)

        if self.bias_prior_fn is None:
            self.bias_prior = None
        else:
            self.bias_prior = self.bias_prior_fn(dtype, [self.units],
                                                 'bias_prior', self.trainable,
                                                 self.add_variable)

        self.built = True
Example #26
 def as_signature_def(self, receiver_tensors):
     if len(receiver_tensors) != 1:
         raise ValueError(
             "Regression signatures can only accept a single tensor input of "
             "type tf.string. Please check to make sure that you have structured "
             "the serving_input_receiver_fn so that it creates a single string "
             "placeholder. If your model function expects multiple inputs, then "
             "use `tf.io.parse_example()` to parse the string into multiple "
             f"tensors.\n Received: {receiver_tensors}")
     ((_, examples), ) = receiver_tensors.items()
     if tf.as_dtype(examples.dtype) != tf.string:
         raise ValueError(
             "Regression signatures can only accept a single tensor input of "
             "type tf.string. Please check to make sure that you have structured "
             "the serving_input_receiver_fn so that it creates a single string "
             "placeholder. If your model function expects multiple inputs, then "
             "use `tf.io.parse_example()` to parse the string into multiple "
             f"tensors.\n Received: {receiver_tensors}")
     return tf.compat.v1.saved_model.regression_signature_def(
         examples, self.value)
Example #27
    def testBijector(self, dtype):
        tf_dtype = tf.as_dtype(dtype)
        move_ode_fn = lambda t, z: tf.ones_like(z)
        trace_augmentation_fn = tfb.ffjord.trace_jacobian_exact
        bijector = tfb.FFJORD(trace_augmentation_fn=trace_augmentation_fn,
                              state_time_derivative_fn=move_ode_fn,
                              dtype=tf_dtype)
        x = np.zeros((2, 5), dtype=dtype)
        y = np.ones((2, 5), dtype=dtype)
        expected_log_det_jacobian = np.zeros(2, dtype=dtype)

        self.assertStartsWith(bijector.name, 'ffjord')
        self.assertAllClose(y, self.evaluate(bijector.forward(x)))
        self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
        self.assertAllClose(
            expected_log_det_jacobian,
            self.evaluate(bijector.inverse_log_det_jacobian(y, event_ndims=1)))
        self.assertAllClose(
            expected_log_det_jacobian,
            self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=1)))
Example #28
 def as_signature_def(self, receiver_tensors):
     if len(receiver_tensors) != 1:
         raise ValueError(
             'Classification signatures can only accept a single tensor input of '
             'type tf.string. Please check to make sure that you have structured '
             'the serving_input_receiver_fn so that it creates a single string '
             'placeholder. If your model function expects multiple inputs, then '
             'use `tf.io.parse_example()` to parse the string into multiple '
             f'tensors.\n Received: {receiver_tensors}')
     (_, examples), = receiver_tensors.items()
     if tf.as_dtype(examples.dtype) != tf.string:
         raise ValueError(
             'Classification signatures can only accept a single tensor input of '
             'type tf.string. Please check to make sure that you have structured '
             'the serving_input_receiver_fn so that it creates a single string '
             'placeholder. If your model function expects multiple inputs, then '
             'use `tf.io.parse_example()` to parse the string into multiple '
             f'tensors.\n Received: {receiver_tensors}')
     return tf.compat.v1.saved_model.classification_signature_def(
         examples, self.classes, self.scores)
Example #29
    def build(self, input_shape):
        dtype = tf.as_dtype(self.dtype or backend.floatx())
        if not (dtype.is_floating or dtype.is_complex):
            raise TypeError(
                "A Dense layer can only be built with a floating-point "
                f"dtype. Received: dtype={dtype}"
            )

        input_shape = tf.TensorShape(input_shape)
        last_dim = tf.compat.dimension_value(input_shape[-1])
        if last_dim is None:
            raise ValueError(
                "The last dimension of the inputs to a Dense layer "
                "should be defined. Found None. "
                f"Full input shape received: {input_shape}"
            )
        self.input_spec = InputSpec(min_ndim=2, axes={-1: last_dim})
        self.kernel = self.add_weight(
            "kernel",
            shape=[last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                "bias",
                shape=[
                    self.units,
                ],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                dtype=self.dtype,
                trainable=True,
            )
        else:
            self.bias = None
        self.built = True
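Calling `build` directly shows the weights it creates (standard Keras behavior, not specific to this snippet):

import tensorflow as tf

layer = tf.keras.layers.Dense(4)
layer.build(tf.TensorShape([None, 8]))
print(layer.kernel.shape)  # (8, 4)
print(layer.bias.shape)    # (4,)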
Example #30
    def from_config(cls, config):
        """Instantiates an entropy model from a configuration dictionary.

    Arguments:
      config: A `dict`, typically the output of `get_config`.

    Returns:
      An entropy model.
    """
        # Instantiate a new object without calling initializers, and call the
        # superclass (tf.Module) initializer manually. Note: `cls` is a child
        # class of this one.
        self = cls.__new__(cls)  # pylint:disable=no-value-for-parameter
        super().__init__(self)

        # What follows is the alternative initializer.
        with self.name_scope:
            # pylint:disable=protected-access
            self._dtype = tf.as_dtype(config["dtype"])
            self._prior_shape = tuple(int(s) for s in config["prior_shape"])
            self._coding_rank = int(config["coding_rank"])
            self._compression = True
            self._likelihood_bound = float(config["likelihood_bound"])
            self._tail_mass = float(config["tail_mass"])
            self._range_coder_precision = int(config["range_coder_precision"])
            self._no_variables = False

            prior_size = functools.reduce(lambda x, y: x * y, self.prior_shape,
                                          1)
            cdf_width = int(config["cdf_width"])
            zeros = tf.zeros([prior_size, cdf_width], dtype=tf.int32)
            self._cdf = tf.Variable(zeros, trainable=False, name="cdf")
            self._cdf_offset = tf.Variable(zeros[:, 0],
                                           trainable=False,
                                           name="cdf_offset")
            self._cdf_length = tf.Variable(zeros[:, 0],
                                           trainable=False,
                                           name="cdf_length")
            # pylint:enable=protected-access

        return self
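A hedged round-trip sketch, assuming `model` is an instance of a concrete entropy model subclass whose `get_config` produces the keys read above:

config = model.get_config()
restored = type(model).from_config(config)
assert restored.coding_rank == model.coding_rank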