Example #1
def refine_signal(signal,
                  norm_params=None,
                  filter_config=None,
                  add_dynamic=False):
    """Refine a multi-dimensional signal.

    signal        : 2-D tensor containing the signal,
                    or 3-D tensor batching such signals
    norm_params   : optional array of normalization parameters by
                    which to scale the signal's channels
    filter_config : optional tuple specifying a signal filter for smoothing
    add_dynamic   : whether to add delta and deltadelta features
                    to the refined signal (bool, default False)
    """
    tf.assert_rank_in(signal, (2, 3))
    # Optionally de-normalize the initial signal.
    if norm_params is not None:
        signal *= norm_params
    # Optionally filter the signal.
    if filter_config is None:
        top_filter = None
    else:
        top_filter = list(
            build_layers_stack(signal, [filter_config]).values())[0]
        signal = top_filter.output
    # Optionally add dynamic features to the signal.
    if add_dynamic:
        signal = (add_dynamic_features(signal, window=5)
                  if len(signal.shape) == 2 else run_along_first_dim(
                      add_dynamic_features, signal, window=5))
    # Return the refined signal and the defined top filter, if any.
    return signal, top_filter
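A minimal usage sketch (assuming the TF 1.x graph-mode setup this example implies); with the default arguments the signal is only rank-checked and returned unchanged, so none of the helper functions are exercised:

signal = tf.placeholder(tf.float32, shape=[None, 40])  # hypothetical [time, channels] input
refined, top_filter = refine_signal(signal)            # no filtering: top_filter is None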
Example #2
def explained_variance(targets, predictions):
    targets, predictions = tf.squeeze(targets), tf.squeeze(predictions)
    tf.assert_rank_in(targets, [0, 1])
    tf.assert_rank_in(predictions, [0, 1])
    var_targets = tf.cond(tf.equal(tf.rank(targets), 0),
                          lambda: tf.constant(0, dtype=tf.float32),
                          lambda: tf.nn.moments(targets, axes=[0])[1])
    return tf.cond(
        tf.equal(var_targets, 0), lambda: tf.constant(np.nan), lambda:
        (1 - tf.nn.moments(targets - predictions, axes=[0])[1] / var_targets))
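A minimal usage sketch, assuming TF 1.x graph mode and `import numpy as np` (as the function's use of `np.nan` implies):

targets = tf.constant([1.0, 2.0, 3.0, 4.0])
predictions = tf.constant([1.1, 1.9, 3.2, 3.8])
ev = explained_variance(targets, predictions)

with tf.Session() as sess:
    print(sess.run(ev))  # roughly 0.98 for these near-perfect predictions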
Example #3
def img_shape(img, dtype=tf.int32, scope='img_shape'):
    with tf.name_scope(scope):
        with tf.control_dependencies([tf.assert_rank_in(img, (3, 4))]):
            result = tf.cond(pred=tf.equal(tf.rank(img), 3),
                             true_fn=lambda: tf.shape(img)[:2],
                             false_fn=lambda: tf.shape(img)[1:3])
            return tf.cast(result, dtype)
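A minimal usage sketch, assuming TF 1.x: for a batched (rank-4) image tensor the spatial dimensions are returned.

batch = tf.zeros([8, 224, 224, 3])  # hypothetical NHWC batch
hw = img_shape(batch)               # [224, 224], dtype tf.int32

with tf.Session() as sess:
    print(sess.run(hw))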
Example #4
    def call(self, similarity, mask=None):
        """
            Args:
                  similarity: a Tensor with shape [batch_size, heads (optional), q/k_length, q/k_length]
                  mask: a Tensor with shape [batch_size, q/k_length, q/k_length]

            Returns:
                masked_similarity: a Tensor with shape [batch_size, heads (optional), q/k_length, q/k_length]
        """
        if mask is None:
            return similarity

        similarity_rank_assert = tf.assert_rank_in(similarity, (3, 4))
        mask_rank_assert = tf.assert_rank(mask, 3)

        # A mask can be constructed in many different ways for many different
        # reasons, so we don't want to infer a particular construction.
        with tf.control_dependencies([similarity_rank_assert, mask_rank_assert]):
            # If shapes don't match, then similarity has been split for multi-headed attention
            if len(mask.shape) != len(similarity.shape):
                similarity[:, 0].shape.assert_is_compatible_with(mask.shape)
                mask = mask[:, None]
            else:
                similarity.shape.assert_is_compatible_with(mask.shape)

            # The similarity is passed through a softmax later, so adding a relatively
            # large negative value to the masked positions avoids a Hadamard product
            # (though this is probably not any more efficient in terms of operations).
            bias = -1e9 * tf.cast(tf.logical_not(mask), tf.float32)
            masked_similarity = similarity + bias
            return masked_similarity
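A standalone sketch of the additive-bias trick used above, with hypothetical values (batch_size=1, q/k_length=3, no heads dimension):

similarity = tf.constant([[[2.0, 1.0, 0.5],
                           [0.3, 0.7, 1.2],
                           [0.1, 0.4, 0.9]]])
mask = tf.constant([[[True,  True,  False],
                     [True,  True,  False],
                     [True,  False, False]]])

# Masked-out positions receive a large negative bias, so the softmax that
# follows drives their attention weights to (effectively) zero.
bias = -1e9 * tf.cast(tf.logical_not(mask), tf.float32)
weights = tf.nn.softmax(similarity + bias, axis=-1)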
Example #5
def gaussian_mixture_density(data, priors, means, stds):
    """Evaluate gaussian mixtures of given parameters' density at given points.

    The evaluated gaussian distributions may be univariate or multivariate.
    For multivariate cases, no covariance between terms is considered.
    Data may also be made of batched sequences of multivariate points
    (3-D tensor of dimensions [n_batches, batches_length, n_dim]).

    data   : points at which to evaluate each density function
             (tensorflow.Tensor of rank r in [1, 3])
    priors : prior probability of each mixture component,
             for each mixture (tensorflow.Tensor of rank max(r, 2))
    means  : mean of each mixture component, for each mixture
             (tensorflow.Tensor of rank r + 1)
    stds   : standard deviation of each mixture component,
             for each mixture (tensorflow.Tensor of rank r + 1)

    Return a 1-D Tensor gathering point-wise density for data made
    of a single sequence (rank in [1, 2]), or a 2-D Tensor gathering
    sequence-wise point-wise density for batched data (rank 3).
    """
    tf.assert_rank_in(data, [1, 2, 3])
    rank = tf.rank(data) + 1
    with tf.control_dependencies([
            tf.assert_rank(priors, tf.maximum(2, rank - 1)),
            tf.assert_rank(means, rank),
            tf.assert_rank(stds, rank)
    ]):
        # Handle the univariate density case.
        if len(data.shape) == 1 or data.shape[1].value == 1:
            return tf.reduce_sum(priors * gaussian_density(data, means, stds),
                                 axis=1)
        # Handle the multivariate density case.
        data = tf.expand_dims(data, -2)
        if len(stds.shape) == 2:
            stds = tf.expand_dims(stds, 2)
        densities = tf.reduce_prod(
            gaussian_density(data, means, stds), axis=-1)
        return tf.reduce_sum(priors * densities, axis=-1)
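The example relies on a `gaussian_density` helper that is not shown. A minimal sketch of a compatible element-wise implementation (an assumption, not the original) might look like:

import numpy as np

def gaussian_density(data, means, stds):
    """Element-wise density of N(means, stds**2) evaluated at `data`."""
    return (tf.exp(-0.5 * tf.square((data - means) / stds))
            / (stds * np.sqrt(2 * np.pi)))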
Example #6
def neg_log_likelihood_unit_gaussian(predictions,
                                     labels=None,
                                     mean_batch=True,
                                     name='neg_log_likelihood_unit_gaussian'):
    """ Negative log likelihood of a unit Gaussian distribution
        nll = 0.5 * [ (labels - predictions) (labels - predictions)^T + k*log(2*pi) ]
        Predictions and labels are batches of Nx[S]xK data
    """
    with tf.name_scope(name):
        tf.assert_rank_in(predictions, [2, 3],
                          message="predictions must have rank 2 or 3")
        if labels is not None:
            tf.assert_rank_in(labels, [2, 3],
                              message="labels must have rank 2 or 3")

        k_log_2_pi = _get_k_log_2_pi(None, predictions)
        if labels is None:
            squared_error = tf.square(predictions)
        else:
            squared_error = tf.squared_difference(labels, predictions)
        squared_error = tf.reduce_sum(squared_error, axis=-1)
        if mean_batch:
            squared_error = tf.reduce_mean(squared_error)
        return 0.5 * (squared_error + k_log_2_pi)
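The `_get_k_log_2_pi` helper is not shown. A hypothetical stand-in consistent with the docstring's `k*log(2*pi)` term, assuming a statically known last dimension, could be:

import numpy as np

def _get_k_log_2_pi(k, tensor):
    """Return k * log(2*pi), inferring k from the tensor's last dim if None."""
    if k is None:
        k = int(tensor.shape[-1])  # assumes the last dimension is statically known
    return k * np.log(2.0 * np.pi)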
Example #7
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
    """Compute the `q`-th percentile(s) of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.

  Multiple percentiles can be computed at once by using `1-D` vector `q`.
  Dimension zero of the returned `Tensor` will index the different percentiles.


  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th and 70th percentiles with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=[30., 70.], interpolation='lower')
  ==> [1., 3.]

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values. The
      axis that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {'lower', 'higher', 'nearest'}.  Default: 'nearest'.  This
      optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity. If
      False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is 'percentile'

  Returns:
    A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,
      if `axis` is `None`, a `rank(q)` `Tensor`.  The first `rank(q)` dimensions
      index quantiles for different values of `q`.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
  """
    name = name or 'percentile'
    allowed_interpolations = {'lower', 'higher', 'nearest'}

    if interpolation is None:
        interpolation = 'nearest'
    else:
        if interpolation not in allowed_interpolations:
            raise ValueError(
                'Argument `interpolation` must be in %s.  Found %s' %
                (allowed_interpolations, interpolation))

    with tf.name_scope(name, values=[x, q]):
        x = tf.convert_to_tensor(x, name='x')
        # Double is needed here and below, else we get the wrong index if the array
        # is huge along axis.
        q = tf.to_double(q, name='q')
        _get_static_ndims(q, expect_ndims_no_more_than=1)

        if validate_args:
            q = control_flow_ops.with_dependencies([
                tf.assert_rank_in(q, [0, 1]),
                tf.assert_greater_equal(q, tf.to_double(0.)),
                tf.assert_less_equal(q, tf.to_double(100.))
            ], q)

        if axis is None:
            y = tf.reshape(x, [-1])
        else:
            axis = tf.convert_to_tensor(axis, name='axis')
            tf.assert_integer(axis)
            axis_ndims = _get_static_ndims(axis,
                                           expect_static=True,
                                           expect_ndims_no_more_than=1)
            axis_const = tensor_util.constant_value(axis)
            if axis_const is None:
                raise ValueError(
                    'Expected argument `axis` to be statically available.  Found: %s'
                    % axis)
            axis = axis_const
            if axis_ndims == 0:
                axis = [axis]
            axis = [int(a) for a in axis]
            x_ndims = _get_static_ndims(x,
                                        expect_static=True,
                                        expect_ndims_at_least=1)
            axis = _make_static_axis_non_negative(axis, x_ndims)
            # Move dims in axis to the end, since _sort_tensor, which calls top_k,
            # only sorts the last dim.
            y = _move_dims_to_flat_end(x, axis, x_ndims)

        frac_at_q_or_above = 1. - q / 100.
        d = tf.to_double(tf.shape(y)[-1])

        if interpolation == 'lower':
            indices = tf.ceil((d - 1) * frac_at_q_or_above)
        elif interpolation == 'higher':
            indices = tf.floor((d - 1) * frac_at_q_or_above)
        elif interpolation == 'nearest':
            indices = tf.round((d - 1) * frac_at_q_or_above)

        # If d is gigantic, then we would have d == d - 1, even in double... So
        # let's use max/min to avoid out of bounds errors.
        d = tf.shape(y)[-1]
        # d - 1 will be distinct from d in int32.
        indices = tf.clip_by_value(tf.to_int32(indices), 0, d - 1)

        # Sort everything, not just the top 'k' entries, which allows multiple calls
        # to sort only once (under the hood) and use CSE.
        sorted_y = _sort_tensor(y)

        # Gather the indices along the sorted (last) dimension.
        # If q is a vector, the last dim of gathered_y indexes different q[i].
        gathered_y = tf.gather(sorted_y, indices, axis=-1)

        if keep_dims:
            if axis is None:
                ones_vec = tf.ones(shape=[
                    _get_best_effort_ndims(x) + _get_best_effort_ndims(q)
                ],
                                   dtype=tf.int32)
                gathered_y *= tf.ones(ones_vec, dtype=x.dtype)
            else:
                gathered_y = _insert_back_keep_dims(gathered_y, axis)

        # If q is a scalar, then result has the right shape.
        # If q is a vector, then result has trailing dim of shape q.shape, which
        # needs to be rotated to dim 0.
        return util.rotate_transpose(gathered_y, tf.rank(q))
Example #8
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               preserve_gradients=True,
               name=None):
    """Compute the `q`-th percentile(s) of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.

  Multiple percentiles can be computed at once by using `1-D` vector `q`.
  Dimension zero of the returned `Tensor` will index the different percentiles.

  Compare to `numpy.percentile`.

  Args:
    x:  Numeric `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s).
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values. The
      axis that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
      Default value: 'nearest'.  This specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * linear: i + (j - i) * fraction, where fraction is the fractional part
          of the index surrounded by i and j.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
        * midpoint: (i + j) / 2.
      `linear` and `midpoint` interpolation do not work with integer dtypes.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1.
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity. If
      False, and arguments are incorrect, correct behavior is not guaranteed.
    preserve_gradients:  Python `bool`.  If `True`, ensure that gradient w.r.t
      the percentile `q` is preserved in the case of linear interpolation.
      If `False`, the gradient will be (incorrectly) zero when `q` corresponds
      to a point in `x`.
    name:  A Python string name to give this `Op`.  Default is 'percentile'

  Returns:
    A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or,
      if `axis` is `None`, a `rank(q)` `Tensor`.  The first `rank(q)` dimensions
      index quantiles for different values of `q`.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
    ValueError:  If interpolation type not compatible with `dtype`.

  #### Examples

  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  tfp.stats.percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'linear' interpolation.
  x = [1., 2., 3., 4.]
  tfp.stats.percentile(x, q=30., interpolation='linear')
  ==> 1.9

  # Get 30th and 70th percentiles with 'lower' interpolation
  x = [1., 2., 3., 4.]
  tfp.stats.percentile(x, q=[30., 70.], interpolation='lower')
  ==> [1., 3.]

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.]
       [3., 4.]]
  tfp.stats.percentile(x, q=100.)
  ==> 4.

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.]
       [3., 4.]]
  tfp.stats.percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  """
    name = name or 'percentile'
    allowed_interpolations = {
        'linear', 'lower', 'higher', 'nearest', 'midpoint'
    }

    if interpolation is None:
        interpolation = 'nearest'
    else:
        if interpolation not in allowed_interpolations:
            raise ValueError(
                'Argument `interpolation` must be in %s.  Found %s' %
                (allowed_interpolations, interpolation))

    with tf.name_scope(name, values=[x, q]):
        x = tf.convert_to_tensor(x, name='x')

        if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer:
            raise TypeError(
                '{} interpolation not allowed with dtype {}'.format(
                    interpolation, x.dtype))

        # Double is needed here and below, else we get the wrong index if the array
        # is huge along axis.
        q = tf.cast(q, tf.float64)
        _get_static_ndims(q, expect_ndims_no_more_than=1)

        if validate_args:
            q = control_flow_ops.with_dependencies([
                tf.assert_rank_in(q, [0, 1]),
                tf.assert_greater_equal(q, tf.cast(0., tf.float64)),
                tf.assert_less_equal(q, tf.cast(100., tf.float64))
            ], q)

        # Move `axis` dims of `x` to the rightmost, call it `y`.
        if axis is None:
            y = tf.reshape(x, [-1])
        else:
            x_ndims = _get_static_ndims(x,
                                        expect_static=True,
                                        expect_ndims_at_least=1)
            axis = _make_static_axis_non_negative_list(axis, x_ndims)
            y = _move_dims_to_flat_end(x, axis, x_ndims)

        frac_at_q_or_above = 1. - q / 100.

        # Sort everything, not just the top 'k' entries, which allows multiple calls
        # to sort only once (under the hood) and use CSE.
        sorted_y = _sort_tensor(y)

        d = tf.cast(tf.shape(y)[-1], tf.float64)

        def _get_indices(interp_type):
            """Get values of y at the indices implied by interp_type."""
            # Note `lower` <--> ceiling.  Confusing, huh?  Due to the fact that
            # _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher
            # index, but the lower value of y!
            if interp_type == 'lower':
                indices = tf.ceil((d - 1) * frac_at_q_or_above)
            elif interp_type == 'higher':
                indices = tf.floor((d - 1) * frac_at_q_or_above)
            elif interp_type == 'nearest':
                indices = tf.round((d - 1) * frac_at_q_or_above)
            # d - 1 will be distinct from d in int32, but not necessarily double.
            # So clip to avoid out of bounds errors.
            return tf.clip_by_value(tf.cast(indices, tf.int32), 0,
                                    tf.shape(y)[-1] - 1)

        if interpolation in ['nearest', 'lower', 'higher']:
            gathered_y = tf.gather(sorted_y,
                                   _get_indices(interpolation),
                                   axis=-1)
        elif interpolation == 'midpoint':
            gathered_y = 0.5 * (
                tf.gather(sorted_y, _get_indices('lower'), axis=-1) +
                tf.gather(sorted_y, _get_indices('higher'), axis=-1))
        elif interpolation == 'linear':
            # Copy-paste of docstring on interpolation:
            # linear: i + (j - i) * fraction, where fraction is the fractional part
            # of the index surrounded by i and j.
            larger_y_idx = _get_indices('lower')
            exact_idx = (d - 1) * frac_at_q_or_above
            if preserve_gradients:
                # If q corresponds to a point in x, we will initially have
                # larger_y_idx == smaller_y_idx.
                # This results in the gradient w.r.t. fraction being zero (recall `q`
                # enters only through `fraction`...and see that things cancel).
                # The fix is to ensure that smaller_y_idx and larger_y_idx are always
                # separated by exactly 1.
                smaller_y_idx = tf.maximum(larger_y_idx - 1, 0)
                larger_y_idx = tf.minimum(smaller_y_idx + 1,
                                          tf.shape(y)[-1] - 1)
                fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx
            else:
                smaller_y_idx = _get_indices('higher')
                fraction = tf.ceil((d - 1) * frac_at_q_or_above) - exact_idx

            fraction = tf.cast(fraction, y.dtype)
            gathered_y = (
                tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) +
                tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction)

        if keep_dims:
            if axis is None:
                ones_vec = tf.ones(shape=[
                    _get_best_effort_ndims(x) + _get_best_effort_ndims(q)
                ],
                                   dtype=tf.int32)
                gathered_y *= tf.ones(ones_vec, dtype=x.dtype)
            else:
                gathered_y = _insert_back_keep_dims(gathered_y, axis)

        # If q is a scalar, then result has the right shape.
        # If q is a vector, then result has trailing dim of shape q.shape, which
        # needs to be rotated to dim 0.
        return distribution_util.rotate_transpose(gathered_y, tf.rank(q))
Example #9
def _make_rank2(x: tf.Tensor):
    with tf.control_dependencies([tf.assert_rank_in(x, (1, 2))]):
        result = tf.reshape(x, [tf.shape(x)[0], -1])
    return result
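A minimal usage sketch: a rank-1 vector is reshaped to a rank-2 column, while a rank-2 input keeps its shape.

vec = tf.range(5, dtype=tf.float32)  # shape [5]
col = _make_rank2(vec)               # shape [5, 1]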
Example #10
def denseCaps(inputs, caps, dims, iterations=2, name=None):
    with tf.variable_scope(name, default_name='denseCaps'):

        # There are two possible inputs to a denseCaps layer:
        #
        # 1. a flat (batch_size, caps_in, dims_in) tensor of capsules; this
        #   happens when denseCaps layers are stacked on one another
        #
        # 2. a (batch_size, height, width, filters, dims_in) tensor of spatial
        #   capsule filters; this happens when a denseCaps layer is stacked on 
        #   a convCaps/primaryCaps layer

        # Assert that the input belongs to either of the two cases
        tf.assert_rank_in(
            inputs,
            ranks=[3, 5],
            message='''`inputs` must either be a flat tensor of capsules (i.e.
                    of shape (batch_size, caps_in, dims_in)) or a tensor of
                    capsule filters (i.e. of shape 
                    (batch_size, height, width, filters, dims))'''
        )

        # Both cases can be dealt with by reshaping the input tensor to a 
        # tensor of 1x1 filters, containing one input capsule each, and
        # performing a 1x1 capsule convolution on them with the number of output 
        # filters being the number of desired output capsules
        
        # Compute the number of input 1x1-filters
        inputs_rank = len(inputs.shape)

        if inputs_rank == 3:
            # Case 1 (flat tensor of capsules):
            _, filters_in, dims_in = inputs.shape.as_list()
            
        elif inputs_rank == 5:
            # Case 2 (tensor of spatial capsule filters) 
            _, height, width, filters, dims_in = inputs.shape.as_list()
            filters_in = height * width * filters

        # Reshape input tensor to tensor of 1x1-filters
        inputs_filters = tf.reshape(
            inputs,
            [-1, 1, 1, filters_in, dims_in],
            name='inputs_filters'
        )

        # Perform the 1x1 capsule convolution on the reshaped input tensor
        # This yields a (batch_size, 1, 1, caps, dims) shaped output tensor
        # of 1x1 filters containing one output capsule each
        outputs = _caps_conv2d(
            inputs_filters, 
            filters_out=caps, 
            dims_out=dims, 
            kernel_size=(1, 1), 
            strides=(1, 1), 
            iterations=iterations
        )

        # Reshape to a flat tensor of output capsules
        outputs = tf.reshape(
            outputs,
            [-1, caps, dims],
            name='outputs'
        )

    return outputs
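A standalone sketch of the reshape step described above, with hypothetical shapes: a spatial tensor of capsule filters is flattened into a tensor of 1x1 filters before the capsule convolution (`_caps_conv2d` itself is not shown here).

spatial_caps = tf.zeros([8, 6, 6, 32, 8])                            # (batch, height, width, filters, dims)
flat_filters = tf.reshape(spatial_caps, [-1, 1, 1, 6 * 6 * 32, 8])   # (batch, 1, 1, caps_in, dims)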