def _build_test_model(self,
                          num_timesteps=5,
                          num_features=2,
                          batch_shape=(),
                          missing_prob=0,
                          true_noise_scale=0.1,
                          true_level_scale=0.04,
                          true_slope_scale=0.02,
                          prior_class=tfd.InverseGamma,
                          dtype=tf.float32):
        seed = test_util.test_seed(sampler_type='stateless')
        (design_seed, weights_seed, noise_seed, level_seed, slope_seed,
         is_missing_seed) = samplers.split_seed(seed,
                                                6,
                                                salt='_build_test_model')

        design_matrix = samplers.normal([num_timesteps, num_features],
                                        dtype=dtype,
                                        seed=design_seed)
        weights = samplers.normal(list(batch_shape) + [num_features],
                                  dtype=dtype,
                                  seed=weights_seed)
        regression = tf.linalg.matvec(design_matrix, weights)
        noise = samplers.normal(list(batch_shape) + [num_timesteps],
                                dtype=dtype,
                                seed=noise_seed) * true_noise_scale

        level_residuals = samplers.normal(list(batch_shape) + [num_timesteps],
                                          dtype=dtype,
                                          seed=level_seed) * true_level_scale
        if true_slope_scale is not None:
            slope = tf.cumsum(
                samplers.normal(list(batch_shape) + [num_timesteps],
                                dtype=dtype,
                                seed=slope_seed) * true_slope_scale,
                axis=-1)
            level_residuals += slope
        level = tf.cumsum(level_residuals, axis=-1)
        time_series = (regression + noise + level)
        is_missing = samplers.uniform(list(batch_shape) + [num_timesteps],
                                      dtype=dtype,
                                      seed=is_missing_seed) < missing_prob

        model = gibbs_sampler.build_model_for_gibbs_fitting(
            observed_time_series=tfp.sts.MaskedTimeSeries(
                time_series[..., tf.newaxis], is_missing),
            design_matrix=design_matrix,
            weights_prior=tfd.Normal(loc=tf.cast(0., dtype),
                                     scale=tf.cast(10.0, dtype)),
            level_variance_prior=prior_class(concentration=tf.cast(
                0.01, dtype),
                                             scale=tf.cast(0.01 * 0.01,
                                                           dtype)),
            slope_variance_prior=None if true_slope_scale is None else
            prior_class(concentration=tf.cast(0.01, dtype),
                        scale=tf.cast(0.01 * 0.01, dtype)),
            observation_noise_variance_prior=prior_class(
                concentration=tf.cast(0.01, dtype),
                scale=tf.cast(0.01 * 0.01, dtype)))
        return model, time_series, is_missing
Example #2
def _lower_triangular_mask(shape):
  """Creates a lower-triangular boolean mask over the last 2 dimensions."""
  row_index = tf.cumsum(
      tf.ones(shape=shape, dtype=tf.int32), axis=-2)
  col_index = tf.cumsum(
      tf.ones(shape=shape, dtype=tf.int32), axis=-1)
  return tf.greater_equal(row_index, col_index)
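
A quick check of the construction (a standalone sketch; only the module-level `tf` import is assumed):

# Row indices count 1, 2, 3 down each column and column indices count 1, 2, 3
# along each row, so `row >= col` marks the lower triangle including the diagonal.
mask = _lower_triangular_mask([3, 3])
# [[ True, False, False],
#  [ True,  True, False],
#  [ True,  True,  True]]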
Example #3
def wasserstein_distance(u_values, v_values, u_weights, v_weights, p=1.0):
    """Differentiable 1-D Wasserstein distance.

  Adapted from the scipy.stats implementation.
  Args:
    u_values: Samples from distribution `u`. Shape [batch_shape, n_samples].
    v_values: Samples from distribution `v`. Shape [batch_shape, n_samples].
    u_weights: Sample weights. Shape [batch_shape, n_samples].
    v_weights: Sample weights. Shape [batch_shape, n_samples].
    p: Degree of the distance norm. Wasserstein=1, Energy=2.

  Returns:
    The Wasserstein distance between samples. Shape [batch_shape].
  """
    u_sorter = tf.argsort(u_values, axis=-1)
    v_sorter = tf.argsort(v_values, axis=-1)

    all_values = tf.concat([u_values, v_values], axis=-1)
    all_values = tf.sort(all_values, axis=-1)

    # Compute the differences between pairs of successive values of u and v.
    deltas = spectral_ops.diff(all_values, axis=-1)

    # Get the respective positions of the values of u and v among the values of
    # both distributions.
    batch_dims = len(u_values.shape) - 1
    gather = lambda x, i: tf.gather(x, i, axis=-1, batch_dims=batch_dims)
    u_cdf_indices = tf.searchsorted(gather(u_values, u_sorter),
                                    all_values[..., :-1],
                                    side='right')
    v_cdf_indices = tf.searchsorted(gather(v_values, v_sorter),
                                    all_values[..., :-1],
                                    side='right')

    # Calculate the CDFs of u and v using their weights, if specified.
    if u_weights is None:
        u_cdf = u_cdf_indices / float(u_values.shape[-1])
    else:
        u_sorted_cumweights = tf.concat([
            tf.zeros_like(u_weights)[..., 0:1],
            tf.cumsum(gather(u_weights, u_sorter), axis=-1)
        ],
                                        axis=-1)
        u_cdf = gather(u_sorted_cumweights, u_cdf_indices)
        u_cdf = safe_divide(u_cdf, u_sorted_cumweights[..., -1:])

    if v_weights is None:
        v_cdf = v_cdf_indices / float(v_values.shape[-1])
    else:
        v_sorted_cumweights = tf.concat([
            tf.zeros_like(v_weights)[..., 0:1],
            tf.cumsum(gather(v_weights, v_sorter), axis=-1)
        ],
                                        axis=-1)
        v_cdf = gather(v_sorted_cumweights, v_cdf_indices)
        v_cdf = safe_divide(v_cdf, v_sorted_cumweights[..., -1:])

    # Compute the value of the integral based on the CDFs.
    return tf.reduce_sum(deltas * tf.abs(u_cdf - v_cdf)**p, axis=-1)**(1.0 / p)
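
A minimal call, as a sketch; it assumes the module-level `spectral_ops` and `safe_divide` helpers referenced above and the shapes named in the docstring:

u = tf.random.normal([2, 1000])          # [batch_shape, n_samples]
v = tf.random.normal([2, 1000]) + 1.0    # v is u's distribution shifted by 1
w = tf.ones([2, 1000])                   # uniform sample weights
d = wasserstein_distance(u, v, u_weights=w, v_weights=w, p=1.0)
# d has shape [2]; for a pure location shift the 1-Wasserstein distance is
# approximately the size of the shift, so each entry is close to 1.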
def get_dense_is_inside_for_dense_spans(
        dense_start_positions: tf.Tensor,
        dense_end_positions: tf.Tensor) -> tf.Tensor:
    """Dense mask whether position is inside span given dense starts / ends."""
    # `tf.cumsum(dense_start_positions)[i]` computes how many spans start before
    # or on the i-th position.
    # `tf.cumsum(dense_end_positions, exclusive=True)[i]` computes how many spans
    # end strictly before the i-th position.
    # Their difference is how many spans start before or on the i-th position and
    # end on or after it, which is precisely how many spans contain the
    # i-th position.
    is_inside_span = (tf.cumsum(dense_start_positions) -
                      tf.cumsum(dense_end_positions, exclusive=True))
    # Adjust for the case of overlapping spans
    is_inside_span = tf.minimum(is_inside_span, 1)
    return is_inside_span
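
A worked example of the cumsum bookkeeping, reading the dense inputs as one count per position (my reading of the encoding, not stated explicitly above):

# Spans [1, 3] and [2, 4] (inclusive) over 6 positions.
starts = tf.constant([0, 1, 1, 0, 0, 0])   # spans start at positions 1 and 2
ends = tf.constant([0, 0, 0, 1, 1, 0])     # spans end at positions 3 and 4
get_dense_is_inside_for_dense_spans(starts, ends)
# cumsum(starts)          = [0, 1, 2, 2, 2, 2]
# cumsum(ends, exclusive) = [0, 0, 0, 0, 1, 2]
# difference, capped at 1 = [0, 1, 1, 1, 1, 0]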
Example #5
    def _cdf(self, k):
        # TODO(b/135263541): Improve numerical precision of categorical.cdf.
        probs = self.probs_parameter()
        num_categories = self._num_categories(probs)

        k, probs = _broadcast_cat_event_and_params(
            k, probs, base_dtype=dtype_util.base_dtype(self.dtype))

        # Since the lowest number in the support is 0, any k < 0 should be zero in
        # the output.
        should_be_zero = k < 0

        # Will use k as an index in the gather below, so clip it to {0,...,K-1}.
        k = tf.clip_by_value(tf.cast(k, tf.int32), 0, num_categories - 1)

        batch_shape = tf.shape(k)

        # tf.gather(..., batch_dims=batch_dims) requires static batch_dims kwarg, so
        # to handle the case where the batch shape is dynamic, flatten the batch
        # dims (so we know batch_dims=1).
        k_flat_batch = tf.reshape(k, [-1])
        probs_flat_batch = tf.reshape(
            probs, tf.concat(([-1], [num_categories]), axis=0))

        cdf_flat = tf.gather(tf.cumsum(probs_flat_batch, axis=-1),
                             k_flat_batch[..., tf.newaxis],
                             batch_dims=1)

        cdf = tf.reshape(cdf_flat, shape=batch_shape)

        zero = np.array(0, dtype=dtype_util.as_numpy_dtype(cdf.dtype))
        return tf.where(should_be_zero, zero, cdf)
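
The gather-on-cumsum step in isolation (a standalone sketch; only `tf` is assumed):

probs = tf.constant([[0.2, 0.3, 0.5],
                     [0.1, 0.6, 0.3]])
k = tf.constant([[1], [2]])
tf.gather(tf.cumsum(probs, axis=-1), k, batch_dims=1)
# -> [[0.5], [1.0]]: P[X <= 1] under the first row, P[X <= 2] under the second.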
def boolean_mask(boxlist, indicator, fields=None, scope=None,
                 use_static_shapes=False, indicator_sum=None):
  """Select boxes from BoxList according to indicator and return new BoxList.

  `boolean_mask` returns the subset of boxes that are marked as "True" by the
  indicator tensor. By default, `boolean_mask` returns boxes corresponding to
  the input index list, as well as all additional fields stored in the boxlist
  (indexing into the first dimension).  However one can optionally only draw
  from a subset of fields.

  Args:
    boxlist: BoxList holding N boxes
    indicator: a rank-1 boolean tensor
    fields: (optional) list of fields to also gather from.  If None (default),
      all fields are gathered from.  Pass an empty fields list to only gather
      the box coordinates.
    scope: name scope.
    use_static_shapes: Whether to use an implementation with static shape
      guarantees.
    indicator_sum: An integer containing the sum of the `indicator` vector.
      Only required if `use_static_shapes` is True.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indicator
  Raises:
    ValueError: if `indicator` is not a rank-1 boolean tensor.
  """
  with tf.name_scope(scope, 'BooleanMask'):
    if indicator.shape.ndims != 1:
      raise ValueError('indicator should have rank 1')
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be a boolean tensor')
    if use_static_shapes:
      if not (indicator_sum and isinstance(indicator_sum, int)):
        raise ValueError('`indicator_sum` must be of type int')
      selected_positions = tf.cast(indicator, dtype=tf.float32)
      indexed_positions = tf.cast(
          tf.multiply(
              tf.cumsum(selected_positions), selected_positions),
          dtype=tf.int32)
      one_hot_selector = tf.one_hot(
          indexed_positions - 1, indicator_sum, dtype=tf.float32)
      sampled_indices = tf.cast(
          tf.tensordot(
              tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
              one_hot_selector,
              axes=[0, 0]),
          dtype=tf.int32)
      return gather(boxlist, sampled_indices, use_static_shapes=True)
    else:
      subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
      if fields is None:
        fields = boxlist.get_extra_fields()
      for field in fields:
        if not boxlist.has_field(field):
          raise ValueError('boxlist must contain all specified fields')
        subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
        subboxlist.add_field(field, subfieldlist)
      return subboxlist
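
The static-shape branch builds gather indices from a cumulative sum instead of `tf.where`; a standalone sketch of that trick with `indicator_sum = 3` (only `tf` assumed):

indicator = tf.constant([True, False, True, True])
selected = tf.cast(indicator, tf.float32)
indexed = tf.cast(tf.cumsum(selected) * selected, tf.int32)   # [1, 0, 2, 3]
selector = tf.one_hot(indexed - 1, 3, dtype=tf.float32)       # -1 maps to all zeros
sampled_indices = tf.cast(
    tf.tensordot(tf.cast(tf.range(4), tf.float32), selector, axes=[0, 0]),
    tf.int32)
# -> [0, 2, 3]: the True positions, in a tensor whose length (3) is static.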
Example #7
def oscillator_bank(frequency_envelopes: tf.Tensor,
                    amplitude_envelopes: tf.Tensor,
                    sample_rate: int = 16000) -> tf.Tensor:
    """Generates audio from sample-wise frequencies for a bank of oscillators.

    Args:
      frequency_envelopes: Sample-wise oscillator frequencies (Hz). Shape
        [batch_size, n_samples, n_sinusoids].
      amplitude_envelopes: Sample-wise oscillator amplitude. Shape [batch_size,
        n_samples, n_sinusoids].
      sample_rate: Sample rate in samples per second.

    Returns:
      wav: Sample-wise audio, summed over oscillators.
        Shape [batch_size, n_samples].
    """
    frequency_envelopes = tf_float32(frequency_envelopes)
    amplitude_envelopes = tf_float32(amplitude_envelopes)

    # Don't exceed Nyquist.
    amplitude_envelopes = remove_above_nyquist(frequency_envelopes,
                                               amplitude_envelopes,
                                               sample_rate)

    # Change Hz to radians per sample.
    omegas = frequency_envelopes * (2.0 * np.pi)  # rad / sec
    omegas = omegas / float(sample_rate)  # rad / sample

    # Accumulate phase and synthesize.
    phases = tf.cumsum(omegas, axis=1)
    wavs = tf.sin(phases)
    harmonic_audio = amplitude_envelopes * wavs  # [mb, n_samples, n_sinusoids]
    audio = tf.reduce_sum(harmonic_audio, axis=-1)  # [mb, n_samples]
    return audio
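
The phase-accumulation core in isolation (a sketch that skips the `tf_float32` and `remove_above_nyquist` helpers used above; one constant 440 Hz oscillator):

sample_rate = 16000
freqs = 440.0 * tf.ones([1, sample_rate, 1])   # [batch, n_samples, n_sinusoids]
amps = 0.5 * tf.ones([1, sample_rate, 1])
omegas = freqs * (2.0 * np.pi) / float(sample_rate)   # rad / sample
audio = tf.reduce_sum(amps * tf.sin(tf.cumsum(omegas, axis=1)), axis=-1)
# One second of a 440 Hz sine at amplitude 0.5, shape [1, 16000].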
Example #8
 def testGradient(self):
     x = tf.convert_to_tensor(np.arange(10)[np.newaxis, ...] / 10.0 - 0.5,
                              dtype=tf.float64)
     jac_naive = batch_jacobian(lambda t: tf.cumsum(tf.exp(t), axis=-1), x)
     jac_fused = batch_jacobian(
         lambda t: tf.exp(tfp.math.log_cumsum_exp(t, axis=-1)), x)
     self.assertAllClose(jac_naive, jac_fused)
Example #9
    def _log_prob(self, x):
        scores = tf.convert_to_tensor(self.scores)
        event_size = self._event_size(scores)

        x = tf.cast(x, self.dtype)
        # Broadcast scores or x if need be.
        if (not tensorshape_util.is_fully_defined(x.shape)
                or not tensorshape_util.is_fully_defined(scores.shape)
                or x.shape != scores.shape):
            broadcast_shape = tf.broadcast_dynamic_shape(
                tf.shape(scores), tf.shape(x))
            scores = tf.broadcast_to(scores, broadcast_shape)
            x = tf.broadcast_to(x, broadcast_shape)
        scores_shape = tf.shape(scores)[:-1]
        scores_2d = tf.reshape(scores, [-1, event_size])
        x_2d = tf.reshape(x, [-1, event_size])

        rearranged_scores = tf.gather(scores_2d, x_2d, batch_dims=1)
        normalization_terms = tf.cumsum(rearranged_scores,
                                        axis=-1,
                                        reverse=True)
        ret = tf.math.reduce_sum(tf.math.log(rearranged_scores /
                                             normalization_terms),
                                 axis=-1)
        # Reshape back to user-supplied batch and sample dims prior to 2D reshape.
        ret = tf.reshape(ret, scores_shape)
        return ret
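
The reverse-cumsum normalization in isolation, for a single ranking (a standalone sketch; only `tf` is assumed):

scores = tf.constant([2.0, 1.0, 1.0])
ranking = tf.constant([1, 0, 2])               # item 1 first, then item 0, then 2
s = tf.gather(scores, ranking)                 # [1.0, 2.0, 1.0]
norm = tf.cumsum(s, axis=-1, reverse=True)     # [4.0, 3.0, 1.0]
tf.reduce_sum(tf.math.log(s / norm))
# -> log(1/4) + log(2/3) + log(1/1) = log(1/6)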
Example #10
def cumsum(a, axis=None, dtype=None):
  """Returns cumulative sum of `a` along an axis or the flattened array.

  Uses `tf.cumsum`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional. Axis along which to compute sums. If None, operation is
      performed on the flattened array.
    dtype: Optional. The type of the output array. If None, defaults to the
      dtype of `a` unless `a` is an integer type with precision less than `int`
      in which case the output type is `int`.

  Returns:
    An ndarray with the same number of elements as `a`. If `axis` is None, the
    output is a 1-d array, else it has the same shape as `a`.
  """
  a = array_creation.asarray(a, dtype=dtype)

  if dtype is None and tf.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    output_type = np.promote_types(a.dtype, int)
    if output_type != a.dtype:
      a = array_creation.asarray(a, dtype=output_type)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  if axis < 0:
    axis += a.ndim
  assert axis >= 0 and axis < a.ndim
  return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))
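
Hypothetical usage, assuming this numpy-on-TF module is imported as `tnp` (the alias is illustrative, not from the source):

x = [[1, 2], [3, 4]]
tnp.cumsum(x)          # flattened: [1, 3, 6, 10]
tnp.cumsum(x, axis=1)  # same shape as x: [[1, 3], [3, 7]]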
    def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                       num_end_samples, total_num_samples):
        """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
        input_length = tf.shape(input=input_tensor)[0]
        start_positions = tf.less(tf.range(input_length), num_start_samples)
        end_positions = tf.greater_equal(tf.range(input_length),
                                         input_length - num_end_samples)
        selected_positions = tf.logical_or(start_positions, end_positions)
        selected_positions = tf.cast(selected_positions, tf.float32)
        indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                        selected_positions)
        one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                      total_num_samples,
                                      dtype=tf.float32)
        return tf.cast(
            tf.tensordot(tf.cast(input_tensor, tf.float32),
                         one_hot_selector,
                         axes=[0, 0]), tf.int32)
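
The same cumsum/one-hot selection, shown gathering values directly; a standalone sketch of what the method computes for a small input (only `tf` assumed):

input_tensor = tf.constant([10, 11, 12, 13, 14])
# num_start_samples=2, num_end_samples=1, total_num_samples=3:
selected = tf.cast(tf.constant([True, True, False, False, True]), tf.float32)
indexed = tf.cumsum(selected) * selected                      # [1, 2, 0, 0, 3]
selector = tf.one_hot(tf.cast(indexed, tf.int32) - 1, 3, dtype=tf.float32)
tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), selector, axes=[0, 0]),
        tf.int32)
# -> [10, 11, 14]: the first two and the last element, with static length 3.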
Example #12
 def quantiles(self):
     maxs = self._maxima[:, tf.newaxis]
     mins = self._minima[:, tf.newaxis]
     weights = tf.cumsum(tf.nn.softmax(self._q, axis=-1), axis=-1)
     weights = tf.concat([tf.zeros((self._num_features, 1)), weights],
                         axis=1)
     return weights * (maxs - mins) + mins
Example #13
    def _cdf(self, k):
        k = tf.convert_to_tensor(value=k, name="k")

        k, probs = _broadcast_cat_event_and_params(
            k, self.probs, base_dtype=dtype_util.base_dtype(self.dtype))

        # Since the lowest number in the support is 0, any k < 0 should be zero in
        # the output.
        should_be_zero = k < 0

        # Will use k as an index in the gather below, so clip it to {0,...,K-1}.
        k = tf.clip_by_value(tf.cast(k, tf.int32), 0, self.num_categories - 1)

        batch_shape = tf.shape(input=k)

        # tf.gather(..., batch_dims=batch_dims) requires static batch_dims kwarg, so
        # to handle the case where the batch shape is dynamic, flatten the batch
        # dims (so we know batch_dims=1).
        k_flat_batch = tf.reshape(k, [-1])
        probs_flat_batch = tf.reshape(
            probs, tf.concat(([-1], [self.num_categories]), axis=0))

        cdf_flat = tf.gather(tf.cumsum(probs_flat_batch, axis=-1),
                             k_flat_batch[..., tf.newaxis],
                             batch_dims=1)

        cdf = tf.reshape(cdf_flat, shape=batch_shape)

        return tf.where(should_be_zero, tf.zeros_like(cdf), cdf)
Example #14
def _piecewise_constant_integrate(x1, x2, jump_locations, values, batch_rank):
    """Integrates piecewise constant function between `x1` and `x2`."""
    # Initializer already verified that `jump_locations` and `values` have the
    # same shape.
    # Expand batch size to one if there is no batch shape.
    if x1.shape.as_list()[:batch_rank]:
        no_batch_shape = False
    else:
        no_batch_shape = True
        x1 = tf.expand_dims(x1, 0)
        x2 = tf.expand_dims(x2, 0)
    if not jump_locations.shape.as_list()[:-1]:
        jump_locations = tf.expand_dims(jump_locations, 0)
        values = tf.expand_dims(values, 0)
        batch_rank += 1
    # Compute integral values between the jump locations
    event_shape = tf.shape(values)[(batch_rank + 1):]
    event_rank = values.shape.rank - batch_rank - 1
    num_data_points = tf.shape(values)[batch_rank]
    diff = jump_locations[..., 1:] - jump_locations[..., :-1]
    # Broadcast `diff` to the shape of
    # `batch_shape + [num_data_points - 2] + [1] * sample_rank`.
    for _ in range(event_rank):
        diff = tf.expand_dims(diff, -1)
    slice_indices = batch_rank * [slice(None)]
    slice_indices += [slice(1, num_data_points - 1)]
    integrals = tf.cumsum(values[slice_indices] * diff, batch_rank)
    # Pad integrals with zero values on left and right.
    batch_shape = tf.shape(integrals)[:batch_rank]
    pad_shape = tf.concat([batch_shape, [1], event_shape], axis=0)
    zeros = tf.zeros(pad_shape, dtype=integrals.dtype)
    integrals = tf.concat([zeros, integrals, zeros], axis=batch_rank)
    # Get jump locations and values and the integration end points
    value1, jump_location1, indices_1 = _get_indices_and_values(
        x1, jump_locations, values, 'left', batch_rank)
    value2, jump_location2, indices_2 = _get_indices_and_values(
        x2, jump_locations, values, 'right', batch_rank)
    integrals1 = tf.gather(integrals,
                           indices_1,
                           axis=batch_rank,
                           batch_dims=batch_rank)
    integrals2 = tf.gather(integrals,
                           indices_2,
                           axis=batch_rank,
                           batch_dims=batch_rank)
    # Broadcast `x1`, `x2`, `jump_location1`, `jump_location2` to the shape
    # `batch_shape + [num_points] + [1] * sample_rank`.
    for _ in range(event_rank):
        x1 = tf.expand_dims(x1, -1)
        x2 = tf.expand_dims(x2, -1)
        jump_location1 = tf.expand_dims(jump_location1, -1)
        jump_location2 = tf.expand_dims(jump_location2, -1)
    # Compute the value of the integrals.
    res = ((jump_location1 - x1) * value1 + (x2 - jump_location2) * value2 +
           integrals2 - integrals1)
    if no_batch_shape:
        return tf.squeeze(res, 0)
    else:
        return res
Example #15
    def _joint_sample_n(self, n, seed=None):
        """Draw a joint sample from the prior over latents and observations.

    This sampler is specific to LocalLevel models and is faster than the
    generic LinearGaussianStateSpaceModel implementation.

    Args:
      n: `int` `Tensor` number of samples to draw.
      seed: Optional `int` `Tensor` seed for the random number generator.
    Returns:
      latents: `float` `Tensor` of shape `concat([[n], self.batch_shape,
        [self.num_timesteps, self.latent_size]], axis=0)` representing samples
        of latent trajectories.
      observations: `float` `Tensor` of shape `concat([[n], self.batch_shape,
        [self.num_timesteps, self.observation_size]], axis=0)` representing
        samples of observed series generated from the sampled `latents`.
    """
        with tf.name_scope('joint_sample_n'):
            (initial_level_seed, level_jumps_seed,
             prior_observation_seed) = samplers.split_seed(
                 seed, n=3, salt='LocalLevelStateSpaceModel_joint_sample_n')

            if self.batch_shape.is_fully_defined():
                batch_shape = self.batch_shape.as_list()
            else:
                batch_shape = self.batch_shape_tensor()
            sample_and_batch_shape = tf.cast(
                prefer_static.concat([[n], batch_shape], axis=0), tf.int32)

            # Sample the initial timestep from the prior.  Since we want
            # this sample to have full batch shape (not just the batch shape
            # of the self.initial_state_prior object which might in general be
            # smaller), we augment the sample shape to include whatever
            # extra batch dimensions are required.
            initial_level = self.initial_state_prior.sample(
                linear_gaussian_ssm._augment_sample_shape(  # pylint: disable=protected-access
                    self.initial_state_prior, sample_and_batch_shape,
                    self.validate_args),
                seed=initial_level_seed)

            # Sample the latent random walk and observed noise, more efficiently than
            # the generic loop in `LinearGaussianStateSpaceModel`.
            level_jumps = self.level_scale[..., tf.newaxis] * samplers.normal(
                prefer_static.concat(
                    [sample_and_batch_shape, [self.num_timesteps - 1]],
                    axis=0),
                dtype=self.dtype,
                seed=level_jumps_seed)
            prior_level_sample = tf.cumsum(tf.concat(
                [initial_level, level_jumps], axis=-1),
                                           axis=-1)
            prior_observation_sample = prior_level_sample + (  # Sample noise.
                self.observation_noise_scale[..., tf.newaxis] *
                samplers.normal(prefer_static.shape(prior_level_sample),
                                dtype=self.dtype,
                                seed=prior_observation_seed))

            return (prior_level_sample[..., tf.newaxis],
                    prior_observation_sample[..., tf.newaxis])
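
The random-walk construction in isolation (a sketch; scalar batch, two joint samples, ten timesteps, level_scale = 0.1):

initial_level = tf.zeros([2, 1])
level_jumps = 0.1 * tf.random.normal([2, 9])
prior_level_sample = tf.cumsum(
    tf.concat([initial_level, level_jumps], axis=-1), axis=-1)
# prior_level_sample[:, t] is the initial level plus the first t jumps,
# shape [2, 10]; adding observation noise to it gives the observed series.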
 def test_cumulative_sum_power_of_two(self):
     elems = self._maybe_static(tf.range(0, 2**4, dtype=tf.int64))
     self.assertAllEqual(
         self.evaluate(
             tfp.math.scan_associative(operator.add,
                                       elems,
                                       max_num_levels=8)),
         self.evaluate(tf.cumsum(elems)))
 def test_cumulative_sum_size_zero(self):
     elems = tf.range(0, dtype=tf.int64)
     self.assertAllEqual(
         self.evaluate(
             tfp.math.scan_associative(operator.add,
                                       elems,
                                       max_num_levels=8)),
         self.evaluate(tf.cumsum(elems)))
Example #18
 def testGradient(self):
     x = np.arange(10) / 10.0 - 0.5
     x = tf.convert_to_tensor(x, dtype=tf.float64)
     grad_naive_theoretical, _ = gradient_checker_v2.compute_gradient(
         lambda y: tf.cumsum(tf.exp(y)), [x])
     grad_fused_theoretical, _ = gradient_checker_v2.compute_gradient(
         lambda y: tf.exp(tfp.math.log_cumsum_exp(y)), [x])
     self.assertAllClose(grad_fused_theoretical, grad_naive_theoretical)
Example #19
 def test_soft_quantile_normalization(self):
     x = tf.constant([1.2, 1.3, 1.5, -4.0, 1.8, 2.4, -1.0])
     target = tf.cumsum(tf.ones(x.shape[0]))
     xn = ops.soft_quantile_normalization(x, target)
     # Make sure that the order of x and xn are identical
     self.assertAllEqual(tf.argsort(x), tf.argsort(xn))
     # Make sure that the values of xn and target are close.
     self.assertAllClose(tf.sort(target), tf.sort(xn), atol=1e-1)
Example #20
 def build_lut(histo, step):
     # Compute the cumulative sum, shifting by step // 2
     # and then normalize by step.
     lut = (tf.cumsum(histo) + (step // 2)) // step
     # Shift lut, prepending with 0.
     lut = tf.concat([[0], lut[:-1]], 0)
     # Clip the counts to be in range.  This is done
     # in the C code for image.point.
     return tf.clip_by_value(lut, 0, 255)
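
A toy check of the LUT construction (a sketch with a hand-picked 4-bin histogram and step; only `tf` is assumed):

histo = tf.constant([4, 0, 4, 8])
step = 4
lut = (tf.cumsum(histo) + (step // 2)) // step   # [1, 1, 2, 4]
lut = tf.concat([[0], lut[:-1]], 0)              # shifted: [0, 1, 1, 2]
tf.clip_by_value(lut, 0, 255)                    # unchanged here; caps at 255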
Example #21
    def test_can_scan_tensors_of_different_rank(self):
        num_elems = 2**4
        elems0 = self.evaluate(
            tfd.Uniform(-1., 1.).sample(sample_shape=[num_elems]))
        elems1 = self.evaluate(
            tfd.Uniform(-1., 1.).sample(sample_shape=[num_elems, 1]))

        def extended_add(a, b):
            return (a[0] + b[0], a[1] + b[1])

        result = self.evaluate(
            tfp.math.scan_associative(
                extended_add,
                (self._maybe_static(elems0), self._maybe_static(elems1))))

        self.assertAllClose(result[0], self.evaluate(tf.cumsum(elems0)))
        self.assertAllClose(result[1], self.evaluate(tf.cumsum(elems1,
                                                               axis=0)))
Example #22
def _piecewise_constant_integrate(x1, x2, jump_locations, values, batch_rank):
  """Integrates piecewise constant function between `x1` and `x2`."""
  # Initializer already verified that `jump_locations` and `values` have the
  # same shape.
  # Expand batch size to one if there is no batch shape.
  if x1.shape.as_list()[:batch_rank]:
    no_batch_shape = False
  else:
    no_batch_shape = True
    x1 = tf.expand_dims(x1, 0)
    x2 = tf.expand_dims(x2, 0)
  if not jump_locations.shape.as_list()[:-1]:
    jump_locations = tf.expand_dims(jump_locations, 0)
    values = tf.expand_dims(values, 0)
    batch_rank += 1

  # Compute the index matrix that is later used for `tf.gather_nd`.
  index_matrix = _prepare_index_matrix(
      x1.shape.as_list()[:-1], x1.shape.as_list()[-1], tf.int32)
  # Compute integral values at the jump locations starting from the first jump
  # location.
  event_shape = values.shape[(batch_rank+1):]
  num_data_points = values.shape.as_list()[batch_rank]
  diff = jump_locations[..., 1:] - jump_locations[..., :-1]
  # Broadcast `diff` to the shape of
  # `batch_shape + [num_data_points - 2] + [1] * sample_rank`.
  for _ in event_shape:
    diff = tf.expand_dims(diff, -1)
  slice_indices = batch_rank * [slice(None)]
  slice_indices += [slice(1, num_data_points - 1)]
  integrals = tf.cumsum(values[slice_indices] * diff, batch_rank)
  # Pad integrals with zero values on left and right.
  batch_shape = integrals.shape.as_list()[:batch_rank]
  zeros = tf.zeros(batch_shape + [1] + event_shape, dtype=integrals.dtype)
  integrals = tf.concat([zeros, integrals, zeros], axis=batch_rank)
  # Get jump locations and values and the integration end points
  value1, jump_location1, indices_nd1 = _get_indices_and_values(
      x1, index_matrix, jump_locations, values, 'left', batch_rank)
  value2, jump_location2, indices_nd2 = _get_indices_and_values(
      x2, index_matrix, jump_locations, values, 'right', batch_rank)
  integrals1 = tf.gather_nd(integrals, indices_nd1)
  integrals2 = tf.gather_nd(integrals, indices_nd2)
  # Broadcast `x1`, `x2`, `jump_location1`, `jump_location2` to the shape
  # `batch_shape + [num_points] + [1] * sample_rank`.
  for _ in event_shape:
    x1 = tf.expand_dims(x1, -1)
    x2 = tf.expand_dims(x2, -1)
    jump_location1 = tf.expand_dims(jump_location1, -1)
    jump_location2 = tf.expand_dims(jump_location2, -1)
  # Compute the value of the integral.
  res = ((jump_location1 - x1) * value1
         + (x2 - jump_location2) * value2
         + integrals2 - integrals1)
  if no_batch_shape:
    return tf.squeeze(res, 0)
  else:
    return res
Example #23
    def test_cumulative_sum_with_xla(self):
        elems = self._maybe_static(tf.range(0, 2**4 - 1, dtype=np.int64))

        xla_scan = tf.function(experimental_compile=True)(
            tfp.math.scan_associative)
        result = xla_scan(operator.add, elems)

        self.assertAllEqual(self.evaluate(result),
                            self.evaluate(tf.cumsum(elems)))
Example #24
    def test_max_allowed_size(self):
        elems = self.evaluate(tfd.Uniform(-1., 1.).sample([511]))

        result = self.evaluate(
            tfp.math.scan_associative(operator.add,
                                      self._maybe_static(elems),
                                      max_num_levels=8,
                                      validate_args=True))
        self.assertAllClose(result, self.evaluate(tf.cumsum(elems)), atol=1e-4)
Example #25
 def test_cumulative_sum_maximally_odd(self):
     # A size that is one less than a power of two ensures that
     # every reduction results in an odd size tensor.
     # This makes a good test for the logic to handle
     # odd sizes
     elems = self._maybe_static(tf.range(0, 2**4 - 1, dtype=np.int64))
     self.assertAllEqual(
         self.evaluate(tfp.math.scan_associative(operator.add, elems)),
         self.evaluate(tf.cumsum(elems)))
Example #26
    def _checkBijectorInAllDims(self, x):
        """Helper for `testBijector`."""
        x = self._build_tensor(x)
        for axis in range(-self.evaluate(tf.rank(x)), 0):
            bijector = tfb.Cumsum(axis=axis, validate_args=True)
            self.assertStartsWith(bijector.name, 'cumsum')

            y = tf.cumsum(x, axis=axis)
            self.assertAllClose(y, self.evaluate(bijector.forward(x)))
            self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
Example #27
 def _get_new_item_indices(self, age, updates, mask=None):
     any_update = list(updates.values())[0]
     num_updates = tf.shape(any_update)[0]
     _, new_item_indices = tf.math.top_k(age, num_updates)
     if mask is not None:
         mask = tf.cast(mask, dtype=tf.int32)
         unmasked_indices = (tf.cumsum(mask) - 1) * mask
         unmasked_indices = tf.expand_dims(unmasked_indices, axis=1)
         new_item_indices = tf.gather_nd(new_item_indices, unmasked_indices)
     return new_item_indices
Example #28
def get_note_mask(q_pitch, max_regions=100, note_on_only=True):
    """Get a binary mask for each note from a monophonic instrument.

  Each transition of the value creates a new region. Returns the mask of each
  region.
  Args:
    q_pitch: A quantized value, such as pitch or velocity. Shape
      [batch, n_timesteps] or [batch, n_timesteps, 1].
    max_regions: Maximum number of note regions to consider in the sequence.
      Also, the channel dimension of the output mask. Each value transition
      defines a new region, e.g. each note-on and note-off count as a separate
      region.
    note_on_only: Return a mask that is true only for regions where the pitch
      is greater than 0.

  Returns:
    A binary mask of each region [batch, n_timesteps, max_regions].
  """
    # Only batch and time dimensions.
    if len(q_pitch.shape) == 3:
        q_pitch = q_pitch[:, :, 0]

    # Get onset and offset points.
    edges = tf.abs(spectral_ops.diff(q_pitch, axis=1)) > 0

    # Count endpoints as starts/ends of regions.
    edges = edges[:, :-1, ...]
    edges = tf.pad(edges, [[0, 0], [1, 0]],
                   mode='constant',
                   constant_values=True)
    edges = tf.pad(edges, [[0, 0], [0, 1]],
                   mode='constant',
                   constant_values=False)
    edges = tf.cast(edges, tf.int32)

    # Count up onset and offsets for each timestep.
    # Assumes each onset has a corresponding offset.
    # The -1 ensures that the 0th index is the first note.
    edge_idx = tf.cumsum(edges, axis=1) - 1

    # Create masks of shape [batch, n_timesteps, max_regions].
    note_mask = edge_idx[..., None] == tf.range(max_regions)[None, None, :]
    note_mask = tf.cast(note_mask, tf.float32)

    if note_on_only:
        # [batch, notes]
        note_pitches = get_note_moments(q_pitch, note_mask, return_std=False)
        # [batch, time, notes]
        note_on = tf.cast(note_pitches > 0.0, tf.float32)[:, None, :]
        # [batch, time, notes]
        note_mask *= note_on

    return note_mask
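
A hypothetical call (it assumes the module-level `spectral_ops` and `get_note_moments` helpers this function relies on):

q_pitch = tf.constant([[60., 60., 62., 62., 64., 64.]])   # [batch=1, time=6]
note_mask = get_note_mask(q_pitch, max_regions=8)
# note_mask has shape [1, 6, 8]; channel k is 1.0 over the k-th
# constant-pitch segment, here timesteps {0, 1}, {2, 3} and {4, 5}.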
Example #29
def cumsum(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  a = asarray(a, dtype=dtype)

  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    axis += tf.rank(a.data)
  return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))
    def test_cumulative_sum_custom_axis(self, axis):
        elems = self._maybe_static(
            tf.random.stateless_normal(
                [4, 32, 31, 1],
                seed=test_util.test_seed(sampler_type='stateless')))

        axis = self._maybe_static(axis)
        expected_result = self.evaluate(tf.cumsum(elems, axis=axis))
        result = tfp.math.scan_associative(operator.add,
                                           elems,
                                           axis=axis,
                                           max_num_levels=8)
        self.assertAllClose(self.evaluate(result), expected_result, rtol=1e-5)