Example 1
 def _make_runtime_assertions(self, distribution, reinterpreted_batch_ndims,
                              validate_args):
     assertions = []
     static_reinterpreted_batch_ndims = tf.contrib.util.constant_value(
         reinterpreted_batch_ndims)
     batch_ndims = distribution.batch_shape.ndims
     if batch_ndims is not None and static_reinterpreted_batch_ndims is not None:
         if static_reinterpreted_batch_ndims > batch_ndims:
             raise ValueError("reinterpreted_batch_ndims({}) cannot exceed "
                              "distribution.batch_ndims({})".format(
                                  static_reinterpreted_batch_ndims,
                                  batch_ndims))
     elif validate_args:
         batch_shape = distribution.batch_shape_tensor()
         batch_ndims = (
             tf.dimension_value(batch_shape.shape[0])  # pylint: disable=g-long-ternary
             if (tf.dimension_value(
                 batch_shape.shape.with_rank_at_least(1)[0]) is not None)
             else tf.shape(batch_shape)[0])
         assertions.append(
             tf.assert_less_equal(
                 reinterpreted_batch_ndims,
                 batch_ndims,
                 message=("reinterpreted_batch_ndims cannot exceed "
                          "distribution.batch_ndims")))
     return assertions
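The helper above shows the usual `tf.dimension_value` idiom: use the statically known dimension when the shape is available at graph-construction time, otherwise fall back to a runtime op or assertion. Below is a minimal sketch of that idiom, assuming TF 1.x (where `tf.dimension_value` and `tf.placeholder` exist; in TF 2.x the equivalent is `tf.compat.dimension_value`); the placeholder is illustrative only.

import tensorflow as tf  # assumes TF 1.x

# Batch dimension unknown until runtime, feature dimension statically known.
x = tf.placeholder(tf.float32, shape=[None, 3])

tf.dimension_value(x.shape[-1])  # -> 3, a plain Python int
tf.dimension_value(x.shape[0])   # -> None, not statically known

# Prefer the static value; otherwise build a dynamic op.
batch_size = tf.dimension_value(x.shape[0]) or tf.shape(x)[0]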
Example 2
def maybe_check_quadrature_param(param, name, validate_args):
    """Helper which checks validity of `loc` and `scale` init args."""
    with tf.name_scope(name="check_" + name, values=[param]):
        assertions = []
        if param.shape.ndims is not None:
            if param.shape.ndims == 0:
                raise ValueError("Mixing params must be a (batch of) vector; "
                                 "{}.rank={} is not at least one.".format(
                                     name, param.shape.ndims))
        elif validate_args:
            assertions.append(
                tf.assert_rank_at_least(
                    param,
                    1,
                    message=("Mixing params must be a (batch of) vector; "
                             "{}.rank is not at least one.".format(name))))

        # TODO(jvdillon): Remove once we support k-mixtures.
        if param.shape.with_rank_at_least(1)[-1] is not None:
            if tf.dimension_value(param.shape[-1]) != 1:
                raise NotImplementedError(
                    "Currently only bimixtures are supported; "
                    "{}.shape[-1]={} is not 1.".format(
                        name, tf.dimension_value(param.shape[-1])))
        elif validate_args:
            assertions.append(
                tf.assert_equal(
                    tf.shape(param)[-1],
                    1,
                    message=("Currently only bimixtures are supported; "
                             "{}.shape[-1] is not 1.".format(name))))

        if assertions:
            return control_flow_ops.with_dependencies(assertions, param)
        return param
Example 3
def _one_hot_like(x, indices, on_value=None):
    output_dtype = x.dtype.base_dtype
    if tf.dimension_value(x.shape[-1]) is None:
        depth = tf.shape(x)[-1]
    else:
        depth = tf.dimension_value(x.shape[-1])
    if on_value is not None:
        on_value = tf.cast(on_value, output_dtype)
    return tf.one_hot(indices,
                      depth=depth,
                      on_value=on_value,
                      dtype=output_dtype)
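A hypothetical usage of `_one_hot_like`, assuming TF 1.x with the helper above in scope; the tensors are made up for illustration. With a statically known last dimension, `depth` is a plain Python int; otherwise it becomes a dynamic `tf.shape` op.

import tensorflow as tf  # assumes TF 1.x

x = tf.zeros([4, 3])                 # statically known depth of 3
indices = tf.constant([0, 2, 1, 0])
one_hot = _one_hot_like(x, indices)  # shape [4, 3], dtype float32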
Example 4
def _trim_boundaries(tensor, from_dim):
    """Trims tensor boundaries starting from given dimension."""
    # For example, if tensor has shape (a, b, c, d) and from_dim=1, then the
    # output tensor has shape (a, b-2, c-2, d-2).
    rank = len(tensor.shape.as_list())
    slice_begin = np.zeros(rank, dtype=np.int32)
    slice_size = np.zeros(rank, dtype=np.int32)
    for i in range(from_dim):
        slice_size[i] = tf.dimension_value(tensor.shape.as_list()[i])
    for i in range(from_dim, rank):
        slice_begin[i] = 1
        slice_size[i] = tf.dimension_value(tensor.shape.as_list()[i]) - 2
    return tf.slice(tensor, slice_begin, slice_size)
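A small usage sketch for `_trim_boundaries`, assuming a fully statically shaped input so that `tf.dimension_value` applied to the `shape.as_list()` entries returns plain ints.

import tensorflow as tf  # assumes TF 1.x and _trim_boundaries in scope

t = tf.zeros([2, 5, 5])
trimmed = _trim_boundaries(t, from_dim=1)  # trims one cell per side: shape [2, 3, 3]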
Example 5
 def _is_empty_observation_data(self):
   # If both input locations and observations are `None`, we consider this
   # "empty" observation data.
   if self.observation_index_points is None and self.observations is None:
     return True
   ndims = self.kernel.feature_ndims
   if (self.observation_index_points.shape[-ndims:].is_fully_defined() and
       tf.dimension_value(self.observation_index_points.shape[-ndims]) == 0):
     return True
   if (self.observations.shape[-ndims:].is_fully_defined() and
       tf.dimension_value(self.observations.shape[-ndims]) == 0):
     return True
   return False
Example 6
 def _event_shape_tensor(self):
     with tf.control_dependencies(self._runtime_assertions):
         batch_shape = self.distribution.batch_shape_tensor()
         batch_ndims = (
             tf.dimension_value(batch_shape.shape[0])  # pylint: disable=g-long-ternary
             if tf.dimension_value(
                 batch_shape.shape.with_rank_at_least(1)[0]) else
             tf.shape(batch_shape)[0])
         return tf.concat([
             batch_shape[batch_ndims - self.reinterpreted_batch_ndims:],
             self.distribution.event_shape_tensor(),
         ],
                          axis=0)
Example 7
 def _batch_shape_tensor(self):
     with tf.control_dependencies(self._runtime_assertions):
         batch_shape = self.distribution.batch_shape_tensor()
         batch_ndims = tf.dimension_value(batch_shape.shape[0])
         if batch_ndims is None:
             batch_ndims = tf.shape(batch_shape)[0]
         return batch_shape[:batch_ndims - self.reinterpreted_batch_ndims]
Example 8
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
    """Convert to tensor and possibly mask `memory`.

    Args:
      memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
      memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
      check_inner_dims_defined: Python boolean.  If `True`, the `memory`
        argument's shape is checked to ensure all but the two outermost
        dimensions are fully defined.

    Returns:
      A (possibly masked), checked, new `memory`.

    Raises:
      ValueError: If `check_inner_dims_defined` is `True` and not
        `memory.shape[2:].is_fully_defined()`.
    """
    memory = nest.map_structure(
        lambda m: tf.convert_to_tensor(m, name="memory"), memory)
    if memory_sequence_length is not None:
        memory_sequence_length = tf.convert_to_tensor(
            memory_sequence_length, name="memory_sequence_length")
    if check_inner_dims_defined:
        def _check_dims(m):
            if not m.get_shape()[2:].is_fully_defined():
                raise ValueError("Expected memory %s to have fully defined inner dims, "
                                 "but saw shape: %s" % (m.name, m.get_shape()))

        nest.map_structure(_check_dims, memory)
    if memory_sequence_length is None:
        seq_len_mask = None
    else:
        seq_len_mask = tf.sequence_mask(
            memory_sequence_length,
            maxlen=tf.shape(nest.flatten(memory)[0])[1],
            dtype=nest.flatten(memory)[0].dtype)
        seq_len_batch_size = (
                tf.dimension_value(memory_sequence_length.shape[0])
                or tf.shape(memory_sequence_length)[0])

    def _maybe_mask(m, seq_len_mask):
        rank = m.get_shape().ndims
        rank = rank if rank is not None else tf.rank(m)
        extra_ones = tf.ones(rank - 2, dtype=tf.int32)
        m_batch_size = tf.dimension_value(
            m.shape[0]) or tf.shape(m)[0]
        if memory_sequence_length is not None:
            message = ("memory_sequence_length and memory tensor batch sizes do not "
                       "match.")
            with tf.control_dependencies([
                tf.assert_equal(
                    seq_len_batch_size, m_batch_size, message=message)]):
                seq_len_mask = tf.reshape(
                    seq_len_mask,
                    tf.concat((tf.shape(seq_len_mask), extra_ones), 0))
                return m * seq_len_mask
        else:
            return m

    return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
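A hypothetical call of `_prepare_memory`, assuming TF 1.x and the helper above in scope; `memory` is `[batch_size, max_time, depth]`, and positions at or beyond each sequence length are zeroed while the shape is preserved.

import tensorflow as tf  # assumes TF 1.x

memory = tf.random_normal([2, 5, 8])           # [batch_size, max_time, depth]
lengths = tf.constant([3, 5], dtype=tf.int32)  # [batch_size]
masked = _prepare_memory(memory, lengths, check_inner_dims_defined=True)
# masked still has shape [2, 5, 8]; time steps >= lengths[b] in batch entry b are zero.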
Example 9
  def _inverse(self, y):
    # To derive the inverse mapping note that:
    #   y[i] = exp(x[i]) / normalization
    # and
    #   y[end] = 1 / normalization.
    # Thus:
    # x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
    #      = log(exp(x[i])/normalization) - log(y[end])
    #      = log(y[i]) - log(y[end])

    # Do this first to make sure CSE catches that it'll happen again in
    # _inverse_log_det_jacobian.
    x = tf.log(y)

    log_normalization = (-x[..., -1])[..., tf.newaxis]
    x = x[..., :-1] + log_normalization

    # Set shape hints.
    if y.shape.ndims is not None:
      last_dim = tf.dimension_value(y.shape[-1])
      shape = y.shape[:-1].concatenate(
          None if last_dim is None else last_dim - 1)
      x.shape.assert_is_compatible_with(shape)
      x.set_shape(shape)

    return x
Example 10
    def test_invertible_from_lu(self):
        lower_upper, permutation = tf.linalg.lu([[1., 2, 3], [4, 5, 6],
                                                 [0.5, 0., 0.25]])

        conv1x1 = tfb.MatvecLU(lower_upper=lower_upper,
                               permutation=permutation,
                               validate_args=True)

        channels = tf.dimension_value(lower_upper.shape[-1])
        x = tf.random_uniform(shape=[2, 28, 28, channels])

        fwd = conv1x1.forward(x)
        rev_fwd = conv1x1.inverse(fwd)
        fldj = conv1x1.forward_log_det_jacobian(x, event_ndims=3)

        rev = conv1x1.inverse(x)
        fwd_rev = conv1x1.forward(rev)
        ildj = conv1x1.inverse_log_det_jacobian(x, event_ndims=3)

        [x_, fwd_, rev_, fwd_rev_, rev_fwd_, fldj_,
         ildj_] = self.evaluate([x, fwd, rev, fwd_rev, rev_fwd, fldj, ildj])

        self.assertAllClose(x_, fwd_rev_, atol=1e-3, rtol=1e-6)
        self.assertAllClose(x_, rev_fwd_, atol=1e-3, rtol=1e-6)

        self.assertEqual(fldj_, -ildj_)
        self.assertTrue(fldj_ > 1.)  # Notably, bounded away from zero.

        # We now check that the bijector isn't simply the identity function. We do
        # this by checking that at least 50% of pixels differ by at least 10%.
        self.assertTrue(np.mean(np.abs(x_ - fwd_) > 0.1 * x_) > 0.5)
        self.assertTrue(np.mean(np.abs(x_ - rev_) > 0.1 * x_) > 0.5)
Example 11
  def test_doc_string(self):
    # Load data.
    n = int(1e3)
    scale_noise = 0.01
    x = tfd.Normal(loc=0, scale=1).sample([n, 2])
    eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
    y = tfd.OneHotCategorical(
        logits=_vec_pad(
            0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
        dtype=tf.float32).sample()

    # Create model.
    d = tf.dimension_value(y.shape[-1])
    k = 2
    p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(p),
        tfpl.CategoricalMixtureOfOneHotCategorical(d, k),
    ])

    # Fit.
    model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),
                  loss=lambda y, model: -model.log_prob(y),
                  metrics=[])
    batch_size = 100
    model.fit(x, y,
              batch_size=batch_size,
              epochs=1,
              steps_per_epoch=1,  # Usually `n // batch_size`.
              shuffle=True)

    yhat = model(x)
    self.assertIsInstance(yhat, tfd.MixtureSameFamily)
    self.assertIsInstance(yhat.mixture_distribution, tfd.Categorical)
    self.assertIsInstance(yhat.components_distribution, tfd.OneHotCategorical)
Example 12
    def test_doc_string(self):
        # Load data.
        n = int(1e4)
        scale_noise = 0.01
        x = tfd.Normal(loc=0, scale=1).sample([n, 2])
        eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
        y = tfd.OneHotCategorical(
            logits=_vec_pad(0.3142 + 1.6180 * x[..., :1] -
                            2.7183 * x[..., 1:] + eps),
            dtype=tf.float32).sample()

        # Create model.
        d = tf.dimension_value(y.shape[-1])
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(tfpl.OneHotCategorical.params_size(d) - 1),
            tf.keras.layers.Lambda(_vec_pad),
            tfpl.OneHotCategorical(d),
        ])

        # Fit.
        model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),
                      loss=lambda y, model: -model.log_prob(y),
                      metrics=[])
        batch_size = 100
        model.fit(x,
                  y,
                  batch_size=batch_size,
                  epochs=1,
                  steps_per_epoch=n // batch_size,
                  shuffle=True)
        self.assertAllClose([[1.6180], [-2.7183]],
                            model.get_weights()[0],
                            atol=0,
                            rtol=0.1)
Example 13
def _slice(tensor, dim, start, end):
    """Slices the tensor along given dimension."""
    # Performs a slice along the dimension dim. E.g. for tensor t of rank 3,
    # _slice(t, 1, 3, 5) is same as t[:, 3:5].
    # For a slice unbounded to the right, set end=0: _slice(t, 1, -3, 0) is same
    # as t[:, -3:].
    rank = len(tensor.shape.as_list())
    if start < 0:
        start += tf.dimension_value(tensor.shape.as_list()[dim])
    if end <= 0:
        end += tf.dimension_value(tensor.shape.as_list()[dim])
    slice_begin = np.zeros(rank, dtype=np.int32)
    slice_begin[dim] = start
    slice_size = -np.ones(rank, dtype=np.int32)
    slice_size[dim] = end - start
    return tf.slice(tensor, slice_begin, slice_size)
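A hedged usage sketch for `_slice`, mirroring the comment above and assuming static shapes; the equivalent NumPy-style slices are noted in the comments.

import tensorflow as tf  # assumes TF 1.x and _slice in scope

t = tf.reshape(tf.range(12), [3, 4])
a = _slice(t, 1, 1, 3)   # same as t[:, 1:3]
b = _slice(t, 1, -3, 0)  # same as t[:, -3:]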
Example 14
 def _covariance(self):
     # Derivation: https://sachinruk.github.io/blog/von-Mises-Fisher/
     event_dim = tf.dimension_value(self.event_shape[0])
     if event_dim is None:
         raise ValueError(
             'event shape must be statically known for _bessel_ive')
     # TODO(bjp): Enable this; numerically unstable.
     if event_dim > 2:
         raise ValueError(
             'vMF covariance is numerically unstable for dim>2')
     concentration = self.concentration[..., tf.newaxis]
     safe_conc = tf.where(concentration > 0, concentration,
                          tf.ones_like(concentration))
     h = (_bessel_ive(event_dim / 2, safe_conc) /
          _bessel_ive(event_dim / 2 - 1, safe_conc))
     intermediate = (
         tf.matmul(self.mean_direction[..., :, tf.newaxis],
                   self.mean_direction[..., tf.newaxis, :]) *
         (1 - event_dim * h / safe_conc - h**2)[..., tf.newaxis])
     cov = tf.matrix_set_diag(
         intermediate,
         tf.matrix_diag_part(intermediate) + (h / safe_conc))
     return tf.where(
         concentration[..., tf.newaxis] > tf.zeros_like(cov), cov,
         tf.linalg.eye(event_dim, batch_shape=self.batch_shape_tensor()) /
         event_dim)
Example 15
def interpolate_loc(grid, loc):
    """Helper which interpolates between two locs."""
    if len(loc) != 2:
        raise NotImplementedError("Currently only bimixtures are supported; "
                                  "len(scale)={} is not 2.".format(len(loc)))
    deg = tf.dimension_value(grid.shape.with_rank_at_least(1)[-1])
    if deg is None:
        raise ValueError("Num quadrature grid points must be known prior "
                         "to graph execution.")
    with tf.name_scope("interpolate_loc", values=[grid, loc]):
        if loc is None or loc[0] is None and loc[1] is None:
            return [None] * deg
        # shape: [B, 1, k, deg]
        w = grid[..., tf.newaxis, :, :]
        loc = [
            x[..., tf.newaxis]  # shape: [B, e, 1]
            if x is not None else None for x in loc
        ]
        if loc[0] is None:
            x = w[..., 1, :] * loc[1]  # shape: [B, e, deg]
        elif loc[1] is None:
            x = w[..., 0, :] * loc[0]  # shape: [B, e, deg]
        else:
            delta = loc[0] - loc[1]
            x = w[..., 0, :] * delta + loc[1]  # shape: [B, e, deg]
        return [x[..., k] for k in range(deg)]  # list(shape:[B, e])
Example 16
def _shift(tensor, axis, delta):
    """Shifts the given tensor, filling it with zeros on the other side.

  Args:
    tensor: `Tensor`.
    axis: Axis to shift along.
    delta: Shift size. May be negative: the sign determines the direction of the
      shift.

  Returns:
    Shifted `Tensor`.

  Example:
  ```
  t = [[1, 2, 3]
       [4, 5, 6]
       [7, 8, 9]]
  _shift(t, 1, 2) = [[0, 0, 1]
                     [0, 0, 4]
                     [0, 0, 7]]
  _shift(t, 0, -1) = [[4, 5, 6]
                      [7, 8, 9]
                      [0, 0, 0]]
  ```

  TODO(b/144087751): implement this in C++. Perhaps we can add a parameter to
  tf.roll, so that it fills "the other side" with zeros.
  """
    rank = len(tensor.shape)
    zeros_shape = np.zeros(rank)
    for d in range(rank):
        if d == axis:
            zeros_shape[d] = np.abs(delta)
        else:
            zeros_shape[d] = tf.dimension_value(tensor.shape[d])

    zeros = tf.zeros(zeros_shape, dtype=tensor.dtype)

    slice_begin = np.zeros(rank, dtype=np.int32)
    slice_size = -np.ones(rank, dtype=np.int32)
    if delta > 0:
        slice_size[axis] = tf.dimension_value(tensor.shape[axis]) - delta
        return tf.concat((zeros, tf.slice(tensor, slice_begin, slice_size)),
                         axis=axis)
    else:
        slice_begin[axis] = -delta
        return tf.concat((tf.slice(tensor, slice_begin, slice_size), zeros),
                         axis=axis)
Example 17
 def _get_final_shape(qs):
     """Helper to build `TensorShape`."""
     bs = dist.batch_shape.with_rank_at_least(1)
     num_components = tf.dimension_value(bs[-1])
     if num_components is not None:
         num_components += 1
     tail = tf.TensorShape([num_components, qs])
     return bs[:-1].concatenate(tail)
Example 18
 def _rotate(self, samples):
     """Applies a Householder rotation to `samples`."""
     event_dim = (tf.dimension_value(self.event_shape[0])
                  or self._event_shape_tensor()[0])
     basis = tf.concat(
          [[1.], tf.zeros([event_dim - 1], dtype=self.dtype)], axis=0)
     u = tf.nn.l2_normalize(basis - self.mean_direction, axis=-1)
     return samples - 2 * tf.reduce_sum(samples * u, axis=-1,
                                        keepdims=True) * u
Example 19
    def __init__(self, transition_params):
        """Initialize the CrfForwardRnnCell.

        Args:
          transition_params: A [num_tags, num_tags] matrix of binary potentials.
              This matrix is expanded into a [1, num_tags, num_tags] in preparation
              for the broadcast summation occurring within the cell.
        """
        self._transition_params = tf.expand_dims(transition_params, 0)
        self._num_tags = tf.dimension_value(transition_params.shape[0])
Example 20
  def test_end_to_end_works_correctly(self):
    true_mean = self.dtype([0, 0])
    true_cov = self.dtype([[1, 0.5],
                           [0.5, 1]])
    num_results = 2000
    counter = collections.Counter()
    with self.cached_session(graph=tf.Graph()) as sess:
      def target_log_prob(x, y):
        counter['target_calls'] += 1
        # Corresponds to unnormalized MVN.
        # z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
        z = tf.stack([x, y], axis=-1) - true_mean
        z = tf.squeeze(
            tf.linalg.triangular_solve(
                np.linalg.cholesky(true_cov),
                z[..., tf.newaxis]),
            axis=-1)
        return -0.5 * tf.reduce_sum(z**2., axis=-1)

      transformed_hmc = tfp.mcmc.TransformedTransitionKernel(
          inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
              target_log_prob_fn=target_log_prob,
              # Affine scaling means we have to change the step_size
              # in order to get 60% acceptance, as was done in mcmc/hmc_test.py.
              step_size=[1.23 / 0.75, 1.23 / 0.5],
              num_leapfrog_steps=2,
              seed=54),
          bijector=[
              tfb.AffineScalar(scale=0.75),
              tfb.AffineScalar(scale=0.5),
          ])
      # Recall, tfp.mcmc.sample_chain calls
      # transformed_hmc.bootstrap_results too.
      states, kernel_results = tfp.mcmc.sample_chain(
          num_results=num_results,
          # The initial state is used by inner_kernel.bootstrap_results.
          # Note the input is *after* `bijector.forward`.
          current_state=[self.dtype(-2), self.dtype(2)],
          kernel=transformed_hmc,
          num_burnin_steps=200,
          num_steps_between_results=1,
          parallel_iterations=1)
      self.assertAllEqual(dict(target_calls=2), counter)
      states = tf.stack(states, axis=-1)
      self.assertEqual(num_results, tf.dimension_value(states.shape[0]))
      sample_mean = tf.reduce_mean(states, axis=0)
      x = states - sample_mean
      sample_cov = tf.matmul(x, x, transpose_a=True) / self.dtype(num_results)
      [sample_mean_, sample_cov_, is_accepted_] = sess.run([
          sample_mean, sample_cov, kernel_results.inner_results.is_accepted])
      self.assertNear(0.6, is_accepted_.mean(), err=0.05)
      self.assertAllClose(true_mean, sample_mean_,
                          atol=0.06, rtol=0.)
      self.assertAllClose(true_cov, sample_cov_,
                          atol=0., rtol=0.1)
Example 21
  def __init__(self,
               target_log_prob_fn,
               inverse_temperatures,
               make_kernel_fn,
               exchange_proposed_fn=default_exchange_proposed_fn(1.),
               seed=None,
               name=None,
               **kwargs):
    """Instantiates this object.

    Args:
      target_log_prob_fn: Python callable which takes an argument like
        `current_state` (or `*current_state` if it's a list) and returns its
        (possibly unnormalized) log-density under the target distribution.
      inverse_temperatures: `1D` `Tensor` of inverse temperatures to perform
        sampling with each replica. Must have statically known `shape`.
        `inverse_temperatures[0]` produces the states returned by samplers,
        and is typically == 1.
      make_kernel_fn: Python callable which takes target_log_prob_fn and seed
        args and returns a TransitionKernel instance.
      exchange_proposed_fn: Python callable which takes a number of replicas and
        returns combinations of replicas for exchange.
      seed: Python integer to seed the random number generator.
        Default value: `None` (i.e., no seed).
      name: Python `str` name prefixed to Ops created by this function.
        Default value: `None` (i.e., "remc_kernel").
      **kwargs: Arguments for `make_kernel_fn`.

    Raises:
      ValueError: `inverse_temperatures` doesn't have statically known 1D shape.
    """
    inverse_temperatures = tf.convert_to_tensor(
        inverse_temperatures, name='inverse_temperatures')

    # Note these are static checks, and don't need to be embedded in the graph.
    inverse_temperatures.shape.assert_is_fully_defined()
    inverse_temperatures.shape.assert_has_rank(1)

    self._seed_stream = distributions.SeedStream(seed, salt=name)
    self._seeded_mcmc = seed is not None
    self._parameters = dict(
        target_log_prob_fn=target_log_prob_fn,
        inverse_temperatures=inverse_temperatures,
        num_replica=tf.dimension_value(inverse_temperatures.shape[0]),
        exchange_proposed_fn=exchange_proposed_fn,
        seed=seed,
        name=name)
    self.replica_kernels = []
    for i in range(self.num_replica):
      self.replica_kernels.append(
          make_kernel_fn(
              target_log_prob_fn=_replica_log_prob_fn(inverse_temperatures[i],
                                                      target_log_prob_fn),
              seed=self._seed_stream()))
Example 22
 def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
   """Implementation for `is_scalar_batch` and `is_scalar_event`."""
   if static_shape.ndims is not None:
     return static_shape.ndims == 0
   shape = dynamic_shape_fn()
   if tf.dimension_value(shape.shape[0]) is not None:
     # If the static_shape is correctly written then we should never execute
     # this branch. We keep it just in case there's some unimagined corner
     # case.
     return shape.shape.as_list() == [0]
   return tf.equal(tf.shape(shape)[0], 0)
Example 23
 def compute_output_shape(self, input_shape):
     input_shape = tf.TensorShape(input_shape)
     input_shape = input_shape.with_rank_at_least(
         self._num_summed_dimensions + 1)
     for i in range(self._num_summed_dimensions):
         if tf.dimension_value(input_shape[-1 * i]) is None:
             raise ValueError(
                 "The %s dimension of input_shape must be defined, but saw: %s" %
                 (-1 * i, input_shape))
     return input_shape[:-1 * self._num_summed_dimensions].concatenate(
         self._units)
Example 24
  def testShapes(self):
    # 5x5 grid of index points in R^2 and flatten to 25x2
    index_points = np.linspace(-4., 4., 5, dtype=np.float32)
    index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)
    index_points = np.reshape(index_points, [-1, 2])
    # ==> shape = [25, 2]

    # Kernel with batch_shape [2, 4, 1]
    df = np.array(
        [[3., 4., 5., 4.], [7.5, 8, 5., 5.]],
        dtype=np.float32).reshape([2, 4, 1])
    amplitude = np.array([1., 2.], np.float32).reshape([2, 1, 1])
    length_scale = np.array([1., 2., 3., 4.], np.float32).reshape([1, 4, 1])
    batched_index_points = np.stack([index_points]*6)
    # ==> shape = [6, 25, 2]
    if not self.is_static:
      df = tf.placeholder_with_default(df, shape=None)
      amplitude = tf.placeholder_with_default(amplitude, shape=None)
      length_scale = tf.placeholder_with_default(length_scale, shape=None)
      batched_index_points = tf.placeholder_with_default(
          batched_index_points, shape=None)
    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
    tp = tfd.StudentTProcess(
        df,
        kernel,
        batched_index_points,
        jitter=1e-5)

    batch_shape = [2, 4, 6]
    event_shape = [25]
    sample_shape = [5, 3]

    samples = tp.sample(sample_shape)

    if self.is_static or tf.executing_eagerly():
      self.assertAllEqual(tp.batch_shape_tensor(), batch_shape)
      self.assertAllEqual(tp.event_shape_tensor(), event_shape)
      self.assertAllEqual(samples.shape,
                          sample_shape + batch_shape + event_shape)
      self.assertAllEqual(tp.batch_shape, batch_shape)
      self.assertAllEqual(tp.event_shape, event_shape)
      self.assertAllEqual(samples.shape,
                          sample_shape + batch_shape + event_shape)
    else:
      self.assertAllEqual(self.evaluate(tp.batch_shape_tensor()), batch_shape)
      self.assertAllEqual(self.evaluate(tp.event_shape_tensor()), event_shape)
      self.assertAllEqual(
          self.evaluate(samples).shape,
          sample_shape + batch_shape + event_shape)
      self.assertIsNone(samples.shape.ndims)
      self.assertIsNone(tp.batch_shape.ndims)
      self.assertEqual(tp.event_shape.ndims, 1)
      self.assertIsNone(tf.dimension_value(tp.event_shape.dims[0]))
Example 25
 def _cache_input_depth(self, x):
     if self._input_depth is None:
         self._input_depth = tf.dimension_value(
             x.shape.with_rank_at_least(1)[-1])
         if self._input_depth is None:
             raise NotImplementedError(
                 "Rightmost dimension must be known prior to graph execution."
             )
         if self._num_masked >= self._input_depth:
             raise ValueError(
                 "Number of masked units must be smaller than the event size."
             )
Example 26
def validate_init_args_statically(distribution, batch_shape):
    """Helper to __init__ which makes or raises assertions."""
    if batch_shape.shape.ndims is not None:
        if batch_shape.shape.ndims != 1:
            raise ValueError("`batch_shape` must be a vector "
                             "(saw rank: {}).".format(batch_shape.shape.ndims))

    batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
    batch_size_static = batch_shape_static.num_elements()
    dist_batch_size_static = distribution.batch_shape.num_elements()

    if batch_size_static is not None and dist_batch_size_static is not None:
        if batch_size_static != dist_batch_size_static:
            raise ValueError("`batch_shape` size ({}) must match "
                             "`distribution.batch_shape` size ({}).".format(
                                 batch_size_static, dist_batch_size_static))

    if batch_shape_static.dims is not None:
        if any(
                tf.dimension_value(dim) is not None
                and tf.dimension_value(dim) < 1 for dim in batch_shape_static):
            raise ValueError("`batch_shape` elements must be >=-1.")
Example 27
def identity_conv(NHWC_X, filter_size, feature_maps_in, feature_maps_out, stride, padding = 'VALID'):
    conv = IdentityConv2dMean(filter_size, feature_maps_in, feature_maps_out, stride, padding)
    sess = conv.enquire_session()
    if type(NHWC_X.shape[0]) == tf.Dimension:
        batch = tf.dimension_value(NHWC_X.shape[0])
        # print(batch)
        with sess.as_default():
            NHWC_X = NHWC_X.eval()
    else:
        batch = NHWC_X.shape[0]
        # print(batch)
    random_images = np.random.choice(np.arange(batch), size=1000)
    return sess.run(conv(NHWC_X[random_images]))
Example 28
 def _verifyCovariance(self, vmf):
     dim = tf.dimension_value(vmf.event_shape[-1])
     nsamples = 10000
     samples = vmf.sample(nsamples)
     samples = tf.check_numerics(samples, 'samples')
     cov = vmf.covariance()
     samples, cov = self.evaluate([samples, cov])
     batched_samples = np.reshape(samples, [nsamples, -1, dim])
     batch_size = batched_samples.shape[1]
     est_cov = np.zeros([batch_size, dim, dim], dtype=cov.dtype)
     for bi in range(batched_samples.shape[1]):
         est_cov[bi] = np.cov(batched_samples[:, bi], rowvar=False)
     self.assertAllClose(np.reshape(est_cov, cov.shape), cov, atol=0.015)
Example 29
def crf_log_norm(inputs, sequence_lengths, transition_params):
    """Computes the normalization for a CRF.

    Args:
      inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
          to use as input to the CRF layer.
      sequence_lengths: A [batch_size] vector of true sequence lengths.
      transition_params: A [num_tags, num_tags] transition matrix.
    Returns:
      log_norm: A [batch_size] vector of normalizers for a CRF.
    """
    # Split up the first and rest of the inputs in preparation for the forward
    # algorithm.
    first_input = tf.slice(inputs, [0, 0, 0], [-1, 1, -1])
    first_input = tf.squeeze(first_input, [1])

    # If max_seq_len is 1, we skip the algorithm and simply reduce_logsumexp over
    # the "initial state" (the unary potentials).
    def _single_seq_fn():
        log_norm = tf.reduce_logsumexp(first_input, [1])
        # Mask `log_norm` of the sequences with length <= zero.
        log_norm = tf.where(tf.less_equal(sequence_lengths, 0),
                            tf.zeros_like(log_norm), log_norm)
        return log_norm

    def _multi_seq_fn():
        """Forward computation of alpha values."""
        rest_of_input = tf.slice(inputs, [0, 1, 0], [-1, -1, -1])

        # Compute the alpha values in the forward algorithm in order to get the
        # partition function.
        forward_cell = CrfForwardRnnCell(transition_params)
        # Sequence length is not allowed to be less than zero.
        sequence_lengths_less_one = tf.maximum(
            tf.constant(0, dtype=sequence_lengths.dtype), sequence_lengths - 1)

        _, alphas = dynamic_rnn(cell=forward_cell,
                                inputs=rest_of_input,
                                sequence_length=sequence_lengths_less_one,
                                initial_state=first_input,
                                dtype=tf.float32)
        log_norm = tf.reduce_logsumexp(alphas, [1])
        # Mask `log_norm` of the sequences with length <= zero.
        log_norm = tf.where(tf.less_equal(sequence_lengths, 0),
                            tf.zeros_like(log_norm), log_norm)
        return log_norm

    return tf.cond(pred=tf.equal(
        tf.dimension_value(inputs.shape[1]) or tf.shape(inputs)[1], 1),
                   true_fn=_single_seq_fn,
                   false_fn=_multi_seq_fn)
Example 30
  def _forward(self, x):
    # Pad the last dim with a zeros vector. We need this because it lets us
    # infer the scale in the inverse function.
    y = distribution_util.pad(x, axis=-1, back=True)

    # Set shape hints.
    if x.shape.ndims is not None:
      last_dim = tf.dimension_value(x.shape[-1])
      shape = x.shape[:-1].concatenate(
          None if last_dim is None else last_dim + 1)
      y.shape.assert_is_compatible_with(shape)
      y.set_shape(shape)

    return tf.nn.softmax(y)