Example #1
def single_layer_classifier(h, label, n_way, scope, reuse=False):
    """
    :param h: hidden placeholder
    :param l: label placeholder
    :param n_way: number of class
    :param scope: scope name for this module
    :param reuse:
    :return:
    """
    # reuse can be either True or False
    with tf.variable_scope(scope, reuse=reuse):
        # [b, 4, 4, 16] => [b, -1]
        h0 = tf.layers.flatten(h)
        # => [b, 10]
        logits = tf.layers.dense(h0, n_way)
        # => [b]
        pred = tf.argmax(logits, 1)

        # The assert op must be attached to the graph via control_dependencies;
        # a bare tf.assert_less is never executed, so labels >= n_way
        # (e.g. label=[5,6,7,8,9] with n_way=5) would pass silently.
        assert_op = tf.assert_less(label, tf.to_float(n_way))
        with tf.control_dependencies([assert_op]):
            label = tf.one_hot(tf.to_int32(label), n_way)
        loss = tf.reduce_sum(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                       labels=label))

    return loss, pred
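In TF 1.x graph mode, an assert op that nothing depends on is never executed, which is why the fix above wires it in through `tf.control_dependencies`. A minimal standalone sketch of the difference (hypothetical values):

    import tensorflow as tf

    label = tf.constant([5., 6.])
    n_way = 5
    # Created but never run: out-of-range labels would pass silently.
    unused_check = tf.assert_less(label, float(n_way))
    # Attached as a control dependency: the check runs before one_hot.
    check = tf.assert_less(label, float(n_way), message='label out of range')
    with tf.control_dependencies([check]):
        one_hot = tf.one_hot(tf.to_int32(label), n_way)
    with tf.Session() as sess:
        sess.run(one_hot)  # raises InvalidArgumentError: label out of range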
Example #2
  def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      timestep = tf.gather(self._length, rows)
      indices = tf.stack([rows, timestep], 1)
      append_ops = tools.nested.map(
          lambda var, val: tf.scatter_nd_update(var, indices, val),
          self._buffers, transitions, flatten=True)
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask)
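The `[rows, timestep]` indexing above writes exactly one new timestep per selected episode. A standalone toy sketch of the same scatter pattern (TF 1.x, made-up shapes and values):

    import tensorflow as tf

    capacity, max_length = 3, 4
    length = tf.Variable([1, 0, 2], dtype=tf.int32)   # current episode lengths
    buffer_ = tf.Variable(tf.zeros([capacity, max_length]))
    rows = tf.constant([0, 2])
    timestep = tf.gather(length, rows)                # [1, 2]
    indices = tf.stack([rows, timestep], 1)           # [[0, 1], [2, 2]]
    # Writes one value per row at that row's next free slot.
    append_op = tf.scatter_nd_update(buffer_, indices, tf.constant([7., 9.]))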
Example #3
  def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less(
          tf.gather(self._length, rows), self._max_length,
          message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, transitions):
        timestep = tf.gather(self._length, rows)
        indices = tf.stack([rows, timestep], 1)
        append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
      episode_mask = tf.reduce_sum(tf.one_hot(
          rows, self._capacity, dtype=tf.int32), 0)
      return self._length.assign_add(episode_mask)
Example #4
    def energy(self, x):

        xl = tf.tensordot(
            x, self.tfsl,
            axes=[[2], [0]])  # assume sum_k x_{ijk} tfsl_{kl} gives xl_{ijl}
        xu = tf.transpose(tf.tensordot(x, self.tfsu, axes=[[1], [1]]),
                          perm=[0, 2, 1])
        xxl = tf.multiply(x, xl)
        xxu = tf.multiply(x, xu)

        e = tf.reduce_sum(xxl, axis=[1, 2]) \
          + tf.reduce_sum(xxu, axis=[1, 2])

        # normalize to the lattice size
        e = tf.divide(e, self.nspins)

        es = tf.reshape(e, [-1, 1])

        # Uncomment to force an assertion error:
        #    es = es - 1000
        # check energy range
        maxe = tf.reduce_max(es)
        mine = tf.reduce_min(es)
        maxpossible = tf.constant(2.0 * self.nspins)
        minpossible = tf.constant(-2.0 * self.nspins)

        # Multiplying by 1 creates a new op that carries the control
        # dependencies, which forces both range checks to run.
        with tf.control_dependencies([tf.assert_less(maxe, maxpossible),
                                      tf.assert_less(minpossible, mine)]):
            es = es * 1
        return es
Example #5
 def __call__(self, step):
     with tf.name_scope(self.name):
         initial_learning_rate = tf.convert_to_tensor(self.initial_learning_rate, name="initial_learning_rate")
         dtype = initial_learning_rate.dtype
         step_t = tf.cast(step, dtype)
         nupdates_t = tf.convert_to_tensor(self.nupdates, dtype=dtype)
          # Attach the assert as a control dependency so it also runs in
          # graph mode, not only when executing eagerly.
          check = tf.assert_less(step_t, nupdates_t)
          with tf.control_dependencies([check]):
              return initial_learning_rate * (1. - step_t / nupdates_t)
Example #6
  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = tf.where(tf.greater(self.df, 2.),
                     self.df - 2.,
                     tf.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (tf.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           tf.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = tf.where(
        self.df > tf.fill(self.batch_shape_tensor(), 2.),
        var,
        tf.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return tf.where(
          tf.greater(
              self.df,
              tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          tf.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              tf.assert_less(
                  tf.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Example #7
def pad_to(tensor, length, axis=0, name=None):
    '''pad the tensor to a certain length

    args:
        - tensor: the tensor to pad
        - length: the length to pad to, has to be larger than tensor.shape[axis]
        - axis: the axis to pad
        - name: the name of the operation

    returns:
        the padded tensor
    '''

    with tf.name_scope(name or 'pad_to'):
        rank = tensor.shape.ndims
        orig_length = tf.shape(tensor)[axis]
        assert_op = tf.assert_less(axis,
                                   rank,
                                   message='axis has to be less than rank')
        with tf.control_dependencies([assert_op]):
            assert_op = tf.assert_less_equal(
                orig_length,
                length,
                message='target length less than original length')
        with tf.control_dependencies([assert_op]):
            paddings = tf.SparseTensor(indices=[[axis, 1]],
                                       values=tf.expand_dims(
                                           length - orig_length, 0),
                                       dense_shape=[rank, 2])

        padded = tf.pad(tensor, tf.sparse_tensor_to_dense(paddings))

    return padded
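A quick usage sketch for pad_to (hypothetical shapes; assumes the function above is in scope):

    x = tf.ones([2, 3])
    padded = pad_to(x, 5, axis=0)   # zero-pads at the end; result shape [5, 3]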
Example #8
 def call(self, x, mask=None):
     fft_x = fft2d(x, True)
     trun_x = self.truncate(fft_x)
     x = fft2d(trun_x, False)
     img_max = tf.reduce_max(tf.abs(tf.imag(x)))
     with tf.control_dependencies([tf.assert_less(img_max, 1e-5)]):
         return tf.real(x)
Example #9
def expand_multiple_dims(tensor, num_dims, axes):
    """
    Inserts dimensions of 1 in `tensor`. Old `tensor` dimensions are
    moved to permuted positions specified in `axes`. Number of dimensions
    in result is equal to `num_dims`.
    ```python
    tensor = tf.constant([[1, 2], [3, 4]])
    expand_multiple_dims(tensor, 4, [1, 3])  # [[[[1, 2]],
                                             #   [[3, 4]]]]

    expand_multiple_dims(tensor, 4, [3, 1])  # [[[[1, 3]],
                                             #   [[2, 4]]]]
    ```
    :param tensor: a `Tensor`
    :param num_dims: a `Tensor` of shape `[]`
    :param axes: a 1D `Tensor`
    :return: a `num_dims` dimensional `Tensor` with same data as `tensor`
    """
    with tf.name_scope('expand_multiple_dims'):
        if not tf.contrib.framework.is_tensor(tensor):
            tensor = tf.constant(tensor)
        if not tf.contrib.framework.is_tensor(axes):
            axes = tf.constant(axes, dtype=tf.int32)
        sh = tf.shape(tensor, out_type=tf.int32)
        nd = tf.shape(sh, out_type=tf.int32)[0]
        with tf.device('/cpu:0'):
            assert_axes_smaller_than_num_dims = tf.assert_less(
                axes,
                num_dims,
                message='`axes` has to be less than `num_dims`')
            check_num_dims = tf.assert_greater_equal(
                num_dims,
                nd,
                message='`num_dims` has to be greater than or equal to the '
                        'number of dimensions in `tensor`')
            ass_axes_bigger_or_equal_than_num_dims = tf.assert_greater_equal(
                axes, -num_dims)

        axes %= num_dims

        ones_for_expansion = tf.ones(tf.reshape(num_dims - nd, [1]),
                                     dtype=tf.int32)
        shape_for_expansion = tf.concat([sh, ones_for_expansion], 0)

        tensor = tf.reshape(tensor, shape_for_expansion)

        updates = tf.range(0, num_dims, 1, dtype=tf.int32)
        remained_positions = get_all_values_except_specified(
            tf.range(num_dims, dtype=tf.int32), axes)
        indices = tf.concat([axes, remained_positions], 0)
        indices = tf.reshape(indices, [-1, 1])
        perm_shape = tf.reshape(num_dims, [1])
        perm = tf.scatter_nd(indices, updates, perm_shape)

        with tf.control_dependencies([
                check_num_dims, assert_axes_smaller_than_num_dims,
                ass_axes_bigger_or_equal_than_num_dims
        ]):
            return tf.transpose(tensor, perm=perm)
Example #10
 def _entropy(self):
   probs = self._probs
   if self.validate_args:
     probs = control_flow_ops.with_dependencies([
         tf.assert_less(
             probs,
             tf.constant(1., probs.dtype),
             message="Entropy is undefined when logits = inf or probs = 1.")
     ], probs)
   # Claim: entropy(p) = softplus(s)/p - s
   # where s=logits and p=probs.
   #
   # Proof:
   #
   # entropy(p)
   # := -[(1-p)log(1-p) + plog(p)]/p
   # = -[log(1-p) + plog(p/(1-p))]/p
   # = -[-softplus(s) + ps]/p
   # = softplus(s)/p - s
   #
   # since,
   # log[1-sigmoid(s)]
   # = log[1/(1+exp(s))]
   # = -log[1+exp(s)]
   # = -softplus(s)
   #
   # using the fact that,
   # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
   return tf.nn.softplus(self.logits) / probs - self.logits
Example #11
 def call(self, inputs, **kwargs):
     with tf.control_dependencies([
             tf.assert_greater_equal(inputs, self.index_offset),
             tf.assert_less(inputs, self.index_offset + self._num_symbols)
     ]):
         return tf.nn.embedding_lookup(self._embedding,
                                       inputs - self.index_offset)
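The same guard pattern as a self-contained sketch, where index_offset, num_symbols, and the embedding table are made-up stand-ins for the attributes above:

    import tensorflow as tf

    index_offset, num_symbols = 10, 5
    embedding = tf.random_normal([num_symbols, 8])
    inputs = tf.constant([10, 14])   # valid range: [10, 15)
    with tf.control_dependencies([
            tf.assert_greater_equal(inputs, index_offset),
            tf.assert_less(inputs, index_offset + num_symbols)]):
        vectors = tf.nn.embedding_lookup(embedding, inputs - index_offset)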
Example #12
    def replace(self, episodes, length, rows=None):
        """Replace full episodes.

        Args:
          episodes: Tuple of transition quantities with batch and time dimensions.
          length: Batch of sequence lengths.
          rows: Episodes to replace, defaults to all.

        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        assert_capacity = tf.assert_less(rows,
                                         self._capacity,
                                         message='capacity exceeded')
        with tf.control_dependencies([assert_capacity]):
            assert_max_length = tf.assert_less_equal(
                length, self._max_length, message='max length exceeded')
        with tf.control_dependencies([assert_max_length]):
            replace_ops = tools.nested.map(
                lambda var, val: tf.scatter_update(var, rows, val),
                self._buffers,
                episodes,
                flatten=True)
        with tf.control_dependencies(replace_ops):
            return tf.scatter_update(self._length, rows, length)
Example #13
    def _std_var_helper(self, statistic, statistic_name, statistic_ndims,
                        df_factor_fn):
        """Helper to compute stddev, covariance and variance."""
        df = tf.reshape(
            self.df,
            tf.concat([
                tf.shape(self.df),
                tf.ones([statistic_ndims], dtype=tf.int32)
            ], -1))
        df = _broadcast_to_shape(df, tf.shape(statistic))
        # We need to put the tf.where inside the outer tf.where to ensure we never
        # hit a NaN in the gradient.
        denom = tf.where(df > 2., df - 2., tf.ones_like(df))
        statistic = statistic * df_factor_fn(df / denom)
        # When 1 < df <= 2, stddev/variance are infinite.
        inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
        result_where_defined = tf.where(
            df > 2., statistic, tf.fill(tf.shape(statistic), inf, name="inf"))

        if self.allow_nan_stats:
            nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
            return tf.where(df > 1., result_where_defined,
                            tf.fill(tf.shape(statistic), nan, name="nan"))
        else:
            with tf.control_dependencies([
                    tf.assert_less(tf.cast(1., self.dtype),
                                   df,
                                   message=statistic_name +
                                   " not defined for components of df <= 1"),
            ]):
                return tf.identity(result_where_defined)
Example #14
 def testRejection4D(self):
     num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
     det_bounds = np.array([0.0], dtype=np.float32)
     exact_volumes = [four_by_four_volume()]
     (rej_weights, rej_proposal_volume
      ) = corr.correlation_matrix_volume_rejection_samples(det_bounds,
                                                           4,
                                                           [num_samples, 1],
                                                           dtype=np.float32,
                                                           seed=45)
     # shape of rej_weights: [num_samples, 1, 4, 4]
     chk1 = st.assert_true_mean_equal_by_dkwm(rej_weights,
                                              low=0.,
                                              high=rej_proposal_volume,
                                              expected=exact_volumes,
                                              false_fail_rate=1e-6)
     chk2 = tf.assert_less(
         st.min_discrepancy_of_true_means_detectable_by_dkwm(
             num_samples,
             low=0.,
             high=rej_proposal_volume,
             false_fail_rate=1e-6,
             false_pass_rate=1e-6),
         # Going for about a 10% relative error
         1.1)
     with tf.control_dependencies([chk1, chk2]):
         rej_weights = tf.identity(rej_weights)
     self.evaluate(rej_weights)
Example #15
  def _testSampleLogProbExact(
      self, concentrations, det_bounds, dim, means,
      num_samples=int(1e5), dtype=np.float32, target_discrepancy=0.1, seed=42):
    # For test methodology see the comment in
    # _testSampleConsistentLogProbInterval, except that this test
    # checks those parameter settings where the true volume is known
    # analytically.
    concentration = np.array(concentrations, dtype=dtype)
    det_bounds = np.array(det_bounds, dtype=dtype)
    means = np.array(means, dtype=dtype)
    # Add a tolerance to guard against some of the importance_weights exceeding
    # the theoretical maximum (importance_maxima) due to numerical inaccuracies
    # while lower bounding the determinant. See corresponding comment in
    # _testSampleConsistentLogProbInterval.
    high_tolerance = 1e-6

    testee_lkj = tfd.LKJ(
        dimension=dim, concentration=concentration, validate_args=True)
    x = testee_lkj.sample(num_samples, seed=seed)
    importance_weights = (
        tf.exp(-testee_lkj.log_prob(x)) * _det_ok_mask(x, det_bounds))
    importance_maxima = (1. / det_bounds) ** (concentration - 1) * tf.exp(
        testee_lkj._log_normalization())

    chk1 = st.assert_true_mean_equal_by_dkwm(
        importance_weights, low=0., high=importance_maxima + high_tolerance,
        expected=means, false_fail_rate=1e-6)
    chk2 = tf.assert_less(
        st.min_discrepancy_of_true_means_detectable_by_dkwm(
            num_samples, low=0., high=importance_maxima + high_tolerance,
            false_fail_rate=1e-6, false_pass_rate=1e-6),
        dtype(target_discrepancy))
    self.evaluate([chk1, chk2])
Example #16
 def testRejection2D(self):
     num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
     det_bounds = np.array(
         [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5],
         dtype=np.float32)
     exact_volumes = two_by_two_volume(det_bounds)
     (rej_weights, rej_proposal_volume
      ) = corr.correlation_matrix_volume_rejection_samples(det_bounds,
                                                           2,
                                                           [num_samples, 9],
                                                           dtype=np.float32,
                                                           seed=43)
     # shape of rej_weights: [num_samples, 9, 2, 2]
     chk1 = st.assert_true_mean_equal_by_dkwm(rej_weights,
                                              low=0.,
                                              high=rej_proposal_volume,
                                              expected=exact_volumes,
                                              false_fail_rate=1e-6)
     chk2 = tf.assert_less(
         st.min_discrepancy_of_true_means_detectable_by_dkwm(
             num_samples,
             low=0.,
             high=rej_proposal_volume,
             # Correct the false fail rate due to different broadcasting
             false_fail_rate=1.1e-7,
             false_pass_rate=1e-6),
         0.036)
     with tf.control_dependencies([chk1, chk2]):
         rej_weights = tf.identity(rej_weights)
     self.evaluate(rej_weights)
Example #17
def _distribution_statistics(distribution: tf.Tensor) -> tf.Tensor:
  """Implementation of `distribution_statisticsy`."""
  _, num_classes = distribution.shape.as_list()
  assert num_classes is not None

  # Each batch element is a probability distribution.
  max_discrepancy = tf.reduce_max(
      tf.abs(tf.reduce_sum(distribution, axis=1) - 1.0))
  with tf.control_dependencies([tf.assert_less(max_discrepancy, 0.0001)]):
    values = tf.reshape(tf.linspace(0.0, 1.0, num_classes), [1, num_classes])

    mode = tf.to_float(tf.argmax(distribution,
                                 axis=1)) / tf.constant(num_classes - 1.0)
    median = tf.reduce_sum(
        tf.to_float(tf.cumsum(distribution, axis=1) < 0.5),
        axis=1) / tf.constant(num_classes - 1.0)
    mean = tf.reduce_sum(distribution * values, axis=1)
    standard_deviation = tf.sqrt(
        tf.reduce_sum(
            ((values - tf.reshape(mean, [-1, 1]))**2) * distribution, axis=1))
    probability_nonzero = 1.0 - distribution[:, 0]
    entropy = tf.reduce_sum(
        -(distribution * tf.log(distribution + 0.0000001)), axis=1) / tf.log(
            float(num_classes))

    statistics = tf.stack(
        [mode, median, mean, standard_deviation, probability_nonzero, entropy],
        axis=1)

    return statistics
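A toy check of the statistics above (assumes the function is in scope; num_classes=5):

    dist = tf.constant([[0.2, 0.2, 0.2, 0.2, 0.2]])   # uniform over 5 classes
    stats = _distribution_statistics(dist)
    # mean = sum(values * 0.2) = 0.5; entropy = log(5)/log(5) = 1.0
    # (up to the 1e-7 smoothing term); probability_nonzero = 1 - 0.2 = 0.8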
Example #18
    def replace(self, episodes, length, rows=None):
        """Replace full episodes.

        Args:
          episodes: Tuple of transition quantities with batch and time dimensions.
          length: Batch of sequence lengths.
          rows: Episodes to replace, defaults to all.

        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        assert_capacity = tf.assert_less(rows,
                                         self._capacity,
                                         message='capacity exceeded')
        with tf.control_dependencies([assert_capacity]):
            assert_max_length = tf.assert_less_equal(
                length, self._max_length, message='max length exceeded')
        replace_ops = []
        with tf.control_dependencies([assert_max_length]):
            for buffer_, elements in zip(self._buffers, episodes):
                replace_op = tf.scatter_update(buffer_, rows, elements)
                replace_ops.append(replace_op)
        with tf.control_dependencies(replace_ops):
            return tf.scatter_update(self._length, rows, length)
Example #20
 def test_doesnt_raise_when_both_empty(self):
     with self.test_session():
         larry = tf.constant([])
         curly = tf.constant([])
         with tf.control_dependencies([tf.assert_less(larry, curly)]):
             out = tf.identity(larry)
         out.eval()
Example #21
 def test_raises_when_equal(self):
   with self.test_session():
     small = tf.constant([1, 2], name="small")
     with tf.control_dependencies([tf.assert_less(small, small)]):
       out = tf.identity(small)
     with self.assertRaisesOpError("small.*small"):
       out.eval()
Example #22
 def _entropy(self):
     probs = self._probs
     if self.validate_args:
         probs = control_flow_ops.with_dependencies([
             tf.assert_less(
                 probs,
                 tf.constant(1., probs.dtype),
                 message=
                 "Entropy is undefined when logits = inf or probs = 1.")
         ], probs)
     # Claim: entropy(p) = softplus(s)/p - s
     # where s=logits and p=probs.
     #
     # Proof:
     #
     # entropy(p)
     # := -[(1-p)log(1-p) + plog(p)]/p
     # = -[log(1-p) + plog(p/(1-p))]/p
     # = -[-softplus(s) + ps]/p
     # = softplus(s)/p - s
     #
     # since,
     # log[1-sigmoid(s)]
      # = log[1/(1+exp(s))]
     # = -log[1+exp(s)]
     # = -softplus(s)
     #
     # using the fact that,
     # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
     return tf.nn.softplus(self.logits) / probs - self.logits
Example #23
 def test_raises_when_equal(self):
     with self.test_session():
         small = tf.constant([1, 2], name="small")
         with tf.control_dependencies([tf.assert_less(small, small)]):
             out = tf.identity(small)
         with self.assertRaisesOpError("small.*small"):
             out.eval()
Example #24
 def test_doesnt_raise_when_both_empty(self):
   with self.test_session():
     larry = tf.constant([])
     curly = tf.constant([])
     with tf.control_dependencies([tf.assert_less(larry, curly)]):
       out = tf.identity(larry)
     out.eval()
Example #25
 def test_doesnt_raise_when_less(self):
     with self.test_session():
         small = tf.constant([3, 1], name="small")
         big = tf.constant([4, 2], name="big")
         with tf.control_dependencies([tf.assert_less(small, big)]):
             out = tf.identity(small)
         out.eval()
Example #26
def crop_range_image(range_images, new_width, scope=None):
  """Crops range image by shrinking the width.

  Requires: new_width is smaller than the existing width.

  Args:
    range_images: [B, H, W, ...]
    new_width: an integer.
    scope: the name scope.

  Returns:
    range_image_crops: [B, H, new_width, ...]
  """
  # pylint: disable=unbalanced-tuple-unpacking
  shape = _combined_static_and_dynamic_shape(range_images)
  width = shape[2]
  if width == new_width:
    return range_images
  if new_width < 1:
    raise ValueError('new_width must be positive.')
  if width is not None and new_width >= width:
    raise ValueError('new_width {} should be < the old width {}.'.format(
        new_width, width))

  with tf.control_dependencies([tf.assert_less(new_width, width)]):
    # Use the caller-provided scope, falling back to 'CropRangeImage'.
    with tf.name_scope(scope, 'CropRangeImage', [range_images]):
      diff = width - new_width

      left = diff // 2
      right = diff - left
      range_image_crops = range_images[:, :, left:-right, ...]
      return range_image_crops
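A hypothetical call, just to illustrate the shapes (assumes crop_range_image and its shape helper are importable):

    images = tf.zeros([2, 64, 2650, 4])        # [B, H, W, C]
    cropped = crop_range_image(images, 1920)   # -> [2, 64, 1920, 4]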
Example #27
  def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    replace_ops = []
    with tf.control_dependencies([assert_max_length]):
      for buffer_, elements in zip(self._buffers, episodes):
        replace_op = tf.scatter_update(buffer_, rows, elements)
        replace_ops.append(replace_op)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length)
Example #28
 def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
   with self.test_session():
     small = tf.constant([1], name="small")
     big = tf.constant([3, 2], name="big")
     with tf.control_dependencies([tf.assert_less(small, big)]):
       out = tf.identity(small)
     out.eval()
Example #29
 def _mode(self):
     mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
     if self.allow_nan_stats:
         nan = tf.fill(self.batch_shape_tensor(),
                       np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
                       name="nan")
         is_defined = tf.logical_and(self.concentration1 > 1.,
                                     self.concentration0 > 1.)
         return tf.where(is_defined, mode, nan)
     return control_flow_ops.with_dependencies([
         tf.assert_less(tf.ones([], dtype=self.dtype),
                        self.concentration1,
                        message="Mode undefined for concentration1 <= 1."),
         tf.assert_less(tf.ones([], dtype=self.dtype),
                        self.concentration0,
                        message="Mode undefined for concentration0 <= 1.")
     ], mode)
Example #30
 def test_raises_when_greater(self):
     with self.test_session():
         small = tf.constant([1, 2], name="small")
         big = tf.constant([3, 4], name="big")
         with tf.control_dependencies([tf.assert_less(big, small)]):
             out = tf.identity(small)
         with self.assertRaisesOpError("big.*small"):
             out.eval()
Example #31
 def test_raises_when_less_but_non_broadcastable_shapes(self):
   with self.test_session():
     small = tf.constant([1, 1, 1], name="small")
     big = tf.constant([3, 2], name="big")
     with self.assertRaisesRegexp(ValueError, "broadcast"):
       with tf.control_dependencies([tf.assert_less(small, big)]):
         out = tf.identity(small)
       out.eval()
Example #32
 def test_raises_when_less_but_non_broadcastable_shapes(self):
     with self.test_session():
         small = tf.constant([1, 1, 1], name="small")
         big = tf.constant([3, 2], name="big")
         with self.assertRaisesRegexp(ValueError, "broadcast"):
             with tf.control_dependencies([tf.assert_less(small, big)]):
                 out = tf.identity(small)
             out.eval()
Example #33
 def test_raises_when_greater(self):
   with self.test_session():
     small = tf.constant([1, 2], name="small")
     big = tf.constant([3, 4], name="big")
     with tf.control_dependencies([tf.assert_less(big, small)]):
       out = tf.identity(small)
     with self.assertRaisesOpError("big.*small"):
       out.eval()
Example #34
    def append(self, transitions, rows=None):
        """Append a batch of transitions to rows of the memory.

        Args:
          transitions: Tuple of transition quantities with batch dimension.
          rows: Episodes to append to, defaults to all.

        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        # Check that the indices given in rows are smaller than self._capacity, which is the number
        # of rows (or equivalently, the number of environments).
        assert_capacity = tf.assert_less(rows,
                                         self._capacity,
                                         message='capacity exceeded')

        # Check that the needed rows all have at least a single slot of space to store the
        # new experience records (one record per row, or per environment).
        with tf.control_dependencies([assert_capacity]):
            assert_max_length = tf.assert_less(tf.gather(self._length, rows),
                                               self._max_length,
                                               message='max length exceeded')

        append_ops = []
        with tf.control_dependencies([assert_max_length]):
            for buffer_, elements in zip(self._buffers, transitions):
                timestep = tf.gather(self._length, rows)
                indices = tf.stack([rows, timestep], 1)
                append_ops.append(
                    tf.scatter_nd_update(buffer_, indices, elements))

        # Add one to every row that received a new experience record.
        # tf.one_hot(rows, self._capacity, dtype=tf.int32) gives one one-hot
        # vector per environment specified in rows, something like:
        # [array([[0, 1, 0, 0, 0, 0],
        #         [0, 0, 1, 0, 0, 0],
        #         [0, 0, 0, 1, 0, 0]], dtype=int32)]
        # The reduce_sum then collapses these into a single per-row increment
        # mask used to update the self._length tensor.
        with tf.control_dependencies(append_ops):
            episode_mask = tf.reduce_sum(
                tf.one_hot(rows, self._capacity, dtype=tf.int32), 0)
            return self._length.assign_add(episode_mask)
Example #35
 def mixture_kl():
     with tf.control_dependencies([tf.assert_greater(consistency_trust, 0.0),
                                   tf.assert_less(consistency_trust, 1.0)]):
         uniform = tf.constant(1 / num_classes, shape=[num_classes])
         mixed_softmax1 = consistency_trust * softmax1 + (1 - consistency_trust) * uniform
         mixed_softmax2 = consistency_trust * softmax2 + (1 - consistency_trust) * uniform
         costs = tf.reduce_sum(mixed_softmax2 * tf.log(mixed_softmax2 / mixed_softmax1), axis=1)
         costs = costs * kl_cost_multiplier
         return costs
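A toy evaluation of the mixture trick above (made-up values; num_classes=2, consistency_trust=0.5):

    softmax1 = tf.constant([[0.9, 0.1]])
    softmax2 = tf.constant([[0.6, 0.4]])
    # mixed1 = [0.7, 0.3], mixed2 = [0.55, 0.45]
    # KL(mixed2 || mixed1) = 0.55*log(0.55/0.7) + 0.45*log(0.45/0.3) ~= 0.05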
Example #37
    def encode_tensorflow(
            self, input_strings: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
        """Encodes `input_strings` to tensors.

        Args:
          input_strings: A 1-D `tf.Tensor` of type `tf.string`. Denote the shape
            of `input_strings` as `(num_strings,)`.

        Returns:
          A Tuple `(encoded_strings, trimmed_input_strings)`
          - encoded_strings: A `tf.Tensor` of shape
            `(num_strings, self._num_chunks)` containing encoded `input_strings`.
          - trimmed_input_strings: A `tf.Tensor` of shape `(num_strings,)`
            containing trimmed `input_strings`, such that the length of each
            string is no more than `self._max_length` bytes.
          Note that a utf-8 character might take more than one byte, so both the
          encoded and trimmed strings could contain characters that are cut in
          the middle. The caller needs to be aware of this when decoding these
          strings, e.g. decode a byte string s by `s.decode('utf-8', 'ignore')`
          to avoid decoding errors.
        """
        string_bytes = tf.io.decode_raw(input_strings,
                                        out_type=tf.uint8,
                                        fixed_length=self._max_length)
        string_bytes_reshaped = tf.reshape(string_bytes,
                                           (-1, self._dtype_size_bytes))
        string_bytes_cast = tf.cast(string_bytes_reshaped, self._dtype)
        dtype_multipliers = tf.constant([[2**(bit)]
                                         for bit in self._bit_lengths],
                                        dtype=self._dtype)
        encoded_as_dtype = tf.matmul(string_bytes_cast, dtype_multipliers)
        if self._max_chunk_value:
            tf.assert_less(
                encoded_as_dtype,
                tf.constant(self._max_chunk_value, dtype=self._dtype))
        encoded_strings = tf.reshape(encoded_as_dtype, (-1, self._num_chunks))

        int_to_char_map = tf.constant(self._int_to_byte_map, dtype=tf.string)
        trimmed_input_strings = tf.nn.embedding_lookup(
            int_to_char_map, tf.cast(string_bytes, dtype=tf.int32))
        trimmed_input_strings = tf.strings.reduce_join(trimmed_input_strings,
                                                       axis=1)

        return encoded_strings, trimmed_input_strings
Example #38
def box_clip_to_boundaries(box_tlbr_tensor, min_x_tensor, max_x_tensor,
                           min_y_tensor, max_y_tensor):
    assert isinstance(box_tlbr_tensor, tf.Tensor)
    assert (box_tlbr_tensor.dtype == tf.float32)
    assert (box_tlbr_tensor.shape.ndims is not None)
    assert (box_tlbr_tensor.shape.ndims > 0)
    assert (box_tlbr_tensor.shape[-1].value == 4)

    with tf.name_scope("box_clip_to_boundaries"):
        min_x_tensor = tf.cast(min_x_tensor, tf.float32)
        min_x_tensor.shape.assert_has_rank(0)

        max_x_tensor = tf.cast(max_x_tensor, tf.float32)
        max_x_tensor.shape.assert_has_rank(0)

        min_y_tensor = tf.cast(min_y_tensor, tf.float32)
        min_y_tensor.shape.assert_has_rank(0)

        max_y_tensor = tf.cast(max_y_tensor, tf.float32)
        max_y_tensor.shape.assert_has_rank(0)

        validate_assertion_list = [
            tf.assert_less(min_x_tensor, max_x_tensor),
            tf.assert_less(min_y_tensor, max_y_tensor)
        ]
        with tf.control_dependencies(validate_assertion_list):
            clipped_top_tensor = tf.clip_by_value(box_tlbr_tensor[..., 0],
                                                  min_y_tensor, max_y_tensor)
            clipped_left_tensor = tf.clip_by_value(box_tlbr_tensor[..., 1],
                                                   min_x_tensor, max_x_tensor)
            clipped_bottom_tensor = tf.clip_by_value(box_tlbr_tensor[..., 2],
                                                     min_y_tensor,
                                                     max_y_tensor)
            clipped_right_tensor = tf.clip_by_value(box_tlbr_tensor[..., 3],
                                                    min_x_tensor, max_x_tensor)

            clipped_box_tlbr_tensor = tf.stack(
                [clipped_top_tensor, clipped_left_tensor,
                 clipped_bottom_tensor, clipped_right_tensor],
                axis=-1)

    return clipped_box_tlbr_tensor
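A toy call (hypothetical box; x bounded to [0, 10], y to [0, 5]):

    boxes = tf.constant([[-1., 2., 6., 12.]])   # [top, left, bottom, right]
    clipped = box_clip_to_boundaries(boxes, 0., 10., 0., 5.)
    # -> [[0., 2., 5., 10.]]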
Example #39
 def _maybe_assert_valid_sample(self, x):
     """Checks the validity of a sample."""
     if not self.validate_args:
         return x
     return control_flow_ops.with_dependencies([
         tf.assert_positive(x, message="sample must be positive"),
         tf.assert_less(x,
                        tf.ones([], self.dtype),
                        message="sample must be less than `1`."),
     ], x)
Example #40
    def get_vidx3d(xyzs, min_xyz, res):
        vidx_3d = (xyzs - min_xyz) / res

        # Check that every voxel index falls inside the expected [0, 8) range.
        c0 = tf.assert_greater_equal(tf.reduce_min(vidx_3d), 0.0)
        c1 = tf.assert_less(tf.reduce_max(vidx_3d), 8.0)
        with tf.control_dependencies([c0, c1]):
            vidx_3d = tf.cast(tf.math.floor(vidx_3d), tf.int32)
        return vidx_3d
Example #41
 def _maybe_assert_valid_sample(self, x):
   """Checks the validity of a sample."""
   if not self.validate_args:
     return x
   return control_flow_ops.with_dependencies([
       tf.assert_positive(x, message="sample must be positive"),
       tf.assert_less(
           x,
           tf.ones([], self.dtype),
           message="sample must be less than `1`."),
   ], x)
Example #42
    def _mode(self):
        a = self.concentration1
        b = self.concentration0
        mode = ((a - 1) / (a * b - 1))**(1. / a)
        if self.allow_nan_stats:
            nan = tf.fill(self.batch_shape_tensor(),
                          np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
                          name="nan")
            is_defined = (self.concentration1 > 1.) & (self.concentration0 >
                                                       1.)
            return tf.where(is_defined, mode, nan)

        return control_flow_ops.with_dependencies([
            tf.assert_less(tf.ones([], dtype=self.concentration1.dtype),
                           self.concentration1,
                           message="Mode undefined for concentration1 <= 1."),
            tf.assert_less(tf.ones([], dtype=self.concentration0.dtype),
                           self.concentration0,
                           message="Mode undefined for concentration0 <= 1.")
        ], mode)
Example #43
 def _mode(self):
   mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
   if self.allow_nan_stats:
     nan = tf.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     is_defined = tf.logical_and(self.concentration1 > 1.,
                                 self.concentration0 > 1.)
     return tf.where(is_defined, mode, nan)
   return control_flow_ops.with_dependencies([
       tf.assert_less(
           tf.ones([], dtype=self.dtype),
           self.concentration1,
           message="Mode undefined for concentration1 <= 1."),
       tf.assert_less(
           tf.ones([], dtype=self.dtype),
           self.concentration0,
           message="Mode undefined for concentration0 <= 1.")
   ], mode)
Example #44
  def _mode(self):
    a = self.concentration1
    b = self.concentration0
    mode = ((a - 1) / (a * b - 1))**(1. / a)
    if self.allow_nan_stats:
      nan = tf.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
          name="nan")
      is_defined = (self.concentration1 > 1.) & (self.concentration0 > 1.)
      return tf.where(is_defined, mode, nan)

    return control_flow_ops.with_dependencies([
        tf.assert_less(
            tf.ones([], dtype=self.concentration1.dtype),
            self.concentration1,
            message="Mode undefined for concentration1 <= 1."),
        tf.assert_less(
            tf.ones([], dtype=self.concentration0.dtype),
            self.concentration0,
            message="Mode undefined for concentration0 <= 1.")
    ], mode)
Example #45
 def _mean(self):
   mean = self.rate / (self.concentration - 1.)
   if self.allow_nan_stats:
     nan = tf.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     return tf.where(self.concentration > 1., mean, nan)
   else:
     return control_flow_ops.with_dependencies([
         tf.assert_less(
             tf.ones([], self.dtype),
             self.concentration,
             message="mean undefined when any concentration <= 1"),
     ], mean)
Example #46
  def _maybe_assert_valid_y(self, y):
    if not self.validate_args:
      return y
    is_valid = [
        tf.assert_greater(
            y,
            tf.cast(-1., dtype=y.dtype.base_dtype),
            message="Inverse transformation input must be greater than -1."),
        tf.assert_less(
            y,
            tf.cast(1., dtype=y.dtype.base_dtype),
            message="Inverse transformation input must be less than 1.")
    ]

    return control_flow_ops.with_dependencies(is_valid, y)
Example #47
 def _maybe_assert_valid_concentration(self, concentration, validate_args):
   """Checks the validity of the concentration parameter."""
   if not validate_args:
     return concentration
   return control_flow_ops.with_dependencies([
       tf.assert_positive(
           concentration,
           message="Concentration parameter must be positive."),
       tf.assert_rank_at_least(
           concentration, 1,
           message="Concentration parameter must have >=1 dimensions."),
       tf.assert_less(
           1, tf.shape(concentration)[-1],
           message="Concentration parameter must have event_size >= 2."),
   ], concentration)
Example #48
  def __init__(self,
               low=0.,
               high=1.,
               validate_args=False,
               allow_nan_stats=True,
               name="Uniform"):
    """Initialize a batch of Uniform distributions.

    Args:
      low: Floating point tensor, lower boundary of the output interval. Must
        have `low < high`.
      high: Floating point tensor, upper boundary of the output interval. Must
        have `low < high`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=True`.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[low, high]) as name:
      dtype = dtype_util.common_dtype([low, high], tf.float32)
      low = tf.convert_to_tensor(low, name="low", dtype=dtype)
      high = tf.convert_to_tensor(high, name="high", dtype=dtype)
      with tf.control_dependencies([
          tf.assert_less(
              low, high, message="uniform not defined when low >= high.")
      ] if validate_args else []):
        self._low = tf.identity(low)
        self._high = tf.identity(high)
        tf.assert_same_float_dtype([self._low, self._high])
    super(Uniform, self).__init__(
        dtype=self._low.dtype,
        reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._low,
                       self._high],
        name=name)
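A brief sketch of the validation path (assumes TensorFlow Probability's tfd.Uniform and TF 1.x graph mode):

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    bad = tfd.Uniform(low=2., high=1., validate_args=True)
    with tf.Session() as sess:
        # Raises InvalidArgumentError: uniform not defined when low >= high.
        sess.run(bad.sample())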
Example #49
 def _variance(self):
   var = (
       tf.square(self.rate) / tf.square(self.concentration - 1.) /
       (self.concentration - 2.))
   if self.allow_nan_stats:
     nan = tf.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     return tf.where(self.concentration > 2., var, nan)
   else:
     return control_flow_ops.with_dependencies([
         tf.assert_less(
             tf.constant(2., dtype=self.dtype),
             self.concentration,
             message="variance undefined when any concentration <= 2"),
     ], var)
Example #50
 def _mode(self):
   k = tf.cast(self.event_shape_tensor()[0], self.dtype)
   mode = (self.concentration - 1.) / (
       self.total_concentration[..., tf.newaxis] - k)
   if self.allow_nan_stats:
     nan = tf.fill(
         tf.shape(mode),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     return tf.where(
         tf.reduce_all(self.concentration > 1., axis=-1),
         mode, nan)
   return control_flow_ops.with_dependencies([
       tf.assert_less(
           tf.ones([], self.dtype),
           self.concentration,
           message="Mode undefined when any concentration <= 1"),
   ], mode)
Example #51
 def testMean(self, dtype):
   testee_lkj = tfd.LKJ(dimension=3, concentration=dtype([1., 3., 5.]))
   num_samples = 20000
   results = testee_lkj.sample(sample_shape=[num_samples])
   mean = testee_lkj.mean()
   self.assertEqual(mean.shape, [3, 3, 3])
   check1 = st.assert_true_mean_equal_by_dkwm(
       samples=results, low=-1., high=1.,
       expected=mean,
       false_fail_rate=1e-6)
   check2 = tf.assert_less(
       st.min_discrepancy_of_true_means_detectable_by_dkwm(
           num_samples, low=-1., high=1.,
           # Smaller false fail rate because of different batch sizes between
           # these two checks.
           false_fail_rate=1e-7,
           false_pass_rate=1e-6),
       # 4% relative error
       0.08)
   self.evaluate([check1, check2])
Example #52
 def _mean(self):
   mean = self.loc * tf.ones(self.batch_shape_tensor(),
                             dtype=self.dtype)
   if self.allow_nan_stats:
     nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
     return tf.where(
         tf.greater(
             self.df,
             tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
         mean,
         tf.fill(self.batch_shape_tensor(), nan, name="nan"))
   else:
     return control_flow_ops.with_dependencies(
         [
             tf.assert_less(
                 tf.ones([], dtype=self.dtype),
                 self.df,
                 message="mean not defined for components of df <= 1"),
         ],
         mean)
Example #53
  def testRejection4D(self):
   num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
   det_bounds = np.array([0.0], dtype=np.float32)
   exact_volumes = [four_by_four_volume()]
   (rej_weights,
    rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
        det_bounds, 4, [num_samples, 1], dtype=np.float32, seed=45)
   # shape of rej_weights: [num_samples, 1, 4, 4]
   chk1 = st.assert_true_mean_equal_by_dkwm(
       rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
       false_fail_rate=1e-6)
   chk2 = tf.assert_less(
       st.min_discrepancy_of_true_means_detectable_by_dkwm(
           num_samples, low=0., high=rej_proposal_volume,
           false_fail_rate=1e-6, false_pass_rate=1e-6),
       # Going for about a 10% relative error
       1.1)
   with tf.control_dependencies([chk1, chk2]):
     rej_weights = tf.identity(rej_weights)
   self.evaluate(rej_weights)
Example #54
def create_initial_softmax_from_labels(last_frame_labels, reference_labels,
                                       decoder_output_stride, reduce_labels):
  """Creates initial softmax predictions from last frame labels.

  Args:
    last_frame_labels: last frame labels of shape [1, height, width, 1].
    reference_labels: reference frame labels of shape [1, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder. Can be None, in
      this case it's assumed that the last_frame_labels and reference_labels
      are already scaled to the decoder output resolution.
    reduce_labels: Boolean, whether to reduce the depth of the softmax one_hot
      encoding to the actual number of labels present in the reference frame
      (otherwise the depth will be the highest label index + 1).

  Returns:
    init_softmax: the initial softmax predictions.
  """
  if decoder_output_stride is None:
    labels_output_size = last_frame_labels
    reference_labels_output_size = reference_labels
  else:
    h = tf.shape(last_frame_labels)[1]
    w = tf.shape(last_frame_labels)[2]
    h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
    w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
    labels_output_size = tf.image.resize_nearest_neighbor(
        last_frame_labels, [h_sub, w_sub], align_corners=True)
    reference_labels_output_size = tf.image.resize_nearest_neighbor(
        reference_labels, [h_sub, w_sub], align_corners=True)
  if reduce_labels:
    unique_labels, _ = tf.unique(tf.reshape(reference_labels_output_size, [-1]))
    depth = tf.size(unique_labels)
  else:
    depth = tf.reduce_max(reference_labels_output_size) + 1
  one_hot_assertion = tf.assert_less(tf.reduce_max(labels_output_size), depth)
  with tf.control_dependencies([one_hot_assertion]):
    init_softmax = tf.one_hot(tf.squeeze(labels_output_size,
                                         axis=-1),
                              depth=depth,
                              dtype=tf.float32)
  return init_softmax
Example #55
  def testRejection2D(self):
   num_samples = int(1e5)  # Chosen for a small min detectable discrepancy
   det_bounds = np.array(
       [0.01, 0.02, 0.03, 0.04, 0.05, 0.3, 0.35, 0.4, 0.5], dtype=np.float32)
   exact_volumes = two_by_two_volume(det_bounds)
   (rej_weights,
    rej_proposal_volume) = corr.correlation_matrix_volume_rejection_samples(
        det_bounds, 2, [num_samples, 9], dtype=np.float32, seed=43)
   # shape of rej_weights: [num_samples, 9, 2, 2]
   chk1 = st.assert_true_mean_equal_by_dkwm(
       rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes,
       false_fail_rate=1e-6)
   chk2 = tf.assert_less(
       st.min_discrepancy_of_true_means_detectable_by_dkwm(
           num_samples, low=0., high=rej_proposal_volume,
           # Correct the false fail rate due to different broadcasting
           false_fail_rate=1.1e-7, false_pass_rate=1e-6),
       0.036)
   with tf.control_dependencies([chk1, chk2]):
     rej_weights = tf.identity(rej_weights)
   self.evaluate(rej_weights)
Example #56
def logit(x):
    """Evaluate :math:`\log(x / (1 - x))` elementwise.

    Parameters
    ----------
    x : tf.Tensor
        scalar, vector, matrix, or n-Tensor

    Returns
    -------
    tf.Tensor
        size corresponding to size of input

    Raises
    ------
    InvalidArgumentError
        If the input is not between :math:`(0,1)` elementwise.
    """
    dependencies = [tf.assert_positive(x),
                    tf.assert_less(x, 1.0)]
    x = control_flow_ops.with_dependencies(dependencies, x)

    return tf.log(x) - tf.log(1.0 - x)
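A quick numeric check (values rounded; assumes the logit above is in scope):

    x = tf.constant([0.25, 0.5, 0.75])
    y = logit(x)   # approximately [-1.0986, 0.0, 1.0986]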
Example #57
  def replace(self, episodes, length, rows=None):
    """Replace full episodes.

    Args:
      episodes: Tuple of transition quantities with batch and time dimensions.
      length: Batch of sequence lengths.
      rows: Episodes to replace, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity, message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
      assert_max_length = tf.assert_less_equal(
          length, self._max_length, message='max length exceeded')
    with tf.control_dependencies([assert_max_length]):
      replace_ops = tools.nested.map(
          lambda var, val: tf.scatter_update(var, rows, val),
          self._buffers, episodes, flatten=True)
    with tf.control_dependencies(replace_ops):
      return tf.scatter_update(self._length, rows, length)
Example #58
def logit(x):
    """Evaluate :math:`\log(x / (1 - x))` elementwise.

    Parameters
    ----------
    x : tf.Tensor
        An n-D tensor.

    Returns
    -------
    tf.Tensor
        A tensor of same shape as input.

    Raises
    ------
    InvalidArgumentError
        If the input is not between :math:`(0,1)` elementwise.
    """
    dependencies = [tf.assert_positive(x),
                    tf.assert_less(x, 1.0)]
    x = control_flow_ops.with_dependencies(dependencies, x)
    x = tf.cast(x, dtype=tf.float32)

    return tf.log(x) - tf.log(1.0 - x)
Example #59
  def _sample_n(self, n, seed=None):
    seed = seed_stream.SeedStream(seed, salt='vom_mises_fisher')
    # The sampling strategy relies on the fact that vMF variates are symmetric
    # about the mean direction. Accordingly, if we have a sampling strategy for
    # the away-from-mean angle, then we can uniformly sample the remaining
    # dimensions on the S^{dim-2} sphere for free, and rotate these samples from a
    # (1, 0, 0, ..., 0)-mode distribution into the target orientation.
    #
    # This is easy to imagine on the 1-sphere (S^1; in 2-D space): sample a
    # von-Mises distributed `x` value in [-1, 1], then uniformly select what
    # amounts to a "up" or "down" additional degree of freedom after unit
    # normalizing, followed by a final rotation to the desired mean direction
    # from a basis of (1, 0).
    #
    # On S^2 (in 3-D), selecting a vMF `x` identifies a circle in `yz` on the
    # unit sphere over which the distribution is uniform, in particular the
    # circle where x = \hat{x} intersects the unit sphere. We pick a point on
    # that circle, then rotate to the desired mean direction from a basis of
    # (1, 0, 0).
    event_dim = self.event_shape[0].value or self._event_shape_tensor()[0]

    sample_batch_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
    dim = tf.cast(event_dim - 1, self.dtype)
    if event_dim == 3:
      samples_dim0 = self._sample_3d(n, seed=seed)
    else:
      # Wood'94 provides a rejection algorithm to sample the x coordinate.
      # Wood'94 definition of b:
      # b = (-2 * kappa + tf.sqrt(4 * kappa**2 + dim**2)) / dim
      # https://stats.stackexchange.com/questions/156729 suggests:
      b = dim / (2 * self.concentration +
                 tf.sqrt(4 * self.concentration**2 + dim**2))
      # TODO(bjp): Integrate any useful numerical tricks from hyperspherical VAE
      #     https://github.com/nicola-decao/s-vae-tf/
      x = (1 - b) / (1 + b)
      c = self.concentration * x + dim * tf.log1p(-x**2)
      beta = beta_lib.Beta(dim / 2, dim / 2)

      def cond_fn(w, should_continue):
        del w
        return tf.reduce_any(should_continue)

      def body_fn(w, should_continue):
        z = beta.sample(sample_shape=sample_batch_shape, seed=seed())
        w = tf.where(should_continue, (1 - (1 + b) * z) / (1 - (1 - b) * z), w)
        w = tf.check_numerics(w, 'w')
        should_continue = tf.logical_and(
            should_continue,
            self.concentration * w + dim * tf.log1p(-x * w) - c <
            tf.log(tf.random_uniform(sample_batch_shape, seed=seed(),
                                     dtype=self.dtype)))
        return w, should_continue

      w = tf.zeros(sample_batch_shape, dtype=self.dtype)
      should_continue = tf.ones(sample_batch_shape, dtype=tf.bool)
      samples_dim0 = tf.while_loop(cond_fn, body_fn, (w, should_continue))[0]
      samples_dim0 = samples_dim0[..., tf.newaxis]
    if not self._allow_nan_stats:
      # Verify samples are w/in -1, 1, with useful error output tensors (top
      # value rather than all values).
      with tf.control_dependencies([
          tf.assert_less_equal(
              samples_dim0, self.dtype.as_numpy_dtype(1.01),
              data=[tf.nn.top_k(tf.reshape(samples_dim0, [-1]))[0]]),
          tf.assert_greater_equal(
              samples_dim0, self.dtype.as_numpy_dtype(-1.01),
              data=[-tf.nn.top_k(tf.reshape(-samples_dim0, [-1]))[0]])]):
        samples_dim0 = tf.identity(samples_dim0)
    samples_otherdims_shape = tf.concat([sample_batch_shape, [event_dim - 1]],
                                        axis=0)
    unit_otherdims = tf.nn.l2_normalize(
        tf.random_normal(samples_otherdims_shape, seed=seed(),
                         dtype=self.dtype),
        axis=-1)
    samples = tf.concat([
        samples_dim0,  # we must avoid sqrt(1 - (>1)**2)
        tf.sqrt(tf.maximum(1 - samples_dim0**2, 0.)) * unit_otherdims
    ], axis=-1)
    samples = tf.nn.l2_normalize(samples, axis=-1)
    if not self._allow_nan_stats:
      samples = tf.check_numerics(samples, 'samples')

    # Runtime assert that samples are unit length.
    if not self._allow_nan_stats:
      worst, idx = tf.nn.top_k(
          tf.reshape(tf.abs(1 - tf.linalg.norm(samples, axis=-1)), [-1]))
      with tf.control_dependencies([
          tf.assert_near(
              self.dtype.as_numpy_dtype(0), worst,
              data=[worst, idx,
                    tf.gather(tf.reshape(samples, [-1, event_dim]), idx)],
              atol=1e-4, summarize=100)]):
        samples = tf.identity(samples)
    # The samples generated are symmetric around a mode at (1, 0, 0, ...., 0).
    # Now, we move the mode to `self.mean_direction` using a rotation matrix.
    if not self._allow_nan_stats:
      # Assert that the basis vector rotates to the mean direction, as expected.
      basis = tf.cast(tf.concat([[1.], tf.zeros([event_dim - 1])], axis=0),
                      self.dtype)
      with tf.control_dependencies([
          tf.assert_less(
              tf.linalg.norm(self._rotate(basis) - self.mean_direction,
                             axis=-1),
              self.dtype.as_numpy_dtype(1e-5))
      ]):
        return self._rotate(samples)
    return self._rotate(samples)
Example #60
  def __init__(self,
               distribution,
               low=None,
               high=None,
               validate_args=False,
               name="QuantizedDistribution"):
    """Construct a Quantized Distribution representing `Y = ceiling(X)`.

    Some properties are inherited from the distribution defining `X`. Example:
    `allow_nan_stats` is determined for this `QuantizedDistribution` by reading
    the `distribution`.

    Args:
      distribution:  The base distribution class to transform. Typically an
        instance of `Distribution`.
      low: `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples. Should be a whole number. Default `None`.
        If provided, base distribution's `prob` should be defined at
        `low`.
      high: `Tensor` with same `dtype` as this distribution and shape
        able to be added to samples. Should be a whole number. Default `None`.
        If provided, base distribution's `prob` should be defined at
        `high - 1`.
        `high` must be strictly greater than `low`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: If `dist_cls` is not a subclass of
          `Distribution` or continuous.
      NotImplementedError:  If the base distribution does not implement `cdf`.
    """
    parameters = dict(locals())
    values = (
        list(distribution.parameters.values()) +
        [low, high])
    with tf.name_scope(name, values=values) as name:
      self._dist = distribution

      if low is not None:
        low = tf.convert_to_tensor(low, name="low")
      if high is not None:
        high = tf.convert_to_tensor(high, name="high")
      tf.assert_same_float_dtype(tensors=[self.distribution, low, high])

      # We let QuantizedDistribution access _graph_parents since this class is
      # more like a baseclass.
      graph_parents = self._dist._graph_parents  # pylint: disable=protected-access

      checks = []
      if validate_args and low is not None and high is not None:
        message = "low must be strictly less than high."
        checks.append(tf.assert_less(low, high, message=message))
      self._validate_args = validate_args  # self._check_integer uses this.
      with tf.control_dependencies(checks if validate_args else []):
        if low is not None:
          self._low = self._check_integer(low)
          graph_parents += [self._low]
        else:
          self._low = None
        if high is not None:
          self._high = self._check_integer(high)
          graph_parents += [self._high]
        else:
          self._high = None

    super(QuantizedDistribution, self).__init__(
        dtype=self._dist.dtype,
        reparameterization_type=distributions.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=self._dist.allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=name)