Example #1
def xywh_to_ltrb(xywh):
    """Convert a box coordinate from xywh to ltrb.

    Parameters
    ----------
    xywh: (num_boxes, (x, y, w, h) location)
        The box locations in (x, y, w, h) coordinates.

    Returns
    -------
    ltrb: (num_boxes, (l, t, r, b) location)
        The box locations in (l, t, r, b) coordinates.
    """
    l = xywh[:, 0] - xywh[:, 2] / 2.
    t = xywh[:, 1] - xywh[:, 3] / 2.
    r = xywh[:, 0] + xywh[:, 2] / 2.
    b = xywh[:, 1] + xywh[:, 3] / 2.

    # Make sure consistent shape.
    l = tf.reshape(l, (tf.shape(xywh)[0], 1))
    t = tf.reshape(t, (tf.shape(xywh)[0], 1))
    r = tf.reshape(r, (tf.shape(xywh)[0], 1))
    b = tf.reshape(b, (tf.shape(xywh)[0], 1))

    with tf.control_dependencies(
        [tf.assert_non_negative(r - l),
         tf.assert_non_negative(b - t)]):
        ltrb = tf.concat([l, t, r, b], axis=1)
    return ltrb
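A minimal usage sketch for the converter above (hypothetical values; TF 1.x graph mode):

import tensorflow as tf

boxes_xywh = tf.constant([[10., 20., 4., 6.]])  # center (10, 20), width 4, height 6
boxes_ltrb = xywh_to_ltrb(boxes_xywh)
with tf.Session() as sess:
    print(sess.run(boxes_ltrb))  # [[ 8. 17. 12. 23.]]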
Example #2
def ltrb_to_xywh(ltrb):
    """Convert a box coordinate from ltrb to xywh.

    Parameters
    ----------
    ltrb: (num_boxes, (l, t, r, b) location)
        The box locations in (l, t, r, b) coordinates.

    Returns
    -------
    xywh: (num_boxes, (x, y, w, h) location)
        The box locations in (x, y, w, h) coordinates.
    """
    x = (ltrb[:, 0] + ltrb[:, 2]) / 2.
    y = (ltrb[:, 1] + ltrb[:, 3]) / 2.
    w = ltrb[:, 2] - ltrb[:, 0]
    h = ltrb[:, 3] - ltrb[:, 1]

    # Make sure consistent shape.
    x = tf.reshape(x, (tf.shape(ltrb)[0], 1))
    y = tf.reshape(y, (tf.shape(ltrb)[0], 1))
    w = tf.reshape(w, (tf.shape(ltrb)[0], 1))
    h = tf.reshape(h, (tf.shape(ltrb)[0], 1))

    with tf.control_dependencies(
        [tf.assert_non_negative(w),
         tf.assert_non_negative(h)]):
        xywh = tf.concat([x, y, w, h], axis=1)
    return xywh
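The two converters are inverses, so a round-trip (same assumptions as above) should return its input unchanged:

boxes = tf.constant([[8., 17., 12., 23.]])  # (l, t, r, b)
recovered = xywh_to_ltrb(ltrb_to_xywh(boxes))
with tf.Session() as sess:
    print(sess.run(recovered))  # [[ 8. 17. 12. 23.]]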
Example #3
def mix_prediction(losses,
                   lam=0.,
                   mean_typ='arithmetic',
                   weight_typ='normal',
                   sign=-1.,
                   sf=1e-3):
    # losses is shape (# of discriminators x batch_size)
    # output is scalar

    # Note: this assert op is created but never wired into the graph via
    # tf.control_dependencies, so in graph mode it may never actually run.
    tf.assert_non_negative(lam)
    assert mean_typ in ['arithmetic', 'geometric', 'harmonic']
    assert weight_typ in ['normal', 'log']
    assert sign == 1. or sign == -1.
    assert sf > 0.

    if lam == 0.:
        weights = tf.ones_like(losses)
    else:
        if weight_typ == 'log':
            weights = tf.pow(losses, lam)
        else:
            weights = tf.exp(lam * losses)

    if mean_typ == 'arithmetic':
        loss = weighted_arithmetic(weights, losses)
    elif mean_typ == 'geometric':
        log_losses = tf.log(sign * losses)
        loss = sign * tf.exp(weighted_arithmetic(weights, log_losses))
    else:
        mn = tf.reduce_min(losses) - sf
        inv_losses = tf.reciprocal(losses - mn)
        loss = mn + tf.reciprocal(weighted_arithmetic(weights, inv_losses))

    return loss
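mix_prediction calls a weighted_arithmetic helper whose definition is not part of the snippet. A plausible definition, sketched here purely as an assumption, is the weighted arithmetic mean reduced to a scalar:

def weighted_arithmetic(weights, values):
    # Assumed helper (not from the original source): sum(w * v) / sum(w).
    return tf.reduce_sum(weights * values) / tf.reduce_sum(weights)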
Example #4
    def eval(self, X, Y):
        b = self.b
        c = self.c
        sumx2 = tf.reshape(tf.reduce_sum(X**2, 1), [-1, 1])
        sumy2 = tf.reshape(tf.reduce_sum(Y**2, 1), [1, -1])
        with tf.control_dependencies([
                tf.assert_non_negative(sumx2, name="sumx2_nonneg"),
                tf.assert_non_negative(sumy2, name="sumy2_nonneg")
        ]):
            D2 = sumx2 - 2.0 * tf.matmul(X, Y, transpose_b=True) + sumy2

        D2_no0 = tf.maximum(0.0, D2)
        with tf.control_dependencies(
            [tf.assert_non_negative(D2_no0, name="D2_nonneg")]):
            K = (c**2 + D2_no0)**b
        return K
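The eval method relies on the pairwise expansion ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2; a standalone check of that identity with hypothetical values:

X = tf.constant([[0., 0.], [1., 1.]])
Y = tf.constant([[1., 0.]])
sumx2 = tf.reshape(tf.reduce_sum(X**2, 1), [-1, 1])
sumy2 = tf.reshape(tf.reduce_sum(Y**2, 1), [1, -1])
D2 = sumx2 - 2.0 * tf.matmul(X, Y, transpose_b=True) + sumy2
with tf.Session() as sess:
    print(sess.run(D2))  # [[1.] [1.]] -- squared distances from each row of X to (1, 0)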
Example #5
def _maybe_validate_rightmost_transposed_ndims(
    rightmost_transposed_ndims, validate_args, name=None):
  """Checks that `rightmost_transposed_ndims` is valid."""
  with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',
                     [rightmost_transposed_ndims]):
    assertions = []
    if not rightmost_transposed_ndims.dtype.is_integer:
      raise TypeError('`rightmost_transposed_ndims` must be integer type.')

    if rightmost_transposed_ndims.shape.ndims is not None:
      if rightmost_transposed_ndims.shape.ndims != 0:
        raise ValueError('`rightmost_transposed_ndims` must be a scalar, '
                         'saw rank: {}.'.format(
                             rightmost_transposed_ndims.shape.ndims))
    elif validate_args:
      assertions += [tf.assert_rank(rightmost_transposed_ndims, 0)]

    rightmost_transposed_ndims_ = tensor_util.constant_value(
        rightmost_transposed_ndims)
    msg = '`rightmost_transposed_ndims` must be non-negative.'
    if rightmost_transposed_ndims_ is not None:
      if rightmost_transposed_ndims_ < 0:
        raise ValueError(msg[:-1] + ', saw: {}.'.format(
            rightmost_transposed_ndims_))
    elif validate_args:
      assertions += [tf.assert_non_negative(
          rightmost_transposed_ndims, message=msg)]

    return assertions
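The function returns a list of assertion ops rather than applying them, so the caller is expected to wire them in. A hypothetical caller-side sketch:

rightmost = tf.placeholder(tf.int32, shape=None)  # rank unknown until runtime
assertions = _maybe_validate_rightmost_transposed_ndims(
    rightmost, validate_args=True)
with tf.control_dependencies(assertions):
    rightmost = tf.identity(rightmost)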
Example #6
def bernoulli_sample_test():
    # import os
    # os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    # os.environ['CUDA_VISIBLE_DEVICES'] = ''
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(4896)
        logits = tf.random_uniform(
            shape=[128, 256],
            minval=-5,
            maxval=5,
            dtype=tf.float32,
        )
        probs = tf.nn.sigmoid(logits)
        sample = bernoulli_sample_sigmoid(logits)
        grad_probs = tf.gradients(probs, logits)
        grad_sample = tf.gradients(sample, logits)
        with tf.control_dependencies(
            [tf.assert_equal(grad_probs, grad_sample)]):
            grad_probs = tf.identity(grad_probs)
        with tf.control_dependencies([tf.assert_non_negative(sample)]):
            grad_probs = tf.identity(grad_probs)
        init_fn = tf.global_variables_initializer()

    sess = tf.Session(graph=g)
    with sess:
        sess.run(init_fn)
        grad_probs = sess.run(grad_probs)
    print('Tests passed: `bernoulli_sample_sigmoid`')
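The test above exercises a bernoulli_sample_sigmoid helper that is not defined in the snippet. A plausible straight-through implementation, consistent with the asserted gradient equality, might look like:

def bernoulli_sample_sigmoid(logits):
    # Assumed helper (not from the original source): hard Bernoulli sample in the
    # forward pass, sigmoid gradient in the backward pass (straight-through).
    probs = tf.nn.sigmoid(logits)
    sample = tf.cast(tf.random_uniform(tf.shape(probs)) < probs, probs.dtype)
    return probs + tf.stop_gradient(sample - probs)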
Example #7
 def test_raises_when_negative(self):
     with self.test_session():
         zoe = tf.constant([-1, -2], name="zoe")
         with tf.control_dependencies([tf.assert_non_negative(zoe)]):
             out = tf.identity(zoe)
         with self.assertRaisesOpError("zoe"):
             out.eval()
Example #8
    def add_loss_op(self, result):
        logits = result.rnn_output
        with tf.control_dependencies(
            [tf.assert_positive(tf.shape(logits)[1],
                                data=[tf.shape(logits)])]):
            length_diff = tf.reshape(self.config.max_length -
                                     tf.shape(logits)[1],
                                     shape=(1, ))
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0]],
                                       axis=0),
                             shape=(3, 2))
        preds = tf.pad(logits, padding, mode='constant')

        # add epsilon to avoid division by 0
        preds = preds + 1e-5

        mask = tf.sequence_mask(self.output_length_placeholder,
                                self.config.max_length,
                                dtype=tf.float32)
        loss = tf.contrib.seq2seq.sequence_loss(preds, self.output_placeholder,
                                                mask)

        with tf.control_dependencies([
                tf.assert_non_negative(loss,
                                       data=[preds, mask],
                                       summarize=256 * 60 * 300)
        ]):
            return tf.identity(loss)
Example #9
        def lm_symbols_to_logprobs_fn(ids,
                                      unused_i,
                                      cache,
                                      inf_mask_tf=zero_mask_tf):
            """Go from ids to logits for next symbol."""

            ids = ids[:, -1:]
            # Map from lip to lm indices
            lm_ids = table.lookup(ids)

            with tf.control_dependencies(
                [tf.assert_non_negative(lm_ids, [lm_ids])]):
                with tf.variable_scope(
                        top_scope,
                        reuse=True):  # we have initialized LM outside
                    model_pl = CharRnnLm(input_data=lm_ids,
                                         initial_state=cache["lm_state"],
                                         args=self.clm_opts,
                                         training=False)
            probs_lm, cache["lm_state"] = model_pl.probs, model_pl.final_state

            lm_probs_mapped_to_lip = tf.gather(probs_lm,
                                               chars2lip_keys,
                                               axis=-1)
            lm_probs_mapped_to_lip *= zero_mask_tf  # mask out indices with no lip correspondence
            lm_logprobs = tf.log(
                lm_probs_mapped_to_lip)  # Entries with value 0 will get -Inf
            return lm_logprobs, cache
Example #10
def bounds_unlabeled(lower: float,
                     upper: float,
                     tensor: tf.Tensor,
                     name: Optional[str] = None) -> tf.Tensor:
  """Checks the tensor elements fall in the given bounds.

  Args:
    lower: The lower bound.
    upper: The upper bound.
    tensor: The input tensor.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with tf.name_scope(name, 'check_bounds', [tensor]) as scope:
    if FLAGS.tensorcheck_enable_checks:
      lower_bound_op = tf.assert_non_negative(
          tensor - lower, name='lower_bound')
      upper_bound_op = tf.assert_non_positive(
          tensor - upper, name='upper_bound')
      with tf.control_dependencies([lower_bound_op, upper_bound_op]):
        tensor = tf.identity(tensor, name=scope)

    return tensor
Example #11
  def get_sample_ndims(self, x, name="get_sample_ndims"):
    """Returns number of dimensions corresponding to iid draws ("sample").

    Args:
      x: `Tensor`.
      name: Python `str`. The name to give this op.

    Returns:
      sample_ndims: `Tensor` (0D, `int32`).

    Raises:
      ValueError: if `sample_ndims` is calculated to be negative.
    """
    with self._name_scope(name, values=[x]):
      ndims = self.get_ndims(x, name=name)
      if self._is_all_constant_helper(ndims, self.batch_ndims,
                                      self.event_ndims):
        ndims = tensor_util.constant_value(ndims)
        sample_ndims = (ndims - self._batch_ndims_static -
                        self._event_ndims_static)
        if sample_ndims < 0:
          raise ValueError(
              "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
              (self._batch_ndims_static, self._event_ndims_static, ndims))
        return tf.convert_to_tensor(sample_ndims, name="sample_ndims")
      else:
        with tf.name_scope(name="sample_ndims"):
          sample_ndims = ndims - self.batch_ndims - self.event_ndims
          if self.validate_args:
            sample_ndims = control_flow_ops.with_dependencies(
                [tf.assert_non_negative(sample_ndims)], sample_ndims)
        return sample_ndims
Example #12
def _maybe_validate_rightmost_transposed_ndims(
    rightmost_transposed_ndims, validate_args, name=None):
  """Checks that `rightmost_transposed_ndims` is valid."""
  with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',
                     [rightmost_transposed_ndims]):
    assertions = []
    if not rightmost_transposed_ndims.dtype.is_integer:
      raise TypeError('`rightmost_transposed_ndims` must be integer type.')

    if rightmost_transposed_ndims.shape.ndims is not None:
      if rightmost_transposed_ndims.shape.ndims != 0:
        raise ValueError('`rightmost_transposed_ndims` must be a scalar, '
                         'saw rank: {}.'.format(
                             rightmost_transposed_ndims.shape.ndims))
    elif validate_args:
      assertions += [tf.assert_rank(rightmost_transposed_ndims, 0)]

    rightmost_transposed_ndims_ = tf.contrib.util.constant_value(
        rightmost_transposed_ndims)
    msg = '`rightmost_transposed_ndims` must be non-negative.'
    if rightmost_transposed_ndims_ is not None:
      if rightmost_transposed_ndims_ < 0:
        raise ValueError(msg[:-1] + ', saw: {}.'.format(
            rightmost_transposed_ndims_))
    elif validate_args:
      assertions += [tf.assert_non_negative(
          rightmost_transposed_ndims, message=msg)]

    return assertions
Example #13
 def test_raises_when_negative(self):
   with self.test_session():
     zoe = tf.constant([-1, -2], name="zoe")
     with tf.control_dependencies([tf.assert_non_negative(zoe)]):
       out = tf.identity(zoe)
     with self.assertRaisesOpError("zoe"):
       out.eval()
Example #14
 def sparsemax(attentionscores):
     attentionscores = tf.contrib.sparsemax.sparsemax(attentionscores)
     with tf.control_dependencies([
             tf.assert_non_negative(attentionscores),
             tf.assert_less_equal(attentionscores, 1., summarize=60)
     ]):
         return tf.identity(attentionscores)
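A quick usage sketch with hypothetical scores (tf.contrib.sparsemax requires TF 1.x):

scores = tf.constant([[0.1, 2.0, 1.5]])
checked = sparsemax(scores)  # the asserts fire if outputs leave [0, 1]
with tf.Session() as sess:
    print(sess.run(checked))  # a sparse probability vector summing to 1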
Example #15
    def get_sample_ndims(self, x, name="get_sample_ndims"):
        """Returns number of dimensions corresponding to iid draws ("sample").

    Args:
      x: `Tensor`.
      name: Python `str`. The name to give this op.

    Returns:
      sample_ndims: `Tensor` (0D, `int32`).

    Raises:
      ValueError: if `sample_ndims` is calculated to be negative.
    """
        with self._name_scope(name, values=[x]):
            ndims = self.get_ndims(x, name=name)
            if self._is_all_constant_helper(ndims, self.batch_ndims,
                                            self.event_ndims):
                ndims = tensor_util.constant_value(ndims)
                sample_ndims = (ndims - self._batch_ndims_static -
                                self._event_ndims_static)
                if sample_ndims < 0:
                    raise ValueError(
                        "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)"
                        % (self._batch_ndims_static, self._event_ndims_static,
                           ndims))
                return tf.convert_to_tensor(sample_ndims, name="sample_ndims")
            else:
                with tf.name_scope(name="sample_ndims"):
                    sample_ndims = ndims - self.batch_ndims - self.event_ndims
                    if self.validate_args:
                        sample_ndims = control_flow_ops.with_dependencies(
                            [tf.assert_non_negative(sample_ndims)],
                            sample_ndims)
                return sample_ndims
Example #16
def quantization_input_as_bins(x, k_center):
    """
    quantization the input x to the k-bins structure
    :param x: the input data
    :param k_center: the number of bins
    :return: quantized input
    """

    # normalize the input x to [0, 1] before mapping it to bins
    x = tf.cast(x, tf.float32)
    x_shape = tf.shape(x)
    x_ones = tf.ones_like(x)  # a tensor of ones with the same shape as x

    x_min_per_batch = tf.reduce_min(x, axis=0)  # min per batch
    x_min_per_image = tf.reduce_min(tf.reduce_min(x_min_per_batch, axis=0),
                                    axis=0)
    x_min_global = tf.reduce_min(x_min_per_image)
    x_min_mat = tf.multiply(x_min_global, x_ones)
    x_idx_0 = tf.subtract(x, x_min_mat)
    assert_zeros1 = tf.assert_non_negative(x_idx_0)
    with tf.control_dependencies([assert_zeros1]):
        x_idx_0 = tf.identity(x_idx_0, name='x_idx_0')

    x_max_per_batch = tf.reduce_max(x_idx_0, axis=0)
    x_max_per_image = tf.reduce_max(tf.reduce_max(x_max_per_batch, axis=0),
                                    axis=0)
    x_max_global = tf.reduce_max(x_max_per_image, axis=0)
    x_max_mat = tf.multiply(x_max_global, x_ones)
    x_idx_01 = tf.div(x_idx_0, x_max_mat)  # TODO
    assert_zeros2 = tf.assert_non_negative(x_idx_01)
    with tf.control_dependencies([assert_zeros2]):
        x_idx_01 = tf.identity(x_idx_01, name='x_idx_01')

    # Map [0..1] to [0..k_center-1] (e.g. [0..255] for 256 bins) and round to
    # the nearest integer bin index.
    k = tf.cast((k_center - 1), tf.float32)
    x_idx = tf.multiply(x_idx_01, k)
    x_idx = tf.round(x_idx)

    # Alternative (older) scheme using floor instead of round:
    # x_idx = tf.floor(tf.add(x_idx, 1))
    # x_idx = tf.floor(x_idx)  # range [0..255], since Python indexing is zero-based
    x_idx = tf.cast(x_idx, tf.int32)
    # indices have to be non negative
    assert_zeros = tf.assert_non_negative(x_idx)
    with tf.control_dependencies([assert_zeros]):
        x_idx = tf.identity(x_idx, name='x_idx')

    return x_idx
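A usage sketch with a hypothetical 4-D input; the min/max reduction chain above assumes batched image tensors of rank 4:

img = tf.reshape(tf.constant([0.0, 0.5, 0.75, 1.0]), [1, 2, 2, 1])
idx = quantization_input_as_bins(img, k_center=5)
with tf.Session() as sess:
    print(sess.run(idx).ravel())  # [0 2 3 4] -- bin indices in [0, 4]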
Example #17
def normalize_prob(x, axis=None, name='normalize'):
    with tf.name_scope(name) as scope:
        with tf.control_dependencies([tf.assert_non_negative(x)]):
            x = tf.identity(x)
        z = tf.reduce_sum(x, axis=axis, keepdims=True)
        with tf.control_dependencies([tf.assert_positive(z)]):
            p = (1. / z) * x
        return p
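A minimal usage sketch with hypothetical values:

x = tf.constant([[1., 3.], [2., 2.]])
p = normalize_prob(x, axis=1)
with tf.Session() as sess:
    print(sess.run(p))  # [[0.25 0.75] [0.5  0.5 ]]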
Example #18
 def _maybe_assert_valid_x(self, x):
     if not self.validate_args or self.power == 0.:
         return x
     is_valid = tf.assert_non_negative(
         1. + self.power * x,
         message="Forward transformation input must be at least {}.".format(
             -1. / self.power))
     return control_flow_ops.with_dependencies([is_valid], x)
Example #19
 def _maybe_assert_valid_x(self, x):
   if not self.validate_args or self.power == 0.:
     return x
   is_valid = tf.assert_non_negative(
       1. + self.power * x,
       message="Forward transformation input must be at least {}.".format(
           -1. / self.power))
   return control_flow_ops.with_dependencies([is_valid], x)
Example #20
def _test_assertions(inf_tensors, gen_tensors, eval_tensors):
    """Returns in-graph assertions for testing."""
    observed, latents, divs, log_probs, elbo = inf_tensors
    generated, sampled_latents = gen_tensors
    eval_log_probs, = eval_tensors

    # For RNN, we return None from infer_latents as an optimization.
    if latents is None:
        latents = sampled_latents

    def _same_batch_and_sequence_size_asserts(t1, name1, t2, name2):
        return [
            tf.assert_equal(util.batch_size_from_nested_tensors(t1),
                            util.batch_size_from_nested_tensors(t2),
                            message="Batch: " + name1 + " vs " + name2),
            tf.assert_equal(util.sequence_size_from_nested_tensors(t1),
                            util.sequence_size_from_nested_tensors(t2),
                            message="Steps: " + name1 + " vs " + name2),
        ]

    def _same_shapes(nested1, nested2):
        return snt.nest.flatten(
            snt.nest.map(
                lambda t1, t2: tf.assert_equal(tf.shape(t1),
                                               tf.shape(t2),
                                               message="Shapes: " + t1.name +
                                               " vs " + t2.name), nested1,
                nested2))

    def _all_same_batch_and_sequence_sizes(nested):
        batch_size = util.batch_size_from_nested_tensors(nested)
        sequence_size = util.sequence_size_from_nested_tensors(nested)
        return [
            tf.assert_equal(tf.shape(tensor)[0],
                            batch_size,
                            message="Batch: " + tensor.name)
            for tensor in snt.nest.flatten(nested)
        ] + [
            tf.assert_equal(tf.shape(tensor)[1],
                            sequence_size,
                            message="Steps: " + tensor.name)
            for tensor in snt.nest.flatten(nested)
        ]

    assertions = [
        tf.assert_non_negative(divs),
        tf.assert_non_positive(log_probs),
    ] + _same_shapes(
        (log_probs, log_probs, observed, latents),
        (divs, eval_log_probs, generated,
         sampled_latents)) + _all_same_batch_and_sequence_sizes(
             (observed, latents, divs)) + _all_same_batch_and_sequence_sizes(
                 (generated, sampled_latents))
    vars_ = tf.trainable_variables()
    grads = tf.gradients(-elbo, vars_)
    for (var, grad) in zip(vars_, grads):
        assertions.append(tf.check_numerics(grad, "Gradient for " + var.name))
    return assertions
Example #21
def cosine(template, search, is_training,
           trainable=True,
           use_batch_norm=False,
           gain_init=1,
           eps=1e-3,
           scope='cosine'):
    '''
    Args:
        template: [b, h, w, c]
        search: [b, s, h, w, c]
    '''
    search = cnn.as_tensor(search)
    num_search_dims = len(search.value.shape)
    if num_search_dims != 5:
        raise ValueError('search should have 5 dims: {}'.format(num_search_dims))

    with tf.variable_scope(scope, 'cosine'):
        # Discard receptive field of template and get underlying tf.Tensor.
        template = cnn.get_value(template)

        dot_xy = cnn.channel_sum(cnn.diag_xcorr(search, template, padding='VALID'))
        dot_xx = tf.reduce_sum(tf.square(template), axis=(-3, -2, -1), keepdims=True)

        sq_search = cnn.pixelwise(tf.square, search)
        ones = tf.ones_like(template)  # TODO: Faster and less memory to use sum.
        dot_yy = cnn.channel_sum(cnn.diag_xcorr(sq_search, ones, padding='VALID'))
        # num_channels = template.shape[-1].value
        # template_size = template.shape[-3:-1].as_list()
        # ones = tf.ones(template_size + [num_channels, 1], tf.float32)
        # sq_search, restore = cnn.merge_batch_dims(sq_search)
        # dot_yy = cnn.nn_conv2d(sq_search, ones, strides=[1, 1, 1, 1], padding='VALID')
        # dot_yy = restore(dot_yy)

        dot_xx = tf.expand_dims(dot_xx, 1)
        assert_ops = [tf.assert_non_negative(dot_xx, message='assert dot_xx non negative'),
                      tf.assert_non_negative(dot_yy.value, message='assert dot_yy non negative')]
        with tf.control_dependencies(assert_ops):
            denom = cnn.pixelwise(lambda dot_yy: tf.sqrt(dot_xx * dot_yy), dot_yy)
        similarity = cnn.pixelwise_binary(
            lambda dot_xy, denom: dot_xy / (denom + eps), dot_xy, denom)
        # Gain is necessary here because similarity is always in [-1, 1].
        return _calibrate(similarity, is_training, use_batch_norm,
                          learn_gain=True,
                          gain_init=gain_init,
                          trainable=trainable)
Example #22
 def _inverse_log_det_jacobian(self, y):
   # If event_ndims = 2,
   # F^{-1}(y) = (-y, y), so DF^{-1}(y) = (-1, 1),
   # so Log|DF^{-1}(y)| = Log[1, 1] = [0, 0].
   zeros = tf.constant(0., dtype=y.dtype)
   if self.validate_args:
     zeros = control_flow_ops.with_dependencies(
         [tf.assert_non_negative(y, message="Argument y was negative")], zeros)
   return zeros, zeros
Example #23
 def _maybe_assert_valid(self, x):
     if not self.validate_args:
         return x
     return control_flow_ops.with_dependencies([
         tf.assert_non_negative(x, message="sample must be non-negative"),
         tf.assert_less_equal(x,
                              tf.ones([], self.concentration0.dtype),
                              message="sample must be no larger than `1`."),
     ], x)
Example #24
    def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
                                       validate_args, name):
        """Helper to __init__ which ensures override batch/event_shape are valid."""
        if override_shape is None:
            override_shape = []

        override_shape = tf.convert_to_tensor(override_shape,
                                              dtype=tf.int32,
                                              name=name)

        if not override_shape.dtype.is_integer:
            raise TypeError("shape override must be an integer")

        override_is_scalar = _is_scalar_from_shape(override_shape)
        if tf.contrib.util.constant_value(override_is_scalar):
            return self._empty

        dynamic_assertions = []

        if override_shape.shape.ndims is not None:
            if override_shape.shape.ndims != 1:
                raise ValueError("shape override must be a vector")
        elif validate_args:
            dynamic_assertions += [
                tf.assert_rank(override_shape,
                               1,
                               message="shape override must be a vector")
            ]

        if tensor_util.constant_value(override_shape) is not None:
            if any(s < 0 for s in tensor_util.constant_value(override_shape)):
                raise ValueError(
                    "shape override must have non-negative elements")
        elif validate_args:
            dynamic_assertions += [
                tf.assert_non_negative(
                    override_shape,
                    message="shape override must have non-negative elements")
            ]

        is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
                                         _logical_not(override_is_scalar))
        if tf.contrib.util.constant_value(is_both_nonscalar) is not None:
            if tf.contrib.util.constant_value(is_both_nonscalar):
                raise ValueError("base distribution not scalar")
        elif validate_args:
            dynamic_assertions += [
                tf.assert_equal(is_both_nonscalar,
                                False,
                                message="base distribution not scalar")
            ]

        if not dynamic_assertions:
            return override_shape
        return control_flow_ops.with_dependencies(dynamic_assertions,
                                                  override_shape)
Example #25
 def _maybe_assert_valid_y(self, y):
   if not self.validate_args:
     return y
   is_positive = tf.assert_non_negative(
       y, message="Inverse transformation input must be greater than 0.")
   less_than_one = tf.assert_less_equal(
       y,
       tf.constant(1., y.dtype),
       message="Inverse transformation input must be less than or equal to 1.")
   return control_flow_ops.with_dependencies([is_positive, less_than_one], y)
Example #26
 def _maybe_assert_valid_total_count(self, total_count, validate_args):
   if not validate_args:
     return total_count
   return control_flow_ops.with_dependencies([
       tf.assert_non_negative(
           total_count, message="total_count must be non-negative."),
       distribution_util.assert_integer_form(
           total_count,
           message="total_count cannot contain fractional components."),
   ], total_count)
Example #27
 def test_empty_tensor_doesnt_raise(self):
   # A tensor is non-negative when it satisfies:
   #   For every element x_i in x, x_i >= 0
   # and an empty tensor has no elements, so this is trivially satisfied.
   # This is standard set theory.
   with self.test_session():
     empty = tf.constant([], name="empty")
     with tf.control_dependencies([tf.assert_non_negative(empty)]):
       out = tf.identity(empty)
     out.eval()
Example #28
 def _maybe_assert_valid_total_count(self, total_count, validate_args):
   if not validate_args:
     return total_count
   return control_flow_ops.with_dependencies([
       tf.assert_non_negative(
           total_count, message="total_count must be non-negative."),
       distribution_util.assert_integer_form(
           total_count,
           message="total_count cannot contain fractional components."),
   ], total_count)
Example #29
 def _maybe_assert_valid(self, x):
   if not self.validate_args:
     return x
   return control_flow_ops.with_dependencies([
       tf.assert_non_negative(x, message="sample must be non-negative"),
       tf.assert_less_equal(
           x,
           tf.ones([], self.concentration0.dtype),
           message="sample must be no larger than `1`."),
   ], x)
Example #30
 def test_empty_tensor_doesnt_raise(self):
     # A tensor is non-negative when it satisfies:
     #   For every element x_i in x, x_i >= 0
     # and an empty tensor has no elements, so this is trivially satisfied.
     # This is standard set theory.
     with self.test_session():
         empty = tf.constant([], name="empty")
         with tf.control_dependencies([tf.assert_non_negative(empty)]):
             out = tf.identity(empty)
         out.eval()
Example #31
 def _inverse_log_det_jacobian(self, y):
     # If event_ndims = 2,
     # F^{-1}(y) = (-y, y), so DF^{-1}(y) = (-1, 1),
     # so Log|DF^{-1}(y)| = Log[1, 1] = [0, 0].
     zeros = tf.constant(0., dtype=y.dtype)
     if self.validate_args:
         zeros = control_flow_ops.with_dependencies(
             [tf.assert_non_negative(y, message="Argument y was negative")],
             zeros)
     return zeros, zeros
Example #32
def modify_aspect_ratio(rect, method='stretch', axis=-1, eps=1e-3, name='modify_aspect_ratio'):
    if method == 'stretch':
        return rect  # No change.
    with tf.name_scope(name) as scope:
        center, size = geom.rect_center_size(rect)
        with tf.control_dependencies([tf.assert_non_negative(size)]):
            size = tf.identity(size)
        size = tf.maximum(size, eps)
        width = scalar_size(size, method, axis=axis, keepdims=True)
        return geom.make_rect_center_size(center, width)
Example #33
  def _get_tol(self, tol):
    if tol is None:
      return tf.convert_to_tensor(0, dtype=self.loc.dtype)

    tol = tf.convert_to_tensor(tol, dtype=self.loc.dtype)
    if self.validate_args:
      tol = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              tol, message="Argument 'tol' must be non-negative")
      ], tol)
    return tol
Example #34
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
    """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
      where `k` is the number of images, `h` and `w` are the height and
      width of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name), \
         tf.control_dependencies([tf.assert_rank(images, 4),
                                  tf.assert_type(images, tf.uint8),
                                  tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png,
                                   limited_images,
                                   dtype=tf.string,
                                   name='encode_each_image')
        image_shape = tf.shape(images)
        dimensions = tf.stack([
            tf.as_string(image_shape[2], name='width'),
            tf.as_string(image_shape[1], name='height')
        ],
                              name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
Example #35
    def _get_tol(self, tol):
        if tol is None:
            return tf.convert_to_tensor(0, dtype=self.loc.dtype)

        tol = tf.convert_to_tensor(tol, dtype=self.loc.dtype)
        if self.validate_args:
            tol = control_flow_ops.with_dependencies([
                tf.assert_non_negative(
                    tol, message="Argument 'tol' must be non-negative")
            ], tol)
        return tol
Example #36
  def _prepare(self):
    # We need to put the conversion and check here because a user will likely
    # want to decay the learning rate dynamically.
    self._learning_rate_tensor = control_flow_ops.with_dependencies([
        tf.assert_non_negative(
            self._learning_rate, message='`learning_rate` must be non-negative')
    ], tf.convert_to_tensor(self._learning_rate, name='learning_rate_tensor'))
    self._decay_tensor = tf.convert_to_tensor(
        self._preconditioner_decay_rate, name='preconditioner_decay_rate')

    super(StochasticGradientLangevinDynamics, self)._prepare()
Example #37
 def build_pred_var_interp(t_interp_X):
     # Return the diagonal of the predictive covariance matrix
     KXx = self.kern.K(self.X, t_interp_X)
     sys1 = tf.matrix_triangular_solve(self.L, KXx, lower=True)
     reduce_sum = tf.reduce_sum(tf.square(sys1), axis=0)
     Kxx = self.kern.K(t_interp_X, diag=True)
     pred_var = Kxx - reduce_sum
     with tf.control_dependencies([tf.assert_non_negative(pred_var)]):
         pred_var = tf.identity(pred_var)
     # pred_var = tf.Print(pred_var, [pred_var])
     return pred_var
Example #38
def pad_up_to(vector, size, rank):
    length_diff = tf.reshape(size - tf.shape(vector)[1], shape=(1, ))
    with tf.control_dependencies([
            tf.assert_non_negative(length_diff,
                                   data=(vector, size, tf.shape(vector)))
    ]):
        padding = tf.reshape(tf.concat([[0, 0, 0], length_diff, [0, 0] *
                                        (rank - 1)],
                                       axis=0),
                             shape=((rank + 1), 2))
        return tf.pad(vector, padding, mode='constant')
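A usage sketch with hypothetical values; for a 2-D tensor, rank=1 pads the second dimension:

v = tf.constant([[1, 2, 3]])           # shape (1, 3)
padded = pad_up_to(v, size=5, rank=1)  # asserts size >= current length
with tf.Session() as sess:
    print(sess.run(padded))  # [[1 2 3 0 0]]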
Example #39
    def __init__(self,
                 dimension,
                 concentration,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='LKJ'):
        """Construct LKJ distributions.

    Args:
      dimension: Python `int`. The dimension of the correlation matrices
        to sample.
      concentration: `float` or `double` `Tensor`. The positive concentration
        parameter of the LKJ distributions. The pdf of a sample matrix `X` is
        proportional to `det(X) ** (concentration - 1)`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value `NaN` to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: If `dimension` is negative.
    """
        if dimension < 0:
            raise ValueError(
                'There are no negative-dimension correlation matrices.')
        parameters = dict(locals())
        with tf.name_scope(name, values=[dimension, concentration]):
            concentration = tf.convert_to_tensor(
                concentration,
                name='concentration',
                dtype=dtype_util.common_dtype([concentration],
                                              preferred_dtype=tf.float32))
            with tf.control_dependencies([
                    # concentration >= 1
                    # TODO(b/111451422) Generalize to concentration > 0.
                    tf.assert_non_negative(concentration - 1.),
            ] if validate_args else []):
                self._dimension = dimension
                self._concentration = tf.identity(concentration,
                                                  name='concentration')
        super(LKJ, self).__init__(
            dtype=self._concentration.dtype,
            validate_args=validate_args,
            allow_nan_stats=allow_nan_stats,
            reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
            parameters=parameters,
            graph_parents=[self._concentration],
            name=name)
Example #40
 def _maybe_assert_valid_y(self, y):
     if not self.validate_args:
         return y
     is_positive = tf.assert_non_negative(
         y, message="Inverse transformation input must be greater than 0.")
     less_than_one = tf.assert_less_equal(
         y,
         tf.constant(1., y.dtype),
         message=
         "Inverse transformation input must be less than or equal to 1.")
     return control_flow_ops.with_dependencies([is_positive, less_than_one],
                                               y)
Example #41
  def __init__(self,
               dimension,
               concentration,
               validate_args=False,
               allow_nan_stats=True,
               name='LKJ'):
    """Construct LKJ distributions.

    Args:
      dimension: Python `int`. The dimension of the correlation matrices
        to sample.
      concentration: `float` or `double` `Tensor`. The positive concentration
        parameter of the LKJ distributions. The pdf of a sample matrix `X` is
        proportional to `det(X) ** (concentration - 1)`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value `NaN` to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: If `dimension` is negative.
    """
    if dimension < 0:
      raise ValueError(
          'There are no negative-dimension correlation matrices.')
    parameters = dict(locals())
    with tf.name_scope(name, values=[dimension, concentration]):
      concentration = tf.convert_to_tensor(
          concentration,
          name='concentration',
          dtype=dtype_util.common_dtype([concentration],
                                        preferred_dtype=tf.float32))
      with tf.control_dependencies([
          # concentration >= 1
          # TODO(b/111451422) Generalize to concentration > 0.
          tf.assert_non_negative(concentration - 1.),
      ] if validate_args else []):
        self._dimension = dimension
        self._concentration = tf.identity(concentration, name='concentration')
    super(LKJ, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration],
        name=name)
Example #42
def op(name,
       images,
       max_outputs=3,
       display_name=None,
       description=None,
       collections=None):
  """Create an image summary op for use in a TensorFlow graph.

  Arguments:
    name: A unique name for the generated summary node.
    images: A `Tensor` representing pixel data with shape `[k, w, h, c]`,
      where `k` is the number of images, `w` and `h` are the width and
      height of the images, and `c` is the number of channels, which
      should be 1, 3, or 4. Any of the dimensions may be statically
      unknown (i.e., `None`).
    max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
      many images will be emitted at each step. When more than
      `max_outputs` many images are provided, the first `max_outputs` many
      images will be used and the rest silently discarded.
    display_name: Optional name for this summary in TensorBoard, as a
      constant `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      constant `str`. Markdown is supported. Defaults to empty.
    collections: Optional list of graph collections keys. The new
      summary op is added to these collections. Defaults to
      `[tf.GraphKeys.SUMMARIES]`.

  Returns:
    A TensorFlow summary op.
  """
  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  with tf.name_scope(name), \
       tf.control_dependencies([tf.assert_rank(images, 4),
                                tf.assert_type(images, tf.uint8),
                                tf.assert_non_negative(max_outputs)]):
    limited_images = images[:max_outputs]
    encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
                               dtype=tf.string,
                               name='encode_each_image')
    image_shape = tf.shape(images)
    dimensions = tf.stack([tf.as_string(image_shape[1], name='width'),
                           tf.as_string(image_shape[2], name='height')],
                          name='dimensions')
    tensor = tf.concat([dimensions, encoded_images], axis=0)
    return tf.summary.tensor_summary(name='image_summary',
                                     tensor=tensor,
                                     collections=collections,
                                     summary_metadata=summary_metadata)
Example #43
 def _assert_non_negative_int32_scalar(self, x):
   """Helper which ensures that input is a non-negative, int32, scalar."""
   x = tf.convert_to_tensor(x, name="x")
   if x.dtype.base_dtype != tf.int32.base_dtype:
     raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, tf.int32))
   x_value_static = tensor_util.constant_value(x)
   if x.get_shape().ndims is not None and x_value_static is not None:
     if x.get_shape().ndims != 0:
       raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                        (x.name, x.get_shape().ndims))
     if x_value_static < 0:
       raise ValueError("%s.value=%d cannot be negative" %
                        (x.name, x_value_static))
     return x
   if self.validate_args:
     x = control_flow_ops.with_dependencies(
         [tf.assert_rank(x, 0),
          tf.assert_non_negative(x)], x)
   return x
Example #44
 def disable_some_bgs():
     # Mutatis mutandis, all comments from disable_some_fgs apply.
     shuffled_inds = tf.random_shuffle(bg_inds, seed=self._seed)
     disable_place = (tf.shape(bg_inds)[0] - max_bg)
     integrity_assertion = tf.assert_non_negative(
         disable_place,
         message="disable_place in disable_some_bgs is negative."
     )
     with tf.control_dependencies([integrity_assertion]):
         disable_inds = shuffled_inds[:disable_place]
     is_disabled = tf.sparse_to_dense(
         sparse_indices=disable_inds,
         sparse_values=True, default_value=False,
         output_shape=tf.cast(proposals_label_shape, tf.int64),
         validate_indices=False
     )
     return tf.where(
         condition=is_disabled,
         x=tf.fill(
             dims=proposals_label_shape,
             value=-1.
         ),
         y=proposals_label
     )
Example #45
  def __init__(self,
               learning_rate,
               preconditioner_decay_rate=0.95,
               data_size=1,
               burnin=25,
               diagonal_bias=1e-8,
               name=None,
               parallel_iterations=10,
               variable_scope=None):
    default_name = 'StochasticGradientLangevinDynamics'
    with tf.name_scope(name, default_name, [
        learning_rate, preconditioner_decay_rate, data_size, burnin,
        diagonal_bias
    ]):
      if tf.executing_eagerly():
        raise NotImplementedError('Eager execution currently not supported '
                                  'for SGLD optimizer.')
      if variable_scope is None:
        var_scope_name = tf.get_default_graph().unique_name(
            name or default_name)
        with tf.variable_scope(var_scope_name) as scope:
          self._variable_scope = scope
      else:
        self._variable_scope = variable_scope

      self._preconditioner_decay_rate = tf.convert_to_tensor(
          preconditioner_decay_rate, name='preconditioner_decay_rate')
      self._data_size = tf.convert_to_tensor(
          data_size, name='data_size')
      self._burnin = tf.convert_to_tensor(burnin, name='burnin')
      self._diagonal_bias = tf.convert_to_tensor(
          diagonal_bias, name='diagonal_bias')
      self._learning_rate = tf.convert_to_tensor(
          learning_rate, name='learning_rate')
      self._parallel_iterations = parallel_iterations

      with tf.variable_scope(self._variable_scope):
        self._counter = tf.get_variable(
            'counter', initializer=0, trainable=False)

      self._preconditioner_decay_rate = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._preconditioner_decay_rate,
              message='`preconditioner_decay_rate` must be non-negative'),
          tf.assert_less_equal(
              self._preconditioner_decay_rate,
              1.,
              message='`preconditioner_decay_rate` must be at most 1.'),
      ], self._preconditioner_decay_rate)

      self._data_size = control_flow_ops.with_dependencies([
          tf.assert_greater(
              self._data_size,
              0,
              message='`data_size` must be greater than zero')
      ], self._data_size)

      self._burnin = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._burnin, message='`burnin` must be non-negative'),
          tf.assert_integer(
              self._burnin, message='`burnin` must be an integer')
      ], self._burnin)

      self._diagonal_bias = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._diagonal_bias,
              message='`diagonal_bias` must be non-negative')
      ], self._diagonal_bias)

      super(StochasticGradientLangevinDynamics, self).__init__(
          use_locking=False, name=name or default_name)
Example #46
  def __init__(self,
               batch_size,
               total_num_examples,
               max_learning_rate=1.,
               preconditioner_decay_rate=0.95,
               burnin=25,
               burnin_max_learning_rate=1e-6,
               use_single_learning_rate=False,
               name=None,
               variable_scope=None):
    default_name = 'VariationalSGD'
    with tf.name_scope(name, default_name, [
        max_learning_rate, preconditioner_decay_rate, batch_size, burnin,
        burnin_max_learning_rate
    ]):
      if variable_scope is None:
        var_scope_name = tf.get_default_graph().unique_name(
            name or default_name)
        with tf.variable_scope(var_scope_name) as scope:
          self._variable_scope = scope
      else:
        self._variable_scope = variable_scope

      self._preconditioner_decay_rate = tf.convert_to_tensor(
          preconditioner_decay_rate, name='preconditioner_decay_rate')
      self._batch_size = tf.convert_to_tensor(batch_size, name='batch_size')
      self._total_num_examples = tf.convert_to_tensor(
          total_num_examples, name='total_num_examples')
      self._burnin = tf.convert_to_tensor(burnin, name='burnin')
      self._burnin_max_learning_rate = tf.convert_to_tensor(
          burnin_max_learning_rate, name='burnin_max_learning_rate')
      self._max_learning_rate = tf.convert_to_tensor(
          max_learning_rate, name='max_learning_rate')
      self._use_single_learning_rate = use_single_learning_rate

      with tf.variable_scope(self._variable_scope):
        self._counter = tf.get_variable(
            'counter', initializer=0, trainable=False)

      self._preconditioner_decay_rate = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._preconditioner_decay_rate,
              message='`preconditioner_decay_rate` must be non-negative'),
          tf.assert_less_equal(
              self._preconditioner_decay_rate,
              1.,
              message='`preconditioner_decay_rate` must be at most 1.'),
      ], self._preconditioner_decay_rate)

      self._batch_size = control_flow_ops.with_dependencies([
          tf.assert_greater(
              self._batch_size,
              0,
              message='`batch_size` must be greater than zero')
      ], self._batch_size)

      self._total_num_examples = control_flow_ops.with_dependencies([
          tf.assert_greater(
              self._total_num_examples,
              0,
              message='`total_num_examples` must be greater than zero')
      ], self._total_num_examples)

      self._burnin = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._burnin, message='`burnin` must be non-negative'),
          tf.assert_integer(
              self._burnin, message='`burnin` must be an integer')
      ], self._burnin)

      self._burnin_max_learning_rate = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._burnin_max_learning_rate,
              message='`burnin_max_learning_rate` must be non-negative')
      ], self._burnin_max_learning_rate)

      self._max_learning_rate = control_flow_ops.with_dependencies([
          tf.assert_non_negative(
              self._max_learning_rate,
              message='`max_learning_rate` must be non-negative')
      ], self._max_learning_rate)

      super(VariationalSGD, self).__init__(
          use_locking=False, name=name or default_name)
Example #47
  def __init__(self,
               mean_direction,
               concentration,
               validate_args=False,
               allow_nan_stats=True,
               name='VonMisesFisher'):
    """Creates a new `VonMisesFisher` instance.

    Args:
      mean_direction: Floating-point `Tensor` with shape [B1, ... Bn, D].
        A unit vector indicating the mode of the distribution, or the
        unit-normalized direction of the mean. (This is *not* in general the
        mean of the distribution; the mean is not generally in the support of
        the distribution.) NOTE: `D` is currently restricted to <= 5.
      concentration: Floating-point `Tensor` having batch shape [B1, ... Bn]
        broadcastable with `mean_direction`. The level of concentration of
        samples around the `mean_direction`. `concentration=0` indicates a
        uniform distribution over the unit hypersphere, and `concentration=+inf`
        indicates a `Deterministic` distribution (delta function) at
        `mean_direction`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: For known-bad arguments, i.e. unsupported event dimension.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[mean_direction, concentration]) as name:
      dtype = dtype_util.common_dtype([mean_direction, concentration],
                                      tf.float32)
      mean_direction = tf.convert_to_tensor(
          mean_direction, name='mean_direction', dtype=dtype)
      concentration = tf.convert_to_tensor(
          concentration, name='concentration', dtype=dtype)
      assertions = [
          tf.assert_non_negative(
              concentration, message='`concentration` must be non-negative'),
          tf.assert_greater(
              tf.shape(mean_direction)[-1], 1,
              message='`mean_direction` may not have scalar event shape'),
          tf.assert_near(
              1., tf.linalg.norm(mean_direction, axis=-1),
              message='`mean_direction` must be unit-length')
      ] if validate_args else []
      if mean_direction.shape.with_rank_at_least(1)[-1].value is not None:
        if mean_direction.shape.with_rank_at_least(1)[-1].value > 5:
          raise ValueError('vMF ndims > 5 is not currently supported')
      elif validate_args:
        assertions += [tf.assert_less_equal(
            tf.shape(mean_direction)[-1], 5,
            message='vMF ndims > 5 is not currently supported')]
      with tf.control_dependencies(assertions):
        self._mean_direction = tf.identity(mean_direction)
        self._concentration = tf.identity(concentration)
      tf.assert_same_float_dtype([self._mean_direction, self._concentration])
      # mean_direction is always reparameterized.
      # concentration is only for event_dim==3, via an inversion sampler.
      reparameterization_type = (
          reparameterization.FULLY_REPARAMETERIZED
          if mean_direction.shape.with_rank_at_least(1)[-1].value == 3 else
          reparameterization.NOT_REPARAMETERIZED)
      super(VonMisesFisher, self).__init__(
          dtype=self._concentration.dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          reparameterization_type=reparameterization_type,
          parameters=parameters,
          graph_parents=[self._mean_direction, self._concentration],
          name=name)
Example #48
 def test_doesnt_raise_when_zero_and_positive(self):
   with self.test_session():
     lucas = tf.constant([0, 2], name="lucas")
     with tf.control_dependencies([tf.assert_non_negative(lucas)]):
       out = tf.identity(lucas)
     out.eval()
Example #49
 def _inverse(self, y):
   if self.validate_args:
     y = control_flow_ops.with_dependencies(
         [tf.assert_non_negative(y, message="Argument y was negative")], y)
   return -y, y
Example #50
 def _maybe_assert_valid_x(self, x):
   if not self.validate_args:
     return x
   is_valid = tf.assert_non_negative(
       x, message="Forward transformation input must be at least 0.")
   return control_flow_ops.with_dependencies([is_valid], x)
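A pattern shared by all of the examples above: in TF 1.x graph mode, tf.assert_non_negative returns an op that only runs when made a control dependency of something that is actually evaluated. A minimal sketch:

x = tf.placeholder(tf.float32)
with tf.control_dependencies([tf.assert_non_negative(x)]):
    x_checked = tf.identity(x)  # evaluating x_checked triggers the check
with tf.Session() as sess:
    print(sess.run(x_checked, feed_dict={x: 1.0}))  # fine
    # sess.run(x_checked, feed_dict={x: -1.0})      # raises InvalidArgumentError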