Example 1
def process_single_image(image_path, FLAGS):
    image = tf.read_file(image_path)
    image = tf.image.decode_png(image, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)

    assertion = tf.assert_equal(tf.shape(image)[2], 3, message="image does not have 3 channels")
    with tf.control_dependencies([assertion]):
        image = tf.identity(image)

    # scale image, new min(height,  width) = FLAGS.image_size
    with tf.name_scope("scaling"):
        h, w, _ = shape(image)
        scale = tf.cast(FLAGS.image_size, dtype=tf.float32) / tf.cast(tf.cond(tf.less(h, w), lambda: w, lambda: h),
                                                                      dtype=tf.float32)
        image = tf.squeeze(tf.image.resize_bilinear(tf.expand_dims(image, 0), [
            tf.cast(tf.floor(scale * tf.cast(h, dtype=tf.float32)), dtype=tf.int32),
            tf.cast(tf.floor(scale * tf.cast(w, dtype=tf.float32)), dtype=tf.int32)]))
        image.set_shape([None, None, 3])

    with tf.name_scope("pad"):
        h2, w2, _ = shape(image)
        h_diff, w_diff = FLAGS.image_size - h2, FLAGS.image_size - w2

        # If uncommented, add this print op as a control dependency.
        # print = tf.Print(h_diff, [scale, scale_h, scale_w, h, w, h1, w1, h2, w2, h_diff, w_diff],
        #                  message="scale, h, w, h_diff, w_diff: ")

        assert_positive_hdiff = tf.assert_greater_equal(h_diff, 0)
        assert_positive_wdiff = tf.assert_greater_equal(w_diff, 0)
        with tf.control_dependencies([assert_positive_hdiff, assert_positive_wdiff]):
            image = tf.pad(image, ([0, h_diff], [0, w_diff], [0, 0]), constant_values=0.999)

    image.set_shape([FLAGS.image_size, FLAGS.image_size, 3])
    # image = tf.cast(image, dtype=tf.float32)
    return image
Example 2
def find_center_in_scoremap(scoremap, threshold=0.95):
    assert len(scoremap.shape.as_list()) == 4

    max_val = tf.reduce_max(scoremap, axis=(1, 2), keepdims=True)
    with tf.control_dependencies([tf.assert_greater_equal(scoremap, 0.0)]):
        max_loc = tf.greater_equal(scoremap, max_val *
                                   threshold)  # values over 95% of max.

    spatial_dim = scoremap.shape.as_list()[1:3]
    assert all(spatial_dim)  # Spatial dimension must be static.
    # Compute center of each pixel in [0, 1] in search area.
    dim_y, dim_x = spatial_dim[0], spatial_dim[1]
    centers_x, centers_y = tf.meshgrid(
        tf.to_float(tf.range(dim_x)) / tf.to_float(dim_x - 1),
        tf.to_float(tf.range(dim_y)) / tf.to_float(dim_y - 1))
    centers = tf.stack([centers_x, centers_y], axis=-1)
    max_loc = tf.to_float(max_loc)
    center = tf.divide(tf.reduce_sum(centers * max_loc, axis=(1, 2)),
                       tf.reduce_sum(max_loc, axis=(1, 2)))

    EPSILON = 1e-4
    with tf.control_dependencies([
            tf.assert_greater_equal(center, -EPSILON),
            tf.assert_less_equal(center, 1.0 + EPSILON)
    ]):
        center = tf.identity(center)
    return center
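A minimal usage sketch of the function above (not from the source; assumes TensorFlow 1.x and a single-channel scoremap of shape [batch, H, W, 1]):
import tensorflow as tf

scoremap = tf.reshape(tf.one_hot(12, 25), [1, 5, 5, 1])  # single peak at row 2, col 2
center = find_center_in_scoremap(scoremap)  # evaluates to [[0.5, 0.5]] (x, y in [0, 1])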
Example 3
File: utils.py Project: gar1t/SAVER
def quantile_loss(quantile, delta, huber):
    quantile_scale = tf.where(delta < 0.0, 1.0 - quantile, quantile)
    # Tie the range checks into the graph; in graph mode assert ops only run
    # when something depends on them.
    with tf.control_dependencies([
            tf.assert_greater_equal(quantile_scale, 0.0),
            tf.assert_less_equal(quantile_scale, 1.0)
    ]):
        quantile_scale = tf.identity(quantile_scale)
    if huber:
        return quantile_scale * huber_loss(delta, 0.01)
    else:
        return quantile_scale * tf.abs(delta)
Example 4
def splice(feat, left_context, right_context):
    '''
    splice frame with context
    param: feat, tf.float32, [batch, time, feat]
    return: feat, tf.float32, [batch, time, feat*(left_context + 1 + right_context)]
    reference:
      https://github.com/kaldi-asr/kaldi/src/feat/feature-functions.cc#L205:6
    '''
    def _loop_continue(time, end_time, context, unused_left_context,
                       right_context, unused_output_tas):
        del unused_output_tas
        del unused_left_context
        return time < end_time

    def _loop_body(time, end_time, context, left_context, right_context,
                   output_tas):
        shape = tf.shape(context)
        B, _, D = shape[0], shape[1], shape[2]
        N = (1 + left_context + right_context) * D

        new_feat = context[:, time:time + left_context + 1 + right_context, :]
        new_feat = tf.reshape(new_feat, [B, N])
        new_output_tas = output_tas.write(time, new_feat)
        return (time + 1, end_time, context, left_context, right_context,
                new_output_tas)

    with tf.control_dependencies([
            tf.assert_greater_equal(left_context, 0),
            tf.assert_greater_equal(right_context, 0)
    ]):
        T = tf.shape(feat)[1]
        output_tas = _new_tensor_array('splice_feat_ta', T, dtype=tf.float32)
        time = tf.constant(0, tf.int32)
        first = tf.tile(feat[:, 0:1, :], [1, left_context, 1])
        last = tf.tile(feat[:, -1:, :], [1, right_context, 1])
        context = tf.concat([first, feat], axis=1)
        context = tf.concat([context, last], axis=1)

        loop_vars = (time, T, context, left_context, right_context, output_tas)

        parallel_iterations = 10
        shape_invariants = tf.contrib.framework.nest.map_structure(
            lambda t: tf.TensorShape(None), loop_vars)

        (time, end_time, context, left_context, right_context,
         output_tas) = tf.while_loop(_loop_continue,
                                     _loop_body,
                                     loop_vars=loop_vars,
                                     shape_invariants=shape_invariants,
                                     parallel_iterations=parallel_iterations,
                                     swap_memory=False)
        del context
        del left_context
        del right_context

        batch_spliced_feats = output_tas.stack()
        batch_spliced_feats = tf.transpose(batch_spliced_feats, [1, 0, 2])
    return batch_spliced_feats
Example 5
File: utils.py Project: gar1t/SAVER
def expand_dim_cos(inpt, target_dimensionality):
    # Tie the range checks into the graph; in graph mode assert ops only run
    # when something depends on them.
    with tf.control_dependencies([
            tf.assert_less_equal(inpt, 1.0),
            tf.assert_greater_equal(inpt, 0.0)
    ]):
        inpt = tf.identity(inpt)
    dimensionality_expansion = target_dimensionality // int(inpt.shape[-1])
    if dimensionality_expansion <= 1:
        return inpt
    embedding_indices = tf.reshape(
        tf.range(1, dimensionality_expansion + 1, dtype=tf.float32),
        [1] * len(inpt.shape) + [dimensionality_expansion])
    target_shape = tf.concat(
        [inpt.shape[:-1], [dimensionality_expansion * int(inpt.shape[-1])]],
        axis=0)
    return tf.reshape(
        tf.cos(np.pi * tf.expand_dims(inpt, axis=-1) * embedding_indices),
        target_shape)
Example 6
    def _prob(self, x):
        if self.validate_args:
            with tf.control_dependencies([
                    tf.assert_greater_equal(x, self.low),
                    tf.assert_less_equal(x, self.high)
            ]):
                x = tf.identity(x)

        broadcast_x_to_high = _broadcast_to(x, [self.high])
        left_of_peak = tf.logical_and(broadcast_x_to_high > self.low,
                                      broadcast_x_to_high <= self.peak)

        interval_length = self.high - self.low
        # This is the pdf when low <= x <= high. It looks like a triangle,
        # so we have to treat each line segment separately.
        result_inside_interval = tf.where(
            left_of_peak,
            # Line segment from (self.low, 0) to (self.peak, 2 / (self.high -
            # self.low)).
            2. * (x - self.low) / (interval_length * (self.peak - self.low)),
            # Line segment from (self.peak, 2 / (self.high - self.low)) to
            # (self.high, 0).
            2. * (self.high - x) / (interval_length * (self.high - self.peak)))

        broadcast_x_to_peak = _broadcast_to(x, [self.peak])
        outside_interval = tf.logical_or(broadcast_x_to_peak < self.low,
                                         broadcast_x_to_peak > self.high)

        broadcast_shape = tf.broadcast_dynamic_shape(tf.shape(x),
                                                     self.batch_shape_tensor())

        return tf.where(outside_interval,
                        tf.zeros(broadcast_shape, dtype=self.dtype),
                        result_inside_interval)
Example 7
 def call(self, inputs, **kwargs):
     with tf.control_dependencies([
             tf.assert_greater_equal(inputs, self.index_offset),
             tf.assert_less(inputs, self.index_offset + self._num_symbols)
     ]):
         return tf.nn.embedding_lookup(self._embedding,
                                       inputs - self.index_offset)
Example 8
    def __init__(self,
                 dtype,
                 param_dtype,
                 is_continuous,
                 is_reparameterized,
                 group_ndims=0,
                 **kwargs):
        if 'group_event_ndims' in kwargs:
            warnings.warn(
                "The argument `group_event_ndims` has been deprecated and "
                "will be removed in the coming version (0.3.1). Please use "
                "`group_ndims` instead.", FutureWarning)
            group_ndims = kwargs['group_event_ndims']

        self._dtype = dtype
        self._param_dtype = param_dtype
        self._is_continuous = is_continuous
        self._is_reparameterized = is_reparameterized
        if isinstance(group_ndims, int):
            if group_ndims < 0:
                raise ValueError("group_ndims must be non-negative.")
            self._group_ndims = group_ndims
        else:
            group_ndims = tf.convert_to_tensor(group_ndims, tf.int32)
            _assert_rank_op = tf.assert_rank(
                group_ndims, 0,
                message="group_ndims should be a scalar (0-D Tensor).")
            _assert_nonnegative_op = tf.assert_greater_equal(
                group_ndims, 0,
                message="group_ndims must be non-negative.")
            with tf.control_dependencies([_assert_rank_op,
                                          _assert_nonnegative_op]):
                self._group_ndims = tf.identity(group_ndims)
Example 9
def new_mean_squared(grad_vec, decay, ms):
  """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
  decay_size = decay.get_shape().num_elements()
  decay_check_ops = [
      tf.assert_less_equal(decay, 1., summarize=decay_size),
      tf.assert_greater_equal(decay, 0., summarize=decay_size)]

  with tf.control_dependencies(decay_check_ops):
    grad_squared = tf.square(grad_vec)

  # If the previous mean_squared is the 0 vector, don't use the decay and just
  # return the full grad_squared. This should only happen on the first timestep.
  decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                  lambda: tf.zeros_like(decay, dtype=tf.float32), lambda: decay)

  # Update the running average of squared gradients.
  epsilon = 1e-12
  return (1. - decay) * (grad_squared + epsilon) + decay * ms
Example 10
def preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  # Images should have 3 channels.
  assert images.shape[3].value == 3

  # tfgan_eval.preprocess_image expects values in [0, 255].
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
      fn=tfgan_eval.preprocess_image,
      elems=images,
      back_prop=False
  )

  return preprocessed_images
Example 11
def _preprocess_for_inception(images):
    """Preprocess images for inception.

    Args:
      images: images minibatch. Shape [batch size, width, height,
        channels]. Values are in [0..255].

    Returns:
      preprocessed_images
    """

    images = tf.cast(images, tf.float32)

    # tfgan_eval.preprocess_image function takes values in [0, 255]
    with tf.control_dependencies([
            tf.assert_greater_equal(images, 0.0),
            tf.assert_less_equal(images, 255.0)
    ]):
        images = tf.identity(images)

    preprocessed_images = tf.map_fn(fn=_TFGAN.preprocess_image,
                                    elems=images,
                                    back_prop=False)

    return preprocessed_images
Example 12
    def __init__(self, alpha, group_ndims=0, check_numerics=False, **kwargs):
        self._alpha = tf.convert_to_tensor(alpha)
        dtype = assert_same_float_dtype([(self._alpha, 'Dirichlet.alpha')])

        static_alpha_shape = self._alpha.get_shape()
        shape_err_msg = "alpha should have rank >= 1."
        cat_err_msg = "n_categories (length of the last axis " \
                      "of alpha) should be at least 2."
        if static_alpha_shape and (static_alpha_shape.ndims < 1):
            raise ValueError(shape_err_msg)
        elif static_alpha_shape and (static_alpha_shape[-1].value is not None):
            self._n_categories = static_alpha_shape[-1].value
            if self._n_categories < 2:
                raise ValueError(cat_err_msg)
        else:
            _assert_shape_op = tf.assert_rank_at_least(self._alpha,
                                                       1,
                                                       message=shape_err_msg)
            with tf.control_dependencies([_assert_shape_op]):
                self._alpha = tf.identity(self._alpha)
            self._n_categories = tf.shape(self._alpha)[-1]

            _assert_cat_op = tf.assert_greater_equal(self._n_categories,
                                                     2,
                                                     message=cat_err_msg)
            with tf.control_dependencies([_assert_cat_op]):
                self._alpha = tf.identity(self._alpha)
        self._check_numerics = check_numerics

        super(Dirichlet, self).__init__(dtype=dtype,
                                        param_dtype=dtype,
                                        is_continuous=True,
                                        is_reparameterized=False,
                                        group_ndims=group_ndims,
                                        **kwargs)
Example 13
    def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
        """Check whether event_ndims is atleast min_event_ndims."""
        event_ndims = tf.convert_to_tensor(event_ndims, name="event_ndims")
        event_ndims_ = tensor_util.constant_value(event_ndims)
        assertions = []

        if not event_ndims.dtype.is_integer:
            raise ValueError("Expected integer dtype, got dtype {}".format(
                event_ndims.dtype))

        if event_ndims_ is not None:
            if event_ndims.shape.ndims != 0:
                raise ValueError(
                    "Expected scalar event_ndims, got shape {}".format(
                        event_ndims.shape))
            if min_event_ndims > event_ndims_:
                raise ValueError("event_ndims ({}) must be larger than "
                                 "min_event_ndims ({})".format(
                                     event_ndims_, min_event_ndims))
        elif self.validate_args:
            assertions += [
                tf.assert_greater_equal(event_ndims, min_event_ndims)
            ]

        if event_ndims.shape.is_fully_defined():
            if event_ndims.shape.ndims != 0:
                raise ValueError("Expected scalar shape, got ndims {}".format(
                    event_ndims.shape.ndims))

        elif self.validate_args:
            assertions += [
                tf.assert_rank(event_ndims, 0, message="Expected scalar.")
            ]
        return assertions
Example 14
def preprocess_for_inception(images):
    """Preprocess images for inception.

    Args:
      images: images minibatch. Shape [batch size, width, height,
        channels]. Values are in [0..255].

    Returns:
      preprocessed_images
    """

    # Images should have 3 channels.
    assert images.shape[3].value == 3

    # gan_eval.preprocess_image expects values in [0, 255].
    with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                  tf.assert_less_equal(images, 255.0)]):
        images = tf.identity(images)

    preprocessed_images = tf.map_fn(
        fn=gan_eval.preprocess_image,
        elems=images,
        back_prop=False
    )

    return preprocessed_images
Example 15
def new_mean_squared(grad_vec, decay, ms):
    """Calculates the new accumulated mean squared of the gradient.

  Args:
    grad_vec: the vector for the current gradient
    decay: the decay term
    ms: the previous mean_squared value

  Returns:
    the new mean_squared value
  """
    decay_size = decay.get_shape().num_elements()
    decay_check_ops = [
        tf.assert_less_equal(decay, 1., summarize=decay_size),
        tf.assert_greater_equal(decay, 0., summarize=decay_size)
    ]

    with tf.control_dependencies(decay_check_ops):
        grad_squared = tf.square(grad_vec)

    # If the previous mean_squared is the 0 vector, don't use the decay and just
    # return the full grad_squared. This should only happen on the first timestep.
    decay = tf.cond(tf.reduce_all(tf.equal(ms, 0.)),
                    lambda: tf.zeros_like(decay, dtype=tf.float32),
                    lambda: decay)

    # Update the running average of squared gradients.
    epsilon = 1e-12
    return (1. - decay) * (grad_squared + epsilon) + decay * ms
Example 16
  def pre_attention(self, segment_number, query_antecedent,
                    memory_antecedent, bias):
    """Called prior to self-attention, to incorporate memory items.

    Args:
      segment_number: an integer Tensor with shape [batch]
      query_antecedent: a Tensor with shape [batch, length_q, channels]
      memory_antecedent: must be None. Attention normally allows this to be a
        Tensor with shape [batch, length_m, channels], but we currently only
        support memory for decoder-side self-attention.
      bias: bias Tensor (see attention_bias())
    Returns:
      (data, new_query_antecedent, new_memory_antecedent, new_bias)
    """
    with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE):
      assert memory_antecedent is None, "We only support language modeling"
      with tf.control_dependencies([
          tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]):
        difference = self.batch_size - tf.size(segment_number)
        segment_number = tf.pad(segment_number, [[0, difference]])
        reset_op = self.reset(tf.reshape(tf.where(
            tf.less(segment_number, self.segment_number)), [-1]))
      memory_results = {}
      with tf.control_dependencies([reset_op]):
        with tf.control_dependencies([
            self.update_segment_number(segment_number)]):
          x = tf.pad(query_antecedent, [
              [0, difference], [0, 0], [0, 0]])
          access_logits, retrieved_mem = self.read(x)
      memory_results["x"] = x
      memory_results["access_logits"] = access_logits
      memory_results["retrieved_mem"] = retrieved_mem
      return memory_results, query_antecedent, memory_antecedent, bias
Example 17
def rot_angle_error(y_true, y_pred):
    """
    Computes the angular error between the quaternions by converting their difference
    to angle axis representation. Formula used to compute the angular difference in radians:
    2 * arccos(|q1 . q2|)
    Taken from: 
    https://math.stackexchange.com/questions/90081/quaternion-distance and verified in
    https://link.springer.com/article/10.1007%2Fs10851-009-0161-2 (Du Q.Huynh, Section 3.4)
    """
    quat_true = y_true[:, :4]
    quat_pred = y_pred
    #quat_pred = tf.split(y_pred, [4, 1], axis=-1)[0]
    # quat_true = tf.split(y_true, [4, 3], axis=1)[0]
    # quat_pred = tf.split(y_pred, [4, 3], axis=1)[0]
    quat_true = quat.normalize_quaternions(quat_true)
    quat_pred = quat.normalize_quaternions(quat_pred)

    quat_dot_product = tf.abs(quat.batchwise_dot_product(quat_true, quat_pred))
    quat_dot_product = tf.clip_by_value(quat_dot_product, -0.99999, 0.99999)
    check_greater_min = tf.assert_greater_equal(quat_dot_product, -1.)
    check_smaller_max = tf.assert_less_equal(quat_dot_product, 1.)
    with tf.control_dependencies([check_greater_min, check_smaller_max]):
        diffs_rad = 2. * tf.acos(quat_dot_product)
        diffs_deg = (diffs_rad * 180.) / math.pi
        return tf.reduce_mean(diffs_deg)
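The same angular-error formula can be sketched with plain TF ops; this is an illustration, not the project's quat helpers, and assumes [w, x, y, z] unit quaternions:
import math
import tensorflow as tf

q1 = tf.constant([[1., 0., 0., 0.]])                               # identity rotation
q2 = tf.nn.l2_normalize(tf.constant([[1., 1., 0., 0.]]), axis=-1)  # 90 deg about x
dot = tf.abs(tf.reduce_sum(q1 * q2, axis=-1))                      # |q1 . q2|
dot = tf.clip_by_value(dot, -0.99999, 0.99999)                     # keep acos numerically safe
angle_deg = 2. * tf.acos(dot) * 180. / math.pi                     # evaluates to ~90 degrees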
Example 18
  def _check_valid_event_ndims(self, min_event_ndims, event_ndims):
    """Check whether event_ndims is atleast min_event_ndims."""
    event_ndims = tf.convert_to_tensor(event_ndims, name="event_ndims")
    event_ndims_ = tf.contrib.util.constant_value(event_ndims)
    assertions = []

    if not event_ndims.dtype.is_integer:
      raise ValueError("Expected integer dtype, got dtype {}".format(
          event_ndims.dtype))

    if event_ndims_ is not None:
      if event_ndims.shape.ndims != 0:
        raise ValueError("Expected scalar event_ndims, got shape {}".format(
            event_ndims.shape))
      if min_event_ndims > event_ndims_:
        raise ValueError("event_ndims ({}) must be larger than "
                         "min_event_ndims ({})".format(event_ndims_,
                                                       min_event_ndims))
    elif self.validate_args:
      assertions += [tf.assert_greater_equal(event_ndims, min_event_ndims)]

    if event_ndims.shape.is_fully_defined():
      if event_ndims.shape.ndims != 0:
        raise ValueError("Expected scalar shape, got ndims {}".format(
            event_ndims.shape.ndims))

    elif self.validate_args:
      assertions += [tf.assert_rank(event_ndims, 0, message="Expected scalar.")]
    return assertions
Example 19
def image_central_crop_boundingbox(tensor, target_shape):
    '''
    crop central part of image according to target_shape

    tensor is supposed to be 4D [batch, height, width, channel]
    '''
    current_shape = tf.shape(tensor)

    # target_shape is assumed to be (height, width), matching the NHWC layout.
    offset_height = (current_shape[1] - target_shape[0]) // 2
    offset_width = (current_shape[2] - target_shape[1]) // 2
    cropped = tf.image.crop_to_bounding_box(
        tensor,
        offset_height=offset_height,
        offset_width=offset_width,
        target_height=target_shape[0],
        target_width=target_shape[1],
    )

    with tf.control_dependencies([
            tf.assert_greater_equal(current_shape[1:3],
                                    target_shape[0:2],
                                    data=[current_shape, target_shape])
    ]):
        output = tf.cond(
            tf.reduce_all(tf.equal(current_shape[1:3], target_shape[0:2])),
            true_fn=lambda: tensor,
            false_fn=lambda: cropped,
        )
    return output
Example 20
 def test_doesnt_raise_when_equal(self):
     with self.test_session():
         small = tf.constant([1, 2], name="small")
         with tf.control_dependencies(
             [tf.assert_greater_equal(small, small)]):
             out = tf.identity(small)
         out.eval()
Example 21
def cifar_transition(images, num_channels=3, is_chief=True):
    """ Transition -> to car classifier 
    
    Args:
        images: a 4D Tensor of images
        
    Returns:
        (., 32, 32, 1 or 3)  images in [0., 1.]
    """
    del is_chief

    with tf.control_dependencies([tf.assert_greater_equal(images, 0.)]):
        with tf.control_dependencies([tf.assert_less_equal(images, 1.)]):
            # resize
            size = images.get_shape()[1].value
            if size != 32:
                images = tf.image.resize_nearest_neighbor(images, (32, 32))

            # grayscale
            channels = images.get_shape()[3].value
            assert channels in [1, 3]
            if channels == 1 and num_channels == 3:
                images = tf.tile(images, (1, 1, 1, 3))
            if channels == 3 and num_channels == 1:
                images = tf.reduce_mean(images, axis=3, keep_dims=True)
            return images
Example 22
 def test_doesnt_raise_when_both_empty(self):
     with self.test_session():
         larry = tf.constant([])
         curly = tf.constant([])
         with tf.control_dependencies([tf.assert_greater_equal(larry, curly)]):
             out = tf.identity(larry)
         out.eval()
Example 23
 def __init__(self,
              dtype,
              param_dtype,
              is_continuous,
              is_reparameterized,
              group_event_ndims=0):
     self._dtype = dtype
     self._param_dtype = param_dtype
     self._is_continuous = is_continuous
     self._is_reparameterized = is_reparameterized
     if isinstance(group_event_ndims, int):
         if group_event_ndims < 0:
             raise ValueError("group_event_ndims must be non-negative.")
         self._group_event_ndims = group_event_ndims
     else:
         group_event_ndims = tf.convert_to_tensor(group_event_ndims,
                                                  tf.int32)
         _assert_rank_op = tf.assert_rank(
             group_event_ndims,
             0,
             message="group_event_ndims should be a scalar (0-D Tensor).")
         _assert_nonnegative_op = tf.assert_greater_equal(
             group_event_ndims,
             0,
             message="group_event_ndims must be non-negative.")
         with tf.control_dependencies(
             [_assert_rank_op, _assert_nonnegative_op]):
             self._group_event_ndims = tf.identity(group_event_ndims)
Example 24
 def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
     with self.test_session():
         small = tf.constant([1], name="small")
         big = tf.constant([3, 1], name="big")
         with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
             out = tf.identity(small)
         out.eval()
Example 25
    def __init__(self,
                 dtype,
                 param_dtype,
                 is_continuous,
                 is_reparameterized,
                 use_path_derivative=False,
                 group_ndims=0,
                 **kwargs):
        if 'group_event_ndims' in kwargs:
            raise ValueError(
                "The argument `group_event_ndims` has been deprecated "
                "Please use `group_ndims` instead.")

        self._dtype = dtype
        self._param_dtype = param_dtype
        self._is_continuous = is_continuous
        self._is_reparameterized = is_reparameterized
        self._use_path_derivative = use_path_derivative
        if isinstance(group_ndims, int):
            if group_ndims < 0:
                raise ValueError("group_ndims must be non-negative.")
            self._group_ndims = group_ndims
        else:
            group_ndims = tf.convert_to_tensor(group_ndims, tf.int32)
            _assert_rank_op = tf.assert_rank(
                group_ndims,
                0,
                message="group_ndims should be a scalar (0-D Tensor).")
            _assert_nonnegative_op = tf.assert_greater_equal(
                group_ndims, 0, message="group_ndims must be non-negative.")
            with tf.control_dependencies(
                [_assert_rank_op, _assert_nonnegative_op]):
                self._group_ndims = tf.identity(group_ndims)
Example 26
def remidify(pitches):
  """Transforms [0, 88) to MIDI pitches [21, 108]."""
  assertions = [
      tf.assert_greater_equal(pitches, 0),
      tf.assert_less_equal(pitches, 87)
  ]
  with tf.control_dependencies(assertions):
    return pitches + 21
Example 27
 def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
     with self.test_session():
         small = tf.constant([1], name="small")
         big = tf.constant([3, 1], name="big")
         with tf.control_dependencies([tf.assert_greater_equal(big,
                                                               small)]):
             out = tf.identity(small)
         out.eval()
Example 28
 def test_doesnt_raise_when_both_empty(self):
     with self.test_session():
         larry = tf.constant([])
         curly = tf.constant([])
         with tf.control_dependencies(
             [tf.assert_greater_equal(larry, curly)]):
             out = tf.identity(larry)
         out.eval()
Example 29
 def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
     with self.test_session():
         small = tf.constant([1, 1, 1], name="small")
         big = tf.constant([3, 1], name="big")
         with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
             with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
                 out = tf.identity(small)
             out.eval()
Example 30
 def test_raises_when_less(self):
     with self.test_session():
         small = tf.constant([1, 2], name="small")
         big = tf.constant([3, 4], name="big")
         with tf.control_dependencies([tf.assert_greater_equal(small, big, message="fail")]):
             out = tf.identity(small)
         with self.assertRaisesOpError("fail.*small.*big"):
             out.eval()
Example 31
def demidify(pitches):
  """Transforms MIDI pitches [21,108] to [0, 88)."""
  assertions = [
      tf.assert_greater_equal(pitches, 21),
      tf.assert_less_equal(pitches, 108)
  ]
  with tf.control_dependencies(assertions):
    return pitches - 21
Example 32
def assert_greater_equal(*args, **kwargs):
    """
    Wrapper for tf.assert_greater_equal.
    Overrides tf.device so that the assert always goes on CPU.
    The unwrapped version raises an exception if used with tf.device("/GPU:x").
    """
    with tf.device("/CPU:0"):
        return tf.assert_greater_equal(*args, **kwargs)
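A minimal usage sketch of the wrapper above (an assumption-based illustration for TF 1.x graph mode on a GPU):
with tf.device("/GPU:0"):
    x = tf.random_uniform([4])
    # The wrapper pins the assert op itself to the CPU, so the surrounding
    # GPU device scope does not cause a placement error.
    with tf.control_dependencies([assert_greater_equal(x, 0.0)]):
        x = tf.identity(x)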
Example 33
def demidify(pitches):
    """Transforms MIDI pitches [21,108] to [0, 88)."""
    assertions = [
        tf.assert_greater_equal(pitches, 21),
        tf.assert_less_equal(pitches, 108)
    ]
    with tf.control_dependencies(assertions):
        return pitches - 21
Example 34
def remidify(pitches):
    """Transforms [0, 88) to MIDI pitches [21, 108]."""
    assertions = [
        tf.assert_greater_equal(pitches, 0),
        tf.assert_less_equal(pitches, 87)
    ]
    with tf.control_dependencies(assertions):
        return pitches + 21
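A minimal round-trip sketch using demidify and remidify from the two examples above (values are illustrative):
midi = tf.constant([21, 60, 108])
keys = demidify(midi)   # zero-based piano-key indices: [0, 39, 87]
back = remidify(keys)   # back to MIDI pitches: [21, 60, 108]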
Example 35
def check_xyz_normed(xyzs):
    xyz_min = tf.reduce_min(xyzs)
    check = tf.assert_greater_equal(xyz_min,
                                    0.0,
                                    message='xyz should be normed at first')
    with tf.control_dependencies([check]):
        xyzs = tf.identity(xyzs)
    return xyzs
Example 36
    def slice_sequences(self, state_like_seqs, action_like_seqs,
                        example_sequence_length):
        """
        Slices sequences of length `example_sequence_length` into subsequences
        of length `sequence_length`. The dicts of sequences are updated
        in-place and the same dicts are returned.
        """
        # handle random shifting and frame skip
        sequence_length = self.hparams.sequence_length  # desired sequence length
        frame_skip = self.hparams.frame_skip
        time_shift = self.hparams.time_shift
        if isinstance(example_sequence_length, tf.Tensor):
            example_sequence_length = tf.cast(example_sequence_length,
                                              tf.int32)
        if (time_shift > 0
                and self.mode == 'train') or self.hparams.force_time_shift:
            assert time_shift > 0 and isinstance(time_shift, int)
            num_shifts = ((example_sequence_length - 1) -
                          (sequence_length - 1) *
                          (frame_skip + 1)) // time_shift
            assert_message = (
                'example_sequence_length has to be at least %d when '
                'sequence_length=%d, frame_skip=%d.' %
                ((sequence_length - 1) *
                 (frame_skip + 1) + 1, sequence_length, frame_skip))
            with tf.control_dependencies([
                    tf.assert_greater_equal(
                        num_shifts,
                        0,
                        data=[example_sequence_length, num_shifts],
                        message=assert_message)
            ]):
                t_start = tf.random_uniform(
                    [], 0, num_shifts + 1, dtype=tf.int32,
                    seed=self.seed) * time_shift
        elif time_shift < 0:  # if negative, always use the last subsequence
            t_start = ((example_sequence_length - 1) - (sequence_length - 1) *
                       (frame_skip + 1))
        else:
            t_start = 0
        state_like_t_slice = slice(
            t_start, t_start + (sequence_length - 1) * (frame_skip + 1) + 1,
            frame_skip + 1)
        action_like_t_slice = slice(
            t_start, t_start + (sequence_length - 1) * (frame_skip + 1))

        for example_name, seq in state_like_seqs.items():
            seq = tf.convert_to_tensor(seq)[state_like_t_slice]
            seq.set_shape([sequence_length] + seq.shape.as_list()[1:])
            state_like_seqs[example_name] = seq
        for example_name, seq in action_like_seqs.items():
            seq = tf.convert_to_tensor(seq)[action_like_t_slice]
            seq.set_shape([(sequence_length - 1) * (frame_skip + 1)] +
                          seq.shape.as_list()[1:])
            # concatenate actions of skipped frames into single macro actions
            seq = tf.reshape(seq, [sequence_length - 1, -1])
            action_like_seqs[example_name] = seq
        return state_like_seqs, action_like_seqs
Example 37
def get_even_batch_producer(paths,
                            batch_size=FLAGS.batch_size,
                            prefetch_size=FLAGS.capacity,
                            num_of_threads=FLAGS.threads):

    tf.assert_greater_equal(
        batch_size, len(paths), data=[batch_size, len(paths)],
        message='batch_size must be at least the number of classes')

    sub_batch_size = batch_size // len(paths)
    input_prods = []
    for path in paths:
        input_prods.append(get_batch_producer(
            path,
            batch_size=sub_batch_size,
            prefetch_size=FLAGS.capacity,
            num_of_threads=FLAGS.threads,
            scope='producer_%s' % path
        ))

    '''batch_op = tf.train.batch_join(
      list(zip(*input_prods)),
      batch_size,
      #num_of_threads,
      capacity=prefetch_size,
      shapes=[[None], [], []],
      dynamic_pad=True,
      enqueue_many=True,
      name='even_batch_producer')'''

    with tf.name_scope('even_batch_producer'):
        q = tf.PaddingFIFOQueue(
            capacity=prefetch_size,
            dtypes=[tf.float32, tf.int32, tf.int32],
            shapes=[[None], [], []], name='padding_even_queue')

        for data, seq_len, label in input_prods:
            enqueue_op = q.enqueue_many(
                [data, seq_len, label], name='push_many_example_of_class')
            qr = tf.train.QueueRunner(q, [enqueue_op] * num_of_threads)
            tf.train.add_queue_runner(qr)

        batch_op = q.dequeue_many(n=batch_size, name='pop_batch')

    return batch_op, input_prods
Example 38
def scale_to_inception_range(image):
  """Scales an image in the range [0,1] to [-1,1] as expected by inception."""
  # Assert that incoming images have been properly scaled to [0,1].
  with tf.control_dependencies(
      [tf.assert_less_equal(tf.reduce_max(image), 1.),
       tf.assert_greater_equal(tf.reduce_min(image), 0.)]):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
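A minimal usage sketch of the function above (assumes TF 1.x and an input already scaled to [0, 1]):
image = tf.random_uniform([224, 224, 3], minval=0., maxval=1.)
inception_ready = scale_to_inception_range(image)  # values now in [-1, 1]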
Example 39
 def _check_arg_and_apply_f(*args, **kwargs):
   dist = args[0]
   x = args[1]
   with tf.control_dependencies([
       tf.assert_greater_equal(
           x, dist.loc,
           message="x is not in the support of the distribution"
       )] if dist.validate_args else []):
     return f(*args, **kwargs)
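The snippet above is the inner closure of a decorator; a hedged sketch of how the enclosing decorator might look (the outer function name is an assumption, not from the source):
import functools
import tensorflow as tf

def _assert_in_support(f):
    # Hypothetical outer decorator: attaches the support check before calling f.
    @functools.wraps(f)
    def _check_arg_and_apply_f(*args, **kwargs):
        dist, x = args[0], args[1]
        deps = ([tf.assert_greater_equal(
            x, dist.loc,
            message="x is not in the support of the distribution")]
            if dist.validate_args else [])
        with tf.control_dependencies(deps):
            return f(*args, **kwargs)
    return _check_arg_and_apply_f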
Example 40
 def build_graph(parameters):
   input_tensor = tf.placeholder(
       dtype=tf.float32, name="input", shape=parameters["input_shape"])
   filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
   assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
   with tf.control_dependencies([assert_op]):
     out = tf.nn.conv2d(input_tensor, filter_value,
                        strides=(1, 1, 1, 1), padding="SAME")
     return [input_tensor], [out]
Example 41
 def build_graph(parameters):
   input_tensor = tf.placeholder(
       dtype=tf.float32, name="input", shape=parameters["input_shape"])
   filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
   assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
   with tf.control_dependencies([assert_op]):
     out = tf.nn.conv2d(
         input_tensor, filter_value, strides=(1, 1, 1, 1), padding="SAME")
     return [input_tensor], [out]
Example 42
 def _check_arg_and_apply_f(*args, **kwargs):
   dist = args[0]
   x = args[1]
   with tf.control_dependencies([
       tf.assert_greater_equal(
           x, dist.loc,
           message="x is not in the support of the distribution"
       )] if dist.validate_args else []):
     return f(*args, **kwargs)
Example 43
 def _prob(self, x):
   with tf.control_dependencies([
       tf.assert_greater_equal(
           x, self.scale,
           message="x is not in the support of the distribution."
       )] if self.validate_args else []):
     def prob_on_support(z):
       return (self.concentration * (self.scale ** self.concentration) /
               (z ** (self.concentration + 1)))
     return self._extend_support(x, prob_on_support, alt=0.)
Example 44
 def check(t):
   samples_batch_shape = tf.shape(samples)[1:]
   broadcasted_batch_shape = tf.broadcast_dynamic_shape(
       samples_batch_shape, tf.shape(t))
   # This rank check ensures that I don't get a wrong answer from the
   # _shapes_ broadcasting against each other.
   samples_batch_ndims = tf.size(samples_batch_shape)
   ge = tf.assert_greater_equal(samples_batch_ndims, tf.rank(t))
   eq = tf.assert_equal(samples_batch_shape, broadcasted_batch_shape)
   return ge, eq
Example 45
  def _log_prob(self, x):
    with tf.control_dependencies([
        tf.assert_greater_equal(
            x, self.scale,
            message="x is not in the support of the distribution."
        )] if self.validate_args else []):

      def log_prob_on_support(z):
        return (tf.log(self.concentration) +
                self.concentration * tf.log(self.scale) -
                (self.concentration + 1.) * tf.log(z))

      return self._extend_support(x, log_prob_on_support, alt=-np.inf)
Example 46
  def _maybe_check_valid_shape(self, shape, validate_args):
    """Check that a shape Tensor is int-type and otherwise sane."""
    if not shape.dtype.is_integer:
      raise TypeError('{} dtype ({}) should be `int`-like.'.format(
          shape, shape.dtype.name))

    assertions = []

    ndims = tf.rank(shape)
    ndims_ = tensor_util.constant_value(ndims)
    if ndims_ is not None and ndims_ > 1:
      raise ValueError('`{}` rank ({}) should be <= 1.'.format(
          shape, ndims_))
    elif validate_args:
      assertions.append(
          tf.assert_less_equal(
              ndims, 1, message='`{}` rank should be <= 1.'.format(shape)))

    # Note, we might be inclined to use tensor_util.constant_value_as_shape
    # here, but that method coerces negative values into `None`s, rendering the
    # checks we do below impossible.
    shape_tensor_ = tensor_util.constant_value(shape)
    if shape_tensor_ is not None:
      es = np.int32(shape_tensor_)
      if sum(es == -1) > 1:
        raise ValueError(
            '`{}` must have at most one `-1` (given {})'
            .format(shape, es))
      if np.any(es < -1):
        raise ValueError(
            '`{}` elements must be either positive integers or `-1` '
            '(given {}).'
            .format(shape, es))
    elif validate_args:
      assertions.extend([
          tf.assert_less_equal(
              tf.reduce_sum(tf.cast(tf.equal(shape, -1), tf.int32)),
              1,
              message='`{}` elements must have at most one `-1`.'
              .format(shape)),
          tf.assert_greater_equal(
              shape,
              -1,
              message='`{}` elements must be either positive integers or `-1`.'
              .format(shape)),
      ])
    return assertions
Example 47
def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  The idea is that if the true CDF is within an `eps`-envelope of the
  empirical CDF of the samples, and the support is bounded below, then
  the mean is bounded below as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are
  broadcast together with `envelope` and `low`, and operated on
  separately.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF
      envelopes (i.e., the `eps` above).
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.  `samples >= low`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than
      the corresponding `low`.
  """
  with tf.name_scope(name, "minimum_mean", [samples, envelope, low]):
    dtype = dtype_util.common_dtype([samples, envelope, low], tf.float32)
    samples = tf.convert_to_tensor(samples, name="samples", dtype=dtype)
    envelope = tf.convert_to_tensor(envelope, name="envelope", dtype=dtype)
    low = tf.convert_to_tensor(low, name="low", dtype=dtype)

    xmin = tf.reduce_min(samples, axis=[0])
    msg = "Given sample minimum value falls below expectations"
    check_op = tf.assert_greater_equal(xmin, low, message=msg)
    with tf.control_dependencies([check_op]):
      return - _do_maximum_mean(-samples, envelope, -low)
Example 48
def percentile(x,
               q,
               axis=None,
               interpolation=None,
               keep_dims=False,
               validate_args=False,
               name=None):
  """Compute the `q`-th percentile of `x`.

  Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
  way from the minimum to the maximum in a sorted copy of `x`.

  The values and distances of the two nearest neighbors as well as the
  `interpolation` parameter will determine the percentile if the normalized
  ranking does not match the location of `q` exactly.

  This function is the same as the median if `q = 50`, the same as the minimum
  if `q = 0` and the same as the maximum if `q = 100`.


  ```python
  # Get 30th percentile with default ('nearest') interpolation.
  x = [1., 2., 3., 4.]
  percentile(x, q=30.)
  ==> 2.0

  # Get 30th percentile with 'lower' interpolation
  x = [1., 2., 3., 4.]
  percentile(x, q=30., interpolation='lower')
  ==> 1.0

  # Get 100th percentile (maximum).  By default, this is computed over every dim
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100.)
  ==> 4.0

  # Treat the leading dim as indexing samples, and find the 100th quantile (max)
  # over all such samples.
  x = [[1., 2.]
       [3., 4.]]
  percentile(x, q=100., axis=[0])
  ==> [3., 4.]
  ```

  Compare to `numpy.percentile`.

  Args:
    x:  Floating point `N-D` `Tensor` with `N > 0`.  If `axis` is not `None`,
      `x` must have statically known number of dimensions.
    q:  Scalar `Tensor` in `[0, 100]`. The percentile.
    axis:  Optional `0-D` or `1-D` integer `Tensor` with constant values.
      The axis that hold independent samples over which to return the desired
      percentile.  If `None` (the default), treat every dimension as a sample
      dimension, returning a scalar.
    interpolation : {"lower", "higher", "nearest"}.  Default: "nearest"
      This optional parameter specifies the interpolation method to
      use when the desired quantile lies between two data points `i < j`:
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j`, whichever is nearest.
    keep_dims:  Python `bool`. If `True`, the last dimension is kept with size 1
      If `False`, the last dimension is removed from the output shape.
    validate_args:  Whether to add runtime checks of argument validity.
      If False, and arguments are incorrect, correct behavior is not guaranteed.
    name:  A Python string name to give this `Op`.  Default is "percentile"

  Returns:
    A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
      `axis` is `None`, a scalar.

  Raises:
    ValueError:  If argument 'interpolation' is not an allowed type.
  """
  name = name or "percentile"
  allowed_interpolations = {"lower", "higher", "nearest"}

  if interpolation is None:
    interpolation = "nearest"
  else:
    if interpolation not in allowed_interpolations:
      raise ValueError("Argument 'interpolation' must be in %s.  Found %s" %
                       (allowed_interpolations, interpolation))

  with tf.name_scope(name, [x, q]):
    x = tf.convert_to_tensor(x, name="x")
    # Double is needed here and below, else we get the wrong index if the array
    # is huge along axis.
    q = tf.to_double(q, name="q")
    _get_static_ndims(q, expect_ndims=0)

    if validate_args:
      q = control_flow_ops.with_dependencies([
          tf.assert_rank(q, 0),
          tf.assert_greater_equal(q, tf.to_double(0.)),
          tf.assert_less_equal(q, tf.to_double(100.))
      ], q)

    if axis is None:
      y = tf.reshape(x, [-1])
    else:
      axis = tf.convert_to_tensor(axis, name="axis")
      tf.assert_integer(axis)
      axis_ndims = _get_static_ndims(
          axis, expect_static=True, expect_ndims_no_more_than=1)
      axis_const = tensor_util.constant_value(axis)
      if axis_const is None:
        raise ValueError(
            "Expected argument 'axis' to be statically available.  Found: %s" %
            axis)
      axis = axis_const
      if axis_ndims == 0:
        axis = [axis]
      axis = [int(a) for a in axis]
      x_ndims = _get_static_ndims(
          x, expect_static=True, expect_ndims_at_least=1)
      axis = _make_static_axis_non_negative(axis, x_ndims)
      y = _move_dims_to_flat_end(x, axis, x_ndims)

    frac_at_q_or_above = 1. - q / 100.
    d = tf.to_double(tf.shape(y)[-1])

    if interpolation == "lower":
      index = tf.ceil((d - 1) * frac_at_q_or_above)
    elif interpolation == "higher":
      index = tf.floor((d - 1) * frac_at_q_or_above)
    elif interpolation == "nearest":
      index = tf.round((d - 1) * frac_at_q_or_above)

    # If d is gigantic, then we would have d == d - 1, even in double... So
    # let's use max/min to avoid out of bounds errors.
    d = tf.shape(y)[-1]
    # d - 1 will be distinct from d in int32.
    index = tf.clip_by_value(tf.to_int32(index), 0, d - 1)

    # Sort everything, not just the top 'k' entries, which allows multiple calls
    # to sort only once (under the hood) and use CSE.
    sorted_y = _sort_tensor(y)

    # result.shape = B
    result = sorted_y[..., index]
    result.set_shape(y.get_shape()[:-1])

    if keep_dims:
      if axis is None:
        # ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
        ones_vec = tf.ones(shape=[_get_best_effort_ndims(x)], dtype=tf.int32)
        result *= tf.ones(ones_vec, dtype=x.dtype)
      else:
        result = _insert_back_keep_dims(result, axis)

    return result
Example 49
  def _sample_n(self, n, seed=None):
    seed = seed_stream.SeedStream(seed, salt='vom_mises_fisher')
    # The sampling strategy relies on the fact that vMF variates are symmetric
    # about the mean direction. Accordingly, if we have a sampling strategy for
    # the away-from-mean angle, then we can uniformly sample the remaining
    # dimensions on the S^{dim-2} sphere, and rotate these samples from a
    # (1, 0, 0, ..., 0)-mode distribution into the target orientation.
    #
    # This is easy to imagine on the 1-sphere (S^1; in 2-D space): sample a
    # von-Mises distributed `x` value in [-1, 1], then uniformly select what
    # amounts to a "up" or "down" additional degree of freedom after unit
    # normalizing, followed by a final rotation to the desired mean direction
    # from a basis of (1, 0).
    #
    # On S^2 (in 3-D), selecting a vMF `x` identifies a circle in `yz` on the
    # unit sphere over which the distribution is uniform, in particular the
    # circle where x = \hat{x} intersects the unit sphere. We pick a point on
    # that circle, then rotate to the desired mean direction from a basis of
    # (1, 0, 0).
    event_dim = self.event_shape[0].value or self._event_shape_tensor()[0]

    sample_batch_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
    dim = tf.cast(event_dim - 1, self.dtype)
    if event_dim == 3:
      samples_dim0 = self._sample_3d(n, seed=seed)
    else:
      # Wood'94 provides a rejection algorithm to sample the x coordinate.
      # Wood'94 definition of b:
      # b = (-2 * kappa + tf.sqrt(4 * kappa**2 + dim**2)) / dim
      # https://stats.stackexchange.com/questions/156729 suggests:
      b = dim / (2 * self.concentration +
                 tf.sqrt(4 * self.concentration**2 + dim**2))
      # TODO(bjp): Integrate any useful numerical tricks from hyperspherical VAE
      #     https://github.com/nicola-decao/s-vae-tf/
      x = (1 - b) / (1 + b)
      c = self.concentration * x + dim * tf.log1p(-x**2)
      beta = beta_lib.Beta(dim / 2, dim / 2)

      def cond_fn(w, should_continue):
        del w
        return tf.reduce_any(should_continue)

      def body_fn(w, should_continue):
        z = beta.sample(sample_shape=sample_batch_shape, seed=seed())
        w = tf.where(should_continue, (1 - (1 + b) * z) / (1 - (1 - b) * z), w)
        w = tf.check_numerics(w, 'w')
        should_continue = tf.logical_and(
            should_continue,
            self.concentration * w + dim * tf.log1p(-x * w) - c <
            tf.log(tf.random_uniform(sample_batch_shape, seed=seed(),
                                     dtype=self.dtype)))
        return w, should_continue

      w = tf.zeros(sample_batch_shape, dtype=self.dtype)
      should_continue = tf.ones(sample_batch_shape, dtype=tf.bool)
      samples_dim0 = tf.while_loop(cond_fn, body_fn, (w, should_continue))[0]
      samples_dim0 = samples_dim0[..., tf.newaxis]
    if not self._allow_nan_stats:
      # Verify samples are w/in -1, 1, with useful error output tensors (top
      # value rather than all values).
      with tf.control_dependencies([
          tf.assert_less_equal(
              samples_dim0, self.dtype.as_numpy_dtype(1.01),
              data=[tf.nn.top_k(tf.reshape(samples_dim0, [-1]))[0]]),
          tf.assert_greater_equal(
              samples_dim0, self.dtype.as_numpy_dtype(-1.01),
              data=[-tf.nn.top_k(tf.reshape(-samples_dim0, [-1]))[0]])]):
        samples_dim0 = tf.identity(samples_dim0)
    samples_otherdims_shape = tf.concat([sample_batch_shape, [event_dim - 1]],
                                        axis=0)
    unit_otherdims = tf.nn.l2_normalize(
        tf.random_normal(samples_otherdims_shape, seed=seed(),
                         dtype=self.dtype),
        axis=-1)
    samples = tf.concat([
        samples_dim0,  # we must avoid sqrt(1 - (>1)**2)
        tf.sqrt(tf.maximum(1 - samples_dim0**2, 0.)) * unit_otherdims
    ], axis=-1)
    samples = tf.nn.l2_normalize(samples, axis=-1)
    if not self._allow_nan_stats:
      samples = tf.check_numerics(samples, 'samples')

    # Runtime assert that samples are unit length.
    if not self._allow_nan_stats:
      worst, idx = tf.nn.top_k(
          tf.reshape(tf.abs(1 - tf.linalg.norm(samples, axis=-1)), [-1]))
      with tf.control_dependencies([
          tf.assert_near(
              self.dtype.as_numpy_dtype(0), worst,
              data=[worst, idx,
                    tf.gather(tf.reshape(samples, [-1, event_dim]), idx)],
              atol=1e-4, summarize=100)]):
        samples = tf.identity(samples)
    # The samples generated are symmetric around a mode at (1, 0, 0, ...., 0).
    # Now, we move the mode to `self.mean_direction` using a rotation matrix.
    if not self._allow_nan_stats:
      # Assert that the basis vector rotates to the mean direction, as expected.
      basis = tf.cast(tf.concat([[1.], tf.zeros([event_dim - 1])], axis=0),
                      self.dtype)
      with tf.control_dependencies([
          tf.assert_less(
              tf.linalg.norm(self._rotate(basis) - self.mean_direction,
                             axis=-1),
              self.dtype.as_numpy_dtype(1e-5))
      ]):
        return self._rotate(samples)
    return self._rotate(samples)
Example 50
def tower(inputs,
          is_training,
          dropout_probability,
          input_noise,
          normalize_input,
          flip_horizontally,
          translate,
          num_logits,
          is_initialization=False,
          name=None):
    with tf.name_scope(name, "tower"):
        default_conv_args = dict(
            padding='SAME',
            kernel_size=[3, 3],
            activation_fn=nn.lrelu,
            init=is_initialization
        )
        training_mode_funcs = [
            nn.random_translate, nn.flip_randomly, nn.gaussian_noise, slim.dropout,
            wn.fully_connected, wn.conv2d
        ]
        training_args = dict(
            is_training=is_training
        )

        with \
        slim.arg_scope([wn.conv2d], **default_conv_args), \
        slim.arg_scope(training_mode_funcs, **training_args):
            #pylint: disable=no-value-for-parameter
            net = inputs
            assert_shape(net, [None, 32, 32, 3])

            net = tf.cond(normalize_input,
                          lambda: slim.layer_norm(net,
                                                  scale=False,
                                                  center=False,
                                                  scope='normalize_inputs'),
                          lambda: net)
            assert_shape(net, [None, 32, 32, 3])

            net = nn.flip_randomly(net,
                                   horizontally=flip_horizontally,
                                   vertically=False,
                                   name='random_flip')
            net = tf.cond(translate,
                          lambda: nn.random_translate(net, scale=2, name='random_translate'),
                          lambda: net)
            net = nn.gaussian_noise(net, scale=input_noise, name='gaussian_noise')

            net = wn.conv2d(net, 128, scope="conv_1_1")
            net = wn.conv2d(net, 128, scope="conv_1_2")
            net = wn.conv2d(net, 128, scope="conv_1_3")
            net = slim.max_pool2d(net, [2, 2], scope='max_pool_1')
            net = slim.dropout(net, 1 - dropout_probability, scope='dropout_probability_1')
            assert_shape(net, [None, 16, 16, 128])

            net = wn.conv2d(net, 256, scope="conv_2_1")
            net = wn.conv2d(net, 256, scope="conv_2_2")
            net = wn.conv2d(net, 256, scope="conv_2_3")
            net = slim.max_pool2d(net, [2, 2], scope='max_pool_2')
            net = slim.dropout(net, 1 - dropout_probability, scope='dropout_probability_2')
            assert_shape(net, [None, 8, 8, 256])

            net = wn.conv2d(net, 512, padding='VALID', scope="conv_3_1")
            assert_shape(net, [None, 6, 6, 512])
            net = wn.conv2d(net, 256, kernel_size=[1, 1], scope="conv_3_2")
            net = wn.conv2d(net, 128, kernel_size=[1, 1], scope="conv_3_3")
            net = slim.avg_pool2d(net, [6, 6], scope='avg_pool')
            assert_shape(net, [None, 1, 1, 128])

            net = slim.flatten(net)
            assert_shape(net, [None, 128])

            primary_logits = wn.fully_connected(net, 10, init=is_initialization)
            secondary_logits = wn.fully_connected(net, 10, init=is_initialization)

            with tf.control_dependencies([tf.assert_greater_equal(num_logits, 1),
                                          tf.assert_less_equal(num_logits, 2)]):
                secondary_logits = tf.case([
                    (tf.equal(num_logits, 1), lambda: primary_logits),
                    (tf.equal(num_logits, 2), lambda: secondary_logits),
                ], exclusive=True, default=lambda: primary_logits)

            assert_shape(primary_logits, [None, 10])
            assert_shape(secondary_logits, [None, 10])
            return primary_logits, secondary_logits
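A minimal usage sketch for the tower above (not part of the original example): the placeholder shapes and hyperparameter values are illustrative assumptions, and it presumes the project's `nn`, `wn`, `slim`, and `assert_shape` helpers are importable. Note how `num_logits` selects whether the secondary head is a distinct linear layer (2) or an alias of the primary logits (1).

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="images")
is_training = tf.placeholder(tf.bool, shape=[], name="is_training")

primary_logits, secondary_logits = tower(
    inputs=images,
    is_training=is_training,
    dropout_probability=0.5,           # illustrative value
    input_noise=0.15,                  # illustrative value
    normalize_input=tf.constant(True),
    flip_horizontally=tf.constant(True),
    translate=tf.constant(True),
    num_logits=tf.constant(2),
    name="student")
# Both outputs have shape [batch, 10]; with num_logits == 1 the tf.case above
# makes the secondary head simply mirror the primary logits.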
Esempio n. 51
0
def test_doesnt_raise_when_equal(self):
    with self.test_session():
        small = tf.constant([1, 2], name="small")
        with tf.control_dependencies([tf.assert_greater_equal(small, small)]):
            out = tf.identity(small)
        out.eval()
Esempio n. 52
0
def expected_calibration_error(y_true, y_pred, nbins=20):
  """Calculates Expected Calibration Error (ECE).

  ECE is a scalar summary statistic of calibration error. It is the
  sample-weighted average of the difference between the predicted and true
  probabilities of a positive detection across uniformly spaced model
  confidence bins in [0, 1]. See the referenced paper for a thorough explanation.

  Reference:
    Guo et al., "On Calibration of Modern Neural Networks",
    Page 2, Expected Calibration Error (ECE).
    https://arxiv.org/pdf/1706.04599.pdf

  This function creates three local variables, `bin_counts`, `bin_true_sum`, and
  `bin_preds_sum`, that are used to compute ECE.  For estimation of the metric
  over a stream of data, the function creates an `update_op` operation that
  updates these variables and returns the ECE.

  Args:
    y_true: 1-D tf.int64 Tensor of binarized ground truth, corresponding to each
      prediction in y_pred.
    y_pred: 1-D tf.float32 tensor of model confidence scores in range
      [0.0, 1.0].
    nbins: int specifying the number of uniformly-spaced bins into which y_pred
      will be bucketed.

  Returns:
    value_op: A value metric op that returns ece.
    update_op: An operation that increments the `bin_counts`, `bin_true_sum`,
      and `bin_preds_sum` variables appropriately and whose value matches `ece`.

  Raises:
    InvalidArgumentError: if y_pred is not in [0.0, 1.0].
  """
  bin_counts = metrics_impl.metric_variable(
      [nbins], tf.float32, name='bin_counts')
  bin_true_sum = metrics_impl.metric_variable(
      [nbins], tf.float32, name='true_sum')
  bin_preds_sum = metrics_impl.metric_variable(
      [nbins], tf.float32, name='preds_sum')

  with tf.control_dependencies([
      tf.assert_greater_equal(y_pred, 0.0),
      tf.assert_less_equal(y_pred, 1.0),
  ]):
    bin_ids = tf.histogram_fixed_width_bins(y_pred, [0.0, 1.0], nbins=nbins)

  with tf.control_dependencies([bin_ids]):
    update_bin_counts_op = tf.assign_add(
        bin_counts, tf.to_float(tf.bincount(bin_ids, minlength=nbins)))
    update_bin_true_sum_op = tf.assign_add(
        bin_true_sum,
        tf.to_float(tf.bincount(bin_ids, weights=y_true, minlength=nbins)))
    update_bin_preds_sum_op = tf.assign_add(
        bin_preds_sum,
        tf.to_float(tf.bincount(bin_ids, weights=y_pred, minlength=nbins)))

  ece_update_op = _ece_from_bins(
      update_bin_counts_op,
      update_bin_true_sum_op,
      update_bin_preds_sum_op,
      name='update_op')
  ece = _ece_from_bins(bin_counts, bin_true_sum, bin_preds_sum, name='value')
  return ece, ece_update_op
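The helper `_ece_from_bins` used above is not shown in this example. As a hedged sketch of what it plausibly computes from the three streaming accumulators (the helper's real name and behaviour are assumptions), ECE is the count-weighted average over bins of the gap between per-bin accuracy and per-bin mean confidence:

import tensorflow as tf

def _ece_from_bins_sketch(bin_counts, bin_true_sum, bin_preds_sum, name):
  with tf.name_scope(name):
    # Per-bin empirical accuracy and mean confidence, guarding against empty bins.
    safe_counts = tf.maximum(bin_counts, 1.0)
    bin_accuracy = bin_true_sum / safe_counts
    bin_confidence = bin_preds_sum / safe_counts
    # Count-weighted average of |accuracy - confidence| across the bins.
    weights = bin_counts / tf.maximum(tf.reduce_sum(bin_counts), 1.0)
    return tf.reduce_sum(weights * tf.abs(bin_accuracy - bin_confidence))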
Esempio n. 53
0
def assert_true_mean_in_interval_by_dkwm(
    samples, low, high, expected_low, expected_high,
    false_fail_rate=1e-6, name=None):
  """Asserts the mean of the given distribution is in the given interval.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the mean of the distribution from which the given samples are
  drawn is _outside_ the given interval with statistical significance
  `false_fail_rate` or stronger, otherwise passes.  If you also want
  to check that you are gathering enough evidence that a pass is not
  spurious, see `min_num_samples_for_dkwm_mean_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm`.

  Note that `false_fail_rate` is a total false failure rate for all
  the assertions in the batch.  As such, if the batch is nontrivial,
  the assertion will insist on stronger evidence to fail any one member.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s)
      of interest.  Entries are assumed IID across the 0th dimension.
      The other dimensions must broadcast with `low` and `high`.
      The support is bounded: `low <= samples <= high`.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    expected_low: Floating-point `Tensor` of lower bounds on the
      expected true means.
    expected_high: Floating-point `Tensor` of upper bounds on the
      expected true means.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected mean
      interval does not overlap with the corresponding confidence
      interval.
  """
  args_list = [samples, low, high, expected_low, expected_high, false_fail_rate]
  with tf.name_scope(
      name, "assert_true_mean_in_interval_by_dkwm", args_list):
    dtype = dtype_util.common_dtype(args_list, tf.float32)
    samples = tf.convert_to_tensor(samples, name="samples", dtype=dtype)
    low = tf.convert_to_tensor(low, name="low", dtype=dtype)
    high = tf.convert_to_tensor(high, name="high", dtype=dtype)
    expected_low = tf.convert_to_tensor(
        expected_low, name="expected_low", dtype=dtype)
    expected_high = tf.convert_to_tensor(
        expected_high, name="expected_high", dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        false_fail_rate, name="false_fail_rate", dtype=dtype)
    samples = _check_shape_dominates(
        samples, [low, high, expected_low, expected_high])
    min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
        samples, low, high, false_fail_rate)
    # Assert that the interval [min_mean, max_mean] intersects the
    # interval [expected_low, expected_high].  This is true if
    #   max_mean >= expected_low and min_mean <= expected_high.
    # By DeMorgan's law, that's also equivalent to
    #   not (max_mean < expected_low or min_mean > expected_high),
    # which is a way of saying the two intervals are not disjoint.
    check_confidence_interval_can_intersect = tf.assert_greater_equal(
        max_mean,
        expected_low,
        message="Confidence interval does not "
        "intersect: true mean smaller than expected")
    with tf.control_dependencies([check_confidence_interval_can_intersect]):
      return tf.assert_less_equal(
          min_mean,
          expected_high,
          message="Confidence interval does not "
          "intersect: true mean greater than expected")
Esempio n. 54
0
  def _validate_sample_arg(self, x):
    """Helper which validates sample arg, e.g., input to `log_prob`."""
    with tf.name_scope(name="validate_sample_arg", values=[x]):
      x_ndims = (tf.rank(x) if x.shape.ndims is None else x.shape.ndims)
      event_ndims = (
          tf.size(self.event_shape_tensor())
          if self.event_shape.ndims is None else self.event_shape.ndims)
      batch_ndims = (
          tf.size(self._batch_shape_unexpanded)
          if self.batch_shape.ndims is None else self.batch_shape.ndims)
      expected_batch_event_ndims = batch_ndims + event_ndims

      if (isinstance(x_ndims, int) and
          isinstance(expected_batch_event_ndims, int)):
        if x_ndims < expected_batch_event_ndims:
          raise NotImplementedError(
              "Broadcasting is not supported; too few batch and event dims "
              "(expected at least {}, saw {}).".format(
                  expected_batch_event_ndims, x_ndims))
        ndims_assertion = []
      elif self.validate_args:
        ndims_assertion = [
            tf.assert_greater_equal(
                x_ndims,
                expected_batch_event_ndims,
                message=("Broadcasting is not supported; too few "
                         "batch and event dims."),
                name="assert_batch_and_event_ndims_large_enough"),
        ]

      if (self.batch_shape.is_fully_defined() and
          self.event_shape.is_fully_defined()):
        expected_batch_event_shape = np.int32(self.batch_shape.concatenate(
            self.event_shape).as_list())
      else:
        expected_batch_event_shape = tf.concat(
            [
                self.batch_shape_tensor(),
                self.event_shape_tensor(),
            ], axis=0)

      sample_ndims = x_ndims - expected_batch_event_ndims
      if isinstance(sample_ndims, int):
        sample_ndims = max(sample_ndims, 0)
      if (isinstance(sample_ndims, int) and
          x.shape[sample_ndims:].is_fully_defined()):
        actual_batch_event_shape = np.int32(x.shape[sample_ndims:].as_list())
      else:
        sample_ndims = tf.maximum(sample_ndims, 0)
        actual_batch_event_shape = tf.shape(x)[sample_ndims:]

      if (isinstance(expected_batch_event_shape, np.ndarray) and
          isinstance(actual_batch_event_shape, np.ndarray)):
        if any(expected_batch_event_shape != actual_batch_event_shape):
          raise NotImplementedError("Broadcasting is not supported; "
                                    "unexpected batch and event shape "
                                    "(expected {}, saw {}).".format(
                                        expected_batch_event_shape,
                                        actual_batch_event_shape))
        # We need to set the final runtime assertions to `ndims_assertion`, since
        # it's possible this assertion was created. We could add a condition to
        # only do so if `self.validate_args == True`; however, this is redundant,
        # as `ndims_assertion` already encodes this information.
        runtime_assertions = ndims_assertion
      elif self.validate_args:
        # We need to make the `ndims_assertion` a control dep because otherwise
        # TF itself might raise an exception owing to this assertion being
        # ill-defined, i.e., one cannot even compare Tensors of different rank.
        with tf.control_dependencies(ndims_assertion):
          shape_assertion = tf.assert_equal(
              expected_batch_event_shape,
              actual_batch_event_shape,
              message=("Broadcasting is not supported; "
                       "unexpected batch and event shape."),
              name="assert_batch_and_event_shape_same")
        runtime_assertions = [shape_assertion]
      else:
        runtime_assertions = []

      return runtime_assertions
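This helper appears to come from a BatchReshape-style distribution; assuming so, a hedged sketch of how it is exercised through the public API (shapes are illustrative, and `validate_args=True` is what routes the dynamic shape checks through the returned assertion ops):

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

base = tfd.Normal(loc=tf.zeros([6]), scale=1.0)
dist = tfd.BatchReshape(base, batch_shape=[2, 3], validate_args=True)

# Sample shape [5] on top of batch/event shape [2, 3]: passes validation.
ok = dist.log_prob(tf.zeros([5, 2, 3]))
# dist.log_prob(tf.zeros([5]))  # too few dims: rejected by the ndims check above.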