Code example #1
def rgb_to_ycbcr(rgb):
    """Map from RGB to YCbCr colorspace."""
    rgb = tf.cast(rgb, dtype=tf.float32)
    r, g, b = tf.unstack(rgb, axis=-1)
    y = r * 0.299 + g * 0.587 + b * 0.114
    cb = r * -0.1687 - g * 0.3313 + b * 0.5
    cr = r * 0.5 - g * 0.4187 - b * 0.0813
    cb += 128.0
    cr += 128.0

    ycbcr = tf.stack((y, cb, cr), axis=-1)
    ycbcr = tf.clip_by_value(ycbcr, 0, 255)
    ycbcr = tf.cast(ycbcr, dtype=tf.int32)
    return ycbcr
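A minimal usage sketch, not part of the original example: it assumes TensorFlow 2.x eager mode with `tf` imported. Under the BT.601 coefficients above, pure red should land near Y=76, Cb=84, Cr=255.

    import tensorflow as tf

    rgb = tf.constant([[[255, 0, 0], [0, 255, 0]]])  # shape [1, 2, 3]
    ycbcr = rgb_to_ycbcr(rgb)
    print(ycbcr.numpy())  # red -> approximately [76, 84, 255]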
Code example #2
    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = tf.unstack(tf.sort(tf.stack(b)), num=len(b))

      i = 0
      result = []

      for item in items:
        result.append(a[i:item])
        i = item + 1

      result.append(a[i:])

      return tf.concat(result, 0)
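A hypothetical call, assuming eager mode and Python-int indices in `b`: the helper slices `a` around each sorted index and concatenates the remaining pieces.

    a = tf.constant([10, 11, 12, 13, 14])
    print(_remove_indices(a, [1, 3]).numpy())  # -> [10 12 14]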
Code example #3
    def test_doc_string_images_case_2(self):
        # Generate fake images.
        images = np.random.choice([0, 1], size=(100, 8, 8, 3))
        n, width, height, channels = images.shape

        # Reshape images to achieve desired autoregressivity.
        reshaped_images = np.transpose(np.reshape(
            images, [n, width * height, channels]),
                                       axes=[0, 2, 1])

        made = tfb.AutoregressiveNetwork(params=1,
                                         event_shape=[width * height],
                                         hidden_units=[20, 20],
                                         activation="relu")

        # Density estimation with MADE.
        #
        # NOTE: Parameterize an autoregressive distribution over an event_shape of
        # [channels, width * height], with univariate Bernoulli conditional
        # distributions.
        distribution = tfd.Autoregressive(
            lambda x: tfd.Independent(  # pylint: disable=g-long-lambda
                tfd.Bernoulli(logits=tf.unstack(made(x), axis=-1)[0],
                              dtype=tf.float32),
                reinterpreted_batch_ndims=2),
            sample0=tf.zeros([channels, width * height], dtype=tf.float32))

        # Construct and fit model.
        x_ = tfkl.Input(shape=(channels, width * height), dtype=tf.float32)
        log_prob_ = distribution.log_prob(x_)
        model = tfk.Model(x_, log_prob_)

        model.compile(optimizer=tf1.train.AdamOptimizer(),
                      loss=lambda _, log_prob: -log_prob)

        batch_size = 10
        model.fit(
            x=reshaped_images,
            y=np.zeros((n, 0), dtype=np.float32),
            batch_size=batch_size,
            epochs=1,
            steps_per_epoch=1,  # Usually `n // batch_size`.
            shuffle=True,
            verbose=True)

        # Use the fitted distribution.
        self.assertAllEqual((7, channels, width * height),
                            distribution.sample(7).shape)
        self.assertAllEqual((n, ),
                            distribution.log_prob(reshaped_images).shape)
Code example #4
File: samplers.py Project: magnawhale/probability
def split_seed(seed, n=2, salt=None, name=None):
    """Splits a seed deterministically into derived seeds."""
    if not isinstance(n, int):  # avoid confusion with salt.
        raise TypeError('`n` must be a python `int`, got {}'.format(repr(n)))
    with tf.name_scope(name or 'split'):
        seed = sanitize_seed(seed, salt=salt)
        if JAX_MODE:
            from jax import random as jaxrand  # pylint: disable=g-import-not-at-top
            return jaxrand.split(seed, n)
        return tf.unstack(
            tf.random.stateless_uniform([n, 2],
                                        seed=seed,
                                        minval=None,
                                        maxval=None,
                                        dtype=SEED_DTYPE))
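This helper is exposed publicly as `tfp.random.split_seed`; a short sketch assuming a recent TensorFlow Probability release imported as `tfp`. Each derived seed can drive an independent `tf.random.stateless_*` op.

    import tensorflow_probability as tfp

    seed1, seed2 = tfp.random.split_seed([42, 0], n=2)
    x = tf.random.stateless_normal([3], seed=seed1)
    y = tf.random.stateless_normal([3], seed=seed2)  # independent of x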
Code example #5
        def _fn(x):
            """MADE parameterized via `masked_autoregressive_default_template`."""
            # TODO(b/67594795): Better support of dynamic shape.
            cond_depth = tf.compat.dimension_value(
                tensorshape_util.with_rank_at_least(conditional_tensor.shape,
                                                    1)[-1])

            input_shape = (np.int32(tensorshape_util.as_list(x.shape))
                           if tensorshape_util.is_fully_defined(x.shape) else
                           tf.shape(x))
            if tensorshape_util.rank(x.shape) == 1:
                x = x[tf.newaxis, ...]
            x = tf.concat([conditional_tensor, x], axis=-1)
            input_depth = tf.compat.dimension_value(
                tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
            if input_depth is None:
                raise NotImplementedError(
                    'Rightmost dimension must be known prior to graph execution.'
                )
            for i, units in enumerate(hidden_layers):
                x = masked_dense(
                    inputs=x,
                    units=units,
                    num_blocks=input_depth,
                    exclusive=(i == 0),
                    activation=activation,
                    *args,  # pylint: disable=keyword-arg-before-vararg
                    **kwargs)
            x = masked_dense(
                inputs=x,
                units=(1 if shift_only else 2) * input_depth,
                num_blocks=input_depth,
                activation=None,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)
            if shift_only:
                x = x[..., cond_depth:]
                x = tf.reshape(x, shape=input_shape)
                return x, None
            else:
                x = x[..., 2 * cond_depth:]
            x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))
            shift, log_scale = tf.unstack(x, num=2, axis=-1)
            which_clip = (tf.clip_by_value if log_scale_clip_gradient else
                          clip_by_value_preserve_gradient)
            log_scale = which_clip(log_scale, log_scale_min_clip,
                                   log_scale_max_clip)
            return shift, log_scale
Code example #6
File: split.py Project: tensorflow/probability
  def _forward_event_shape_tensor(self, input_shape):
    """Shape of a sample from a single batch as a list of `int32` 1D `Tensor`s.

    Args:
      input_shape: `Tensor`, `int32` vector indicating event-portion shape
        passed into `forward` function.

    Returns:
      forward_event_shape_tensor: A list of `Tensor`, `int32` vectors indicating
        event-portion shape after applying `forward`. The length of the list is
        equal to the number of splits.
    """
    # Validate `input_shape` statically if possible and get assertions.
    is_validated = self._validate_input_shape(
        tensorshape_util.constant_value_as_shape(input_shape))
    if is_validated or not self.validate_args:
      assertions = []
    else:
      assertions = self._validate_input_shape_tensor(input_shape)

    with tf.control_dependencies(assertions):
      if self.split_sizes is None:
        split_sizes = tf.convert_to_tensor(
            [input_shape[self.axis] // self.num_splits] * self.num_splits)
      else:
        # Deduce the value of the unknown element of `split_sizes`, if any.
        split_sizes = tf.convert_to_tensor(self.split_sizes)
        split_sizes = tf.where(
            split_sizes < 0,
            input_shape[self.axis] -
            tf.reduce_sum(split_sizes) - 1,  # Cancel the unknown size `-1`.
            split_sizes)

      # Each element of the `output_shape_tensor` list is equal to the
      # `input_shape`, with the corresponding element of `split_sizes`
      # substituted in the `axis` position.
      positive_axis = ps.rank_from_shape(input_shape) + self.axis
      tiled_input_shape = tf.tile(
          input_shape[tf.newaxis, :], [self.num_splits, 1])
      fused_output_shapes = tf.concat([
          tiled_input_shape[:, :positive_axis],
          split_sizes[..., tf.newaxis],
          tiled_input_shape[:, positive_axis + 1:]], axis=1)

      output_shapes = tf.unstack(fused_output_shapes, num=self.num_splits)
      return [tf.identity(tf.convert_to_tensor(
          t, dtype_hint=tf.int32, name='forward_event_shape'))
              for t in output_shapes]
Code example #7
def _masked_autoregressive_shift_and_log_scale_fn(hidden_units,
                                                  shift_only=False,
                                                  activation="relu",
                                                  name=None,
                                                  **kwargs):
    params = 1 if shift_only else 2
    layer = tfb.AutoregressiveNetwork(params,
                                      hidden_units=hidden_units,
                                      activation=activation,
                                      name=name,
                                      **kwargs)

    if shift_only:
        return lambda x: (layer(x)[..., 0], None)

    return lambda x: tf.unstack(layer(x), axis=-1)
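A hypothetical way to wire this helper into a flow, assuming `tfd`/`tfb` are `tfp.distributions`/`tfp.bijectors`; the underlying `AutoregressiveNetwork` infers its event size on the first call.

    maf = tfd.TransformedDistribution(
        distribution=tfd.Sample(tfd.Normal(0., 1.), sample_shape=[2]),
        bijector=tfb.MaskedAutoregressiveFlow(
            _masked_autoregressive_shift_and_log_scale_fn([16, 16])))
    x = maf.sample(5)  # shape [5, 2]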
Code example #8
def ycbcr_to_rgb(ycbcr):
    """Map from YCbCr to Colorspace."""
    ycbcr = tf.cast(ycbcr, dtype=tf.float32)
    y, cb, cr = tf.unstack(ycbcr, axis=-1)

    cb -= 128.0
    cr -= 128.0

    r = y * 1. + cb * 0. + cr * 1.402
    g = y * 1. - cb * 0.34414 - cr * 0.71414
    b = y * 1. + cb * 1.772 + cr * 0.

    rgb = tf.stack((r, g, b), axis=-1)
    rgb = tf.clip_by_value(rgb, 0, 255)
    rgb = tf.cast(rgb, dtype=tf.int32)
    return rgb
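A quick round-trip sketch pairing this with `rgb_to_ycbcr` from code example #1; because both directions truncate to `int32`, the reconstruction is only accurate to within a small per-channel rounding error.

    rgb = tf.random.uniform([8, 8, 3], maxval=256, dtype=tf.int32)
    rgb2 = ycbcr_to_rgb(rgb_to_ycbcr(rgb))
    print(tf.reduce_max(tf.abs(rgb - rgb2)).numpy())  # small, but not always 0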
Code example #9
def split(state, num):
    """Creates new independent RNG states from an existing state.

  Args:
    state: the existing state.
    num: the number of the new states.

  Returns:
    A tuple of new states.
  """
    state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)
    state = _key2seed(state)
    states = tf.random.experimental.stateless_split(state, num)
    states = tf.unstack(states, num)
    states = tf.nest.map_structure(_seed2key, states)
    return states
Code example #10
    def call(self, env_output, neck_state):
        """Runs the entire episode given time-major tensors.

    Args:
      env_output: An `EnvOutput` tuple with following expectations:
        reward - Unused
        done - A boolean tensor of shape [num_timesteps, batch_size].
        observation - A nested structure with individual tensors that have first
          two dimensions equal to [num_timesteps, batch_size].
        info - Unused
      neck_state: A tensor or nested structure with individual tensors that have
        first dimension equal to batch_size and no time dimension.

    Returns:
      An `AgentOutput` tuple with individual tensors that have first two
        dimensions equal to [num_timesteps, batch_size]
    """
        unused_reward, done, observation, unused_info = env_output
        # Add current time_step and batch_size.
        self._current_num_timesteps = tf.shape(done)[0]
        self._current_batch_size = tf.shape(done)[1]

        torso_output = utils.batch_apply(self._torso, observation)
        # shape: [num_timesteps, batch_size, ...], where the trailing dimensions
        # are the same as the trailing dimensions of `neck_state`.
        reset_state = self._get_reset_state(observation, done, neck_state)
        neck_output_list = []
        for timestep, d in enumerate(tf.unstack(done)):
            neck_input = utils.get_row_nested_tensor(torso_output, timestep)
            # If the episode ended, the neck state should be reset before the next
            # step.
            curr_timestep_reset_state = utils.get_row_nested_tensor(
                reset_state, timestep)
            neck_state = tf.nest.map_structure(
                lambda reset_state, state: tf.compat.v1.where(
                    d, reset_state, state), curr_timestep_reset_state,
                neck_state)
            neck_output, neck_state = self._neck(neck_input, neck_state)
            neck_output_list.append(neck_output)

        head_input = tf.nest.map_structure(lambda *tensors: tf.stack(tensors),
                                           *neck_output_list)
        head_output = utils.batch_apply(self._head, head_input)
        assert isinstance(head_output, common.AgentOutput)
        return head_output, neck_state
Code example #11
    def call(self, inputs):
        value, index = inputs
        if self.cache.shape == value.shape:
            self.cache = value
            return value

        shape = self.cache.shape.as_list()
        num_index_axes = index.shape[0]
        num_batch_axes = self.num_batch_axes
        num_feature_axes = len(shape) - num_index_axes - num_batch_axes
        features_shape = shape[num_batch_axes + num_index_axes:]
        batch_shape = shape[:num_batch_axes]

        value_index_shape = tf.shape(value)[num_batch_axes:-num_feature_axes]
        if tf.reduce_max(value_index_shape) > 1:
            # This is a block update starting at index.
            value_ranges = []
            for i, s in enumerate(tf.unstack(value_index_shape)):
                curr_range = tf.range(index[i], index[i] + s)
                value_ranges.append(curr_range)

            batch_ranges = [tf.range(s) for s in batch_shape]

            mesh = tf.meshgrid(*(batch_ranges + value_ranges), indexing='ij')
            indices = tf.stack(mesh, axis=-1)
            indices = tf.reshape(indices,
                                 [-1, num_index_axes + num_batch_axes])
        else:
            # This is a single update at index position.
            batch_ranges = [tf.range(s) for s in batch_shape]
            mesh = tf.meshgrid(*batch_ranges, indexing='ij')
            batch_indices = tf.stack(mesh, axis=-1)
            batch_indices = tf.reshape(batch_indices, [-1, num_batch_axes])

            # Add leading axes to nd-index and tile to get batched indices.
            shape_indices = tf.reshape(index, [1] * num_batch_axes + [-1])
            shape_indices = tf.tile(shape_indices, batch_shape + [1])
            shape_indices = tf.reshape(shape_indices, [-1, num_index_axes])

            indices = tf.concat([batch_indices, shape_indices], axis=-1)

        # We need to squeeze nd-axes from value before updating.
        value = tf.reshape(value, [-1] + features_shape)
        self.cache = tf.tensor_scatter_nd_update(self.cache, indices, value)
        return self.cache
Code example #12
def split_seed(seed, count):
    """Splits a seed into `count` seeds."""
    if _is_stateful_seed(seed):
        if seed is None:
            return count * [None]
        return [
            np.random.RandomState(seed + i).randint(0, 2**31)
            for i in range(count)
        ]
    else:
        seeds = tf.random.stateless_uniform(
            [count, 2],
            seed=make_tensor_seed(seed),
            minval=None,
            maxval=None,
            dtype=tf.int32,
        )
        return tf.unstack(seeds)
Code example #13
 def _bijector_fn(x, **condition_kwargs):
     if conditioning is not None:
         x = tf.concat([conditioning, x], axis=-1)
         cond_depth = tf.compat.dimension_value(
             tensorshape_util.with_rank_at_least(
                 conditioning.shape, 1)[-1])
     else:
         cond_depth = 0
     params = shift_and_log_scale_fn(x, **condition_kwargs)
     if tf.is_tensor(params):
         shift, log_scale = tf.unstack(params, num=2, axis=-1)
     else:
         shift, log_scale = params
     shift = shift[..., cond_depth:]
     log_scale = log_scale[..., cond_depth:]
     return affine_scalar.AffineScalar(shift=shift,
                                       log_scale=log_scale)
Code example #14
def lorenz_system_prior_fn(num_timesteps, innovation_scale, step_size,
                           dtype=tf.float32):
  """Generative process for the Lorenz System model."""
  innovation_scale = tensor_util.convert_nonref_to_tensor(
      innovation_scale, name='innovation_scale', dtype=dtype)
  step_size = tensor_util.convert_nonref_to_tensor(
      step_size, name='step_size', dtype=dtype)
  loc = yield Root(tfd.Sample(tfd.Normal(0., 1.), sample_shape=3))
  for _ in range(num_timesteps - 1):
    x, y, z = tf.unstack(loc, axis=-1)
    dx = 10 * (y - x)
    dy = x * (28 - z) - y
    dz = x * y - 8 / 3 * z
    delta = tf.stack([dx, dy, dz], axis=-1)
    loc = yield tfd.Independent(
        tfd.Normal(loc + step_size * delta,
                   tf.sqrt(step_size) * innovation_scale),
        reinterpreted_batch_ndims=1)
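A hypothetical driver for the generator above, assuming `tfd = tfp.distributions` and `Root = tfd.JointDistributionCoroutine.Root` as in the TFP sources: wrapping it in a coroutine joint distribution yields one 3-vector state per timestep.

    lorenz = tfd.JointDistributionCoroutine(
        lambda: lorenz_system_prior_fn(
            num_timesteps=30, innovation_scale=0.1, step_size=0.02))
    states = lorenz.sample()  # tuple of 30 tensors, each of shape [3]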
Code example #15
def _check_and_get_mask(logu, joint_sample_mask=None, validate_args=False):
    """Helper function for creating masks for joint/marginal samples.

  The function is designed to do:
    - when `joint_sample_mask` is provided, check and validate the mask.
    - when `joint_sample_mask` is not provided, generate a default mask.

  Variational bounds on mutual information require knowing which
  elements of the score matrix correspond to positive elements
  (sampled from joint distribution `p(x,y)`) and negative elements
  (sampled from marginal distribution `p(x)p(y)`). By default, we assume
  that the diagonal elements of scores contain positive pairs, and all
  other elements are negatives.

  Args:
    logu: `float`-like `Tensor` representing scores to be masked.
    joint_sample_mask: `bool`-like `Tensor` of the same shape as `logu`,
      marking the joint samples with `True`, i.e. samples from the joint
      distribution `p(x, y)`.
      Default value: `None`. By default, an identity matrix is constructed as
      the mask.
    validate_args: Python `bool`, default `False`. Whether to validate input
      with asserts. If `validate_args` is `False`, and the inputs are invalid,
      correct behavior is not guaranteed.

  Returns:
    logu: `float`-like `Tensor` based on input `logu`.
    joint_sample_mask: `bool`-like `Tensor` for joint samples.
  """
    with tf.name_scope('get_default_mask'):
        logu = tf.convert_to_tensor(logu, dtype_hint=tf.float32, name='logu')

        if joint_sample_mask is None:
            num_rows, num_cols = tf.unstack(tf.shape(logu)[-2:])
            joint_sample_mask = tf.eye(num_rows, num_cols, dtype=tf.bool)
        else:
            joint_sample_mask = tf.convert_to_tensor(joint_sample_mask,
                                                     dtype_hint=tf.bool,
                                                     name='joint_sample_mask')
            with tf.control_dependencies(
                    _check_mask_shape(logu, joint_sample_mask
                                      ) if validate_args else []):
                joint_sample_mask = tf.identity(joint_sample_mask)
        return logu, joint_sample_mask
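A hypothetical call with the default mask, assuming eager mode: for a square score matrix the mask is simply the identity, marking the diagonal entries as joint samples.

    scores = tf.random.normal([4, 4])
    logu, mask = _check_and_get_mask(scores)
    print(mask.numpy())  # True on the diagonal, False elsewhere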
Code example #16
  def get_center_coordinates_and_sizes(self, scope=None):
    """Computes the center coordinates, height and width of the boxes.

    Args:
      scope: name scope of the function.

    Returns:
      a list of 4 1-D tensors [ycenter, xcenter, height, width].
    """
    if not scope:
      scope = 'get_center_coordinates_and_sizes'
    with tf.name_scope(scope):
      box_corners = self.get()
      ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(a=box_corners))
      width = xmax - xmin
      height = ymax - ymin
      ycenter = ymin + height / 2.
      xcenter = xmin + width / 2.
      return [ycenter, xcenter, height, width]
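The same math works on a bare [N, 4] corner tensor without the BoxList wrapper; a standalone sketch of the transpose-then-unstack idiom used above:

    box_corners = tf.constant([[0., 0., 2., 4.]])  # [ymin, xmin, ymax, xmax]
    ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(a=box_corners))
    height, width = ymax - ymin, xmax - xmin
    ycenter, xcenter = ymin + height / 2., xmin + width / 2.
    # -> ycenter=[1.], xcenter=[2.], height=[2.], width=[4.]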
Code example #17
def split(state, num):
    """Creates new independent RNG states from an existing state.

  Args:
    state: the existing state.
    num: the number of the new states.

  Returns:
    A tuple of new states.
  """
    state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)
    state = _key2seed(state)
    try:
        states = tf.random.experimental.stateless_split(state, num)
    except AttributeError as e:  # pylint: disable=unused-variable
        # TODO(afrozm): For TF < 2.3 we need to do this. Delete once 2.3 launches.
        states = stateless_split(state, num)
    states = tf.unstack(states, num)
    states = tf.nest.map_structure(_seed2key, states)
    return states
Code example #18
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
    """Compute new dynamic shape for resize_to_range method."""
    image_shape = tf.shape(input=image)
    orig_height = tf.cast(image_shape[0], dtype=tf.float32)
    orig_width = tf.cast(image_shape[1], dtype=tf.float32)
    num_channels = image_shape[2]
    orig_min_dim = tf.minimum(orig_height, orig_width)
    # Calculates the larger of the possible sizes
    min_dimension = tf.constant(min_dimension, dtype=tf.float32)
    large_scale_factor = min_dimension / orig_min_dim
    # Scaling orig_(height|width) by large_scale_factor will make the smaller
    # dimension equal to min_dimension, save for floating point rounding errors.
    # For reasonably-sized images, taking the nearest integer will reliably
    # eliminate this error.
    large_height = tf.cast(tf.round(orig_height * large_scale_factor),
                           dtype=tf.int32)
    large_width = tf.cast(tf.round(orig_width * large_scale_factor),
                          dtype=tf.int32)
    large_size = tf.stack([large_height, large_width])
    if max_dimension:
        # Calculates the smaller of the possible sizes, use that if the larger
        # is too big.
        orig_max_dim = tf.maximum(orig_height, orig_width)
        max_dimension = tf.constant(max_dimension, dtype=tf.float32)
        small_scale_factor = max_dimension / orig_max_dim
        # Scaling orig_(height|width) by small_scale_factor will make the larger
        # dimension equal to max_dimension, save for floating point rounding
        # errors. For reasonably-sized images, taking the nearest integer will
        # reliably eliminate this error.
        small_height = tf.cast(tf.round(orig_height * small_scale_factor),
                               dtype=tf.int32)
        small_width = tf.cast(tf.round(orig_width * small_scale_factor),
                              dtype=tf.int32)
        small_size = tf.stack([small_height, small_width])
        new_size = tf.cond(pred=tf.cast(tf.reduce_max(input_tensor=large_size),
                                        dtype=tf.float32) > max_dimension,
                           true_fn=lambda: small_size,
                           false_fn=lambda: large_size)
    else:
        new_size = large_size
    return tf.stack(tf.unstack(new_size) + [num_channels])
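A hypothetical check of the two branches, assuming eager mode: for a 300x600 image with min_dimension=320 and max_dimension=512, the small-size branch wins because scaling the short side up to 320 would push the long side past 512.

    image = tf.zeros([300, 600, 3])
    new_size = _compute_new_dynamic_size(image, min_dimension=320,
                                         max_dimension=512)
    print(new_size.numpy())  # -> [256 512 3]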
Code example #19
  def testExampleDoc1(self):
    seed = tfp_test_util.test_seed(sampler_type='stateless')
    model = TestModel()

    num_seeds = len(tf.nest.flatten(model.dtype))
    flat_seed = tf.unstack(tfp.random.split_seed(seed, num_seeds), axis=0)
    seed = tf.nest.pack_sequence_as(model.dtype, flat_seed)

    unconstrained_values = tf.nest.map_structure(
        lambda d, s, seed: tf.random.stateless_normal(s, dtype=d, seed=seed),
        model.dtype,
        model.event_shape,
        seed,
    )
    constrained_values = nest.map_structure_up_to(
        model.default_event_space_bijector,
        lambda b, v: b(v),
        model.default_event_space_bijector,
        unconstrained_values,
    )
    self.assertGreaterEqual(self.evaluate(constrained_values), 0.)
Code example #20
def split_seed(seed, n=2, salt=None, name=None):
    """Splits a seed into `n` derived seeds.

  See https://github.com/tensorflow/probability/blob/main/PRNGS.md
  for details.

  Args:
    seed: The seed to split; may be an `int`, an `(int, int) tuple`, or a
      `Tensor`. `int` seeds are converted to `Tensor` seeds using
      `tf.random.uniform` stateful sampling. Tuples are converted to `Tensor`.
    n: The number of splits to return. In TensorFlow, if `n` is an integer, this
      function returns a list of seeds and otherwise returns a `Tensor` of
      seeds.  In JAX, this function always returns an array of seeds.
    salt: Optional `str` salt to mix with the seed.
    name: Optional name to scope related ops.

  Returns:
    seeds: If `n` is a Python `int`, a `tuple` of seed values is returned. If
      `n` is an int `Tensor`, a single `Tensor` of shape `[n, 2]` is returned. A
      single such seed is suitable to pass as the `seed` argument of the
      `tf.random.stateless_*` ops.
  """
    if not (isinstance(n, int) or isinstance(n, np.ndarray)
            or tf.is_tensor(n)):  # avoid confusion with salt.
        raise TypeError(
            '`n` must be a python `int` or an int Tensor, got {}'.format(
                repr(n)))
    with tf.name_scope(name or 'split_seed'):
        seed = sanitize_seed(seed, salt=salt)
        if JAX_MODE:
            from jax import random as jaxrand  # pylint: disable=g-import-not-at-top
            return jaxrand.split(seed, int(n))
        seeds = tf.random.stateless_uniform([n, 2],
                                            seed=seed,
                                            minval=None,
                                            maxval=None,
                                            dtype=SEED_DTYPE)
        if isinstance(n, six.integer_types):
            seeds = tf.unstack(seeds)
        return seeds
Code example #21
File: image_ops.py Project: tallamjr/google-research
def central_crop(features, crop_size, keys=("image", )):
    """Center crops given input.

  Args:
    features: Input features dictionary.
    crop_size: Output resolution.
    keys: Fields in features which need to be cropped.

  Returns:
    Cropped features.
  """
    h_c, w_c = crop_size
    for key in keys:
        h, w = tf.unstack(tf.shape(features[key]))[:2]
        h_offset = (h - h_c) // 2
        w_offset = (w - w_c) // 2
        features[key] = features[key][h_offset:h_offset + h_c,
                                      w_offset:w_offset + w_c]
    for key in keys:
        features[key].set_shape([h_c, w_c] + features[key].get_shape()[2:])

    return features
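A hypothetical call, assuming eager mode: cropping a 224x224 image to 128x128 keeps the centered window for every listed key.

    features = {"image": tf.zeros([224, 224, 3])}
    features = central_crop(features, crop_size=(128, 128))
    print(features["image"].shape)  # -> (128, 128, 3)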
Code example #22
    def test_doc_string(self):
        # Generate data.
        n = 2000
        x2 = np.random.randn(n).astype(dtype=np.float32) * 2.
        x1 = np.random.randn(n).astype(dtype=np.float32) + (x2 * x2 / 4.)
        data = np.stack([x1, x2], axis=-1)

        # Density estimation with MADE.
        made = tfb.AutoregressiveNetwork(params=2, hidden_units=[10, 10])

        distribution = tfd.TransformedDistribution(
            distribution=tfd.Normal(loc=0., scale=1.),
            bijector=tfb.MaskedAutoregressiveFlow(
                lambda x: tf.unstack(made(x), num=2, axis=-1)),
            event_shape=[2])

        # Construct and fit model.
        x_ = tfkl.Input(shape=(2, ), dtype=tf.float32)
        log_prob_ = distribution.log_prob(x_)
        model = tfk.Model(x_, log_prob_)

        model.compile(optimizer=tf1.train.AdamOptimizer(),
                      loss=lambda _, log_prob: -log_prob)

        batch_size = 25
        model.fit(
            x=data,
            y=np.zeros((n, 0), dtype=np.float32),
            batch_size=batch_size,
            epochs=1,
            steps_per_epoch=1,  # Usually `n // batch_size`.
            shuffle=True,
            verbose=True)

        # Use the fitted distribution.
        self.assertAllEqual((3, 1, 2), distribution.sample((3, 1)).shape)
        self.assertAllEqual(
            (3, ),
            distribution.log_prob(np.ones((3, 2), dtype=np.float32)).shape)
Code example #23
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
    """Clip bounding boxes to a window.

  This op clips any input bounding boxes (represented by bounding box
  corners) to a window, optionally filtering out boxes that do not
  overlap at all with the window.

  Args:
    boxlist: BoxList holding M_in boxes
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip boxes.
    filter_nonoverlapping: whether to filter out boxes that do not overlap at
      all with the window.
    scope: name scope.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
    with tf.name_scope(scope, 'ClipToWindow'):
        y_min, x_min, y_max, x_max = tf.split(value=boxlist.get(),
                                              num_or_size_splits=4,
                                              axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
        y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
        x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
        x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
        clipped = box_list.BoxList(
            tf.concat(
                [y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
                1))
        clipped = _copy_extra_fields(clipped, boxlist)
        if filter_nonoverlapping:
            areas = area(clipped)
            nonzero_area_indices = tf.cast(
                tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
            clipped = gather(clipped, nonzero_area_indices)
        return clipped
Code example #24
def _get_permutations(num_results, dims, seed):
    """Uniform iid sample from the space of permutations.

  Draws a sample of size `num_results` from the group of permutations of degrees
  specified by the `dims` tensor. These are packed together into one tensor
  such that each row is one sample from each of the dimensions in `dims`. For
  example, if dims = [2,3] and num_results = 2, the result is a tensor of shape
  [2, 2 + 3] and the first row of the result might look like:
  [1, 0, 2, 0, 1]. The first two elements are a permutation over 2 elements
  while the next three are a permutation over 3 elements.

  Args:
    num_results: A positive scalar `Tensor` of integral type. The number of
      draws from the discrete uniform distribution over the permutation groups.
    dims: A 1D `Tensor` of the same dtype as `num_results`. The degree of the
      permutation groups from which to sample.
    seed: (Optional) Python integer to seed the random number generator.

  Returns:
    permutations: A `Tensor` of shape `[num_results, sum(dims)]` and the same
    dtype as `dims`.
  """
    sample_range = tf.range(num_results)

    def generate_one(d):
        def fn(i):
            if seed is None:
                return tf.random.shuffle(tf.range(d))
            else:
                return stateless.stateless_random_shuffle(tf.range(d),
                                                          seed=(seed + i, d))

        return tf.map_fn(fn,
                         sample_range,
                         parallel_iterations=1 if seed is not None else 10)

    return tf.concat([generate_one(d) for d in tf.unstack(dims)], axis=-1)
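A hypothetical call, assuming eager mode; with seed=None the function falls back to the stateful `tf.random.shuffle`, so the `stateless` module is not exercised.

    perms = _get_permutations(num_results=2, dims=tf.constant([2, 3]),
                              seed=None)
    print(perms.shape)  # -> (2, 5): a 2-permutation next to a 3-permutation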
Code example #25
            def body(i, exchanged_states, is_exchange_proposed_for_kr,
                     is_exchange_accepted_for_kr):
                """Body of while loop for exchanging states."""
                # Propose exchange between replicas indexed by m and n.
                m, n = tf.unstack(exchange_proposed[i])

                # Construct log_accept_ratio:  -temp_diff * target_log_prob_diff.
                # Note target_log_prob_diff = -EnergyDiff (common definition is in terms
                # of energy).
                temp_diff = self.inverse_temperatures[
                    m] - self.inverse_temperatures[n]
                # Difference of target log probs may be +- Inf or NaN.  We want the
                # product of this with the temperature difference to have "alt value" of
                # -Inf.
                log_accept_ratio = mcmc_util.safe_sum([
                    -temp_diff * target_log_probs[m],
                    temp_diff * target_log_probs[n]
                ])

                is_exchange_accepted = log_uniforms[i] < log_accept_ratio

                if self._exchange_between_adjacent_only:
                    exchange_edge = tf.minimum(m, n)
                    is_exchange_proposed_for_kr = is_exchange_proposed_for_kr.write(
                        exchange_edge, True)
                    is_exchange_accepted_for_kr = is_exchange_accepted_for_kr.write(
                        exchange_edge, is_exchange_accepted)

                for k in range(num_state_parts):
                    new_m, new_n = _swap(is_exchange_accepted,
                                         old_states[k].read(m),
                                         old_states[k].read(n))
                    exchanged_states[k] = exchanged_states[k].write(m, new_m)
                    exchanged_states[k] = exchanged_states[k].write(n, new_n)

                return (i + 1, exchanged_states, is_exchange_proposed_for_kr,
                        is_exchange_accepted_for_kr)
Code example #26
def batch_decode(encoded_boxes, box_coder, anchors):
    """Decode a batch of encoded boxes.

  This op takes a batch of encoded bounding boxes and transforms
  them to a batch of bounding boxes specified by their corners in
  the order of [y_min, x_min, y_max, x_max].

  Args:
    encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      code_size] representing the location of the objects.
    box_coder: a BoxCoder object.
    anchors: a BoxList of anchors used to encode `encoded_boxes`.

  Returns:
    decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
      coder_size] representing the corners of the objects in the order
      of [y_min, x_min, y_max, x_max].

  Raises:
    ValueError: if batch sizes of the inputs are inconsistent, or if
    the number of anchors inferred from encoded_boxes and anchors are
    inconsistent.
  """
    encoded_boxes.get_shape().assert_has_rank(3)
    if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
        raise ValueError(
            'The number of anchors inferred from encoded_boxes'
            ' and anchors are inconsistent: shape[1] of encoded_boxes'
            ' %s should be equal to the number of anchors: %s.' %
            (encoded_boxes.get_shape()[1].value, anchors.num_boxes_static()))

    decoded_boxes = tf.stack([
        box_coder.decode(boxes, anchors).get()
        for boxes in tf.unstack(encoded_boxes)
    ])
    return decoded_boxes
Code example #27
File: keras_tensor_test.py Project: yule9527/keras
    def test_repr_and_string(self):
        kt = keras_tensor.KerasTensor(
            type_spec=tf.TensorSpec(shape=(1, 2, 3), dtype=tf.float32))
        expected_str = ("KerasTensor(type_spec=TensorSpec(shape=(1, 2, 3), "
                        "dtype=tf.float32, name=None))")
        expected_repr = "<KerasTensor: shape=(1, 2, 3) dtype=float32>"
        self.assertEqual(expected_str, str(kt))
        self.assertEqual(expected_repr, repr(kt))

        kt = keras_tensor.KerasTensor(type_spec=tf.TensorSpec(shape=(2, ),
                                                              dtype=tf.int32),
                                      inferred_value=[2, 3])
        expected_str = ("KerasTensor(type_spec=TensorSpec(shape=(2,), "
                        "dtype=tf.int32, name=None), inferred_value=[2, 3])")
        expected_repr = (
            "<KerasTensor: shape=(2,) dtype=int32 inferred_value=[2, 3]>")
        self.assertEqual(expected_str, str(kt))
        self.assertEqual(expected_repr, repr(kt))

        kt = keras_tensor.KerasTensor(
            type_spec=tf.SparseTensorSpec(shape=(1, 2, 3), dtype=tf.float32))
        expected_str = ("KerasTensor(type_spec=SparseTensorSpec("
                        "TensorShape([1, 2, 3]), tf.float32))")
        expected_repr = ("<KerasTensor: type_spec=SparseTensorSpec("
                         "TensorShape([1, 2, 3]), tf.float32)>")
        self.assertEqual(expected_str, str(kt))
        self.assertEqual(expected_repr, repr(kt))

        inp = layers.Input(shape=(3, 5))
        kt = layers.Dense(10)(inp)
        expected_str = (
            "KerasTensor(type_spec=TensorSpec(shape=(None, 3, 10), "
            "dtype=tf.float32, name=None), name='dense/BiasAdd:0', "
            "description=\"created by layer 'dense'\")")
        expected_repr = (
            "<KerasTensor: shape=(None, 3, 10) dtype=float32 (created "
            "by layer 'dense')>")
        self.assertEqual(expected_str, str(kt))
        self.assertEqual(expected_repr, repr(kt))

        kt = tf.reshape(kt, shape=(3, 5, 2))
        expected_str = (
            "KerasTensor(type_spec=TensorSpec(shape=(3, 5, 2), dtype=tf.float32, "
            "name=None), name='tf.reshape/Reshape:0', description=\"created "
            "by layer 'tf.reshape'\")")
        expected_repr = (
            "<KerasTensor: shape=(3, 5, 2) dtype=float32 (created "
            "by layer 'tf.reshape')>")
        self.assertEqual(expected_str, str(kt))
        self.assertEqual(expected_repr, repr(kt))

        kts = tf.unstack(kt)
        for i in range(3):
            expected_str = (
                "KerasTensor(type_spec=TensorSpec(shape=(5, 2), dtype=tf.float32, "
                "name=None), name='tf.unstack/unstack:%s', description=\"created "
                "by layer 'tf.unstack'\")" % (i, ))
            expected_repr = ("<KerasTensor: shape=(5, 2) dtype=float32 "
                             "(created by layer 'tf.unstack')>")
            self.assertEqual(expected_str, str(kts[i]))
            self.assertEqual(expected_repr, repr(kts[i]))
Code example #28
 def neg_logp(vals):
     args = merge_flat_args(vals, flat_data)
     return -joint_dist.log_prob(
         tf.nest.pack_sequence_as(structure, tf.unstack(args)))
Code example #29
def quap(joint_dist,
         data=None,
         max_tries=20,
         initial_position=None,
         name=None):
    """Compute a quadratic approximation to a ``JointDistributionNamed``.


  Traverses a JointDistribution*, uses BFGS to minimize the negative
  log probability and estimate the Hessian, and returns a JointDistribution of
  the same type, whose component distributions are all Gaussians with
  covariances set appropriately.

  Args:
    joint_dist: A `JointDistributionNamed` or `JointDistributionSequential`
      model. Also works with auto batched versions of the same.
    data: Optional `dict` of data to condition the joint_dist with. The return
      value will be conditioned on this data. If this is `None`, the return
      value will be a quadratic approximation to the distribution itself.
    max_tries: Optional `int` number of times to run the optimizer internally
      before raising a `RuntimeError`. Default is 20.
    initial_position: Optional `dict` to initialize the optimizer. Keys should
      correspond to names in the JointDistribution. Defaults to random draws
      from `joint_dist`.
    name: Python `str` name prefixed to ops created by this function.
      Default value: `None` (i.e., 'quap').

  Returns:
    `JointDistributionNamed` which is a quadratic approximation to the input
    `joint_dist`, conditioned on `data`.

  Raises:
    RuntimeError: In case the optimizer does not converge within `max_tries`.
  """
    with tf.name_scope(name or "quap"):
        max_tries = tf.convert_to_tensor(max_tries)
        structure = joint_dist.sample()

        # A dictionary is the only structure that does not already
        # have None's as placeholders
        if isinstance(data, dict):
            data = {k: data.get(k) for k in structure}

        if data is None:
            data = tf.nest.map_structure(lambda j: None, structure)

        data = tf.nest.map_structure(lambda j: None if j is None else j, data)
        flat_data = tf.nest.flatten(data)

        def try_optimize(idx, opt):  # pylint: disable=unused-argument
            locs = tf.nest.flatten(joint_dist.sample(value=initial_position))
            locs = [j for idx, j in enumerate(locs) if flat_data[idx] is None]

            def neg_logp_and_grad(vals):
                def neg_logp(vals):
                    args = merge_flat_args(vals, flat_data)
                    return -joint_dist.log_prob(
                        tf.nest.pack_sequence_as(structure, tf.unstack(args)))

                return tfp.math.value_and_gradient(neg_logp, vals)

            return idx + 1, tfp.optimizer.bfgs_minimize(
                neg_logp_and_grad, locs)

        def should_stop(idx, opt):
            return (idx < max_tries) & ~opt.converged

        idx = tf.constant(0, dtype=max_tries.dtype)
        idx, opt = try_optimize(idx, None)
        _, opt = tf.while_loop(should_stop, try_optimize, [idx, opt])

        with tf.control_dependencies(
            [tf.debugging.Assert(condition=opt.converged, data=opt)]):
            dists = {}
            stddevs = tf.sqrt(tf.linalg.diag_part(
                opt.inverse_hessian_estimate))

            gaussians = tf.nest.map_structure(tfd.Normal,
                                              tf.unstack(opt.position),
                                              tf.unstack(stddevs))
            dists = merge_flat_args(gaussians, flat_data)
            dists = [
                v if isinstance(v, tfd.Distribution) else tfd.Deterministic(v)
                for v in dists
            ]

            approx = joint_dist.__class__(tf.nest.pack_sequence_as(
                structure, dists),
                                          name=name)
            return approx
Code example #30
File: ode_test.py Project: manda-creator/probability
 def ode_fn(_, state):
     state = tf.stack(state, axis=0)
     jacobian_tensor = tf.convert_to_tensor(jacobian, dtype=tf.float64)
     return tf.unstack(
         tf.squeeze(tf.matmul(jacobian_tensor, state[:, tf.newaxis])))
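A hypothetical driver for `ode_fn`, assuming `tfp` is TensorFlow Probability and `jacobian` is a [2, 2] nested list (here a stable diagonal system). The list-of-scalars state matches the stack/unstack round trip inside the function.

    jacobian = [[-1., 0.], [0., -2.]]
    solver = tfp.math.ode.DormandPrince()
    results = solver.solve(
        ode_fn, initial_time=0.,
        initial_state=[tf.constant(1., tf.float64),
                       tf.constant(1., tf.float64)],
        solution_times=[1.])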