def testConsistent(self):
   nums, divs = self.intTestData()
   with self.test_session():
     tf_result = (
         math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
     ).eval()
     tf_nums = array_ops.constant(nums)
     tf_divs = array_ops.constant(divs)
     tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
     np_result = (nums // divs) * divs + (nums % divs)
      # consistency with numpy
     self.assertAllEqual(tf_result, np_result)
      # consistency with the two forms of divide
     self.assertAllEqual(tf_result, tf2_result)
     # consistency for truncation form
     tf3_result = (
         math_ops.truncatediv(nums, divs) * divs
         + math_ops.truncatemod(nums, divs)
     ).eval()
     expanded_nums = np.reshape(np.tile(nums, divs.shape[1]),
                                (nums.shape[0], divs.shape[1]))
      # The truncation form recovers the numerator
      self.assertAllEqual(tf3_result, expanded_nums)
      # The floor form also recovers the numerator
      self.assertAllEqual(tf_result, expanded_nums)
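Both assertions above rest on the same algebraic fact: whichever rounding convention the division uses, quotient * divisor + remainder reconstructs the numerator. The floor and truncation forms only disagree on how the quotient/remainder pair splits for negative operands. A minimal NumPy sketch of the two conventions (illustrative only, not part of the test suite):

```python
import numpy as np

x, y = np.array([7, -7]), np.array([3, 3])

# Floor form: quotient rounds toward -inf, remainder takes the divisor's sign.
floor_q, floor_r = x // y, x % y           # [2, -3], [1, 2]

# Truncation form: quotient rounds toward 0, remainder takes the numerator's sign.
trunc_q = np.trunc(x / y).astype(x.dtype)  # [2, -2]
trunc_r = x - trunc_q * y                  # [1, -1]

# Both pairs reconstruct the numerator.
assert np.array_equal(floor_q * y + floor_r, x)
assert np.array_equal(trunc_q * y + trunc_r, x)
```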
Example #2
 def testFloorDivideInt(self):
   nums, divs = self.intTestData()
   tf_result = math_ops.floor_div(nums, divs)
   np_result = self.numpySafeFloorDivInt(nums, divs)
   self.assertAllEqual(tf_result, np_result)
   tf2_result = (array_ops.constant(nums) // array_ops.constant(divs))
   self.assertAllEqual(tf2_result, tf_result)
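The helper `numpySafeFloorDivInt` is referenced but not shown in these snippets. A minimal sketch of what it plausibly does, assuming its job is to pin the one input where plain NumPy floor division is implementation-defined (INT_MIN // -1) to the two's-complement wraparound value; the name and exact behavior here are assumptions, not the verbatim helper:

```python
import numpy as np

def numpy_safe_floor_div_int(x, y):
  # Elementwise x // y, with the INT_MIN // -1 overflow pinned to the
  # wraparound value so that x == q * y + r holds in fixed-width arithmetic.
  q = x // y
  overflow = (x == np.iinfo(x.dtype).min) & (y == -1)
  q[overflow] = np.iinfo(x.dtype).min
  return q
```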
Example #3
    def update_state(self, labels, predictions, weights=None):
        """
        Arg:
            labels: shape of (#batch, #num_joint, 3) : num_joint=18
            predictions: shape of (#batch, #height, #width, #num_joint) : num_joint=18
            weights: shape of (#batch, #height*output_stride, #width*output_stride)
        """
        b, h, w, c = predictions.get_shape()
        euclid_dists = []
        masks = []
        predictions = array_ops.reshape(
            array_ops.transpose(predictions, (0, 3, 1, 2)), (-1, h * w))
        # weights = image.resize(weights, (h, w), align_corners=True,
        #                        method=image.ResizeMethod.NEAREST_NEIGHBOR)
        a_ = []
        labels = array_ops.reshape(labels, (-1, 3))
        if weights is None:
            weights = array_ops.ones_like(predictions)
        else:
            weights = array_ops.tile(array_ops.expand_dims(weights, axis=-1),
                                     [1, 1, 1, c])
            weights = array_ops.reshape(
                array_ops.transpose(weights, (0, 3, 1, 2)), (-1, h * w))

        for prediction, label, weight in zip(array_ops.unstack(predictions),
                                             array_ops.unstack(labels),
                                             array_ops.unstack(weights)):
            prediction = array_ops.where(
                math_ops.equal(weight, 1), prediction,
                array_ops.ones_like(prediction) *
                array_ops.constant(np.nan, dtype=dtypes.float32))
            arg_index = math_ops.arg_max(prediction, 0)
            masks.append(
                clip_ops.clip_by_value(label[2], 0., 1.) *
                array_ops.gather(weight, arg_index))
            col, row = arg_index % w, math_ops.floor_div(arg_index, w)
            arg_coord = math_ops.cast(array_ops.stack([col, row]),
                                      dtype=dtypes.float32)
            # arg_coord = (self.output_stride / 2 - 0.5) + self.output_stride * arg_coord
            a_.append(arg_coord)

            # Squared Euclidean distance between the argmax location and the
            # labeled keypoint coordinates.
            euclid_dists.append(math_ops.reduce_sum(
                (arg_coord - label[:2])**2))

        masks = array_ops.reshape(array_ops.stack(masks), (b, c))
        euclid_dists = array_ops.reshape(array_ops.stack(euclid_dists), (b, c))
        self.a = array_ops.reshape(array_ops.stack(a_), (b, c, 2))

        with fops.control_dependencies([self.count]):
            self.update_ops.append(
                state_ops.assign_add(self.count,
                                     math_ops.reduce_sum(masks, axis=0)))

        # `operator` is assumed to be supplied by the enclosing scope (it is
        # not defined in this snippet); it maps the squared distances to the
        # metric's per-keypoint value.
        add_value = operator(euclid_dists) * masks

        with fops.control_dependencies([self.total_value]):
            self.update_ops.append(
                state_ops.assign_add(self.total_value,
                                     math_ops.reduce_sum(add_value, axis=0)))
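The coordinate recovery in the loop above is the standard flat-index decomposition: for a row-major `[h, w]` map flattened to length `h * w`, `index % w` gives the column and `floor_div(index, w)` gives the row. A minimal standalone sketch:

```python
import numpy as np

h, w = 4, 6
heatmap = np.zeros((h, w), dtype=np.float32)
heatmap[2, 5] = 1.0                       # peak at row 2, column 5

arg_index = int(np.argmax(heatmap.reshape(-1)))
col, row = arg_index % w, arg_index // w  # mirrors the % / floor_div pair above
assert (row, col) == (2, 5)
```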
Example #4
 def testFloorDivModIntEdges(self):
   for dtype in [np.int32, np.int64]:
     x, y = self.intEdgeTestData(dtype)
     tf_floor_div = math_ops.floor_div(x, y)
     np_floor_div = self.numpySafeFloorDivInt(x, y)
     self.assertAllEqual(tf_floor_div, np_floor_div)
     tf_floor_mod = math_ops.floormod(x, y)
     np_floor_mod = self.numpySafeFloorModInt(x, y)
     self.assertAllEqual(tf_floor_mod, np_floor_mod)
     z = math_ops.add(math_ops.multiply(tf_floor_div, y), tf_floor_mod)
     # x = floor_div(x, y) * y + floor_mod(x, y)
     self.assertAllEqual(z, np.broadcast_to(x, z.shape))
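The edge data exercises the one integer input where the identity is delicate: `INT_MIN // -1` overflows. The test's final assertion implies the kernel wraps two's-complement style so the identity still holds in fixed-width arithmetic. A small plain-Python illustration (the wrapped quotient/remainder below are assumptions encoded by that assertion, checked modulo 2**32 for int32):

```python
INT_MIN = -2**31
x, y = INT_MIN, -1

# Mathematically INT_MIN // -1 == 2**31, which does not fit in int32.
# Assume the quotient wraps to INT_MIN with remainder 0; then
# floor_div(x, y) * y + floor_mod(x, y) still equals x in int32 arithmetic.
q, r = INT_MIN, 0
assert (q * y + r) % 2**32 == x % 2**32
```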
Example #5
 def testConsistent(self):
   nums, divs = self.intTestData()
   with self.test_session():
     tf_result = (
         math_ops.floor_div(nums, divs) * divs + math_ops.floor_mod(nums, divs)
     ).eval()
     tf_nums = array_ops.constant(nums)
     tf_divs = array_ops.constant(divs)
     tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
     np_result = (nums // divs) * divs + (nums % divs)
     self.assertAllEqual(tf_result, np_result)
     self.assertAllEqual(tf_result, tf2_result)
Example #6
def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])

  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
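The formula in the docstring follows from `x mod y = x - floor(x / y) * y`: away from the discontinuities of mod, the floor term is locally constant, so the partial derivatives are 1 with respect to `x` and `-floor(x / y)` with respect to `y`. A quick finite-difference sanity check of that identity (illustrative, not part of the registered gradient):

```python
import numpy as np

x, y, eps = 5.3, 2.0, 1e-5
dmod_dx = (np.mod(x + eps, y) - np.mod(x - eps, y)) / (2 * eps)
dmod_dy = (np.mod(x, y + eps) - np.mod(x, y - eps)) / (2 * eps)
assert abs(dmod_dx - 1.0) < 1e-3
assert abs(dmod_dy + np.floor(x / y)) < 1e-3  # the y partial is -floor(x / y)
```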
Example #7
 def testDivideInt(self):
   nums, divs = self.intTestData()
   with self.test_session():
     tf_result = math_ops.floor_div(nums, divs).eval()
     np_result = nums // divs
     self.assertAllEqual(tf_result, np_result)
Example #8
 def testDivideInt(self):
     nums, divs = self.intTestData()
     with self.cached_session():
         tf_result = math_ops.floor_div(nums, divs).eval()
         np_result = nums // divs
         self.assertAllEqual(tf_result, np_result)
Example #9
 def testDivideInt(self):
     nums, divs = self.intTestData()
     tf_result = math_ops.floor_div(nums, divs)
     np_result = nums // divs
     self.assertAllEqual(tf_result, np_result)
Example #10
def sample(dim, num_samples=None, sample_indices=None, dtype=None, name=None):
  r"""Returns a sample from the `m` dimensional Halton sequence.

  Warning: The sequence elements take values only between 0 and 1. Care must be
  taken to appropriately transform the domain of a function if it differs from
  the unit cube before evaluating integrals using Halton samples. It is also
  important to remember that quasi-random numbers are not a replacement for
  pseudo-random numbers in every context. Quasi-random numbers are completely
  deterministic and typically have significant negative autocorrelation (unless
  randomized).

  Computes the members of the low discrepancy Halton sequence in dimension
  `dim`. The d-dimensional sequence takes values in the unit hypercube in d
  dimensions. Currently, only dimensions up to 1000 are supported. The prime
  base for the `k`-th axis is the k-th prime starting from 2. For example,
  if dim = 3, then the bases will be [2, 3, 5] respectively and the first
  element of the sequence will be: [0.5, 0.333, 0.2]. For a more complete
  description of the Halton sequences see:
  https://en.wikipedia.org/wiki/Halton_sequence. For low discrepancy sequences
  and their applications see:
  https://en.wikipedia.org/wiki/Low-discrepancy_sequence.

  The user must supply either `num_samples` or `sample_indices` but not both.
  The former is the number of samples to produce starting from the first
  element. If `sample_indices` is given instead, the specified elements of
  the sequence are generated. For example, sample_indices=tf.range(10) is
  equivalent to specifying num_samples=10.

  Example Use:

  ```python
  bf = tf.contrib.bayesflow

  # Produce the first 1000 members of the Halton sequence in 3 dimensions.
  num_samples = 1000
  dim = 3
  sample = bf.halton_sequence.sample(dim, num_samples=num_samples)

  # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
  # hypercube.
  powers = tf.range(1.0, limit=dim + 1)
  integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
  true_value = 1.0 / tf.reduce_prod(powers + 1.0)
  with tf.Session() as session:
    values = session.run((integral, true_value))

  # Produces a relative absolute error of 1.7%.
  print ("Estimated: %f, True Value: %f" % values)

  # Now skip the first 1000 samples and recompute the integral with the next
  # thousand samples. The sample_indices argument can be used to do this.


  sample_indices = tf.range(start=1000, limit=1000 + num_samples,
                            dtype=tf.int32)
  sample_leaped = halton.sample(dim, sample_indices=sample_indices)

  integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
                                                  axis=-1))
  with tf.Session() as session:
    values = session.run((integral_leaped, true_value))
  # Now produces a relative absolute error of 0.05%.
  print ("Leaped Estimated: %f, True Value: %f" % values)
  ```

  Args:
    dim: Positive Python `int` representing each sample's `event_size`. Must
      not be greater than 1000.
    num_samples: (Optional) positive Python `int`. The number of samples to
      generate. Either this parameter or sample_indices must be specified but
      not both. If this parameter is None, then the behaviour is determined by
      the `sample_indices`.
    sample_indices: (Optional) `Tensor` of dtype int32 and rank 1. The elements
      of the sequence to compute specified by their position in the sequence.
      The entries index into the Halton sequence starting with 0 and hence,
      must be whole numbers. For example, sample_indices=[0, 5, 6] will produce
      the first, sixth and seventh elements of the sequence. If this parameter
      is None, then the `num_samples` parameter must be specified which gives
      the number of desired samples starting from the first sample.
    dtype: (Optional) The dtype of the sample. One of `float32` or `float64`.
      Default is `float32`.
    name: (Optional) Python `str` describing ops managed by this function. If
      not supplied the name of this function is used.

  Returns:
    halton_elements: Elements of the Halton sequence. `Tensor` of the supplied
      dtype and shape `[num_samples, dim]` if `num_samples` was specified, or
      shape `[s, dim]` where `s` is the size of `sample_indices` if
      `sample_indices` was specified.

  Raises:
    ValueError: if both `sample_indices` and `num_samples` were specified, or
      if dimension `dim` is less than 1 or greater than 1000.
  """
  if dim < 1 or dim > _MAX_DIMENSION:
    raise ValueError(
        'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,
                                                                 dim))
  if (num_samples is None) == (sample_indices is None):
    raise ValueError('Either `num_samples` or `sample_indices` must be'
                     ' specified but not both.')

  dtype = dtype or dtypes.float32
  if not dtype.is_floating:
    raise ValueError('dtype must be of `float`-type')

  with ops.name_scope(name, 'sample', values=[sample_indices]):
    # Here and in the following, the shape layout is as follows:
    # [sample dimension, event dimension, coefficient dimension].
    # The coefficient dimension is an intermediate axis which will hold the
    # weights of the starting integer when expressed in the (prime) base for
    # an event dimension.
    indices = _get_indices(num_samples, sample_indices, dtype)
    radixes = array_ops.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])

    max_sizes_by_axes = _base_expansion_size(math_ops.reduce_max(indices),
                                             radixes)

    max_size = math_ops.reduce_max(max_sizes_by_axes)

    # The powers of the radixes that we will need. Note that there is a bit
    # of an excess here. Suppose we need the place value coefficients of 7
    # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
    # for base 3. However, we can only create rectangular tensors so we
    # store both expansions in a [2, 3] tensor. This leads to the problem that
    # we might end up attempting to raise large numbers to large powers. For
    # example, the base 2 expansion of 1024 has 11 digits. If we were in 10
    # dimensions, then for the 10th prime (29) we would end up computing 29^10
    # even though we don't need it. We avoid this by setting the exponents for
    # each axis to 0 beyond the maximum value needed for that dimension.
    exponents_by_axes = array_ops.tile([math_ops.range(max_size)], [dim, 1])
    weight_mask = exponents_by_axes > max_sizes_by_axes
    capped_exponents = array_ops.where(
        weight_mask, array_ops.zeros_like(exponents_by_axes), exponents_by_axes)
    weights = radixes ** capped_exponents
    coeffs = math_ops.floor_div(indices, weights)
    coeffs *= 1 - math_ops.cast(weight_mask, dtype)
    coeffs = (coeffs % radixes) / radixes
    return math_ops.reduce_sum(coeffs / weights, axis=-1)
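The digit extraction that `floor_div` performs above is the radical inverse: read off the base-`b` digits of the index and mirror them across the radix point. A scalar pure-Python sketch (no TF), reproducing the first sequence element `[0.5, 0.333, 0.2]` quoted in the docstring:

```python
def radical_inverse(index, base):
  # Mirror the base-`base` digits of `index` across the radix point:
  # index = a0 + a1*b + a2*b^2 + ...  ->  a0/b + a1/b^2 + a2/b^3 + ...
  result, frac = 0.0, 1.0 / base
  while index > 0:
    result += (index % base) * frac
    index //= base
    frac /= base
  return result

# First Halton element in 3 dimensions (bases 2, 3, 5), at index 1.
print([radical_inverse(1, b) for b in (2, 3, 5)])  # [0.5, 0.333..., 0.2]
```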
Example #11
def sample(dim,
           num_results=None,
           sequence_indices=None,
           dtype=None,
           randomized=True,
           seed=None,
           name=None):
  r"""Returns a sample from the `dim` dimensional Halton sequence.

  Warning: The sequence elements take values only between 0 and 1. Care must be
  taken to appropriately transform the domain of a function if it differs from
  the unit cube before evaluating integrals using Halton samples. It is also
  important to remember that quasi-random numbers without randomization are not
  a replacement for pseudo-random numbers in every context. Quasi-random numbers
  are completely deterministic and typically have significant negative
  autocorrelation unless randomization is used.

  Computes the members of the low discrepancy Halton sequence in dimension
  `dim`. The `dim`-dimensional sequence takes values in the unit hypercube in
  `dim` dimensions. Currently, only dimensions up to 1000 are supported. The
  prime base for the k-th axis is the k-th prime starting from 2. For example,
  if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first
  element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more
  complete description of the Halton sequences see:
  https://en.wikipedia.org/wiki/Halton_sequence. For low discrepancy sequences
  and their applications see:
  https://en.wikipedia.org/wiki/Low-discrepancy_sequence.

  If `randomized` is true, this function produces a scrambled version of the
  Halton sequence introduced by Owen in arXiv:1706.02808. For the advantages of
  randomization of low discrepancy sequences see:
  https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo

  The number of samples produced is controlled by the `num_results` and
  `sequence_indices` parameters. The user must supply either `num_results` or
  `sequence_indices` but not both.
  The former is the number of samples to produce starting from the first
  element. If `sequence_indices` is given instead, the specified elements of
  the sequence are generated. For example, sequence_indices=tf.range(10) is
  equivalent to specifying num_results=10.

  Example Use:

  ```python
  bf = tf.contrib.bayesflow

  # Produce the first 1000 members of the Halton sequence in 3 dimensions.
  num_results = 1000
  dim = 3
  sample = bf.halton_sequence.sample(dim, num_results=num_results, seed=127)

  # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
  # hypercube.
  powers = tf.range(1.0, limit=dim + 1)
  integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
  true_value = 1.0 / tf.reduce_prod(powers + 1.0)
  with tf.Session() as session:
    values = session.run((integral, true_value))

  # Produces a relative absolute error of 1.7%.
  print ("Estimated: %f, True Value: %f" % values)

  # Now skip the first 1000 samples and recompute the integral with the next
  # thousand samples. The sequence_indices argument can be used to do this.


  sequence_indices = tf.range(start=1000, limit=1000 + num_results,
                              dtype=tf.int32)
  sample_leaped = halton.sample(dim, sequence_indices=sequence_indices,
                                seed=111217)

  integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
                                                  axis=-1))
  with tf.Session() as session:
    values = session.run((integral_leaped, true_value))
  # Now produces a relative absolute error of 0.05%.
  print ("Leaped Estimated: %f, True Value: %f" % values)
  ```

  Args:
    dim: Positive Python `int` representing each sample's `event_size`. Must
      not be greater than 1000.
    num_results: (Optional) positive Python `int`. The number of samples to
      generate. Either this parameter or sequence_indices must be specified but
      not both. If this parameter is None, then the behaviour is determined by
      the `sequence_indices`.
    sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The
      elements of the sequence to compute specified by their position in the
      sequence. The entries index into the Halton sequence starting with 0 and
      hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will
      produce the first, sixth and seventh elements of the sequence. If this
      parameter is None, then the `num_results` parameter must be specified
      which gives the number of desired samples starting from the first sample.
    dtype: (Optional) The dtype of the sample. One of `float32` or `float64`.
      Default is `float32`.
    randomized: (Optional) bool indicating whether to produce a randomized
      Halton sequence. If True, applies the randomization described in
      Owen (2017) [arXiv:1706.02808].
    seed: (Optional) Python integer to seed the random number generator. Only
      used if `randomized` is True. If not supplied and `randomized` is True,
      no seed is set.
    name: (Optional) Python `str` describing ops managed by this function. If
      not supplied the name of this function is used.

  Returns:
    halton_elements: Elements of the Halton sequence. `Tensor` of the supplied
      dtype and shape `[num_results, dim]` if `num_results` was specified, or
      shape `[s, dim]` where `s` is the size of `sequence_indices` if
      `sequence_indices` was specified.

  Raises:
    ValueError: if both `sequence_indices` and `num_results` were specified, or
      if dimension `dim` is less than 1 or greater than 1000.
  """
  if dim < 1 or dim > _MAX_DIMENSION:
    raise ValueError(
        'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,
                                                                 dim))
  if (num_results is None) == (sequence_indices is None):
    raise ValueError('Either `num_results` or `sequence_indices` must be'
                     ' specified but not both.')

  dtype = dtype or dtypes.float32
  if not dtype.is_floating:
    raise ValueError('dtype must be of `float`-type')

  with ops.name_scope(name, 'sample', values=[sequence_indices]):
    # Here and in the following, the shape layout is as follows:
    # [sample dimension, event dimension, coefficient dimension].
    # The coefficient dimension is an intermediate axis which will hold the
    # weights of the starting integer when expressed in the (prime) base for
    # an event dimension.
    indices = _get_indices(num_results, sequence_indices, dtype)
    radixes = array_ops.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])

    max_sizes_by_axes = _base_expansion_size(math_ops.reduce_max(indices),
                                             radixes)

    max_size = math_ops.reduce_max(max_sizes_by_axes)

    # The powers of the radixes that we will need. Note that there is a bit
    # of an excess here. Suppose we need the place value coefficients of 7
    # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
    # for base 3. However, we can only create rectangular tensors so we
    # store both expansions in a [2, 3] tensor. This leads to the problem that
    # we might end up attempting to raise large numbers to large powers. For
    # example, the base 2 expansion of 1024 has 11 digits. If we were in 10
    # dimensions, then for the 10th prime (29) we would end up computing 29^10
    # even though we don't need it. We avoid this by setting the exponents for
    # each axis to 0 beyond the maximum value needed for that dimension.
    exponents_by_axes = array_ops.tile([math_ops.range(max_size)], [dim, 1])

    # The mask is true for those coefficients that are irrelevant.
    weight_mask = exponents_by_axes >= max_sizes_by_axes
    capped_exponents = array_ops.where(
        weight_mask, array_ops.zeros_like(exponents_by_axes), exponents_by_axes)
    weights = radixes ** capped_exponents
    # The following computes the base b expansion of the indices. Suppose,
    # x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with
    # the vector (1, b, b^2, b^3, ...) will produce
    # (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care
    # about. Noting that all a_i < b by definition of place value expansion,
    # we see that taking the elements mod b of the above vector produces the
    # place value expansion coefficients.
    coeffs = math_ops.floor_div(indices, weights)
    coeffs *= 1 - math_ops.cast(weight_mask, dtype)
    coeffs %= radixes
    if not randomized:
      coeffs /= radixes
      return math_ops.reduce_sum(coeffs / weights, axis=-1)
    coeffs = _randomize(coeffs, radixes, seed=seed)
    # Remove the contribution from randomizing the trailing zero for the
    # axes where max_size_by_axes < max_size. This will be accounted
    # for separately below (using zero_correction).
    coeffs *= 1 - math_ops.cast(weight_mask, dtype)
    coeffs /= radixes
    base_values = math_ops.reduce_sum(coeffs / weights, axis=-1)

    # The randomization used in Owen (2017) does not leave 0 invariant. While
    # we have accounted for the randomization of the first `max_size_by_axes`
    # coefficients, we still need to correct for the trailing zeros. Luckily,
    # this is equivalent to adding a uniform random value scaled so the first
    # `max_size_by_axes` coefficients are zero. The following statements perform
    # this correction.
    zero_correction = random_ops.random_uniform([dim, 1], seed=seed,
                                                dtype=dtype)
    zero_correction /= (radixes ** max_sizes_by_axes)
    return base_values + array_ops.reshape(zero_correction, [-1])
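The vectorized digit extraction described in the comments (floor-divide the index by `(1, b, b^2, ...)`, then reduce mod `b`) is easy to replay in NumPy. A minimal sketch of the non-randomized path, using the `[sample, event, coefficient]` shape layout from above; the sizes here are arbitrary illustrative choices:

```python
import numpy as np

indices = np.arange(1, 5).reshape(-1, 1, 1)           # [sample, 1, 1]
radixes = np.array([2, 3, 5]).reshape(-1, 1)          # [event, 1]
max_size = 3                                          # digits kept per axis
weights = radixes ** np.arange(max_size)              # [event, coefficient]

coeffs = (indices // weights) % radixes               # place-value digits a_i
points = np.sum(coeffs / radixes / weights, axis=-1)  # sum of a_i / b^(i+1)

print(points[0])  # first element: [0.5, 0.333..., 0.2]
```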