Beispiel #1
0
def max_pool(x, pool_size, strides, padding):
    """Performs an N-D max pooling.

  Args:
    x: ndarray of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]`. Pooling happens
      over the spatial dimensions only.
    pool_size: sequence of N ints.
    strides: sequence of N ints.
    padding: a string, the padding algorithm. Must be "SAME" or "VALID".

  Returns:
    An (N+2)-D array,  of shape
      [batch_size] + output_spatial_shape + [num_channels],
    where `output_spatial_shape` depends on the value of padding:
    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
  """
    x = asarray(x)
    return asarray(
        tf.nn.pool(input=x,
                   window_shape=pool_size,
                   pooling_type="MAX",
                   strides=strides,
                   padding=padding))
Beispiel #2
0
def uniform(key,
            shape,
            dtype=random.DEFAULT_RANDN_DTYPE,
            minval=0.,
            maxval=1.):
    """Sample uniform random values in range [`minval`, `maxval`).

  Args:
    key: the RNG key.
    shape: the shape of the result.
    dtype: the dtype of the result.
    minval: the minimal value (inclusive).
    maxval: the maximal value (exclusive).

  Returns:
    An ndarray with shape `shape` and dtype `dtype`. Each value in the ndarray
    is sampled uniformly randomly in range [`minval`, `maxval`).
  """
    key = asarray(key, dtype=_RNG_KEY_DTYPE)
    return asarray(
        tf.random.stateless_uniform(shape,
                                    seed=_key2seed(key),
                                    dtype=dtype,
                                    minval=minval,
                                    maxval=maxval))
Beispiel #3
0
    def testAsArray(self):
        for a, dtype in itertools.product(self.all_arrays, self.all_types):
            self.match(array_ops.asarray(a, dtype=dtype),
                       np.asarray(a, dtype=dtype))

        zeros_list = array_ops.zeros(5)
        # Same instance is returned if no dtype is specified and input is ndarray.
        self.assertIs(array_ops.asarray(zeros_list), zeros_list)
        # Different instance is returned if dtype is specified and input is ndarray.
        self.assertIsNot(array_ops.asarray(zeros_list, dtype=int), zeros_list)
Beispiel #4
0
def conv(inp,
         fltr,
         window_strides,
         padding,
         dimension_numbers,
         filter_dilation=None):
    """Convolution over an N-D array.

  See https://www.tensorflow.org/api_docs/python/tf/nn/convolution and
  https://www.tensorflow.org/xla/operation_semantics#conv_convolution for
  reference.

  Args:
    inp: an (N+2)-D array. The input of the convolution.
    fltr: an (N+2)-D array. The filter (i.e. kernel) of the convolution.
    window_strides: a sequence of N ints, the strides for moving the
      convolution window.
    padding: a string, either "VALID" or "SAME". The padding algorithm.
    dimension_numbers: a tuple of three strings encoding the data format of
      input, filter and output. "I" means input; "O" means output; "C" means
      channel; other characters such as "W", "H" and "D" means spatial
      dimensions.
    filter_dilation: the dilation rates for the filter. Dilating the filter
      means adding "holes" to the filter.

  Returns:
    An (N+2)-D array. The convolution result.
  """
    input_spec, filter_spec, output_spec = dimension_numbers
    if input_spec != output_spec:
        raise ValueError(
            "Input and output data formats must be the same; got %s "
            "and %s" % (input_spec, output_spec))
    supported_filter_spec = ["WIO", "HWIO", "DHWIO"]
    if filter_spec not in supported_filter_spec:
        raise ValueError(
            "The supported data format for the filter are %s; got %s" %
            (supported_filter_spec, filter_spec))
    if input_spec[1:-1] != filter_spec[:-2]:
        raise ValueError(
            "Input data format (%s) is not compatible with filter "
            "data format (%s)" % (input_spec, filter_spec))
    # No type promotion in order to prevent accidentally doing more expensive
    # computation.
    inp = asarray(inp)
    fltr = asarray(fltr)
    return asarray(
        tf.nn.convolution(input=inp.data,
                          filters=fltr.data,
                          padding=padding,
                          strides=window_strides,
                          dilations=filter_dilation,
                          data_format=input_spec))
Beispiel #5
0
 def testGradNonScalarOutput(self):
   def f(a):
     return a
   g = extensions.grad(f)
   with self.assertRaisesWithPredicateMatch(
       ValueError, r"result .* must be a scalar"):
     g(asarray([1.0, 2.0]))
   @extensions.jit
   def g_jitted(a):
     return extensions.grad(f)(a)
   g_jitted(asarray(1.0))
   with self.assertRaisesWithPredicateMatch(
       ValueError, r"result .* must be a scalar"):
     g_jitted(asarray([1.0, 2.0]))
Beispiel #6
0
def normal(key, shape, dtype=tf.float32):
    """Sample standard-normal random values.

  Args:
    key: the RNG key.
    shape: the shape of the result.
    dtype: the dtype of the result.

  Returns:
    Random values in standard-normal distribution.
  """
    key = asarray(key, dtype=_RNG_KEY_DTYPE)
    return asarray(
        tf.random.stateless_normal(shape, seed=_key2seed(key), dtype=dtype))
Beispiel #7
0
 def testGradNonArrayOutput(self):
   def f(_):
     return 1.0
   g = extensions.grad(f)
   with self.assertRaisesWithPredicateMatch(
       ValueError, r"result .* must be an ndarray"):
     g(asarray(1.0))
Beispiel #8
0
def logsumexp(x, axis=None, keepdims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `x` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.
  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.
  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(x), rank(x))`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    The reduced tensor.
  """
  return asarray(tf.math.reduce_logsumexp(input_tensor=x.data, axis=axis,
                                          keepdims=keepdims))
Beispiel #9
0
  def testAxisName(self):
    devices = self._get_two_devices(require_same_type=True)

    def reduce_sum(f):
      return extensions.psum(f, axis_name="foo")

    data = array_ops.asarray(tf.convert_to_tensor(value=[1, 3]))
    pmapped = extensions.pmap(reduce_sum, axis_name="foo", devices=devices)
    pmapped(data)
Beispiel #10
0
 def testEvalOnShapesNoUnnecessaryTracing(self):
   def num_traces(f):
     return len(f._tf_function.
                _list_all_concrete_functions_for_serialization())
   def check_trace_only_once(arg1, arg2):
     @extensions.eval_on_shapes
     def f(a):
       return a + 1
     self.assertAllEqual(0, num_traces(f))
     f(arg1)
     self.assertAllEqual(1, num_traces(f))
     f(arg2)
     self.assertAllEqual(1, num_traces(f))
   check_trace_only_once(1, 2)
   check_trace_only_once(1.1, 2.1)
   check_trace_only_once(asarray(1), asarray(2))
   check_trace_only_once(tf.convert_to_tensor(value=1),
                         tf.convert_to_tensor(value=2))
Beispiel #11
0
  def testWrongAxisName(self):
    devices = self._get_two_devices(require_same_type=True)

    def reduce_sum(f):
      return extensions.psum(f, axis_name="bar")

    data = array_ops.asarray(tf.convert_to_tensor(value=[1, 3]))
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"axis_name (.*) is not equal to that of the surrounding"):
      pmapped = extensions.pmap(reduce_sum, axis_name="foo", devices=devices)
      pmapped(data)
Beispiel #12
0
  def testPsum(self):
    devices = self._get_two_devices(require_same_type=True)

    def reduce_sum(f):
      return extensions.psum(f)

    data = array_ops.asarray(tf.convert_to_tensor(value=[1, 3]))
    pmapped = extensions.pmap(reduce_sum, devices=devices)
    result = pmapped(data)

    self.assertAllClose(result[0], 4)
    self.assertAllClose(result[1], 4)
Beispiel #13
0
def prng(s):
    """Creates RNG state from seed.

  Args:
    s: the seed, an integer.

  Returns:
    An RNG state, as a scalar array of dtype `np.int64`.
  """
    # TODO(wangpeng): Become bitwise-identical to JAX when TF stateless RNGs get
    #   improved.
    return asarray(s, dtype=_RNG_KEY_DTYPE)
Beispiel #14
0
  def testNoNestedPmap(self):
    devices = self._get_two_devices(require_same_type=True)

    def f(x):
      return x + 1.0

    data = array_ops.asarray(tf.convert_to_tensor(value=[1, 3]))
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"Nested pmap is not supported"):
      f = extensions.pmap(f, devices=devices)
      f = extensions.pmap(f, devices=devices)
      f(data)
Beispiel #15
0
  def testPmean(self):
    if extensions.tpu_devices():
      self.skipTest("pmean for TPU is not supported yet")
    devices = self._get_two_devices(require_same_type=True)

    def reduce_mean(f):
      return extensions.pmean(f)

    data = array_ops.asarray(tf.convert_to_tensor(value=[1, 3]))
    pmapped = extensions.pmap(reduce_mean, devices=devices)
    result = pmapped(data)

    self.assertAllClose(result[0], 2)
    self.assertAllClose(result[1], 2)
Beispiel #16
0
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):  # pylint: disable=missing-docstring
    if dtype:
        dtype = utils.result_type(dtype)
    a = array_ops.asarray(a, dtype).data

    if offset == 0:
        a_shape = a.shape
        if a_shape.rank is not None:
            rank = len(a_shape)
            if (axis1 == -2 or axis1 == rank - 2) and (axis2 == -1
                                                       or axis2 == rank - 1):
                return utils.tensor_to_ndarray(tf.linalg.trace(a))

    a = array_ops.diagonal(a, offset, axis1, axis2)
    return array_ops.sum(a, -1, dtype)
Beispiel #17
0
def split(state, num):  # pylint: disable=unused-argument
  """Creates new independent RNG states from an existing state.

  This implementation doesn't pass RNG states explicitly, so all RNG states
  are assumed to be zeros.

  Args:
    state: the existing state (unused).
    num: the number of the new states.

  Returns:
    A tuple of new states.
  """
  # TODO(wangpeng): change it to use stateless random ops to truely mimic JAX
  #   RNGs
  return (asarray(0, dtype=np.int64),) * num
Beispiel #18
0
def split(state, num):
    """Creates new independent RNG states from an existing state.

  Args:
    state: the existing state.
    num: the number of the new states.

  Returns:
    A tuple of new states.
  """
    state = asarray(state, dtype=_RNG_KEY_DTYPE)
    state = _key2seed(state)
    states = tf.random.experimental.stateless_split(state, num)
    states = tf.unstack(states, num)
    states = tf.nest.map_structure(_seed2key, states)
    return states
Beispiel #19
0
def bernoulli(key, mean=np.float32(0.5), shape=None):
    """Sample Bernoulli random values with given shape and mean.

  Args:
    key: the RNG key.
    mean: optional, an array_like broadcastable to `shape` for the mean of the
      random variables (default 0.5).
    shape: optional, a tuple of nonnegative integers representing the shape
      (default to `mean`'s shape).

  Returns:
    A random array with the specified shape and boolean dtype.
  """
    mean = asarray(mean)
    if shape is None:
        shape = mean.shape
    return uniform(key, shape) < mean
Beispiel #20
0
def prng(s):
  """Creates RNG state from seed.

  This implementation doesn't pass RNG states explicitly so the result is
  always a dummy 0.

  Args:
    s: the seed, an integer.

  Returns:
    A dummy integer 0.
  """
  # TODO(wangpeng): change it to use stateless random ops to truely mimic JAX
  #   RNGs
  random.seed(s)
  # Returning None will cause errors in some layer/optimizer libraries based on
  # JAX
  return asarray(0, dtype=np.int64)
Beispiel #21
0
def _seed2key(a):
  """Converts an RNG seed to an RNG key.

  Args:
    a: an RNG seed, a tensor of shape [2] and dtype `tf.int32`.

  Returns:
    an RNG key, an ndarray of shape [] and dtype `np.int64`.
  """
  def int32s_to_int64(a):
    """Converts an int32 tensor of shape [2] to an int64 tensor of shape []."""
    a = tf.bitwise.bitwise_or(
        tf.cast(a[0], tf.uint64),
        tf.bitwise.left_shift(tf.cast(a[1], tf.uint64),
                              tf.constant(32, tf.uint64)))
    a = tf.cast(a, tf.int64)
    return a
  return asarray(int32s_to_int64(a))
Beispiel #22
0
def dataset_as_numpy(dataset):
    """Converts a `tf.data.Dataset` to an iterable of ndarrays.

  `dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
  and `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This
  function must be run in eager mode outside tf.function.

  Args:
    dataset: a possibly nested structure of `tf.data.Dataset`s and/or
      `tf.Tensor`s.

  Returns:
    A structure matching `dataset` where `tf.data.Dataset`s are converted to
    generators of ndarrays and `tf.Tensor`s are converted to ndarrays.
  """
    if not tf.executing_eagerly():
        raise ValueError(
            "dataset_as_numpy must be run in eager mode outside tf.function")
    nested_ds = dataset
    del dataset

    # Flatten
    flat_ds = tf.nest.flatten(nested_ds)
    flat_np = []

    # Type check for Tensors and Datasets
    for ds_el in flat_ds:
        if not isinstance(ds_el, (tf.Tensor, tf.data.Dataset)):
            types = tf.nest.map_structure(type, nested_ds)
            raise ValueError(
                "Arguments to dataset_as_numpy must be (possibly nested "
                "structure of) tf.Tensors or tf.data.Datasets. Got: %s" %
                types)

    for ds_el in flat_ds:
        if isinstance(ds_el, tf.Tensor):
            np_el = asarray(ds_el)
        elif isinstance(ds_el, tf.data.Dataset):
            np_el = _eager_dataset_iterator(ds_el)
        else:
            assert False
        flat_np.append(np_el)

    return tf.nest.pack_sequence_as(nested_ds, flat_np)
Beispiel #23
0
def meshgrid(*xi, **kwargs):
    """This currently requires copy=True and sparse=False."""
    sparse = kwargs.get('sparse', False)
    if sparse:
        raise ValueError('tf.numpy doesnt support returning sparse arrays yet')

    copy = kwargs.get('copy', True)
    if not copy:
        raise ValueError('tf.numpy only supports copy=True')

    indexing = kwargs.get('indexing', 'xy')

    xi = [array_ops.asarray(arg).data for arg in xi]
    kwargs = {'indexing': indexing}

    outputs = tf.meshgrid(*xi, **kwargs)
    outputs = [utils.tensor_to_ndarray(output) for output in outputs]

    return outputs
Beispiel #24
0
def _scalar(tf_fn, x, promote_to_float=False):
    """Computes the tf_fn(x) for each element in `x`.

  Args:
    tf_fn: function that takes a single Tensor argument.
    x: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    promote_to_float: whether to cast the argument to a float dtype
      (`dtypes.default_float_type`) if it is not already.

  Returns:
    An ndarray with the same shape as `x`. The default output dtype is
    determined by `dtypes.default_float_type`, unless x is an ndarray with a
    floating point type, in which case the output type is same as x.dtype.
  """
    x = array_ops.asarray(x)
    if promote_to_float and not np.issubdtype(x.dtype, np.inexact):
        x = x.astype(dtypes.default_float_type())
    return utils.tensor_to_ndarray(tf_fn(x.data))
Beispiel #25
0
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):  # pylint: disable=missing-docstring
    a = array_ops.asarray(a).data

    if offset == 0:
        a_shape = a.shape
        if a_shape.rank is not None:
            rank = len(a_shape)
            if (axis1 == -2 or axis1 == rank - 2) and (axis2 == -1
                                                       or axis2 == rank - 1):
                return utils.tensor_to_ndarray(tf.linalg.trace(a))

    a_rank = tf.rank(a)
    if axis1 < 0:
        axis1 += a_rank
    if axis2 < 0:
        axis2 += a_rank

    minaxis = tf.minimum(axis1, axis2)
    maxaxis = tf.maximum(axis1, axis2)

    # Move axes of interest to the end.
    range_rank = tf.range(a_rank)
    perm = tf.concat([
        range_rank[0:minaxis], range_rank[minaxis + 1:maxaxis],
        range_rank[maxaxis + 1:], [axis1, axis2]
    ],
                     axis=0)
    a = tf.transpose(a, perm)

    a_shape = tf.shape(a)

    # All zeros since diag_part doesn't handle all possible k (aka offset).
    # Written this way since cond will run shape inference on both branches,
    # and diag_part shape inference will fail when offset is out of bounds.
    a, offset = utils.cond(
        utils.logical_or(
            utils.less_equal(offset, -1 * utils.getitem(a_shape, -2)),
            utils.greater_equal(offset, utils.getitem(a_shape, -1)),
        ), lambda: (tf.zeros_like(a), 0), lambda: (a, offset))

    a = utils.tensor_to_ndarray(tf.linalg.diag_part(a, k=offset))
    return array_ops.sum(a, -1, dtype)
Beispiel #26
0
 def f(a, b):
   y = array_ops.sum(math_ops.sqrt(math_ops.exp(a)) + b)
   if has_aux:
     return y, array_ops.asarray(1)
   else:
     return y
Beispiel #27
0
def erf(x):
  """Computes the Gauss error function of x element-wise."""
  return asarray(tf.math.erf(x.data))
Beispiel #28
0
def expit(x):
  """Compute 1 / (1 + exp(-x))."""
  return asarray(tf.math.sigmoid(x.data))
Beispiel #29
0
 def testPrng(self):
   self.assertAllEqual(asarray(123, np.int64), extensions.prng(123))
Beispiel #30
0
def uniform(rng, shape, dtype):
  if np.issubdtype(dtype, np.integer):
    minval = None
  else:
    minval = 0
  return array_ops.asarray(rng.uniform(shape=shape, dtype=dtype, minval=minval))