def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=float):
  if dtype:
    dtype = utils.result_type(dtype)
  start = array_creation.asarray(start, dtype=dtype)
  stop = array_creation.asarray(stop, dtype=dtype)
  if num == 0:
    return empty([0], dtype=dtype)
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  step = np.nan
  if endpoint:
    result = tf.linspace(start.data, stop.data, num)
    if num > 1:
      step = (stop - start) / (num - 1)
  else:
    # tf.linspace does not support endpoint=False, so we manually handle it
    # here.
    if num > 1:
      step = (stop - start) / num
      result = tf.linspace(start.data, (stop - step).data, num)
    else:
      result = tf.linspace(start.data, stop.data, num)
  if dtype:
    result = tf.cast(result, dtype)
  if retstep:
    return arrays.tensor_to_ndarray(result), step
  else:
    return arrays.tensor_to_ndarray(result)

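# Illustrative sketch (not part of the library): the endpoint=False branch
# above emulates numpy semantics by shrinking the interval before calling
# tf.linspace. A self-contained check of that arithmetic, using only the `tf`
# import already present in this module:
def _example_linspace_endpoint_false():
  start, stop, num = 0.0, 1.0, 5
  step = (stop - start) / num
  result = tf.linspace(start, stop - step, num)
  # result -> [0.0, 0.2, 0.4, 0.6, 0.8], matching
  # np.linspace(0.0, 1.0, num=5, endpoint=False).
  return result
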
def cumsum(a, axis=None, dtype=None):
  """Returns the cumulative sum of `a` along an axis or the flattened array.

  Uses `tf.cumsum`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional. Axis along which to compute sums. If None, the operation
      is performed on the flattened array.
    dtype: Optional. The type of the output array. If None, defaults to the
      dtype of `a` unless `a` is an integer type with precision less than
      `int`, in which case the output type is `int`.

  Returns:
    An ndarray with the same number of elements as `a`. If `axis` is None,
    the output is a 1-d array, else it has the same shape as `a`.
  """
  a = array_creation.asarray(a, dtype=dtype)

  if dtype is None and tf.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    output_type = np.promote_types(a.dtype, int)
    if output_type != a.dtype:
      a = array_creation.asarray(a, dtype=output_type)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  if axis < 0:
    axis += a.ndim
  assert axis >= 0 and axis < a.ndim
  return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))

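# Illustrative sketch (not part of the library): the `axis=None` path above
# flattens before summing, mirroring np.cumsum. The plain-TF equivalent:
def _example_cumsum_flattened():
  a = tf.constant([[1, 2], [3, 4]])
  flat = tf.reshape(a, [-1])
  # [1, 3, 6, 10], like np.cumsum([[1, 2], [3, 4]]) with the default axis.
  return tf.cumsum(flat, axis=0)
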
def f(x):  # pylint: disable=g-long-lambda
  x = array_creation.asarray(x)
  return array_creation.asarray(
      utils.cond(
          utils.greater(n, tf.rank(x)),
          lambda: array_methods.reshape(
              x, new_shape(n, tf.shape(x.data))).data,
          lambda: x.data))

def nan_reduction(a, axis=None, dtype=None, keepdims=False):
  a = array_creation.asarray(a)
  v = array_creation.asarray(init_val, dtype=a.dtype)
  return reduction(
      array_methods.where(isnan(a), v, a),
      axis=axis,
      dtype=dtype,
      keepdims=keepdims)

def _bin_op(tf_fun, a, b, promote=True):
  if promote:
    a, b = promote_args_types(a, b)
  else:
    a = array_creation.asarray(a)
    b = array_creation.asarray(b)
  return utils.tensor_to_ndarray(tf_fun(a.data, b.data))

def max_pool(x, pool_size, strides, padding):
  """Performs an N-D max pooling.

  Args:
    x: ndarray of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]`. Pooling happens
      over the spatial dimensions only.
    pool_size: sequence of N ints.
    strides: sequence of N ints.
    padding: a string, the padding algorithm. Must be "SAME" or "VALID".

  Returns:
    An (N+2)-D array, of shape
      [batch_size] + output_spatial_shape + [num_channels],
    where `output_spatial_shape` depends on the value of padding:
    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
  """
  x = asarray(x)
  return asarray(
      tf.nn.pool(
          input=x,
          window_shape=pool_size,
          pooling_type="MAX",
          strides=strides,
          padding=padding))

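# Illustrative sketch (not part of the library): the "VALID" output-shape
# formula from the docstring above, checked against tf.nn.pool directly:
def _example_max_pool_valid_shape():
  x = tf.ones([1, 6, 6, 3])
  y = tf.nn.pool(input=x, window_shape=[2, 2], pooling_type="MAX",
                 strides=[2, 2], padding="VALID")
  # ceil((6 - (2 - 1)) / 2) == 3 per spatial dim, so y.shape == [1, 3, 3, 3].
  return y
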
def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
  """Computes the sum of all array elements or along specified axes.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional 0-d or 1-d array_like. Axes along which to compute the
      sum. If None, returns the sum of all elements in the array.
    dtype: Optional. The type of the output array. If None, defaults to the
      dtype of `a` unless `a` is an integer type with precision less than
      `int`, in which case the output type is `int`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    An ndarray.
  """
  # TODO(wangpeng): check that we fully match numpy behavior.
  a = array_creation.asarray(a, dtype=dtype)
  if dtype is None and tf.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    output_type = np.promote_types(a.dtype, int)
    if output_type != a.dtype:
      a = array_creation.asarray(a, dtype=output_type)

  return utils.tensor_to_ndarray(
      tf.reduce_sum(input_tensor=a.data, axis=axis, keepdims=keepdims))

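# Illustrative sketch (not part of the library): the integer up-casting above
# mirrors numpy, where summing a narrow integer dtype widens the result:
def _example_sum_int_promotion():
  output_type = np.promote_types(np.int8, int)
  # On typical 64-bit platforms this is np.int64, which is why summing an
  # int8 array above produces a 64-bit result, just like np.sum.
  return output_type
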
def clip(a, a_min=None, a_max=None):
  """Clips array values to lie within a given range.

  Uses `tf.clip_by_value`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    a_min: array_like. Must be a scalar or a shape that can be broadcast to
      `a.shape`. At least one of `a_min` or `a_max` should be non-None.
    a_max: array_like. Must be a scalar or a shape that can be broadcast to
      `a.shape`. At least one of `a_min` or `a_max` should be non-None.

  Returns:
    An ndarray with trimmed values with the same shape and dtype as `a`.

  Raises:
    ValueError: if both a_min and a_max are None.
  """
  if a_min is None and a_max is None:
    raise ValueError('Both a_min and a_max cannot be None.')
  a = array_creation.asarray(a)
  # Unlike np.clip, tf.clip_by_value requires both min and max values to be
  # specified, so we set them to the smallest/largest values of the array
  # dtype.
  if a_min is None:
    a_min = np.iinfo(a.dtype).min
  if a_max is None:
    a_max = np.iinfo(a.dtype).max
  a_min = array_creation.asarray(a_min, dtype=a.dtype)
  a_max = array_creation.asarray(a_max, dtype=a.dtype)
  return utils.tensor_to_ndarray(
      tf.clip_by_value(a.data, a_min.data, a_max.data))

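# Illustrative sketch (not part of the library): emulating a one-sided
# np.clip with tf.clip_by_value by filling the missing bound from the dtype's
# limits, as done above:
def _example_clip_one_sided():
  a = tf.constant([1, 5, 10], dtype=tf.int32)
  a_min = np.iinfo(np.int32).min  # stand-in for the absent lower bound
  return tf.clip_by_value(a, a_min, 6)  # [1, 5, 6]
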
def prod(a, axis=None, dtype=None, keepdims=None):
  """Computes the product of elements across dimensions of a tensor.

  Uses `tf.reduce_prod`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional 0-d or 1-d array_like. Axes along which to compute
      products. If None, returns the product of all elements in the array.
    dtype: Optional. The type of the output array. If None, defaults to the
      dtype of `a` unless `a` is an integer type with precision less than
      `int`, in which case the output type is `int`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    An ndarray.
  """
  a = array_creation.asarray(a, dtype=dtype)
  if dtype is None and tf.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    output_type = np.promote_types(a.dtype, int)
    if output_type != a.dtype:
      a = array_creation.asarray(a, dtype=output_type)

  return utils.tensor_to_ndarray(
      tf.reduce_prod(input_tensor=a.data, axis=axis, keepdims=keepdims))

def testAsArray(self):
  for a, dtype in itertools.product(self.all_arrays, self.all_types):
    self.match(
        array_creation.asarray(a, dtype=dtype), np.asarray(a, dtype=dtype))

  zeros_list = array_creation.zeros(5)
  # Same instance is returned if no dtype is specified and input is ndarray.
  self.assertIs(array_creation.asarray(zeros_list), zeros_list)
  # Different instance is returned if dtype is specified and input is
  # ndarray.
  self.assertIsNot(array_creation.asarray(zeros_list, dtype=int), zeros_list)

def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):
  dtype = utils.result_type(x1, x2)
  # Cast x1 and x2 to the result_type if needed.
  x1 = array_creation.asarray(x1, dtype=dtype)
  x2 = array_creation.asarray(x2, dtype=dtype)
  x1 = x1.data
  x2 = x2.data
  if cast_bool_to_int and x1.dtype == tf.bool:
    x1 = tf.cast(x1, tf.int32)
    x2 = tf.cast(x2, tf.int32)
  return utils.tensor_to_ndarray(tf_fun(x1, x2))

def _setitem(arr, index, value):
  """Sets the `value` at `index` in the array `arr`.

  This works by replacing the slice at `index` in the tensor with `value`.
  Since tensors are immutable, this builds a new tensor using the `tf.concat`
  op. Currently, only 0-d and 1-d indices are supported.

  Note that this may break gradients e.g.

    a = tf_np.array([1, 2, 3])
    old_a_t = a.data
    with tf.GradientTape(persistent=True) as g:
      g.watch(a.data)
      b = a * 2
      a[0] = 5
    g.gradient(b.data, [a.data])   # [None]
    g.gradient(b.data, [old_a_t])  # [[2., 2., 2.]]

  Here `d_b / d_a` is `[None]` since a.data no longer points to the same
  tensor.

  Args:
    arr: array_like.
    index: scalar or 1-d integer array.
    value: value to set at index.

  Returns:
    ndarray

  Raises:
    ValueError: if `index` is not a scalar or 1-d array.
  """
  # TODO(srbs): Figure out a solution to the gradient problem.
  arr = array_creation.asarray(arr)
  index = array_creation.asarray(index)
  if index.ndim == 0:
    index = ravel(index)
  elif index.ndim > 1:
    raise ValueError('index must be a scalar or a 1-d array.')
  value = array_creation.asarray(value, dtype=arr.dtype)
  if arr.shape[len(index):] != value.shape:
    value = array_creation.full(arr.shape[len(index):], value)
  prefix_t = arr.data[:index.data[0]]
  postfix_t = arr.data[index.data[0] + 1:]
  if len(index) == 1:
    arr._data = tf.concat(  # pylint: disable=protected-access
        [prefix_t, tf.expand_dims(value.data, 0), postfix_t], 0)
  else:
    subarray = arr[index.data[0]]
    _setitem(subarray, index[1:], value)
    arr._data = tf.concat(  # pylint: disable=protected-access
        [prefix_t, tf.expand_dims(subarray.data, 0), postfix_t], 0)

def conv(inp, fltr, window_strides, padding, dimension_numbers,
         filter_dilation=None):
  """Convolution over an N-D array.

  See https://www.tensorflow.org/api_docs/python/tf/nn/convolution and
  https://www.tensorflow.org/xla/operation_semantics#conv_convolution for
  reference.

  Args:
    inp: an (N+2)-D array. The input of the convolution.
    fltr: an (N+2)-D array. The filter (i.e. kernel) of the convolution.
    window_strides: a sequence of N ints, the strides for moving the
      convolution window.
    padding: a string, either "VALID" or "SAME". The padding algorithm.
    dimension_numbers: a tuple of three strings encoding the data formats of
      input, filter and output. "I" means input; "O" means output; "C" means
      channel; other characters such as "W", "H" and "D" mean spatial
      dimensions.
    filter_dilation: the dilation rates for the filter. Dilating the filter
      means adding "holes" to the filter.

  Returns:
    An (N+2)-D array. The convolution result.
  """
  input_spec, filter_spec, output_spec = dimension_numbers
  if input_spec != output_spec:
    raise ValueError(
        "Input and output data formats must be the same; got %s and %s" %
        (input_spec, output_spec))
  supported_filter_spec = ["WIO", "HWIO", "DHWIO"]
  if filter_spec not in supported_filter_spec:
    raise ValueError(
        "The supported data formats for the filter are %s; got %s" %
        (supported_filter_spec, filter_spec))
  if input_spec[1:-1] != filter_spec[:-2]:
    raise ValueError(
        "Input data format (%s) is not compatible with filter data format "
        "(%s)" % (input_spec, filter_spec))
  # No type promotion in order to prevent accidentally doing more expensive
  # computation.
  inp = asarray(inp)
  fltr = asarray(fltr)
  return asarray(
      tf.nn.convolution(
          input=inp.data,
          filters=fltr.data,
          padding=padding,
          strides=window_strides,
          dilations=filter_dilation,
          data_format=input_spec))

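# Illustrative sketch (not part of the library): a 2-D convolution with the
# ("NHWC", "HWIO", "NHWC") dimension numbers accepted above, expressed
# directly with tf.nn.convolution:
def _example_conv_nhwc():
  inp = tf.ones([1, 8, 8, 3])    # batch, height, width, channels ("NHWC")
  fltr = tf.ones([3, 3, 3, 16])  # height, width, in, out ("HWIO")
  out = tf.nn.convolution(input=inp, filters=fltr, strides=[1, 1],
                          padding="SAME", data_format="NHWC")
  return out  # shape [1, 8, 8, 16]: "SAME" with stride 1 keeps spatial dims
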
def testGradNonScalarOutput(self):

  def f(a):
    return a

  g = extensions.grad(f)
  with self.assertRaisesWithPredicateMatch(
      ValueError, r"result .* must be a scalar"):
    g(asarray([1.0, 2.0]))

  @extensions.jit
  def g_jitted(a):
    return extensions.grad(f)(a)

  g_jitted(asarray(1.0))
  with self.assertRaisesWithPredicateMatch(
      ValueError, r"result .* must be a scalar"):
    g_jitted(asarray([1.0, 2.0]))

def tile(a, reps):
  a = array_creation.asarray(a).data
  reps = array_creation.asarray(reps, dtype=tf.int32).reshape([-1]).data

  a_rank = tf.rank(a)
  reps_size = tf.size(reps)
  reps = tf.pad(
      reps, [[tf.math.maximum(a_rank - reps_size, 0), 0]], constant_values=1)
  a_shape = tf.pad(
      tf.shape(a), [[tf.math.maximum(reps_size - a_rank, 0), 0]],
      constant_values=1)
  a = tf.reshape(a, a_shape)

  return arrays.tensor_to_ndarray(tf.tile(a, reps))

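# Illustrative sketch (not part of the library): when `reps` is longer than
# the rank of `a`, np.tile prepends length-1 axes; the tf.pad calls above
# reproduce that. The equivalent manual steps for a small case:
def _example_tile_rank_promotion():
  a = tf.constant([1, 2])    # rank 1, but reps has length 2
  a = tf.reshape(a, [1, 2])  # shape padded on the left with 1s
  return tf.tile(a, [2, 3])  # shape [2, 6], like np.tile([1, 2], (2, 3))
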
def reshape(a, newshape):
  """Reshapes an array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    newshape: 0-d or 1-d array_like.

  Returns:
    An ndarray with the contents and dtype of `a` and shape `newshape`.
  """
  a = array_creation.asarray(a)
  newshape = array_creation.asarray(newshape)
  return utils.tensor_to_ndarray(
      tf.reshape(a.data, utils.get_shape_from_ndarray(newshape)))

def testGradNonArrayOutput(self):

  def f(_):
    return 1.0

  g = extensions.grad(f)
  with self.assertRaisesWithPredicateMatch(
      ValueError, r"result .* must be an ndarray"):
    g(asarray(1.0))

def swapaxes(a, axis1, axis2):
  """Interchanges two axes of an array.

  Args:
    a: array_like. Input array.
    axis1: int. First axis.
    axis2: int. Second axis.

  Returns:
    An ndarray.
  """
  a = array_creation.asarray(a)
  # TODO(wangpeng): handling partial shapes with unknown ranks
  n = len(a.shape)
  if not (-n <= axis1 < n):
    raise ValueError('axis1 must be in range [-%s, %s); got %s' %
                     (n, n, axis1))
  if not (-n <= axis2 < n):
    raise ValueError('axis2 must be in range [-%s, %s); got %s' %
                     (n, n, axis2))
  if axis1 < 0:
    axis1 += n
  if axis2 < 0:
    axis2 += n
  perm = list(range(n))
  perm[axis1] = axis2
  perm[axis2] = axis1
  return transpose(a, perm)

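# Illustrative sketch (not part of the library): the permutation built above
# for a rank-3 array when swapping axes 0 and 2:
def _example_swapaxes_perm():
  n, axis1, axis2 = 3, 0, 2
  perm = list(range(n))
  perm[axis1] = axis2
  perm[axis2] = axis1
  return perm  # [2, 1, 0]; transposing with this perm swaps the two axes
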
def logsumexp(x, axis=None, keepdims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `x` along the dimensions given in `axis`. Unless `keepdims` is
  true, the rank of the tensor is reduced by 1 for each entry in `axis`. If
  `keepdims` is true, the reduced dimensions are retained with length 1. If
  `axis` has no entries, all dimensions are reduced, and a tensor with a
  single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  Args:
    x: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(x), rank(x))`.
    keepdims: If true, retains reduced dimensions with length 1.

  Returns:
    The reduced tensor.
  """
  return asarray(
      tf.math.reduce_logsumexp(
          input_tensor=x.data, axis=axis, keepdims=keepdims))

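# Illustrative sketch (not part of the library): the numerical-stability
# claim in the docstring above, demonstrated with plain TF:
def _example_logsumexp_stability():
  x = tf.constant([1000.0, 1000.0])
  naive = tf.math.log(tf.reduce_sum(tf.exp(x)))  # inf: exp(1000.) overflows
  stable = tf.math.reduce_logsumexp(x)           # ~1000.693 (= 1000 + log 2)
  return naive, stable
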
def compress(condition, a, axis=None):
  """Compresses `a` by selecting values along `axis` with `condition` true.

  Uses `tf.boolean_mask`.

  Args:
    condition: 1-d array of bools. If `condition` is shorter than the array
      axis (or the flattened array if axis is None), it is padded with False.
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional. Axis along which to select elements. If None, `condition`
      is applied on the flattened array.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `condition` is not of rank 1.
  """
  condition = array_creation.asarray(condition, dtype=bool)
  a = array_creation.asarray(a)

  if condition.ndim != 1:
    raise ValueError('condition must be a 1-d array.')
  # `np.compress` treats scalars as 1-d arrays.
  if a.ndim == 0:
    a = ravel(a)

  if axis is None:
    a = ravel(a)
    axis = 0

  if axis < 0:
    axis += a.ndim

  assert axis >= 0 and axis < a.ndim

  # `tf.boolean_mask` requires the first dimensions of array and condition to
  # match. `np.compress` pads condition with False when it is shorter.
  condition_t = condition.data
  a_t = a.data
  if condition.shape[0] < a.shape[axis]:
    padding = tf.fill([a.shape[axis] - condition.shape[0]], False)
    condition_t = tf.concat([condition_t, padding], axis=0)
  return utils.tensor_to_ndarray(
      tf.boolean_mask(tensor=a_t, mask=condition_t, axis=axis))

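# Illustrative sketch (not part of the library): padding a short condition
# with False before tf.boolean_mask, as done above to match np.compress:
def _example_compress_padding():
  a = tf.constant([10, 20, 30, 40])
  condition = tf.constant([True, False])  # shorter than a
  padding = tf.fill([4 - 2], False)
  mask = tf.concat([condition, padding], axis=0)
  return tf.boolean_mask(tensor=a, mask=mask, axis=0)  # [10]
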
def transpose(a, axes=None):
  """Permutes dimensions of the array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axes: array_like. A list of ints with length rank(a), or None, specifying
      the order of permutation. The i'th dimension of the output array
      corresponds to the axes[i]'th dimension of `a`. If None, the axes are
      reversed.

  Returns:
    An ndarray.
  """
  a = array_creation.asarray(a)
  if axes is not None:
    axes = array_creation.asarray(axes)
  return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes))

def nanmean(a, axis=None, dtype=None, keepdims=None):
  a = array_creation.asarray(a)
  if np.issubdtype(a.dtype, np.bool_) or np.issubdtype(a.dtype, np.integer):
    return array_methods.mean(a, axis=axis, dtype=dtype, keepdims=keepdims)
  nan_mask = logical_not(isnan(a))
  normalizer = array_methods.sum(
      nan_mask, axis=axis, dtype=np.int64, keepdims=keepdims)
  return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer

def testEvalOnShapesNoUnnecessaryTracing(self):

  def num_traces(f):
    return len(
        f._tf_function._list_all_concrete_functions_for_serialization())

  def check_trace_only_once(arg1, arg2):

    @extensions.eval_on_shapes
    def f(a):
      return a + 1

    self.assertAllEqual(0, num_traces(f))
    f(arg1)
    self.assertAllEqual(1, num_traces(f))
    f(arg2)
    self.assertAllEqual(1, num_traces(f))

  check_trace_only_once(1, 2)
  check_trace_only_once(1.1, 2.1)
  check_trace_only_once(asarray(1), asarray(2))
  check_trace_only_once(tf.convert_to_tensor(1), tf.convert_to_tensor(2))

def _argminmax(fn, a, axis=None):
  a = array_creation.asarray(a)
  if axis is None:
    # When axis is None numpy flattens the array.
    a_t = tf.reshape(a.data, [-1])
  else:
    a_t = atleast_1d(a).data
  return utils.tensor_to_ndarray(fn(input=a_t, axis=axis))

def _reduce(tf_fn, a, axis=None, dtype=None, keepdims=None,
            promote_int=_TO_INT64, tf_bool_fn=None, preserve_bool=False):
  """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices: (1) _TO_INT64: always promote them to int64 or uint64; (2)
      _TO_FLOAT: always promote them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s
      dtype is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s
      dtype is `np.bool_` (some reductions such as np.sum convert bools to
      integers, while others such as np.max preserve bools).

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = utils.result_type(dtype)
  if keepdims is None:
    keepdims = False
  a = array_creation.asarray(a, dtype=dtype)
  if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_) and
      tf_bool_fn is not None):
    return utils.tensor_to_ndarray(
        tf_bool_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
  if dtype is None:
    dtype = a.dtype
  if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
    if promote_int == _TO_INT64:
      # If a is an integer/bool type whose bit width is less than 64, numpy
      # up-casts it to 64-bit.
      if dtype == np.bool_:
        is_signed = True
        width = 8  # We can use any number here that is less than 64
      else:
        is_signed = np.issubdtype(dtype, np.signedinteger)
        width = np.iinfo(dtype).bits
      if width < 64:
        if is_signed:
          dtype = np.int64
        else:
          dtype = np.uint64
        a = a.astype(dtype)
    elif promote_int == _TO_FLOAT:
      a = a.astype(dtypes.default_float_type())

  return utils.tensor_to_ndarray(
      tf_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))

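# Illustrative sketch (not part of the library): the _TO_INT64 branch above
# mirrors numpy's widening of narrow integer reductions:
def _example_reduce_int_promotion():
  out = np.sum(np.array([100, 100], dtype=np.int8))
  # out.dtype is np.int64 on typical 64-bit platforms (not int8), which is
  # the behavior the bit-width check above reproduces for TF reductions.
  return out
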
def testAxisName(self):
  devices = self._get_two_devices(require_same_type=True)

  def reduce_sum(f):
    return extensions.psum(f, axis_name="foo")

  data = array_creation.asarray(tf.convert_to_tensor([1, 3]))
  pmapped = extensions.pmap(reduce_sum, axis_name="foo", devices=devices)
  pmapped(data)

def pad(array, pad_width, mode, constant_values=0):
  """Pads an array.

  Args:
    array: array_like of rank N. Input array.
    pad_width: {sequence, array_like, int}. Number of values padded to the
      edges of each axis.
      ((before_1, after_1), ... (before_N, after_N)) gives unique pad widths
      for each axis.
      ((before, after),) yields the same before and after pad for each axis.
      (pad,) or int is a shortcut for before = after = pad width for all
      axes.
    mode: string. One of the following string values:
      'constant'
        Pads with a constant value.
      'reflect'
        Pads with the reflection of the vector mirrored on the first and
        last values of the vector along each axis.
      'symmetric'
        Pads with the reflection of the vector mirrored along the edge of
        the array.
      **NOTE**: The supported list of `mode` does not match that of numpy's.
    constant_values: scalar with the same dtype as `array`. Used in
      'constant' mode as the pad value. Default is 0.

  Returns:
    A padded ndarray of rank equal to `array`'s, with shape increased
    according to `pad_width`.

  Raises:
    ValueError: if `mode` is not supported.
  """
  if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'):
    raise ValueError('Unsupported padding mode: ' + mode)
  mode = mode.upper()
  array = array_creation.asarray(array)
  pad_width = array_creation.asarray(pad_width, dtype=tf.int32)
  return utils.tensor_to_ndarray(
      tf.pad(
          tensor=array.data,
          paddings=pad_width.data,
          mode=mode,
          constant_values=constant_values))

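# Illustrative sketch (not part of the library): the three supported modes on
# a small vector, via tf.pad directly:
def _example_pad_modes():
  t = tf.constant([1, 2, 3])
  c = tf.pad(t, [[1, 1]], mode="CONSTANT", constant_values=0)  # [0,1,2,3,0]
  r = tf.pad(t, [[1, 1]], mode="REFLECT")                      # [2,1,2,3,2]
  s = tf.pad(t, [[1, 1]], mode="SYMMETRIC")                    # [1,1,2,3,3]
  return c, r, s
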
def shape(a):
  """Returns the shape of an array.

  Args:
    a: array_like. Input array.

  Returns:
    Tuple of ints.
  """
  a = array_creation.asarray(a)
  return a.shape

def testWrongAxisName(self):
  devices = self._get_two_devices(require_same_type=True)

  def reduce_sum(f):
    return extensions.psum(f, axis_name="bar")

  data = array_creation.asarray(tf.convert_to_tensor([1, 3]))
  with self.assertRaisesWithPredicateMatch(
      ValueError, r"axis_name (.*) is not equal to that of the surrounding"):
    pmapped = extensions.pmap(reduce_sum, axis_name="foo", devices=devices)
    pmapped(data)

def testNoNestedPmap(self):
  devices = self._get_two_devices(require_same_type=True)

  def f(x):
    return x + 1.0

  data = array_creation.asarray(tf.convert_to_tensor([1, 3]))
  with self.assertRaisesWithPredicateMatch(
      ValueError, r"Nested pmap is not supported"):
    f = extensions.pmap(f, devices=devices)
    f = extensions.pmap(f, devices=devices)
    f(data)
