def ix_(*args):  # pylint: disable=missing-docstring
  n = len(args)
  output = []
  for i, a in enumerate(args):
    a = asarray(a).data
    a_rank = tf.rank(a)
    a_rank_temp = utils.get_static_value(a_rank)
    if a_rank_temp is not None:
      a_rank = a_rank_temp
      if a_rank != 1:
        raise ValueError(
            'Arguments must be 1-d, got arg {} of rank {}'.format(i, a_rank))
    else:
      tf.debugging.Assert(a_rank == 1, [a_rank])

    new_shape = [1] * n
    new_shape[i] = -1

    dtype = a.dtype
    if dtype == tf.bool:
      output.append(
          utils.tensor_to_ndarray(tf.reshape(nonzero(a)[0].data, new_shape)))
    elif dtype.is_integer:
      output.append(utils.tensor_to_ndarray(tf.reshape(a, new_shape)))
    else:
      raise ValueError(
          'Only integer and bool dtypes are supported, got {}'.format(dtype))

  return output

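# Usage sketch for `ix_` (illustrative only; mirrors `np.ix_` open-mesh
# semantics, so the values below are expectations rather than tested output):
#   a, b = ix_([0, 1], [2, 4])  # a has shape (2, 1), b has shape (1, 2)
#   # arr[a, b] then selects the cross product of rows {0, 1} and columns
#   # {2, 4}. Boolean arguments are first routed through `nonzero`, so
#   # ix_([True, False, True]) behaves like ix_([0, 2]).
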
def diagonal(a, offset=0, axis1=0, axis2=1):  # pylint: disable=missing-docstring
  a = asarray(a).data

  maybe_rank = a.shape.rank
  if maybe_rank is not None and offset == 0 and (
      axis1 == maybe_rank - 2 or axis1 == -2) and (axis2 == maybe_rank - 1 or
                                                   axis2 == -1):
    return utils.tensor_to_ndarray(tf.linalg.diag_part(a))

  a = moveaxis(utils.tensor_to_ndarray(a), (axis1, axis2), (-2, -1)).data

  a_shape = tf.shape(a)

  def _zeros():  # pylint: disable=missing-docstring
    return (tf.zeros(tf.concat([a_shape[:-1], [0]], 0), dtype=a.dtype), 0)

  # All zeros since diag_part doesn't handle all possible k (aka offset).
  # Written this way since cond will run shape inference on both branches,
  # and diag_part shape inference will fail when offset is out of bounds.
  a, offset = utils.cond(
      utils.logical_or(
          utils.less_equal(offset, -1 * utils.getitem(a_shape, -2)),
          utils.greater_equal(offset, utils.getitem(a_shape, -1)),
      ), _zeros, lambda: (a, offset))

  a = utils.tensor_to_ndarray(tf.linalg.diag_part(a, k=offset))
  return a

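# Usage sketch for `diagonal` (illustrative; follows `np.diagonal` semantics):
#   diagonal([[1, 2], [3, 4]])            # -> [1, 4]
#   diagonal([[1, 2], [3, 4]], offset=1)  # -> [2]
#   # An out-of-range offset yields an empty diagonal rather than an error,
#   # which is exactly what the `_zeros` branch above guarantees.
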
def flip(m, axis=None):  # pylint: disable=missing-docstring
  m = asarray(m).data

  if axis is None:
    return utils.tensor_to_ndarray(tf.reverse(m, tf.range(tf.rank(m))))

  axis = utils._canonicalize_axis(axis, tf.rank(m))  # pylint: disable=protected-access

  return utils.tensor_to_ndarray(tf.reverse(m, [axis]))

def _reduce(tf_fn, a, axis=None, dtype=None, keepdims=None,
            promote_int=_TO_INT64, tf_bool_fn=None, preserve_bool=False):
  """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices: (1) _TO_INT64: always promote them to int64 or uint64; (2)
      _TO_FLOAT: always promote them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s
      dtype is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s
      dtype is `np.bool_` (some reductions such as np.sum convert bools to
      integers, while others such as np.max preserve bools).

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = utils.result_type(dtype)
  if keepdims is None:
    keepdims = False
  a = asarray(a, dtype=dtype)
  if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_)
      and tf_bool_fn is not None):
    return utils.tensor_to_ndarray(
        tf_bool_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
  if dtype is None:
    dtype = a.dtype
    if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
      if promote_int == _TO_INT64:
        # If a is an integer/bool type whose bit width is less than 64,
        # numpy up-casts it to 64-bit.
        if dtype == np.bool_:
          is_signed = True
          width = 8  # We can use any number here that is less than 64
        else:
          is_signed = np.issubdtype(dtype, np.signedinteger)
          width = np.iinfo(dtype).bits
        if width < 64:
          if is_signed:
            dtype = np.int64
          else:
            dtype = np.uint64
          a = a.astype(dtype)
      elif promote_int == _TO_FLOAT:
        a = a.astype(dtypes.default_float_type())

  return utils.tensor_to_ndarray(
      tf_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))

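# A minimal sketch of how a concrete reduction could be wired through
# `_reduce` (illustrative only; not necessarily how this module defines
# `sum`):
#
# def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
#   # np.sum adds bools as integers, so no `preserve_bool` here; integer
#   # inputs are up-cast to 64 bits via the default `promote_int=_TO_INT64`.
#   return _reduce(tf.reduce_sum, a, axis=axis, dtype=dtype,
#                  keepdims=keepdims, tf_bool_fn=tf.reduce_any)
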
def roll(a, shift, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data

  if axis is not None:
    return utils.tensor_to_ndarray(tf.roll(a, shift, axis))

  # If axis is None, the roll happens as a 1-d tensor.
  original_shape = tf.shape(a)
  a = tf.roll(tf.reshape(a, [-1]), shift, 0)
  return utils.tensor_to_ndarray(tf.reshape(a, original_shape))

def sort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  if kind != 'quicksort':
    raise ValueError("Only 'quicksort' is supported.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")

  a = array_ops.array(a)

  if axis is None:
    result_t = tf.sort(tf.reshape(a.data, [-1]), 0)
    return utils.tensor_to_ndarray(result_t)
  else:
    return utils.tensor_to_ndarray(tf.sort(a.data, axis))

def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a).data
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  if original_shape is not None and None not in original_shape:
    if not original_shape:
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()

  repeats = asarray(repeats).data
  result = tf.repeat(a, repeats, axis)
  result.set_shape(original_shape)

  return utils.tensor_to_ndarray(result)

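# Usage sketch for `repeat` (illustrative; follows `np.repeat` semantics):
#   repeat([1, 2], 2)                         # -> [1, 1, 2, 2]
#   repeat([[1, 2], [3, 4]], [1, 2], axis=0)  # -> [[1, 2], [3, 4], [3, 4]]
#   # With axis=None the input is flattened before repeating.
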
def _bin_op(tf_fun, a, b, promote=True):
  if promote:
    a, b = array_ops._promote_dtype(a, b)  # pylint: disable=protected-access
  else:
    a = array_ops.array(a)
    b = array_ops.array(b)
  return utils.tensor_to_ndarray(tf_fun(a.data, b.data))

def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v).data
  v_rank = tf.rank(v)

  v.shape.with_rank_at_most(2)

  # TODO(nareshmodi): Consider a utils.Assert version that will fail during
  # tracing time if the shape is known.
  tf.debugging.Assert(
      utils.logical_or(tf.equal(v_rank, 1), tf.equal(v_rank, 2)), [v_rank])

  def _diag(v, k):
    return utils.cond(
        tf.equal(tf.size(v), 0),
        lambda: tf.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: tf.linalg.diag(v, k=k))

  def _diag_part(v, k):
    v_shape = tf.shape(v)
    v, k = utils.cond(
        utils.logical_or(
            utils.less_equal(k, -1 * utils.getitem(v_shape, 0)),
            utils.greater_equal(k, utils.getitem(v_shape, 1)),
        ), lambda: (tf.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
    result = tf.linalg.diag_part(v, k=k)
    return result

  result = utils.cond(
      tf.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
  return utils.tensor_to_ndarray(result)

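# Usage sketch for `diag` (illustrative; follows `np.diag` semantics):
#   diag([1, 2])                 # 1-d input: builds [[1, 0], [0, 2]]
#   diag([[1, 2], [3, 4]])       # 2-d input: extracts [1, 4]
#   diag([[1, 2], [3, 4]], k=1)  # -> [2]; an out-of-range k yields []
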
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  x = asarray(x).data

  x_shape = tf.shape(x)
  N = N or x_shape[0]

  N_temp = utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    tf.debugging.Assert(N >= 0, [N])

  rank = tf.rank(x)
  rank_temp = utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    tf.debugging.Assert(rank == 1, [rank])

  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    start = N - 1
    limit = -1
    delta = -1

  x = tf.expand_dims(x, -1)
  return utils.tensor_to_ndarray(
      tf.math.pow(x, tf.cast(tf.range(start, limit, delta), dtype=x.dtype)))

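# Usage sketch for `vander` (illustrative; follows `np.vander` semantics):
#   vander([1, 2, 3], N=3)  # -> [[1, 1, 1], [4, 2, 1], [9, 3, 1]]
#   # With increasing=True the powers run 0..N-1 left to right instead.
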
def _argminmax(fn, a, axis=None):
  a = array_ops.array(a)
  if axis is None:
    # When axis is None numpy flattens the array.
    a_t = tf.reshape(a.data, [-1])
  else:
    a_t = array_ops.atleast_1d(a).data
  return utils.tensor_to_ndarray(fn(input=a_t, axis=axis))

def where(condition, x=None, y=None):
  """Raises ValueError if exactly one of x or y is not None."""
  condition = asarray(condition, dtype=np.bool_)
  if x is None and y is None:
    return nonzero(condition)
  elif x is not None and y is not None:
    x, y = _promote_dtype(x, y)
    return utils.tensor_to_ndarray(tf.where(condition.data, x.data, y.data))
  raise ValueError('Both x and y must be ndarrays, or both must be None.')

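# Usage sketch for `where` (illustrative; follows `np.where` semantics):
#   where([True, False])                   # one-arg form: same as nonzero
#   where([True, False], [1, 2], [3, 4])   # -> [1, 4]
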
def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):
  dtype = utils.result_type(x1, x2)
  # Cast x1 and x2 to the result_type if needed.
  x1 = array_ops.array(x1, dtype=dtype)
  x2 = array_ops.array(x2, dtype=dtype)
  x1 = x1.data
  x2 = x2.data
  if cast_bool_to_int and x1.dtype == tf.bool:
    x1 = tf.cast(x1, tf.int32)
    x2 = tf.cast(x2, tf.int32)
  return utils.tensor_to_ndarray(tf_fun(x1, x2))

def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
  """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
  if not source and not destination:
    return a

  a = asarray(a).data

  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)

  a_rank = utils._maybe_static(tf.rank(a))  # pylint: disable=protected-access

  def _correct_axis(axis, rank):
    if axis < 0:
      return axis + rank
    return axis

  source = tuple(_correct_axis(axis, a_rank) for axis in source)
  destination = tuple(_correct_axis(axis, a_rank) for axis in destination)

  if a.shape.rank is not None:
    perm = [i for i in range(a_rank) if i not in source]
    for dest, src in sorted(zip(destination, source)):
      assert dest <= len(perm)
      perm.insert(dest, src)
  else:
    r = tf.range(a_rank)

    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = tf.unstack(tf.sort(tf.stack(b)), num=len(b))

      i = 0
      result = []

      for item in items:
        result.append(a[i:item])
        i = item + 1

      result.append(a[i:])

      return tf.concat(result, 0)

    minus_sources = _remove_indices(r, source)
    minus_dest = _remove_indices(r, destination)

    perm = tf.scatter_nd(tf.expand_dims(minus_dest, 1), minus_sources,
                         [a_rank])
    perm = tf.tensor_scatter_nd_update(perm, tf.expand_dims(destination, 1),
                                       source)
  a = tf.transpose(a, perm)

  return utils.tensor_to_ndarray(a)

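# Usage sketch for `moveaxis` (illustrative; follows `np.moveaxis` semantics,
# shown on shapes only):
#   moveaxis(x, 0, -1)             # shape (3, 4, 5) -> (4, 5, 3)
#   moveaxis(x, (0, 1), (-1, -2))  # shape (3, 4, 5) -> (5, 4, 3)
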
def expand_dims(a, axis):
  """Expand the shape of an array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    axis: int. axis on which to expand the shape.

  Returns:
    An ndarray with the contents and dtype of `a` and shape expanded on axis.
  """
  a = asarray(a)
  return utils.tensor_to_ndarray(tf.expand_dims(a.data, axis=axis))

def clip(a, a_min, a_max):  # pylint: disable=missing-docstring
  if a_min is None and a_max is None:
    raise ValueError('Not more than one of `a_min` and `a_max` may be `None`.')
  if a_min is None:
    return minimum(a, a_max)
  elif a_max is None:
    return maximum(a, a_min)
  else:
    a, a_min, a_max = array_ops._promote_dtype(a, a_min, a_max)  # pylint: disable=protected-access
    return utils.tensor_to_ndarray(
        tf.clip_by_value(*utils.tf_broadcast(a.data, a_min.data, a_max.data)))

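# Usage sketch for `clip` (illustrative; follows `np.clip` semantics):
#   clip([1, 5, 10], 2, 8)     # -> [2, 5, 8]
#   clip([1, 5, 10], None, 8)  # only an upper bound: -> [1, 5, 8]
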
def cumsum(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  a = asarray(a, dtype=dtype)

  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    axis += tf.rank(a.data)
  return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))

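# Usage sketch for `cumsum` (illustrative; follows `np.cumsum` semantics):
#   cumsum([[1, 2], [3, 4]])          # flattened: -> [1, 3, 6, 10]
#   cumsum([[1, 2], [3, 4]], axis=0)  # -> [[1, 2], [4, 6]]
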
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  a = asarray(a)

  a_rank = tf.rank(a)
  if axis1 < 0:
    axis1 += a_rank
  if axis2 < 0:
    axis2 += a_rank

  perm = tf.range(a_rank)
  perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]],
                                     [axis2, axis1])
  a = tf.transpose(a, perm)

  return utils.tensor_to_ndarray(a)

def trace(a, offset=0, axis1=0, axis2=1, dtype=None):  # pylint: disable=missing-docstring
  if dtype:
    dtype = utils.result_type(dtype)
  a = array_ops.asarray(a, dtype).data

  if offset == 0:
    a_shape = a.shape
    if a_shape.rank is not None:
      rank = len(a_shape)
      if (axis1 == -2 or axis1 == rank - 2) and (axis2 == -1 or
                                                 axis2 == rank - 1):
        return utils.tensor_to_ndarray(tf.linalg.trace(a))

  a = array_ops.diagonal(a, offset, axis1, axis2)
  return array_ops.sum(a, -1, dtype)

def imag(a):
  """Returns imaginary parts of all elements in `a`.

  Uses `tf.math.imag`.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray with the same shape as `a`.
  """
  a = asarray(a)
  # TODO(srbs): np.imag returns a scalar if a is a scalar, whereas we always
  # return an ndarray.
  return utils.tensor_to_ndarray(tf.math.imag(a.data))

def squeeze(a, axis=None):
  """Removes single-element axes from the array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: scalar or list/tuple of ints.

  TODO(srbs): tf.squeeze throws an error when axis is a Tensor and eager
  execution is enabled. So we cannot allow axis to be array_like here. Fix.

  Returns:
    An ndarray.
  """
  a = asarray(a)
  return utils.tensor_to_ndarray(tf.squeeze(a, axis))

def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m).data
  m_shape = m.shape.as_list()

  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')

  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')

  z = tf.constant(0, m.dtype)

  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return utils.tensor_to_ndarray(
      tf.where(tf.broadcast_to(mask, tf.shape(m)), z, m))

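# Usage sketch for `triu` (illustrative; follows `np.triu` semantics):
#   triu([[1, 2], [3, 4]])       # -> [[1, 2], [0, 4]]
#   triu([[1, 2], [3, 4]], k=1)  # -> [[0, 2], [0, 0]]
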
def real(val):
  """Returns real parts of all elements in `val`.

  Uses `tf.math.real`.

  Args:
    val: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray with the same shape as `val`.
  """
  val = asarray(val)
  # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
  # return an ndarray.
  return utils.tensor_to_ndarray(tf.math.real(val.data))

def around(a, decimals=0):  # pylint: disable=missing-docstring
  a = asarray(a)
  dtype = a.dtype
  factor = math.pow(10, decimals)
  if np.issubdtype(dtype, np.inexact):
    factor = tf.cast(factor, dtype)
  else:
    # Use float as the working dtype when a.dtype is exact (e.g. integer),
    # because `decimals` can be negative.
    float_dtype = dtypes.default_float_type()
    a = a.astype(float_dtype).data
    factor = tf.cast(factor, float_dtype)
  a = tf.multiply(a, factor)
  a = tf.round(a)
  a = tf.math.divide(a, factor)
  return utils.tensor_to_ndarray(a).astype(dtype)

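# Usage sketch for `around` (illustrative; follows `np.around` semantics):
#   around([1.234, 5.678], decimals=2)  # -> [1.23, 5.68]
#   around(1234, decimals=-2)           # -> 1200; the float working dtype
#                                       #    is what makes negative
#                                       #    `decimals` work on integers
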
def randn(*args):
  """Returns samples from a normal distribution.

  Uses `tf.random.normal`.

  Args:
    *args: The shape of the output array.

  Returns:
    An ndarray with shape `args` and dtype `float64`.
  """
  # TODO(wangpeng): Use new stateful RNG
  if utils.isscalar(args):
    args = (args,)
  return utils.tensor_to_ndarray(
      tf.random.normal(args, dtype=DEFAULT_RANDN_DTYPE))

def compress(condition, a, axis=None):
  """Compresses `a` by selecting values along `axis` with `condition` true.

  Uses `tf.boolean_mask`.

  Args:
    condition: 1-d array of bools. If `condition` is shorter than the array
      axis (or the flattened array if axis is None), it is padded with False.
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axis: Optional. Axis along which to select elements. If None, `condition`
      is applied on flattened array.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `condition` is not of rank 1.
  """
  condition = asarray(condition, dtype=bool)
  a = asarray(a)

  if condition.ndim != 1:
    raise ValueError('condition must be a 1-d array.')
  # `np.compress` treats scalars as 1-d arrays.
  if a.ndim == 0:
    a = ravel(a)

  if axis is None:
    a = ravel(a)
    axis = 0

  if axis < 0:
    axis += a.ndim

  assert axis >= 0 and axis < a.ndim

  # `tf.boolean_mask` requires the first dimensions of array and condition to
  # match. `np.compress` pads condition with False when it is shorter.
  condition_t = condition.data
  a_t = a.data
  if condition.shape[0] < a.shape[axis]:
    padding = tf.fill([a.shape[axis] - condition.shape[0]], False)
    condition_t = tf.concat([condition_t, padding], axis=0)
  return utils.tensor_to_ndarray(
      tf.boolean_mask(tensor=a_t, mask=condition_t, axis=axis))

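# Usage sketch for `compress` (illustrative; follows `np.compress`
# semantics):
#   compress([True, False, True], [1, 2, 3])           # -> [1, 3]
#   compress([False, True], [[1, 2], [3, 4]], axis=0)  # -> [[3, 4]]
#   # A condition shorter than the selected axis is padded with False.
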
def reshape(a, newshape, order='C'):
  """order argument can only be 'C' or 'F'."""
  if order not in {'C', 'F'}:
    raise ValueError('Unsupported order argument {}'.format(order))

  a = asarray(a)
  if isinstance(newshape, arrays_lib.ndarray):
    newshape = newshape.data
  if isinstance(newshape, int):
    newshape = [newshape]

  if order == 'F':
    r = tf.transpose(tf.reshape(tf.transpose(a.data), newshape[::-1]))
  else:
    r = tf.reshape(a.data, newshape)

  return utils.tensor_to_ndarray(r)

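# Usage sketch for `reshape` (illustrative). order='F' is implemented above
# by transposing, reshaping to the reversed shape in C order, and transposing
# back, which reproduces NumPy's column-major fill:
#   reshape([1, 2, 3, 4, 5, 6], (2, 3), order='F')
#   # -> [[1, 3, 5], [2, 4, 6]]
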
def transpose(a, axes=None):
  """Permutes dimensions of the array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    axes: array_like. A list of ints with length rank(a) or None specifying
      the order of permutation. The i'th dimension of the output array
      corresponds to the axes[i]'th dimension of `a`. If None, the axes are
      reversed.

  Returns:
    An ndarray.
  """
  a = asarray(a)
  if axes is not None:
    axes = asarray(axes)
  return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes))

def _scalar(tf_fn, x, promote_to_float=False):
  """Computes tf_fn(x) for each element in `x`.

  Args:
    tf_fn: function that takes a single Tensor argument.
    x: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    promote_to_float: whether to cast the argument to a float dtype
      (`dtypes.default_float_type`) if it is not already.

  Returns:
    An ndarray with the same shape as `x`. The default output dtype is
    determined by `dtypes.default_float_type`, unless x is an ndarray with a
    floating point type, in which case the output type is the same as
    `x.dtype`.
  """
  x = array_ops.asarray(x)
  if promote_to_float and not np.issubdtype(x.dtype, np.inexact):
    x = x.astype(dtypes.default_float_type())
  return utils.tensor_to_ndarray(tf_fn(x.data))

def meshgrid(*xi, **kwargs):
  """This currently requires copy=True and sparse=False."""
  sparse = kwargs.get('sparse', False)
  if sparse:
    raise ValueError("tf.numpy doesn't support returning sparse arrays yet")

  copy = kwargs.get('copy', True)
  if not copy:
    raise ValueError('tf.numpy only supports copy=True')

  indexing = kwargs.get('indexing', 'xy')

  xi = [array_ops.asarray(arg).data for arg in xi]
  kwargs = {'indexing': indexing}

  outputs = tf.meshgrid(*xi, **kwargs)
  outputs = [utils.tensor_to_ndarray(output) for output in outputs]

  return outputs

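# Usage sketch for `meshgrid` (illustrative; follows `np.meshgrid` semantics,
# shown on shapes only):
#   X, Y = meshgrid([1, 2, 3], [4, 5])                 # both have shape (2, 3)
#   X, Y = meshgrid([1, 2, 3], [4, 5], indexing='ij')  # both have shape (3, 2)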