def _bin_op(tf_fun, a, b, promote=True):
  if promote:
    a, b = array_ops._promote_dtype(a, b)  # pylint: disable=protected-access
  else:
    a = array_ops.array(a)
    b = array_ops.array(b)
  return utils.tensor_to_ndarray(tf_fun(a.data, b.data))
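
# A usage sketch (not in the original source; assumes a TF2 eager context and
# this module's `array_ops`). With `promote=True` the operands are first
# promoted to a common dtype per NumPy rules:
#
#   _bin_op(tf.math.add, 1, 2.5)  # int and float promote to a float dtype -> 3.5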
def _testReduce(self, math_fun, np_fun, name):
  axis_transforms = [
      lambda x: x,  # Identity
      tf.convert_to_tensor,
      np.array,
      array_ops.array,
      lambda x: array_ops.array(x, dtype=np.float32),
      lambda x: array_ops.array(x, dtype=np.float64),
  ]

  def run_test(a, **kwargs):
    axis = kwargs.pop('axis', None)
    for fn1 in self.array_transforms:
      for fn2 in axis_transforms:
        arg1 = fn1(a)
        axis_arg = fn2(axis) if axis is not None else None
        self.match(
            math_fun(arg1, axis=axis_arg, **kwargs),
            np_fun(arg1, axis=axis, **kwargs),
            msg='{}({}, axis={}, keepdims={})'.format(
                name, arg1, axis, kwargs.get('keepdims')))

  run_test(5)
  run_test([2, 3])
  run_test([[2, -3], [-6, 7]])
  run_test([[2, -3], [-6, 7]], axis=0)
  run_test([[2, -3], [-6, 7]], axis=0, keepdims=True)
  run_test([[2, -3], [-6, 7]], axis=1)
  run_test([[2, -3], [-6, 7]], axis=1, keepdims=True)
  run_test([[2, -3], [-6, 7]], axis=(0, 1))
  run_test([[2, -3], [-6, 7]], axis=(1, 0))
def nan_reduction(a, axis=None, dtype=None, keepdims=False):
  # Note: `reduction`, `init_val` and `isnan` are free variables here; this
  # function is returned by an enclosing factory that builds the nan*
  # reductions by substituting `init_val` for NaN entries.
  a = array_ops.array(a)
  v = array_ops.array(init_val, dtype=a.dtype)
  return reduction(
      array_ops.where(isnan(a), v, a),
      axis=axis,
      dtype=dtype,
      keepdims=keepdims)
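
# Behavior sketch (an assumption, not in the original source: it supposes this
# closure was created with `reduction=array_ops.sum` and `init_val=0`, i.e. the
# `nansum` variant):
#
#   nan_reduction([1., float('nan'), 2.])  # NaN replaced by 0 -> 3.0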
def test_setitem(self):
  # Single integer index.
  a = array_ops.array([1., 2., 3.])
  b = array_ops.array(5.)
  c = array_ops.array(10.)

  tensors = [arr.data for arr in [a, b, c]]
  with tf.GradientTape() as g:
    g.watch(tensors)
    a[1] = b + c
    loss = array_ops.sum(a)

  gradients = g.gradient(loss.data, tensors)
  self.assertSequenceEqual(
      array_ops.array(gradients[0]).tolist(), [1., 0., 1.])
  self.assertEqual(array_ops.array(gradients[1]).tolist(), 1.)
  self.assertEqual(array_ops.array(gradients[2]).tolist(), 1.)

  # Tuple index.
  a = array_ops.array([[[1., 2.], [3., 4.]],
                       [[5., 6.], [7., 8.]]])  # 2x2x2 array.
  b = array_ops.array([10., 11.])

  tensors = [arr.data for arr in [a, b]]
  with tf.GradientTape() as g:
    g.watch(tensors)
    a[(1, 0)] = b
    loss = array_ops.sum(a)

  gradients = g.gradient(loss.data, tensors)
  self.assertSequenceEqual(
      array_ops.array(gradients[0]).tolist(),
      [[[1., 1.], [1., 1.]], [[0., 0.], [1., 1.]]])
  self.assertEqual(array_ops.array(gradients[1]).tolist(), [1., 1.])
def testLogSpace(self):
  array_transforms = [
      lambda x: x,  # Identity
      tf.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      array_ops.array,
      lambda x: array_ops.array(x, dtype=np.float32),
      lambda x: array_ops.array(x, dtype=np.float64),
  ]

  def run_test(start, stop, **kwargs):
    for fn1 in array_transforms:
      for fn2 in array_transforms:
        arg1 = fn1(start)
        arg2 = fn2(stop)
        self.match(
            math_ops.logspace(arg1, arg2, **kwargs),
            np.logspace(arg1, arg2, **kwargs),
            msg='logspace({}, {})'.format(arg1, arg2))

  run_test(0, 5)
  run_test(0, 5, num=10)
  run_test(0, 5, endpoint=False)
  run_test(0, 5, base=2.0)
  run_test(0, -5)
  run_test(0, -5, num=10)
  run_test(0, -5, endpoint=False)
  run_test(0, -5, base=2.0)
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
  if axis is not None and not isinstance(axis, six.integer_types):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError('`axis` must be an integer. Tuple of ints is not '
                     'supported yet. Got type: %s' % type(axis))
  a = array_ops.array(a)
  if weights is None:  # Treat all weights as 1.
    if not np.issubdtype(a.dtype, np.inexact):
      a = a.astype(utils.result_type(a.dtype, dtypes.default_float_type()))
    avg = tf.reduce_mean(a.data, axis=axis)
    if returned:
      if axis is None:
        weights_sum = tf.size(a.data)
      else:
        weights_sum = tf.shape(a.data)[axis]
      weights_sum = tf.cast(weights_sum, a.data.dtype)
  else:
    if np.issubdtype(a.dtype, np.inexact):
      out_dtype = utils.result_type(a.dtype, weights)
    else:
      out_dtype = utils.result_type(a.dtype, weights,
                                    dtypes.default_float_type())
    a = array_ops.array(a, out_dtype).data
    weights = array_ops.array(weights, out_dtype).data

    def rank_equal_case():
      tf.debugging.Assert(
          tf.reduce_all(tf.shape(a) == tf.shape(weights)),
          [tf.shape(a), tf.shape(weights)])
      weights_sum = tf.reduce_sum(weights, axis=axis)
      avg = tf.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum

    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:

      def rank_not_equal_case():
        tf.debugging.Assert(tf.rank(weights) == 1, [tf.rank(weights)])
        weights_sum = tf.reduce_sum(weights)
        axes = tf.convert_to_tensor([[axis], [0]])
        avg = tf.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum

      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = utils.cond(
          tf.rank(a) == tf.rank(weights), rank_equal_case,
          rank_not_equal_case)

  avg = array_ops.array(avg)
  if returned:
    weights_sum = array_ops.broadcast_to(weights_sum, tf.shape(avg.data))
    return avg, weights_sum
  return avg
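
# Usage sketch (not in the original source), mirroring np.average:
#
#   average([1., 2., 3., 4.])                             # -> 2.5
#   average([1., 2., 3., 4.], weights=[4., 3., 2., 1.])   # (4+6+6+4)/10 -> 2.0
#   avg, wsum = average([1., 2.], returned=True)          # wsum == 2.0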
def _comparison(tf_fun, x1, x2, cast_bool_to_int=False):
  dtype = utils.result_type(x1, x2)
  # Cast x1 and x2 to the result_type if needed.
  x1 = array_ops.array(x1, dtype=dtype)
  x2 = array_ops.array(x2, dtype=dtype)
  x1 = x1.data
  x2 = x2.data
  if cast_bool_to_int and x1.dtype == tf.bool:
    x1 = tf.cast(x1, tf.int32)
    x2 = tf.cast(x2, tf.int32)
  return utils.tensor_to_ndarray(tf_fun(x1, x2))
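
# Usage sketch (not in the original source). Some comparison kernels such as
# `tf.math.greater` do not accept bool tensors, hence `cast_bool_to_int`:
#
#   _comparison(tf.math.greater, True, False, cast_bool_to_int=True)  # -> True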
def setUp(self):
  super(MathTest, self).setUp()
  self.array_transforms = [
      lambda x: x,  # Identity
      tf.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      array_ops.array,
      lambda x: array_ops.array(x, dtype=np.float32),
      lambda x: array_ops.array(x, dtype=np.float64),
  ]
  self.types = [np.int32, np.int64, np.float32, np.float64]
def tile(a, reps):
  a = array_ops.array(a).data
  reps = array_ops.array(reps, dtype=tf.int32).reshape([-1]).data

  a_rank = tf.rank(a)
  reps_size = tf.size(reps)
  # Pad `reps` and the shape of `a` with leading 1s so both have the same
  # rank, matching how np.tile aligns `reps` against `a`.
  reps = tf.pad(
      reps, [[tf.math.maximum(a_rank - reps_size, 0), 0]], constant_values=1)
  a_shape = tf.pad(
      tf.shape(a), [[tf.math.maximum(reps_size - a_rank, 0), 0]],
      constant_values=1)
  a = tf.reshape(a, a_shape)

  return arrays.tensor_to_ndarray(tf.tile(a, reps))
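
# Behavior sketch mirroring np.tile (example, not in the original source):
#
#   tile([1, 2], [2, 2])  # `a` is rank-padded to shape (1, 2)
#                         # -> [[1, 2, 1, 2], [1, 2, 1, 2]]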
def testArgMaxArgMin(self):
  data = [
      0,
      5,
      [1],
      [1, 2, 3],
      [[1, 2, 3]],
      [[4, 6], [7, 8]],
      [[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
  ]
  for fn, d in itertools.product(self.array_transforms, data):
    arr = fn(d)
    self.match(math_ops.argmax(arr), np.argmax(arr))
    self.match(math_ops.argmin(arr), np.argmin(arr))
    if hasattr(arr, 'shape'):
      ndims = len(arr.shape)
    else:
      ndims = array_ops.array(arr, copy=False).ndim
    if ndims == 0:
      # Numpy flattens the scalar ndarray and treats it as a 1-d array of
      # size 1.
      ndims = 1
    for axis in range(-ndims, ndims):
      self.match(math_ops.argmax(arr, axis=axis), np.argmax(arr, axis=axis))
      self.match(math_ops.argmin(arr, axis=axis), np.argmin(arr, axis=axis))
def _argminmax(fn, a, axis=None):
  a = array_ops.array(a)
  if axis is None:
    # When axis is None numpy flattens the array.
    a_t = tf.reshape(a.data, [-1])
  else:
    a_t = array_ops.atleast_1d(a).data
  return utils.tensor_to_ndarray(fn(input=a_t, axis=axis))
def concatenate(arys, axis=0):
  if not isinstance(arys, (list, tuple)):
    arys = [arys]
  if not arys:
    raise ValueError('Need at least one array to concatenate.')
  dtype = utils.result_type(*arys)
  arys = [array_ops.array(array, dtype=dtype).data for array in arys]
  return arrays.tensor_to_ndarray(tf.concat(arys, axis))
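
# Usage sketch (not in the original source). Inputs are promoted to a common
# dtype before concatenation:
#
#   concatenate(([1, 2], [3., 4.5]))  # ints promote to float -> [1., 2., 3., 4.5]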
def testIndexedSlices(self):
  dtype = tf.int64
  iss = tf.IndexedSlices(
      values=tf.ones([2, 3], dtype=dtype),
      indices=tf.constant([1, 9]),
      dense_shape=[10, 3])
  a = array_ops.array(iss, copy=False)
  expected = tf.scatter_nd([[1], [9]], tf.ones([2, 3], dtype=dtype), [10, 3])
  self.assertAllEqual(expected, a)
def run_test(a, b):
  # Note: `self`, `math_fun`, `np_fun`, `name`, `check_promotion` and
  # `check_promotion_result_type` are bound in the enclosing binary-op test
  # helper.
  for fn in self.array_transforms:
    arg1 = fn(a)
    arg2 = fn(b)
    self.match(
        math_fun(arg1, arg2),
        np_fun(arg1, arg2),
        msg='{}({}, {})'.format(name, arg1, arg2))
  # Tests type promotion.
  for type_a in self.types:
    for type_b in self.types:
      if not check_promotion and type_a != type_b:
        continue
      arg1 = array_ops.array(a, dtype=type_a)
      arg2 = array_ops.array(b, dtype=type_b)
      self.match(
          math_fun(arg1, arg2),
          np_fun(arg1, arg2),
          msg='{}({}, {})'.format(name, arg1, arg2),
          check_dtype=check_promotion_result_type)
def setUp(self):
  super().setUp()
  python_shapes = [
      0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
  ]
  self.shape_transforms = [
      lambda x: x,
      lambda x: np.array(x, dtype=int),
      lambda x: array_ops.array(x, dtype=int),
      tf.TensorShape,
  ]

  self.all_shapes = []
  for fn in self.shape_transforms:
    self.all_shapes.extend([fn(s) for s in python_shapes])

  if sys.version_info.major == 3:
    # There is a bug of np.empty (and alike) in Python 3 causing a crash when
    # the `shape` argument is an arrays.ndarray scalar (or tf.Tensor scalar).
    def not_ndarray_scalar(s):
      return not (isinstance(s, arrays.ndarray) and s.ndim == 0)

    self.all_shapes = list(filter(not_ndarray_scalar, self.all_shapes))

  self.all_types = [
      int, float, np.int16, np.int32, np.int64, np.float16, np.float32,
      np.float64
  ]

  source_array_data = [
      1,
      5.5,
      7,
      (),
      (8, 10.),
      ((), ()),
      ((1, 4), (2, 8)),
      [],
      [7],
      [8, 10.],
      [[], []],
      [[1, 4], [2, 8]],
      ([], []),
      ([1, 4], [2, 8]),
      [(), ()],
      [(1, 4), (2, 8)],
  ]

  self.array_transforms = [
      lambda x: x,
      tf.convert_to_tensor,
      np.array,
      array_ops.array,
  ]
  self.all_arrays = []
  for fn in self.array_transforms:
    self.all_arrays.extend([fn(s) for s in source_array_data])
def run_test(arr, index, value):
  # Note: nested in the enclosing test method, which binds `self`.
  for fn in self.array_transforms:
    value_arg = fn(value)
    tf_array = array_ops.array(arr)
    np_array = np.array(arr)
    tf_array[index] = value_arg
    # TODO(srbs): "setting an array element with a sequence" is thrown
    # if we do not wrap value_arg in a numpy array. Investigate how this
    # can be avoided.
    np_array[index] = np.array(value_arg)
    self.match(tf_array, np_array)
def nanmean(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=missing-docstring
  a = array_ops.array(a)
  if np.issubdtype(a.dtype, np.bool_) or np.issubdtype(a.dtype, np.integer):
    return array_ops.mean(a, axis=axis, dtype=dtype, keepdims=keepdims)
  nan_mask = logical_not(isnan(a))
  if dtype is None:
    dtype = a.dtype
  normalizer = array_ops.sum(
      nan_mask, axis=axis, dtype=dtype, keepdims=keepdims)
  return nansum(a, axis=axis, dtype=dtype, keepdims=keepdims) / normalizer
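
# Behavior sketch (not in the original source): NaNs are excluded from both
# the sum and the count.
#
#   nanmean([1., float('nan'), 3.])  # sum 4. over 2 valid entries -> 2.0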
def sort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  if kind != 'quicksort':
    raise ValueError("Only 'quicksort' is supported.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")

  a = array_ops.array(a)

  if axis is None:
    result_t = tf.sort(tf.reshape(a.data, [-1]), 0)
    return utils.tensor_to_ndarray(result_t)
  else:
    return utils.tensor_to_ndarray(tf.sort(a.data, axis))
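
# Usage sketch (not in the original source). As with np.sort, `axis=None`
# sorts the flattened array:
#
#   sort([[3, 1], [2, 0]])             # -> [[1, 3], [0, 2]] (last axis)
#   sort([[3, 1], [2, 0]], axis=None)  # -> [0, 1, 2, 3]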
def testDiagFlat(self):
  array_transforms = [
      lambda x: x,  # Identity
      tf.convert_to_tensor,
      np.array,
      lambda x: np.array(x, dtype=np.float32),
      lambda x: np.array(x, dtype=np.float64),
      array_ops.array,
      lambda x: array_ops.array(x, dtype=np.float32),
      lambda x: array_ops.array(x, dtype=np.float64),
  ]

  def run_test(arr):
    for fn in array_transforms:
      # Don't rebind `arr` here, or the transforms would compose across
      # loop iterations instead of being applied independently.
      arg = fn(arr)
      self.match(
          array_ops.diagflat(arg),
          np.diagflat(arg),
          msg='diagflat({})'.format(arg))
      for k in range(-3, 3):
        self.match(
            array_ops.diagflat(arg, k),
            np.diagflat(arg, k),
            msg='diagflat({}, k={})'.format(arg, k))

  # 1-d arrays.
  run_test([])
  run_test([1])
  run_test([1, 2])
  # 2-d arrays.
  run_test([[]])
  run_test([[5]])
  run_test([[], []])
  run_test(np.arange(4).reshape((2, 2)).tolist())
  run_test(np.arange(2).reshape((2, 1)).tolist())
  run_test(np.arange(2).reshape((1, 2)).tolist())
  # 3-d arrays.
  run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def argsort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  # TODO(nareshmodi): make string tensors also work.
  if kind not in ('quicksort', 'stable'):
    raise ValueError("Only 'quicksort' and 'stable' arguments are supported.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")
  stable = (kind == 'stable')

  a = array_ops.array(a).data

  def _argsort(a, axis, stable):
    if axis is None:
      a = tf.reshape(a, [-1])
      axis = 0
    return tf.argsort(a, axis, stable=stable)

  tf_ans = tf.cond(
      tf.rank(a) == 0, lambda: tf.constant([0]),
      lambda: _argsort(a, axis, stable))

  return array_ops.array(tf_ans, dtype=np.intp)
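
# Usage sketch (not in the original source). Per the rank-0 special case
# above, a scalar input yields [0]:
#
#   argsort([3, 1, 2])  # -> [1, 2, 0]
#   argsort(7)          # -> [0]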
def linspace(  # pylint: disable=missing-docstring
    start, stop, num=50, endpoint=True, retstep=False, dtype=float, axis=0):
  if dtype:
    dtype = utils.result_type(dtype)
  start = array_ops.array(start, dtype=dtype).data
  stop = array_ops.array(stop, dtype=dtype).data
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  step = tf.convert_to_tensor(np.nan)
  if endpoint:
    result = tf.linspace(start, stop, num, axis=axis)
    if num > 1:
      step = (stop - start) / (num - 1)
  else:
    # tf.linspace does not support endpoint=False so we manually handle it
    # here.
    if num > 1:
      step = ((stop - start) / num)
      new_stop = tf.cast(stop, step.dtype) - step
      start = tf.cast(start, new_stop.dtype)
      result = tf.linspace(start, new_stop, num, axis=axis)
    else:
      result = tf.linspace(start, stop, num, axis=axis)
  if dtype:
    result = tf.cast(result, dtype)
  if retstep:
    return arrays.tensor_to_ndarray(result), arrays.tensor_to_ndarray(step)
  else:
    return arrays.tensor_to_ndarray(result)
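
# Usage sketch (not in the original source), mirroring np.linspace:
#
#   linspace(0., 4., num=5)                  # -> [0., 1., 2., 3., 4.]
#   linspace(0., 5., num=5, endpoint=False)  # step (5-0)/5 = 1 -> [0., 1., 2., 3., 4.]
#   linspace(0., 4., num=5, retstep=True)    # -> (values, step=1.0)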
def testArray(self):
  ndmins = [0, 1, 2, 5]
  for a, dtype, ndmin, copy in itertools.product(
      self.all_arrays, self.all_types, ndmins, [True, False]):
    self.match(
        array_ops.array(a, dtype=dtype, ndmin=ndmin, copy=copy),
        np.array(a, dtype=dtype, ndmin=ndmin, copy=copy))

  zeros_list = array_ops.zeros(5)

  # TODO(srbs): Test that copy=True when context.device is different from
  # tensor device copies the tensor.

  # Backing tensor is the same if copy=False, other attributes being None.
  self.assertIs(array_ops.array(zeros_list, copy=False).data, zeros_list.data)
  self.assertIs(
      array_ops.array(zeros_list.data, copy=False).data, zeros_list.data)

  # Backing tensor is different if ndmin is not satisfied.
  self.assertIsNot(
      array_ops.array(zeros_list, copy=False, ndmin=2).data, zeros_list.data)
  self.assertIsNot(
      array_ops.array(zeros_list.data, copy=False, ndmin=2).data,
      zeros_list.data)
  self.assertIs(
      array_ops.array(zeros_list, copy=False, ndmin=1).data, zeros_list.data)
  self.assertIs(
      array_ops.array(zeros_list.data, copy=False, ndmin=1).data,
      zeros_list.data)

  # Backing tensor is different if dtype is not satisfied.
  self.assertIsNot(
      array_ops.array(zeros_list, copy=False, dtype=int).data,
      zeros_list.data)
  self.assertIsNot(
      array_ops.array(zeros_list.data, copy=False, dtype=int).data,
      zeros_list.data)
  self.assertIs(
      array_ops.array(zeros_list, copy=False, dtype=float).data,
      zeros_list.data)
  self.assertIs(
      array_ops.array(zeros_list.data, copy=False, dtype=float).data,
      zeros_list.data)
def logical_not(x):
  x = array_ops.array(x, dtype=np.bool_)
  return utils.tensor_to_ndarray(tf.logical_not(x.data))
def _logical_binary_op(tf_fun, x1, x2):
  x1 = array_ops.array(x1, dtype=np.bool_)
  x2 = array_ops.array(x2, dtype=np.bool_)
  return utils.tensor_to_ndarray(tf_fun(x1.data, x2.data))
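
# Usage sketch (not in the original source). Inputs are coerced to bool first,
# as in NumPy's logical ufuncs:
#
#   _logical_binary_op(tf.logical_and, [1, 0], [1, 1])  # -> [True, False]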
def count_nonzero(a, axis=None):
  return arrays.tensor_to_ndarray(
      tf.math.count_nonzero(array_ops.array(a).data, axis))
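
# Usage sketch (not in the original source):
#
#   count_nonzero([[0, 1], [2, 0]])          # -> 2
#   count_nonzero([[0, 1], [2, 0]], axis=0)  # -> [1, 1]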
def iscomplexobj(x):
  x = array_ops.array(x)
  return np.issubdtype(x.dtype, np.complexfloating)
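
# Usage sketch (not in the original source). The check is on the dtype, not
# the values:
#
#   iscomplexobj(1 + 2j)    # -> True
#   iscomplexobj([1., 2.])  # -> False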