def iscomplexobj(x):
  """Returns True if the array-like `x` has a complex floating dtype."""
  arr = np_array_ops.array(x)
  numpy_dtype = arr.dtype.as_numpy_dtype
  return np.issubdtype(numpy_dtype, np.complexfloating)
def _logical_binary_op(tf_fun, x1, x2):
  """Casts both operands to boolean arrays, then applies `tf_fun` to them."""
  lhs = np_array_ops.array(x1, dtype=np.bool_)
  rhs = np_array_ops.array(x2, dtype=np.bool_)
  return tf_fun(lhs, rhs)
def count_nonzero(a, axis=None):
  """Counts the non-zero elements of `a`, optionally reduced along `axis`."""
  arr = np_array_ops.array(a)
  return math_ops.count_nonzero(arr, axis)
def average(a, axis=None, weights=None, returned=False):
  """Computes the (optionally weighted) average of `a` along `axis`.

  Args:
    a: array-like input, converted via `np_array_ops.array`.
    axis: an integer axis to reduce over, or None to reduce over all
      elements. Tuples of ints are rejected (see the TODO below).
    weights: optional array of weights. May have the same rank as `a`
      (elementwise weighting) or be rank-1 along `axis`.
    returned: if True, also returns the sum of the weights, broadcast to
      the shape of the average.

  Returns:
    The average, or a `(average, weights_sum)` pair when `returned` is True.

  Raises:
    ValueError: if `axis` is neither None nor an integer.
  """
  if axis is not None and not isinstance(axis, six.integer_types):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError('`axis` must be an integer. Tuple of ints is not '
                     'supported yet. Got type: %s' % type(axis))
  a = np_array_ops.array(a)
  if weights is None:  # Treat all weights as 1
    # Non-inexact (e.g. integer) inputs are promoted to a float type so the
    # mean is not truncated.
    if not np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
      a = a.astype(
          np_utils.result_type(a.dtype, np_dtypes.default_float_type()))
    avg = math_ops.reduce_mean(a, axis=axis)
    if returned:
      # With unit weights, the weight sum is just the element count along the
      # reduced dimension(s).
      if axis is None:
        weights_sum = array_ops.size(a)
      else:
        weights_sum = array_ops.shape(a)[axis]
      weights_sum = math_ops.cast(weights_sum, a.dtype)
  else:
    # Promote `a` and `weights` to one common output dtype before combining.
    if np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
      out_dtype = np_utils.result_type(a.dtype, weights)
    else:
      out_dtype = np_utils.result_type(a.dtype, weights,
                                       np_dtypes.default_float_type())
    a = np_array_ops.array(a, out_dtype)
    weights = np_array_ops.array(weights, out_dtype)

    def rank_equal_case():
      # Weights have the same shape as `a`: elementwise weighted mean.
      control_flow_ops.Assert(
          math_ops.reduce_all(array_ops.shape(a) == array_ops.shape(weights)),
          [array_ops.shape(a), array_ops.shape(weights)])
      weights_sum = math_ops.reduce_sum(weights, axis=axis)
      avg = math_ops.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum

    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:

      def rank_not_equal_case():
        # Weights must be rank-1; contract them against `axis` of `a` via
        # tensordot.
        control_flow_ops.Assert(
            array_ops.rank(weights) == 1, [array_ops.rank(weights)])
        weights_sum = math_ops.reduce_sum(weights)
        axes = ops.convert_to_tensor([[axis], [0]])
        avg = math_ops.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum

      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, np_utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = np_utils.cond(
          math_ops.equal(array_ops.rank(a), array_ops.rank(weights)),
          rank_equal_case, rank_not_equal_case)

  avg = np_array_ops.array(avg)
  if returned:
    # Match numpy: the returned weight sum is broadcast to the average's shape.
    weights_sum = np_array_ops.broadcast_to(weights_sum, array_ops.shape(avg))
    return avg, weights_sum
  return avg
def logical_not(x):
  """Elementwise logical NOT of `x`, after casting it to a boolean array."""
  bool_arr = np_array_ops.array(x, dtype=np.bool_)
  return math_ops.logical_not(bool_arr)
def count_nonzero(a, axis=None):
  """Counts the non-zero elements of `a`, optionally reduced along `axis`."""
  tensor = np_array_ops.array(a).data
  count = math_ops.count_nonzero(tensor, axis)
  return np_arrays.tensor_to_ndarray(count)
def logical_not(x):
  """Elementwise logical NOT of `x`, after casting it to a boolean array."""
  bool_arr = np_array_ops.array(x, dtype=np.bool_)
  negated = math_ops.logical_not(bool_arr.data)
  return np_utils.tensor_to_ndarray(negated)
def _logical_binary_op(tf_fun, x1, x2):
  """Casts both operands to boolean arrays, then applies `tf_fun` to them."""
  lhs = np_array_ops.array(x1, dtype=np.bool_)
  rhs = np_array_ops.array(x2, dtype=np.bool_)
  return np_utils.tensor_to_ndarray(tf_fun(lhs.data, rhs.data))
def testArray(self):
  """Checks np_array_ops.array against np.array, plus copy=False aliasing."""
  ndmin_values = [0, 1, 2, 5]
  for arr, dtype, ndmin, copy in itertools.product(self.all_arrays,
                                                   self.all_types,
                                                   ndmin_values,
                                                   [True, False]):
    self.match(
        np_array_ops.array(arr, dtype=dtype, ndmin=ndmin, copy=copy),
        np.array(arr, dtype=dtype, ndmin=ndmin, copy=copy))

  zeros_list = np_array_ops.zeros(5)

  # TODO(srbs): Test that copy=True when context.device is different from
  # tensor device copies the tensor.

  def check_backing_shared(expect_shared, **kwargs):
    # Asserts whether array(..., copy=False, **kwargs) reuses zeros_list's
    # backing tensor, for both ndarray and raw-tensor inputs.
    assert_fn = self.assertIs if expect_shared else self.assertIsNot
    assert_fn(
        np_array_ops.array(zeros_list, copy=False, **kwargs).data,
        zeros_list.data)
    assert_fn(
        np_array_ops.array(zeros_list.data, copy=False, **kwargs).data,
        zeros_list.data)

  # Backing tensor is the same if copy=False, other attributes being None.
  check_backing_shared(True)
  # Backing tensor is different if ndmin is not satisfied.
  check_backing_shared(False, ndmin=2)
  check_backing_shared(True, ndmin=1)
  # Backing tensor is different if dtype is not satisfied.
  check_backing_shared(False, dtype=int)
  check_backing_shared(True, dtype=float)