def kron(a, b):  # pylint: disable=missing-function-docstring
  # pylint: disable=protected-access,g-complex-comprehension
  a, b = np_array_ops._promote_dtype(a, b)

  def _match_rank(x, other):
    # Left-pad `x`'s shape with 1s when its rank is below `other`'s.
    return np_utils.cond(
        x.ndim < other.ndim,
        lambda: np_array_ops.reshape(  # pylint: disable=g-long-lambda
            x, np_array_ops._pad_left_to(other.ndim, x.shape)),
        lambda: x)

  t_a = _match_rank(a, b)
  t_b = _match_rank(b, a)

  def _interleave_with_ones(shape, prepend):
    # Produce [1, s0, 1, s1, ...] when `prepend`, else [s0, 1, s1, 1, ...],
    # so the elementwise product of the two reshaped operands yields the
    # Kronecker block structure.
    ones = array_ops.ones_like(shape)
    pair = [ones, shape] if prepend else [shape, ones]
    return array_ops.reshape(array_ops.stack(pair, axis=1), [-1])

  a_shape = array_ops.shape(t_a)
  b_shape = array_ops.shape(t_b)
  a_reshaped = np_array_ops.reshape(t_a, _interleave_with_ones(a_shape, False))
  b_reshaped = np_array_ops.reshape(t_b, _interleave_with_ones(b_shape, True))
  return np_array_ops.reshape(a_reshaped * b_reshaped, a_shape * b_shape)
def f(a, b):  # pylint: disable=missing-docstring
  # Scalar operand: plain multiply. Otherwise contract `a`'s last axis
  # against `b`'s last axis (1-D `b`) or second-to-last axis (higher-rank).
  def _contract():
    return np_utils.cond(
        math_ops.equal(array_ops.rank(b), 1),
        lambda: math_ops.tensordot(a, b, axes=[[-1], [-1]]),
        lambda: math_ops.tensordot(a, b, axes=[[-1], [-2]]))

  either_scalar = np_utils.logical_or(
      math_ops.equal(array_ops.rank(a), 0),
      math_ops.equal(array_ops.rank(b), 0))
  return np_utils.cond(either_scalar, lambda: a * b, _contract)
def f(x1, x2):
  # True only when ranks match, shapes match, and all elements are equal;
  # any mismatch short-circuits to a constant False.
  def _false():
    return constant_op.constant(False)

  def _compare_shapes_then_values():
    shapes_match = np_utils.reduce_all(
        math_ops.equal(array_ops.shape(x1), array_ops.shape(x2)))
    return np_utils.cond(
        shapes_match,
        lambda: math_ops.reduce_all(math_ops.equal(x1, x2)),
        _false)

  return np_utils.cond(
      math_ops.equal(array_ops.rank(x1), array_ops.rank(x2)),
      _compare_shapes_then_values, _false)
def new_shape(_, old_shape):
  # Promote `old_shape` to rank 3: a scalar becomes [1, 1, 1], a vector
  # gets a leading and a trailing 1, and a matrix gets one trailing 1.
  ndims = array_ops.size(old_shape)

  def _rank_one_or_two():
    return np_utils.cond(
        math_ops.equal(ndims, 1),
        lambda: array_ops.pad(old_shape, [[1, 1]], constant_values=1),
        lambda: array_ops.pad(old_shape, [[0, 1]], constant_values=1))

  return np_utils.cond(
      math_ops.equal(ndims, 0),
      lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
      _rank_one_or_two)
def f(x1, x2):
  """Matmul-like product with in-graph rank dispatch.

  Contracts a 1-D `x2` with `tensordot(axes=1)`, a 1-D `x1` against
  `x2`'s second-to-last axis, and otherwise falls through to `matmul`.
  InvalidArgumentError is surfaced to callers as ValueError.
  """
  try:
    return np_utils.cond(
        math_ops.equal(array_ops.rank(x2), 1),
        lambda: math_ops.tensordot(x1, x2, axes=1),
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            math_ops.equal(array_ops.rank(x1), 1),
            lambda: math_ops.tensordot(  # pylint: disable=g-long-lambda
                x1, x2, axes=[[0], [-2]]),
            lambda: math_ops.matmul(x1, x2)))
  except errors.InvalidArgumentError as err:
    # Re-raise as ValueError while preserving the original traceback.
    # Python-3-native form (equivalent to the old six.reraise call),
    # consistent with the sibling matmul helper in this file.
    raise ValueError(str(err)).with_traceback(sys.exc_info()[2])
def f(x1, x2):
  """Matmul with rank dispatch; statically-known 2-D x 2-D skips the conds."""
  try:
    if x1._rank() == 2 and x2._rank() == 2:  # pylint: disable=protected-access
      # Fast path for known ranks.
      return gen_math_ops.mat_mul(x1, x2)

    def _by_x1_rank():
      # x2 is known not to be 1-D here; dispatch on x1's rank.
      return np_utils.cond(
          math_ops.equal(np_utils.tf_rank(x1), 1),
          lambda: math_ops.tensordot(x1, x2, axes=[[0], [-2]]),
          lambda: math_ops.matmul(x1, x2))

    return np_utils.cond(
        math_ops.equal(np_utils.tf_rank(x2), 1),
        lambda: math_ops.tensordot(x1, x2, axes=1), _by_x1_rank)
  except errors.InvalidArgumentError as err:
    raise ValueError(str(err)).with_traceback(sys.exc_info()[2])
def f(x1, x2):
  """Matmul with rank dispatch; statically-known 2-D x 2-D fast path.

  When both static ranks are 2, calls `matmul` directly; otherwise
  resolves ranks in-graph. InvalidArgumentError is surfaced as ValueError.
  """
  try:
    if x1.shape.rank == 2 and x2.shape.rank == 2:
      # Fast path for known ranks.
      return math_ops.matmul(x1, x2)
    return np_utils.cond(
        math_ops.equal(np_utils.tf_rank(x2), 1),
        lambda: math_ops.tensordot(x1, x2, axes=1),
        lambda: np_utils.cond(  # pylint: disable=g-long-lambda
            math_ops.equal(np_utils.tf_rank(x1), 1),
            lambda: math_ops.tensordot(  # pylint: disable=g-long-lambda
                x1, x2, axes=[[0], [-2]]),
            lambda: math_ops.matmul(x1, x2)))
  except errors.InvalidArgumentError as err:
    # Re-raise as ValueError while preserving the original traceback.
    # Python-3-native form (equivalent to the old six.reraise call),
    # consistent with the sibling matmul helper in this file.
    raise ValueError(str(err)).with_traceback(sys.exc_info()[2])
def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v).data
  v_rank = array_ops.rank(v)

  # Static check (when rank is known at trace time)...
  v.shape.with_rank_at_most(2)

  # ...and a runtime check for dynamically-ranked inputs.
  # TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
  # tracing time if the shape is known.
  control_flow_ops.Assert(
      np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
      [v_rank])

  def _diag(v, k):
    # 1-D input: build a matrix with `v` on the k-th diagonal. An empty `v`
    # maps to a |k| x |k| zero matrix.
    return np_utils.cond(
        math_ops.equal(array_ops.size(v), 0),
        lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: array_ops.matrix_diag(v, k=k))

  def _diag_part(v, k):
    # 2-D input: extract the k-th diagonal. Offsets outside the matrix are
    # rewritten to (empty matrix, 0) first, because matrix_diag_part rejects
    # out-of-range k.
    v_shape = array_ops.shape(v)
    v, k = np_utils.cond(
        np_utils.logical_or(
            np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
            np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
        ),
        lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0),
        lambda: (v, k))
    result = array_ops.matrix_diag_part(v, k=k)
    return result

  result = np_utils.cond(math_ops.equal(v_rank, 1), lambda: _diag(v, k),
                         lambda: _diag_part(v, k))
  return np_utils.tensor_to_ndarray(result)
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
  """Weighted average along `axis`; optionally also returns the weight sum."""
  if axis is not None and not isinstance(axis, six.integer_types):
    # TODO(wangpeng): Support tuple of ints as `axis`
    raise ValueError('`axis` must be an integer. Tuple of ints is not '
                     'supported yet. Got type: %s' % type(axis))
  a = np_array_ops.array(a)
  if weights is None:  # Treat all weights as 1
    # Unweighted path: promote integral dtypes to float, then reduce_mean.
    if not np.issubdtype(a.dtype, np.inexact):
      a = a.astype(
          np_utils.result_type(a.dtype, np_dtypes.default_float_type()))
    avg = math_ops.reduce_mean(a.data, axis=axis)
    if returned:
      # With unit weights, the weight sum is just the element count along
      # the reduced axis (or the total size when axis is None).
      if axis is None:
        weights_sum = array_ops.size(a.data)
      else:
        weights_sum = array_ops.shape(a.data)[axis]
      weights_sum = math_ops.cast(weights_sum, a.data.dtype)
  else:
    # Weighted path: compute a common output dtype for `a` and `weights`.
    if np.issubdtype(a.dtype, np.inexact):
      out_dtype = np_utils.result_type(a.dtype, weights)
    else:
      out_dtype = np_utils.result_type(a.dtype, weights,
                                       np_dtypes.default_float_type())
    a = np_array_ops.array(a, out_dtype).data
    weights = np_array_ops.array(weights, out_dtype).data

    def rank_equal_case():
      # `a` and `weights` must have identical shapes; reduce both along axis.
      control_flow_ops.Assert(
          math_ops.reduce_all(array_ops.shape(a) == array_ops.shape(weights)),
          [array_ops.shape(a), array_ops.shape(weights)])
      weights_sum = math_ops.reduce_sum(weights, axis=axis)
      avg = math_ops.reduce_sum(a * weights, axis=axis) / weights_sum
      return avg, weights_sum

    if axis is None:
      avg, weights_sum = rank_equal_case()
    else:

      def rank_not_equal_case():
        # 1-D `weights` contracted against the chosen axis of `a`.
        control_flow_ops.Assert(
            array_ops.rank(weights) == 1, [array_ops.rank(weights)])
        weights_sum = math_ops.reduce_sum(weights)
        axes = ops.convert_to_tensor([[axis], [0]])
        avg = math_ops.tensordot(a, weights, axes) / weights_sum
        return avg, weights_sum

      # We condition on rank rather than shape equality, because if we do the
      # latter, when the shapes are partially unknown but the ranks are known
      # and different, np_utils.cond will run shape checking on the true branch,
      # which will raise a shape-checking error.
      avg, weights_sum = np_utils.cond(
          math_ops.equal(array_ops.rank(a), array_ops.rank(weights)),
          rank_equal_case, rank_not_equal_case)

  avg = np_array_ops.array(avg)
  if returned:
    # NumPy returns the weight sum broadcast to the average's shape.
    weights_sum = np_array_ops.broadcast_to(weights_sum,
                                            array_ops.shape(avg.data))
    return avg, weights_sum
  return avg
def argsort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  # TODO(nareshmodi): make string tensors also work.
  if kind not in ('quicksort', 'stable'):
    raise ValueError(
        'Invalid value for argument `kind`. '
        'Only kind="quicksort" and kind="stable" are supported. '
        f'Received: kind={kind}')
  if order is not None:
    raise ValueError('The `order` argument is not supported. Pass order=None')
  is_stable = kind == 'stable'
  a = np_array_ops.array(a)

  def _sort_indices(t, sort_axis, stable_sort):
    # axis=None means argsort over the flattened input, as in NumPy.
    if sort_axis is None:
      t = array_ops.reshape(t, [-1])
      sort_axis = 0
    return sort_ops.argsort(t, sort_axis, stable=stable_sort)

  # A scalar input has a single trivial ordering: index 0.
  tf_ans = np_utils.cond(
      math_ops.equal(array_ops.rank(a), 0), lambda: constant_op.constant([0]),
      lambda: _sort_indices(a, axis, is_stable))
  return np_array_ops.array(tf_ans, dtype=np.intp)
def diagonal(a, offset=0, axis1=0, axis2=1):  # pylint: disable=missing-docstring
  a = asarray(a).data

  # Fast path: main diagonal of the trailing two axes needs no moveaxis and
  # no offset-range handling.
  maybe_rank = a.shape.rank
  if maybe_rank is not None and offset == 0 and (
      axis1 == maybe_rank - 2 or axis1 == -2) and (axis2 == maybe_rank - 1 or
                                                   axis2 == -1):
    return np_utils.tensor_to_ndarray(array_ops.matrix_diag_part(a))

  # General path: bring the two chosen axes to the last two positions so
  # matrix_diag_part can operate on them.
  a = moveaxis(np_utils.tensor_to_ndarray(a), (axis1, axis2), (-2, -1)).data

  a_shape = array_ops.shape(a)

  def _zeros():  # pylint: disable=missing-docstring
    # Empty-diagonal result with the correct batch shape.
    return (array_ops.zeros(
        array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype), 0)

  # All zeros since diag_part doesn't handle all possible k (aka offset).
  # Written this way since cond will run shape inference on both branches,
  # and diag_part shape inference will fail when offset is out of bounds.
  a, offset = np_utils.cond(
      np_utils.logical_or(
          np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
          np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
      ), _zeros, lambda: (a, offset))

  a = np_utils.tensor_to_ndarray(array_ops.matrix_diag_part(a, k=offset))
  return a
def f(x):
  # Promotes `x` to at least rank `n`. `n`, `new_shape` and `reshape` are
  # captured from the enclosing scope (this is a nested helper).
  # pylint: disable=g-long-lambda
  x = asarray(x)
  return asarray(
      np_utils.cond(
          np_utils.greater(n, array_ops.rank(x)),
          # Rank too small: reshape to the promoted shape computed by
          # `new_shape`; otherwise pass the backing tensor through unchanged.
          lambda: reshape(x, new_shape(n, array_ops.shape(x.data))).data,
          lambda: x.data))
def array(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Creates an ndarray with the contents of val.

  Args:
    val: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `val`. The type of the resulting
      ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`.
    copy: Determines whether to create a copy of the backing buffer. Since
      Tensors are immutable, a copy is made only if val is placed on a
      different device than the current one. Even if `copy` is False, a new
      Tensor may need to be built to satisfy `dtype` and `ndim`. This is used
      only if `val` is an ndarray or a Tensor.
    ndmin: The minimum rank of the returned array.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if isinstance(val, np_arrays.ndarray):
    result_t = val.data
  else:
    result_t = val

  if copy and isinstance(result_t, ops.Tensor):
    # Note: In eager mode, a copy of `result_t` is made only if it is not on
    # the context device.
    result_t = array_ops.identity(result_t)

  if not isinstance(result_t, ops.Tensor):
    if not dtype:
      dtype = np_utils.result_type(result_t)
    # We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
    # convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
    # while np.array allows them. We need to convert-then-cast.

    def maybe_data(x):
      # Unwrap ndarrays to their backing tensors before conversion.
      if isinstance(x, np_arrays.ndarray):
        return x.data
      return x

    # Handles lists of ndarrays
    result_t = nest.map_structure(maybe_data, result_t)
    result_t = np_arrays.convert_to_tensor(result_t)
    result_t = math_ops.cast(result_t, dtype=dtype)
  elif dtype:
    result_t = math_ops.cast(result_t, dtype)
  ndims = array_ops.rank(result_t)

  def true_fn():
    # Left-pad the shape with 1s so the result has rank `ndmin`.
    old_shape = array_ops.shape(result_t)
    new_shape = array_ops.concat(
        [array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
    return array_ops.reshape(result_t, new_shape)

  result_t = np_utils.cond(np_utils.greater(ndmin, ndims), true_fn,
                           lambda: result_t)
  return np_arrays.tensor_to_ndarray(result_t)
def hstack(tup):
  """Stacks arrays horizontally: axis 0 for 1-D inputs, axis 1 otherwise."""
  promoted = _promote_dtype(*[atleast_1d(a) for a in tup])  # pylint: disable=protected-access
  tensors = [
      t.data if isinstance(t, np_arrays.ndarray) else t for t in promoted
  ]

  def _concat_on(axis):
    return lambda: array_ops.concat(tensors, axis=axis)

  # All inputs went through atleast_1d, so the first operand's rank decides
  # the concat axis.
  first_rank = array_ops.rank(tensors[0])
  return np_utils.cond(math_ops.equal(first_rank, 1), _concat_on(0),
                       _concat_on(1))
def _diag_part(v, k):
  # Extract the k-th diagonal of a matrix. Offsets that fall entirely
  # outside the matrix are remapped to (empty matrix, 0) first, because
  # matrix_diag_part rejects out-of-range k.
  shape_v = array_ops.shape(v)
  out_of_range = np_utils.logical_or(
      np_utils.less_equal(k, -1 * np_utils.getitem(shape_v, 0)),
      np_utils.greater_equal(k, np_utils.getitem(shape_v, 1)),
  )
  v, k = np_utils.cond(
      out_of_range,
      lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0),
      lambda: (v, k))
  return array_ops.matrix_diag_part(v, k=k)
def maybe_pad_0(a, size_of_last_dim):
  # Appends a single zero along the last axis when that axis has size 2;
  # otherwise returns `a` unchanged. (Presumably pads 2-vectors up to
  # 3-vectors for cross-product handling — confirm against callers.)
  def _append_zero(t):
    paddings = array_ops.concat([
        array_ops.zeros([array_ops.rank(t) - 1, 2], dtypes.int32),
        constant_op.constant([[0, 1]], dtypes.int32)
    ], axis=0)
    return array_ops.pad(t, paddings)

  return np_utils.cond(
      math_ops.equal(size_of_last_dim, 2), lambda: _append_zero(a), lambda: a)
def maybe_move_axis_to_last(a, axis):
  # Returns `a` unchanged when `axis` is already last; otherwise transposes
  # so that `axis` becomes the final axis (other axes keep their order).
  def _rotate_to_end(t, ax):
    perm = array_ops.concat([
        math_ops.range(ax),
        math_ops.range(ax + 1, array_ops.rank(t)), [ax]
    ], axis=0)
    return array_ops.transpose(t, perm)

  return np_utils.cond(
      axis == np_utils.subtract(array_ops.rank(a), 1), lambda: a,
      lambda: _rotate_to_end(a, axis))
def argsort(a, axis=-1, kind='quicksort', order=None):  # pylint: disable=missing-docstring
  # TODO(nareshmodi): make string tensors also work.
  if kind not in ('quicksort', 'stable'):
    raise ValueError("Only 'quicksort' and 'stable' arguments are supported.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")
  want_stable = kind == 'stable'
  a = np_array_ops.array(a).data

  def _sort_indices(t, sort_axis, stable_sort):
    # axis=None means argsort over the flattened input, as in NumPy.
    if sort_axis is None:
      t = array_ops.reshape(t, [-1])
      sort_axis = 0
    return sort_ops.argsort(t, sort_axis, stable=stable_sort)

  # A scalar input has a single trivial ordering: index 0.
  tf_ans = np_utils.cond(
      math_ops.equal(array_ops.rank(a), 0), lambda: constant_op.constant([0]),
      lambda: _sort_indices(a, axis, want_stable))
  return np_array_ops.array(tf_ans, dtype=np.intp)
def f(a, b):  # pylint: disable=missing-docstring
  # Cross-product body. `axis`, `axisa`, `axisb`, `axisc` are captured from
  # the enclosing scope (this is a nested helper).
  # We can't assign to captured variable `axisa`, so make a new variable
  if axis is None:
    axis_a = axisa
    axis_b = axisb
    axis_c = axisc
  else:
    axis_a = axis
    axis_b = axis
    axis_c = axis
  # Normalize negative axes relative to each operand's rank.
  if axis_a < 0:
    axis_a = np_utils.add(axis_a, array_ops.rank(a))
  if axis_b < 0:
    axis_b = np_utils.add(axis_b, array_ops.rank(b))

  def maybe_move_axis_to_last(a, axis):
    # Transposes the operand so the vector axis is last (no-op if already).

    def move_axis_to_last(a, axis):
      return array_ops.transpose(
          a,
          array_ops.concat([
              math_ops.range(axis),
              math_ops.range(axis + 1, array_ops.rank(a)), [axis]
          ], axis=0))

    return np_utils.cond(
        axis == np_utils.subtract(array_ops.rank(a), 1), lambda: a,
        lambda: move_axis_to_last(a, axis))

  a = maybe_move_axis_to_last(a, axis_a)
  b = maybe_move_axis_to_last(b, axis_b)

  a_dim = np_utils.getitem(array_ops.shape(a), -1)
  b_dim = np_utils.getitem(array_ops.shape(b), -1)

  def maybe_pad_0(a, size_of_last_dim):
    # Pads 2-vectors with a trailing zero so math_ops.cross sees 3-vectors.

    def pad_0(a):
      return array_ops.pad(
          a,
          array_ops.concat([
              array_ops.zeros([array_ops.rank(a) - 1, 2], dtypes.int32),
              constant_op.constant([[0, 1]], dtypes.int32)
          ], axis=0))

    return np_utils.cond(
        math_ops.equal(size_of_last_dim, 2), lambda: pad_0(a), lambda: a)

  a = maybe_pad_0(a, a_dim)
  b = maybe_pad_0(b, b_dim)

  c = math_ops.cross(*np_utils.tf_broadcast(a, b))

  if axis_c < 0:
    axis_c = np_utils.add(axis_c, array_ops.rank(c))

  def move_last_to_axis(a, axis):
    # Inverse of move_axis_to_last: puts the last axis back at `axis`.
    r = array_ops.rank(a)
    return array_ops.transpose(
        a,
        array_ops.concat(
            [math_ops.range(axis), [r - 1],
             math_ops.range(axis, r - 1)], axis=0))

  # 2-vector x 2-vector yields a scalar per pair (the z component of the
  # padded 3-D cross product); otherwise restore the requested output axis.
  c = np_utils.cond(
      (a_dim == 2) & (b_dim == 2),
      lambda: c[..., 2],
      lambda: np_utils.cond(  # pylint: disable=g-long-lambda
          axis_c == np_utils.subtract(array_ops.rank(c), 1),
          lambda: c,
          lambda: move_last_to_axis(c, axis_c)))

  return c
def f(a, b):
  # Scalar operand: elementwise multiply. Otherwise contract the last axes
  # of both operands.
  either_scalar = np_utils.logical_or(
      math_ops.equal(array_ops.rank(a), 0),
      math_ops.equal(array_ops.rank(b), 0))
  return np_utils.cond(
      either_scalar,
      lambda: a * b,
      lambda: math_ops.tensordot(a, b, axes=[[-1], [-1]]))
def _diag(v, k):
  # Builds a matrix with `v` on the k-th diagonal; an empty `v` produces
  # the |k| x |k| zero matrix directly (matrix_diag is bypassed for it).
  def _empty_matrix():
    return array_ops.zeros([abs(k), abs(k)], dtype=v.dtype)

  return np_utils.cond(
      math_ops.equal(array_ops.size(v), 0), _empty_matrix,
      lambda: array_ops.matrix_diag(v, k=k))