def eye(N, M=None, k=0, dtype=float):  # pylint: disable=invalid-name,missing-docstring
  if dtype:
    dtype = np_utils.result_type(dtype)
  if not M:
    M = N
  # Making sure N, M and k are `int`
  N = int(N)
  M = int(M)
  k = int(k)
  if k >= M or -k >= N:
    # tf.linalg.diag will raise an error in this case
    return zeros([N, M], dtype=dtype)
  if k == 0:
    return np_arrays.tensor_to_ndarray(linalg_ops.eye(N, M, dtype=dtype))
  # We need the precise length, otherwise tf.linalg.diag will raise an error
  diag_len = min(N, M)
  if k > 0:
    if N >= M:
      diag_len -= k
    elif N + k > M:
      diag_len = M - k
  elif k <= 0:
    if M >= N:
      diag_len += k
    elif M - k > N:
      diag_len = N + k
  diagonal_ = array_ops.ones([diag_len], dtype=dtype)
  return np_arrays.tensor_to_ndarray(
      array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k))

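# Illustrative usage sketch (values assume the default float dtype). A
# positive `k` places the ones on a superdiagonal; `diag_len` above is sized
# so that tf.linalg.diag accepts it:
#   eye(3, 4, k=1)
#   # => [[0., 1., 0., 0.],
#   #     [0., 0., 1., 0.],
#   #     [0., 0., 0., 1.]]
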
def linspace(  # pylint: disable=missing-docstring
    start,
    stop,
    num=50,
    endpoint=True,
    retstep=False,
    dtype=float,
    axis=0):
  if dtype:
    dtype = np_utils.result_type(dtype)
  start = np_array_ops.array(start, dtype=dtype).data
  stop = np_array_ops.array(stop, dtype=dtype).data
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  step = ops.convert_to_tensor(np.nan)
  if endpoint:
    result = math_ops.linspace(start, stop, num, axis=axis)
    if num > 1:
      step = (stop - start) / (num - 1)
  else:
    # math_ops.linspace does not support endpoint=False, so we manually
    # handle it here.
    if num > 1:
      step = ((stop - start) / num)
      new_stop = math_ops.cast(stop, step.dtype) - step
      start = math_ops.cast(start, new_stop.dtype)
      result = math_ops.linspace(start, new_stop, num, axis=axis)
    else:
      result = math_ops.linspace(start, stop, num, axis=axis)
  if dtype:
    result = math_ops.cast(result, dtype)
  if retstep:
    return (np_arrays.tensor_to_ndarray(result),
            np_arrays.tensor_to_ndarray(step))
  else:
    return np_arrays.tensor_to_ndarray(result)

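# Illustrative usage sketch (values assume the default float dtype):
#   linspace(0., 1., num=5)                  # => [0., 0.25, 0.5, 0.75, 1.]
#   linspace(0., 1., num=5, endpoint=False)  # => [0., 0.2, 0.4, 0.6, 0.8]
#   linspace(0., 1., num=5, retstep=True)    # also returns the step, 0.25
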
def concatenate(arys, axis=0):
  if not isinstance(arys, (list, tuple)):
    arys = [arys]
  if not arys:
    raise ValueError('Need at least one array to concatenate.')
  dtype = np_utils.result_type(*arys)
  arys = [np_array_ops.array(array, dtype=dtype).data for array in arys]
  return np_arrays.tensor_to_ndarray(array_ops.concat(arys, axis))

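# Illustrative usage sketch; the result dtype is the common type of all
# inputs, per `np_utils.result_type`:
#   concatenate(([1, 2], [3., 4.]))                # => [1., 2., 3., 4.]
#   concatenate(([[1], [2]], [[3], [4]]), axis=1)  # => [[1, 3], [2, 4]]
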
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
  dtype = np_utils.result_type(start, stop, dtype)
  result = linspace(
      start, stop, num=num, endpoint=endpoint, dtype=dtype, axis=axis).data
  result = math_ops.pow(math_ops.cast(base, result.dtype), result)
  if dtype:
    result = math_ops.cast(result, dtype)
  return np_arrays.tensor_to_ndarray(result)

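# Illustrative usage sketch. The exponents come from `linspace`, so the
# samples are base ** linspace(start, stop, num):
#   logspace(0., 3., num=4)           # => [1., 10., 100., 1000.]
#   logspace(0., 3., num=4, base=2.)  # => [1., 2., 4., 8.]
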
def array(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Creates an ndarray with the contents of val.

  Args:
    val: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `val`. The type of the resulting
      ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`.
    copy: Determines whether to create a copy of the backing buffer. Since
      Tensors are immutable, a copy is made only if val is placed on a
      different device than the current one. Even if `copy` is False, a new
      Tensor may need to be built to satisfy `dtype` and `ndmin`. This is used
      only if `val` is an ndarray or a Tensor.
    ndmin: The minimum rank of the returned array.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if isinstance(val, np_arrays.ndarray):
    result_t = val.data
  else:
    result_t = val

  if copy and isinstance(result_t, ops.Tensor):
    # Note: In eager mode, a copy of `result_t` is made only if it is not on
    # the context device.
    result_t = array_ops.identity(result_t)

  if not isinstance(result_t, ops.Tensor):
    if not dtype:
      dtype = np_utils.result_type(result_t)
    # We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
    # convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
    # while np.array allows them. We need to convert-then-cast.
    def maybe_data(x):
      if isinstance(x, np_arrays.ndarray):
        return x.data
      return x

    # Handles lists of ndarrays
    result_t = nest.map_structure(maybe_data, result_t)
    result_t = np_arrays.convert_to_tensor(result_t)
    result_t = math_ops.cast(result_t, dtype=dtype)
  elif dtype:
    result_t = math_ops.cast(result_t, dtype)
  ndims = array_ops.rank(result_t)

  def true_fn():
    old_shape = array_ops.shape(result_t)
    new_shape = array_ops.concat(
        [array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
    return array_ops.reshape(result_t, new_shape)

  result_t = np_utils.cond(
      np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
  return np_arrays.tensor_to_ndarray(result_t)

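# Illustrative usage sketch. Incompatible (value, dtype) pairs are handled by
# convert-then-cast, and `ndmin` left-pads the shape with ones:
#   array(5.5, dtype=int)   # => 5
#   array([1, 2], ndmin=2)  # => [[1, 2]]
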
def tile(a, reps):  # pylint: disable=missing-function-docstring
  a = np_array_ops.array(a).data
  reps = np_array_ops.array(reps, dtype=dtypes.int32).reshape([-1]).data

  a_rank = array_ops.rank(a)
  reps_size = array_ops.size(reps)
  reps = array_ops.pad(
      reps, [[math_ops.maximum(a_rank - reps_size, 0), 0]], constant_values=1)
  a_shape = array_ops.pad(
      array_ops.shape(a), [[math_ops.maximum(reps_size - a_rank, 0), 0]],
      constant_values=1)
  a = array_ops.reshape(a, a_shape)

  return np_arrays.tensor_to_ndarray(array_ops.tile(a, reps))

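# Illustrative usage sketch. Both `reps` and the input shape are left-padded
# with ones, so the result rank is max(rank(a), len(reps)), matching NumPy:
#   tile([1, 2], [2, 2])
#   # => [[1, 2, 1, 2],
#   #     [1, 2, 1, 2]]
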
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')
  a = asarray(a).data
  dtype = dtype or np_utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  return np_arrays.tensor_to_ndarray(
      array_ops.broadcast_to(fill_value.data, array_ops.shape(a)))

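# Illustrative usage sketch (dtype defaults to that of `a` unless overridden):
#   full_like([[1, 2], [3, 4]], 7)               # => [[7, 7], [7, 7]]
#   full_like([[1, 2], [3, 4]], 7, dtype=float)  # => [[7., 7.], [7., 7.]]
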
def _fetch_jvp(tensor):
  # Note: this is a closure; `self`, `unconnected_gradients` and
  # `UnconnectedGradients` come from the enclosing `jvp` method.
  if hasattr(tensor, "handle"):
    unwrapped_tensor = ops.convert_to_tensor(tensor.handle)
  else:
    if isinstance(tensor, np_arrays.ndarray):
      unwrapped_tensor = tensor.data
    else:
      unwrapped_tensor = tensor
  result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator,
                                                   unwrapped_tensor)
  if result is None and unconnected_gradients == UnconnectedGradients.ZERO:
    result = array_ops.zeros_like(tensor)
  if result is not None and isinstance(tensor, np_arrays.ndarray):
    return np_arrays.tensor_to_ndarray(result)
  return result

def zeros(shape, dtype=float):  # pylint: disable=redefined-outer-name
  """Returns an ndarray with the given shape and type filled with zeros.

  Args:
    shape: A fully defined shape. Could be
      - NumPy array or a python scalar, list or tuple of integers,
      - TensorFlow tensor/ndarray of integer type and rank <= 1.
    dtype: Optional, defaults to float. The type of the resulting ndarray.
      Could be a python type, a NumPy type or a TensorFlow `DType`.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if isinstance(shape, np_arrays.ndarray):
    shape = shape.data
  return np_arrays.tensor_to_ndarray(array_ops.zeros(shape, dtype=dtype))

def geomspace(start, stop, num=50, endpoint=True, dtype=float):  # pylint: disable=missing-docstring
  if dtype:
    dtype = np_utils.result_type(dtype)
  if num < 0:
    raise ValueError('Number of samples {} must be non-negative.'.format(num))
  if not num:
    return empty([0])
  step = 1.
  if endpoint:
    if num > 1:
      step = math_ops.pow((stop / start), 1 / (num - 1))
  else:
    step = math_ops.pow((stop / start), 1 / num)
  # Ensure `step` is a tensor so that `step.dtype` below also works when
  # `step` is still the Python float 1. (i.e. endpoint=True and num == 1).
  step = ops.convert_to_tensor(step)
  result = math_ops.cast(math_ops.range(num), step.dtype)
  result = math_ops.pow(step, result)
  result = math_ops.multiply(result, start)
  if dtype:
    result = math_ops.cast(result, dtype=dtype)
  return np_arrays.tensor_to_ndarray(result)

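# Illustrative usage sketch. With endpoint=True the ratio between consecutive
# samples is (stop / start) ** (1 / (num - 1)):
#   geomspace(1., 1000., num=4)  # => [1., 10., 100., 1000.]
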
def ones_like(a, dtype=None):
  """Returns an array of ones with the shape and type of the input array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the input array. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.
  """
  if isinstance(a, np_arrays.ndarray):
    a = a.data
  if dtype is None:
    dtype = np_utils.result_type(a)
  else:
    dtype = np_utils.result_type(dtype)
  return np_arrays.tensor_to_ndarray(array_ops.ones_like(a, dtype))

def arange(start, stop=None, step=1, dtype=None):
  """Returns `step`-separated values in the range [start, stop).

  Args:
    start: Start of the interval. Included in the range.
    stop: End of the interval. If not specified, `start` is treated as 0 and
      `start` value is used as `stop`. If specified, it is not included in the
      range if `step` is integer. When `step` is floating point, it may or may
      not be included.
    step: The difference between 2 consecutive values in the output range. It
      is recommended to use `linspace` instead of using non-integer values for
      `step`.
    dtype: Optional. Type of the resulting ndarray. Could be a python type, a
      NumPy type or a TensorFlow `DType`. If not provided, the largest type of
      `start`, `stop`, `step` is used.

  Raises:
    ValueError: If step is zero.
  """
  if not step:
    raise ValueError('step must be non-zero.')
  if dtype:
    dtype = np_utils.result_type(dtype)
  else:
    if stop is None:
      dtype = np_utils.result_type(start, step)
    else:
      dtype = np_utils.result_type(start, step, stop)
  if step > 0 and ((stop is not None and start > stop) or
                   (stop is None and start < 0)):
    return array([], dtype=dtype)
  if step < 0 and ((stop is not None and start < stop) or
                   (stop is None and start > 0)):
    return array([], dtype=dtype)
  # TODO(srbs): There are some bugs when start or stop is float type and dtype
  # is integer type.
  return np_arrays.tensor_to_ndarray(
      math_ops.cast(
          math_ops.range(start, limit=stop, delta=step), dtype=dtype))

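# Illustrative usage sketch:
#   arange(5)         # => [0, 1, 2, 3, 4]
#   arange(0, 10, 3)  # => [0, 3, 6, 9]
#   arange(5, 0)      # => []  (empty: start > stop with a positive step)
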
def full(shape, fill_value, dtype=None):  # pylint: disable=redefined-outer-name
  """Returns an array with given shape and dtype filled with `fill_value`.

  Args:
    shape: A valid shape object. Could be a native python object or an object
      of type ndarray, numpy.ndarray or tf.TensorShape.
    fill_value: array_like. Could be an ndarray, a Tensor or any object that
      can be converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `fill_value`. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `fill_value` can not be broadcast to shape `shape`.
  """
  fill_value = asarray(fill_value, dtype=dtype)
  if np_utils.isscalar(shape):
    shape = array_ops.reshape(shape, [1])
  return np_arrays.tensor_to_ndarray(
      array_ops.broadcast_to(fill_value.data, shape))

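# Illustrative usage sketch. `fill_value` is broadcast to `shape`, so
# non-scalar fill values also work when they broadcast cleanly:
#   full([2, 3], 7)       # => [[7, 7, 7], [7, 7, 7]]
#   full([2, 2], [1, 2])  # => [[1, 2], [1, 2]]
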
def zeros_like(a, dtype=None):
  """Returns an array of zeros with the shape and type of the input array.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can be
      converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the input array. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.
  """
  if isinstance(a, np_arrays.ndarray):
    a = a.data
  if dtype is None:
    # We need to let np_utils.result_type decide the dtype, not tf.zeros_like.
    dtype = np_utils.result_type(a)
  else:
    # TF and numpy have different interpretations of Python types such as
    # `float`, so we let `np_utils.result_type` decide.
    dtype = np_utils.result_type(dtype)
  dtype = dtypes.as_dtype(dtype)  # Work around b/149877262
  return np_arrays.tensor_to_ndarray(array_ops.zeros_like(a, dtype))

def _pfor_impl(loop_fn,
               iters,
               fallback_to_while_loop,
               parallel_iterations=None,
               pfor_config=None):
  """Implementation of pfor."""
  assert not context.executing_eagerly()
  loop_fn_has_config = _loop_fn_has_config(loop_fn)
  existing_ops = set(ops.get_default_graph().get_operations())
  # Run the loop body
  with ops.name_scope("loop_body"):
    loop_var = array_ops.placeholder_with_default(0, shape=[])
    if loop_fn_has_config:
      if pfor_config is None:
        pfor_config = PForConfig()
        pfor_config._set_iters(iters)  # pylint: disable=protected-access
      loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
    else:
      assert pfor_config is None
      loop_fn_outputs = loop_fn(loop_var)

    # Convert outputs to Tensor if needed.
    rewrap_as_ndarray = False
    tmp_loop_fn_outputs = []
    for loop_fn_output in nest.flatten(loop_fn_outputs):
      if (loop_fn_output is not None and not isinstance(
          loop_fn_output,
          (ops.Operation, ops.Tensor, sparse_tensor.SparseTensor))):
        if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
          logging.warn(
              "Converting %s to a dense representation may make it slow."
              " Alternatively, output the indices and values of the"
              " IndexedSlices separately, and handle the vectorized"
              " outputs directly." % loop_fn_output)
          loop_fn_output = ops.convert_to_tensor(loop_fn_output)
        elif isinstance(loop_fn_output, np_arrays.ndarray):
          loop_fn_output = loop_fn_output.data
          rewrap_as_ndarray = True
        else:
          loop_fn_output = ops.convert_to_tensor(loop_fn_output)
      tmp_loop_fn_outputs.append(loop_fn_output)
    loop_fn_outputs = nest.pack_sequence_as(loop_fn_outputs,
                                            tmp_loop_fn_outputs)

  new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
  iters = ops.convert_to_tensor(iters)
  if parallel_iterations is not None:
    if parallel_iterations < 1:
      raise ValueError(
          "parallel_iterations must be None or a positive integer")
    if parallel_iterations == 1:
      raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
    iters_value = tensor_util.constant_value(iters)
    if iters_value is not None and iters_value < parallel_iterations:
      parallel_iterations = None
  if parallel_iterations is None:
    with ops.name_scope("pfor"):
      converter = PFor(loop_var, iters, new_ops,
                       fallback_to_while_loop=fallback_to_while_loop,
                       pfor_config=pfor_config)
      outputs = []
      for loop_fn_output in nest.flatten(loop_fn_outputs):
        output = converter.convert(loop_fn_output)
        if rewrap_as_ndarray:
          output = np_arrays.tensor_to_ndarray(output)
        outputs.append(output)
      return nest.pack_sequence_as(loop_fn_outputs, outputs)
  else:
    if pfor_config is not None and pfor_config._has_reductions():  # pylint: disable=protected-access
      raise ValueError("Setting parallel_iterations currently unsupported if"
                       " reductions across iterations are performed.")
    num_tiled_iterations = iters // parallel_iterations
    num_remaining_iterations = iters % parallel_iterations
    # TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
    # a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"): converter = PFor(loop_var, num_remaining_iterations, new_ops, fallback_to_while_loop=fallback_to_while_loop, pfor_config=pfor_config) remaining_outputs = [] flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs) for loop_fn_output in flattened_loop_fn_outputs: output = converter.convert(loop_fn_output) if rewrap_as_ndarray: output = np_arrays.tensor_to_ndarray(output) remaining_outputs.append(output) with ops.name_scope("pfor_tiled"): loop_fn_dtypes = [ ops.convert_to_tensor(x).dtype for x in flattened_loop_fn_outputs ] def tiled_loop_body(j): offset = j * parallel_iterations + num_remaining_iterations def tiled_loop_fn(i, pfor_config=None): if loop_fn_has_config: return nest.flatten( loop_fn(i + offset, pfor_config=pfor_config)) else: return nest.flatten(loop_fn(i + offset)) return _pfor_impl( tiled_loop_fn, parallel_iterations, fallback_to_while_loop=fallback_to_while_loop, pfor_config=pfor_config) tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes, num_tiled_iterations, parallel_iterations=1) tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs] with ops.name_scope("pfor"): iters_value = tensor_util.constant_value(iters) if iters_value is None or iters_value % parallel_iterations: outputs = control_flow_ops.cond( math_ops.equal(num_remaining_iterations, 0), lambda: tiled_outputs, lambda: [ array_ops.concat([x, y], axis=0) for x, y in zip(remaining_outputs, tiled_outputs) ]) else: outputs = tiled_outputs flattened_outputs = nest.flatten(outputs) if rewrap_as_ndarray: flattened_outputs = [ np_arrays.tensor_to_ndarray(x) for x in flattened_outputs ] return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
def count_nonzero(a, axis=None):
  return np_arrays.tensor_to_ndarray(
      math_ops.count_nonzero(np_array_ops.array(a).data, axis))

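# Illustrative usage sketch:
#   count_nonzero([[0, 1], [2, 0]])          # => 2
#   count_nonzero([[0, 1], [2, 0]], axis=0)  # => [1, 1]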