#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Experimental Numpy backend."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import numpy as np

import tensorflow as tf

from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'constant',
]


constant = utils.copy_docstring(
    tf.compat.v1.initializers.constant,
    lambda value=0, dtype=tf.dtypes.float32, verify_shape=False: (  # pylint: disable=g-long-lambda
        lambda shape, dtype=None, partition_info=None, verify_shape=None: (  # pylint: disable=g-long-lambda
            np.ones(shape, dtype=dtype) * value)))
def _vlog(*_, **__):  # pylint: disable=unused-argument
  pass


def _warn(*_, **__):  # pylint: disable=unused-argument
  pass


def _warning(*_, **__):  # pylint: disable=unused-argument
  pass


# The remaining `tf.compat.v1.logging` handlers wired up below follow the same
# no-op pattern: accept any arguments, do nothing.
def _TaskLevelStatusMessage(*_, **__): pass  # pylint: disable=invalid-name
def _debug(*_, **__): pass
def _error(*_, **__): pass
def _fatal(*_, **__): pass
def _flush(*_, **__): pass
def _get_verbosity(*_, **__): pass
def _info(*_, **__): pass
def _log(*_, **__): pass


# --- Begin Public Functions --------------------------------------------------

TaskLevelStatusMessage = utils.copy_docstring(  # pylint: disable=invalid-name
    tf.compat.v1.logging.TaskLevelStatusMessage,
    _TaskLevelStatusMessage)

debug = utils.copy_docstring(tf.compat.v1.logging.debug, _debug)

error = utils.copy_docstring(tf.compat.v1.logging.error, _error)

fatal = utils.copy_docstring(tf.compat.v1.logging.fatal, _fatal)

flush = utils.copy_docstring(tf.compat.v1.logging.flush, _flush)

get_verbosity = utils.copy_docstring(tf.compat.v1.logging.get_verbosity,
                                     _get_verbosity)

info = utils.copy_docstring(tf.compat.v1.logging.info, _info)

log = utils.copy_docstring(tf.compat.v1.logging.log, _log)
def _argsort(values, axis=-1, direction='ASCENDING', stable=False, name=None):  # pylint: disable=unused-argument
  """Numpy implementation of `tf.argsort`."""
  if direction == 'ASCENDING':
    pass
  elif direction == 'DESCENDING':
    values = np.negative(values)
  else:
    raise ValueError('Unrecognized direction: {}.'.format(direction))
  return np.argsort(values, axis, kind='stable' if stable else 'quicksort')


def _sort(values, axis=-1, direction='ASCENDING', stable=False, name=None):  # pylint: disable=unused-argument
  """Numpy implementation of `tf.sort`."""
  if direction == 'ASCENDING':
    pass
  elif direction == 'DESCENDING':
    values = np.negative(values)
  else:
    raise ValueError('Unrecognized direction: {}.'.format(direction))
  result = np.sort(values, axis, kind='stable' if stable else 'quicksort')
  if direction == 'DESCENDING':
    return np.negative(result)
  return result


# --- Begin Public Functions --------------------------------------------------

argsort = utils.copy_docstring(tf.argsort, _argsort)

sort = utils.copy_docstring(tf.sort, _sort)
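# Illustrative example (hypothetical, for exposition only): 'DESCENDING'
# negates the input so that numpy's ascending sort yields a descending
# ranking; `_sort` negates back afterwards, `_argsort` only needs the indices.
#
#   >>> import numpy as np
#   >>> x = np.array([3, 1, 2])
#   >>> _argsort(x, direction='DESCENDING')
#   array([0, 2, 1])
#   >>> _sort(x, direction='DESCENDING')
#   array([3, 2, 1])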
def _matmul(a, b,
            transpose_a=False, transpose_b=False,
            adjoint_a=False, adjoint_b=False,
            a_is_sparse=False, b_is_sparse=False,
            name=None):  # pylint: disable=unused-argument
  """Numpy matmul wrapper."""
  if a_is_sparse or b_is_sparse:
    raise NotImplementedError('Numpy backend does not support sparse matmul.')
  if transpose_a or adjoint_a:
    a = _matrix_transpose(a, conjugate=adjoint_a)
  if transpose_b or adjoint_b:
    b = _matrix_transpose(b, conjugate=adjoint_b)
  return np.matmul(a, b)


# --- Begin Public Functions --------------------------------------------------

band_part = utils.copy_docstring(
    tf.linalg.band_part,
    lambda input, num_lower, num_upper, name=None: (  # pylint: disable=g-long-lambda
        np.tril(np.triu(input, -num_lower), num_upper)))

cholesky = utils.copy_docstring(
    tf.linalg.cholesky,
    lambda input, name=None: np.linalg.cholesky(input))

cholesky_solve = utils.copy_docstring(
    tf.linalg.cholesky_solve,
    lambda chol, rhs, name=None: scipy_linalg.cho_solve((chol, True), rhs))

det = utils.copy_docstring(
    tf.linalg.det,
    lambda input, name=None: np.linalg.det(input))

diag = utils.copy_docstring(
    tf.linalg.diag,
    lambda diagonal, name=None: np.diag(diagonal))

diag_part = utils.copy_docstring(
    tf.linalg.diag_part,
    lambda input, name=None: np.diagonal(input))
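# Illustrative example (hypothetical; assumes the module's `_matrix_transpose`
# helper, defined elsewhere in this file): `transpose_a`/`adjoint_a` flip the
# last two dims before the product, mirroring `tf.linalg.matmul`.
#
#   >>> import numpy as np
#   >>> a = np.array([[1., 2.], [3., 4.]])
#   >>> _matmul(a, np.eye(2), transpose_a=True)
#   array([[1., 3.],
#          [2., 4.]])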
def _scan(  # pylint: disable=unused-argument
    fn,
    elems,
    initializer=None,
    parallel_iterations=10,
    back_prop=True,
    swap_memory=False,
    infer_shape=True,
    reverse=False,
    name=None):
  """Scan implementation."""
  if initializer is None:
    arg = elems[0]
    elems = elems[1:]
    # Like `tf.scan`, when no initializer is given the first element seeds the
    # accumulator and is also the first output.
    out = [arg]
  else:
    arg = initializer
    out = []
  for x in elems:
    arg = fn(arg, x)
    out.append(arg)
  return np.array(out)


# --- Begin Public Functions --------------------------------------------------

map_fn = utils.copy_docstring(tf.map_fn, _map_fn)

scan = utils.copy_docstring(tf.scan, _scan)
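# Illustrative example (hypothetical, for exposition only) of the `tf.scan`
# semantics above: without an initializer the first element is the first
# output; with one, outputs start at `fn(initializer, elems[0])`.
#
#   >>> import numpy as np
#   >>> _scan(lambda a, x: a + x, np.array([1, 2, 3]))
#   array([1, 3, 6])
#   >>> _scan(lambda a, x: a + x, np.array([1, 2, 3]), initializer=10)
#   array([11, 13, 16])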
def _placeholder_with_default(input, shape, name=None):  # pylint: disable=redefined-builtin,unused-argument
  x = np.array(input)
  if shape is None or any(s is None for s in shape):
    return x
  return np.reshape(x, shape)


# --- Begin Public Functions --------------------------------------------------

assert_equal = utils.copy_docstring(tf.compat.v1.assert_equal, _assert_equal)

assert_greater = utils.copy_docstring(tf.compat.v1.assert_greater,
                                      _assert_greater)

assert_less = utils.copy_docstring(tf.compat.v1.assert_less, _assert_less)

assert_rank = utils.copy_docstring(tf.compat.v1.assert_rank, _assert_rank)

assert_scalar = utils.copy_docstring(tf.compat.v1.assert_scalar,
                                     _assert_scalar)

assert_greater_equal = utils.copy_docstring(tf.compat.v1.assert_greater_equal,
                                            _assert_greater_equal)

assert_integer = utils.copy_docstring(tf.compat.v1.assert_integer,
                                      _assert_integer)
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import numpy as np

import tensorflow as tf

from tensorflow_probability.python.internal.backend.numpy.internal import utils
from tensorflow_probability.python.internal.backend.numpy.math import log_softmax
from tensorflow_probability.python.internal.backend.numpy.math import softmax
from tensorflow_probability.python.internal.backend.numpy.math import softplus

__all__ = [
    'log_softmax',
    'relu',
    'softmax',
    'softplus',
    # 'sigmoid_cross_entropy_with_logits',
]


# --- Begin Public Functions --------------------------------------------------

relu = utils.copy_docstring(
    tf.nn.relu,
    # Elementwise max with 0; `np.max(features, 0)` would instead reduce over
    # axis 0.
    lambda features, name=None: np.maximum(features, 0))
def _function(
    func=None,
    input_signature=None,
    autograph=True,  # pylint: disable=unused-argument
    experimental_autograph_options=None,  # pylint: disable=unused-argument
    experimental_relax_shapes=False,
    experimental_compile=None):  # pylint: disable=unused-argument
  """Dummy version of `tf.function`."""
  # This code path is for the `foo = tf.function(foo, ...)` use case.
  if func is not None:
    return func
  # This code path is for the following use case:
  #   @tf.function(...)
  #   def foo(...):
  #     ...
  # This case is equivalent to `foo = tf.function(...)(foo)`.
  return lambda inner_function: inner_function


# --- Begin Public Functions --------------------------------------------------

compat = collections.namedtuple(
    'compat',
    'dimension_value')(lambda dim: None if dim is None else int(dim))

function = utils.copy_docstring(tf.function, _function)

eye = linalg.eye
matmul = linalg.matmul

del collections, tf, utils
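# Illustrative example (hypothetical, for exposition only): both `tf.function`
# call styles reduce to identity under this backend.
#
#   >>> def double(x):
#   ...   return 2 * x
#   >>> function(double) is double        # direct-call style
#   True
#   >>> @function(autograph=False)        # decorator-factory style
#   ... def triple(x):
#   ...   return 3 * x
#   >>> triple(4)
#   12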
# Dependency imports
import numpy as np

import tensorflow as tf

from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'difference',
]


def _difference(a, b, aminusb=True, validate_indices=True):
  if not aminusb:
    raise NotImplementedError(
        'Argument `aminusb != True` is currently unimplemented.')
  if not validate_indices:
    raise NotImplementedError(
        'Argument `validate_indices != True` is currently unimplemented.')
  return np.setdiff1d(a, b)


# --- Begin Public Functions --------------------------------------------------

# TODO(b/136555907): Add unit test.
difference = utils.copy_docstring(
    tf.sets.difference,
    _difference)
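# Illustrative example (hypothetical, for exposition only): `np.setdiff1d`
# returns the sorted unique values of `a` that are not in `b`, which is the
# set-difference semantics this wrapper relies on.
#
#   >>> _difference([3, 1, 2, 1], [2, 4])
#   array([1, 3])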
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Experimental Numpy backend."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Dependency imports
import tensorflow as tf

from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'Assert',
    'check_numerics',
]


Assert = utils.copy_docstring(  # pylint: disable=invalid-name
    tf.debugging.Assert,
    lambda condition, data, summarize=None, name=None: None)

check_numerics = utils.copy_docstring(
    tf.debugging.check_numerics,
    lambda x, *_, **__: x)
import tensorflow as tf

from tensorflow_probability.python.internal.backend.numpy import v1
from tensorflow_probability.python.internal.backend.numpy import v2
from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'dimension_value',
    'v1',
    'v2',
]


def _dimension_value(dimension):
  if dimension is None:
    return None
  return int(dimension)


# --- Begin Public Functions --------------------------------------------------

dimension_value = utils.copy_docstring(
    tf.compat.dimension_value,
    _dimension_value)

del tf, utils
    'placeholder_with_default',
    'set_random_seed',
]


# --- Begin Public Functions --------------------------------------------------

assert_equal = tf.compat.v1.assert_equal
assert_greater = tf.compat.v1.assert_greater
assert_less = tf.compat.v1.assert_less
assert_rank = tf.compat.v1.assert_rank

assert_greater_equal = tf.compat.v1.assert_greater_equal
assert_integer = tf.compat.v1.assert_integer
assert_less_equal = tf.compat.v1.assert_less_equal
assert_near = tf.compat.v1.assert_near
assert_non_negative = tf.compat.v1.assert_non_negative
assert_non_positive = tf.compat.v1.assert_non_positive
assert_none_equal = tf.compat.v1.assert_none_equal
assert_positive = tf.compat.v1.assert_positive
assert_rank_at_least = tf.compat.v1.assert_rank_at_least

placeholder_with_default = utils.copy_docstring(
    tf.compat.v1.placeholder_with_default,
    lambda input, shape, name=None: np.array(input).reshape(shape))  # pylint: disable=redefined-builtin,unused-argument

set_random_seed = utils.copy_docstring(tf.compat.v1.set_random_seed,
                                       np.random.seed)

del tf
    'uint64',
    'uint8',
    # 'as_string',
    # 'bfloat16',
    # 'dtypes',
    # 'qint16',
    # 'qint32',
    # 'qint8',
    # 'quint16',
    # 'quint8',
]


# --- Begin Public Functions --------------------------------------------------

as_dtype = utils.copy_docstring(
    tf.as_dtype,
    lambda type_value: np.dtype(  # pylint: disable=g-long-lambda
        type_value.name if hasattr(type_value, 'name') else type_value).type)

bool = np.bool  # pylint: disable=redefined-builtin

complex = np.complex  # pylint: disable=redefined-builtin

complex128 = np.complex128

complex64 = np.complex64

double = np.double

float16 = np.float16

float32 = np.float32
    # 'gamma',
    # 'learned_unigram_candidate_sampler',
    # 'log_uniform_candidate_sampler',
    # 'normal',
    # 'poisson',
    # 'set_seed',
    # 'shuffle',
    # 'stateless_categorical',
    # 'stateless_normal',
    # 'stateless_truncated_normal',
    # 'stateless_uniform',
    # 'truncated_normal',
    # 'uniform',
    # 'uniform_candidate_sampler',
]


def _normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None,
            name=None):  # pylint: disable=unused-argument
  rng = np.random if seed is None else np.random.RandomState(seed)
  # Respect the requested `dtype` rather than hard-coding float32.
  dtype = utils.common_dtype([mean, stddev], preferred_dtype=dtype)
  return rng.normal(loc=mean, scale=stddev, size=shape).astype(dtype)


# --- Begin Public Functions --------------------------------------------------

normal = utils.copy_docstring(
    tf.random.normal,
    _normal)
assert_equal = tf.compat.v1.assert_equal
assert_greater = tf.compat.v1.assert_greater
assert_less = tf.compat.v1.assert_less
assert_rank = tf.compat.v1.assert_rank

assert_greater_equal = tf.compat.v1.assert_greater_equal
assert_integer = tf.compat.v1.assert_integer
assert_less_equal = tf.compat.v1.assert_less_equal
assert_near = tf.compat.v1.assert_near
assert_non_negative = tf.compat.v1.assert_non_negative
assert_non_positive = tf.compat.v1.assert_non_positive
assert_none_equal = tf.compat.v1.assert_none_equal
assert_positive = tf.compat.v1.assert_positive
assert_rank_at_least = tf.compat.v1.assert_rank_at_least

placeholder_with_default = utils.copy_docstring(
    tf.compat.v1.placeholder_with_default,
    _placeholder_with_default)

global_variables_initializer = utils.copy_docstring(
    tf.compat.v1.global_variables_initializer,
    lambda: None)

set_random_seed = utils.copy_docstring(
    tf.compat.v1.set_random_seed,
    np.random.seed)

del tf
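# Illustrative example (hypothetical, for exposition only):
# `_placeholder_with_default` only enforces a reshape when every dimension is
# known; any `None` dimension leaves the value's own shape in place.
#
#   >>> _placeholder_with_default([1, 2, 3, 4], shape=[2, 2]).shape
#   (2, 2)
#   >>> _placeholder_with_default([1, 2, 3, 4], shape=[None]).shape
#   (4,)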
  def __exit__(self, typ, value, traceback):  # pylint: disable=unused-argument
    pass

  def watch(self, tensor):  # pylint: disable=unused-argument
    pass

  def gradient(self, target, sources, output_gradients=None,  # pylint: disable=unused-argument
               unconnected_gradients=UnconnectedGradients.NONE):  # pylint: disable=unused-argument
    return sources


broadcast_dynamic_shape = utils.copy_docstring(tf.broadcast_dynamic_shape,
                                               _broadcast_static_shape)

broadcast_static_shape = utils.copy_docstring(tf.broadcast_static_shape,
                                              _broadcast_static_shape)

broadcast_to = utils.copy_docstring(
    tf.broadcast_to,
    lambda input, shape, name=None: np.broadcast_to(input, shape))

cast = utils.copy_docstring(
    tf.cast,
    lambda x, dtype, name=None: np.array(x).astype(utils.numpy_dtype(dtype)))

clip_by_value = utils.copy_docstring(
    tf.clip_by_value,
    lambda t, clip_value_min, clip_value_max, name=None: (  # pylint: disable=g-long-lambda
        np.clip(t, clip_value_min, clip_value_max)))
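# Illustrative example (hypothetical, for exposition only) of the `cast` and
# `broadcast_to` wrappers above:
#
#   >>> import numpy as np
#   >>> cast([1.7, 2.2], dtype=np.int32)
#   array([1, 2], dtype=int32)
#   >>> broadcast_to([1., 2.], [3, 2]).shape
#   (3, 2)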
def _sparse_softmax_cross_entropy_with_logits(labels, logits, name=None):  # pylint: disable=unused-argument
  """Numpy implementation of `tf.nn.sparse_softmax_cross_entropy_with_logits`."""
  labels_shape = np.shape(labels)
  num_classes = np.shape(logits)[-1]
  logits = np.reshape(logits, [-1, num_classes])
  labels = np.reshape(labels, [-1])

  labels = numpy_array.one_hot(labels, num_classes)

  cost = -np.sum(
      labels * (logits - reduce_logsumexp(logits, axis=-1, keepdims=True)),
      axis=-1)
  cost = np.reshape(cost, labels_shape)
  return cost


# --- Begin Public Functions --------------------------------------------------

l2_normalize = utils.copy_docstring(
    tf.nn.l2_normalize,
    l2_normalize)

relu = utils.copy_docstring(
    tf.nn.relu,
    # Elementwise max with 0; `np.max(features, 0)` would reduce over axis 0.
    lambda features, name=None: np.maximum(features, 0))

softplus = utils.copy_docstring(
    tf.nn.softplus,
    # `log1p(exp(x))` is the same formula as `log(1 + exp(x))`, with better
    # accuracy when `exp(x)` is small.
    lambda features, name=None: np.log1p(np.exp(features)))

sigmoid_cross_entropy_with_logits = utils.copy_docstring(
    tf.nn.sigmoid_cross_entropy_with_logits,
    _sigmoid_cross_entropy_with_logits)
  # We offer a non-SciPy version in case SciPy isn't installed, since
  # logsumexp is often used.
  m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
  y = input_tensor - m
  y = np.exp(y, out=y)
  return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))


def _top_k(input, k=1, sorted=True, name=None):  # pylint: disable=unused-argument,redefined-builtin
  raise NotImplementedError


# --- Begin Public Functions --------------------------------------------------

abs = utils.copy_docstring(  # pylint: disable=redefined-builtin
    tf.math.abs,
    lambda x, name=None: np.abs(x))

accumulate_n = utils.copy_docstring(
    tf.math.accumulate_n,
    lambda inputs, shape=None, tensor_dtype=None, name=None: (  # pylint: disable=g-long-lambda
        sum(map(np.array, inputs)).astype(utils.numpy_dtype(tensor_dtype))))

acos = utils.copy_docstring(tf.math.acos, lambda x, name=None: np.arccos(x))

acosh = utils.copy_docstring(tf.math.acosh, lambda x, name=None: np.arccosh(x))

add = utils.copy_docstring(tf.math.add, lambda x, y, name=None: np.add(x, y))

add_n = utils.copy_docstring(
    tf.math.add_n,
    lambda inputs, name=None: sum(map(np.array, inputs)))
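# A worked illustration (hypothetical, for exposition only) of the
# max-subtraction trick above: shifting by the max keeps `np.exp` in range,
# and the shift is added back outside the log.
#
#   >>> import numpy as np
#   >>> x = np.array([1000., 1001.])
#   >>> np.log(np.sum(np.exp(x)))             # naive form overflows
#   inf
#   >>> m = np.max(x)
#   >>> m + np.log(np.sum(np.exp(x - m)))     # stable form
#   1001.3132616875182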
def _transpose(a, perm=None, conjugate=False, name='transpose'):  # pylint: disable=unused-argument
  x = np.transpose(a, perm)
  return np.conjugate(x) if conjugate else x


def _zeros_like(input, dtype=None, name=None):  # pylint: disable=redefined-builtin
  s = _shape(input)
  if isinstance(s, (np.ndarray, np.generic)):
    return np.zeros(s, utils.numpy_dtype(dtype or input.dtype))
  return tf.zeros(s, dtype or s.dtype, name)


# --- Begin Public Functions --------------------------------------------------

concat = utils.copy_docstring(
    tf.concat,
    lambda values, axis, name=None: np.concatenate(values, axis))

eye = utils.copy_docstring(tf.eye, _eye)

expand_dims = utils.copy_docstring(
    tf.expand_dims,
    lambda input, axis, name=None: np.expand_dims(input, axis))

fill = utils.copy_docstring(
    tf.fill,
    lambda dims, value, name=None: value * np.ones(dims, np.array(value).dtype))

reverse = utils.copy_docstring(
    tf.reverse,
    lambda tensor, axis, name=None: np.flip(tensor, axis))

linspace = utils.copy_docstring(
    tf.linspace,
    lambda start, stop, num, name=None: np.linspace(start, stop, num))
from tensorflow_probability.python.internal.backend.numpy import v1
from tensorflow_probability.python.internal.backend.numpy import v2
from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'dimension_value',
    'function',
    'v1',
    'v2',
]


def _dimension_value(dimension):
  if dimension is None:
    return None
  return int(dimension)


# --- Begin Public Functions --------------------------------------------------

dimension_value = utils.copy_docstring(tf.compat.dimension_value,
                                       _dimension_value)

function = utils.copy_docstring(
    tf.function,
    lambda func=None, input_signature=None, autograph=True,  # pylint: disable=g-long-lambda
           experimental_autograph_options=None,
           experimental_relax_shapes=False: func)

del tf, utils
def _normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None,
            name=None):  # pylint: disable=unused-argument
  rng = np.random if seed is None else np.random.RandomState(seed)
  dtype = utils.common_dtype([mean, stddev], preferred_dtype=dtype)
  return rng.normal(loc=mean, scale=stddev, size=shape).astype(dtype)


def _uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None,
             name=None):  # pylint: disable=unused-argument
  rng = np.random if seed is None else np.random.RandomState(seed)
  dtype = utils.common_dtype([minval, maxval], preferred_dtype=dtype)
  maxval = 1 if maxval is None else maxval
  return rng.uniform(low=minval, high=maxval, size=shape).astype(dtype)


# --- Begin Public Functions --------------------------------------------------

normal = utils.copy_docstring(tf.random.normal, _normal)

uniform = utils.copy_docstring(tf.random.uniform, _uniform)
def _transpose(a, perm=None, conjugate=False, name='transpose'):  # pylint: disable=unused-argument
  x = np.transpose(a, perm)
  return np.conjugate(x) if conjugate else x


def _zeros_like(input, dtype=None, name=None):  # pylint: disable=redefined-builtin
  s = _shape(input)
  if isinstance(s, (np.ndarray, np.generic)):
    return np.zeros(s, utils.numpy_dtype(dtype or input.dtype))
  return tf.zeros(s, dtype or s.dtype, name)


# --- Begin Public Functions --------------------------------------------------

batch_gather = utils.copy_docstring(tf.compat.v1.batch_gather, _gather)

concat = utils.copy_docstring(
    tf.concat,
    lambda values, axis, name='concat': np.concatenate(values, axis))

expand_dims = utils.copy_docstring(
    tf.expand_dims,
    lambda input, axis, name=None: np.expand_dims(input, axis))

fill = utils.copy_docstring(
    tf.fill,
    lambda dims, value, name=None: value * np.ones(dims, np.array(value).dtype))

gather = utils.copy_docstring(tf.gather, _gather)
def _poisson(shape, lam, dtype=tf.float32, seed=None,
             name=None):  # pylint: disable=unused-argument
  rng = np.random if seed is None else np.random.RandomState(seed & 0xffffffff)
  dtype = utils.common_dtype([lam], preferred_dtype=dtype)
  shape = _shape([lam], shape)
  return rng.poisson(lam=lam, size=shape).astype(dtype)


def _uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None,
             name=None):  # pylint: disable=unused-argument
  # Mask to 32 bits: `np.random.RandomState` seeds must fit in [0, 2**32).
  rng = np.random if seed is None else np.random.RandomState(seed & 0xffffffff)
  dtype = utils.common_dtype([minval, maxval], preferred_dtype=dtype)
  maxval = 1 if maxval is None else maxval
  shape = _shape([minval, maxval], shape)
  return rng.uniform(low=minval, high=maxval, size=shape).astype(dtype)


# --- Begin Public Functions --------------------------------------------------

categorical = utils.copy_docstring(tf.random.categorical, _categorical)

gamma = utils.copy_docstring(tf.random.gamma, _gamma)

normal = utils.copy_docstring(tf.random.normal, _normal)

poisson = utils.copy_docstring(tf.random.poisson, _poisson)

uniform = utils.copy_docstring(tf.random.uniform, _uniform)
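# Illustrative example (hypothetical, for exposition only): seeding yields
# reproducible draws, and the 32-bit mask lets arbitrarily large Python-int
# seeds through.
#
#   >>> import numpy as np
#   >>> a = _uniform([3], seed=42)
#   >>> b = _uniform([3], seed=42)
#   >>> np.array_equal(a, b)
#   True
#   >>> _uniform([3], seed=2**40 + 7).shape   # seed masked to 32 bits
#   (3,)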
from tensorflow_probability.python.internal.backend.numpy.internal import utils

__all__ = [
    'while_loop',
    # 'case',
    # 'cond',
    # 'dynamic_partition',
    # 'dynamic_stitch',
    # 'map_fn',
    # 'scan',
]


def _while_loop(cond, body, loop_vars,
                shape_invariants=None, parallel_iterations=10,  # pylint: disable=unused-argument
                back_prop=True, swap_memory=False,  # pylint: disable=unused-argument
                maximum_iterations=None, name=None):  # pylint: disable=unused-argument
  i = 0
  while (cond(*loop_vars) and
         (maximum_iterations is None or i < maximum_iterations)):
    loop_vars = body(*loop_vars)
    i += 1
  return loop_vars


# --- Begin Public Functions --------------------------------------------------

while_loop = utils.copy_docstring(
    tf.while_loop,
    _while_loop)
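# Illustrative example (hypothetical, for exposition only): the backend runs
# the loop eagerly in Python, unpacking `loop_vars` into `cond` and `body`
# each step.
#
#   >>> cond = lambda i, total: i < 5
#   >>> body = lambda i, total: (i + 1, total + i)
#   >>> _while_loop(cond, body, (0, 0))
#   (5, 10)
#   >>> _while_loop(cond, body, (0, 0), maximum_iterations=3)
#   (3, 3)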
    'uint32',
    'uint64',
    'uint8',
    # 'as_string',
    # 'bfloat16',
    # 'dtypes',
    # 'qint16',
    # 'qint32',
    # 'qint8',
    # 'quint16',
    # 'quint8',
]


# --- Begin Public Functions --------------------------------------------------

as_dtype = utils.copy_docstring(
    tf.as_dtype,
    lambda type_value: np.dtype(type_value).type)

bool = np.bool  # pylint: disable=redefined-builtin

complex = np.complex  # pylint: disable=redefined-builtin

complex128 = np.complex128

complex64 = np.complex64

double = np.double

float16 = np.float16

float32 = np.float32