def __init__(self, scale=1., concentration=1., validate_args=False, name='weibull'): """Instantiates the `Weibull` bijector. Args: scale: Positive Float-type `Tensor` that is the same dtype and is broadcastable with `concentration`. This is `l` in `Y = g(X) = 1 - exp(-(X / l) ** k)`. concentration: Positive Float-type `Tensor` that is the same dtype and is broadcastable with `scale`. This is `k` in `Y = g(X) = 1 - exp(-(X / l) ** k)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([scale, concentration], dtype_hint=tf.float32) self._scale = tensor_util.convert_nonref_to_tensor(scale, dtype=dtype, name='scale') self._concentration = tensor_util.convert_nonref_to_tensor( concentration, dtype=dtype, name='concentration') super(Weibull, self).__init__(forward_min_event_ndims=0, validate_args=validate_args, name=name)
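# A minimal numerical sketch (not part of the library) of the forward map the
# docstring describes: Y = 1 - exp(-(X / scale) ** concentration), i.e. the
# Weibull CDF. SciPy is assumed here only to provide a reference value.
import numpy as np
from scipy import stats

scale, concentration = 2.0, 1.5
x = np.linspace(0.1, 5.0, 5)
y_forward = 1.0 - np.exp(-(x / scale) ** concentration)  # bijector forward map
y_reference = stats.weibull_min.cdf(x, c=concentration, scale=scale)
np.testing.assert_allclose(y_forward, y_reference)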
def __init__(self, skewness=None, tailweight=None, validate_args=False, name="sinh_arcsinh"): """Instantiates the `SinhArcsinh` bijector. Args: skewness: Skewness parameter. Float-type `Tensor`. Default is `0` of type `float32`. tailweight: Tailweight parameter. Positive `Tensor` of same `dtype` as `skewness` and broadcastable `shape`. Default is `1` of type `float32`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ with tf.name_scope(name) as name: tailweight = 1. if tailweight is None else tailweight skewness = 0. if skewness is None else skewness dtype = dtype_util.common_dtype([tailweight, skewness], dtype_hint=tf.float32) self._skewness = tensor_util.convert_nonref_to_tensor( skewness, dtype=dtype, name="skewness") self._tailweight = tensor_util.convert_nonref_to_tensor( tailweight, dtype=dtype, name="tailweight") self._scale_number = tf.convert_to_tensor(2., dtype=dtype) super(SinhArcsinh, self).__init__(forward_min_event_ndims=0, validate_args=validate_args, name=name)
def __init__(self, loc=0., scale=1., validate_args=False, name='gumbel'): """Instantiates the `Gumbel` bijector. Args: loc: Float-like `Tensor` that is the same dtype and is broadcastable with `scale`. This is `loc` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`. scale: Positive Float-like `Tensor` that is the same dtype and is broadcastable with `loc`. This is `scale` in `Y = g(X) = exp(-exp(-(X - loc) / scale))`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32) self._loc = tensor_util.convert_nonref_to_tensor(loc, dtype=dtype, name='loc') self._scale = tensor_util.convert_nonref_to_tensor(scale, dtype=dtype, name='scale') super(Gumbel, self).__init__(validate_args=validate_args, forward_min_event_ndims=0, name=name)
def __init__(self, total_count, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name='NegativeBinomial'): """Construct NegativeBinomial distributions. Args: total_count: Non-negative floating-point `Tensor` with shape broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as `probs` or `logits`. Defines this as a batch of `B1 x ... x Bb` different Negative Binomial distributions. In practice, this represents the number of negative Bernoulli trials to stop at (the `total_count` of failures). Its components should be equal to integer values. logits: Floating-point `Tensor` with shape broadcastable to `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions. Each entry represents logits for the probability of success for independent Negative Binomial distributions and must be in the open interval `(-inf, inf)`. Only one of `logits` or `probs` should be specified. probs: Positive floating-point `Tensor` with shape broadcastable to `[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions. Each entry represents the probability of success for independent Negative Binomial distributions and must be in the open interval `(0, 1)`. Only one of `logits` or `probs` should be specified. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) if (probs is None) == (logits is None): raise ValueError( 'Construct `NegativeBinomial` with `probs` or `logits` but not both.' ) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([total_count, logits, probs], dtype_hint=tf.float32) self._probs = tensor_util.convert_nonref_to_tensor( probs, dtype=dtype, name='probs') self._logits = tensor_util.convert_nonref_to_tensor( logits, dtype=dtype, name='logits') self._total_count = tensor_util.convert_nonref_to_tensor( total_count, dtype=dtype, name='total_count') super(NegativeBinomial, self).__init__( dtype=dtype, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
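# Usage sketch, assuming this constructor is exposed as
# tfp.distributions.NegativeBinomial and that the standard Distribution
# methods (sample, log_prob, mean) apply; values below are illustrative.
import tensorflow_probability as tfp

dist = tfp.distributions.NegativeBinomial(total_count=5., probs=0.4)
counts = dist.sample(3, seed=42)   # draws: successes seen before 5 failures
log_p = dist.log_prob(counts)
mean = dist.mean()                 # total_count * probs / (1 - probs) = 10 / 3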
def _kl_gamma_gamma(concentration0, rate0, concentration1, rate1, name=None): """Calculate batched KL divergence KL(g0 || g1) with given Gamma parameters. Args: concentration0: Concentration of first Gamma distribution (g0). rate0: Rate of first Gamma distribution (g0). concentration1: Concentration of second Gamma distribution (g1). rate1: Rate of second Gamma distribution (g1). name: Python `str` name to use for created operations. Default value: `None` (i.e., `'kl_gamma_gamma'`). Returns: kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1). """ with tf.name_scope(name or 'kl_gamma_gamma'): # Result from: # http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps # For derivation see: # http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions pylint: disable=line-too-long dtype = dtype_util.common_dtype( [concentration0, rate0, concentration1, rate1], dtype_hint=tf.float32) g0_concentration = tf.convert_to_tensor(concentration0, dtype=dtype) g0_rate = tf.convert_to_tensor(rate0, dtype=dtype) g1_concentration = tf.convert_to_tensor(concentration1, dtype=dtype) g1_rate = tf.convert_to_tensor(rate1, dtype=dtype) return (((g0_concentration - g1_concentration) * tf.math.digamma(g0_concentration)) + tf.math.lgamma(g1_concentration) - tf.math.lgamma(g0_concentration) + g1_concentration * tf.math.log(g0_rate) - g1_concentration * tf.math.log(g1_rate) + g0_concentration * (g1_rate / g0_rate - 1.))
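# A standalone NumPy/SciPy restatement of the same closed form (hypothetical
# helper, illustrative only), handy for sanity checks such as KL(g || g) == 0.
import numpy as np
from scipy.special import digamma, gammaln

def kl_gamma_gamma_np(conc0, rate0, conc1, rate1):
  return ((conc0 - conc1) * digamma(conc0)
          + gammaln(conc1) - gammaln(conc0)
          + conc1 * (np.log(rate0) - np.log(rate1))
          + conc0 * (rate1 / rate0 - 1.))

assert np.isclose(kl_gamma_gamma_np(3., 2., 3., 2.), 0.)   # KL of a Gamma with itself
assert kl_gamma_gamma_np(3., 2., 5., 1.) > 0.              # KL is non-negative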
def __init__(self, concentration1=1., concentration0=1., validate_args=False, name="kumaraswamy"): """Instantiates the `Kumaraswamy` bijector. Args: concentration1: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `a` is `concentration1`. concentration0: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `b` is `concentration0`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([concentration0, concentration1], dtype_hint=tf.float32) self._concentration0 = tensor_util.convert_nonref_to_tensor( concentration0, dtype=dtype, name="concentration0") self._concentration1 = tensor_util.convert_nonref_to_tensor( concentration1, dtype=dtype, name="concentration1") super(Kumaraswamy, self).__init__(forward_min_event_ndims=0, validate_args=validate_args, name=name)
def __init__(self, scale, validate_args=False, allow_nan_stats=True, name='HalfNormal'): """Construct HalfNormals with scale `scale`. Args: scale: Floating point tensor; the scales of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value '`NaN`' to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([scale], dtype_hint=tf.float32) self._scale = tensor_util.convert_nonref_to_tensor(scale, name='scale', dtype=dtype) super(HalfNormal, self).__init__(dtype=dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def _kl_uniform_uniform(a, b, name=None): """Calculate the batched KL divergence KL(a || b) with a and b Uniform. Note that the KL divergence is infinite if the support of `a` is not a subset of the support of `b`. Args: a: instance of a Uniform distribution object. b: instance of a Uniform distribution object. name: (optional) Name to use for created operations. default is "kl_uniform_uniform". Returns: Batchwise KL(a || b) """ with tf.name_scope(name or 'kl_uniform_uniform'): # Consistent with # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 60 # Watch out for the change in conventions--they use 'a' and 'b' to refer to # lower and upper bounds respectively there. dtype = dtype_util.common_dtype([a.low, a.high, b.low, b.high], tf.float32) a_low = tf.convert_to_tensor(a.low) b_low = tf.convert_to_tensor(b.low) a_high = tf.convert_to_tensor(a.high) b_high = tf.convert_to_tensor(b.high) return tf.where( (b_low <= a_low) & (a_high <= b_high), tf.math.log(b_high - b_low) - tf.math.log(a_high - a_low), dtype_util.as_numpy_dtype(dtype)(np.inf))
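# Plain-NumPy restatement (illustrative only): for a = Uniform(a_low, a_high)
# and b = Uniform(b_low, b_high), KL(a || b) equals
# log(b_high - b_low) - log(a_high - a_low) when the support of `a` is
# contained in the support of `b`, and +inf otherwise.
import numpy as np

def kl_uniform_uniform_np(a_low, a_high, b_low, b_high):
  contained = (b_low <= a_low) & (a_high <= b_high)
  return np.where(contained,
                  np.log(b_high - b_low) - np.log(a_high - a_low),
                  np.inf)

print(kl_uniform_uniform_np(0., 1., -1., 2.))  # log(3) ~= 1.0986
print(kl_uniform_uniform_np(0., 1., 0.5, 2.))  # inf: support not contained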
def __init__(self, low=0., high=1., peak=0.5, validate_args=False, allow_nan_stats=True, name='Triangular'): """Initialize a batch of Triangular distributions. Args: low: Floating point tensor, lower boundary of the output interval. Must have `low < high`. Default value: `0`. high: Floating point tensor, upper boundary of the output interval. Must have `low < high`. Default value: `1`. peak: Floating point tensor, mode of the output interval. Must have `low <= peak` and `peak <= high`. Default value: `0.5`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False`. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: `'Triangular'`. Raises: InvalidArgumentError: if `validate_args=True` and one of the following is True: * `low >= high`. * `peak > high`. * `low > peak`. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([low, high, peak], tf.float32) self._low = tensor_util.convert_nonref_to_tensor(low, name='low', dtype=dtype) self._high = tensor_util.convert_nonref_to_tensor(high, name='high', dtype=dtype) self._peak = tensor_util.convert_nonref_to_tensor(peak, name='peak', dtype=dtype) super(Triangular, self).__init__(dtype=self._low.dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def cholesky_concat(chol, cols, name=None): """Concatenates `chol @ chol.T` with additional rows and columns. This operation is conceptually identical to: ```python def cholesky_concat_slow(chol, cols): # cols shaped (n + m) x m = z x m mat = tf.matmul(chol, chol, adjoint_b=True) # batch of n x n # Concat columns. mat = tf.concat([mat, cols[..., :tf.shape(mat)[-2], :]], axis=-1) # n x z # Concat rows. mat = tf.concat([mat, tf.linalg.matrix_transpose(cols)], axis=-2) # z x z return tf.linalg.cholesky(mat) ``` but whereas `cholesky_concat_slow` would cost `O(z**3)` work, `cholesky_concat` only costs `O(z**2 + m**3)` work. The resulting (implicit) matrix must be symmetric and positive definite. Thus, the bottom right `m x m` must be self-adjoint, and we do not require a separate `rows` argument (which can be inferred from `conj(cols.T)`). Args: chol: Cholesky decomposition of `mat = chol @ chol.T`. cols: The new columns whose first `n` rows we would like concatenated to the right of `mat = chol @ chol.T`, and whose conjugate transpose we would like concatenated to the bottom of `concat(mat, cols[:n,:])`. A `Tensor` with final dims `(n+m, m)`. The first `n` rows are the top right rectangle (their conjugate transpose forms the bottom left), and the bottom `m x m` is self-adjoint. name: Optional name for this op. Returns: chol_concat: The Cholesky decomposition of: ``` [ [ mat cols[:n, :] ] [ conj(cols.T) ] ] ``` """ with tf.name_scope(name or 'cholesky_extend'): dtype = dtype_util.common_dtype([chol, cols], dtype_hint=tf.float32) chol = tf.convert_to_tensor(chol, name='chol', dtype=dtype) cols = tf.convert_to_tensor(cols, name='cols', dtype=dtype) n = prefer_static.shape(chol)[-1] mat_nm, mat_mm = cols[..., :n, :], cols[..., n:, :] solved_nm = linear_operator_util.matrix_triangular_solve_with_broadcast( chol, mat_nm) lower_right_mm = tf.linalg.cholesky( mat_mm - tf.matmul(solved_nm, solved_nm, adjoint_a=True)) lower_left_mn = tf.math.conj(tf.linalg.matrix_transpose(solved_nm)) out_batch = prefer_static.shape(solved_nm)[:-2] chol = tf.broadcast_to( chol, tf.concat([out_batch, prefer_static.shape(chol)[-2:]], axis=0)) top_right_zeros_nm = tf.zeros_like(solved_nm) return tf.concat([ tf.concat([chol, top_right_zeros_nm], axis=-1), tf.concat([lower_left_mn, lower_right_mm], axis=-1) ], axis=-2)
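# An illustrative NumPy re-derivation of the block update (real-valued case
# only; names below are hypothetical), checked against the "slow" construction
# from the docstring.
import numpy as np

rng = np.random.default_rng(0)
n, m = 4, 2
a = rng.normal(size=(n + m, n + m))
full = a @ a.T + (n + m) * np.eye(n + m)     # (n + m) x (n + m) SPD matrix
mat, cols = full[:n, :n], full[:, n:]        # existing block and new columns
chol = np.linalg.cholesky(mat)

# Fast path: reuse `chol` and only factor an m x m Schur complement.
solved = np.linalg.solve(chol, cols[:n])                 # chol^{-1} @ top block
lower_right = np.linalg.cholesky(cols[n:] - solved.T @ solved)
fast = np.block([[chol, np.zeros((n, m))],
                 [solved.T, lower_right]])

# Slow path: rebuild the full matrix and refactor from scratch.
np.testing.assert_allclose(fast, np.linalg.cholesky(full), atol=1e-8)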
def __init__(self, df, loc, scale, validate_args=False, allow_nan_stats=True, name='StudentT'): """Construct Student's t distributions. The distributions have degrees of freedom `df`, mean `loc`, and scale `scale`. The parameters `df`, `loc`, and `scale` must be shaped in a way that supports broadcasting (e.g. `df + loc + scale` is a valid operation). Args: df: Floating-point `Tensor`. The degrees of freedom of the distribution(s). `df` must contain only positive values. loc: Floating-point `Tensor`. The mean(s) of the distribution(s). scale: Floating-point `Tensor`. The scaling factor(s) for the distribution(s). Note that `scale` is not technically the standard deviation of this distribution but has semantics more similar to standard deviation than variance. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value '`NaN`' to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `loc` and `scale` are different dtypes. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([df, loc, scale], tf.float32) self._df = tensor_util.convert_nonref_to_tensor(df, name='df', dtype=dtype) self._loc = tensor_util.convert_nonref_to_tensor(loc, name='loc', dtype=dtype) self._scale = tensor_util.convert_nonref_to_tensor(scale, name='scale', dtype=dtype) dtype_util.assert_same_float_dtype( (self._df, self._loc, self._scale)) super(StudentT, self).__init__(dtype=dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Gumbel'): """Construct Gumbel distributions with location and scale `loc` and `scale`. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g. `loc + scale` is a valid operation). Args: loc: Floating point tensor, the location parameter(s) of the distribution(s). scale: Floating point tensor, the scales of the distribution(s). `scale` must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False`. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value `NaN` to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: `'Gumbel'`. Raises: TypeError: if loc and scale are different dtypes. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32) loc = tensor_util.convert_nonref_to_tensor( loc, name='loc', dtype=dtype) scale = tensor_util.convert_nonref_to_tensor( scale, name='scale', dtype=dtype) dtype_util.assert_same_float_dtype([loc, scale]) # Positive scale is asserted by the incorporated Gumbel bijector. self._gumbel_bijector = gumbel_bijector.Gumbel( loc=loc, scale=scale, validate_args=validate_args) # Because the uniform sampler generates samples in `[0, 1)` this would # cause samples to lie in `[-inf, inf)` instead of `(-inf, inf)`. To fix # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny` # because it is the smallest, positive, 'normal' number. super(Gumbel, self).__init__( distribution=uniform.Uniform( low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny, high=tf.ones([], dtype=dtype), allow_nan_stats=allow_nan_stats), # The Gumbel bijector encodes the quantile function as the forward, # and hence needs to be inverted. bijector=invert_bijector.Invert(self._gumbel_bijector), batch_shape=distribution_util.get_broadcast_shape(loc, scale), parameters=parameters, name=name)
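# A NumPy sketch of the sampling scheme this construction encodes: draw
# u ~ Uniform(tiny, 1) and push it through the Gumbel quantile function
# (the inverse of the bijector's forward map). Illustrative only.
import numpy as np

loc, scale = 0.5, 2.0
tiny = np.finfo(np.float32).tiny       # excludes u == 0, keeping samples finite
u = np.random.default_rng(0).uniform(tiny, 1.0, size=100_000)
x = loc - scale * np.log(-np.log(u))   # Gumbel quantile function

# The empirical mean should approach loc + scale * euler_gamma.
print(x.mean(), loc + scale * np.euler_gamma)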
def __init__(self, total_count, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name='Multinomial'): """Initialize a batch of Multinomial distributions. Args: total_count: Non-negative floating point tensor with shape broadcastable to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different Multinomial distributions. Its components should be equal to integer values. logits: Floating point tensor representing unnormalized log-probabilities of a positive event with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`, and the same dtype as `total_count`. Defines this as a batch of `N1 x ... x Nm` different `K` class Multinomial distributions. Only one of `logits` or `probs` should be passed in. probs: Positive floating point tensor with shape broadcastable to `[N1,..., Nm, K]` `m >= 0` and same dtype as `total_count`. Defines this as a batch of `N1 x ... x Nm` different `K` class Multinomial distributions. `probs`'s components in the last portion of its shape should sum to `1`. Only one of `logits` or `probs` should be passed in. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) if (probs is None) == (logits is None): raise ValueError('Must pass probs or logits, but not both.') with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([total_count, logits, probs], dtype_hint=tf.float32) self._total_count = tensor_util.convert_nonref_to_tensor( total_count, name='total_count', dtype=dtype) self._probs = tensor_util.convert_nonref_to_tensor(probs, dtype=dtype, name='probs') self._logits = tensor_util.convert_nonref_to_tensor(logits, dtype=dtype, name='logits') super(Multinomial, self).__init__( dtype=dtype, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def __init__(self, dimension, concentration, input_output_cholesky=False, validate_args=False, allow_nan_stats=True, name='LKJ'): """Construct LKJ distributions. Args: dimension: Python `int`. The dimension of the correlation matrices to sample. concentration: `float` or `double` `Tensor`. The positive concentration parameter of the LKJ distributions. The pdf of a sample matrix `X` is proportional to `det(X) ** (concentration - 1)`. input_output_cholesky: Python `bool`. If `True`, functions whose input or output have the semantics of samples assume inputs are in Cholesky form and return outputs in Cholesky form. In particular, if this flag is `True`, input to `log_prob` is presumed of Cholesky form and output from `sample` is of Cholesky form. Setting this argument to `True` is purely a computational optimization and does not change the underlying distribution. Additionally, validation checks which are only defined on the multiplied-out form are omitted, even if `validate_args` is `True`. Default value: `False` (i.e., input/output does not have Cholesky semantics). validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value `NaN` to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: If `dimension` is negative. """ if dimension < 0: raise ValueError( 'There are no negative-dimension correlation matrices.') parameters = dict(locals()) self._input_output_cholesky = input_output_cholesky with tf.name_scope(name): dtype = dtype_util.common_dtype([concentration], tf.float32) self._concentration = tensor_util.convert_nonref_to_tensor( concentration, name='concentration', dtype=dtype) self._dimension = dimension super(LKJ, self).__init__( dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, parameters=parameters, name=name)
def __init__(self, loc, scale, concentration, validate_args=False, allow_nan_stats=True, name=None): """Construct a Generalized Pareto distribution. Args: loc: The location / shift of the distribution. GeneralizedPareto is a location-scale distribution. This parameter lower bounds the distribution's support. Must broadcast with `scale`, `concentration`. Floating point `Tensor`. scale: The scale of the distribution. GeneralizedPareto is a location-scale distribution, so doubling the `scale` doubles a sample and halves the density. Strictly positive floating point `Tensor`. Must broadcast with `loc`, `concentration`. concentration: The shape parameter of the distribution. The larger the magnitude, the more the distribution concentrates near `loc` (for `concentration >= 0`) or near `loc - (scale/concentration)` (for `concentration < 0`). Floating point `Tensor`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `loc`, `scale`, or `concentration` have different dtypes. """ parameters = dict(locals()) with tf.name_scope(name or 'GeneralizedPareto') as name: dtype = dtype_util.common_dtype([loc, scale, concentration], dtype_hint=tf.float32) self._loc = tensor_util.convert_nonref_to_tensor(loc, dtype=dtype, name='loc') self._scale = tensor_util.convert_nonref_to_tensor(scale, dtype=dtype, name='scale') self._concentration = tensor_util.convert_nonref_to_tensor( concentration, dtype=dtype, name='concentration') super(GeneralizedPareto, self).__init__(dtype=dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, parameters=parameters, name=name)
def __init__(self, samples, event_ndims=0, validate_args=False, allow_nan_stats=True, name='Empirical'): """Initialize `Empirical` distributions. Args: samples: Numeric `Tensor` of shape `[B1, ..., Bk, S, E1, ..., En]`, `k, n >= 0`. Samples or batches of samples on which the distribution is based. The first `k` dimensions index into a batch of independent distributions. The length of the `S` dimension determines the number of samples in each multiset. The last `n` dimensions represent the event shape of each sample; `n` is specified by the `event_ndims` argument. event_ndims: Python `int32`, default `0`. Number of dimensions for each event. When `0` this distribution has scalar samples. When `1` this distribution has vector-like samples. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value `NaN` to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: ValueError: if the rank of `samples` is statically known and less than `event_ndims + 1`. """ parameters = dict(locals()) with tf.name_scope(name): self._samples = tensor_util.convert_nonref_to_tensor(samples) dtype = dtype_util.common_dtype([self._samples], dtype_hint=self._samples.dtype) self._event_ndims = event_ndims # Note: this tf.rank call affects the graph, but is ok in `__init__` # because we don't expect shapes (or ranks) to be runtime-variable, nor # ever need to differentiate with respect to them. samples_rank = prefer_static.rank(self.samples) self._samples_axis = samples_rank - self._event_ndims - 1 super(Empirical, self).__init__(dtype=dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
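# Shape bookkeeping sketch: for `samples` of shape [B1, ..., Bk, S, E1, ..., En],
# the axis holding the S draws is rank - event_ndims - 1, as computed above.
samples_shape = (3, 7, 1000, 2)   # batch (3, 7), S = 1000 draws, event shape (2,)
event_ndims = 1
samples_axis = len(samples_shape) - event_ndims - 1
assert samples_axis == 2          # the `S` dimension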
def __init__( self, temperature, logits=None, probs=None, validate_args=False, allow_nan_stats=True, name='ExpRelaxedOneHotCategorical'): """Initialize ExpRelaxedOneHotCategorical using class log-probabilities. Args: temperature: A 0-D `Tensor`, representing the temperature of a set of ExpRelaxedCategorical distributions. The temperature should be positive. logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a set of ExpRelaxedCategorical distributions. The first `N - 1` dimensions index into a batch of independent distributions and the last dimension represents a vector of logits for each class. Only one of `logits` or `probs` should be passed in. probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set of ExpRelaxedCategorical distributions. The first `N - 1` dimensions index into a batch of independent distributions and the last dimension represents a vector of probabilities for each class. Only one of `logits` or `probs` should be passed in. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([logits, probs, temperature], tf.float32) self._temperature = tensor_util.convert_nonref_to_tensor( temperature, dtype_hint=dtype, name='temperature') self._logits = tensor_util.convert_nonref_to_tensor( logits, dtype_hint=dtype, name='logits') self._probs = tensor_util.convert_nonref_to_tensor( probs, dtype_hint=dtype, name='probs') if (self._probs is None) == (self._logits is None): raise ValueError('Must pass `probs` or `logits`, but not both.') super(ExpRelaxedOneHotCategorical, self).__init__( dtype=dtype, reparameterization_type=reparameterization.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def __init__(self, concentration, mixing_concentration, mixing_rate, validate_args=False, allow_nan_stats=True, name='GammaGamma'): """Initializes a batch of Gamma-Gamma distributions. The parameters `concentration`, `mixing_concentration`, and `mixing_rate` must be shaped in a way that supports broadcasting (e.g. `concentration + mixing_concentration + mixing_rate` is a valid operation). Args: concentration: Floating point tensor, the concentration params of the distribution(s). Must contain only positive values. mixing_concentration: Floating point tensor, the concentration params of the mixing Gamma distribution(s). Must contain only positive values. mixing_rate: Floating point tensor, the rate params of the mixing Gamma distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `concentration`, `mixing_concentration`, and `mixing_rate` are different dtypes. """ parameters = dict(locals()) with tf.name_scope(name): dtype = dtype_util.common_dtype( [concentration, mixing_concentration, mixing_rate], dtype_hint=tf.float32) self._concentration = tensor_util.convert_nonref_to_tensor( concentration, name='concentration', dtype=dtype) self._mixing_concentration = tensor_util.convert_nonref_to_tensor( mixing_concentration, name='mixing_concentration', dtype=dtype) self._mixing_rate = tensor_util.convert_nonref_to_tensor( mixing_rate, name='mixing_rate', dtype=dtype) super(GammaGamma, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, parameters=parameters, name=name)
def __init__(self, total_count, concentration, validate_args=False, allow_nan_stats=True, name='DirichletMultinomial'): """Initialize a batch of DirichletMultinomial distributions. Args: total_count: Non-negative floating point tensor, whose dtype is the same as `concentration`. The shape is broadcastable to `[N1,..., Nm]` with `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different Dirichlet multinomial distributions. Its components should be equal to integer values. concentration: Positive floating point tensor with shape broadcastable to `[N1,..., Nm, K]` `m >= 0`. Defines this as a batch of `N1 x ... x Nm` different `K` class Dirichlet multinomial distributions. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ # Broadcasting works because: # * The broadcasting convention is to prepend dimensions of size [1], and # we use the last dimension for the distribution, whereas # the batch dimensions are the leading dimensions, which forces the # distribution dimension to be defined explicitly (i.e. it cannot be # created automatically by prepending). This forces enough explicitness. # * All calls involving `counts` eventually require a broadcast between # `counts` and concentration. # * We broadcast explicitly to include the effect of `counts` on # `concentration` for calls that do not involve `counts`. parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([total_count, concentration], tf.float32) self._total_count = tensor_util.convert_nonref_to_tensor( total_count, dtype=dtype, name='total_count') self._concentration = tensor_util.convert_nonref_to_tensor( concentration, dtype=dtype, name='concentration') super(DirichletMultinomial, self).__init__( dtype=dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, parameters=parameters, name=name)
def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='Cauchy'): """Construct Cauchy distributions. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g. `loc + scale` is a valid operation). Args: loc: Floating point tensor; the modes of the distribution(s). scale: Floating point tensor; the scales (half-widths at half-maximum) of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value '`NaN`' to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `loc` and `scale` have different `dtype`. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale], tf.float32) self._loc = tensor_util.convert_nonref_to_tensor(loc, name='loc', dtype=dtype) self._scale = tensor_util.convert_nonref_to_tensor(scale, name='scale', dtype=dtype) dtype_util.assert_same_float_dtype([self._loc, self._scale]) super(Cauchy, self).__init__(dtype=self._scale.dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def log_add_exp(x, y, name=None): """Computes `log(exp(x) + exp(y))` in a numerically stable way. Args: x: `float` `Tensor` broadcastable with `y`. y: `float` `Tensor` broadcastable with `x`. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., `'log_add_exp'`). Returns: log_add_exp: `log(exp(x) + exp(y))` computed in a numerically stable way. """ with tf.name_scope(name or 'log_add_exp'): dtype = dtype_util.common_dtype([x, y], dtype_hint=tf.float32) x = tf.convert_to_tensor(x, dtype=dtype, name='x') y = tf.convert_to_tensor(y, dtype=dtype, name='y') return tf.maximum(x, y) + tf.math.softplus(-abs(x - y))
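# The identity behind the implementation: log(exp(x) + exp(y)) equals
# max(x, y) + softplus(-|x - y|). A quick NumPy check (illustrative only)
# against the naive formula where the latter does not overflow.
import numpy as np

def log_add_exp_np(x, y):
  return np.maximum(x, y) + np.log1p(np.exp(-np.abs(x - y)))

np.testing.assert_allclose(log_add_exp_np(2.0, -1.0),
                           np.log(np.exp(2.0) + np.exp(-1.0)))
print(log_add_exp_np(1000.0, 1000.0))   # ~1000.6931; the naive formula overflows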
def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='HalfCauchy'): """Construct a half-Cauchy distribution with `loc` and `scale`. Args: loc: Floating-point `Tensor`; the location(s) of the distribution(s). scale: Floating-point `Tensor`; the scale(s) of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False` (i.e. do not validate args). allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: 'HalfCauchy'. Raises: TypeError: if `loc` and `scale` have different `dtype`. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32) self._loc = tensor_util.convert_nonref_to_tensor(loc, name='loc', dtype=dtype) self._scale = tensor_util.convert_nonref_to_tensor(scale, name='scale', dtype=dtype) super(HalfCauchy, self).__init__(dtype=dtype, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def non_negative_axis(axis, rank, name=None): # pylint:disable=redefined-outer-name """Make (possibly negatively indexed) `axis` argument non-negative.""" with tf.name_scope(name or 'non_negative_axis'): if axis is None: return None if rank is None: raise ValueError('Argument `rank` cannot be `None`.') dtype = dtype_util.as_numpy_dtype( dtype_util.common_dtype([axis, rank], dtype_hint=tf.int32)) rank_ = tf.get_static_value(rank) axis_ = tf.get_static_value(axis) if rank_ is None or axis_ is None: axis = tf.convert_to_tensor(axis, dtype=dtype, name='axis') rank = tf.convert_to_tensor(rank, dtype=dtype, name='rank') return tf.where(axis < 0, rank + axis, axis) axis_ = np.array(axis_, dtype=dtype) rank_ = np.array(rank_, dtype=dtype) return np.where(axis_ < 0, axis_ + rank_, axis_)
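# Quick illustration of the normalization: a negative axis counts from the end,
# so axis=-1 with rank 3 refers to axis 2 (NumPy-only, mirrors the static path).
import numpy as np

axis, rank = np.int32(-1), np.int32(3)
print(np.where(axis < 0, rank + axis, axis))   # 2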
def __init__(self, shift=None, scale=None, adjoint=False, validate_args=False, name='affine_linear_operator'): """Instantiates the `AffineLinearOperator` bijector. Args: shift: Floating-point `Tensor`. scale: Subclass of `LinearOperator`. Represents the (batch) positive definite matrix `M` in `R^{k x k}`. adjoint: Python `bool` indicating whether to use the `scale` matrix as specified or its adjoint. Default value: `False`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. Raises: TypeError: if `scale` is not a `LinearOperator`. TypeError: if `shift.dtype` does not match `scale.dtype`. ValueError: if not `scale.is_non_singular`. """ with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([shift, scale], dtype_hint=tf.float32) self._shift = tensor_util.convert_nonref_to_tensor(shift, dtype=dtype, name='shift') if scale is not None: if not isinstance(scale, tf.linalg.LinearOperator): raise TypeError( 'scale is not an instance of tf.LinearOperator') if validate_args and not scale.is_non_singular: raise ValueError('Scale matrix must be non-singular.') self._scale = scale self._adjoint = adjoint super(AffineLinearOperator, self).__init__(forward_min_event_ndims=1, is_constant_jacobian=True, dtype=dtype, validate_args=validate_args, name=name)
def __init__(self, concentration1, concentration0, validate_args=False, allow_nan_stats=True, name="Beta"): """Initialize a batch of Beta distributions. Args: concentration1: Positive floating-point `Tensor` indicating mean number of successes; aka "alpha". Implies `self.dtype` and `self.batch_shape`, i.e., `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`. concentration0: Positive floating-point `Tensor` indicating mean number of failures; aka "beta". Otherwise has same semantics as `concentration1`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([concentration1, concentration0], dtype_hint=tf.float32) self._concentration1 = tensor_util.convert_nonref_to_tensor( concentration1, dtype=dtype, name="concentration1") self._concentration0 = tensor_util.convert_nonref_to_tensor( concentration0, dtype=dtype, name="concentration0") super(Beta, self).__init__(dtype=dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=reparameterization. FULLY_REPARAMETERIZED, parameters=parameters, name=name)
def __init__(self, loc, concentration, validate_args=False, allow_nan_stats=True, name='InverseGaussian'): """Constructs inverse Gaussian distribution with `loc` and `concentration`. Args: loc: Floating-point `Tensor`, the loc params. Must contain only positive values. concentration: Floating-point `Tensor`, the concentration params. Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False` (i.e. do not validate args). allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: 'InverseGaussian'. """ parameters = dict(locals()) with tf.name_scope(name): dtype = dtype_util.common_dtype([loc, concentration], dtype_hint=tf.float32) self._concentration = tensor_util.convert_nonref_to_tensor( concentration, dtype=dtype, name='concentration') self._loc = tensor_util.convert_nonref_to_tensor( loc, dtype=dtype, name='loc') super(InverseGaussian, self).__init__( dtype=self._loc.dtype, reparameterization_type=reparameterization.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def _setdiff1d(a, b, aminusb=True, validate_indices=True): """Compute set difference of elements in last dimension of `a` and `b`.""" if not aminusb: raise NotImplementedError( 'Argument `aminusb != True` is currently unimplemented.') if not validate_indices: raise NotImplementedError( 'Argument `validate_indices != True` is currently unimplemented.') with tf.name_scope('setdiff1d'): dtype = dtype_util.as_numpy_dtype( dtype_util.common_dtype([a, b], dtype_hint=tf.int32)) a_ = tf.get_static_value(a) b_ = tf.get_static_value(b) if a_ is None or b_ is None: a = tf.convert_to_tensor(a, dtype=dtype, name='a') b = tf.convert_to_tensor(b, dtype=dtype, name='b') return tf.sparse.to_dense( tf.sets.difference(a[tf.newaxis], b[tf.newaxis]))[0] a_ = np.array(a_, dtype=dtype) b_ = np.array(b_, dtype=dtype) return np.setdiff1d(a_, b_)
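# The fully-static path above reduces to np.setdiff1d; a small illustration:
import numpy as np

print(np.setdiff1d(np.array([0, 1, 2, 3], np.int32),
                   np.array([1, 3], np.int32)))   # [0 2]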
def __init__(self, concentration, scale=1., validate_args=False, allow_nan_stats=True, name='Pareto'): """Construct Pareto distribution with `concentration` and `scale`. Args: concentration: Floating point tensor. Must contain only positive values. scale: Floating point tensor, equivalent to `mode`. `scale` also restricts the domain of this distribution to be in `[scale, inf)`. Must contain only positive values. Default value: `1`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. Default value: `False` (i.e. do not validate args). allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value '`NaN`' to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. Default value: `True`. name: Python `str` name prefixed to Ops created by this class. Default value: 'Pareto'. """ parameters = dict(locals()) with tf.name_scope(name): dtype = dtype_util.common_dtype([concentration, scale], dtype_hint=tf.float32) self._concentration = tensor_util.convert_nonref_to_tensor( concentration, name='concentration', dtype=dtype) self._scale = tensor_util.convert_nonref_to_tensor( scale, name='scale', dtype=dtype) super(Pareto, self).__init__( dtype=self._concentration.dtype, reparameterization_type=reparameterization.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name)
def __init__(self, map_values, validate_args=False, name='categorical_to_discrete'): """Instantiates `CategoricalToDiscrete` bijector. Args: map_values: 1D numerical tensor of discrete values to map to, sorted in strictly increasing order. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. """ with tf.name_scope(name): dtype = dtype_util.common_dtype([map_values], tf.float32) self._map_values = tensor_util.convert_nonref_to_tensor( map_values, name='map_values', dtype=dtype) super(CategoricalToDiscrete, self).__init__(forward_min_event_ndims=0, is_constant_jacobian=True, validate_args=validate_args, name=name)
def __init__(self, rate, validate_args=False, allow_nan_stats=True, name="Exponential"): """Construct Exponential distribution with parameter `rate`. Args: rate: Floating point tensor, equivalent to `1 / mean`. Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. """ parameters = dict(locals()) # Even though all statistics of an Exponential are defined for valid inputs, # this is not true in the parent class `Gamma`. Therefore, passing # allow_nan_stats=False through to the parent class would result in # unnecessary asserts. with tf.name_scope(name) as name: self._rate = tensor_util.convert_nonref_to_tensor( rate, name="rate", dtype=dtype_util.common_dtype([rate], dtype_hint=tf.float32)) super(Exponential, self).__init__(concentration=1., rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name) self._parameters = parameters
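# Usage sketch, assuming the class is exposed as tfp.distributions.Exponential;
# since Exponential(rate) is Gamma(concentration=1, rate), the density is
# rate * exp(-rate * x) and the mean is 1 / rate. Values are illustrative.
import numpy as np
import tensorflow_probability as tfp

dist = tfp.distributions.Exponential(rate=2.)
print(dist.mean())                       # 0.5
np.testing.assert_allclose(dist.log_prob(1.), np.log(2.) - 2.)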