def check_cam_coherence(path):
  """Check the coherence of a camera path."""
  cam_gt = path + 'cam0_gt.visim'
  cam_render = path + 'cam0.render'

  # Parse the render file: skip the 3-line header, then keep every other line.
  lines = tf.string_split([tf.read_file(cam_render)], '\n').values
  lines = lines[3:]
  lines = lines[::2]
  fields = tf.reshape(tf.string_split(lines, ' ').values, [-1, 10])
  timestamp_from_render, numbers = tf.split(fields, [1, 9], -1)
  numbers = tf.strings.to_number(numbers)
  eye, lookat, up = tf.split(numbers, [3, 3, 3], -1)
  up_vector = tf.nn.l2_normalize(up - eye, axis=-1)
  lookat_vector = tf.nn.l2_normalize(lookat - eye, axis=-1)
  rotation_from_lookat = lookat_matrix(up_vector, lookat_vector)

  # Parse the ground-truth file: skip the 1-line header.
  lines = tf.string_split([tf.read_file(cam_gt)], '\n').values
  lines = lines[1:]
  fields = tf.reshape(tf.string_split(lines, ',').values, [-1, 8])
  timestamp_from_gt, numbers = tf.split(fields, [1, 7], -1)
  numbers = tf.strings.to_number(numbers)
  position, quaternion = tf.split(numbers, [3, 4], -1)
  rotation_from_quaternion = from_quaternion(quaternion)

  # (tr(R1^T R2) - 1) / 2 is the cosine of the angle between two rotations,
  # so it should be ~1 wherever the two parameterizations agree.
  so3_diff = (tf.trace(
      tf.matmul(rotation_from_lookat, rotation_from_quaternion,
                transpose_a=True)) - 1) / 2

  # A bare Python `assert` on a graph-mode Tensor is always truthy, so build
  # assert ops instead and group them for the caller to run.
  return tf.group(
      tf.assert_equal(timestamp_from_render, timestamp_from_gt),
      tf.assert_equal(eye, position),
      tf.assert_near(so3_diff, tf.ones_like(so3_diff)))
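# `lookat_matrix` and `from_quaternion` are helpers defined elsewhere in this
# snippet's repository. Below is a rough, hypothetical sketch of what
# `lookat_matrix` plausibly computes for batches of [N, 3] unit up/forward
# vectors; the basis names, column order, and handedness are assumptions, not
# taken from the source (TF 1.x, where `tf.cross` is available).

def lookat_matrix_sketch(up_vector, lookat_vector):
  """Hypothetical: build [N, 3, 3] camera rotations from up/forward vectors."""
  forward = tf.nn.l2_normalize(lookat_vector, axis=-1)
  # Orthonormalize: `right` is perpendicular to both up and forward.
  right = tf.nn.l2_normalize(tf.cross(up_vector, forward), axis=-1)
  true_up = tf.cross(forward, right)
  # Stack the basis vectors as matrix columns (column order is an assumption).
  return tf.stack([right, true_up, forward], axis=-1)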
def _validate_correlationness(self, x):
  if not self.validate_args:
    return x
  checks = [
      tf.assert_less_equal(
          -1., x, message='Correlations must be >= -1.'),
      tf.assert_less_equal(
          x, 1., message='Correlations must be <= 1.'),
      tf.assert_near(
          tf.matrix_diag_part(x), 1.,
          message='Self-correlations must be = 1.'),
      tf.assert_near(
          x, tf.matrix_transpose(x),
          message='Correlation matrices must be symmetric')
  ]
  with tf.control_dependencies(checks):
    return tf.identity(x)
def _assert_valid_sample(self, x):
  if not self.validate_args:
    return x
  return control_flow_ops.with_dependencies([
      tf.assert_non_positive(x),
      tf.assert_near(
          tf.zeros([], dtype=self.dtype),
          tf.reduce_logsumexp(x, axis=[-1])),
  ], x)
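# The two checks above say `x` must be a vector of log-probabilities: every
# entry non-positive, and logsumexp over the last axis equal to 0 (i.e. the
# underlying probabilities sum to 1). A minimal NumPy illustration:

import numpy as np

log_probs = np.log([0.2, 0.3, 0.5])
assert np.all(log_probs <= 0.)
assert np.isclose(np.logaddexp.reduce(log_probs), 0.)  # logsumexp == 0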
def _maybe_assert_valid_sample(self, x):
  """Checks the validity of a sample."""
  if not self.validate_args:
    return x
  return control_flow_ops.with_dependencies([
      tf.assert_positive(x, message="samples must be positive"),
      tf.assert_near(
          tf.ones([], dtype=self.dtype), tf.reduce_sum(x, -1),
          message="sample last-dimension must sum to `1`"),
  ], x)
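# A usage sketch for the check above, assuming TF 1.x, where `Dirichlet`
# carries this validation: with `validate_args=True`, `log_prob` on a point
# whose last dimension does not sum to 1 raises at run time.

import tensorflow as tf

dist = tf.distributions.Dirichlet(concentration=[1., 2., 3.],
                                  validate_args=True)
with tf.Session() as sess:
  sess.run(dist.log_prob([0.2, 0.3, 0.5]))    # valid simplex point: passes
  try:
    sess.run(dist.log_prob([0.2, 0.3, 0.6]))  # sums to 1.1: assert_near fires
  except tf.errors.InvalidArgumentError as e:
    print(e.message)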
def _validate_correlationness(self, x):
  if not self.validate_args or self.input_output_cholesky:
    return x
  checks = [
      tf.assert_less_equal(
          tf.cast(-1., dtype=x.dtype.base_dtype), x,
          message='Correlations must be >= -1.'),
      tf.assert_less_equal(
          x, tf.cast(1., x.dtype.base_dtype),
          message='Correlations must be <= 1.'),
      tf.assert_near(
          tf.matrix_diag_part(x), tf.cast(1., x.dtype.base_dtype),
          message='Self-correlations must be = 1.'),
      tf.assert_near(
          x, tf.matrix_transpose(x),
          message='Correlation matrices must be symmetric')
  ]
  with tf.control_dependencies(checks):
    return tf.identity(x)
def _validate_correlationness(self, x):
  if not self.validate_args:
    return x
  checks = [
      tf.assert_less_equal(
          tf.cast(-1., dtype=x.dtype.base_dtype), x,
          message='Correlations must be >= -1.'),
      tf.assert_less_equal(
          x, tf.cast(1., x.dtype.base_dtype),
          message='Correlations must be <= 1.'),
      tf.assert_near(
          tf.matrix_diag_part(x), tf.cast(1., x.dtype.base_dtype),
          message='Self-correlations must be = 1.'),
      tf.assert_near(
          x, tf.matrix_transpose(x),
          message='Correlation matrices must be symmetric')
  ]
  with tf.control_dependencies(checks):
    return tf.identity(x)
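# A standalone sketch (TF 1.x style) of what the four checks enforce: a matrix
# passes only if it is elementwise in [-1, 1], has a unit diagonal, and is
# symmetric.

import tensorflow as tf

corr = tf.constant([[1.0, 0.3],
                    [0.3, 1.0]])
checks = [
    tf.assert_less_equal(-1., corr, message='Correlations must be >= -1.'),
    tf.assert_less_equal(corr, 1., message='Correlations must be <= 1.'),
    tf.assert_near(tf.matrix_diag_part(corr), 1.,
                   message='Self-correlations must be = 1.'),
    tf.assert_near(corr, tf.matrix_transpose(corr),
                   message='Correlation matrices must be symmetric'),
]
with tf.Session() as sess:
  sess.run(tf.group(*checks))  # passes; e.g. a 1.2 off-diagonal would raise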
def _maybe_assert_valid_sample(self, samples):
  """Check counts for proper shape, values, then return tensor version."""
  if not self.validate_args:
    return samples
  with tf.control_dependencies([
      tf.assert_near(
          1., tf.linalg.norm(samples, axis=-1),
          message='samples must be unit length'),
      tf.assert_equal(
          tf.shape(samples)[-1:], self.event_shape_tensor(),
          message=('samples must have innermost dimension matching that of '
                   '`self.mean_direction`')),
  ]):
    return tf.identity(samples)
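# The unit-length check in isolation (TF 1.x sketch): `tf.assert_near`
# broadcasts the scalar 1. against the per-sample norms.

import tensorflow as tf

samples = tf.constant([[0.6, 0.8, 0.],
                       [1., 0., 0.]])
check = tf.assert_near(1., tf.linalg.norm(samples, axis=-1),
                       message='samples must be unit length')
with tf.control_dependencies([check]):
  samples = tf.identity(samples)
with tf.Session() as sess:
  sess.run(samples)  # passes; a row like [1., 1., 0.] would raise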
def backward_tensor(self, y):
  size = np.prod(y.shape.as_list())
  N = int(np.sqrt(size / self.num_matrices))
  reshaped = tf.reshape(y, shape=(N, N, self.num_matrices))
  indices = np.dstack(np.tril_indices(N))[0]
  # Gather the lower-triangular entries of every matrix, transposed so the
  # matrix index comes first: shape [num_matrices, N * (N + 1) / 2].
  triangular = tf.transpose(tf.gather_nd(reshaped, indices))
  with tf.control_dependencies([
      tf.assert_near(y, self.forward_tensor(triangular),
                     message='check equal')
  ]):
    # Add zeros (a no-op) to attach the control dependency to the output.
    triangular = triangular + tf.zeros_like(triangular)
  return triangular
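# NumPy sketch of the packing that `backward_tensor` undoes, under the
# assumption that `forward_tensor` scatters each matrix's lower triangle back
# from the packed rows (the names here are hypothetical):

import numpy as np

N, num_matrices = 3, 2
y = np.random.randn(N, N, num_matrices)
y = np.tril(y.transpose(2, 0, 1)).transpose(1, 2, 0)  # zero upper triangles

rows, cols = np.tril_indices(N)
# Same gather as tf.gather_nd above, transposed so the matrix index is first:
triangular = y[rows, cols].T          # shape [num_matrices, N*(N+1)/2]

restored = np.zeros_like(y)           # inverse scatter (forward_tensor's job)
restored[rows, cols] = triangular.T
assert np.allclose(y, restored)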
def __init__(self,
             loc=None,
             covariance_matrix=None,
             validate_args=False,
             allow_nan_stats=True,
             name="MultivariateNormalFullCovariance"):
  """Construct Multivariate Normal distribution on `R^k`.

  The `batch_shape` is the broadcast shape between `loc` and
  `covariance_matrix` arguments.

  The `event_shape` is given by the last dimension of the matrix implied by
  `covariance_matrix`. The last dimension of `loc` (if provided) must
  broadcast with this.

  A non-batch `covariance_matrix` is a `k x k` symmetric positive definite
  matrix. In other words it is (real) symmetric with all eigenvalues
  strictly positive.

  Additional leading dimensions (if any) will index batches.

  Args:
    loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
      implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
      where `b >= 0` and `k` is the event size.
    covariance_matrix: Floating-point, symmetric positive definite `Tensor`
      of same `dtype` as `loc`. The strict upper triangle of
      `covariance_matrix` is ignored, so if `covariance_matrix` is not
      symmetric no error will be raised (unless `validate_args is True`).
      `covariance_matrix` has shape `[B1, ..., Bb, k, k]` where `b >= 0` and
      `k` is the event size.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
      (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
      result is undefined. When `False`, an exception is raised if one or
      more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Raises:
    ValueError: if neither `loc` nor `covariance_matrix` are specified.
  """
  parameters = dict(locals())
  # Convert the covariance_matrix up to a scale_tril and call MVNTriL.
  with tf.name_scope(name) as name:
    with tf.name_scope("init", values=[loc, covariance_matrix]):
      if covariance_matrix is None:
        scale_tril = None
      else:
        covariance_matrix = tf.convert_to_tensor(
            covariance_matrix, name="covariance_matrix")
        if validate_args:
          covariance_matrix = control_flow_ops.with_dependencies([
              tf.assert_near(
                  covariance_matrix,
                  tf.matrix_transpose(covariance_matrix),
                  message="Matrix was not symmetric")
          ], covariance_matrix)
        # No need to validate that covariance_matrix is non-singular:
        # LinearOperatorLowerTriangular has an assert_non_singular method
        # that is called by the Bijector. However, cholesky() ignores the
        # upper triangular part, so we do need to separately assert
        # symmetry.
        scale_tril = tf.cholesky(covariance_matrix)
      super(MultivariateNormalFullCovariance, self).__init__(
          loc=loc,
          scale_tril=scale_tril,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
      self._parameters = parameters
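# A usage sketch, assuming a TF 1.x-era tensorflow_probability build that
# still ships MultivariateNormalFullCovariance:

import tensorflow_probability as tfp

cov = [[1.0, 0.6],
       [0.6, 2.0]]  # symmetric positive definite
mvn = tfp.distributions.MultivariateNormalFullCovariance(
    loc=[0., 0.], covariance_matrix=cov, validate_args=True)
# With validate_args=True, the assert_near symmetry check above runs ahead
# of the Cholesky factorization at graph execution time.
samples = mvn.sample(3, seed=42)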
def _sample_n(self, n, seed=None):
  seed = seed_stream.SeedStream(seed, salt='vom_mises_fisher')
  # The sampling strategy relies on the fact that vMF variates are symmetric
  # about the mean direction. Accordingly, if we have a sampling strategy for
  # the away-from-mean angle, then we can uniformly sample the remaining
  # dimensions on the S^{dim-2} sphere for each, and rotate these samples
  # from a (1, 0, 0, ..., 0)-mode distribution into the target orientation.
  #
  # This is easy to imagine on the 1-sphere (S^1; in 2-D space): sample a
  # von-Mises distributed `x` value in [-1, 1], then uniformly select what
  # amounts to an "up" or "down" additional degree of freedom after unit
  # normalizing, followed by a final rotation to the desired mean direction
  # from a basis of (1, 0).
  #
  # On S^2 (in 3-D), selecting a vMF `x` identifies a circle in `yz` on the
  # unit sphere over which the distribution is uniform, in particular the
  # circle where x = \hat{x} intersects the unit sphere. We pick a point on
  # that circle, then rotate to the desired mean direction from a basis of
  # (1, 0, 0).
  event_dim = self.event_shape[0].value or self._event_shape_tensor()[0]
  sample_batch_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
  dim = tf.cast(event_dim - 1, self.dtype)
  if event_dim == 3:
    samples_dim0 = self._sample_3d(n, seed=seed)
  else:
    # Wood'94 provides a rejection algorithm to sample the x coordinate.
    # Wood'94 definition of b:
    #   b = (-2 * kappa + tf.sqrt(4 * kappa**2 + dim**2)) / dim
    # https://stats.stackexchange.com/questions/156729 suggests:
    b = dim / (2 * self.concentration +
               tf.sqrt(4 * self.concentration**2 + dim**2))
    # TODO(bjp): Integrate any useful numerical tricks from hyperspherical
    # VAE: https://github.com/nicola-decao/s-vae-tf/
    x = (1 - b) / (1 + b)
    c = self.concentration * x + dim * tf.log1p(-x**2)
    beta = tf.distributions.Beta(dim / 2, dim / 2)

    def cond_fn(w, should_continue):
      del w
      return tf.reduce_any(should_continue)

    def body_fn(w, should_continue):
      z = beta.sample(sample_shape=sample_batch_shape, seed=seed())
      w = tf.where(should_continue,
                   (1 - (1 + b) * z) / (1 - (1 - b) * z), w)
      w = tf.check_numerics(w, 'w')
      should_continue = tf.logical_and(
          should_continue,
          self.concentration * w + dim * tf.log1p(-x * w) - c <
          tf.log(tf.random_uniform(sample_batch_shape, seed=seed(),
                                   dtype=self.dtype)))
      return w, should_continue

    w = tf.zeros(sample_batch_shape, dtype=self.dtype)
    should_continue = tf.ones(sample_batch_shape, dtype=tf.bool)
    samples_dim0 = tf.while_loop(cond_fn, body_fn, (w, should_continue))[0]
    samples_dim0 = samples_dim0[..., tf.newaxis]
  if not self._allow_nan_stats:
    # Verify samples are w/in -1, 1, with useful error output tensors (top
    # value rather than all values).
    with tf.control_dependencies([
        tf.assert_less_equal(
            samples_dim0, self.dtype.as_numpy_dtype(1.01),
            data=[tf.nn.top_k(tf.reshape(samples_dim0, [-1]))[0]]),
        tf.assert_greater_equal(
            samples_dim0, self.dtype.as_numpy_dtype(-1.01),
            data=[-tf.nn.top_k(tf.reshape(-samples_dim0, [-1]))[0]])
    ]):
      samples_dim0 = tf.identity(samples_dim0)
  samples_otherdims_shape = tf.concat(
      [sample_batch_shape, [event_dim - 1]], axis=0)
  unit_otherdims = tf.nn.l2_normalize(
      tf.random_normal(samples_otherdims_shape, seed=seed(),
                       dtype=self.dtype),
      axis=-1)
  samples = tf.concat([
      samples_dim0,  # we must avoid sqrt(1 - (>1)**2)
      tf.sqrt(tf.maximum(1 - samples_dim0**2, 0.)) * unit_otherdims
  ], axis=-1)
  samples = tf.nn.l2_normalize(samples, axis=-1)
  if not self._allow_nan_stats:
    samples = tf.check_numerics(samples, 'samples')
  # Runtime assert that samples are unit length.
  if not self._allow_nan_stats:
    worst, idx = tf.nn.top_k(
        tf.reshape(tf.abs(1 - tf.linalg.norm(samples, axis=-1)), [-1]))
    with tf.control_dependencies([
        tf.assert_near(
            self.dtype.as_numpy_dtype(0), worst,
            data=[worst, idx,
                  tf.gather(tf.reshape(samples, [-1, event_dim]), idx)],
            atol=1e-4, summarize=100)
    ]):
      samples = tf.identity(samples)
  # The samples generated are symmetric around a mode at (1, 0, 0, ..., 0).
  # Now, we move the mode to `self.mean_direction` using a rotation matrix.
  if not self._allow_nan_stats:
    # Assert the basis vector rotates to the mean direction, as expected.
    basis = tf.cast(tf.concat([[1.], tf.zeros([event_dim - 1])], axis=0),
                    self.dtype)
    with tf.control_dependencies([
        tf.assert_less(
            tf.linalg.norm(self._rotate(basis) - self.mean_direction,
                           axis=-1),
            self.dtype.as_numpy_dtype(1e-5))
    ]):
      return self._rotate(samples)
  return self._rotate(samples)
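# A plain-NumPy sketch of the Wood'94 rejection step used above, for the
# coordinate along the mean direction. This mirrors the TF while_loop's
# accept/continue condition; the function name and signature are invented
# for illustration.

import numpy as np

def sample_vmf_x(kappa, dim, n, rng=np.random):
  b = dim / (2 * kappa + np.sqrt(4 * kappa**2 + dim**2))
  x = (1 - b) / (1 + b)
  c = kappa * x + dim * np.log1p(-x**2)
  out = np.empty(n)
  done = 0
  while done < n:
    z = rng.beta(dim / 2., dim / 2., size=n - done)
    w = (1 - (1 + b) * z) / (1 - (1 - b) * z)
    # Accept where the TF code's `should_continue` would flip to False.
    accept = kappa * w + dim * np.log1p(-x * w) - c >= np.log(
        rng.uniform(size=n - done))
    w = w[accept]
    out[done:done + w.size] = w
    done += w.size
  return out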
def __init__(self,
             mean_direction,
             concentration,
             validate_args=False,
             allow_nan_stats=True,
             name='VonMisesFisher'):
  """Creates a new `VonMisesFisher` instance.

  Args:
    mean_direction: Floating-point `Tensor` with shape [B1, ... Bn, D]. A
      unit vector indicating the mode of the distribution, or the
      unit-normalized direction of the mean. (This is *not* in general the
      mean of the distribution; the mean is not generally in the support of
      the distribution.) NOTE: `D` is currently restricted to <= 5.
    concentration: Floating-point `Tensor` having batch shape [B1, ... Bn]
      broadcastable with `mean_direction`. The level of concentration of
      samples around the `mean_direction`. `concentration=0` indicates a
      uniform distribution over the unit hypersphere, and
      `concentration=+inf` indicates a `Deterministic` distribution (delta
      function) at `mean_direction`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
      (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
      result is undefined. When `False`, an exception is raised if one or
      more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Raises:
    ValueError: For known-bad arguments, i.e. unsupported event dimension.
  """
  parameters = dict(locals())
  with tf.name_scope(name, values=[mean_direction, concentration]) as name:
    assertions = [
        tf.assert_non_negative(
            concentration, message='`concentration` must be non-negative'),
        tf.assert_greater(
            tf.shape(mean_direction)[-1], 1,
            message='`mean_direction` may not have scalar event shape'),
        tf.assert_near(
            1., tf.linalg.norm(mean_direction, axis=-1),
            message='`mean_direction` must be unit-length')
    ] if validate_args else []
    if mean_direction.shape.with_rank_at_least(1)[-1].value is not None:
      if mean_direction.shape.with_rank_at_least(1)[-1].value > 5:
        raise ValueError('vMF ndims > 5 is not currently supported')
    elif validate_args:
      assertions += [
          tf.assert_less_equal(
              tf.shape(mean_direction)[-1], 5,
              message='vMF ndims > 5 is not currently supported')
      ]
    with tf.control_dependencies(assertions):
      self._mean_direction = tf.convert_to_tensor(
          mean_direction, name='mean_direction')
      self._concentration = tf.convert_to_tensor(
          concentration, name='concentration')
    tf.assert_same_float_dtype(
        [self._mean_direction, self._concentration])
    # mean_direction is always reparameterized.
    # concentration is only reparameterized for event_dim==3, via an
    # inversion sampler.
    reparameterization_type = (
        tf.distributions.FULLY_REPARAMETERIZED
        if mean_direction.shape.with_rank_at_least(1)[-1].value == 3
        else tf.distributions.NOT_REPARAMETERIZED)
    super(VonMisesFisher, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=reparameterization_type,
        parameters=parameters,
        graph_parents=[self._mean_direction, self._concentration],
        name=name)
def _sample_n(self, n, seed=None):
  seed = seed_stream.SeedStream(seed, salt='vom_mises_fisher')
  # The sampling strategy relies on the fact that vMF variates are symmetric
  # about the mean direction. Accordingly, if we have a sampling strategy for
  # the away-from-mean angle, then we can uniformly sample the remaining
  # dimensions on the S^{dim-2} sphere for each, and rotate these samples
  # from a (1, 0, 0, ..., 0)-mode distribution into the target orientation.
  #
  # This is easy to imagine on the 1-sphere (S^1; in 2-D space): sample a
  # von-Mises distributed `x` value in [-1, 1], then uniformly select what
  # amounts to an "up" or "down" additional degree of freedom after unit
  # normalizing, followed by a final rotation to the desired mean direction
  # from a basis of (1, 0).
  #
  # On S^2 (in 3-D), selecting a vMF `x` identifies a circle in `yz` on the
  # unit sphere over which the distribution is uniform, in particular the
  # circle where x = \hat{x} intersects the unit sphere. We pick a point on
  # that circle, then rotate to the desired mean direction from a basis of
  # (1, 0, 0).
  event_dim = self.event_shape[0].value or self._event_shape_tensor()[0]
  sample_batch_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
  dim = tf.cast(event_dim - 1, self.dtype)
  if event_dim == 3:
    samples_dim0 = self._sample_3d(n, seed=seed)
  else:
    # Wood'94 provides a rejection algorithm to sample the x coordinate.
    # Wood'94 definition of b:
    #   b = (-2 * kappa + tf.sqrt(4 * kappa**2 + dim**2)) / dim
    # https://stats.stackexchange.com/questions/156729 suggests:
    b = dim / (2 * self.concentration +
               tf.sqrt(4 * self.concentration**2 + dim**2))
    # TODO(bjp): Integrate any useful numerical tricks from hyperspherical
    # VAE: https://github.com/nicola-decao/s-vae-tf/
    x = (1 - b) / (1 + b)
    c = self.concentration * x + dim * tf.log1p(-x**2)
    beta = beta_lib.Beta(dim / 2, dim / 2)

    def cond_fn(w, should_continue):
      del w
      return tf.reduce_any(should_continue)

    def body_fn(w, should_continue):
      z = beta.sample(sample_shape=sample_batch_shape, seed=seed())
      w = tf.where(should_continue,
                   (1 - (1 + b) * z) / (1 - (1 - b) * z), w)
      w = tf.check_numerics(w, 'w')
      should_continue = tf.logical_and(
          should_continue,
          self.concentration * w + dim * tf.log1p(-x * w) - c <
          tf.log(tf.random_uniform(sample_batch_shape, seed=seed(),
                                   dtype=self.dtype)))
      return w, should_continue

    w = tf.zeros(sample_batch_shape, dtype=self.dtype)
    should_continue = tf.ones(sample_batch_shape, dtype=tf.bool)
    samples_dim0 = tf.while_loop(cond_fn, body_fn, (w, should_continue))[0]
    samples_dim0 = samples_dim0[..., tf.newaxis]
  if not self._allow_nan_stats:
    # Verify samples are w/in -1, 1, with useful error output tensors (top
    # value rather than all values).
    with tf.control_dependencies([
        tf.assert_less_equal(
            samples_dim0, self.dtype.as_numpy_dtype(1.01),
            data=[tf.nn.top_k(tf.reshape(samples_dim0, [-1]))[0]]),
        tf.assert_greater_equal(
            samples_dim0, self.dtype.as_numpy_dtype(-1.01),
            data=[-tf.nn.top_k(tf.reshape(-samples_dim0, [-1]))[0]])
    ]):
      samples_dim0 = tf.identity(samples_dim0)
  samples_otherdims_shape = tf.concat(
      [sample_batch_shape, [event_dim - 1]], axis=0)
  unit_otherdims = tf.nn.l2_normalize(
      tf.random_normal(samples_otherdims_shape, seed=seed(),
                       dtype=self.dtype),
      axis=-1)
  samples = tf.concat([
      samples_dim0,  # we must avoid sqrt(1 - (>1)**2)
      tf.sqrt(tf.maximum(1 - samples_dim0**2, 0.)) * unit_otherdims
  ], axis=-1)
  samples = tf.nn.l2_normalize(samples, axis=-1)
  if not self._allow_nan_stats:
    samples = tf.check_numerics(samples, 'samples')
  # Runtime assert that samples are unit length.
  if not self._allow_nan_stats:
    worst, idx = tf.nn.top_k(
        tf.reshape(tf.abs(1 - tf.linalg.norm(samples, axis=-1)), [-1]))
    with tf.control_dependencies([
        tf.assert_near(
            self.dtype.as_numpy_dtype(0), worst,
            data=[worst, idx,
                  tf.gather(tf.reshape(samples, [-1, event_dim]), idx)],
            atol=1e-4, summarize=100)
    ]):
      samples = tf.identity(samples)
  # The samples generated are symmetric around a mode at (1, 0, 0, ..., 0).
  # Now, we move the mode to `self.mean_direction` using a rotation matrix.
  if not self._allow_nan_stats:
    # Assert the basis vector rotates to the mean direction, as expected.
    basis = tf.cast(tf.concat([[1.], tf.zeros([event_dim - 1])], axis=0),
                    self.dtype)
    with tf.control_dependencies([
        tf.assert_less(
            tf.linalg.norm(self._rotate(basis) - self.mean_direction,
                           axis=-1),
            self.dtype.as_numpy_dtype(1e-5))
    ]):
      return self._rotate(samples)
  return self._rotate(samples)
def __init__(self,
             mean_direction,
             concentration,
             validate_args=False,
             allow_nan_stats=True,
             name='VonMisesFisher'):
  """Creates a new `VonMisesFisher` instance.

  Args:
    mean_direction: Floating-point `Tensor` with shape [B1, ... Bn, D]. A
      unit vector indicating the mode of the distribution, or the
      unit-normalized direction of the mean. (This is *not* in general the
      mean of the distribution; the mean is not generally in the support of
      the distribution.) NOTE: `D` is currently restricted to <= 5.
    concentration: Floating-point `Tensor` having batch shape [B1, ... Bn]
      broadcastable with `mean_direction`. The level of concentration of
      samples around the `mean_direction`. `concentration=0` indicates a
      uniform distribution over the unit hypersphere, and
      `concentration=+inf` indicates a `Deterministic` distribution (delta
      function) at `mean_direction`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
      (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
      result is undefined. When `False`, an exception is raised if one or
      more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Raises:
    ValueError: For known-bad arguments, i.e. unsupported event dimension.
  """
  parameters = dict(locals())
  with tf.name_scope(name, values=[mean_direction, concentration]) as name:
    dtype = dtype_util.common_dtype([mean_direction, concentration],
                                    tf.float32)
    mean_direction = tf.convert_to_tensor(
        mean_direction, name='mean_direction', dtype=dtype)
    concentration = tf.convert_to_tensor(
        concentration, name='concentration', dtype=dtype)
    assertions = [
        tf.assert_non_negative(
            concentration, message='`concentration` must be non-negative'),
        tf.assert_greater(
            tf.shape(mean_direction)[-1], 1,
            message='`mean_direction` may not have scalar event shape'),
        tf.assert_near(
            1., tf.linalg.norm(mean_direction, axis=-1),
            message='`mean_direction` must be unit-length')
    ] if validate_args else []
    if mean_direction.shape.with_rank_at_least(1)[-1].value is not None:
      if mean_direction.shape.with_rank_at_least(1)[-1].value > 5:
        raise ValueError('vMF ndims > 5 is not currently supported')
    elif validate_args:
      assertions += [
          tf.assert_less_equal(
              tf.shape(mean_direction)[-1], 5,
              message='vMF ndims > 5 is not currently supported')
      ]
    with tf.control_dependencies(assertions):
      self._mean_direction = tf.identity(mean_direction)
      self._concentration = tf.identity(concentration)
    tf.assert_same_float_dtype(
        [self._mean_direction, self._concentration])
    # mean_direction is always reparameterized.
    # concentration is only reparameterized for event_dim==3, via an
    # inversion sampler.
    reparameterization_type = (
        reparameterization.FULLY_REPARAMETERIZED
        if mean_direction.shape.with_rank_at_least(1)[-1].value == 3
        else reparameterization.NOT_REPARAMETERIZED)
    super(VonMisesFisher, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=reparameterization_type,
        parameters=parameters,
        graph_parents=[self._mean_direction, self._concentration],
        name=name)
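# Construction and sampling sketch, assuming a tensorflow_probability build
# matching this source:

import tensorflow as tf
import tensorflow_probability as tfp

vmf = tfp.distributions.VonMisesFisher(
    mean_direction=[0., 0., 1.], concentration=10., validate_args=True)
samples = vmf.sample(5, seed=42)          # shape [5, 3]
norms = tf.linalg.norm(samples, axis=-1)  # ~1. everywhere, per the asserts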
def __init__(self,
             loc=None,
             covariance_matrix=None,
             validate_args=False,
             allow_nan_stats=True,
             name="MultivariateNormalFullCovariance"):
  """Construct Multivariate Normal distribution on `R^k`.

  The `batch_shape` is the broadcast shape between `loc` and
  `covariance_matrix` arguments.

  The `event_shape` is given by the last dimension of the matrix implied by
  `covariance_matrix`. The last dimension of `loc` (if provided) must
  broadcast with this.

  A non-batch `covariance_matrix` is a `k x k` symmetric positive definite
  matrix. In other words it is (real) symmetric with all eigenvalues
  strictly positive.

  Additional leading dimensions (if any) will index batches.

  Args:
    loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
      implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
      where `b >= 0` and `k` is the event size.
    covariance_matrix: Floating-point, symmetric positive definite `Tensor`
      of same `dtype` as `loc`. The strict upper triangle of
      `covariance_matrix` is ignored, so if `covariance_matrix` is not
      symmetric no error will be raised (unless `validate_args is True`).
      `covariance_matrix` has shape `[B1, ..., Bb, k, k]` where `b >= 0` and
      `k` is the event size.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
      (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
      result is undefined. When `False`, an exception is raised if one or
      more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Raises:
    ValueError: if neither `loc` nor `covariance_matrix` are specified.
  """
  parameters = dict(locals())
  # Convert the covariance_matrix up to a scale_tril and call MVNTriL.
  with tf.name_scope(name) as name:
    with tf.name_scope("init", values=[loc, covariance_matrix]):
      dtype = dtype_util.common_dtype([loc, covariance_matrix], tf.float32)
      loc = loc if loc is None else tf.convert_to_tensor(
          loc, name="loc", dtype=dtype)
      if covariance_matrix is None:
        scale_tril = None
      else:
        covariance_matrix = tf.convert_to_tensor(
            covariance_matrix, name="covariance_matrix", dtype=dtype)
        if validate_args:
          covariance_matrix = control_flow_ops.with_dependencies([
              tf.assert_near(
                  covariance_matrix,
                  tf.matrix_transpose(covariance_matrix),
                  message="Matrix was not symmetric")
          ], covariance_matrix)
        # No need to validate that covariance_matrix is non-singular:
        # LinearOperatorLowerTriangular has an assert_non_singular method
        # that is called by the Bijector. However, cholesky() ignores the
        # upper triangular part, so we do need to separately assert
        # symmetry.
        scale_tril = tf.cholesky(covariance_matrix)
      super(MultivariateNormalFullCovariance, self).__init__(
          loc=loc,
          scale_tril=scale_tril,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
      self._parameters = parameters