def testStudentCDFAndLogCDF(self):
        with self.test_session():
            batch_size = 6
            df = constant_op.constant([3.] * batch_size)
            mu = constant_op.constant([7.] * batch_size)
            sigma = constant_op.constant([8.] * batch_size)
            df_v = 3.
            mu_v = 7.
            sigma_v = 8.
            t = np.array([-2.5, 2.5, 8., 0., -1., 2.], dtype=np.float32)
            student = student_t.StudentT(df, mu=mu, sigma=sigma)

            log_cdf = student.log_cdf(t)
            self.assertEqual(log_cdf.get_shape(), (6,))
            log_cdf_values = log_cdf.eval()
            cdf = student.cdf(t)
            self.assertEqual(cdf.get_shape(), (6,))
            cdf_values = cdf.eval()

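            # Reference CDF and log-CDF values computed with scipy.stats.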
            expected_log_cdf = stats.t.logcdf(t, df_v, loc=mu_v, scale=sigma_v)
            expected_cdf = stats.t.cdf(t, df_v, loc=mu_v, scale=sigma_v)
            self.assertAllClose(expected_log_cdf,
                                log_cdf_values,
                                atol=0.,
                                rtol=1e-5)
            self.assertAllClose(np.log(expected_cdf),
                                log_cdf_values,
                                atol=0.,
                                rtol=1e-5)
            self.assertAllClose(expected_cdf, cdf_values, atol=0., rtol=1e-5)
            self.assertAllClose(np.exp(expected_log_cdf),
                                cdf_values,
                                atol=0.,
                                rtol=1e-5)

# Example 2
    def __init__(self,
                 df,
                 loc=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="VectorStudentT"):
        """Instantiates the vector Student's t-distributions on `R^k`.

    The `batch_shape` is the broadcast between `df.batch_shape` and
    `Affine.batch_shape` where `Affine` is constructed from `loc` and
    `scale_*` arguments.

    The `event_shape` is given by `Affine.event_shape`.

    Args:
      df: Floating-point `Tensor`. The degrees of freedom of the
        distribution(s). `df` must contain only positive values. Must be
        scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
        same `batch_shape` implied by `loc`, `scale_*`.
      loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
        applied.
      scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
        scaling of the identity matrix. When `scale_identity_multiplier =
        scale_diag = scale_tril = None` then `scale += IdentityMatrix`.
        Otherwise no scaled identity matrix is added to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ..., k], which represents a k x k
        diagonal matrix. When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing a lower triangular
        matrix. `scale_tril` has shape [N1, N2, ..., k, k], which represents a
        k x k lower triangular matrix. When `None` no `scale_tril` term is
        added to `scale`. The upper triangular elements above the diagonal are
        ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
        represents an r x r diagonal matrix. When `None` low rank updates will
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
        parameters = locals()
        graph_parents = [
            df, loc, scale_identity_multiplier, scale_diag, scale_tril,
            scale_perturb_factor, scale_perturb_diag
        ]
        with ops.name_scope(name) as ns:
            with ops.name_scope("init", values=graph_parents):
                # The shape of the _VectorStudentT distribution is governed by the
                # relationship between df.batch_shape and affine.batch_shape. In
                # pseudocode the basic procedure is:
                #   if df.batch_shape is scalar:
                #     if affine.batch_shape is not scalar:
                #       # broadcast distribution.sample so
                #       # it has affine.batch_shape.
                #     self.batch_shape = affine.batch_shape
                #   else:
                #     if affine.batch_shape is scalar:
                #       # let affine broadcasting do its thing.
                #     self.batch_shape = df.batch_shape
                # All of the above magic is actually handled by TransformedDistribution.
                # Here we really only need to collect the affine.batch_shape and decide
                # what we're going to pass in to TransformedDistribution's
                # (override) batch_shape arg.
                affine = bijectors.Affine(
                    shift=loc,
                    scale_identity_multiplier=scale_identity_multiplier,
                    scale_diag=scale_diag,
                    scale_tril=scale_tril,
                    scale_perturb_factor=scale_perturb_factor,
                    scale_perturb_diag=scale_perturb_diag,
                    validate_args=validate_args)
                distribution = student_t.StudentT(
                    df=df,
                    loc=array_ops.zeros([], dtype=affine.dtype),
                    scale=array_ops.ones([], dtype=affine.dtype))
                batch_shape, override_event_shape = _infer_shapes(
                    affine.scale, affine.shift)
                override_batch_shape = distribution_util.pick_vector(
                    distribution.is_scalar_batch(), batch_shape,
                    constant_op.constant([], dtype=dtypes.int32))
                super(_VectorStudentT,
                      self).__init__(distribution=distribution,
                                     bijector=affine,
                                     batch_shape=override_batch_shape,
                                     event_shape=override_event_shape,
                                     validate_args=validate_args,
                                     name=ns)
                self._parameters = parameters
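
# A minimal usage sketch, assuming a TF 1.x graph-mode environment in which
# `_VectorStudentT` (defined above) is importable; the numeric values below are
# illustrative only.
def _example_vector_student_t_usage():
    df = [2., 3.]                                # batch of two distributions
    loc = [[1., 2., 3.], [-1., 0., 1.]]          # event dimension k = 3
    scale_diag = [[2., 2., 2.], [.5, .5, .5]]    # per-member k x k diagonal scale
    vt = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag)
    # batch_shape is the broadcast of df with the Affine parameters: (2,).
    # event_shape comes from the Affine bijector: (3,).
    return vt.sample(5)                          # Tensor of shape (5, 2, 3)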

# Example 3
    def __init__(self,
                 df,
                 shift=None,
                 scale_identity_multiplier=None,
                 scale_diag=None,
                 scale_tril=None,
                 scale_perturb_factor=None,
                 scale_perturb_diag=None,
                 validate_args=False,
                 allow_nan_stats=True,
                 name="VectorStudentT"):
        """Instantiates the vector Student's t-distributions on `R^k`.

    The `batch_shape` is the broadcast between `df.batch_shape` and
    `Affine.batch_shape` where `Affine` is constructed from `shift` and
    `scale_*` arguments.

    The `event_shape` is given by `Affine.event_shape`.

    Args:
      df: Numeric `Tensor`. The degrees of freedom of the distribution(s).
        `df` must contain only positive values.
        Must be scalar if `shift`, `scale_*` imply non-scalar batch_shape or
        must have the same `batch_shape` implied by `shift`, `scale_*`.
      shift: Numeric `Tensor`.  If this is set to `None`, no `shift` is applied.
      scale_identity_multiplier: Floating-point rank-0 `Tensor` representing a
        scaling of the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled identity matrix is added
        to `scale`.
      scale_diag: Numeric `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Numeric `Tensor` representing a lower triangular matrix.
        `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
        lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Numeric `Tensor` representing factor matrix with
        last two dimensions of shape `(k, r)`.
        When `None`, no rank-r update is added to `scale`.
      scale_perturb_diag: Numeric `Tensor` representing the diagonal matrix.
        `scale_perturb_diag` has shape [N1, N2, ... r], which represents an
        r x r diagonal matrix.
        When `None` low rank updates will take the form `scale_perturb_factor *
        scale_perturb_factor.T`.
      validate_args: `Boolean`, default `False`.  Whether to validate input
        with asserts.  If `validate_args` is `False`, and the inputs are
        invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`.  If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member.  If `True`, batch members with valid parameters leading
        to undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.
    """
        parameters = locals()
        parameters.pop("self")
        graph_parents = [
            df, shift, scale_identity_multiplier, scale_diag, scale_tril,
            scale_perturb_factor, scale_perturb_diag
        ]
        with ops.name_scope(name) as ns:
            with ops.name_scope("init", values=graph_parents):
                # The shape of the _VectorStudentT distribution is governed by the
                # relationship between df.batch_shape and affine.batch_shape. In
                # pseudocode the basic procedure is:
                #   if df.batch_shape is scalar:
                #     if affine.batch_shape is not scalar:
                #       # broadcast self._distribution.sample so
                #       # it has affine.batch_shape.
                #     self.batch_shape = affine.batch_shape
                #   else:
                #     if affine.batch_shape is scalar:
                #       # let affine broadcasting do its thing.
                #     self.batch_shape = df.batch_shape
                # All of the above magic is actually handled by TransformedDistribution.
                # Here we really only need to collect the affine.batch_shape and decide
                # what we're going to pass in to TransformedDistribution's
                # (override) batch_shape arg.
                self._distribution = student_t.StudentT(df=df, mu=0., sigma=1.)
                self._affine = bijectors.Affine(
                    shift=shift,
                    scale_identity_multiplier=scale_identity_multiplier,
                    scale_diag=scale_diag,
                    scale_tril=scale_tril,
                    scale_perturb_factor=scale_perturb_factor,
                    scale_perturb_diag=scale_perturb_diag,
                    validate_args=validate_args)
                self._batch_shape, self._override_event_shape = _infer_shapes(
                    self.scale, self.shift)
                self._override_batch_shape = distribution_util.pick_vector(
                    self._distribution.is_scalar_batch(), self._batch_shape,
                    constant_op.constant([], dtype=dtypes.int32))
                super(_VectorStudentT,
                      self).__init__(distribution=self._distribution,
                                     bijector=self._affine,
                                     batch_shape=self._override_batch_shape,
                                     event_shape=self._override_event_shape,
                                     validate_args=validate_args,
                                     name=ns)
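
# Minimal NumPy sketch of the `scale` composition the docstrings above
# describe: each non-None `scale_*` term is added to `scale`, and the identity
# matrix is used only when the multiplier, diag and tril terms are all None.
# This only illustrates the documented behavior; the actual Affine bijector
# also handles batching, dtype promotion and validation.
import numpy as np

def build_scale(k,
                scale_identity_multiplier=None,
                scale_diag=None,
                scale_tril=None,
                scale_perturb_factor=None,
                scale_perturb_diag=None):
    """Assembles a k x k scale matrix following the docstring's description."""
    scale = np.zeros((k, k))
    if (scale_identity_multiplier is None and scale_diag is None
            and scale_tril is None):
        scale += np.eye(k)                             # `scale += IdentityMatrix`
    if scale_identity_multiplier is not None:
        scale += scale_identity_multiplier * np.eye(k)
    if scale_diag is not None:
        scale += np.diag(scale_diag)                   # length-k vector -> diagonal
    if scale_tril is not None:
        scale += np.tril(scale_tril)                   # entries above the diagonal ignored
    if scale_perturb_factor is not None:               # rank-r update, factor shape (k, r)
        v = np.asarray(scale_perturb_factor)
        d = (np.diag(scale_perturb_diag) if scale_perturb_diag is not None
             else np.eye(v.shape[-1]))                 # default: v @ v.T
        scale += v @ d @ v.T
    return scale

# Example: k = 3 with a diagonal term plus a rank-1 perturbation.
# build_scale(3, scale_diag=[1., 2., 3.], scale_perturb_factor=[[1.], [0.], [1.]])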