Example #1
def test_box_cox_transform(
    lambdas: Tuple[float, float],
    mu_sigma: Tuple[float, float],
    hybridize: bool,
):
    '''
    Test to check that maximizing the likelihood recovers the parameters
    '''
    # test instance
    lam_1, lam_2 = lambdas
    mu, sigma = mu_sigma

    # generate samples
    lambdas_1 = mx.nd.zeros((NUM_SAMPLES, )) + lam_1
    lambdas_2 = mx.nd.zeros((NUM_SAMPLES, )) + lam_2
    transform = InverseBoxCoxTransform(lambdas_1, lambdas_2)

    mus = mx.nd.zeros((NUM_SAMPLES, )) + mu
    sigmas = mx.nd.zeros((NUM_SAMPLES, )) + sigma
    gaussian_distr = Gaussian(mus, sigmas)

    # Here the base distribution is Gaussian, which is transformed to a
    # non-Gaussian one via the inverse Box-Cox transform.
    # Sampling from `trans_distr` gives non-Gaussian samples.
    trans_distr = TransformedDistribution(gaussian_distr, transform)

    # Given the non-Gaussian samples, recover the true parameters of the
    # Box-Cox transformation as well as of the underlying Gaussian distribution.
    samples = trans_distr.sample()

    init_biases = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),
        lam_1 - START_TOL_MULTIPLE * TOL * lam_1,
        inv_softplus(lam_2 - START_TOL_MULTIPLE * TOL * lam_2),
    ]

    mu_hat, sigma_hat, lam_1_hat, lam_2_hat = maximum_likelihood_estimate_sgd(
        TransformedDistributionOutput(
            GaussianOutput(),
            InverseBoxCoxTransformOutput(lb_obs=lam_2, fix_lambda_2=True),
        ),
        samples,
        init_biases=init_biases,
        hybridize=hybridize,
        learning_rate=PositiveFloat(0.01),
        num_epochs=PositiveInt(18),
    )

    assert (np.abs(lam_1_hat - lam_1) < TOL * lam_1
            ), f"lam_1 did not match: lam_1 = {lam_1}, lam_1_hat = {lam_1_hat}"
    # lam_2 is held fixed (fix_lambda_2=True above), so it is not estimated:
    # assert (
    #     np.abs(lam_2_hat - lam_2) < TOL * lam_2
    # ), f"lam_2 did not match: lam_2 = {lam_2}, lam_2_hat = {lam_2_hat}"

    assert (np.abs(mu_hat - mu) < TOL * np.abs(mu)
            ), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (np.abs(sigma_hat - sigma) < TOL * sigma
            ), f"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}"
Example #2
def test_transformed_distribution() -> None:
    zero = nd.zeros(1)
    one = nd.ones(1)

    # If Y = -log(U) with U ~ Uniform(0, 1), then Y ~ Exponential(1)
    exponential = TransformedDistribution(
        Uniform(zero, one),
        bijection.log,
        bijection.AffineTransformation(scale=-1 * one),
    )

    # For Y ~ Exponential(1), p(x) = e^{-x} ==> log p(x) = -x
    assert exponential.log_prob(1 * one).asscalar() == -1.0
    assert exponential.log_prob(2 * one).asscalar() == -2.0

    v = np.linspace(0, 5, 101)
    assert np.allclose(exponential.cdf(nd.array(v)).asnumpy(), exp_cdf(v))

    # If Y ~ Exponential(1), then U = 1 - e^{-Y} has Uniform(0, 1) distribution
    uniform = TransformedDistribution(
        exponential,
        bijection.AffineTransformation(scale=-1 * one),
        bijection.log.inverse_bijection(),  # == bijection.exp
        bijection.AffineTransformation(loc=one, scale=-1 * one),
    )
    # For U ~ Uniform(0, 1), log P(U) = 0
    assert uniform.log_prob(0.5 * one).asscalar() == 0
    assert uniform.log_prob(0.2 * one).asscalar() == 0

    v = np.linspace(0, 1, 101)
    assert np.allclose(uniform.cdf(nd.array(v)).asnumpy(), v)
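
The asserted log-probabilities follow from the change-of-variables formula log p_Y(y) = log p_U(g^{-1}(y)) + log |d g^{-1}(y) / dy|. A minimal NumPy check of the same identity, independent of the GluonTS classes (the function name is illustrative):

def exp1_log_prob(y):
    # Y = -log(U) with U ~ Uniform(0, 1): the inverse map is u = exp(-y),
    # so log|du/dy| = -y, while log p_U(u) = 0 on (0, 1).
    return 0.0 + (-y)

assert exp1_log_prob(1.0) == -1.0
assert exp1_log_prob(2.0) == -2.0
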
Example #3
def test_transformed_distribution(serialize_fn) -> None:
    zero = nd.zeros(1)
    one = nd.ones(1)

    # If Y = -log(U) with U ~ Uniform(0, 1), then Y ~ Exponential(1)
    exponential = TransformedDistribution(
        Uniform(zero, one),
        [bijection.log,
         bijection.AffineTransformation(scale=-1 * one)],
    )
    exponential = serialize_fn(exponential)

    # For Y ~ Exponential(1), p(x) = e^{-x} ==> log p(x) = -x
    assert exponential.log_prob(1 * one).asscalar() == -1.0
    assert exponential.log_prob(2 * one).asscalar() == -2.0

    v = np.linspace(0, 5, 101)
    assert np.allclose(exponential.cdf(nd.array(v)).asnumpy(), exp_cdf(v))

    level = np.linspace(1.0e-5, 1.0 - 1.0e-5, 101)

    qs_calc = exponential.quantile(nd.array(level)).asnumpy()[:, 0]
    qs_theo = exp_quantile(level)
    assert np.allclose(qs_calc, qs_theo, atol=1.0e-2)

    # If Y ~ Exponential(1), then U = 1 - e^{-Y} has Uniform(0, 1) distribution
    uniform = TransformedDistribution(
        exponential,
        [
            bijection.AffineTransformation(scale=-1 * one),
            bijection.log.inverse_bijection(),  # == bijection.exp
            bijection.AffineTransformation(loc=one, scale=-1 * one),
        ],
    )
    uniform = serialize_fn(uniform)
    # For U ~ Uniform(0, 1), log P(U) = 0
    assert uniform.log_prob(0.5 * one).asscalar() == 0
    assert uniform.log_prob(0.2 * one).asscalar() == 0

    v = np.linspace(0, 1, 101)
    assert np.allclose(uniform.cdf(nd.array(v)).asnumpy(), v)

    qs_calc = uniform.quantile(nd.array(level)).asnumpy()[:, 0]
    assert np.allclose(qs_calc, level, atol=1.0e-2)
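
The serialize_fn argument lets the same assertions run on a distribution that has been round-tripped through serialization. A plausible pytest fixture, sketched under the assumption that gluonts.core.serde exposes encode/decode (verify against your GluonTS version):

import pytest
from gluonts.core import serde

@pytest.fixture(params=[lambda d: d,  # identity: test the object as-is
                        lambda d: serde.decode(serde.encode(d))])
def serialize_fn(request):
    return request.param
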
Example #4
    def distribution(self,
                     distr_args,
                     scale: Optional[Tensor] = None,
                     **kwargs) -> Distribution:
        distr_args, transforms_args = self._split_args(distr_args)
        distr = self.base_distr_output.distr_cls(*distr_args)
        transforms = [
            transform_output.bij_cls(*bij_args)
            for transform_output, bij_args in zip(
                self.transforms_output, transforms_args
            )
        ]

        trans_distr = TransformedDistribution(distr, transforms)

        # Apply the scale, if given, as a final affine transformation.
        if scale is None:
            return trans_distr
        else:
            return TransformedDistribution(trans_distr,
                                           AffineTransformation(scale=scale))
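
A hedged usage sketch of how this method is typically reached; distr_args and scale would normally come from a network's projection layer, so the names below are illustrative:

# distr_output pairs a base distribution output with transform outputs;
# _split_args routes the flat argument tuple to the right constructors.
distr_output = TransformedDistributionOutput(
    GaussianOutput(),
    InverseBoxCoxTransformOutput(lb_obs=0.0, fix_lambda_2=True),
)
distr = distr_output.distribution(distr_args)  # TransformedDistribution
scaled = distr_output.distribution(distr_args, scale=scale)  # + affine scaling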