def test_neg_binomial(mu_alpha: Tuple[float, float]) -> None:
    """
    Test to check that maximizing the likelihood recovers the parameters
    """
    # test instance
    mu, alpha = mu_alpha

    # generate samples
    mus = torch.zeros((NUM_SAMPLES, )) + mu
    alphas = torch.zeros((NUM_SAMPLES, )) + alpha

    neg_bin_distr = NegativeBinomial(total_count=1.0 / alphas,
                                     probs=mus * alphas / (1.0 + mus * alphas))
    samples = neg_bin_distr.sample()

    init_biases = [
        inv_softplus(mu - START_TOL_MULTIPLE * TOL * mu),
        inv_softplus(alpha + START_TOL_MULTIPLE * TOL * alpha),
    ]

    mu_hat, alpha_hat = maximum_likelihood_estimate_sgd(
        NegativeBinomialOutput(),
        samples,
        init_biases=init_biases,
        num_epochs=15,
    )

    assert (np.abs(mu_hat - mu) <
            TOL * mu), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
    assert (np.abs(alpha_hat - alpha) < TOL * alpha
            ), f"alpha did not match: alpha = {alpha}, alpha_hat = {alpha_hat}"
Example #2
    def __init__(self, gate, total_count, probs=None, logits=None, validate_args=None):
        base_dist = NegativeBinomial(
            total_count=total_count, probs=probs, logits=logits, validate_args=False,
        )
        base_dist._validate_args = validate_args

        super().__init__(gate, base_dist, validate_args=validate_args)
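A minimal usage sketch of the constructor above (hypothetical values); as with torch's NegativeBinomial itself, the base distribution can be specified through either probs or logits:

import torch

zinb = ZeroInflatedNegativeBinomial(
    gate=torch.tensor(0.25),
    total_count=torch.tensor(5.0),
    logits=torch.tensor(-0.4),
)
print(zinb.log_prob(torch.tensor(0.0)))  # log P(X = 0), inflated by the gate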
Example #3
def test_neg_binomial(total_count: float, logit: float) -> None:
    """
    Test to check that maximizing the likelihood recovers the parameters
    """
    # generate samples
    total_counts = torch.zeros((NUM_SAMPLES,)) + total_count
    logits = torch.zeros((NUM_SAMPLES,)) + logit

    neg_bin_distr = NegativeBinomial(total_count=total_counts, logits=logits)
    samples = neg_bin_distr.sample()

    init_biases = [
        inv_softplus(total_count - START_TOL_MULTIPLE * TOL * total_count),
        logit - START_TOL_MULTIPLE * TOL * logit,
    ]

    total_count_hat, logit_hat = maximum_likelihood_estimate_sgd(
        NegativeBinomialOutput(),
        samples,
        init_biases=init_biases,
        num_epochs=15,
    )

    assert (
        np.abs(total_count_hat - total_count) < TOL * total_count
    ), f"total_count did not match: total_count = {total_count}, total_count_hat = {total_count_hat}"
    assert (
        np.abs(logit_hat - logit) < TOL * logit
    ), f"logit did not match: logit = {logit}, logit_hat = {logit_hat}"
Example #4
def test_zinb_0_gate(total_count, probs):
    # if gate is 0 ZINB is NegativeBinomial
    zinb_ = ZeroInflatedNegativeBinomial(torch.zeros(1),
                                         total_count=torch.tensor(total_count),
                                         probs=torch.tensor(probs))
    neg_bin = NegativeBinomial(torch.tensor(total_count),
                               probs=torch.tensor(probs))
    s = neg_bin.sample((20, ))
    zinb_prob = zinb_.log_prob(s)
    neg_bin_prob = neg_bin.log_prob(s)
    assert_close(zinb_prob, neg_bin_prob, atol=1e-06)
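A complementary sketch (not part of the suite above, hypothetical values): for a nonzero gate, the mass the ZINB places at zero should equal gate + (1 - gate) * P_NB(0), which can be checked the same way:

import torch

gate, total_count, probs = 0.3, 10.0, 0.5
zinb = ZeroInflatedNegativeBinomial(torch.tensor(gate),
                                    total_count=torch.tensor(total_count),
                                    probs=torch.tensor(probs))
neg_bin = NegativeBinomial(torch.tensor(total_count), probs=torch.tensor(probs))
p_zero = zinb.log_prob(torch.tensor(0.0)).exp()
expected = gate + (1 - gate) * neg_bin.log_prob(torch.tensor(0.0)).exp()
print(p_zero, expected)  # should match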
Example #5
    def loss(
        self,
        tensors,
        inference_outputs,
        generative_outputs,
        kl_weight: float = 1.0,
        n_obs: float = 1.0,
    ):
        x = tensors[_CONSTANTS.X_KEY]
        px_rate = generative_outputs["px_rate"]
        px_o = generative_outputs["px_o"]

        reconst_loss = -NegativeBinomial(px_rate,
                                         logits=px_o).log_prob(x).sum(-1)
        # prior likelihood
        mean = torch.zeros_like(self.eta)
        scale = torch.ones_like(self.eta)
        neg_log_likelihood_prior = -Normal(mean, scale).log_prob(
            self.eta).sum()

        if self.prior_weight == "n_obs":
            # the correct way to reweight observations while performing stochastic optimization
            loss = n_obs * torch.mean(reconst_loss) + neg_log_likelihood_prior
        else:
            # the original way it is done in Stereoscope; we use this option to show reproducibility of their codebase
            loss = torch.sum(reconst_loss) + neg_log_likelihood_prior
        return SCVILoss(loss, reconst_loss, torch.zeros((1, )),
                        neg_log_likelihood_prior)
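The n_obs branch above keeps the minibatch likelihood on the same scale as the full-dataset prior term. A worked example with hypothetical numbers: with n_obs = 10,000 observations and a minibatch of 128, n_obs * torch.mean(reconst_loss) estimates the reconstruction loss summed over all 10,000 observations, whereas torch.sum(reconst_loss) over the minibatch alone would under-weight the likelihood against neg_log_likelihood_prior by a factor of roughly 10,000 / 128 ≈ 78.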
Example #6
    def _sampler(self, samples=1000):
        d_ = torch.ones(samples)
        # NOTE: `d` is not defined in this snippet; it is presumably a
        # scenario/decision index set elsewhere on the enclosing object.
        if d == 1:
            # If SZ is adopted, then some Districts and Schools buy in
            dist = Poisson(self.n_districts) \
                .sample([samples]) \
                .reshape([samples])
            schools = NegativeBinomial(torch.tensor([3.]), torch.tensor([0.8])) \
                .sample([samples, int(self.n_districts)]) \
                .sum(dim=1) \
                .reshape([samples])
            sz = 15000. * dist + 2430. * schools
        else:
            dist, schools, sz = (torch.zeros(samples),
                                 torch.zeros(samples),
                                 torch.zeros(samples))
        if d < 2:
            sf = LogNormal(*self._lognormal_params(300000., 10000.)) \
                .sample([samples])
        else:
            sf = torch.zeros(samples)
        # System & Infrastructure
        az = LogNormal(self.az_means[d], self.az_sds[d]).sample([samples])
        salary_estimate = Normal(70000., 5000.).sample([samples])
        fa = Beta(self.fa_ms[d], self.fa_ks[d]).sample([samples])
        dt = Beta(self.dt_ms[d], self.dt_ks[d]).sample([samples])
        return d_, dist, schools, sz, az, sf, fa, dt
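As a rough sanity check on the magnitudes above (a sketch, not part of the original model): under torch's parameterization, NegativeBinomial(total_count=3, probs=0.8) has mean total_count * probs / (1 - probs) = 3 * 0.8 / 0.2 = 12, so each district contributes about 12 schools on average to the sz term.

import torch
from torch.distributions import NegativeBinomial

print(NegativeBinomial(torch.tensor(3.0), probs=torch.tensor(0.8)).mean)  # 12.0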
Example #7
    def distribution(self,
                     distr_args,
                     scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            logits += scale.log()

        return self.independent(
            NegativeBinomial(total_count=total_count, logits=logits))
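Adding log(scale) to the logits rescales the mean because torch's NegativeBinomial has mean = total_count * probs / (1 - probs) = total_count * exp(logits). A minimal sketch with hypothetical values:

import torch
from torch.distributions import NegativeBinomial

total_count = torch.tensor(4.0)
logits, scale = torch.tensor(-0.5), torch.tensor(3.0)
base = NegativeBinomial(total_count=total_count, logits=logits)
scaled = NegativeBinomial(total_count=total_count, logits=logits + scale.log())
print(base.mean * scale, scaled.mean)  # equal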
Example #8
    def distribution(self,
                     distr_args,
                     scale: Optional[torch.Tensor] = None) -> Distribution:
        mu, alpha = distr_args

        if scale is not None:
            mu *= scale
            alpha *= torch.sqrt(scale + 1.0)

        n = 1.0 / alpha
        p = mu * alpha / (1.0 + mu * alpha)

        return NegativeBinomial(total_count=n, probs=p)
Example #9
    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        mu, alpha = distr_args

        if scale is not None:
            mu *= scale
            # alpha = alpha + (scale - 1) / (scale * mu) # multiply 2nd moment by scale
            alpha += (scale - 1) / mu

        n = 1.0 / alpha
        p = mu * alpha / (1.0 + mu * alpha)

        return NegativeBinomial(total_count=n, probs=p)
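The in-place adjustment above multiplies the mean by scale and the variance by scale**2 (so the coefficient of variation is preserved): with total_count = 1/alpha and probs = mu*alpha/(1 + mu*alpha), the variance is mu + alpha * mu**2, and substituting mu -> scale*mu, alpha -> alpha + (scale - 1)/(scale*mu) gives scale**2 * (mu + alpha * mu**2). A quick numeric check with hypothetical values:

mu, alpha, scale = 4.0, 0.5, 3.0

var = mu + alpha * mu ** 2          # 12.0
mu_s = mu * scale                   # 12.0
alpha_s = alpha + (scale - 1.0) / mu_s
var_s = mu_s + alpha_s * mu_s ** 2  # 108.0
print(var_s, scale ** 2 * var)      # both 108.0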
Example #10
    def loss(
        self,
        tensors,
        inference_outputs,
        generative_outputs,
        kl_weight: float = 1.0,
    ):
        x = tensors[_CONSTANTS.X_KEY]
        px_rate = generative_outputs["px_rate"]
        px_o = generative_outputs["px_o"]
        scaling_factor = generative_outputs["scaling_factor"]

        reconst_loss = -NegativeBinomial(px_rate,
                                         logits=px_o).log_prob(x).sum(-1)
        loss = torch.mean(scaling_factor * reconst_loss)

        return SCVILoss(loss, reconst_loss, torch.zeros((1, )), 0.0)
Example #11
    def loss(
        self,
        tensors,
        inference_outputs,
        generative_outputs,
        kl_weight: float = 1.0,
    ):
        x = tensors[REGISTRY_KEYS.X_KEY]
        px_rate = generative_outputs["px_rate"]
        px_o = generative_outputs["px_o"]
        scaling_factor = generative_outputs["scaling_factor"]

        reconst_loss = -NegativeBinomial(px_rate,
                                         logits=px_o).log_prob(x).sum(-1)
        loss = torch.sum(scaling_factor * reconst_loss)

        return LossRecorder(loss, reconst_loss, torch.zeros((1, )),
                            torch.tensor(0.0))