def get_reconstruction_loss(
    self,
    x: torch.Tensor,
    y: torch.Tensor,
    px_dict: Dict[str, torch.Tensor],
    py_dict: Dict[str, torch.Tensor],
    pro_batch_mask_minibatch: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compute the reconstruction loss for gene and protein counts.

    Parameters
    ----------
    x
        Observed gene expression counts.
    y
        Observed protein counts.
    px_dict
        Generative parameters for genes (keys: ``rate``, ``r``, ``dropout``).
    py_dict
        Generative parameters for proteins (keys: ``rate_back``,
        ``rate_fore``, ``r``, ``mixing``).
    pro_batch_mask_minibatch
        Optional mask selecting which protein entries were measured in
        each batch; unmeasured entries contribute zero loss.

    Returns
    -------
    Tuple of per-cell negative log-likelihoods ``(gene, protein)``.
    """
    # Gene likelihood: zero-inflated NB or plain NB, per configuration.
    if self.gene_likelihood == "zinb":
        gene_dist = ZeroInflatedNegativeBinomial(
            mu=px_dict["rate"], theta=px_dict["r"], zi_logits=px_dict["dropout"]
        )
    else:
        gene_dist = NegativeBinomial(mu=px_dict["rate"], theta=px_dict["r"])
    reconst_loss_gene = -gene_dist.log_prob(x).sum(dim=-1)

    # Protein likelihood: two-component NB mixture (background / foreground).
    protein_dist = NegativeBinomialMixture(
        mu1=py_dict["rate_back"],
        mu2=py_dict["rate_fore"],
        theta1=py_dict["r"],
        mixture_logits=py_dict["mixing"],
    )
    per_protein_loss = -protein_dist.log_prob(y)

    if pro_batch_mask_minibatch is None:
        reconst_loss_protein = per_protein_loss.sum(dim=-1)
    else:
        # Scatter the loss through the mask so unmeasured proteins
        # contribute exactly zero before the final sum.
        masked_loss = torch.zeros_like(per_protein_loss)
        masked_loss.masked_scatter_(
            pro_batch_mask_minibatch.bool(), per_protein_loss
        )
        reconst_loss_protein = masked_loss.sum(dim=-1)

    return reconst_loss_gene, reconst_loss_protein
def sample(self, tensors, n_samples=1):
    """Draw posterior-predictive samples of RNA and protein counts.

    Parameters
    ----------
    tensors
        Input data tensors passed through to ``self.forward``.
    n_samples
        Number of samples to draw per cell.

    Returns
    -------
    Tuple ``(rna_sample, protein_sample)`` of CPU tensors.
    """
    # No gradients needed: we only sample from the fitted model.
    with torch.no_grad():
        inference_outputs, generative_outputs = self.forward(
            tensors,
            inference_kwargs={"n_samples": n_samples},
            compute_loss=False,
        )

    gene_params = generative_outputs["px_"]
    protein_params = generative_outputs["py_"]

    rna_sample = (
        NegativeBinomial(mu=gene_params["rate"], theta=gene_params["r"])
        .sample()
        .cpu()
    )
    protein_sample = (
        NegativeBinomialMixture(
            mu1=protein_params["rate_back"],
            mu2=protein_params["rate_fore"],
            theta1=protein_params["r"],
            mixture_logits=protein_params["mixing"],
        )
        .sample()
        .cpu()
    )
    return rna_sample, protein_sample