def get_normalized_probs(self, ctc_logits, logits, log_probs):
    """Get normalized probabilities (or log probs) from a net's output."""
    if log_probs:
        ctc_res = utils.log_softmax(ctc_logits.float(), dim=-1)
        res = utils.log_softmax(logits.float(), dim=-1)
    else:
        ctc_res = utils.softmax(ctc_logits.float(), dim=-1)
        res = utils.softmax(logits.float(), dim=-1)
    ctc_res.batch_first = True
    res.batch_first = True
    return ctc_res, res
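# --- Usage sketch (not from the repository) ---
# A minimal illustration, assuming a blank index of 0, illustrative tensor shapes,
# and torch.nn.functional in place of the fairseq `utils` wrappers: the batch-first
# CTC log-probs returned above have to be transposed to time-first (T, B, C) before
# they reach F.ctc_loss.
import torch
import torch.nn.functional as F

B, T, S, C = 2, 50, 10, 32            # batch, source frames, target length, vocab size
ctc_logits = torch.randn(B, T, C)     # stand-in for the encoder's CTC projection
dec_logits = torch.randn(B, S, C)     # stand-in for the decoder output

# Equivalent of get_normalized_probs(ctc_logits, dec_logits, log_probs=True).
ctc_lprobs = F.log_softmax(ctc_logits.float(), dim=-1)
lprobs = F.log_softmax(dec_logits.float(), dim=-1)

targets = torch.randint(1, C, (B, S))                    # padded targets, 0 reserved for blank
input_lengths = torch.full((B,), T, dtype=torch.long)    # valid source frames per utterance
target_lengths = torch.full((B,), S, dtype=torch.long)   # valid target tokens per utterance
loss = F.ctc_loss(
    ctc_lprobs.transpose(0, 1),  # (T, B, C), as F.ctc_loss expects
    targets,
    input_lengths,
    target_lengths,
    blank=0,
    zero_infinity=True,
)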
def get_normalized_probs(self, net_output, log_probs):
    """Get normalized probabilities (or log probs) from a net's output."""
    logits = net_output["encoder_out"]
    if log_probs:
        return utils.log_softmax(logits.float(), dim=-1)
    else:
        return utils.softmax(logits.float(), dim=-1)
def get_normalized_probs(
    self,
    net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
    log_probs: bool,
    sample: Optional[Dict[str, Tensor]] = None,
):
    """Get normalized probabilities (or log probs) from a net's output."""
    if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
        if sample is not None:
            assert "target" in sample
            target = sample["target"]
        else:
            target = None
        out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
        return out.exp_() if not log_probs else out

    logits = net_output[0]
    if log_probs:
        return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
    else:
        return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
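# --- Usage sketch (not from the repository) ---
# A rough sketch of how a cross-entropy criterion consumes this method, with dummy
# tensors standing in for a real decoder's output; the adaptive-softmax branch is
# skipped here because it only applies when the model was built with one. The
# (logits, extra) tuple layout follows fairseq's convention, but the shapes are
# illustrative assumptions.
import torch
import torch.nn.functional as F

B, S, V = 2, 7, 100                         # batch, target length, vocabulary size
net_output = (torch.randn(B, S, V), None)   # (logits, extra) as returned by a decoder

# Equivalent of get_normalized_probs(net_output, log_probs=True) on the standard path.
lprobs = F.log_softmax(net_output[0].float(), dim=-1)

# Gather per-token log-probabilities of the reference targets for an NLL loss.
target = torch.randint(0, V, (B, S))
nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
loss = nll_loss.mean()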