def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Get the metrics of our classifier, see :func:`~allennlp_2.models.Model.get_metrics`.

    Parameters
    ----------
    reset
        Reset the metrics after obtaining them?

    Returns
    -------
    A dictionary with all metric names and values.
    """
    final_metrics = {}

    # Scalar metric: reported under its own name.
    if "accuracy" in self.metrics:
        final_metrics["accuracy"] = self.metrics["accuracy"].get_metric(reset)

    # Averaged metrics return a dict of sub-metrics; namespace them as "<avg>/<name>".
    for metric_name in ["micro", "macro"]:
        if metric_name in self.metrics:
            for key, value in self.metrics[metric_name].get_metric(reset).items():
                final_metrics[f"{metric_name}/{key}"] = value

    # Per-label metrics return one sequence of values per sub-metric; resolve
    # each index to its label string via the vocabulary.
    if "per_label" in self.metrics:
        for key, values in self.metrics["per_label"].get_metric(reset).items():
            for i, value in enumerate(values):
                label = vocabulary.label_for_index(self.backbone.vocab, i)
                # sanitize label using same patterns as tensorboardX to avoid summary writer warnings
                label = helpers.sanitize_metric_name(label)
                final_metrics[f"_{key}/{label}"] = value

    return final_metrics
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Get the metrics of our classifier, see :func:`~allennlp_2.models.Model.get_metrics`.

    Parameters
    ----------
    reset
        Reset the metrics after obtaining them?

    Returns
    -------
    A dictionary with all metric names and values.
    """
    final_metrics: Dict[str, float] = {}
    # The tracked metric set may differ between the training and validation phase.
    tracked = self._metrics.get_dict(is_train=self.training)

    for name, metric in tracked.items():
        if name == "accuracy":
            final_metrics["accuracy"] = metric.get_metric(reset)
        elif name in ("macro", "micro"):
            # Averaged metrics yield a dict; namespace each entry as "<avg>/<name>".
            for key, value in metric.get_metric(reset).items():
                final_metrics[f"{name}/{key}"] = value
        elif name == "per_label":
            # Per-label metrics yield one sequence per sub-metric; map each
            # index back to its label string.
            for key, values in metric.get_metric(reset).items():
                for index, value in enumerate(values):
                    label = vocabulary.label_for_index(self.backbone.vocab, index)
                    # sanitize label using same patterns as tensorboardX to avoid summary writer warnings
                    label = helpers.sanitize_metric_name(label)
                    final_metrics[f"_{key}/{label}"] = value

    return final_metrics
def _add_and_sort_labels_and_probabilities(
    self, probabilities: torch.Tensor
) -> Tuple[List[str], List[float]]:
    """Returns the labels and probabilities sorted by the probability (descending)

    Helper function for the `self._compute_labels_and_probabilities` method. The list
    of the returned probabilities can be larger than the input probabilities, since we
    add all defined labels in the head.

    Parameters
    ----------
    probabilities
        Probabilities of the model's prediction for one instance

    Returns
    -------
    labels, probabilities
    """
    # Allocate one slot per defined label; labels missing from the prediction
    # keep probability 0. `num_labels` can be >= probabilities.size(0).
    # `.device` works for both CPU and CUDA tensors, replacing the old
    # `get_device() > -1` dance (get_device() is only meaningful on CUDA).
    all_classes_probs = torch.zeros(self.num_labels, device=probabilities.device)
    all_classes_probs[: probabilities.size(0)] = probabilities

    sorted_indexes_by_prob = torch.argsort(all_classes_probs, descending=True).tolist()

    labels = [
        vocabulary.label_for_index(self.backbone.vocab, idx)
        for idx in sorted_indexes_by_prob
    ]
    # Cast to plain Python floats so the result matches the declared return type.
    probabilities = [float(all_classes_probs[idx]) for idx in sorted_indexes_by_prob]

    return labels, probabilities
def _decode_tags(
    self, viterbi_paths: List[Tuple[List[int], float]]
) -> List[List[str]]:
    """Decode predicted tags

    Each viterbi path is a ``(tag_ids, score)`` pair; only the tag ids are
    decoded into label strings, the score is ignored.
    """
    decoded = []
    for tag_ids, _score in viterbi_paths:
        decoded.append([
            vocabulary.label_for_index(self.backbone.vocab, tag_id)
            for tag_id in tag_ids
        ])
    return decoded
def decode(self, output: TaskOutput) -> TaskOutput:
    """Replace the integer `k_tags` of the task output with decoded label strings.

    Flattens the k best paths of every instance into one list of tag-string
    sequences, stores it on ``output.tags``, and removes ``output.k_tags``.
    """
    decoded = []
    for instance_k_tags in output.k_tags:
        for path in instance_k_tags:
            # each path is a (tag_ids, score) pair; only the ids are decoded
            decoded.append([
                vocabulary.label_for_index(self.backbone.vocab, idx)
                for idx in path[0]
            ])
    # NOTE(review): the decoded list is deliberately wrapped in a 1-tuple,
    # exactly as the original did — presumably downstream code unpacks it.
    output.tags = (decoded,)
    del output.k_tags
    return output
def _make_tags(
    self, viterbi_paths: List[Tuple[List[int], float]]
) -> List[List[str]]:
    """Makes the 'tags' key of the task prediction

    Each viterbi path is a ``(tag_ids, score)`` pair; the score is ignored.
    """
    # hoist the vocab lookup out of the comprehension
    vocab = self.backbone.vocab
    return [
        [vocabulary.label_for_index(vocab, tag_id) for tag_id in tag_ids]
        for tag_ids, _score in viterbi_paths
    ]
def _labels_with_probabilities(
    self, probabilities: torch.Tensor
) -> Dict[str, float]:
    """Calculates the descendant sorted label + probs dictionary using all
    output classes (not only predicted)

    Parameters
    ----------
    probabilities
        Probabilities of the model's prediction for one instance

    Returns
    -------
    A dict mapping every defined label to its probability, in descending
    probability order.
    """
    # One slot per defined label; labels absent from the prediction get 0.
    # `.device` works for both CPU and CUDA tensors, replacing the old
    # `get_device() > -1` dance (get_device() is only meaningful on CUDA).
    all_classes_probs = torch.zeros(self.num_labels, device=probabilities.device)
    all_classes_probs[: probabilities.size(0)] = probabilities
    sorted_indexes_by_prob = torch.argsort(all_classes_probs, descending=True).tolist()
    return {
        # Bug fix: cast to a plain Python float so the values match the declared
        # Dict[str, float] return type instead of leaking 0-dim tensors
        # (consistent with `_add_and_sort_labels_and_probabilities`).
        vocabulary.label_for_index(self.backbone.vocab, idx): float(all_classes_probs[idx])
        for idx in sorted_indexes_by_prob
    }