    def metrics(self):
        metrics = []

        if self.config.predict_target:
            metrics.append(
                F1Metric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                )
            )
            metrics.append(
                CorrectMetric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                )
            )
        if self.config.predict_source:
            metrics.append(
                F1Metric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                )
            )
            metrics.append(
                CorrectMetric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                )
            )
        if self.config.predict_gaps:
            metrics.append(
                F1Metric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                )
            )
            metrics.append(
                CorrectMetric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                )
            )

        metrics.append(LogMetric(targets=[(const.LOSS, const.LOSS)]))

        return metrics
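
A minimal, self-contained sketch of the same flag-driven assembly may make the pattern easier to see: one F1Metric plus one CorrectMetric per enabled tag type, then a LogMetric for the loss. The classes below are hypothetical stand-ins, not the library's real metric API.

from dataclasses import dataclass
from typing import List


class StubMetric:
    """Hypothetical stand-in; the real metric classes take the keyword
    arguments (prefix, target_name, PAD, labels) shown above."""

    def __init__(self, kind: str, target: str):
        self.kind, self.target = kind, target

    def __repr__(self):
        return f"{self.kind}({self.target})"


@dataclass
class StubConfig:
    predict_target: bool = True
    predict_source: bool = False
    predict_gaps: bool = True


def build_metrics(config: StubConfig) -> List[StubMetric]:
    # Same flag-driven assembly as the method above, with stand-ins.
    metrics = []
    for enabled, tag in [
        (config.predict_target, "target_tags"),
        (config.predict_source, "source_tags"),
        (config.predict_gaps, "gap_tags"),
    ]:
        if enabled:
            metrics.append(StubMetric("F1", tag))
            metrics.append(StubMetric("Correct", tag))
    metrics.append(StubMetric("Log", "loss"))
    return metrics


print(build_metrics(StubConfig()))
# [F1(target_tags), Correct(target_tags), F1(gap_tags), Correct(gap_tags), Log(loss)]
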
Example #2
    def metrics(self):
        metrics = []

        main_metric = PerplexityMetric(
            prefix=self.config.target_side,
            target_name=self.config.target_side,
            PAD=const.PAD_ID,
            STOP=const.STOP_ID,
        )
        metrics.append(main_metric)

        metrics.append(
            CorrectMetric(
                prefix=self.config.target_side,
                target_name=self.config.target_side,
                PAD=const.PAD_ID,
                STOP=const.STOP_ID,
            ))
        metrics.append(
            ExpectedErrorMetric(
                prefix=self.config.target_side,
                target_name=self.config.target_side,
                PAD=const.PAD_ID,
                STOP=const.STOP_ID,
            ))
        return metrics
Example #3
    def metrics(self) -> List[Metric]:
        if self._metrics is None:
            metrics = []
            if self.config.word_level.target and self.config.word_level.gaps:
                metrics += tag_metrics(
                    const.TARGET_TAGS,
                    const.GAP_TAGS,
                    prefix='WMT19_',
                    labels=self.labels(const.TARGET_TAGS),
                )
            if self.config.word_level.target:
                metrics += tag_metrics(const.TARGET_TAGS,
                                       labels=self.labels(const.TARGET_TAGS))
            if self.config.word_level.gaps:
                metrics += tag_metrics(const.GAP_TAGS,
                                       labels=self.labels(const.GAP_TAGS))
            if self.config.word_level.source:
                metrics += tag_metrics(const.SOURCE_TAGS,
                                       labels=self.labels(const.SOURCE_TAGS))

            if self.config.sentence_level.hter:
                metrics.append(PearsonMetric(const.SENTENCE_SCORES, prefix=''))
                metrics.append(SpearmanMetric(const.SENTENCE_SCORES,
                                              prefix=''))
                metrics.append(RMSEMetric(const.SENTENCE_SCORES, prefix=''))
            if self.config.sentence_level.binary:
                metrics.append(
                    CorrectMetric(
                        const.BINARY,
                        prefix='binary_',
                        labels=self.labels(const.TARGET_TAGS),
                    ))
            # metrics.append(LogMetric(log_targets=[(const.LOSS, const.LOSS)]))
            self._metrics = metrics
        return self._metrics
Example #4
def tag_metrics(*targets, prefix=None, labels=None):
    metrics = [
        F1MultMetric(*targets, prefix=prefix, labels=labels),
        MatthewsMetric(*targets, prefix=prefix, labels=labels),
        # ThresholdCalibrationMetric(*targets, prefix=prefix),
        CorrectMetric(*targets, prefix=prefix, labels=labels),
    ]
    return metrics
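
As a quick illustration of what the helper above returns: each call bundles an F1MultMetric, a MatthewsMetric, and a CorrectMetric over the same target(s), so the 'WMT19_'-prefixed call in Example #3 passes both the target-tag and gap-tag names to all three. The sketch below uses hypothetical stand-in classes and assumed string values in place of const.TARGET_TAGS / const.GAP_TAGS.

# Hypothetical stand-ins, only to make the expansion visible; the real
# F1MultMetric / MatthewsMetric / CorrectMetric come from the library.
class _Stub:
    def __init__(self, kind, *targets, prefix=None, labels=None):
        self.kind, self.targets, self.prefix = kind, targets, prefix or ''

    def __repr__(self):
        return f"{self.prefix}{self.kind}{list(self.targets)}"


def tag_metrics_demo(*targets, prefix=None, labels=None):
    # Mirrors tag_metrics above, with stand-ins instead of the real classes.
    return [
        _Stub("F1Mult", *targets, prefix=prefix, labels=labels),
        _Stub("Matthews", *targets, prefix=prefix, labels=labels),
        _Stub("Correct", *targets, prefix=prefix, labels=labels),
    ]


print(tag_metrics_demo("tags", "gap_tags", prefix="WMT19_", labels=["OK", "BAD"]))
# [WMT19_F1Mult['tags', 'gap_tags'], WMT19_Matthews['tags', 'gap_tags'],
#  WMT19_Correct['tags', 'gap_tags']]
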
    def metrics(self):
        if self._metrics is None:
            metrics = []
            for output_side, layer in self.masked_word_outputs.items():
                metrics.append(PerplexityMetric(output_side))
                metrics.append(
                    ExpectedErrorMetric(output_side,
                                        labels=self.labels(output_side)))
                metrics.append(
                    CorrectMetric(output_side,
                                  labels=self.labels(output_side)))
            self._metrics = metrics
        return self._metrics

    def metrics(self):
        metrics = []

        if self.config.predict_target:
            metrics.append(
                F1Metric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                ThresholdCalibrationMetric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))

        if self.config.predict_source:
            metrics.append(
                F1Metric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))
        if self.config.predict_gaps:
            metrics.append(
                F1Metric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))

        if self.config.sentence_level:
            metrics.append(RMSEMetric(target_name=const.SENTENCE_SCORES))
            metrics.append(PearsonMetric(target_name=const.SENTENCE_SCORES))
            metrics.append(SpearmanMetric(target_name=const.SENTENCE_SCORES))
            if self.config.sentence_ll:
                metrics.append(
                    LogMetric(targets=[('model_out', const.SENT_SIGMA)]))
        if self.config.binary_level:
            metrics.append(
                CorrectMetric(prefix=const.BINARY, target_name=const.BINARY))
        if self.config.token_level and self.predictor_tgt is not None:
            metrics.append(
                CorrectMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                ExpectedErrorMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                PerplexityMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
        if self.config.token_level and self.predictor_src is not None:
            metrics.append(
                CorrectMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                ExpectedErrorMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                PerplexityMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
        metrics.append(
            TokenMetric(target_name=const.TARGET,
                        STOP=const.STOP_ID,
                        PAD=const.PAD_ID))
        return metrics
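
All of these metrics() methods only assemble the list; consuming it is left to the training/evaluation loop. The sketch below is a generic, hypothetical way to drive such a list, assuming each metric exposes an update/summarize pair; it is not the library's actual Metric interface.

from typing import Dict, Iterable, List, Protocol


class SupportsUpdate(Protocol):
    """Hypothetical minimal interface; the real metric classes differ."""

    def update(self, batch_outputs: Dict) -> None:
        ...

    def summarize(self) -> Dict[str, float]:
        ...


def evaluate(metrics: List[SupportsUpdate],
             batches: Iterable[Dict]) -> Dict[str, float]:
    # Feed every batch's model outputs to every metric, then merge their summaries.
    summary: Dict[str, float] = {}
    for batch_outputs in batches:
        for metric in metrics:
            metric.update(batch_outputs)
    for metric in metrics:
        summary.update(metric.summarize())
    return summary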