Example #1
    def metrics(self):
        # Perplexity over the target side is the main metric; correctness and
        # expected error are tracked alongside it.
        metrics = []

        main_metric = PerplexityMetric(
            prefix=self.config.target_side,
            target_name=self.config.target_side,
            PAD=const.PAD_ID,
            STOP=const.STOP_ID,
        )
        metrics.append(main_metric)

        metrics.append(
            CorrectMetric(
                prefix=self.config.target_side,
                target_name=self.config.target_side,
                PAD=const.PAD_ID,
                STOP=const.STOP_ID,
            ))
        metrics.append(
            ExpectedErrorMetric(
                prefix=self.config.target_side,
                target_name=self.config.target_side,
                PAD=const.PAD_ID,
                STOP=const.STOP_ID,
            ))
        return metrics
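
The PAD and STOP arguments above indicate that padding and end-of-sequence positions are excluded before scores are aggregated. Below is a minimal, self-contained sketch of a masked perplexity metric in that spirit; the class name, the update/summarize interface, and the tensor shapes are assumptions for illustration, not the actual metric API.

import math

import torch


class MaskedPerplexityMetric:
    """Hypothetical stand-in: corpus perplexity over non-PAD / non-STOP tokens."""

    def __init__(self, pad_id: int, stop_id: int):
        self.pad_id = pad_id
        self.stop_id = stop_id
        self.nll_sum = 0.0
        self.token_count = 0

    def update(self, log_probs: torch.Tensor, targets: torch.Tensor) -> None:
        # log_probs: (batch, seq_len, vocab) log-softmax scores
        # targets:   (batch, seq_len) gold token ids
        mask = (targets != self.pad_id) & (targets != self.stop_id)
        token_nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
        self.nll_sum += token_nll[mask].sum().item()
        self.token_count += int(mask.sum())

    def summarize(self) -> float:
        # exp of the mean negative log-likelihood per counted token
        return math.exp(self.nll_sum / max(self.token_count, 1))
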
    def metrics(self):
        # Build the metric list once and cache it: one perplexity, expected-error,
        # and correctness metric per masked-word output head.
        if self._metrics is None:
            metrics = []
            for output_side, layer in self.masked_word_outputs.items():
                metrics.append(PerplexityMetric(output_side))
                metrics.append(
                    ExpectedErrorMetric(output_side,
                                        labels=self.labels(output_side)))
                metrics.append(
                    CorrectMetric(output_side,
                                  labels=self.labels(output_side)))
            self._metrics = metrics
        return self._metrics
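
This second variant builds the list lazily, caching it on self._metrics, and passes a labels argument to CorrectMetric and ExpectedErrorMetric, which suggests that only positions from a known label set are scored. A small self-contained sketch of such a correctness (accuracy) metric follows; the class name, method signatures, and tensor shapes are assumptions for illustration.

import torch


class LabelCorrectMetric:
    """Hypothetical stand-in: accuracy of argmax predictions, counted only over
    positions whose gold id belongs to the given label set."""

    def __init__(self, labels):
        self.labels = set(labels)
        self.correct = 0
        self.total = 0

    def update(self, logits: torch.Tensor, targets: torch.Tensor) -> None:
        # logits: (batch, seq_len, num_labels); targets: (batch, seq_len) gold ids
        predictions = logits.argmax(dim=-1)
        for pred, gold in zip(predictions.flatten().tolist(),
                              targets.flatten().tolist()):
            if gold in self.labels:
                self.total += 1
                self.correct += int(pred == gold)

    def summarize(self) -> float:
        return self.correct / max(self.total, 1)
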
    def metrics(self):
        # Assemble metrics based on which prediction heads are enabled in the config.
        metrics = []

        if self.config.predict_target:
            metrics.append(
                F1Metric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                ThresholdCalibrationMetric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.TARGET_TAGS,
                    target_name=const.TARGET_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))

        if self.config.predict_source:
            metrics.append(
                F1Metric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.SOURCE_TAGS,
                    target_name=const.SOURCE_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))

        if self.config.predict_gaps:
            metrics.append(
                F1Metric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                    labels=const.LABELS,
                ))
            metrics.append(
                CorrectMetric(
                    prefix=const.GAP_TAGS,
                    target_name=const.GAP_TAGS,
                    PAD=const.PAD_TAGS_ID,
                ))

        if self.config.sentence_level:
            metrics.append(RMSEMetric(target_name=const.SENTENCE_SCORES))
            metrics.append(PearsonMetric(target_name=const.SENTENCE_SCORES))
            metrics.append(SpearmanMetric(target_name=const.SENTENCE_SCORES))
            if self.config.sentence_ll:
                metrics.append(
                    LogMetric(targets=[('model_out', const.SENT_SIGMA)]))

        if self.config.binary_level:
            metrics.append(
                CorrectMetric(prefix=const.BINARY, target_name=const.BINARY))

        if self.config.token_level and self.predictor_tgt is not None:
            metrics.append(
                CorrectMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                ExpectedErrorMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                PerplexityMetric(
                    prefix=const.PE,
                    target_name=const.PE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
        if self.config.token_level and self.predictor_src is not None:
            metrics.append(
                CorrectMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                ExpectedErrorMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
            metrics.append(
                PerplexityMetric(
                    prefix=const.SOURCE,
                    target_name=const.SOURCE,
                    PAD=const.PAD_ID,
                    STOP=const.STOP_ID,
                ))
        metrics.append(
            TokenMetric(target_name=const.TARGET,
                        STOP=const.STOP_ID,
                        PAD=const.PAD_ID))
        return metrics
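
The third variant adds F1Metric over the word-level tags and RMSE/Pearson/Spearman at the sentence level. For OK/BAD word tags, quality-estimation work commonly reports per-class F1 together with their product (F1-Mult). A minimal sketch of that summary using scikit-learn is below; the function name and the label ids are illustrative assumptions.

from sklearn.metrics import f1_score


def f1_ok_bad(gold_tags, predicted_tags, ok_id=0, bad_id=1):
    """Per-class F1 for OK/BAD word tags plus their product (often reported as
    F1-Mult). The label ids are illustrative; real configs map tags to ids via
    their tag vocabulary."""
    f1_ok = f1_score(gold_tags, predicted_tags, pos_label=ok_id)
    f1_bad = f1_score(gold_tags, predicted_tags, pos_label=bad_id)
    return {'F1_OK': f1_ok, 'F1_BAD': f1_bad, 'F1_Mult': f1_ok * f1_bad}


# Usage on flattened tag sequences for a small batch:
gold = [0, 0, 1, 0, 1, 1, 0]
pred = [0, 1, 1, 0, 1, 0, 0]
print(f1_ok_bad(gold, pred))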