Example #1
    def testMakeScorers(self):
        def _check_scorers(scorers, instances):
            self.assertLen(scorers, len(instances))
            for scorer, instance in zip(scorers, instances):
                self.assertIsInstance(scorer, instance)

        _check_scorers(scorers.make_scorers("bleu"), [scorers.BLEUScorer])
        _check_scorers(scorers.make_scorers("BLEU"), [scorers.BLEUScorer])
        _check_scorers(scorers.make_scorers(["BLEU", "rouge"]),
                       [scorers.BLEUScorer, scorers.ROUGEScorer])
        _check_scorers(scorers.make_scorers("prf"), [scorers.PRFScorer])
        _check_scorers(scorers.make_scorers("prfmeasure"), [scorers.PRFScorer])
Example #2
    def from_config(cls, model, config, features_file=None, labels_file=None):
        """Creates an evaluator from the configuration.

        Args:
          model: A :class:`opennmt.models.Model` to evaluate.
          config: The global user configuration.
          features_file: Optional input features file to evaluate. If not set, will
            load ``eval_features_file`` from the data configuration.
          labels_file: Optional output labels file to evaluate. If not set, will load
            ``eval_labels_file`` from the data configuration.

        Returns:
          A :class:`opennmt.evaluation.Evaluator` instance.

        Raises:
          ValueError: for supervised models, if one of :obj:`features_file` and
            :obj:`labels_file` is set but not the other.
          ValueError: for unsupervised models, if :obj:`labels_file` is set.
        """
        if model.unsupervised:
            if labels_file is not None:
                raise ValueError(
                    "labels_file can not be set when evaluating unsupervised models"
                )
        elif (features_file is None) != (labels_file is None):
            raise ValueError(
                "features_file and labels_file should be both set for evaluation"
            )
        eval_config = config["eval"]
        scorers = eval_config.get("external_evaluators")
        if scorers is not None:
            scorers = scorers_lib.make_scorers(scorers)
        early_stopping_config = eval_config.get("early_stopping")
        if early_stopping_config is not None:
            early_stopping = EarlyStopping(
                metric=early_stopping_config.get("metric", "loss"),
                min_improvement=early_stopping_config.get(
                    "min_improvement", 0),
                steps=early_stopping_config["steps"],
            )
        else:
            early_stopping = None
        return cls(
            model,
            features_file or config["data"]["eval_features_file"],
            labels_file or config["data"].get("eval_labels_file"),
            eval_config["batch_size"],
            batch_type=eval_config.get("batch_type", "examples"),
            length_bucket_width=eval_config.get("length_bucket_width"),
            scorers=scorers,
            save_predictions=eval_config.get("save_eval_predictions", False),
            early_stopping=early_stopping,
            model_dir=config["model_dir"],
            export_on_best=eval_config.get("export_on_best"),
            exporter=exporters.make_exporter(
                eval_config.get("export_format", "saved_model")),
            max_exports_to_keep=eval_config.get("max_exports_to_keep", 5),
        )
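For reference, a minimal sketch of calling this factory. Every configuration key below is one the method body above actually reads; the model class and file names are hypothetical placeholders.

from opennmt import models
from opennmt.evaluation import Evaluator

model = models.TransformerBase()  # hypothetical model choice
config = {
    "model_dir": "run",
    "data": {
        "eval_features_file": "src-val.txt",
        "eval_labels_file": "tgt-val.txt",
    },
    "eval": {
        "batch_size": 32,                    # required
        "external_evaluators": ["BLEU"],     # passed to scorers_lib.make_scorers
        "early_stopping": {
            "metric": "bleu",
            "min_improvement": 0.01,
            "steps": 4,                      # required when early_stopping is set
        },
        "save_eval_predictions": True,
        "export_on_best": "bleu",
    },
}
evaluator = Evaluator.from_config(model, config)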
Example #3
    def from_config(cls, model, config, features_file=None, labels_file=None):
        """Creates an evaluator from the configuration.

        Args:
          model: A :class:`opennmt.models.model.Model` to evaluate.
          config: The global user configuration.
          features_file: Optional input features file to evaluate. If not set, will
            load ``eval_features_file`` from the data configuration.
          labels_file: Optional output labels file to evaluate. If not set, will load
            ``eval_labels_file`` from the data configuration.

        Returns:
          A :class:`opennmt.evaluation.Evaluator` instance.

        Raises:
          ValueError: if one of :obj:`features_file` and :obj:`labels_file` is set
            but not the other.
        """
        if (features_file is None) != (labels_file is None):
            raise ValueError(
                "features_file and labels_file should be both set for evaluation"
            )
        scorers = config["eval"].get("external_evaluators")
        if scorers is not None:
            scorers = scorers_lib.make_scorers(scorers)
        early_stopping_config = config["eval"].get("early_stopping")
        if early_stopping_config is not None:
            early_stopping = EarlyStopping(
                metric=early_stopping_config.get("metric", "loss"),
                min_improvement=early_stopping_config.get(
                    "min_improvement", 0),
                steps=early_stopping_config["steps"])
        else:
            early_stopping = None
        return cls(model,
                   features_file or config["data"]["eval_features_file"],
                   labels_file or config["data"].get("eval_labels_file"),
                   config["eval"]["batch_size"],
                   scorers=scorers,
                   save_predictions=config["eval"].get("save_eval_predictions",
                                                       False),
                   early_stopping=early_stopping,
                   eval_dir=os.path.join(config["model_dir"], "eval"))