def build_argument_parser():
    # Ensure all of the subcommands have been loaded
    import_module_and_submodules('sacrerouge')

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    # Add all of the root-level commands using the registry
    for name, (cls_, _) in sorted(Registrable._registry[RootSubcommand].items()):
        cls_().add_subparser(subparsers)

    # Add a command for each individual metric
    metric_command.add_metric_subcommands(subparsers)
    return parser
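
# A minimal sketch of how build_argument_parser() might be wired into a CLI
# entry point. The main() function and the 'func' dispatch attribute below are
# assumptions for illustration: each subcommand is assumed to register its
# handler via set_defaults(func=...) when it adds its subparser.
def main():
    parser = build_argument_parser()
    args = parser.parse_args()
    if hasattr(args, 'func'):
        # Dispatch to the handler registered by the chosen subcommand
        args.func(args)
    else:
        # No subcommand was given, so show the available commands
        parser.print_help()


if __name__ == '__main__':
    main()
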
def run(self, args):
    prepare_global_logging(file_path=args.log_file, silent=args.silent)

    # Load sacrerouge plus any user-specified packages so that their
    # registered components are available
    import_module_and_submodules('sacrerouge')
    include_packages = args.include_packages or []
    for package in include_packages:
        import_module_and_submodules(package)

    # Build the dataset reader and the metrics from the config file
    params = Params.from_file(args.config, args.overrides)
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
    metrics = _load_metrics(params)

    # 'input_files' may be a single path or a list of paths
    input_files = params.pop('input_files')
    if isinstance(input_files, str):
        input_files = [input_files]

    # Score every instance with every metric and save the results
    instances = dataset_reader.read(*input_files)
    metrics_dicts = score_instances(instances, metrics, args.disable_peer_jackknifing)

    save_score_results(metrics_dicts, args.output_jsonl, args.silent)
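
# _load_metrics() is defined elsewhere; a plausible sketch, assuming the config
# keeps the metric configurations under a 'metrics' key and that each metric is
# a registered Metric subclass constructible via Metric.from_params(). The key
# name and the Metric API are assumptions inferred from the calls above, and
# load_metrics() in the evaluate command below is assumed to behave the same way.
def _load_metrics(params):
    metrics = []
    for metric_params in params.pop('metrics'):
        metrics.append(Metric.from_params(metric_params))
    return metrics
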
def run(self, args):
    prepare_global_logging(file_path=args.log_file, silent=args.silent)

    # Load sacrerouge plus any user-specified packages so that their
    # registered components are available
    import_module_and_submodules('sacrerouge')
    include_packages = args.include_packages or []
    for package in include_packages:
        import_module_and_submodules(package)

    # Build the dataset reader and the metrics from the config file
    params = Params.from_file(args.config, args.overrides)
    dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
    metrics = load_metrics(params)

    # 'input_files' may be a single path or a list of paths
    input_files = params.pop('input_files')
    if isinstance(input_files, str):
        input_files = [input_files]

    # Evaluate the instances, producing aggregated (macro) and per-instance
    # (micro) results, and save each to its own output file
    instances = dataset_reader.read(*input_files)
    macro, micro_list = evaluate_instances(instances, metrics)

    save_evaluation_results(macro, micro_list, args.macro_output_json, args.micro_output_jsonl, args.silent)
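
# evaluate_instances() is defined elsewhere in sacrerouge; the helper below only
# illustrates the macro vs. micro distinction implied by its return values. The
# flat {metric_name: value} dictionaries and the simple averaging are assumptions,
# not necessarily sacrerouge's actual aggregation logic.
def _aggregate_micro_to_macro(micro_list):
    # Each entry in micro_list is assumed to hold one instance's scores;
    # the macro result averages each metric across all instances.
    macro = {}
    for key in micro_list[0]:
        macro[key] = sum(scores[key] for scores in micro_list) / len(micro_list)
    return macro


# Example: two instances scored by a single metric
# _aggregate_micro_to_macro([{'rouge-1': 0.40}, {'rouge-1': 0.60}]) -> {'rouge-1': 0.50}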