Example 1: loading annotators, datasets, and experiments from a JSON configuration file
import json
from collections import OrderedDict

from wikibench.dataset import Dataset
# Assumption: create_experiment lives in wikibench.utils next to
# create_annotator (Example 2 only shows create_annotator/create_benchmark).
from wikibench.utils import create_annotator, create_experiment


class Benchmark(object):  # hypothetical name for the enclosing class
    def __init__(self, conf_file='configurations.json'):
        self.annotators = OrderedDict()
        self.datasets = OrderedDict()
        self.experiments = OrderedDict()

        with open(conf_file, 'r') as inputfile:
            conf = json.load(inputfile)

            # Load annotators
            for annotator_conf in conf["annotators"]:
                nickname = annotator_conf["alias"]
                module_name = annotator_conf["name"]
                annotator = create_annotator(str(module_name))
                annotator.set_configuration(annotator_conf["configuration"])

                self.annotators[nickname] = annotator

            # Load datasets
            for dataset_conf in conf["datasets"]:
                self.datasets[dataset_conf["name"]] = \
                    Dataset.load_tsv(dataset_conf["file"])

            # Load experiments
            for exp_conf in conf["experiments"]:
                self.experiments[exp_conf["name"]] = \
                    create_experiment(str(exp_conf["name"]), exp_conf)
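
For reference, a minimal configurations.json that this constructor accepts is sketched below. The field names are taken from the code above; the aliases, module names, file paths, and the contents of the per-annotator "configuration" objects are illustrative assumptions. Note that each experiment entry is passed whole to create_experiment, so it may carry additional experiment-specific fields beyond "name".

{
    "annotators": [
        {
            "alias": "my-annotator",
            "name": "example_annotator_module",
            "configuration": {"endpoint": "http://localhost:8080"}
        }
    ],
    "datasets": [
        {"name": "my-dataset", "file": "datasets/my-dataset.tsv"}
    ],
    "experiments": [
        {"name": "my-experiment"}
    ]
}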
Example 2: reporting micro- and macro-averaged scores, and the command-line entry point
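        # Excerpt from the benchmark's summary() method (name inferred from
        # the entry point below). count_msg is built earlier in the method
        # and is omitted from this excerpt.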
        micro_msg = "[micro P: %.3f R: %.3f F1: %.3f]" % (
            self.metrics.precision(), self.metrics.recall(), self.metrics.f1()
        )

        if self.metrics.has_macro():
            macro_msg = " [macro P: %.3f R: %.3f F1: %.3f]" % (
                self.metrics.macro_precision(),
                self.metrics.macro_recall(),
                self.metrics.macro_f1()
            )
        else:
            macro_msg = ""

        self.log.info("%s %s%s" % (count_msg, micro_msg, macro_msg))

if __name__ == "__main__":
    import sys
    from wikibench.dataset import Dataset
    from wikibench.utils import create_annotator, create_benchmark

    # Positional arguments: benchmark name, annotator name, dataset file.
    benchmark_name, annotator_name, dataset_path = sys.argv[1:4]

    dataset = Dataset.load(dataset_path)
    annotator = create_annotator(annotator_name)
    benchmark = create_benchmark(benchmark_name)

    # Remaining command-line arguments are benchmark-specific options.
    benchmark.parse_arguments(sys.argv[4:])
    benchmark.run(dataset, annotator)
    benchmark.summary()
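
A typical invocation of this entry point (assuming the module is saved as benchmark.py; all names here are placeholders):

    python benchmark.py <benchmark_name> <annotator_name> <dataset_file> [options...]

Everything after the third positional argument is forwarded to the selected benchmark's parse_arguments(), so each benchmark can define its own options.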