Example #1
    def __init__(self, best_match_num, test_triple_num, gold_triple_num):
        # Split the smatch counts into matched, spurious (guessed only), and missing (reference only) triples
        num_matches, num_only_guessed, num_only_ref = (best_match_num,
                                                       test_triple_num - best_match_num,
                                                       gold_triple_num - best_match_num)
        super().__init__({PRIMARY: evaluation.SummaryStatistics(num_matches, num_only_guessed, num_only_ref)},
                         default={PRIMARY.name: PRIMARY})
        # smatch derives precision, recall and F1 from the same three counts
        self.p, self.r, self.f1 = smatch.compute_f(best_match_num, test_triple_num, gold_triple_num)
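For reference, smatch.compute_f derives precision, recall, and F1 from exactly these three counts. A minimal sketch of that arithmetic (illustrative only; f_from_counts is a hypothetical name, not the smatch library's code):

def f_from_counts(best_match_num, test_triple_num, gold_triple_num):
    # Precision: matched triples over guessed triples; recall: matched over gold (illustrative sketch)
    precision = best_match_num / test_triple_num if test_triple_num else 0.0
    recall = best_match_num / gold_triple_num if gold_triple_num else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1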
Example #2
def get_scores(s1, s2, eval_type, verbose):
    converter = ConlluConverter()
    # Collect all outgoing dependency edges of every node, for the guessed and reference sentences
    edges = [[e for nodes, _ in converter.build_nodes(s) for n in nodes for e in n.outgoing]
             for s in (s1, s2)]
    for es in edges:
        for e in es:
            # Unlabeled evaluation ignores relations entirely; labeled evaluation strips the
            # relation subtype, e.g. "nsubj:pass" -> "nsubj"
            e.rel = None if eval_type == evaluation.UNLABELED else e.rel.partition(":")[0]
    g, r = map(set, edges)  # guessed and reference edge sets
    res = evaluation.EvaluatorResults(
        {PRIMARY: evaluation.SummaryStatistics(len(g & r), len(g - r), len(r - g))},
        default={PRIMARY.name: PRIMARY})
    if verbose:
        print("Evaluation type: (" + eval_type + ")")
        res.print()
    return res
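The labeled/unlabeled switch only changes what each edge is compared on before the set arithmetic; the counting itself is plain set intersection and difference. A self-contained sketch of the same idea, using hypothetical (head, dependent, relation) tuples in place of the project's Edge objects:

# Hypothetical guessed and reference edges as (head, dependent, relation) tuples
guessed = {(2, 1, "nsubj"), (2, 3, "obj"), (2, 4, "obl:tmod")}
reference = {(2, 1, "nsubj"), (2, 3, "iobj"), (2, 4, "obl:tmod")}

def strip_subtype(rel):
    return rel.partition(":")[0]  # "obl:tmod" -> "obl", as in the labeled case above

labeled_g = {(h, d, strip_subtype(r)) for h, d, r in guessed}
labeled_r = {(h, d, strip_subtype(r)) for h, d, r in reference}
unlabeled_g = {(h, d) for h, d, _ in guessed}
unlabeled_r = {(h, d) for h, d, _ in reference}

# matched, only-guessed, only-reference: the three arguments to SummaryStatistics
print(len(labeled_g & labeled_r), len(labeled_g - labeled_r), len(labeled_r - labeled_g))              # 2 1 1
print(len(unlabeled_g & unlabeled_r), len(unlabeled_g - unlabeled_r), len(unlabeled_r - unlabeled_g))  # 3 0 0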
Example #3
def get_scores(s1, s2, eval_type, verbose):
    converter = SdpConverter()
    # Collect all outgoing edges; top nodes additionally get an artificial TOP edge so that
    # top marking is scored as well
    edges = [[e for nodes, _ in converter.build_nodes(s) for n in nodes
              for e in n.outgoing + ([create_top_edge(converter)] if n.is_top else [])]
             for s in (s1, s2)]
    if eval_type == evaluation.UNLABELED:
        for es in edges:
            for e in es:
                e.rel = None  # ignore edge labels in unlabeled evaluation
    g, r = map(set, edges)  # guessed and reference edge sets
    res = evaluation.EvaluatorResults(
        {PRIMARY: evaluation.SummaryStatistics(len(g & r), len(g - r), len(r - g))},
        default={PRIMARY.name: PRIMARY})
    if verbose:
        print("Evaluation type: (" + eval_type + ")")
        res.print()
    return res
Example #4
def get_scores(s1, s2, eval_type, verbose):
    converter = SdpConverter()
    # Same edge collection as above, but via full graphs, constructing the artificial TOP edge directly
    edges = [[e for graph in converter.generate_graphs(s) for n in graph.nodes
              for e in n.outgoing + ([SdpConverter.Edge(rel=SdpConverter.TOP, remote=False, head=graph.root)]
                                     if n.is_top else [])]
             for s in (s1, s2)]
    if eval_type == evaluation.UNLABELED:
        for es in edges:
            for e in es:
                e.rel = None  # ignore edge labels in unlabeled evaluation
    g, r = map(set, edges)  # guessed and reference edge sets
    res = evaluation.EvaluatorResults(
        {PRIMARY: evaluation.SummaryStatistics(len(g & r), len(g - r), len(r - g))},
        default={PRIMARY.name: PRIMARY})
    if verbose:
        print("Evaluation type: (" + eval_type + ")")
        res.print()
    return res
Example #5
File: sdp.py Project: StefPac/tupa
def get_scores(s1, s2, eval_type, verbose):
    converter = SdpConverter()
    # Build the guessed and reference edge sets directly with set comprehensions
    edges = [{e for nodes, _ in converter.build_nodes(s) for n in nodes for e in n.outgoing}
             for s in (s1, s2)]
    if eval_type == evaluation.UNLABELED:
        for es in edges:
            for e in es:
                e.rel = None  # ignore edge labels in unlabeled evaluation
    res = evaluation.EvaluatorResults(
        {PRIMARY: evaluation.SummaryStatistics(len(edges[0] & edges[1]),    # matched edges
                                               len(edges[0] - edges[1]),    # only in guessed
                                               len(edges[1] - edges[0]))},  # only in reference
        default={PRIMARY.name: PRIMARY})
    if verbose:
        print("Evaluation type: (" + eval_type + ")")
        res.print()
    return res
Example #6
def get_scores(s1, s2, eval_type, verbose=False, units=False):
    converter = ConlluConverter()
    n1, n2 = [list(converter.build_nodes(s)) for s in (s1, s2)]
    # Guessed and reference must be over the same token sequence
    t1, t2 = map(join_tokens, (n1, n2))
    assert t1 == t2, "Tokens do not match: '%s' != '%s'" % diff(t1, t2)
    edges = [[e for nodes, _ in dep_nodes for n in nodes for e in n.outgoing] for dep_nodes in (n1, n2)]
    for es in edges:
        for e in es:
            # Unlabeled evaluation ignores relations; labeled evaluation strips the relation subtype
            e.rel = None if eval_type == evaluation.UNLABELED else e.rel.partition(":")[0]
    g, r = map(set, edges)  # guessed and reference edge sets
    res = evaluation.EvaluatorResults({PRIMARY: evaluation.SummaryStatistics(len(g & r), len(g - r), len(r - g))},
                                      default={PRIMARY.name: PRIMARY})
    if verbose:
        print()
        print("Evaluation type: (" + eval_type + ")")
        if units:
            print("==> Mutual Units:")
            print(g & r)
            print("==> Only in guessed:")
            print(g - r)
            print("==> Only in reference:")
            print(r - g)
        res.print()
    return res
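Example #6 also relies on two module-level helpers that the snippet does not show, join_tokens and diff. A plausible minimal sketch of what they could look like (hypothetical implementations; the project's actual helpers and node attributes may differ):

def join_tokens(dep_nodes):
    # Concatenate the surface tokens of all sentences into one string
    # (assumes each node carries its token text as n.token.text; hypothetical)
    return " ".join(n.token.text for nodes, _ in dep_nodes for n in nodes)

def diff(t1, t2):
    # Return a short window around the first point where the two token strings diverge,
    # for use in the assertion message (hypothetical)
    i = next((k for k, (a, b) in enumerate(zip(t1, t2)) if a != b), min(len(t1), len(t2)))
    return t1[max(0, i - 20):i + 20], t2[max(0, i - 20):i + 20]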