Example #1
    # Assumes a module-level `import math`; add_basic_edges, convert,
    # serialize_conllu, and api come from the project under test.
    @classmethod
    def common_logic(cls, cur_name):
        name = cur_name.split("test_")[1]
        for spec, sent_ in cls.out[name].items():
            sent = [v.copy() for v in sent_]
            add_basic_edges(sent)
            # Cancel every conversion except the one under test (and
            # extra_inner_weak_modifier_verb_reconstruction).
            converted, _ = convert(
                [sent],
                True,
                True,
                True,
                math.inf,
                False,
                False,
                False,
                False,
                False,
                funcs_to_cancel=list(
                    set(api.get_conversion_names()).difference({
                        name, "extra_inner_weak_modifier_verb_reconstruction"
                    })))
            serialized_conllu = serialize_conllu(converted, [None], False)
            for gold_line, out_line in zip(cls.gold[name][spec],
                                           serialized_conllu.split("\n")):
                assert out_line.split() == gold_line, (
                    f"{spec}\n{serialized_conllu}")
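
# Illustrative sketch of the comparison convention above: gold lines are
# stored pre-split into token lists, while the serialized CoNLL-U output is
# split line by line at comparison time. Values here are invented, not taken
# from the project's test data.
_gold = [["1", "He", "he", "PRON"], ["2", "runs", "run", "VERB"]]
_serialized = "1\tHe\the\tPRON\n2\truns\trun\tVERB"
for _gold_line, _out_line in zip(_gold, _serialized.split("\n")):
    assert _out_line.split() == _gold_line
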
Example #2
    @classmethod
    def common_logic_combined(cls, cur_name, rnac=False):
        name = cur_name.split("test_combined_")[1]
        for spec, sent_ in cls.out[name].items():
            sent = {k: v.copy() for k, v in sent_.items()}
            add_basic_edges(sent)
            converted, _ = convert([sent], True, True, True, math.inf, False,
                                   False, rnac, False, False, ConvsCanceler())
            serialized_conllu = serialize_conllu(converted, [None], False)
            for gold_line, out_line in zip(cls.gold_combined[name][spec],
                                           serialized_conllu.split("\n")):
                assert gold_line == out_line.split(), (
                    f"{spec}\n{serialized_conllu}")

    def test_no_node_adding(self):
        self.common_logic_combined("test_combined_no_node_adding", rnac=True)


# Dynamically register one test per conversion (plus a combined variant);
# func_name is bound as a lambda default argument to avoid Python's
# late binding of closed-over loop variables.
for cur_func_name in api.get_conversion_names():
    if cur_func_name in ['extra_inner_weak_modifier_verb_reconstruction']:
        continue
    test_func_name = "test_" + cur_func_name
    setattr(
        TestConversions, test_func_name,
        staticmethod(lambda func_name=test_func_name:
                     TestConversions.common_logic(func_name)))
    combined_func_name = "test_combined_" + cur_func_name
    setattr(
        TestConversions, combined_func_name,
        staticmethod(lambda func_name=combined_func_name:
                     TestConversions.common_logic_combined(func_name)))
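
# Why func_name is bound as a default argument above: a lambda that merely
# closes over the loop variable would see only its final value. A minimal,
# self-contained illustration:
_late = [lambda: i for i in range(3)]
_bound = [lambda i=i: i for i in range(3)]
assert [f() for f in _late] == [2, 2, 2]
assert [f() for f in _bound] == [0, 1, 2]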
    # The flag name below is an assumption, inferred from the
    # args.specified_triggers check further down.
    arg_parser.add_argument("--specified_triggers",
                            type=str,
                            default=None)

    args = arg_parser.parse_args()

    # which group of strategies to use
    if args.strat_start >= 0:
        if args.strat_end >= 0:
            strategies = strategies[args.strat_start:args.strat_end]
        else:
            strategies = strategies[args.strat_start:args.strat_start + 1]
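
    # Sketch of the slice semantics above (invented list): strat_start=2 with
    # strat_end=4 keeps strategies[2:4]; strat_start=2 alone keeps just
    # strategies[2:3], i.e. a single strategy.
    _demo = ["s0", "s1", "s2", "s3", "s4"]
    assert _demo[2:4] == ["s2", "s3"]
    assert _demo[2:2 + 1] == ["s2"]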

    # for the ablation test, get the name of the ablated conversion
    ablation = ""
    if args.ablation >= 0:
        ablations = uda_api.get_conversion_names()
        ablation = sorted(ablations)[args.ablation]
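
    # Sketch of the index-to-name mapping above: get_conversion_names()
    # presumably returns an unordered collection, so sorting gives each CLI
    # index a stable target. Names here are invented for illustration.
    _demo_names = {"conj_propagation", "copula_reconstruction",
                   "passive_alternation"}
    assert sorted(_demo_names)[1] == "copula_reconstruction"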

    # use specific triggers if requested (without this option, all triggers
    # under the triggers folder are used)
    if args.specified_triggers:
        g_triggers = args.specified_triggers

    # The different actions we can take:
    #   annotate: produce an annotated dataset for Spike to index.
    #       After running this command, index the annotated data with Spike;
    #       only then run the 'generate' and 'eval' commands.
    #   generate: generate patterns (from the train set) that will later be
    #       attested on the dev and test sets using the 'eval' command.
    #   eval: filter and evaluate the generated patterns on the dev and test
    #       sets, respectively.
    #   ablations: a separate test that measures each conversion's contribution.
    if args.action == 'annotate':