def determine_best_strategy(self, annotation_file, questions_file, corpus_file, debug=False):
        """
        For every strategy provided in the constructor, determines the program accuracy and stores the information sorted
        in the variable __strategies_map

        :param annotation_file: file_path to the annotation file
        :param questions_file: file_path to the questions file
        :param corpus_file: file_path to the corpus file
        :param debug: if true, prints the current strategy being used at a given instance
        """
        result = []

        try:
            for tupl in self.__strategies:
                triggers_strat = tupl[0]
                answers_strat = tupl[1]
                accuracy = tupl[2]

                if accuracy is None:
                    if debug: print "bsc.add_test(st." + triggers_strat.description + ", sa." + answers_strat.description + ", ",
                    accuracy = myAvalia(annotation_file, questions_file, corpus_file, trigger_strategy=triggers_strat, answer_strategy=answers_strat)
                    if debug: print str(accuracy) + ")"

                result.append(self._create_tuple(triggers_strat, answers_strat, accuracy))

        except KeyboardInterrupt:
            print ".... Stopping....."
        finally:
            # sort
            self.__strategies = list(result)
            self.__sorted_strategies = list(result)
            self.__sorted_strategies.sort(key=lambda tup: tup[2], reverse=True)
# ---- Example #2 (snippet separator from the original source page) ----
    test_input_file = "TestResources/TestInput.txt"
    corpus_file_path = "TestResources/PerguntasPosSistema.txt"

    question_1 = "A tua familia é numerosa?"
    print "Q: ", question_1
    print "R: ", sss(corpus_file_path, question_1)

    print ""

    question_2 = "Tens filhos?"
    print "Q: ", question_1
    print "R: ", sss(corpus_file_path, question_2)

    print ""

    accuracy = myAvalia(annotations_file_path, test_input_file, corpus_file_path)
    print test_input_file, "accuracy is:", accuracy

    ################################
    # Using custom strategies
    ###############################

    tagger = BigramForestTagger()  # training corpus floresta
    tagger.train()

    trigger_strategy = st.Braccard(tagger, 0.25, 0.50, True)
    answer_strategy = sa.YesNoSimilar(0.75, 0.5, dice_sentence, True)

    question_1 = "A tua familia é numerosa?"
    print "Q: ", question_1
    print "R: ", sss(corpus_file_path, question_1, trigger_strategy, answer_strategy)
# ---- Example #3 (snippet separator from the original source page) ----
# -*- coding: utf-8 -*-

from Interface import sss
from Interface import myAvalia

if __name__ == "__main__":
    corpus_src = "TestResources/PerguntasPosSistema.txt"

    question_1 = "A tua familia é numerosa?"
    print "Q: ", question_1
    print "R: ", sss(corpus_src, question_1)

    print ""

    question_2 = "Tens filhos?"
    print "Q: ", question_1
    print "R: ", sss(corpus_src, question_2)

    print ""

    accuracy = myAvalia("TestResources/AnotadoAll.txt", "TestResources/Perguntas.txt", "TestResources/PerguntasPosSistema.txt")
    print "Perguntas.txt accuracy is: ", accuracy