# NOTE: the module paths below are assumptions for illustration; adjust them
# to the project layout.
from AnnotationCheck import AnnotationCheck
from AnswerPicker import AnswerPicker
from UserInputTriggerAnswerReader import UserInputTriggerAnswerReader


def myAvalia(annotation_file, questions_file, corpus_file_path="TestResources/PerguntasPosSistema.txt"):
    """Earlier variant without pluggable comparison strategies."""
    # process the corpus file (old API: AnswerPicker reads the file itself)
    answer_picker = AnswerPicker()
    answer_picker.process_file(corpus_file_path)

    # analyse annotation_file and return the accuracy
    annotation_check = AnnotationCheck(annotation_file)
    accuracy = annotation_check.evaluate_accuracy(answer_picker, questions_file, 20)

    return accuracy

def myAvalia(annotation_file, questions_file, corpus_file_path="TestResources/PerguntasPosSistema.txt", trigger_strategy=None, answer_strategy=None):
    """
    :param annotation_file: annotated file
    :param questions_file: path to the file listing the questions
    :param corpus_file_path: corpus file path
    :param trigger_strategy: optional strategy for comparing the user input against a trigger
    :param answer_strategy: optional strategy for comparing candidate answers
    :return: accuracy of the system
    """

    # process the corpus file: the reader streams each user input / trigger /
    # answer entry into the picker's callback
    file_reader = UserInputTriggerAnswerReader()
    answer_picker = AnswerPicker(file_reader, trigger_strategy, answer_strategy)
    file_reader.process_file(corpus_file_path, answer_picker.process_user_input_answer)

    # analyse annotation_file and return the accuracy
    annotation_check = AnnotationCheck(annotation_file)
    accuracy = annotation_check.evaluate_accuracy(answer_picker, questions_file, 20)

    return accuracy
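

# A minimal usage sketch of myAvalia above; both file paths below are
# placeholders for illustration, not paths taken from the project.
if __name__ == "__main__":
    accuracy = myAvalia("TestResources/AvaliacaoAnotado.txt",
                        "TestResources/Perguntas.txt")
    print("Accuracy:", accuracy)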
    def __init__(self, corpus_path, annotation_file_path):
        AnnotationCheck.__init__(self, annotation_file_path)

        # read the corpus once at construction time and hand every entry to the picker
        self._file_reader = UserInputTriggerAnswerReader()
        self._answer_picker = AnswerPicker(self._file_reader)
        self._file_reader.process_file(corpus_path, self._answer_picker.process_user_input_answer)
    def __init__(self, corpus_path, annotation_file_path):
        AnnotationCheck.__init__(self, annotation_file_path)

        # older variant: AnswerPicker reads the corpus file itself
        self._answer_picker = AnswerPicker()
        self._answer_picker.process_file(corpus_path)
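

# A minimal sketch of how the constructors above might be wired into a
# subclass of AnnotationCheck; the class name CorpusAnnotationCheck and the
# accuracy_for helper are assumptions for illustration, not project code.
class CorpusAnnotationCheck(AnnotationCheck):
    def __init__(self, corpus_path, annotation_file_path):
        AnnotationCheck.__init__(self, annotation_file_path)
        self._file_reader = UserInputTriggerAnswerReader()
        self._answer_picker = AnswerPicker(self._file_reader)
        self._file_reader.process_file(corpus_path, self._answer_picker.process_user_input_answer)

    def accuracy_for(self, questions_file, number_of_questions=20):
        # same evaluate_accuracy call that myAvalia makes, reusing the picker
        # built in __init__
        return self.evaluate_accuracy(self._answer_picker, questions_file, number_of_questions)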