Example 1
def test_grade_dag():
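    """Check grade_dag and lcs_partial_credit against known expected results.

    grade_dag returns a (num_initial_correct, true_answer_length) tuple.
    """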
    for submission, expected, expected_ed in zip(problem_1_submissions,
                                                 problem_1_expected,
                                                 problem_1_expected_ed):
        assert grade_dag(submission, problem_1_dag, problem_1_groups) == (expected, 10)
        assert lcs_partial_credit(submission, problem_1_dag, {}) == expected_ed

    for submission, expected, expected_ed_no_groups, expected_ed_groups in zip(
            problem_2_submissions, problem_2_expected,
            problem_2_expected_ed_no_groups, problem_2_expected_ed_groups):
        assert grade_dag(submission, problem_2_dag, problem_2_groups) == (expected, 7)
        assert lcs_partial_credit(submission, problem_2_dag_no_groups, {}) == expected_ed_no_groups
        assert lcs_partial_credit(submission, problem_2_dag, problem_2_groups) == expected_ed_groups
Example 2
def test_grade_dag():
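    """Exercise grade_dag and lcs_partial_credit on both test problems."""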
    for submission, expected, expected_ed in zip(problem_1_submissions,
                                                 problem_1_expected,
                                                 problem_1_expected_ed):
        assert grade_dag(submission, problem_1_dag,
                         problem_1_groups) == expected
        assert lcs_partial_credit(submission, problem_1_dag, {}) == expected_ed

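    # Print the index of each problem-2 case so a failing assertion is easy to locate.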
    count = 0
    for submission, expected, expected_ed_no_groups, expected_ed_groups in zip(
            problem_2_submissions, problem_2_expected,
            problem_2_expected_ed_no_groups, problem_2_expected_ed_groups):
        print(count)
        count += 1
        assert grade_dag(submission, problem_2_dag,
                         problem_2_groups) == expected
        assert lcs_partial_credit(submission, problem_2_dag,
                                  {}) == expected_ed_no_groups
        assert lcs_partial_credit(submission, problem_2_dag,
                                  problem_2_groups) == expected_ed_groups
Example 3
def grade(element_html, data):
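    """Grade a submitted block ordering according to the element's
    grading-method attribute: 'unordered', 'ordered', 'ranking', or 'dag'.
    """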
    element = lxml.html.fragment_fromstring(element_html)
    answer_name = pl.get_string_attrib(element, 'answers-name')

    student_answer = data['submitted_answers'][answer_name]
    grading_mode = pl.get_string_attrib(element, 'grading-method',
                                        GRADING_METHOD_DEFAULT)
    check_indentation = pl.get_boolean_attrib(element, 'indentation',
                                              INDENTION_DEFAULT)
    feedback_type = pl.get_string_attrib(element, 'feedback', FEEDBACK_DEFAULT)
    answer_weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)

    true_answer_list = data['correct_answers'][answer_name]

    indent_score = 0
    final_score = 0
    feedback = ''
    first_wrong = -1

    if len(student_answer) == 0:
        data['format_errors'][answer_name] = 'Your submitted answer was empty.'
        return

    if grading_mode == 'unordered':
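        # Score = (correct selections - incorrect selections) / number of true answers.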
        true_answer_list = filter_multiple_from_array(
            true_answer_list, ['uuid', 'indent', 'inner_html'])
        correct_selections = [
            opt for opt in student_answer if opt in true_answer_list
        ]
        incorrect_selections = [
            opt for opt in student_answer if opt not in true_answer_list
        ]
        final_score = float(
            (len(correct_selections) - len(incorrect_selections)) /
            len(true_answer_list))
        final_score = max(0.0, final_score)  # scores cannot be below 0
    elif grading_mode == 'ordered':
        student_answer = [ans['inner_html'] for ans in student_answer]
        true_answer = [ans['inner_html'] for ans in true_answer_list]
        final_score = 1 if student_answer == true_answer else 0

    elif grading_mode == 'ranking':
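        # Count adjacent pairs of submitted blocks whose rankings are equal or differ by one.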
        ranking = filter_multiple_from_array(
            data['submitted_answers'][answer_name], ['ranking'])
        ranking = list(map(lambda x: x['ranking'], ranking))
        correctness = 1 + ranking.count(0)
        partial_credit = 0
        if len(ranking) != 0 and len(ranking) == len(true_answer_list):
            ranking = list(filter(lambda x: x != 0, ranking))
            for x in range(0, len(ranking) - 1):
                if int(ranking[x]) == int(
                        ranking[x + 1]) or int(ranking[x]) + 1 == int(
                            ranking[x + 1]):
                    correctness += 1
        else:
            correctness = 0
        correctness = max(correctness, partial_credit)
        final_score = float(correctness / len(true_answer_list))
    elif grading_mode == 'dag':
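        # Grade the submitted order against the dependency graph; credit is all-or-nothing.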
        order = [ans['tag'] for ans in student_answer]
        depends_graph = {
            ans['tag']: ans['depends']
            for ans in true_answer_list
        }
        group_belonging = {
            ans['tag']: ans['group']
            for ans in true_answer_list
        }

        correctness, first_wrong = grade_dag(order, depends_graph,
                                             group_belonging)

        if correctness == len(depends_graph.keys()):
            final_score = 1
        elif correctness < len(depends_graph.keys()):
            final_score = 0  # TODO figure out a partial credit scheme
            if feedback_type == 'none':
                feedback = ''
            elif feedback_type == 'first-wrong':
                if first_wrong == -1:
                    feedback = 'Your answer is correct so far, but it is incomplete.'
                else:
                    feedback = r"""Your answer is incorrect starting at <span style="color:red;">block number """ + str(first_wrong + 1) + \
                        r"""</span>. The problem is most likely one of the following:
                        <ul><li> This block is not a part of the correct solution </li>
                        <li> This block is not adequately supported by the previous blocks </li>
                        <li> You have attempted to start a new section of the answer without finishing the previous section </li></ul>"""

    if check_indentation:
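        # Scale the score by the fraction of blocks with correct indentation.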
        student_answer_indent = filter_multiple_from_array(
            data['submitted_answers'][answer_name], ['indent'])
        student_answer_indent = list(
            map(lambda x: x['indent'], student_answer_indent))
        true_answer_indent = filter_multiple_from_array(
            data['correct_answers'][answer_name], ['indent'])
        true_answer_indent = list(
            map(lambda x: x['indent'], true_answer_indent))
        for i, indent in enumerate(student_answer_indent):
            # An expected indent of -1 is a wildcard accepting any indentation;
            # compare as ints so a str/int type mismatch cannot silently fail.
            if int(true_answer_indent[i]) == -1 or int(
                    indent) == int(true_answer_indent[i]):
                indent_score += 1
        final_score = final_score * (indent_score / len(true_answer_indent))
    data['partial_scores'][answer_name] = {
        'score': round(final_score, 2),
        'feedback': feedback,
        'weight': answer_weight,
        'first_wrong': first_wrong
    }
Example 4
def grade(element_html, data):
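    """Grade a submitted block ordering.

    'ranking' is reduced to a dependency graph and graded through the same
    path as 'dag'; partial credit is either all-or-nothing or LCS-based.
    """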
    element = lxml.html.fragment_fromstring(element_html)
    answer_name = pl.get_string_attrib(element, 'answers-name')

    student_answer = data['submitted_answers'][answer_name]
    grading_mode = pl.get_string_attrib(element, 'grading-method',
                                        GRADING_METHOD_DEFAULT)
    check_indentation = pl.get_boolean_attrib(element, 'indentation',
                                              INDENTION_DEFAULT)
    feedback_type = pl.get_string_attrib(element, 'feedback', FEEDBACK_DEFAULT)
    answer_weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)
    partial_credit_type = pl.get_string_attrib(element, 'partial-credit',
                                               'lcs')

    true_answer_list = data['correct_answers'][answer_name]

    final_score = 0
    feedback = ''
    first_wrong = -1

    if len(student_answer) == 0:
        data['format_errors'][answer_name] = 'Your submitted answer was empty.'
        return

    if check_indentation:
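        # Void any block whose indentation is wrong so it cannot count as correct below.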
        indentations = {ans['uuid']: ans['indent'] for ans in true_answer_list}
        for ans in student_answer:
            if ans['indent'] != indentations.get(ans['uuid']):
                if 'tag' in ans:
                    ans['tag'] = None
                else:
                    ans['inner_html'] = None

    if grading_mode == 'unordered':
        true_answer_list = filter_multiple_from_array(
            true_answer_list, ['uuid', 'indent', 'inner_html'])
        correct_selections = [
            opt for opt in student_answer if opt in true_answer_list
        ]
        incorrect_selections = [
            opt for opt in student_answer if opt not in true_answer_list
        ]
        final_score = float(
            (len(correct_selections) - len(incorrect_selections)) /
            len(true_answer_list))
        final_score = max(0.0, final_score)  # scores cannot be below 0
    elif grading_mode == 'ordered':
        student_answer = [ans['inner_html'] for ans in student_answer]
        true_answer = [ans['inner_html'] for ans in true_answer_list]
        final_score = 1 if student_answer == true_answer else 0

    elif grading_mode in ['ranking', 'dag']:
        submission = [ans['tag'] for ans in student_answer]
        depends_graph = {}
        group_belonging = {}

        if grading_mode == 'ranking':
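            # Reduce the ranking specification to a dependency graph: each
            # block depends on every block of the previous rank.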
            true_answer_list = sorted(true_answer_list,
                                      key=lambda x: int(x['ranking']))
            true_answer = [answer['tag'] for answer in true_answer_list]
            tag_to_rank = {
                answer['tag']: answer['ranking']
                for answer in true_answer_list
            }
            lines_of_rank = {
                rank: [tag for tag in tag_to_rank if tag_to_rank[tag] == rank]
                for rank in set(tag_to_rank.values())
            }

            cur_rank_depends = []
            prev_rank = None
            for tag in true_answer:
                ranking = tag_to_rank[tag]
                if prev_rank is not None and ranking != prev_rank:
                    cur_rank_depends = lines_of_rank[prev_rank]
                depends_graph[tag] = cur_rank_depends
                prev_rank = ranking

        elif grading_mode == 'dag':
            depends_graph, group_belonging = extract_dag(true_answer_list)

        num_initial_correct, true_answer_length = grade_dag(
            submission, depends_graph, group_belonging)
        first_wrong = -1 if num_initial_correct == len(
            submission) else num_initial_correct

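        # Apply the configured partial-credit policy: all-or-nothing, or
        # scoring by edit distance from the submission to a correct ordering.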
        if partial_credit_type == 'none':
            if num_initial_correct == true_answer_length:
                final_score = 1
            elif num_initial_correct < true_answer_length:
                final_score = 0
        elif partial_credit_type == 'lcs':
            edit_distance = lcs_partial_credit(submission, depends_graph,
                                               group_belonging)
            final_score = max(
                0,
                float(true_answer_length - edit_distance) / true_answer_length)

        if final_score < 1:
            if feedback_type == 'none':
                feedback = ''
            elif feedback_type == 'first-wrong':
                if first_wrong == -1:
                    feedback = FIRST_WRONG_FEEDBACK['incomplete']
                else:
                    feedback = FIRST_WRONG_FEEDBACK['wrong-at-block'].format(
                        str(first_wrong + 1))
                    has_block_groups = group_belonging != {} and set(
                        group_belonging.values()) != {None}
                    if check_indentation:
                        feedback += FIRST_WRONG_FEEDBACK['indentation']
                    if has_block_groups:
                        feedback += FIRST_WRONG_FEEDBACK['block-group']
                    feedback += '</ul>'

    data['partial_scores'][answer_name] = {
        'score': round(final_score, 2),
        'feedback': feedback,
        'weight': answer_weight,
        'first_wrong': first_wrong
    }
Example 5
def grade(element_html, data):
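    """Grade a submitted block ordering; first-wrong feedback strings are
    looked up in DAG_FIRST_WRONG_FEEDBACK rather than built inline.
    """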
    element = lxml.html.fragment_fromstring(element_html)
    answer_name = pl.get_string_attrib(element, 'answers-name')

    student_answer = data['submitted_answers'][answer_name]
    grading_mode = pl.get_string_attrib(element, 'grading-method',
                                        GRADING_METHOD_DEFAULT)
    check_indentation = pl.get_boolean_attrib(element, 'indentation',
                                              INDENTION_DEFAULT)
    feedback_type = pl.get_string_attrib(element, 'feedback', FEEDBACK_DEFAULT)
    answer_weight = pl.get_integer_attrib(element, 'weight', WEIGHT_DEFAULT)

    true_answer_list = data['correct_answers'][answer_name]

    indent_score = 0
    final_score = 0
    feedback = ''
    first_wrong = -1

    if len(student_answer) == 0:
        data['format_errors'][answer_name] = 'Your submitted answer was empty.'
        return

    if grading_mode == 'unordered':
        true_answer_list = filter_multiple_from_array(
            true_answer_list, ['uuid', 'indent', 'inner_html'])
        correct_selections = [
            opt for opt in student_answer if opt in true_answer_list
        ]
        incorrect_selections = [
            opt for opt in student_answer if opt not in true_answer_list
        ]
        final_score = float(
            (len(correct_selections) - len(incorrect_selections)) /
            len(true_answer_list))
        final_score = max(0.0, final_score)  # scores cannot be below 0
    elif grading_mode == 'ordered':
        student_answer = [ans['inner_html'] for ans in student_answer]
        true_answer = [ans['inner_html'] for ans in true_answer_list]
        final_score = 1 if student_answer == true_answer else 0

    elif grading_mode == 'ranking':
        ranking = filter_multiple_from_array(
            data['submitted_answers'][answer_name], ['ranking'])
        ranking = list(map(lambda x: x['ranking'], ranking))
        correctness = 1 + ranking.count(0)
        partial_credit = 0
        if len(ranking) != 0 and len(ranking) == len(true_answer_list):
            ranking = list(filter(lambda x: x != 0, ranking))
            for x in range(0, len(ranking) - 1):
                if int(ranking[x]) == int(
                        ranking[x + 1]) or int(ranking[x]) + 1 == int(
                            ranking[x + 1]):
                    correctness += 1
        else:
            correctness = 0
        correctness = max(correctness, partial_credit)
        final_score = float(correctness / len(true_answer_list))
    elif grading_mode == 'dag':
        order = [ans['tag'] for ans in student_answer]
        depends_graph = {
            ans['tag']: ans['depends']
            for ans in true_answer_list
        }
        group_belonging = {
            ans['tag']: ans['group']
            for ans in true_answer_list
        }

        correctness, first_wrong = grade_dag(order, depends_graph,
                                             group_belonging)

        if correctness == len(depends_graph.keys()):
            final_score = 1
        elif correctness < len(depends_graph.keys()):
            final_score = 0  # TODO figure out a partial credit scheme
            if feedback_type == 'none':
                feedback = ''
            elif feedback_type == 'first-wrong':
                if first_wrong == -1:
                    feedback = DAG_FIRST_WRONG_FEEDBACK['incomplete']
                else:
                    feedback = DAG_FIRST_WRONG_FEEDBACK[
                        'wrong-at-block'].format(str(first_wrong + 1))

    if check_indentation:
        student_answer_indent = filter_multiple_from_array(
            data['submitted_answers'][answer_name], ['indent'])
        student_answer_indent = list(
            map(lambda x: x['indent'], student_answer_indent))
        true_answer_indent = filter_multiple_from_array(
            data['correct_answers'][answer_name], ['indent'])
        true_answer_indent = list(
            map(lambda x: x['indent'], true_answer_indent))
        for i, indent in enumerate(student_answer_indent):
            # An expected indent of -1 is a wildcard accepting any indentation;
            # compare as ints so a str/int type mismatch cannot silently fail.
            if int(true_answer_indent[i]) == -1 or int(
                    indent) == int(true_answer_indent[i]):
                indent_score += 1
        final_score = final_score * (indent_score / len(true_answer_indent))
    data['partial_scores'][answer_name] = {
        'score': round(final_score, 2),
        'feedback': feedback,
        'weight': answer_weight,
        'first_wrong': first_wrong
    }
Example 6
def test_solve_dag():
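    """A solution produced by solve_dag must be graded as fully correct."""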
    problems = [(problem_1_dag, problem_1_groups),
                (problem_2_dag, problem_2_groups),
                (problem_3_dag, problem_3_groups)]
    for depends_graph, group_belonging in problems:
        solution = solve_dag(depends_graph, group_belonging)
        assert len(solution) == grade_dag(solution, depends_graph, group_belonging)[0]