Example 1
def _grade(student, request, course, keep_raw_scores, field_data_cache,
           scores_client):
    """
    Unwrapped version of "grade"

    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
      up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
      make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
      for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    with outer_atomic():
        if field_data_cache is None:
            field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(
                field_data_cache)

    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
    # scores that were registered with the submissions API, which for the moment
    # means only openassessment (edx-ora2)
    # We need to import this here to avoid a circular dependency of the form:
    # XBlock --> submissions --> Django Rest Framework error strings -->
    # Django translation --> ... --> courseware --> submissions
    from submissions import api as sub_api  # installed from the edx-submissions repository

    with outer_atomic():
        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(),
            anonymous_id_for_user(student, course.id))
        max_scores_cache = MaxScoresCache.create_for_course(course)

        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    grading_context = course.grading_context
    raw_scores = []

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            with outer_atomic():
                # some problems have state that is updated independently of interaction
                # with the LMS, so they need to always be scored (e.g. combinedopenended, ORA1)
                # TODO This block is causing extra savepoints to be fired that are empty because no queries are executed
                # during the loop. When refactoring this code please keep this outer_atomic call in mind and ensure we
                # are not making unnecessary database queries.
                should_grade_section = any(
                    descriptor.always_recalculate_grades
                    for descriptor in section['xmoduledescriptors'])

                # If there are no problems that always have to be regraded, check to
                # see if any of our locations are in the scores from the submissions
                # API. If scores exist, we have to calculate grades for this section.
                if not should_grade_section:
                    should_grade_section = any(
                        descriptor.location.to_deprecated_string() in
                        submissions_scores
                        for descriptor in section['xmoduledescriptors'])

                if not should_grade_section:
                    should_grade_section = any(
                        descriptor.location in scores_client
                        for descriptor in section['xmoduledescriptors'])

                # If we haven't seen a single problem in the section, we don't have
                # to grade it at all! We can assume 0%
                if should_grade_section:
                    scores = []

                    def create_module(descriptor):
                        '''creates an XModule instance given a descriptor'''
                        # TODO: We need the request to pass into here. If we could forego that, our arguments
                        # would be simpler
                        return get_module_for_descriptor(student,
                                                         request,
                                                         descriptor,
                                                         field_data_cache,
                                                         course.id,
                                                         course=course)

                    descendants = yield_dynamic_descriptor_descendants(
                        section_descriptor, student.id, create_module)
                    for module_descriptor in descendants:
                        user_access = has_access(
                            student, 'load', module_descriptor,
                            module_descriptor.location.course_key)
                        if not user_access:
                            continue

                        (correct, total) = get_score(
                            student,
                            module_descriptor,
                            create_module,
                            scores_client,
                            submissions_scores,
                            max_scores_cache,
                        )
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                            if total > 1:
                                correct = random.randrange(
                                    max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        graded = module_descriptor.graded
                        if not total > 0:
                            # We cannot grade a problem with zero total points (e.g. 12/0),
                            # because we may need to express it as a percentage
                            graded = False

                        scores.append(
                            Score(correct, total, graded,
                                  module_descriptor.display_name_with_default,
                                  module_descriptor.location))

                    __, graded_total = graders.aggregate_scores(
                        scores, section_name)
                    if keep_raw_scores:
                        raw_scores += scores
                else:
                    graded_total = Score(0.0, 1.0, True, section_name, None)

                # Add the graded total to totaled_scores
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. "
                        + str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    with outer_atomic():
        # Grading policy might be overridden by a CCX, need to reset it
        course.set_grading_policy(course.grading_policy)
        grade_summary = course.grader.grade(
            totaled_scores,
            generate_random_scores=settings.GENERATE_PROFILE_SCORES)

        # We round the grade here, to make sure that the grade is a whole percentage and
        # doesn't get displayed differently than it gets graded
        grade_summary['percent'] = round(grade_summary['percent'] * 100 +
                                         0.05) / 100

        letter_grade = grade_for_percentage(course.grade_cutoffs,
                                            grade_summary['percent'])
        grade_summary['grade'] = letter_grade
        # make this available, e.g. for instructor download & debugging
        grade_summary['totaled_scores'] = totaled_scores
        if keep_raw_scores:
            # way to get all RAW scores out to instructor
            # so grader can be double-checked
            grade_summary['raw_scores'] = raw_scores

        max_scores_cache.push_to_remote()

    return grade_summary
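
The rounding step above is easy to misread, so here is a standalone sketch (illustration only, not edx-platform code) of what round(percent * 100 + 0.05) / 100 does: it snaps the fraction to a whole percentage, with the +0.05 nudging borderline values (x.45 to x.49) up to the next whole percent.

def round_to_whole_percent(percent):
    """Round a 0..1 fraction to a whole percentage, biased slightly upward."""
    return round(percent * 100 + 0.05) / 100

assert round_to_whole_percent(0.75) == 0.75    # 75.05 rounds to 75
assert round_to_whole_percent(0.496) == 0.5    # 49.65 rounds up to 50
assert round_to_whole_percent(0.0149) == 0.02  # 1.54 rounds up to 2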
Example 2
def _progress_summary(student, request, course):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, student, course, depth=None
        )
        # TODO: We need the request to pass into here. If we could
        # forego that, our arguments would be simpler
        course_module = get_module_for_descriptor(
            student, request, course, field_data_cache, course.id, course=course
        )
        if not course_module:
            # This student must not have access to the course.
            return None

        course_module = getattr(course_module, '_x_module', course_module)

    submissions_scores = sub_api.get_scores(
        course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []

        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendants(
                        section_module, student.id, module_creator
                ):
                    course_id = course.id
                    (correct, total) = get_score(
                        course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores
                    )
                    if correct is None and total is None:
                        continue

                    scores.append(
                        Score(
                            correct,
                            total,
                            graded,
                            module_descriptor.display_name_with_default,
                            module_descriptor.location
                        )
                    )

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': section_module.due,
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    return chapters
Example 3
def score_from_dict(encoded):
    """ Given a formerly JSON-encoded Score tuple, return the Score tuple """
    if encoded['module_id']:
        encoded['module_id'] = UsageKey.from_string(encoded['module_id'])
    return Score(**encoded)
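
A hedged usage sketch for score_from_dict: assuming Score is the five-field namedtuple used in Example 12 (earned, possible, graded, section, module_id), and that score_from_dict from Example 3 is in scope, a JSON round-trip might look like this. score_to_dict is a hypothetical inverse helper, not part of the original code.

import json
from collections import namedtuple

Score = namedtuple('Score', ['earned', 'possible', 'graded', 'section', 'module_id'])

def score_to_dict(score):
    """Encode a Score so that score_from_dict can reconstruct it."""
    encoded = dict(score._asdict())
    if encoded['module_id']:
        encoded['module_id'] = unicode(encoded['module_id'])  # UsageKey -> string
    return encoded

original = Score(earned=3, possible=5.0, graded=True, section='hw1', module_id=None)
decoded = json.loads(json.dumps(score_to_dict(original)))
decoded = dict((str(key), value) for key, value in decoded.items())  # str keys for **
assert score_from_dict(decoded) == original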
Example 4
def _progress_summary(student, request, course, field_data_cache=None, scores_client=None):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with outer_atomic():
        if field_data_cache is None:
            field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(field_data_cache)

        course_module = get_module_for_descriptor(
            student, request, course, field_data_cache, course.id, course=course
        )
        if not course_module:
            return None

        course_module = getattr(course_module, '_x_module', course_module)

    # We need to import this here to avoid a circular dependency of the form:
    # XBlock --> submissions --> Django Rest Framework error strings -->
    # Django translation --> ... --> courseware --> submissions
    from submissions import api as sub_api  # installed from the edx-submissions repository
    with outer_atomic():
        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
        )

        max_scores_cache = MaxScoresCache.create_for_course(course)
        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    # Check for gated content
    gated_content = gating_api.get_gated_content(course, student)

    chapters = []
    locations_to_children = defaultdict(list)
    locations_to_weighted_scores = {}
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []
        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with outer_atomic():
                if section_module.hide_from_toc or unicode(section_module.location) in gated_content:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendants(
                        section_module, student.id, module_creator
                ):
                    location_parent = module_descriptor.parent.replace(version=None, branch=None)
                    location_to_save = module_descriptor.location.replace(version=None, branch=None)
                    locations_to_children[location_parent].append(location_to_save)
                    (correct, total) = get_score(
                        student,
                        module_descriptor,
                        module_creator,
                        scores_client,
                        submissions_scores,
                        max_scores_cache,
                    )
                    if correct is None and total is None:
                        continue

                    weighted_location_score = Score(
                        correct,
                        total,
                        graded,
                        module_descriptor.display_name_with_default_escaped,
                        module_descriptor.location
                    )

                    scores.append(weighted_location_score)
                    locations_to_weighted_scores[location_to_save] = weighted_location_score

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default_escaped)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default_escaped,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': section_module.due,
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default_escaped,
            'display_name': chapter_module.display_name_with_default_escaped,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    max_scores_cache.push_to_remote()

    return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children)
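
ProgressSummary is constructed here but not defined in the excerpt. Below is a minimal sketch consistent with the three arguments passed above; the score_for_module method is an assumption about its behavior (returning a block's own weighted score, or summing over its children), not the actual edx-platform class.

from collections import namedtuple

class ProgressSummary(namedtuple('ProgressSummary', ['chapters', 'weighted_scores', 'children_map'])):
    """Sketch: chapters for display, plus maps to recompute per-block totals."""

    def score_for_module(self, location):
        """Return (earned, possible) for the block at `location`.

        If the block was scored directly it is in weighted_scores; otherwise
        sum over its children, mirroring how locations_to_weighted_scores and
        locations_to_children are populated above."""
        if location in self.weighted_scores:
            score = self.weighted_scores[location]
            return score.earned, score.possible
        earned = possible = 0
        for child in self.children_map.get(location, []):
            child_earned, child_possible = self.score_for_module(child)
            earned += child_earned
            possible += child_possible
        return earned, possible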
Example 5
def _grade(student, request, course, keep_raw_scores):
    """
    Unwrapped version of "grade"

    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
      up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
      make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
      for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    grading_context = course.grading_context
    raw_scores = []

    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
    # scores that were registered with the submissions API, which for the moment
    # means only openassessment (edx-ora2)
    submissions_scores = sub_api.get_scores(
        course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
    )

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            # some problems have state that is updated independently of interaction
            # with the LMS, so they need to always be scored (e.g. foldit,
            # combinedopenended)
            should_grade_section = any(
                descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
            )

            # If there are no problems that always have to be regraded, check to
            # see if any of our locations are in the scores from the submissions
            # API. If scores exist, we have to calculate grades for this section.
            if not should_grade_section:
                should_grade_section = any(
                    descriptor.location.to_deprecated_string() in submissions_scores
                    for descriptor in section['xmoduledescriptors']
                )

            if not should_grade_section:
                with manual_transaction():
                    should_grade_section = StudentModule.objects.filter(
                        student=student,
                        module_state_key__in=[
                            descriptor.location for descriptor in section['xmoduledescriptors']
                        ]
                    ).exists()

            # If we haven't seen a single problem in the section, we don't have
            # to grade it at all! We can assume 0%
            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''creates an XModule instance given a descriptor'''
                    # TODO: We need the request to pass into here. If we could forego that, our arguments
                    # would be simpler
                    with manual_transaction():
                        field_data_cache = FieldDataCache([descriptor], course.id, student)
                    return get_module_for_descriptor(
                        student, request, descriptor, field_data_cache, course.id, course=course
                    )

                for module_descriptor in yield_dynamic_descriptor_descendants(
                        section_descriptor, student.id, create_module
                ):

                    (correct, total) = get_score(
                        course.id, student, module_descriptor, create_module, scores_cache=submissions_scores
                    )
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  	# for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        # We cannot grade a problem with zero total points (e.g. 12/0),
                        # because we may need to express it as a percentage
                        graded = False

                    scores.append(
                        Score(
                            correct,
                            total,
                            graded,
                            module_descriptor.display_name_with_default,
                            module_descriptor.location
                        )
                    )

                _, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name, None)

            # Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.info(
                    "Unable to grade a section with a total possible score of zero. " +
                    str(section_descriptor.location)
                )

        totaled_scores[section_format] = format_scores

    # Grading policy might be overridden by a CCX, need to reset it
    course.set_grading_policy(course.grading_policy)
    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary['totaled_scores'] = totaled_scores  # make this available, e.g. for instructor download & debugging
    if keep_raw_scores:
        # way to get all RAW scores out to instructor
        # so grader can be double-checked
        grade_summary['raw_scores'] = raw_scores
    return grade_summary
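
The three-stage should_grade_section check in this version (always_recalculate_grades, then the submissions API, then StudentModule rows) reads more clearly as one short-circuiting predicate. A hypothetical refactor for illustration, assuming the same imports as the example above:

def should_grade(student, section, submissions_scores):
    """Return True if any problem in the section always recalculates, has a
    submissions-API score, or has stored LMS state (a StudentModule row)."""
    descriptors = section['xmoduledescriptors']
    if any(descriptor.always_recalculate_grades for descriptor in descriptors):
        return True
    if any(descriptor.location.to_deprecated_string() in submissions_scores
           for descriptor in descriptors):
        return True
    return StudentModule.objects.filter(  # assumes StudentModule is imported
        student=student,
        module_state_key__in=[descriptor.location for descriptor in descriptors],
    ).exists()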
Example 6
def progress_summary(student, request, course, field_data_cache):
    """
    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade
        field_data_cache: A FieldDataCache initialized with all
             instance_modules for the student

    If the student does not have access to load the course module, this function
    will return None.

    """

    # TODO: We need the request to pass into here. If we could forego that, our arguments
    # would be simpler
    course_module = get_module(student, request, course.location, field_data_cache, course.id, depth=None)
    if not course_module:
        # This student must not have access to the course.
        return None

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []
        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            if section_module.hide_from_toc:
                continue

            # Same for sections
            graded = section_module.graded
            scores = []

            module_creator = section_module.system.get_module

            for module_descriptor in yield_dynamic_descriptor_descendents(section_module.descriptor, module_creator):

                course_id = course.id
                (correct, total) = get_score(course_id, student, module_descriptor, module_creator, field_data_cache)
                if correct is None and total is None:
                    continue

                scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

            scores.reverse()
            section_total, _ = graders.aggregate_scores(
                scores, section_module.display_name_with_default)

            module_format = section_module.format if section_module.format is not None else ''
            sections.append({
                'display_name': section_module.display_name_with_default,
                'url_name': section_module.url_name,
                'scores': scores,
                'section_total': section_total,
                'format': module_format,
                'due': section_module.due,
                'graded': graded,
            })

        chapters.append({'course': course.display_name_with_default,
                         'display_name': chapter_module.display_name_with_default,
                         'url_name': chapter_module.url_name,
                         'sections': sections})

    return chapters
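
Because this version returns plain chapter/section dicts, a caller can walk the structure directly. A sketch of an illustrative consumer (the real progress page renders a template instead):

def print_progress(chapters):
    """Print the per-section totals from the structure returned above."""
    for chapter in chapters:
        print(chapter['display_name'])
        for section in chapter['sections']:
            total = section['section_total']  # an aggregate Score tuple
            print('  %s: %s/%s (graded=%s)' % (
                section['display_name'], total.earned, total.possible, section['graded']))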
Example 7
def summary(student, course, course_structure=None):
    """
    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.
    - None if the student does not have access to load the course module.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    """
    if course_structure is None:
        course_structure = get_course_blocks(student, course.location)
    if not len(course_structure):
        return ProgressSummary()
    scorable_locations = [
        block_key for block_key in course_structure
        if possibly_scored(block_key)
    ]

    with outer_atomic():
        scores_client = ScoresClient.create_for_locations(
            course.id, student.id, scorable_locations)

    # We need to import this here to avoid a circular dependency of the form:
    # XBlock --> submissions --> Django Rest Framework error strings -->
    # Django translation --> ... --> courseware --> submissions
    from submissions import api as sub_api  # installed from the edx-submissions repository
    with outer_atomic():
        submissions_scores = sub_api.get_scores(
            unicode(course.id), anonymous_id_for_user(student, course.id))

    # Check for gated content
    gated_content = gating_api.get_gated_content(course, student)

    chapters = []
    locations_to_weighted_scores = {}

    for chapter_key in course_structure.get_children(
            course_structure.root_block_usage_key):
        chapter = course_structure[chapter_key]
        sections = []
        for section_key in course_structure.get_children(chapter_key):
            if unicode(section_key) in gated_content:
                continue

            section = course_structure[section_key]

            graded = getattr(section, 'graded', False)
            scores = []

            for descendant_key in course_structure.post_order_traversal(
                    filter_func=possibly_scored,
                    start_node=section_key,
            ):
                descendant = course_structure[descendant_key]

                (correct, total) = get_score(
                    student,
                    descendant,
                    scores_client,
                    submissions_scores,
                )
                if correct is None and total is None:
                    continue

                weighted_location_score = Score(
                    correct,
                    total,
                    graded,
                    block_metadata_utils.display_name_with_default_escaped(descendant),
                    descendant.location,
                )

                scores.append(weighted_location_score)
                locations_to_weighted_scores[descendant.location] = weighted_location_score

            escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
            section_total, _ = graders.aggregate_scores(scores, escaped_section_name)

            sections.append({
                'display_name': escaped_section_name,
                'url_name': block_metadata_utils.url_name_for_block(section),
                'scores': scores,
                'section_total': section_total,
                'format': getattr(section, 'format', ''),
                'due': getattr(section, 'due', None),
                'graded': graded,
            })

        chapters.append({
            'course': course.display_name_with_default_escaped,
            'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
            'url_name': block_metadata_utils.url_name_for_block(chapter),
            'sections': sections
        })

    return ProgressSummary(chapters, locations_to_weighted_scores,
                           course_structure.get_children)
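
possibly_scored is used above as a traversal filter but not shown. A sketch of its assumed behavior: it tests a block's type against the set of types that can produce (or contain) a score. The type list here is illustrative, not the real registry-derived set.

SCORABLE_BLOCK_TYPES = frozenset(['problem', 'openassessment', 'sequential', 'vertical'])

def possibly_scored(usage_key):
    """Return True if a block of this type could contribute to a score."""
    return usage_key.block_type in SCORABLE_BLOCK_TYPES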
Example 8
class GraderTest(unittest.TestCase):
    '''Tests grader implementations'''

    empty_gradesheet = {}

    incomplete_gradesheet = {
        'Homework': [],
        'Lab': [],
        'Midterm': [],
    }

    test_gradesheet = {
        'Homework': [
            Score(earned=2, possible=20.0, graded=True, section='hw1'),
            Score(earned=16, possible=16.0, graded=True, section='hw2'),
        ],
        # The dropped scores should be from the assignments that don't exist yet
        'Lab': [
            Score(earned=1, possible=2.0, graded=True, section='lab1'),  # Dropped
            Score(earned=1, possible=1.0, graded=True, section='lab2'),
            Score(earned=1, possible=1.0, graded=True, section='lab3'),
            Score(earned=5, possible=25.0, graded=True, section='lab4'),  # Dropped
            Score(earned=3, possible=4.0, graded=True, section='lab5'),  # Dropped
            Score(earned=6, possible=7.0, graded=True, section='lab6'),
            Score(earned=5, possible=6.0, graded=True, section='lab7'),
        ],
        'Midterm': [
            Score(earned=50.5, possible=100, graded=True, section="Midterm Exam"),
        ],
    }

    def test_single_section_grader(self):
        midterm_grader = graders.SingleSectionGrader("Midterm", "Midterm Exam")
        lab4_grader = graders.SingleSectionGrader("Lab", "lab4")
        bad_lab_grader = graders.SingleSectionGrader("Lab", "lab42")

        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet),
                       bad_lab_grader.grade(self.test_gradesheet)]:
            self.assertEqual(len(graded['section_breakdown']), 1)
            self.assertEqual(graded['percent'], 0.0)

        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 1)

        graded = lab4_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2)
        self.assertEqual(len(graded['section_breakdown']), 1)

    def test_assignment_format_grader(self):
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        no_drop_grader = graders.AssignmentFormatGrader("Homework", 12, 0)
        # Even though the minimum number is 3, this should grade correctly when
        # 7 assignments are found
        overflow_grader = graders.AssignmentFormatGrader("Lab", 3, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)

        # Test the grading of an empty gradesheet
        for graded in [homework_grader.grade(self.empty_gradesheet),
                       no_drop_grader.grade(self.empty_gradesheet),
                       homework_grader.grade(self.incomplete_gradesheet),
                       no_drop_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes 12 sections, plus one summary
            self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = homework_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)  # (100% + 10%) / 10 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = no_drop_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0916666666666666)  # (100% + 10%) / 12 assignments
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)

        graded = overflow_grader.grade(self.test_gradesheet)
        # the average of the 5 highest of the 7 lab scores (2 dropped)
        self.assertAlmostEqual(graded['percent'], 0.8880952380952382)
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

        graded = lab_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.9226190476190477)
        self.assertEqual(len(graded['section_breakdown']), 7 + 1)

    def test_assignment_format_grader_on_single_section_entry(self):
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)
        # Test the grading on a section with one item:
        for graded in [midterm_grader.grade(self.empty_gradesheet),
                       midterm_grader.grade(self.incomplete_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            # Make sure the breakdown includes just the one summary
            self.assertEqual(len(graded['section_breakdown']), 0 + 1)
            self.assertEqual(graded['section_breakdown'][0]['label'], 'Midterm')

        graded = midterm_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.505)
        self.assertEqual(len(graded['section_breakdown']), 0 + 1)

    def test_weighted_subsections_grader(self):
        # First, a few sub graders
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        lab_grader = graders.AssignmentFormatGrader("Lab", 7, 3)
        # phasing out the use of SingleSectionGraders, and instead using AssignmentFormatGraders that
        # will act like SingleSectionGraders on single sections.
        midterm_grader = graders.AssignmentFormatGrader("Midterm", 1, 0)

        weighted_grader = graders.WeightedSubsectionsGrader([
            (homework_grader, homework_grader.category, 0.25),
            (lab_grader, lab_grader.category, 0.25),
            (midterm_grader, midterm_grader.category, 0.5),
        ])

        over_one_weights_grader = graders.WeightedSubsectionsGrader([
            (homework_grader, homework_grader.category, 0.5),
            (lab_grader, lab_grader.category, 0.5),
            (midterm_grader, midterm_grader.category, 0.5),
        ])

        # The midterm should have all the weight on this one
        zero_weights_grader = graders.WeightedSubsectionsGrader([
            (homework_grader, homework_grader.category, 0.0),
            (lab_grader, lab_grader.category, 0.0),
            (midterm_grader, midterm_grader.category, 0.5),
        ])

        # This should always have a final percent of zero
        all_zero_weights_grader = graders.WeightedSubsectionsGrader([
            (homework_grader, homework_grader.category, 0.0),
            (lab_grader, lab_grader.category, 0.0),
            (midterm_grader, midterm_grader.category, 0.0),
        ])

        empty_grader = graders.WeightedSubsectionsGrader([])

        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = over_one_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.7688095238095238)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.2525)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = all_zero_weights_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        for graded in [weighted_grader.grade(self.empty_gradesheet),
                       weighted_grader.grade(self.incomplete_gradesheet),
                       zero_weights_grader.grade(self.empty_gradesheet),
                       all_zero_weights_grader.grade(self.empty_gradesheet)]:
            self.assertAlmostEqual(graded['percent'], 0.0)
            self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
            self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

    def test_grader_from_conf(self):
        # Confs always produce a graders.WeightedSubsectionsGrader, so we test this
        # by repeating the test from test_weighted_subsections_grader above, but
        # generating the graders with confs.

        weighted_grader = graders.grader_from_conf([
            {
                'type': "Homework",
                'min_count': 12,
                'drop_count': 2,
                'short_label': "HW",
                'weight': 0.25,
            },
            {
                'type': "Lab",
                'min_count': 7,
                'drop_count': 3,
                'category': "Labs",
                'weight': 0.25
            },
            {
                'type': "Midterm",
                'name': "Midterm Exam",
                'short_label': "Midterm",
                'weight': 0.5,
            },
        ])

        empty_grader = graders.grader_from_conf([])

        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']), (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

        # Test that graders can also be used instead of lists of dictionaries
        homework_grader = graders.AssignmentFormatGrader("Homework", 12, 2)
        homework_grader2 = graders.grader_from_conf(homework_grader)

        graded = homework_grader2.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
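
The 0.11 homework result asserted above can be reproduced by hand: with min_count=12 and drop_count=2, the grader pads the two real scores (2/20 = 10% and 16/16 = 100%) with ten 0% placeholders, drops the two lowest, and averages the remaining ten.

# Reproduces homework_grader's 0.11 by hand (min_count=12, drop_count=2).
percents = [2 / 20.0, 16 / 16.0] + [0.0] * 10   # pad to min_count with 0% scores
kept = sorted(percents)[2:]                      # drop the 2 lowest
assert len(kept) == 10
assert abs(sum(kept) / len(kept) - 0.11) < 1e-9  # (10% + 100%) / 10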
Example 9
def grade(student, request, course, field_data_cache=None, keep_raw_scores=False):
    """
    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
        up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
        make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores for every graded module

    More information on the format is in the docstring for CourseGrader.
    """

    grading_context = course.grading_context
    raw_scores = []

    if field_data_cache is None:
        field_data_cache = FieldDataCache(grading_context['all_descriptors'], course.id, student)

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            should_grade_section = False
            # If we haven't seen a single problem in the section, we don't have to grade it at all! We can assume 0%
            for moduledescriptor in section['xmoduledescriptors']:
                # some problems have state that is updated independently of interaction
                # with the LMS, so they need to always be scored (e.g. foldit)
                if moduledescriptor.always_recalculate_grades:
                    should_grade_section = True
                    break

                # Create a fake key to pull out a StudentModule object from the FieldDataCache

                key = DjangoKeyValueStore.Key(
                    Scope.user_state,
                    student.id,
                    moduledescriptor.location,
                    None
                )
                if field_data_cache.find(key):
                    should_grade_section = True
                    break

            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''creates an XModule instance given a descriptor'''
                    # TODO: We need the request to pass into here. If we could forego that, our arguments
                    # would be simpler
                    return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)

                for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):

                    (correct, total) = get_score(course.id, student, module_descriptor, create_module, field_data_cache)
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  	# for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        # We cannot grade a problem with zero total points (e.g. 12/0),
                        # because we may need to express it as a percentage
                        graded = False

                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

                _, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name)

            # Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.exception("Unable to grade a section with a total possible score of zero. " +
                              str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary['totaled_scores'] = totaled_scores  # make this available, e.g. for instructor download & debugging
    if keep_raw_scores:
        grade_summary['raw_scores'] = raw_scores        # way to get all RAW scores out to instructor
                                                        # so grader can be double-checked
    return grade_summary
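
grade_for_percentage is called here but not shown. A minimal sketch of its assumed behavior: grade_cutoffs maps letter grades to minimum fractions (e.g. {'A': 0.9, 'B': 0.8, 'C': 0.6}), the highest cutoff that the percentage meets wins, and None means a failing grade.

def grade_for_percentage(grade_cutoffs, percentage):
    """Return the letter grade whose cutoff the percentage meets, or None."""
    # Check cutoffs from highest to lowest; the first one met wins.
    for letter in sorted(grade_cutoffs, key=grade_cutoffs.get, reverse=True):
        if percentage >= grade_cutoffs[letter]:
            return letter
    return None

assert grade_for_percentage({'A': 0.9, 'B': 0.8, 'C': 0.6}, 0.85) == 'B'
assert grade_for_percentage({'A': 0.9, 'B': 0.8, 'C': 0.6}, 0.5) is None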
Example 10
    def test_weighted_grading(self):
        scores = []
        # Define subtraction on Score so assertAlmostEqual can compare two
        # Score tuples: it computes (first - second) and checks that the
        # result is close to zero.
        Score.__sub__ = lambda me, other: (
            me.earned - other.earned) + (me.possible - other.possible)

        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(
            earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(
            earned=0, possible=0, graded=True, section="summary"))

        scores.append(Score(
            earned=0, possible=5, graded=False, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(
            earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(
            earned=0, possible=0, graded=True, section="summary"))

        scores.append(Score(
            earned=3, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(
            earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(
            earned=3, possible=5, graded=True, section="summary"))

        scores.append(Score(
            earned=2, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(
            earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(
            earned=5, possible=10, graded=True, section="summary"))
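
The assertions above pin down aggregate_scores: it returns an (all_total, graded_total) pair, where all_total sums earned/possible over every Score and graded_total sums only those with graded=True. A sketch consistent with the test, assuming the four-field Score used here; the real implementation lives in the graders module.

def aggregate_scores(scores, section_name="summary"):
    """Return (all_total, graded_total) as exercised by the test above."""
    total_earned = sum(score.earned for score in scores)
    total_possible = sum(score.possible for score in scores)
    graded_earned = sum(score.earned for score in scores if score.graded)
    graded_possible = sum(score.possible for score in scores if score.graded)
    all_total = Score(total_earned, total_possible, False, section_name)
    graded_total = Score(graded_earned, graded_possible, True, section_name)
    return all_total, graded_total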
Example 11
    def progress_summary(student,
                         request,
                         course,
                         field_data_cache=None,
                         scores_client=None,
                         grading_type='vertical'):
        """
        This pulls a summary of all problems in the course.

        Returns
        - courseware_summary is a summary of all sections with problems in the course.
        It is organized as an array of chapters, each containing an array of sections,
        each containing an array of scores. This contains information for graded and
        ungraded problems, and is good for displaying a course summary with due dates,
        etc.

        Arguments:
            student: A User object for the student to grade
            course: A Descriptor containing the course to grade

        If the student does not have access to load the course module, this function
        will return None.

        """

        with manual_transaction():
            if field_data_cache is None:
                field_data_cache = field_data_cache_for_grading(
                    course, student)
            if scores_client is None:
                scores_client = ScoresClient.from_field_data_cache(
                    field_data_cache)

            course_module = get_module_for_descriptor(student,
                                                      request,
                                                      course,
                                                      field_data_cache,
                                                      course.id,
                                                      course=course)
            if not course_module:
                return None

            course_module = getattr(course_module, '_x_module', course_module)

        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(),
            anonymous_id_for_user(student, course.id))
        max_scores_cache = MaxScoresCache.create_for_course(course)
        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

        blocks_stack = [course_module]
        blocks_dict = {}

        while blocks_stack:
            curr_block = blocks_stack.pop()
            with manual_transaction():
                # Skip if the block is hidden
                if curr_block.hide_from_toc:
                    continue

                key = unicode(curr_block.scope_ids.usage_id)
                children = curr_block.get_display_items() if curr_block.category != grading_type else []
                block = {
                    'display_name': curr_block.display_name_with_default,
                    'block_type': curr_block.category,
                    'url_name': curr_block.url_name,
                    'children': [unicode(child.scope_ids.usage_id) for child in children],
                }

                if curr_block.category == grading_type:
                    graded = curr_block.graded
                    scores = []

                    module_creator = curr_block.xmodule_runtime.get_module
                    for module_descriptor in yield_dynamic_descriptor_descendants(
                            curr_block, student.id, module_creator):
                        (correct, total) = get_score(
                            student,
                            module_descriptor,
                            module_creator,
                            scores_client,
                            submissions_scores,
                            max_scores_cache,
                        )

                        if correct is None and total is None:
                            continue

                        scores.append(
                            Score(correct, total, graded,
                                  module_descriptor.display_name_with_default,
                                  module_descriptor.location))

                    scores.reverse()
                    total, _ = aggregate_scores(
                        scores, curr_block.display_name_with_default)

                    module_format = curr_block.format if curr_block.format is not None else ''
                    block.update({
                        'scores': scores,
                        'total': total,
                        'format': module_format,
                        'due': curr_block.due,
                        'graded': graded,
                    })

                blocks_dict[key] = block
                # Add this block's children to the stack so that we can traverse them as well.
                blocks_stack.extend(children)

        max_scores_cache.push_to_remote()

        return {
            'root': unicode(course.scope_ids.usage_id),
            'blocks': blocks_dict,
        }
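
A hedged sketch of how a client might consume the {'root': ..., 'blocks': ...} structure this version returns, following children ids from the root (blocks hidden from the TOC were skipped, so a listed child id may be absent):

def iter_blocks(summary):
    """Depth-first walk over the blocks dict returned above."""
    blocks = summary['blocks']
    stack = [summary['root']]
    while stack:
        block = blocks.get(stack.pop())
        if block is None:  # child was hidden and never added
            continue
        yield block
        stack.extend(block.get('children', []))

# e.g. collect the totals of all graded blocks:
# totals = [b['total'] for b in iter_blocks(summary) if b.get('graded')]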
Example 12
class GraderTest(unittest.TestCase):
    '''Tests grader implementations'''
    test_gradesheet = {
        'Homework': [
            Score(earned=2,
                  possible=20.0,
                  graded=True,
                  section='hw1',
                  module_id=None),
            Score(earned=16,
                  possible=16.0,
                  graded=True,
                  section='hw2',
                  module_id=None)
        ],
        # The dropped scores should be from the assignments that don't exist yet
        'Lab': [
            Score(earned=1,
                  possible=2.0,
                  graded=True,
                  section='lab1',
                  module_id=None),  # Dropped
            Score(earned=1,
                  possible=1.0,
                  graded=True,
                  section='lab2',
                  module_id=None),
            Score(earned=1,
                  possible=1.0,
                  graded=True,
                  section='lab3',
                  module_id=None),
            Score(earned=5,
                  possible=25.0,
                  graded=True,
                  section='lab4',
                  module_id=None),  # Dropped
            Score(earned=3,
                  possible=4.0,
                  graded=True,
                  section='lab5',
                  module_id=None),  # Dropped
            Score(earned=6,
                  possible=7.0,
                  graded=True,
                  section='lab6',
                  module_id=None),
            Score(earned=5,
                  possible=6.0,
                  graded=True,
                  section='lab7',
                  module_id=None)
        ],
        'Midterm': [
            Score(earned=50.5,
                  possible=100,
                  graded=True,
                  section="Midterm Exam",
                  module_id=None),
        ],
    }

    def test_grader_from_conf(self):

        # Confs always produce a graders.WeightedSubsectionsGrader, so we test this by repeating the test
        # in test_graders.WeightedSubsectionsGrader, but generate the graders with confs.

        weighted_grader = graders.grader_from_conf([
            {
                'type': "Homework",
                'min_count': 12,
                'drop_count': 2,
                'short_label': "HW",
                'weight': 0.25,
            },
            {
                'type': "Lab",
                'min_count': 7,
                'drop_count': 3,
                'category': "Labs",
                'weight': 0.25
            },
            {
                'type': "Midterm",
                'name': "Midterm Exam",
                'short_label': "Midterm",
                'weight': 0.5,
            },
        ])

        empty_grader = graders.grader_from_conf([])

        graded = weighted_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.5106547619047619)
        self.assertEqual(len(graded['section_breakdown']),
                         (12 + 1) + (7 + 1) + 1)
        self.assertEqual(len(graded['grade_breakdown']), 3)

        graded = empty_grader.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.0)
        self.assertEqual(len(graded['section_breakdown']), 0)
        self.assertEqual(len(graded['grade_breakdown']), 0)

        # Test that graders can also be used instead of lists of dictionaries
        homework_grader = graders.get_grader('AssignmentFormatGrader')(
            "Homework", 12, 2)
        homework_grader2 = graders.grader_from_conf(homework_grader)

        graded = homework_grader2.grade(self.test_gradesheet)
        self.assertAlmostEqual(graded['percent'], 0.11)
        self.assertEqual(len(graded['section_breakdown']), 12 + 1)
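
As a sanity check (not part of the original test), the expected 0.5106547619047619 can be reproduced by hand from the gradesheet and grader configuration above:

# Homework: min_count=12, drop_count=2. Ten assignments are missing and count
# as 0; the two lowest scores (two of those zeros) are dropped, leaving 10.
homework = (2 / 20.0 + 16 / 16.0) / 10                # 0.11
# Lab: min_count=7, drop_count=3. The three lowest percentages are dropped:
# lab1 (0.5), lab4 (0.2), and lab5 (0.75).
lab = (1 / 1.0 + 1 / 1.0 + 6 / 7.0 + 5 / 6.0) / 4     # ~0.9226190476
midterm = 50.5 / 100.0                                # 0.505
expected = 0.25 * homework + 0.25 * lab + 0.5 * midterm
assert abs(expected - 0.5106547619047619) < 1e-12     # matches the test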
Example No. 13
def _calculate_totaled_scores(
        student,
        grading_context_result,
        submissions_scores,
        scores_client,
        keep_raw_scores,
):
    """
    Returns a tuple of totaled scores and raw scores, which can be passed to the grader.
    """
    raw_scores = []
    totaled_scores = {}
    for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
        format_scores = []
        for section_info in sections:
            section = section_info['section_block']
            section_name = block_metadata_utils.display_name_with_default(section)

            with outer_atomic():
                # Check to see if any of our locations are in the scores from
                # the submissions API. If scores exist, we have to calculate
                # grades for this section.
                should_grade_section = any(
                    unicode(descendant.location) in submissions_scores
                    for descendant in section_info['scored_descendants']
                )

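                # Otherwise fall back to the ScoresClient, which covers scores
                # recorded through the regular courseware scoring path (the
                # submissions API is checked separately above).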
                if not should_grade_section:
                    should_grade_section = any(
                        descendant.location in scores_client
                        for descendant in section_info['scored_descendants']
                    )

                # If we haven't seen a single problem in the section, we don't have
                # to grade it at all! We can assume 0%
                if should_grade_section:
                    scores = []

                    for descendant in section_info['scored_descendants']:

                        (correct, total) = get_score(
                            student,
                            descendant,
                            scores_client,
                            submissions_scores,
                        )
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                            if total > 1:
                                correct = random.randrange(max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        graded = descendant.graded
                        if not total > 0:
                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                            graded = False

                        scores.append(
                            Score(
                                correct,
                                total,
                                graded,
                                block_metadata_utils.display_name_with_default_escaped(descendant),
                                descendant.location
                            )
                        )

                    __, graded_total = graders.aggregate_scores(scores, section_name)
                    if keep_raw_scores:
                        raw_scores += scores
                else:
                    graded_total = Score(0.0, 1.0, True, section_name, None)

                # Add the graded total to totaled_scores
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. %s",
                        section.location,
                    )

        totaled_scores[section_format] = format_scores

    return totaled_scores, raw_scores
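
Illustrative only: the shape of the tuple this helper returns, with made-up Score values.

totaled_scores, raw_scores = _calculate_totaled_scores(
    student, grading_context_result, submissions_scores,
    scores_client, keep_raw_scores=True)
# totaled_scores maps each assignment format to one aggregate Score per
# graded section, e.g. (values invented for illustration):
#   {'Homework': [Score(earned=3.0, possible=5.0, graded=True,
#                       section='hw1', module_id=None)]}
# raw_scores is the flat list of per-problem Scores that fed those
# aggregates (empty unless keep_raw_scores is True).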
Example No. 14
def _progress_summary(student,
                      request,
                      course,
                      field_data_cache=None,
                      scores_client=None):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns:
        courseware_summary: A summary of all sections with problems in the
        course, organized as an array of chapters, each containing an array
        of sections, each containing an array of scores. It covers both
        graded and ungraded problems and is suitable for displaying a course
        summary with due dates, etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        if field_data_cache is None:
            field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(
                field_data_cache)

        course_module = get_module_for_descriptor(student,
                                                  request,
                                                  course,
                                                  field_data_cache,
                                                  course.id,
                                                  course=course)
        if not course_module:
            return None

        # Unwrap the underlying XModule if the runtime handed back a wrapper
        # (the wrapped module, when present, lives in '_x_module').
        course_module = getattr(course_module, '_x_module', course_module)

    submissions_scores = sub_api.get_scores(
        course.id.to_deprecated_string(),
        anonymous_id_for_user(student, course.id))
    max_scores_cache = MaxScoresCache.create_for_course(course)
    # For the moment, we have to get scorable_locations from field_data_cache
    # and not from scores_client, because scores_client is ignorant of things
    # in the submissions API. As a further refactoring step, submissions should
    # be hidden behind the ScoresClient.
    max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []

        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendants(
                        section_module, student.id, module_creator):
                    (correct, total) = get_score(
                        student,
                        module_descriptor,
                        module_creator,
                        scores_client,
                        submissions_scores,
                        max_scores_cache,
                    )
                    if correct is None and total is None:
                        continue

                    scores.append(
                        Score(correct, total, graded,
                              module_descriptor.display_name_with_default,
                              module_descriptor.location))

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': section_module.due,
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    max_scores_cache.push_to_remote()

    return chapters
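
A minimal sketch (not from the original code) of rendering the chapter summary returned above, e.g. for a plain-text progress report:

def render_progress(chapters):
    for chapter in chapters:
        print(chapter['display_name'])
        for section in chapter['sections']:
            # 'section_total' is the aggregate Score for the section.
            total = section['section_total']
            print('  %s [%s]: %s/%s%s' % (
                section['display_name'],
                section['format'] or 'ungraded',
                total.earned,
                total.possible,
                ' (graded)' if section['graded'] else '',
            ))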