# Example #1
    def grade(student, request, course, keep_raw_scores, field_data_cache, scores_client):
        """
        This grades a student as quickly as possible. It returns the
        output from the course grader, augmented with the final letter
        grade. The keys in the output are:

        - grade : A final letter grade.
        - percent : The final percent for the class (rounded up).
        - section_breakdown : A breakdown of each section that makes
          up the grade. (For display)
        - grade_breakdown : A breakdown of the major components that
          make up the final grade. (For display)
        - totaled_scores : the per-format score lists that were fed to the
          grader (exposed for instructor download and debugging).
        - raw_scores : present only if keep_raw_scores is True; contains a
          Score for every graded module so the grader can be double-checked.

        Arguments:
            student: the user being graded.
            request: the HTTP request; only needed so modules can be
                instantiated via get_module_for_descriptor (see TODO below).
            course: a CourseDescriptor.
            keep_raw_scores (bool): if True, include 'raw_scores' in the result.
            field_data_cache: a pre-built field data cache, or None to build
                one here (it is expensive, so callers may pass one in).
            scores_client: a pre-built ScoresClient, or None to derive one
                from the field data cache.

        More information on the format is in the docstring for CourseGrader.
        """
        # Build the expensive inputs lazily when the caller did not supply them.
        if field_data_cache is None:
            with manual_transaction():
                field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(field_data_cache)

        # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
        # scores that were registered with the submissions API, which for the moment
        # means only openassessment (edx-ora2)
        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
        )
        max_scores_cache = MaxScoresCache.create_for_course(course)
        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

        grading_context = course.grading_context
        raw_scores = []

        totaled_scores = {}
        # This next complicated loop is just to collect the totaled_scores, which is
        # passed to the grader
        for section_format, sections in grading_context['graded_sections'].iteritems():
            format_scores = []
            for section in sections:
                section_descriptor = section['section_descriptor']
                section_name = section_descriptor.display_name_with_default

                # some problems have state that is updated independently of interaction
                # with the LMS, so they need to always be scored. (E.g. foldit.,
                # combinedopenended)
                should_grade_section = any(
                    descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
                )

                # If there are no problems that always have to be regraded, check to
                # see if any of our locations are in the scores from the submissions
                # API. If scores exist, we have to calculate grades for this section.
                if not should_grade_section:
                    should_grade_section = any(
                        descriptor.location.to_deprecated_string() in submissions_scores
                        for descriptor in section['xmoduledescriptors']
                    )

                # Finally, check whether any location has a score recorded in
                # scores_client (presumably regular LMS courseware scores —
                # confirm against ScoresClient.__contains__).
                if not should_grade_section:
                    should_grade_section = any(
                        descriptor.location in scores_client for descriptor in section['xmoduledescriptors']
                    )

                # If we haven't seen a single problem in the section, we don't have
                # to grade it at all! We can assume 0%
                if should_grade_section:
                    scores = []

                    def create_module(descriptor):
                        '''Create an XModule instance for `descriptor`.'''
                        # TODO: We need the request to pass into here. If we could forego that, our arguments
                        # would be simpler
                        return get_module_for_descriptor(
                            student, request, descriptor, field_data_cache, course.id, course=course
                        )

                    descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module)
                    for module_descriptor in descendants:
                        (correct, total) = get_score(
                            student,
                            module_descriptor,
                            create_module,
                            scores_client,
                            submissions_scores,
                            max_scores_cache,
                        )
                        # (None, None) means "this module has no score at all";
                        # skip it rather than counting it as 0/0.
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:    # for debugging!
                            if total > 1:
                                correct = random.randrange(max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        graded = module_descriptor.graded
                        if not total > 0:
                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                            graded = False

                        scores.append(
                            Score(
                                correct,
                                total,
                                graded,
                                module_descriptor.display_name_with_default,
                                module_descriptor.location
                            )
                        )

                    __, graded_total = aggregate_section_scores(
                        scores, section_name, getattr(section_descriptor, 'weight', 1.0)
                    )
                    if keep_raw_scores:
                        raw_scores += scores
                else:
                    # No scored problem anywhere in the section: assume 0 / 1.
                    graded_total = WeightedScore(
                        0.0, 1.0, True, section_name, None, getattr(section_descriptor, 'weight', 1.0)
                    )

                # Add the graded total to totaled_scores
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. {}".format(
                            section_descriptor.location
                        )
                    )

            totaled_scores[section_format] = format_scores

        # Grading policy might be overriden by a CCX, need to reset it
        course.set_grading_policy(course.grading_policy)
        grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

        # We round the grade here, to make sure that the grade is an whole percentage and
        # doesn't get displayed differently than it gets grades
        # NOTE(review): the +0.05 nudge shifts the rounding boundary down by
        # 0.05 of a percent (rounds up at *.x45) — confirm the bias is intended.
        grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

        letter_grade = grade_for_percentage(
            course.grade_cutoffs, grade_summary['percent'], grade_summary['sections_passed']
        )
        grade_summary['grade'] = letter_grade
        grade_summary['totaled_scores'] = totaled_scores   # make this available, eg for instructor download & debugging
        if keep_raw_scores:
            # way to get all RAW scores out to instructor
            # so grader can be double-checked
            grade_summary['raw_scores'] = raw_scores

        max_scores_cache.push_to_remote()

        return grade_summary
# Example #2
 def test_grade_for_percentage(self, percent, sections_passed, expected):
     """The letter grade for the given percent/sections matches `expected`."""
     self.assertEqual(
         grade_for_percentage(self.GRADE_CUTOFFS, percent, sections_passed),
         expected,
     )
# Example #3
    def grade(student, request, course, keep_raw_scores, field_data_cache,
              scores_client):
        """
        Compute a course grade for ``student`` as quickly as possible.

        Returns the dict produced by the course grader, augmented with the
        final letter grade.  Keys in the result:

        - grade : the final letter grade.
        - percent : the final percentage for the class (rounded up).
        - section_breakdown : per-section breakdown (for display).
        - grade_breakdown : breakdown of the major components that make up
          the final grade (for display).
        - totaled_scores : the per-format score lists fed to the grader
          (exposed for instructor download and debugging).
        - raw_scores : a Score for every graded module; only present when
          ``keep_raw_scores`` is True.

        ``field_data_cache`` and ``scores_client`` may be None, in which
        case they are built here.  More information on the format is in
        the docstring for CourseGrader.
        """
        # Build the expensive inputs lazily when the caller did not supply them.
        if field_data_cache is None:
            with manual_transaction():
                field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(field_data_cache)

        # item_id -> (earned, possible) tuples registered via the submissions
        # API, which for the moment means only openassessment (edx-ora2).
        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(),
            anonymous_id_for_user(student, course.id))

        max_scores_cache = MaxScoresCache.create_for_course(course)
        # scorable_locations must come from field_data_cache rather than from
        # scores_client, because scores_client is ignorant of things in the
        # submissions API.  As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

        def needs_grading(section):
            """Return True when any problem in ``section`` may have a score."""
            descriptors = section['xmoduledescriptors']
            # Some problems (e.g. foldit., combinedopenended) update state
            # independently of the LMS and must always be re-scored.
            if any(d.always_recalculate_grades for d in descriptors):
                return True
            # Scores registered through the submissions API also force grading.
            if any(d.location.to_deprecated_string() in submissions_scores
                   for d in descriptors):
                return True
            # Finally, anything the scores client has seen.
            return any(d.location in scores_client for d in descriptors)

        grading_context = course.grading_context
        raw_scores = []
        totaled_scores = {}

        # Collect the per-format score lists that get handed to the grader.
        for section_format, sections in grading_context[
                'graded_sections'].iteritems():
            format_scores = []
            for section in sections:
                sect_descriptor = section['section_descriptor']
                sect_name = sect_descriptor.display_name_with_default
                sect_weight = getattr(sect_descriptor, 'weight', 1.0)

                if not needs_grading(section):
                    # Not a single scored problem in the section: assume 0%.
                    graded_total = WeightedScore(
                        0.0, 1.0, True, sect_name, None, sect_weight)
                else:
                    def create_module(descriptor):
                        '''Create an XModule instance for ``descriptor``.'''
                        # TODO: We need the request to pass into here.  If we
                        # could forego that, our arguments would be simpler.
                        return get_module_for_descriptor(
                            student, request, descriptor,
                            field_data_cache, course.id, course=course)

                    scores = []
                    descendants = yield_dynamic_descriptor_descendants(
                        sect_descriptor, student.id, create_module)
                    for module_descriptor in descendants:
                        correct, total = get_score(
                            student, module_descriptor, create_module,
                            scores_client, submissions_scores,
                            max_scores_cache)
                        # (None, None) means the module has no score at all.
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                            if total > 1:
                                correct = random.randrange(
                                    max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        is_graded = module_descriptor.graded
                        if not total > 0:
                            # A 12/0 problem cannot be expressed as a
                            # percentage, so treat it as ungraded.
                            is_graded = False

                        scores.append(Score(
                            correct, total, is_graded,
                            module_descriptor.display_name_with_default,
                            module_descriptor.location))

                    __, graded_total = aggregate_section_scores(
                        scores, sect_name, sect_weight)
                    if keep_raw_scores:
                        raw_scores += scores

                # Record the section total; a zero-possible total is ungradable.
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. {}"
                        .format(sect_descriptor.location))

            totaled_scores[section_format] = format_scores

        # Grading policy might be overridden by a CCX, so reset it first.
        course.set_grading_policy(course.grading_policy)
        grade_summary = course.grader.grade(
            totaled_scores,
            generate_random_scores=settings.GENERATE_PROFILE_SCORES)

        # Round to a whole percentage so the stored value matches what is
        # displayed.
        grade_summary['percent'] = round(
            grade_summary['percent'] * 100 + 0.05) / 100

        grade_summary['grade'] = grade_for_percentage(
            course.grade_cutoffs, grade_summary['percent'],
            grade_summary['sections_passed'])
        # make this available, eg for instructor download & debugging
        grade_summary['totaled_scores'] = totaled_scores
        if keep_raw_scores:
            # way to get all RAW scores out to instructor
            # so grader can be double-checked
            grade_summary['raw_scores'] = raw_scores

        max_scores_cache.push_to_remote()

        return grade_summary