Code Example #1
    def test_weighted_grading(self):
        scores = []
        Score.__sub__ = lambda me, other: (
            me.earned - other.earned) + (me.possible - other.possible)

        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(
            earned=0, possible=0, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(
            earned=0, possible=0, graded=True, section="summary"))

        scores.append(Score(
            earned=0, possible=5, graded=False, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(
            earned=0, possible=5, graded=False, section="summary"))
        self.assertEqual(graded_total, Score(
            earned=0, possible=0, graded=True, section="summary"))

        scores.append(Score(
            earned=3, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(
            earned=3, possible=10, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(
            earned=3, possible=5, graded=True, section="summary"))

        scores.append(Score(
            earned=2, possible=5, graded=True, section="summary"))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(
            earned=5, possible=15, graded=False, section="summary"))
        self.assertAlmostEqual(graded_total, Score(
            earned=5, possible=10, graded=True, section="summary"))
Code Example #2
    def test_weighted_grading(self):
        scores = []
        Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)

        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=0, graded=False, section="summary", module_id=None))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None))

        scores.append(Score(earned=0, possible=5, graded=False, section="summary", module_id=None))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(all_total, Score(earned=0, possible=5, graded=False, section="summary", module_id=None))
        self.assertEqual(graded_total, Score(earned=0, possible=0, graded=True, section="summary", module_id=None))

        scores.append(Score(earned=3, possible=5, graded=True, section="summary", module_id=None))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=3, possible=10, graded=False, section="summary", module_id=None))
        self.assertAlmostEqual(
            graded_total, Score(earned=3, possible=5, graded=True, section="summary", module_id=None)
        )

        scores.append(Score(earned=2, possible=5, graded=True, section="summary", module_id=None))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(all_total, Score(earned=5, possible=15, graded=False, section="summary", module_id=None))
        self.assertAlmostEqual(
            graded_total, Score(earned=5, possible=10, graded=True, section="summary", module_id=None)
        )
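The `Score.__sub__` monkey patch at the top of these tests is what makes `assertAlmostEqual` usable on namedtuples: when the two operands are not equal, `unittest.TestCase.assertAlmostEqual(first, second)` falls back to checking `round(abs(first - second), 7) == 0`, so giving `Score` a numeric subtraction result lets two scores compare as "almost equal" whenever their `earned` and `possible` fields match to seven decimal places. A minimal, self-contained sketch of the same trick (independent of edx-platform):

# Why the tests above patch Score.__sub__: assertAlmostEqual(a, b) falls back to
# round(abs(a - b), 7) == 0, so a numeric __sub__ makes namedtuples comparable.
import unittest
from collections import namedtuple

Score = namedtuple("Score", "earned possible graded section")
Score.__sub__ = lambda me, other: (me.earned - other.earned) + (me.possible - other.possible)


class ScoreComparisonDemo(unittest.TestCase):
    def test_almost_equal_via_sub(self):
        a = Score(earned=3.0000000001, possible=5, graded=True, section="summary")
        b = Score(earned=3.0, possible=5, graded=True, section="summary")
        self.assertAlmostEqual(a, b)  # passes: the numeric difference rounds to 0


if __name__ == "__main__":
    unittest.main()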
Code Example #3
File: test_graders.py  Project: CUCWD/edx-platform
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(display_name="aggregated_score", module_id=None, attempted=False)
        prob_fields = dict(
            display_name="problem_score", module_id=None, raw_earned=0, raw_possible=0, weight=0, attempted=False,
        )

        # No scores
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded)
        scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) = 3/10 total, 3/5 graded
        prob_fields['attempted'] = True
        agg_fields['attempted'] = True
        scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3, tw_possible=5, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
        scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5, tw_possible=10, graded=True, **agg_fields),
        )
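The assertions above fully pin down what `aggregate_scores` has to return: `all_total` sums weighted earned/possible points over every score, while `graded_total` sums only the scores flagged as graded. The following is a minimal sketch consistent with those assertions, not the real `graders.aggregate_scores`; `ProblemScore` and `AggregatedScore` here are simplified stand-ins for the edx-platform classes, which carry more fields.

# Minimal sketch of the aggregation behaviour exercised by the tests above.
from collections import namedtuple

ProblemScore = namedtuple("ProblemScore", "weighted_earned weighted_possible graded")
AggregatedScore = namedtuple("AggregatedScore", "tw_earned tw_possible graded")


def aggregate_scores(scores):
    """Return (all_total, graded_total) over a list of weighted problem scores."""
    all_total = AggregatedScore(
        tw_earned=sum(s.weighted_earned for s in scores),
        tw_possible=sum(s.weighted_possible for s in scores),
        graded=False,
    )
    graded_total = AggregatedScore(
        tw_earned=sum(s.weighted_earned for s in scores if s.graded),
        tw_possible=sum(s.weighted_possible for s in scores if s.graded),
        graded=True,
    )
    return all_total, graded_total


if __name__ == "__main__":
    scores = [
        ProblemScore(0, 5, False),  # (0/5 non-graded)
        ProblemScore(3, 5, True),   # (3/5 graded)
        ProblemScore(2, 5, True),   # (2/5 graded)
    ]
    all_total, graded_total = aggregate_scores(scores)
    assert (all_total.tw_earned, all_total.tw_possible) == (5, 15)
    assert (graded_total.tw_earned, graded_total.tw_possible) == (5, 10)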
Code Example #4
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(first_attempted=None)
        prob_fields = dict(raw_earned=0, raw_possible=0, weight=0, first_attempted=None)

        # No scores
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded)
        scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) = 3/10 total, 3/5 graded
        now = datetime.now()
        prob_fields['first_attempted'] = now
        agg_fields['first_attempted'] = now
        scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3, tw_possible=5, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
        scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5, tw_possible=10, graded=True, **agg_fields),
        )
Code Example #5
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(first_attempted=None)
        prob_fields = dict(raw_earned=0, raw_possible=0, weight=0, first_attempted=None)

        # No scores
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded)
        scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) = 3/10 total, 3/5 graded
        now = datetime.now()
        prob_fields['first_attempted'] = now
        agg_fields['first_attempted'] = now
        scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3, tw_possible=5, graded=True, **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
        scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores)
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5, tw_possible=10, graded=True, **agg_fields),
        )
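Examples #4 and #5 swap the boolean `attempted` flag of example #3 for a `first_attempted` timestamp; the tests reuse the same `now` for both the appended problem score and the expected aggregate, which is consistent with the aggregate carrying the earliest non-None attempt time among its problems. A hedged sketch of that assumed behaviour (not the edx-platform implementation):

# Assumed behaviour, inferred from the tests above and not verified against
# edx-platform: the aggregate's first_attempted is the earliest non-None
# first_attempted among the individual scores, or None if nothing was attempted.
from collections import namedtuple
from datetime import datetime, timedelta

_S = namedtuple("_S", "first_attempted")


def aggregate_first_attempted(scores):
    attempted = [s.first_attempted for s in scores if s.first_attempted is not None]
    return min(attempted) if attempted else None


now = datetime.now()
assert aggregate_first_attempted([_S(None)]) is None
assert aggregate_first_attempted([_S(None), _S(now), _S(now + timedelta(hours=1))]) == now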
Code Example #6
    def __init__(self, subsection, course_structure, submissions_scores,
                 csm_scores):
        self.problem_scores = OrderedDict()
        for block_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=subsection.location,
        ):
            problem_score = self._compute_block_score(block_key,
                                                      course_structure,
                                                      submissions_scores,
                                                      csm_scores)

            # TODO: Remove as part of EDUCATOR-4602.
            if str(block_key.course_key) == 'course-v1:UQx+BUSLEAD5x+2T2019':
                log.info(
                    u'Calculated problem score ***{}*** for block ***{!s}***'
                    u' in subsection ***{}***.'.format(problem_score,
                                                       block_key,
                                                       subsection.location))
            if problem_score:
                self.problem_scores[block_key] = problem_score

        all_total, graded_total = graders.aggregate_scores(
            list(self.problem_scores.values()))

        # TODO: Remove as part of EDUCATOR-4602.
        if str(subsection.location.course_key
               ) == 'course-v1:UQx+BUSLEAD5x+2T2019':
            log.info(
                u'Calculated aggregate all_total ***{}***'
                u' and grade_total ***{}*** for subsection ***{}***'.format(
                    all_total, graded_total, subsection.location))

        super(CreateSubsectionGrade, self).__init__(subsection, all_total,
                                                    graded_total)
Code Example #7
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(display_name="aggregated_score", module_id=None)
        prob_fields = dict(display_name="problem_score", module_id=None, raw_earned=0, raw_possible=0, weight=0)

        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        scores.append(ProblemScore(weighted_earned=0, weighted_possible=5, graded=False, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0, tw_possible=5, graded=False, **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0, tw_possible=0, graded=True, **agg_fields),
        )

        scores.append(ProblemScore(weighted_earned=3, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3, tw_possible=10, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3, tw_possible=5, graded=True, **agg_fields),
        )

        scores.append(ProblemScore(weighted_earned=2, weighted_possible=5, graded=True, **prob_fields))
        all_total, graded_total = aggregate_scores(scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5, tw_possible=15, graded=False, **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5, tw_possible=10, graded=True, **agg_fields),
        )
Code Example #8
    def compute(self, student, course_structure, scores_client, submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            self._compute_block_score(student, descendant_key, course_structure, scores_client, submissions_scores)

        self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
Code Example #9
    def __init__(self, subsection, course_structure, submissions_scores, csm_scores):
        self.problem_scores = OrderedDict()
        for block_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=subsection.location,
        ):
            problem_score = self._compute_block_score(block_key, course_structure, submissions_scores, csm_scores)
            if problem_score:
                self.problem_scores[block_key] = problem_score

        all_total, graded_total = graders.aggregate_scores(list(self.problem_scores.values()))

        super(CreateSubsectionGrade, self).__init__(subsection, all_total, graded_total)
Code Example #10
    def init_from_structure(self, student, course_structure, submissions_scores, csm_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            self._compute_block_score(descendant_key, course_structure, submissions_scores, csm_scores)

        self.all_total, self.graded_total = graders.aggregate_scores(self.scores)
        self._log_event(log.debug, u"init_from_structure", student)
        return self
Code Example #11
    def __init__(self, subsection, course_structure, submissions_scores, csm_scores):
        self.problem_scores = OrderedDict()
        for block_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=subsection.location,
        ):
            problem_score = self._compute_block_score(block_key, course_structure, submissions_scores, csm_scores)
            if problem_score:
                self.problem_scores[block_key] = problem_score

        all_total, graded_total = graders.aggregate_scores(self.problem_scores.values())

        super(CreateSubsectionGrade, self).__init__(subsection, all_total, graded_total)
Code Example #12
    def init_from_structure(self, student, course_structure, scores_client, submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        assert self._scores is None
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            self._compute_block_score(
                student, descendant_key, course_structure, scores_client, submissions_scores, persisted_values={},
            )
        self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
        self._log_event(log.warning, u"init_from_structure", student)
Code Example #13
    def compute(self, student, course_structure, scores_client, submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        try:
            for descendant_key in course_structure.post_order_traversal(
                    filter_func=possibly_scored,
                    start_node=self.location,
            ):
                self._compute_block_score(student, descendant_key, course_structure, scores_client, submissions_scores)
        finally:
            # self.scores may hold outdated data, force it to refresh on next access
            lazy.invalidate(self, 'scores')

        self.all_total, self.graded_total = graders.aggregate_scores(self.scores, self.display_name, self.location)
Code Example #14
    def compute(self, student, course_structure, scores_client,
                submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        try:
            for descendant_key in course_structure.post_order_traversal(
                    filter_func=possibly_scored,
                    start_node=self.location,
            ):
                self._compute_block_score(student, descendant_key,
                                          course_structure, scores_client,
                                          submissions_scores)
        finally:
            # self.scores may hold outdated data, force it to refresh on next access
            lazy.invalidate(self, 'scores')

        self.all_total, self.graded_total = graders.aggregate_scores(
            self.scores, self.display_name, self.location)
Code Example #15
    def init_from_structure(self, student, course_structure, scores_client,
                            submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        assert self._scores is None
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            self._compute_block_score(
                student,
                descendant_key,
                course_structure,
                scores_client,
                submissions_scores,
                persisted_values={},
            )
        self.all_total, self.graded_total = graders.aggregate_scores(
            self.scores, self.display_name, self.location)
        self._log_event(log.warning, u"init_from_structure", student)
Code Example #16
    def compute(self, student, course_structure, scores_client, submissions_scores):
        """
        Compute the grade of this subsection for the given student and course.
        """
        for descendant_key in course_structure.post_order_traversal(
                filter_func=possibly_scored,
                start_node=self.location,
        ):
            descendant = course_structure[descendant_key]

            if not getattr(descendant, 'has_score', False):
                continue

            (earned, possible) = get_score(
                student,
                descendant,
                scores_client,
                submissions_scores,
            )
            if earned is None and possible is None:
                continue

            # cannot grade a problem with a denominator of 0
            descendant_graded = descendant.graded if possible > 0 else False

            self.locations_to_scores[descendant.location] = Score(
                earned,
                possible,
                descendant_graded,
                block_metadata_utils.display_name_with_default_escaped(descendant),
                descendant.location,
            )

        self.all_total, self.graded_total = graders.aggregate_scores(
            self.scores, self.display_name,
        )
Code Example #17
File: grades.py  Project: BeiLuoShiMen/edx-platform
def _grade(student, request, course, keep_raw_scores):
    """
    Unwrapped version of "grade"

    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
      up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
      make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
      for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    grading_context = course.grading_context
    raw_scores = []

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            # some problems have state that is updated independently of interaction
            # with the LMS, so they need to always be scored. (E.g. foldit.,
            # combinedopenended)
            should_grade_section = any(
                descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
            )

            # If we haven't seen a single problem in the section, we don't have to grade it at all! We can assume 0%
            if not should_grade_section:
                with manual_transaction():
                    should_grade_section = StudentModule.objects.filter(
                        student=student,
                        module_state_key__in=[
                            descriptor.location for descriptor in section['xmoduledescriptors']
                        ]
                    ).exists()

            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''creates an XModule instance given a descriptor'''
                    # TODO: We need the request to pass into here. If we could forego that, our arguments
                    # would be simpler
                    with manual_transaction():
                        field_data_cache = FieldDataCache([descriptor], course.id, student)
                    return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)

                for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):

                    (correct, total) = get_score(course.id, student, module_descriptor, create_module)
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  	# for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        #We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                        graded = False

                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

                _, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name)

            #Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.exception("Unable to grade a section with a total possible score of zero. " +
                              str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary['totaled_scores'] = totaled_scores  	# make this available, eg for instructor download & debugging
    if keep_raw_scores:
        grade_summary['raw_scores'] = raw_scores        # way to get all RAW scores out to instructor
                                                        # so grader can be double-checked
    return grade_summary
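One line in `_grade` worth tracing is the percent rounding near the end: `round(grade_summary['percent'] * 100 + 0.05) / 100`. The `+ 0.05` adds half of a tenth of a percentage point before rounding to a whole percent, which is the slight upward bias the docstring describes as "rounded up". A standalone replay of the arithmetic, with example values chosen to stay clear of floating-point edge cases:

# Replaying: grade_summary['percent'] = round(percent * 100 + 0.05) / 100
for raw in (0.8932, 0.8962, 0.8996):
    print(raw, "->", round(raw * 100 + 0.05) / 100)

# Prints:
#   0.8932 -> 0.89
#   0.8962 -> 0.9
#   0.8996 -> 0.9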
Code Example #18
def grade(student,
          request,
          course,
          field_data_cache=None,
          keep_raw_scores=False):
    """
    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
        up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
        make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores for every graded module

    More information on the format is in the docstring for CourseGrader.
    """

    grading_context = course.grading_context
    raw_scores = []

    if field_data_cache is None:
        field_data_cache = FieldDataCache(grading_context['all_descriptors'],
                                          course.id, student)

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context[
            'graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            should_grade_section = False
            # If we haven't seen a single problem in the section, we don't have to grade it at all! We can assume 0%
            for moduledescriptor in section['xmoduledescriptors']:
                # some problems have state that is updated independently of interaction
                # with the LMS, so they need to always be scored. (E.g. foldit.)
                if moduledescriptor.always_recalculate_grades:
                    should_grade_section = True
                    break

                # Create a fake key to pull out a StudentModule object from the FieldDataCache

                key = DjangoKeyValueStore.Key(Scope.user_state, student.id,
                                              moduledescriptor.location, None)
                if field_data_cache.find(key):
                    should_grade_section = True
                    break

            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''creates an XModule instance given a descriptor'''
                    # TODO: We need the request to pass into here. If we could forego that, our arguments
                    # would be simpler
                    return get_module_for_descriptor(student, request,
                                                     descriptor,
                                                     field_data_cache,
                                                     course.id)

                for module_descriptor in yield_dynamic_descriptor_descendents(
                        section_descriptor, create_module):

                    (correct,
                     total) = get_score(course.id, student, module_descriptor,
                                        create_module, field_data_cache)
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1),
                                                       total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        #We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                        graded = False

                    scores.append(
                        Score(correct, total, graded,
                              module_descriptor.display_name_with_default))

                _, graded_total = graders.aggregate_scores(
                    scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name)

            #Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.exception(
                    "Unable to grade a section with a total possible score of zero. "
                    + str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    grade_summary = course.grader.grade(
        totaled_scores,
        generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 +
                                     0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs,
                                        grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary[
        'totaled_scores'] = totaled_scores  # make this available, eg for instructor download & debugging
    if keep_raw_scores:
        grade_summary[
            'raw_scores'] = raw_scores  # way to get all RAW scores out to instructor
        # so grader can be double-checked
    return grade_summary
Code Example #19
def progress_summary(student, request, course, field_data_cache):
    """
    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade
        field_data_cache: A FieldDataCache initialized with all
             instance_modules for the student

    If the student does not have access to load the course module, this function
    will return None.

    """

    # TODO: We need the request to pass into here. If we could forego that, our arguments
    # would be simpler
    course_module = get_module(student,
                               request,
                               course.location,
                               field_data_cache,
                               course.id,
                               depth=None)
    if not course_module:
        # This student must not have access to the course.
        return None

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []
        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            if section_module.hide_from_toc:
                continue

            # Same for sections
            graded = section_module.graded
            scores = []

            module_creator = section_module.system.get_module

            for module_descriptor in yield_dynamic_descriptor_descendents(
                    section_module.descriptor, module_creator):

                course_id = course.id
                (correct, total) = get_score(course_id, student,
                                             module_descriptor, module_creator,
                                             field_data_cache)
                if correct is None and total is None:
                    continue

                scores.append(
                    Score(correct, total, graded,
                          module_descriptor.display_name_with_default))

            scores.reverse()
            section_total, _ = graders.aggregate_scores(
                scores, section_module.display_name_with_default)

            module_format = section_module.format if section_module.format is not None else ''
            sections.append({
                'display_name': section_module.display_name_with_default,
                'url_name': section_module.url_name,
                'scores': scores,
                'section_total': section_total,
                'format': module_format,
                'due': section_module.due,
                'graded': graded,
            })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    return chapters
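For orientation, the `chapters` value returned by `progress_summary` above is plain data: a list of chapter dicts, each holding a list of section dicts. An illustrative example of the shape (field names come from the code above; the values here are made up):

# Illustrative only -- values are invented, field names mirror the code above.
example_chapters = [
    {
        'course': 'Demo Course',
        'display_name': 'Week 1',
        'url_name': 'week_1',
        'sections': [
            {
                'display_name': 'Homework 1',
                'url_name': 'homework_1',
                'scores': [],           # Score namedtuples for each scored descendant
                'section_total': None,  # aggregate Score from graders.aggregate_scores
                'format': 'Homework',
                'due': None,
                'graded': True,
            },
        ],
    },
]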
Code Example #20
def _progress_summary(student, request, course):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, student, course, depth=None
        )
        # TODO: We need the request to pass into here. If we could
        # forego that, our arguments would be simpler
        course_module = get_module_for_descriptor(student, request, course, field_data_cache, course.id)
        if not course_module:
            # This student must not have access to the course.
            return None

        course_module = getattr(course_module, '_x_module', course_module)

    submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []

        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendents(section_module, module_creator):
                    course_id = course.id
                    (correct, total) = get_score(
                        course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores
                    )
                    if correct is None and total is None:
                        continue

                    scores.append(
                        Score(
                            correct,
                            total,
                            graded,
                            module_descriptor.display_name_with_default,
                            module_descriptor.location
                        )
                    )

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': section_module.due,
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    return chapters
Code Example #21
def progress_summary(student, request, course, model_data_cache):
    """
    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade
        model_data_cache: A ModelDataCache initialized with all
             instance_modules for the student

    If the student does not have access to load the course module, this function
    will return None.

    """

    # TODO: We need the request to pass into here. If we could forego that, our arguments
    # would be simpler
    course_module = get_module(student, request, course.location, model_data_cache, course.id, depth=None)
    if not course_module:
        # This student must not have access to the course.
        return None

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.lms.hide_from_toc:
            continue

        sections = []
        for section_module in chapter_module.get_display_items():

            # Skip if the section is hidden
            if section_module.lms.hide_from_toc:
                continue

            graded = section_module.lms.graded
            scores = []

            module_creator = section_module.system.get_module

            for module_descriptor in yield_dynamic_descriptor_descendents(section_module.descriptor, module_creator):

                course_id = course.id
                (correct, total) = get_score(course_id, student, module_descriptor, module_creator, model_data_cache)

                if correct is None and total is None:
                    continue

                scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

            scores.reverse()
            section_total, _ = graders.aggregate_scores(
                scores, section_module.display_name_with_default)

            module_format = section_module.lms.format if section_module.lms.format is not None else ''
            sections.append({
                'display_name': section_module.display_name_with_default,
                'url_name': section_module.url_name,
                'scores': scores,
                'unlock_term': section_module.unlock_term,
                'section_total': section_total,
                'format': module_format,
                'unlocked': True,
                'due': section_module.lms.due,
                'graded': graded,
            })

        chapters.append({'course': course.display_name_with_default,
                         'display_name': chapter_module.display_name_with_default,
                         'url_name': chapter_module.url_name,
                         'sections': sections})
    chapters = set_locks(chapters)
    return chapters
Code Example #22
File: grades.py  Project: singhularity/edx-platform
def _progress_summary(student, request, course, field_data_cache=None, scores_client=None):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        if field_data_cache is None:
            field_data_cache = field_data_cache_for_grading(course, student)
        if scores_client is None:
            scores_client = ScoresClient.from_field_data_cache(field_data_cache)

        course_module = get_module_for_descriptor(
            student, request, course, field_data_cache, course.id, course=course
        )
        if not course_module:
            return None

        course_module = getattr(course_module, '_x_module', course_module)

    submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))
    max_scores_cache = MaxScoresCache.create_for_course(course)
    # For the moment, we have to get scorable_locations from field_data_cache
    # and not from scores_client, because scores_client is ignorant of things
    # in the submissions API. As a further refactoring step, submissions should
    # be hidden behind the ScoresClient.
    max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    chapters = []
    locations_to_children = defaultdict(list)
    locations_to_weighted_scores = {}
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []
        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendants(
                        section_module, student.id, module_creator
                ):
                    locations_to_children[module_descriptor.parent].append(module_descriptor.location)
                    (correct, total) = get_score(
                        student,
                        module_descriptor,
                        module_creator,
                        scores_client,
                        submissions_scores,
                        max_scores_cache,
                    )
                    if correct is None and total is None:
                        continue

                    weighted_location_score = Score(
                        correct,
                        total,
                        graded,
                        module_descriptor.display_name_with_default,
                        module_descriptor.location
                    )

                    scores.append(weighted_location_score)
                    locations_to_weighted_scores[module_descriptor.location] = weighted_location_score

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': section_module.due,
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    max_scores_cache.push_to_remote()

    return ProgressSummary(chapters, locations_to_weighted_scores, locations_to_children)
Code Example #23
def get_grades(course, student):

    field_data_cache = field_data_cache_for_grading(course, student)
    scores_client = ScoresClient.from_field_data_cache(field_data_cache)

    grading_context = course.grading_context
    raw_scores = []
    keep_raw_scores = None

    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
    # scores that were registered with the submissions API, which for the moment
    # means only openassessment (edx-ora2)
    submissions_scores = sub_api.get_scores(
        course.id.to_deprecated_string(),
        anonymous_id_for_user(student, course.id)
    )
    max_scores_cache = MaxScoresCache.create_for_course(course)

    # Fix for grading certificate subsection issue
    tem_set = set()
    for set_item in field_data_cache.scorable_locations:
        set_item = set_item.version_agnostic()
        set_item = set_item.replace(branch=None)
        tem_set.update([set_item])
    field_data_cache.scorable_locations = tem_set

    # For the moment, we have to get scorable_locations from field_data_cache
    # and not from scores_client, because scores_client is ignorant of things
    # in the submissions API. As a further refactoring step, submissions should
    # be hidden behind the ScoresClient.
    max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    totaled_scores = {}
    graded_total = []
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            # some problems have state that is updated independently of interaction
            # with the LMS, so they need to always be scored. (E.g. foldit.,
            # combinedopenended)
            should_grade_section = any(
                descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
            )

            # If there are no problems that always have to be regraded, check to
            # see if any of our locations are in the scores from the submissions
            # API. If scores exist, we have to calculate grades for this section.
            if not should_grade_section:
                should_grade_section = any(
                    descriptor.location.to_deprecated_string() in submissions_scores
                    for descriptor in section['xmoduledescriptors']
                )

            # Fix for grading certificate subsection issue
            for descriptor in section['xmoduledescriptors']:
                descriptor.location = descriptor.location.version_agnostic()
                descriptor.location = descriptor.location.replace(branch=None)

            if not should_grade_section:
                should_grade_section = any(
                    descriptor.location in scores_client
                    for descriptor in section['xmoduledescriptors']
                )

            # If we haven't seen a single problem in the section, we don't have
            # to grade it at all! We can assume 0%
            if should_grade_section:
                scores = []

                try:
                    create_module = section_descriptor.xmodule_runtime.get_module
                except:
                    def create_module(descriptor):
                        '''creates an XModule instance given a descriptor'''
                        # TODO: We need the request to pass into here. If we could forego that, our arguments
                        # would be simpler
                        pass

                for module_descriptor in yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module):
                    (correct, total) = get_score(
                        student,
                        module_descriptor,
                        create_module,
                        scores_client,
                        submissions_scores,
                        max_scores_cache,
                    )
                    print "total==============", total
                    print "correct================", correct
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                        graded = False

                    # scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))
                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default, module_descriptor.location))

                _, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name, None)

            # Add the graded total to totaled_scores
            if graded_total:
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info("Unable to grade a section with a total possible score of zero. " +
                             str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary['totaled_scores'] = totaled_scores  # make this available, eg for instructor download & debugging
    if keep_raw_scores:
        grade_summary['raw_scores'] = raw_scores  # way to get all RAW scores out to instructor
                                                  # so grader can be double-checked
    max_scores_cache.push_to_remote()
    return grade_summary
Code Example #24
    def progress_summary(student,
                         request,
                         course,
                         field_data_cache=None,
                         scores_client=None,
                         grading_type='vertical'):
        """
        This pulls a summary of all problems in the course.

        Returns
        - courseware_summary is a summary of all sections with problems in the course.
        It is organized as an array of chapters, each containing an array of sections,
        each containing an array of scores. This contains information for graded and
        ungraded problems, and is good for displaying a course summary with due dates,
        etc.

        Arguments:
            student: A User object for the student to grade
            course: A Descriptor containing the course to grade

        If the student does not have access to load the course module, this function
        will return None.

        """

        with manual_transaction():
            if field_data_cache is None:
                field_data_cache = field_data_cache_for_grading(
                    course, student)
            if scores_client is None:
                scores_client = ScoresClient.from_field_data_cache(
                    field_data_cache)

            course_module = get_module_for_descriptor(student,
                                                      request,
                                                      course,
                                                      field_data_cache,
                                                      course.id,
                                                      course=course)
            if not course_module:
                return None

            course_module = getattr(course_module, '_x_module', course_module)

        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(),
            anonymous_id_for_user(student, course.id))
        max_scores_cache = MaxScoresCache.create_for_course(course)
        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

        blocks_stack = [course_module]
        blocks_dict = {}

        while blocks_stack:
            curr_block = blocks_stack.pop()
            with manual_transaction():
                # Skip if the block is hidden
                if curr_block.hide_from_toc:
                    continue

                key = unicode(curr_block.scope_ids.usage_id)
                children = curr_block.get_display_items() if curr_block.category != grading_type else []
                block = {
                    'display_name': curr_block.display_name_with_default,
                    'block_type': curr_block.category,
                    'url_name': curr_block.url_name,
                    'children': [unicode(child.scope_ids.usage_id) for child in children],
                }

                if curr_block.category == grading_type:
                    graded = curr_block.graded
                    scores = []

                    module_creator = curr_block.xmodule_runtime.get_module
                    for module_descriptor in yield_dynamic_descriptor_descendants(
                            curr_block, student.id, module_creator):
                        (correct, total) = get_score(
                            student,
                            module_descriptor,
                            module_creator,
                            scores_client,
                            submissions_scores,
                            max_scores_cache,
                        )

                        if correct is None and total is None:
                            continue

                        scores.append(
                            Score(correct, total, graded,
                                  module_descriptor.display_name_with_default,
                                  module_descriptor.location))

                    scores.reverse()
                    total, _ = aggregate_scores(
                        scores, curr_block.display_name_with_default)

                    module_format = curr_block.format if curr_block.format is not None else ''
                    block.update({
                        'scores': scores,
                        'total': total,
                        'format': module_format,
                        'due': curr_block.due,
                        'graded': graded,
                    })

                blocks_dict[key] = block
                # Add this block's children to the stack so that we can traverse them as well.
                blocks_stack.extend(children)

        max_scores_cache.push_to_remote()

        return {
            'root': unicode(course.scope_ids.usage_id),
            'blocks': blocks_dict,
        }
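A hedged sketch (not from any of the projects above) of how a caller might walk the {'root': ..., 'blocks': ...} structure this function returns. It assumes only the keys built in the loop above; 'scores', 'total' and 'graded' exist only on blocks whose category matches grading_type, hence the .get() calls.

def iter_graded_blocks(progress):
    """Yield (display_name, total) for every graded block in a progress_summary() result."""
    blocks = progress['blocks']
    stack = [progress['root']]
    while stack:
        key = stack.pop()
        block = blocks.get(key)
        if block is None:          # e.g. a child that was hidden from the TOC
            continue
        if block.get('graded'):
            yield block['display_name'], block['total']
        stack.extend(block.get('children', []))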
コード例 #25
0
ファイル: grades.py プロジェクト: marcore/edx-platform
def _calculate_totaled_scores(
        student,
        grading_context_result,
        submissions_scores,
        scores_client,
        keep_raw_scores,
):
    """
    Returns a tuple of (totaled_scores, raw_scores); the totaled scores can be passed to the grader.
    """
    raw_scores = []
    totaled_scores = {}
    for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
        format_scores = []
        for section_info in sections:
            section = section_info['section_block']
            section_name = block_metadata_utils.display_name_with_default(section)

            with outer_atomic():
                # Check to see if any of our locations are in the scores from the
                # submissions API. If scores exist, we have to calculate grades for this section.
                should_grade_section = any(
                    unicode(descendant.location) in submissions_scores
                    for descendant in section_info['scored_descendants']
                )

                if not should_grade_section:
                    should_grade_section = any(
                        descendant.location in scores_client
                        for descendant in section_info['scored_descendants']
                    )

                # If we haven't seen a single problem in the section, we don't have
                # to grade it at all! We can assume 0%
                if should_grade_section:
                    scores = []

                    for descendant in section_info['scored_descendants']:

                        (correct, total) = get_score(
                            student,
                            descendant,
                            scores_client,
                            submissions_scores,
                        )
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                            if total > 1:
                                correct = random.randrange(max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        graded = descendant.graded
                        if not total > 0:
                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                            graded = False

                        scores.append(
                            Score(
                                correct,
                                total,
                                graded,
                                block_metadata_utils.display_name_with_default_escaped(descendant),
                                descendant.location
                            )
                        )

                    __, graded_total = graders.aggregate_scores(scores, section_name)
                    if keep_raw_scores:
                        raw_scores += scores
                else:
                    graded_total = Score(0.0, 1.0, True, section_name, None)

                # Add the graded total to totaled_scores
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. " +
                        str(section.location)
                    )

        totaled_scores[section_format] = format_scores

    return totaled_scores, raw_scores
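For reference, a self-contained sketch of the shape of the first return value. The Score stand-in below mirrors the field order used above (earned, possible, graded, section, module_id); the assignment formats and numbers are made up for illustration.

from collections import namedtuple

Score = namedtuple('Score', 'earned possible graded section module_id')   # stand-in for the real tuple

# totaled_scores: assignment format -> list of per-section graded totals.
# Sections whose graded total has possible == 0 are skipped by the loop above.
totaled_scores = {
    'Homework': [
        Score(3.0, 5.0, True, 'Homework 1', None),
        Score(0.0, 1.0, True, 'Homework 2', None),   # no seen problems -> assumed 0/1
    ],
    'Exam': [
        Score(7.0, 10.0, True, 'Midterm', None),
    ],
}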
コード例 #26
0
ファイル: test_graders.py プロジェクト: yewtzeee/edx-platform
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(display_name="aggregated_score", module_id=None)
        prob_fields = dict(display_name="problem_score",
                           module_id=None,
                           raw_earned=0,
                           raw_possible=0,
                           weight=0)

        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=False,
                            **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=True,
                            **agg_fields),
        )

        scores.append(
            ProblemScore(weighted_earned=0,
                         weighted_possible=5,
                         graded=False,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=5,
                            graded=False,
                            **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=True,
                            **agg_fields),
        )

        scores.append(
            ProblemScore(weighted_earned=3,
                         weighted_possible=5,
                         graded=True,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3,
                            tw_possible=10,
                            graded=False,
                            **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3,
                            tw_possible=5,
                            graded=True,
                            **agg_fields),
        )

        scores.append(
            ProblemScore(weighted_earned=2,
                         weighted_possible=5,
                         graded=True,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5,
                            tw_possible=15,
                            graded=False,
                            **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5,
                            tw_possible=10,
                            graded=True,
                            **agg_fields),
        )
コード例 #27
0
ファイル: test_graders.py プロジェクト: pjki100/edx-platform
    def test_weighted_grading(self):
        scores = []
        agg_fields = dict(display_name="aggregated_score",
                          module_id=None,
                          attempted=False)
        prob_fields = dict(
            display_name="problem_score",
            module_id=None,
            raw_earned=0,
            raw_possible=0,
            weight=0,
            attempted=False,
        )

        # No scores
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=False,
                            **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=True,
                            **agg_fields),
        )

        # (0/5 non-graded)
        scores.append(
            ProblemScore(weighted_earned=0,
                         weighted_possible=5,
                         graded=False,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertEqual(
            all_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=5,
                            graded=False,
                            **agg_fields),
        )
        self.assertEqual(
            graded_total,
            AggregatedScore(tw_earned=0,
                            tw_possible=0,
                            graded=True,
                            **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) = 3/10 total, 3/5 graded
        prob_fields['attempted'] = True
        agg_fields['attempted'] = True
        scores.append(
            ProblemScore(weighted_earned=3,
                         weighted_possible=5,
                         graded=True,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=3,
                            tw_possible=10,
                            graded=False,
                            **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=3,
                            tw_possible=5,
                            graded=True,
                            **agg_fields),
        )

        # (0/5 non-graded) + (3/5 graded) + (2/5 graded) = 5/15 total, 5/10 graded
        scores.append(
            ProblemScore(weighted_earned=2,
                         weighted_possible=5,
                         graded=True,
                         **prob_fields))
        all_total, graded_total = aggregate_scores(
            scores, display_name=agg_fields['display_name'])
        self.assertAlmostEqual(
            all_total,
            AggregatedScore(tw_earned=5,
                            tw_possible=15,
                            graded=False,
                            **agg_fields),
        )
        self.assertAlmostEqual(
            graded_total,
            AggregatedScore(tw_earned=5,
                            tw_possible=10,
                            graded=True,
                            **agg_fields),
        )
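The assertions above fully pin down the aggregation semantics: the first total sums every score, the second only the graded ones. A minimal, self-contained re-implementation sketch under that reading (not the project's actual aggregate_scores):

from collections import namedtuple

# Bare-bones stand-in; the real ProblemScore/AggregatedScore classes carry more fields.
SimpleScore = namedtuple('SimpleScore', 'earned possible graded')

def aggregate_scores_sketch(scores):
    """Return (all_total, graded_total) over a list of SimpleScore tuples."""
    all_total = SimpleScore(
        earned=sum(s.earned for s in scores),
        possible=sum(s.possible for s in scores),
        graded=False,
    )
    graded_total = SimpleScore(
        earned=sum(s.earned for s in scores if s.graded),
        possible=sum(s.possible for s in scores if s.graded),
        graded=True,
    )
    return all_total, graded_total

# Mirrors the (0/5 ungraded) + (3/5 graded) + (2/5 graded) case from the test above.
scores = [SimpleScore(0, 5, False), SimpleScore(3, 5, True), SimpleScore(2, 5, True)]
assert aggregate_scores_sketch(scores) == (SimpleScore(5, 15, False), SimpleScore(5, 10, True))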
コード例 #28
0
    def progress_summary(student, request, course, field_data_cache=None, scores_client=None, grading_type='vertical'):
        """
        This pulls a summary of all problems in the course.

        Returns
        - courseware_summary is a summary of all sections with problems in the course.
        It is organized as an array of chapters, each containing an array of sections,
        each containing an array of scores. This contains information for graded and
        ungraded problems, and is good for displaying a course summary with due dates,
        etc.

        Arguments:
            student: A User object for the student to grade
            course: A Descriptor containing the course to grade

        If the student does not have access to load the course module, this function
        will return None.

        """

        with manual_transaction():
            if field_data_cache is None:
                field_data_cache = field_data_cache_for_grading(course, student)
            if scores_client is None:
                scores_client = ScoresClient.from_field_data_cache(field_data_cache)

            course_module = get_module_for_descriptor(
                student, request, course, field_data_cache, course.id, course=course
            )
            if not course_module:
                return None

            course_module = getattr(course_module, '_x_module', course_module)

        submissions_scores = sub_api.get_scores(
            course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
        )
        max_scores_cache = MaxScoresCache.create_for_course(course)
        # For the moment, we have to get scorable_locations from field_data_cache
        # and not from scores_client, because scores_client is ignorant of things
        # in the submissions API. As a further refactoring step, submissions should
        # be hidden behind the ScoresClient.
        max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

        blocks_stack = [course_module]
        blocks_dict = {}

        while blocks_stack:
            curr_block = blocks_stack.pop()
            with manual_transaction():
                # Skip if the block is hidden
                if curr_block.hide_from_toc:
                    continue

                key = unicode(curr_block.scope_ids.usage_id)
                children = curr_block.get_display_items() if curr_block.category != grading_type else []
                block = {
                    'display_name': curr_block.display_name_with_default,
                    'block_type': curr_block.category,
                    'url_name': curr_block.url_name,
                    'children': [unicode(child.scope_ids.usage_id) for child in children],
                }

                if curr_block.category == grading_type:
                    graded = curr_block.graded
                    scores = []

                    module_creator = curr_block.xmodule_runtime.get_module
                    for module_descriptor in yield_dynamic_descriptor_descendants(
                            curr_block, student.id, module_creator
                    ):
                        (correct, total) = get_score(
                            student,
                            module_descriptor,
                            module_creator,
                            scores_client,
                            submissions_scores,
                            max_scores_cache,
                        )

                        if correct is None and total is None:
                            continue

                        scores.append(
                            Score(
                                correct,
                                total,
                                graded,
                                module_descriptor.display_name_with_default,
                                module_descriptor.location
                            )
                        )

                    scores.reverse()
                    total, _ = aggregate_scores(scores, curr_block.display_name_with_default)

                    module_format = curr_block.format if curr_block.format is not None else ''
                    block.update({
                        'scores': scores,
                        'total': total,
                        'format': module_format,
                        'due': curr_block.due,
                        'graded': graded,
                    })

                blocks_dict[key] = block
                # Add this block's children to the stack so that we can traverse them as well.
                blocks_stack.extend(children)

        max_scores_cache.push_to_remote()

        return {
            'root': unicode(course.scope_ids.usage_id),
            'blocks': blocks_dict,
        }
コード例 #29
0
    def _aggregate_scores(self):
        return graders.aggregate_scores(self.problem_scores.values())
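A hedged sketch of the kind of object this one-liner might live on: a subsection wrapper that keeps per-problem scores keyed by block location. Everything except the graders.aggregate_scores call is an assumption made for illustration.

from collections import OrderedDict
from xmodule import graders   # assumed import path for the graders module used above

class SubsectionScoresSketch(object):
    """Illustrative container: collects problem scores, then totals them."""

    def __init__(self, display_name):
        self.display_name = display_name
        self.problem_scores = OrderedDict()    # block location -> problem Score

    def _aggregate_scores(self):
        # Same call as the example above: (all_total, graded_total) over the problem scores.
        return graders.aggregate_scores(self.problem_scores.values())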
コード例 #30
0
def summary(student, course, course_structure=None):
    """
    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.
    - None if the student does not have access to load the course module.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    """
    if course_structure is None:
        course_structure = get_course_blocks(student, course.location)
    if not len(course_structure):
        return ProgressSummary()
    scorable_locations = [
        block_key for block_key in course_structure
        if possibly_scored(block_key)
    ]

    with outer_atomic():
        scores_client = ScoresClient.create_for_locations(
            course.id, student.id, scorable_locations)

    # We need to import this here to avoid a circular dependency of the form:
    # XBlock --> submissions --> Django Rest Framework error strings -->
    # Django translation --> ... --> courseware --> submissions
    from submissions import api as sub_api  # installed from the edx-submissions repository
    with outer_atomic():
        submissions_scores = sub_api.get_scores(
            unicode(course.id), anonymous_id_for_user(student, course.id))

    # Check for gated content
    gated_content = gating_api.get_gated_content(course, student)

    chapters = []
    locations_to_weighted_scores = {}

    for chapter_key in course_structure.get_children(
            course_structure.root_block_usage_key):
        chapter = course_structure[chapter_key]
        sections = []
        for section_key in course_structure.get_children(chapter_key):
            if unicode(section_key) in gated_content:
                continue

            section = course_structure[section_key]

            graded = getattr(section, 'graded', False)
            scores = []

            for descendant_key in course_structure.post_order_traversal(
                    filter_func=possibly_scored,
                    start_node=section_key,
            ):
                descendant = course_structure[descendant_key]

                (correct, total) = get_score(
                    student,
                    descendant,
                    scores_client,
                    submissions_scores,
                )
                if correct is None and total is None:
                    continue

                weighted_location_score = Score(
                    correct, total, graded,
                    block_metadata_utils.display_name_with_default_escaped(
                        descendant), descendant.location)

                scores.append(weighted_location_score)
                locations_to_weighted_scores[
                    descendant.location] = weighted_location_score

            escaped_section_name = block_metadata_utils.display_name_with_default_escaped(
                section)
            section_total, _ = graders.aggregate_scores(
                scores, escaped_section_name)

            sections.append({
                'display_name': escaped_section_name,
                'url_name': block_metadata_utils.url_name_for_block(section),
                'scores': scores,
                'section_total': section_total,
                'format': getattr(section, 'format', ''),
                'due': getattr(section, 'due', None),
                'graded': graded,
            })

        chapters.append({
            'course': course.display_name_with_default_escaped,
            'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
            'url_name': block_metadata_utils.url_name_for_block(chapter),
            'sections': sections
        })

    return ProgressSummary(chapters, locations_to_weighted_scores,
                           course_structure.get_children)
コード例 #31
0
ファイル: grades.py プロジェクト: BeiLuoShiMen/edx-platform
def _progress_summary(student, request, course):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, student, course, depth=None
        )
        # TODO: We need the request to pass into here. If we could
        # forego that, our arguments would be simpler
        course_module = get_module_for_descriptor(student, request, course, field_data_cache, course.id)
        if not course_module:
            # This student must not have access to the course.
            return None

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []

        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module

                for module_descriptor in yield_dynamic_descriptor_descendents(section_module, module_creator):
                    course_id = course.id
                    (correct, total) = get_score(course_id, student, module_descriptor, module_creator)
                    if correct is None and total is None:
                        continue

                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

                scores.reverse()
                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'section_total': section_total,
                    'format': module_format,
                    'due': get_extended_due_date(section_module),
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    return chapters
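Because this variant returns the chapters list directly, a consumer can flatten it with no extra assumptions beyond the keys built above; a minimal sketch:

def render_progress_text(chapters):
    """Illustrative only: turn the chapters/sections structure returned above into plain text."""
    lines = []
    for chapter in chapters:
        lines.append(chapter['display_name'])
        for section in chapter['sections']:
            total = section['section_total']
            graded_note = " (graded)" if section['graded'] else ""
            lines.append("  %s: %s/%s%s" % (
                section['display_name'], total.earned, total.possible, graded_note))
    return "\n".join(lines)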
コード例 #32
0
ファイル: grades.py プロジェクト: marcore/edx-platform
def _progress_summary(student, course, course_structure=None):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.
    - None if the student does not have access to load the course module.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    """
    if course_structure is None:
        course_structure = get_course_blocks(student, course.location)
    if not len(course_structure):
        return None
    scorable_locations = [block_key for block_key in course_structure if possibly_scored(block_key)]

    with outer_atomic():
        scores_client = ScoresClient.create_for_locations(course.id, student.id, scorable_locations)

    # We need to import this here to avoid a circular dependency of the form:
    # XBlock --> submissions --> Django Rest Framework error strings -->
    # Django translation --> ... --> courseware --> submissions
    from submissions import api as sub_api  # installed from the edx-submissions repository
    with outer_atomic():
        submissions_scores = sub_api.get_scores(
            unicode(course.id), anonymous_id_for_user(student, course.id)
        )

    # Check for gated content
    gated_content = gating_api.get_gated_content(course, student)

    chapters = []
    locations_to_weighted_scores = {}

    for chapter_key in course_structure.get_children(course_structure.root_block_usage_key):
        chapter = course_structure[chapter_key]
        sections = []
        for section_key in course_structure.get_children(chapter_key):
            if unicode(section_key) in gated_content:
                continue

            section = course_structure[section_key]

            graded = getattr(section, 'graded', False)
            scores = []

            for descendant_key in course_structure.post_order_traversal(
                    filter_func=possibly_scored,
                    start_node=section_key,
            ):
                descendant = course_structure[descendant_key]

                (correct, total) = get_score(
                    student,
                    descendant,
                    scores_client,
                    submissions_scores,
                )
                if correct is None and total is None:
                    continue

                weighted_location_score = Score(
                    correct,
                    total,
                    graded,
                    block_metadata_utils.display_name_with_default_escaped(descendant),
                    descendant.location
                )

                scores.append(weighted_location_score)
                locations_to_weighted_scores[descendant.location] = weighted_location_score

            escaped_section_name = block_metadata_utils.display_name_with_default_escaped(section)
            section_total, _ = graders.aggregate_scores(scores, escaped_section_name)

            sections.append({
                'display_name': escaped_section_name,
                'url_name': block_metadata_utils.url_name_for_block(section),
                'scores': scores,
                'section_total': section_total,
                'format': getattr(section, 'format', ''),
                'due': getattr(section, 'due', None),
                'graded': graded,
            })

        chapters.append({
            'course': course.display_name_with_default_escaped,
            'display_name': block_metadata_utils.display_name_with_default_escaped(chapter),
            'url_name': block_metadata_utils.url_name_for_block(chapter),
            'sections': sections
        })

    return ProgressSummary(chapters, locations_to_weighted_scores, course_structure.get_children)
コード例 #33
0
ファイル: grades.py プロジェクト: singhularity/edx-platform
def _grade(student, request, course, keep_raw_scores, field_data_cache, scores_client):
    """
    Unwrapped version of "grade"

    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
      up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
      make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
      for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    if field_data_cache is None:
        with manual_transaction():
            field_data_cache = field_data_cache_for_grading(course, student)
    if scores_client is None:
        scores_client = ScoresClient.from_field_data_cache(field_data_cache)

    # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
    # scores that were registered with the submissions API, which for the moment
    # means only openassessment (edx-ora2)
    submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))
    max_scores_cache = MaxScoresCache.create_for_course(course)
    # For the moment, we have to get scorable_locations from field_data_cache
    # and not from scores_client, because scores_client is ignorant of things
    # in the submissions API. As a further refactoring step, submissions should
    # be hidden behind the ScoresClient.
    max_scores_cache.fetch_from_remote(field_data_cache.scorable_locations)

    grading_context = course.grading_context
    raw_scores = []

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            # some problems have state that is updated independently of interaction
            # with the LMS, so they need to always be scored. (E.g. foldit,
            # combinedopenended)
            should_grade_section = any(
                descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
            )

            # If there are no problems that always have to be regraded, check to
            # see if any of our locations are in the scores from the submissions
            # API. If scores exist, we have to calculate grades for this section.
            if not should_grade_section:
                should_grade_section = any(
                    descriptor.location.to_deprecated_string() in submissions_scores
                    for descriptor in section['xmoduledescriptors']
                )

            if not should_grade_section:
                should_grade_section = any(
                    descriptor.location in scores_client
                    for descriptor in section['xmoduledescriptors']
                )

            # If we haven't seen a single problem in the section, we don't have
            # to grade it at all! We can assume 0%
            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''creates an XModule instance given a descriptor'''
                    # TODO: We need the request to pass into here. If we could forego that, our arguments
                    # would be simpler
                    return get_module_for_descriptor(
                        student, request, descriptor, field_data_cache, course.id, course=course
                    )

                descendants = yield_dynamic_descriptor_descendants(section_descriptor, student.id, create_module)
                for module_descriptor in descendants:
                    (correct, total) = get_score(
                        student,
                        module_descriptor,
                        create_module,
                        scores_client,
                        submissions_scores,
                        max_scores_cache,
                    )
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:    # for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                        graded = False

                    scores.append(
                        Score(
                            correct,
                            total,
                            graded,
                            module_descriptor.display_name_with_default,
                            module_descriptor.location
                        )
                    )

                __, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name, None)

            #Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.info(
                    "Unable to grade a section with a total possible score of zero. " +
                    str(section_descriptor.location)
                )

        totaled_scores[section_format] = format_scores

    # Grading policy might be overridden by a CCX, need to reset it
    course.set_grading_policy(course.grading_policy)
    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    grade_summary['totaled_scores'] = totaled_scores   # make this available, eg for instructor download & debugging
    if keep_raw_scores:
        # way to get all RAW scores out to instructor
        # so grader can be double-checked
        grade_summary['raw_scores'] = raw_scores

    max_scores_cache.push_to_remote()

    return grade_summary
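grade_for_percentage itself is not shown in this listing; the sketch below captures the cutoff lookup it is assumed to perform (the highest letter whose cutoff the rounded percent meets, otherwise None). It is illustrative only, not the platform's implementation.

def grade_for_percentage_sketch(grade_cutoffs, percentage):
    """grade_cutoffs is assumed to map letter grades to minimum percentages, e.g. {'A': 0.9, 'B': 0.8, 'C': 0.6}."""
    letter_grade = None
    # Walk the cutoffs from highest to lowest and stop at the first one met.
    for letter, cutoff in sorted(grade_cutoffs.items(), key=lambda item: item[1], reverse=True):
        if percentage >= cutoff:
            letter_grade = letter
            break
    return letter_grade

assert grade_for_percentage_sketch({'A': 0.9, 'B': 0.8, 'C': 0.6}, 0.83) == 'B'
assert grade_for_percentage_sketch({'A': 0.9, 'B': 0.8, 'C': 0.6}, 0.50) is None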
コード例 #34
0
def _calculate_totaled_scores(
        student,
        grading_context_result,
        submissions_scores,
        scores_client,
        keep_raw_scores,
):
    """
    Returns a tuple of totaled scores and raw scores, which can be passed to the grader.
    """
    raw_scores = []
    totaled_scores = {}
    for section_format, sections in grading_context_result['all_graded_sections'].iteritems():
        format_scores = []
        for section_info in sections:
            section = section_info['section_block']
            section_name = block_metadata_utils.display_name_with_default(section)

            with outer_atomic():
                # Check to see if any of our locations are in the scores from the
                # submissions API. If scores exist, we have to calculate grades for this section.
                should_grade_section = any(
                    unicode(descendant.location) in submissions_scores
                    for descendant in section_info['scored_descendants']
                )

                if not should_grade_section:
                    should_grade_section = any(
                        descendant.location in scores_client
                        for descendant in section_info['scored_descendants']
                    )

                # If we haven't seen a single problem in the section, we don't have
                # to grade it at all! We can assume 0%
                if should_grade_section:
                    scores = []

                    for descendant in section_info['scored_descendants']:

                        (correct, total) = get_score(
                            student,
                            descendant,
                            scores_client,
                            submissions_scores,
                        )
                        if correct is None and total is None:
                            continue

                        if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                            if total > 1:
                                correct = random.randrange(max(total - 2, 1), total + 1)
                            else:
                                correct = total

                        graded = descendant.graded
                        if not total > 0:
                            # We simply cannot grade a problem that is 12/0, because we might need it as a percentage
                            graded = False

                        scores.append(
                            Score(
                                correct,
                                total,
                                graded,
                                block_metadata_utils.display_name_with_default_escaped(descendant),
                                descendant.location
                            )
                        )

                    __, graded_total = graders.aggregate_scores(scores, section_name)
                    if keep_raw_scores:
                        raw_scores += scores
                else:
                    graded_total = Score(0.0, 1.0, True, section_name, None)

                # Add the graded total to totaled_scores
                if graded_total.possible > 0:
                    format_scores.append(graded_total)
                else:
                    log.info(
                        "Unable to grade a section with a total possible score of zero. " +
                        str(section.location)
                    )

        totaled_scores[section_format] = format_scores

    return totaled_scores, raw_scores
コード例 #35
0
    def _aggregate_scores(self):
        return graders.aggregate_scores(self.problem_scores.values())
コード例 #36
0
ファイル: grades.py プロジェクト: geekaia/edx-platform
def _progress_summary(student, request, course):
    """
    Unwrapped version of "progress_summary".

    This pulls a summary of all problems in the course.

    Returns
    - courseware_summary is a summary of all sections with problems in the course.
    It is organized as an array of chapters, each containing an array of sections,
    each containing an array of scores. This contains information for graded and
    ungraded problems, and is good for displaying a course summary with due dates,
    etc.

    Arguments:
        student: A User object for the student to grade
        course: A Descriptor containing the course to grade

    If the student does not have access to load the course module, this function
    will return None.

    """
    with manual_transaction():
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, student, course, depth=None
        )
        # TODO: We need the request to pass into here. If we could
        # forego that, our arguments would be simpler
        course_module = get_module_for_descriptor(student, request, course, field_data_cache, course.id)
        if not course_module:
            # This student must not have access to the course.
            return None

    submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))

    chapters = []
    # Don't include chapters that aren't displayable (e.g. due to error)
    for chapter_module in course_module.get_display_items():
        # Skip if the chapter is hidden
        if chapter_module.hide_from_toc:
            continue

        sections = []

        for section_module in chapter_module.get_display_items():
            # Skip if the section is hidden
            with manual_transaction():
                if section_module.hide_from_toc:
                    continue

                graded = section_module.graded
                scores = []

                module_creator = section_module.xmodule_runtime.get_module
                inforExerc = []

                # Collect some extra information so that we can capture what the student entered:
                # the number of attempts, which option the user chose, or what value was typed in.

                for module_descriptor in yield_dynamic_descriptor_descendents(section_module, module_creator):
                    course_id = course.id
                    (correct, total) = get_score(
                        course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores
                    )
                    tentativas, valorDigitado, idProblem = get_InforExerc(
                        course_id, student, module_descriptor
                    )

                    if correct is None and total is None:
                        continue

                    print "USUARIO: ", student
                    print "TENTATIVAS ", tentativas, " VALOR DIGITADO: ", valorDigitado
                    print "CORRECT: ", correct, " total ", total
                    print "IDPROBLEM: ", idProblem

                    vals = []
                    vals.append(tentativas)
                    vals.append(valorDigitado)
                    vals.append(idProblem)
                    inforExerc.append(vals)

                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

                scores.reverse()
                inforExerc.reverse()

                section_total, _ = graders.aggregate_scores(
                    scores, section_module.display_name_with_default)

                module_format = section_module.format if section_module.format is not None else ''
                sections.append({
                    'display_name': section_module.display_name_with_default,
                    'url_name': section_module.url_name,
                    'scores': scores,
                    'inforExerc': inforExerc,
                    'section_total': section_total,
                    'format': module_format,
                    'due': get_extended_due_date(section_module),
                    'graded': graded,
                })

        chapters.append({
            'course': course.display_name_with_default,
            'display_name': chapter_module.display_name_with_default,
            'url_name': chapter_module.url_name,
            'sections': sections
        })

    return chapters
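In this fork, each section dict carries an inforExerc list built in lockstep with scores (one entry per scored problem, both lists reversed together), so a consumer can re-join them positionally. A hedged sketch of that, assuming only the keys and the (attempts, submitted value, problem id) ordering built above:

def iter_problem_details(section):
    """Illustrative only: pair each Score in a section dict with its inforExerc entry."""
    for score, (attempts, submitted_value, problem_id) in zip(section['scores'], section['inforExerc']):
        yield {
            'problem_id': problem_id,
            'attempts': attempts,
            'submitted_value': submitted_value,
            'earned': score.earned,
            'possible': score.possible,
        }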