def test_invalid_scopes(self):
    for scope in (Scope(user=True, block=BlockScope.DEFINITION),
                  Scope(user=False, block=BlockScope.TYPE),
                  Scope(user=False, block=BlockScope.ALL)):
        key = DjangoKeyValueStore.Key(scope, None, None, 'field')

        self.assertRaises(InvalidScopeError, self.kvs.get, key)
        self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
        self.assertRaises(InvalidScopeError, self.kvs.delete, key)
        self.assertRaises(InvalidScopeError, self.kvs.has, key)
        self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
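# For contrast, a minimal sketch of a round trip through a scope the store
# does accept. This assumes the same test fixture (self.kvs) and that
# Scope.user_state is among the scopes DjangoKeyValueStore supports; the
# self.user and self.descriptor attributes below are hypothetical fixture
# names, not part of the original test.
def test_valid_user_state_scope_sketch(self):
    key = DjangoKeyValueStore.Key(Scope.user_state, self.user.id, self.descriptor.location, 'field')
    self.kvs.set(key, 'value')
    self.assertTrue(self.kvs.has(key))
    self.assertEqual('value', self.kvs.get(key))
    self.kvs.delete(key)
    self.assertFalse(self.kvs.has(key))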
def get_score(course_id, user, problem_descriptor, module_creator, field_data_cache):
    """
    Return the score for a user on a problem, as a tuple (correct, total).
    e.g. (5, 7) if you got 5 out of 7 points.

    If this problem doesn't have a score, or we couldn't load it, returns (None, None).

    user: a Student object
    problem_descriptor: an XModuleDescriptor
    module_creator: a function that takes a descriptor, and returns the corresponding
        XModule for this user. Can return None if the user doesn't have access, or if
        something else went wrong.
    field_data_cache: a FieldDataCache
    """
    if not user.is_authenticated():
        return (None, None)

    # Some problems have state that is updated independently of interaction
    # with the LMS, so they need to always be scored. (E.g. foldit.)
    if problem_descriptor.always_recalculate_grades:
        problem = module_creator(problem_descriptor)
        if problem is None:
            return (None, None)
        score = problem.get_score()
        if score is not None:
            return (score['score'], score['total'])
        else:
            return (None, None)

    if not problem_descriptor.has_score:
        # These are not problems, and do not have a score
        return (None, None)

    # Create a fake KeyValueStore key to pull out the StudentModule
    key = DjangoKeyValueStore.Key(
        Scope.user_state,
        user.id,
        problem_descriptor.location,
        None
    )
    student_module = field_data_cache.find(key)

    if student_module is not None and student_module.max_grade is not None:
        correct = student_module.grade if student_module.grade is not None else 0
        total = student_module.max_grade
    else:
        # If the problem was not in the cache, or hasn't been graded yet,
        # we need to instantiate the problem.
        # Otherwise, the max score (cached in student_module) won't be available.
        problem = module_creator(problem_descriptor)
        if problem is None:
            return (None, None)

        correct = 0.0
        total = problem.max_score()

        # Problem may be an error module (if something in the problem builder failed),
        # in which case total might be None.
        if total is None:
            return (None, None)

    # Now we re-weight the problem, if a weight is specified
    weight = problem_descriptor.weight
    if weight is not None:
        if total == 0:
            log.exception("Cannot reweight a problem with zero total points. Problem: " + str(student_module))
            return (correct, total)
        correct = correct * weight / total
        total = weight

    return (correct, total)
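# A quick worked example of the re-weighting step in get_score, using
# hypothetical numbers (5 of 7 points earned, weight of 10). Not part of this
# module; a sketch for illustration only.
def _reweight_example():
    correct, total, weight = 5.0, 7.0, 10.0
    if total == 0:
        # mirrors the guard above: reweighting would otherwise divide by zero
        return (correct, total)
    correct = correct * weight / total  # 5.0 * 10.0 / 7.0 == 7.142857...
    total = weight                      # the problem is now scored out of 10
    return (correct, total)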
def grade(student, request, course, field_data_cache=None, keep_raw_scores=False):
    """
    This grades a student as quickly as possible. It returns the
    output from the course grader, augmented with the final letter
    grade. The keys in the output are:

    course: a CourseDescriptor

    - grade : A final letter grade.
    - percent : The final percent for the class (rounded up).
    - section_breakdown : A breakdown of each section that makes
        up the grade. (For display)
    - grade_breakdown : A breakdown of the major components that
        make up the final grade. (For display)
    - keep_raw_scores : if True, then value for key 'raw_scores' contains scores
        for every graded module

    More information on the format is in the docstring for CourseGrader.
    """
    grading_context = course.grading_context
    raw_scores = []

    if field_data_cache is None:
        field_data_cache = FieldDataCache(grading_context['all_descriptors'], course.id, student)

    totaled_scores = {}
    # This next complicated loop is just to collect the totaled_scores, which is
    # passed to the grader
    for section_format, sections in grading_context['graded_sections'].iteritems():
        format_scores = []
        for section in sections:
            section_descriptor = section['section_descriptor']
            section_name = section_descriptor.display_name_with_default

            should_grade_section = False
            # If we haven't seen a single problem in the section, we don't have to
            # grade it at all! We can assume 0%.
            for moduledescriptor in section['xmoduledescriptors']:
                # Some problems have state that is updated independently of interaction
                # with the LMS, so they need to always be scored. (E.g. foldit.)
                if moduledescriptor.always_recalculate_grades:
                    should_grade_section = True
                    break

                # Create a fake key to pull out a StudentModule object from the FieldDataCache
                key = DjangoKeyValueStore.Key(
                    Scope.user_state,
                    student.id,
                    moduledescriptor.location,
                    None
                )
                if field_data_cache.find(key):
                    should_grade_section = True
                    break

            if should_grade_section:
                scores = []

                def create_module(descriptor):
                    '''Creates an XModule instance given a descriptor.'''
                    # TODO: We need the request to pass into here. If we could forego
                    # that, our arguments would be simpler.
                    return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)

                for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):

                    (correct, total) = get_score(course.id, student, module_descriptor, create_module, field_data_cache)
                    if correct is None and total is None:
                        continue

                    if settings.GENERATE_PROFILE_SCORES:  # for debugging!
                        if total > 1:
                            correct = random.randrange(max(total - 2, 1), total + 1)
                        else:
                            correct = total

                    graded = module_descriptor.graded
                    if not total > 0:
                        # We simply cannot grade a problem that is 12/0, because we
                        # might need it as a percentage
                        graded = False

                    scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))

                _, graded_total = graders.aggregate_scores(scores, section_name)
                if keep_raw_scores:
                    raw_scores += scores
            else:
                graded_total = Score(0.0, 1.0, True, section_name)

            # Add the graded total to totaled_scores
            if graded_total.possible > 0:
                format_scores.append(graded_total)
            else:
                log.exception("Unable to grade a section with a total possible score of zero. " +
                              str(section_descriptor.location))

        totaled_scores[section_format] = format_scores

    grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)

    # We round the grade here, to make sure that the grade is a whole percentage and
    # doesn't get displayed differently than it gets graded.
    grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100

    letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
    grade_summary['grade'] = letter_grade
    # Make this available, e.g. for instructor download & debugging
    grade_summary['totaled_scores'] = totaled_scores
    if keep_raw_scores:
        # Way to get all RAW scores out to the instructor, so the grader can be double-checked
        grade_summary['raw_scores'] = raw_scores

    return grade_summary