def test_split_user_partitions(self):
    """
    Verify that get_split_user_partitions returns only the partitions
    using the "random" scheme, preserving their original order.
    """
    random_one = UserPartition(
        0, 'first_partition', 'First Partition',
        [Group("0", 'alpha'), Group("1", 'beta')],
        self.random_scheme,
    )
    random_two = UserPartition(
        0, 'second_partition', 'Second Partition',
        [Group("4", 'zeta'), Group("5", 'omega')],
        self.random_scheme,
    )
    # Only UserPartitions with scheme "random" will be returned as available options,
    # so this one must be filtered out.
    non_random = UserPartition(
        1, 'non_random_partition', 'Will Not Be Returned',
        [Group("1", 'apple'), Group("2", 'banana')],
        self.non_random_scheme,
    )
    candidates = [random_one, non_random, random_two]
    self.assertEqual(
        [random_one, random_two],
        get_split_user_partitions(candidates),
    )
def _has_group_access(descriptor, user, course_key):
    """
    This function returns a boolean indicating whether or not `user` has
    sufficient group memberships to "load" a block (the `descriptor`).

    Arguments:
        descriptor: the block being checked; presumably an XBlock descriptor —
            must expose `user_partitions`, `merged_group_access` and
            `_get_user_partition` (TODO confirm exact type at call sites).
        user: the user whose access is being checked.
        course_key: key of the course the block belongs to; passed through to
            the partition scheme when resolving the user's group.
    """
    if len(descriptor.user_partitions) == len(get_split_user_partitions(descriptor.user_partitions)):
        # Short-circuit the process, since there are no defined user partitions that are not
        # user_partitions used by the split_test module. The split_test module handles its own access
        # via updating the children of the split_test module.
        return True

    # use merged_group_access which takes group access on the block's
    # parents / ancestors into account
    merged_access = descriptor.merged_group_access
    # check for False in merged_access, which indicates that at least one
    # partition's group list excludes all students.
    if False in merged_access.values():
        # NOTE(review): exc_info=True attaches no traceback here since no
        # exception is active — presumably copied from the handlers below; confirm.
        log.warning("Group access check excludes all students, access will be denied.", exc_info=True)
        return False

    # resolve the partition IDs in group_access to actual
    # partition objects, skipping those which contain empty group directives.
    # if a referenced partition could not be found, access will be denied.
    try:
        partitions = [
            descriptor._get_user_partition(partition_id)  # pylint:disable=protected-access
            for partition_id, group_ids in merged_access.items()
            if group_ids is not None
        ]
    except NoSuchUserPartitionError:
        log.warning("Error looking up user partition, access will be denied.", exc_info=True)
        return False

    # next resolve the group IDs specified within each partition
    partition_groups = []
    try:
        for partition in partitions:
            groups = [
                partition.get_group(group_id)
                for group_id in merged_access[partition.id]
            ]
            if groups:
                partition_groups.append((partition, groups))
    except NoSuchUserPartitionGroupError:
        log.warning("Error looking up referenced user partition group, access will be denied.", exc_info=True)
        return False

    # look up the user's group for each partition
    user_groups = {}
    for partition, groups in partition_groups:
        user_groups[partition.id] = partition.scheme.get_group_for_user(
            course_key,
            user,
            partition
        )

    # finally: check that the user has a satisfactory group assignment
    # for each partition.
    if not all(user_groups.get(partition.id) in groups for partition, groups in partition_groups):
        return False

    # all checks passed.
    return True
def get_split_test_partitions_with_usage(store, course):
    """
    Returns json split_test group configurations updated with usage information.
    """
    usage_by_partition_id = GroupConfiguration.get_content_experiment_usage_info(store, course)

    def _with_usage(partition):
        # Serialize the partition and attach where it is used (empty list if unused).
        as_json = partition.to_json()
        as_json['usage'] = usage_by_partition_id.get(partition.id, [])
        return as_json

    return [
        _with_usage(partition)
        for partition in get_split_user_partitions(course.user_partitions)
    ]
def test_split_user_partitions(self):
    """
    Check that get_split_user_partitions keeps exactly the "random"-scheme
    partitions, in the order they were supplied.
    """
    expected = [
        UserPartition(
            0, "first_partition", "First Partition",
            [Group("0", "alpha"), Group("1", "beta")],
            self.random_scheme,
        ),
        UserPartition(
            0, "second_partition", "Second Partition",
            [Group("4", "zeta"), Group("5", "omega")],
            self.random_scheme,
        ),
    ]
    # Only UserPartitions with scheme "random" will be returned as available options;
    # a partition with any other scheme is dropped.
    excluded = UserPartition(
        1, "non_random_partition", "Will Not Be Returned",
        [Group("1", "apple"), Group("2", "banana")],
        self.non_random_scheme,
    )
    all_partitions = [expected[0], excluded, expected[1]]
    self.assertEqual(expected, get_split_user_partitions(all_partitions))
def course_experiments(self):
    """Return the "random"-scheme (content experiment) partitions of the course."""
    partitions = self.course.user_partitions
    return get_split_user_partitions(partitions)
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files
    can be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.

    Arguments:
        _xmodule_instance_args: dict-like with a 'task_id' key (or None) — used
            only for log messages.
        _entry_id: InstructorTask id, used only for log messages.
        course_id: course key identifying the course to grade.
        _task_input: task input payload, used only for log messages.
        action_name: task type label, used for TaskProgress and logging.

    Returns the final TaskProgress state (result of update_task_state).
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # update cached task state every N students
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 1000 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_enrolled_students
            )

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # The header row is derived from the first successfully graded
                # student's section breakdown — assumes every student shares the
                # same section labels (see note before row_percents below).
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header + group_configs_header
                )

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _has_group_access(descriptor, user, course_key):
    """
    This function returns a boolean indicating whether or not `user` has
    sufficient group memberships to "load" a block (the `descriptor`).

    Arguments:
        descriptor: the block being checked; presumably an XBlock descriptor —
            must expose `user_partitions`, `merged_group_access` and
            `_get_user_partition` (TODO confirm exact type at call sites).
        user: the user whose access is being checked.
        course_key: key of the course the block belongs to; passed to the
            partition scheme when resolving the user's group.

    Returns ACCESS_GRANTED or ACCESS_DENIED.
    """
    if len(descriptor.user_partitions) == len(
            get_split_user_partitions(descriptor.user_partitions)):
        # Short-circuit the process, since there are no defined user partitions that are not
        # user_partitions used by the split_test module. The split_test module handles its own access
        # via updating the children of the split_test module.
        return ACCESS_GRANTED

    # use merged_group_access which takes group access on the block's
    # parents / ancestors into account
    merged_access = descriptor.merged_group_access
    # check for False in merged_access, which indicates that at least one
    # partition's group list excludes all students.
    if False in merged_access.values():
        # NOTE(review): exc_info=True attaches no traceback here since no
        # exception is active — presumably copied from the handlers below; confirm.
        log.warning(
            "Group access check excludes all students, access will be denied.",
            exc_info=True)
        return ACCESS_DENIED

    # resolve the partition IDs in group_access to actual
    # partition objects, skipping those which contain empty group directives.
    # If a referenced partition could not be found, it will be denied
    # If the partition is found but is no longer active (meaning it's been disabled)
    # then skip the access check for that partition.
    partitions = []
    for partition_id, group_ids in merged_access.items():
        try:
            partition = descriptor._get_user_partition(partition_id)  # pylint: disable=protected-access
            if partition.active:
                if group_ids is not None:
                    partitions.append(partition)
            else:
                log.debug(
                    "Skipping partition with ID %s in course %s because it is no longer active",
                    partition.id, course_key)
        except NoSuchUserPartitionError:
            log.warning(
                "Error looking up user partition, access will be denied.",
                exc_info=True)
            return ACCESS_DENIED

    # next resolve the group IDs specified within each partition
    partition_groups = []
    try:
        for partition in partitions:
            groups = [
                partition.get_group(group_id)
                for group_id in merged_access[partition.id]
            ]
            if groups:
                partition_groups.append((partition, groups))
    except NoSuchUserPartitionGroupError:
        log.warning(
            "Error looking up referenced user partition group, access will be denied.",
            exc_info=True)
        return ACCESS_DENIED

    # look up the user's group for each partition
    user_groups = {}
    for partition, groups in partition_groups:
        user_groups[partition.id] = partition.scheme.get_group_for_user(
            course_key,
            user,
            partition,
        )

    # finally: check that the user has a satisfactory group assignment
    # for each partition.
    if not all(
            user_groups.get(partition.id) in groups
            for partition, groups in partition_groups):
        return ACCESS_DENIED

    # all checks passed.
    return ACCESS_GRANTED
def upload_user_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, for given usernames generates a grades CSV file,
    and store using a `ReportStore`. Once created, the files can be accessed by
    instantiating another `ReportStore` (via `ReportStore.from_config()`) and
    calling `link_for()` on it.

    Unenrolled users and unknown usernames are stored in *_err_*.csv

    This task is very close to the .upload_grades_csv from instructor_tasks.task_helper
    The difference is that we filter enrolled students against requested usernames
    and we push info about this into PLP

    Arguments:
        _xmodule_instance_args: dict-like with a 'task_id' key (or None) — used
            only for log messages.
        _entry_id: InstructorTask id, used only for log messages.
        course_id: course key identifying the course to grade.
        _task_input: dict with 'extended_kwargs_id', 'requester_id' and an
            optional 'callback_url' for the PLP push.
        action_name: task type label, used for TaskProgress and logging.

    Returns the final TaskProgress state (result of update_task_state).
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # update cached task state every N students

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    extended_kwargs_id = _task_input.get("extended_kwargs_id")
    extended_kwargs = InstructorTaskExtendedKwargs.get_kwargs_for_id(extended_kwargs_id)
    usernames = extended_kwargs.get("usernames", None)
    err_rows = [["id", "username", "error_msg"]]
    if usernames is None:
        # Typos fixed ("occured", "usersnames"); this message also ends up in the error CSV.
        message = "Error occurred during edx task execution: no usernames in InstructorTaskExtendedKwargs."
        # BUG FIX: the original call had two '%s' placeholders but supplied only
        # one argument, so the logging call itself failed to format.
        TASK_LOG.error(u'%s, Task type: %s, %s', task_info_string, action_name, message)
        err_rows.append(["-1", "__", message])
        usernames = []

    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    enrolled_students = enrolled_students.filter(username__in=usernames)
    total_enrolled_students = enrolled_students.count()
    requester_id = _task_input.get("requester_id")
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)
    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    current_step = {'step': 'Calculating Grades'}
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )

    found_students = User.objects.filter(username__in=usernames)
    # Report requested usernames that don't match any user at all.
    if len(found_students) != len(usernames):
        found_students_usernames = [x.username for x in found_students]
        for username in usernames:
            if username not in found_students_usernames:
                err_rows.append([-1, username, "invalid_username"])
    # Report users that exist but are not enrolled in this course.
    # BUG FIX: the original guarded this with `found_students != enrolled_students`,
    # but QuerySets compare by identity so that guard was always true; computing
    # the difference directly is equivalent (an empty diff appends nothing).
    # The original inner `if u in diff` was a tautology and is dropped.
    for not_enrolled in found_students.exclude(id__in=enrolled_students):
        err_rows.append([not_enrolled.id, not_enrolled.username, "enrollment_for_username_not_found"])

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    graded_assignments = course.grading.graded_assignments(course_id)
    grade_header = course.grading.grade_header(graded_assignments)

    rows.append(
        ["Student ID", "Email", "Username", "Last Name", "First Name", "Second Name", "Grade", "Grade Percent"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        certificate_info_header
    )
    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if not course_grade:
            # An empty course_grade means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
            continue

        # We were able to successfully grade this student for this course.
        task_progress.succeeded += 1
        cohorts_group_name = []
        if course_is_cohorted:
            group = get_cohort(student, course_id, assign=False)
            cohorts_group_name.append(group.name if group else '')

        group_configs_group_names = []
        for partition in experiment_partitions:
            group = PartitionService(course_id).get_group(student, partition, assign=False)
            group_configs_group_names.append(group.name if group else '')

        team_name = []
        if teams_enabled:
            try:
                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                team_name.append(membership.team.name)
            except CourseTeamMembership.DoesNotExist:
                team_name.append('')

        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
            student,
            course_id,
            enrollment_mode
        )
        certificate_info = certificate_info_for_user(
            student,
            course_id,
            course_grade.letter_grade,
            student.id in whitelisted_user_ids
        )
        # 'second_name' is stored inside the profile's JSON-encoded 'goals'
        # field; tolerate profiles whose goals are not valid JSON.
        second_name = ''
        try:
            up = UserProfile.objects.get(user=student)
            if up.goals:
                second_name = json.loads(up.goals).get('second_name', '')
        except ValueError:
            pass
        if certificate_info[0] == 'Y':
            TASK_LOG.info(
                u'Student is marked eligible_for_certificate'
                u'(user=%s, course_id=%s, grade_percent=%s gradecutoffs=%s, allow_certificate=%s, is_whitelisted=%s)',
                student,
                course_id,
                course_grade.percent,
                course.grade_cutoffs,
                student.profile.allow_certificate,
                student.id in whitelisted_user_ids
            )
        grade_results = course.grading.grade_results(graded_assignments, course_grade)
        grade_results = list(chain.from_iterable(grade_results))

        rows.append(
            [student.id, student.email, student.username, student.last_name, student.first_name, second_name,
             course_grade.percent, course_grade.percent * 100] +
            grade_results + cohorts_group_name + group_configs_group_names + team_name +
            [enrollment_mode] + [verification_status] + certificate_info
        )
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload; a random suffix keeps report names unique per run.
    custom_grades_download = get_custom_grade_config()
    report_hash_unique_hash = hex(random.getrandbits(32))[2:]
    report_name = 'plp_grade_users_report_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    err_report_name = 'plp_grade_users_report_err_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    upload_csv_to_report_store(rows, report_name, course_id, start_date, config_name=custom_grades_download)

    # If there are any error rows (don't count the header), write them out as well
    has_errors = len(err_rows) > 1
    if has_errors:
        upload_csv_to_report_store(err_rows, err_report_name, course_id, start_date,
                                   config_name=custom_grades_download)

    # Push the resulting report URLs back to PLP, if a callback was requested.
    callback_url = _task_input.get("callback_url", None)
    if callback_url:
        report_store = ReportStore.from_config(config_name=custom_grades_download)
        files_urls_pairs = report_store.links_for(course_id)
        find_by_name = lambda name: [url for filename, url in files_urls_pairs if name in filename][0]
        try:
            csv_url = find_by_name(report_name)
            csv_err_url = find_by_name(err_report_name) if has_errors else None
            PlpApiClient().push_grade_api_result(callback_url, csv_url, csv_err_url)
        except Exception as e:  # pylint: disable=broad-except
            # Best effort: a failed PLP push must not fail the grading task itself.
            TASK_LOG.error("Failed push to PLP:{}".format(str(e)))

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_user_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, for given usernames generate a grades CSV file,
    and store using a `ReportStore`. Once created, the files can be accessed
    by instantiating another `ReportStore` (via `ReportStore.from_config()`)
    and calling `link_for()` on it. Writes are buffered, so we'll never write
    part of a CSV file to S3 -- i.e. any files that are visible in ReportStore
    will be complete ones.

    Unenrolled users and unknown usernames are store in *_err_*.csv

    Arguments:
        _xmodule_instance_args: dict-like with a 'task_id' key (or None) — used
            only for log messages.
        _entry_id: InstructorTask id, used only for log messages.
        course_id: course key identifying the course to grade.
        _task_input: dict with 'usernames' (list) and 'requester_id'.
        action_name: task type label, used for TaskProgress and logging.

    Returns the final TaskProgress state (result of update_task_state).
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # update cached task state every N students
    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    usernames = _task_input.get("usernames")
    found_students = User.objects.filter(username__in=usernames)
    enrolled_students = enrolled_students.filter(username__in=usernames)
    requester_id = _task_input.get("requester_id")
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input)
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [
        u'Experiment Group ({})'.format(partition.name)
        for partition in experiment_partitions
    ]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    # Report requested usernames that don't match any user at all.
    if len(found_students) != len(usernames):
        found_students_usernames = [x.username for x in found_students]
        for username in usernames:
            if username not in found_students_usernames:
                err_rows.append([-1, username, "invalid_username"])
    # Report users that exist but are not enrolled in this course.
    # BUG FIX: the original guarded this with `found_students != enrolled_students`,
    # but QuerySets compare by identity so that guard was always true; computing
    # the difference directly is equivalent (an empty diff appends nothing).
    # The original inner `if u in diff` was a tautology and is dropped.
    for not_enrolled in found_students.exclude(id__in=enrolled_students):
        err_rows.append([not_enrolled.id, not_enrolled.username, "enrollment_for_username_not_found"])

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students)
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students)

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                header = [
                    section['label']
                    for section in gradeset[u'section_breakdown']
                ]
                rows.append(["id", "email", "username", "grade"] + header +
                            cohorts_header + group_configs_header + teams_header +
                            ['Enrollment Track', 'Verification Status'] +
                            certificate_info_header)

            # BUG FIX: the default for a section missing 'percent' was 0.1;
            # every sibling report task uses 0.0, matching the "assume it's a 0"
            # comment below.
            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            team_name = []
            if teams_enabled:
                try:
                    membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                    team_name.append(membership.team.name)
                except CourseTeamMembership.DoesNotExist:
                    team_name.append('')

            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student,
                course_id,
                enrollment_mode)
            certificate_info = certificate_info_for_user(
                student,
                course_id,
                gradeset['grade'],
                student.id in whitelisted_user_ids)

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append([
                student.id, student.email, student.username, gradeset['percent']
            ] + row_percents + cohorts_group_name + group_configs_group_names +
                        team_name + [enrollment_mode] + [verification_status] +
                        certificate_info)
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students)

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    custom_grades_download = get_custom_grade_config()
    upload_csv_to_report_store(rows,
                               'grade_users_report_id_{}'.format(requester_id),
                               course_id,
                               start_date,
                               config_name=custom_grades_download)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(
            err_rows,
            'grade_users_report_err_id_{}'.format(requester_id),
            course_id,
            start_date,
            config_name=custom_grades_download)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files
    can be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.

    Arguments:
        _xmodule_instance_args: dict-like with a 'task_id' key (or None) — used
            only for log messages.
        _entry_id: InstructorTask id, used only for log messages.
        course_id: course key identifying the course to grade.
        _task_input: task input payload, used only for log messages.
        action_name: task type label, used for TaskProgress and logging.

    Returns the final TaskProgress state (result of update_task_state).
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100  # update cached task state every N students
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input)
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [
        u'Experiment Group ({})'.format(partition.name)
        for partition in experiment_partitions
    ]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students)
    for student, gradeset, err_msg in iterate_grades_for(
            course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 1000 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_enrolled_students)

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # The header row is derived from the first successfully graded
                # student's section breakdown — assumes every student shares
                # the same section labels (see note before row_percents below).
                header = [
                    section['label']
                    for section in gradeset[u'section_breakdown']
                ]
                rows.append(["id", "email", "username", "grade"] + header +
                            cohorts_header + group_configs_header)

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append([
                student.id, student.email, student.username, gradeset['percent']
            ] + row_percents + cohorts_group_name + group_configs_group_names)
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students)

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
def _has_group_access(descriptor, user, course_key):
    """
    This function returns a boolean indicating whether or not `user` has
    sufficient group memberships to "load" a block (the `descriptor`).

    Arguments:
        descriptor: the block being checked; presumably an XBlock descriptor —
            must expose `user_partitions`, `merged_group_access` and
            `_get_user_partition` (TODO confirm exact type at call sites).
        user: the user whose access is being checked.
        course_key: key of the course the block belongs to; used for the staff
            role lookup and passed to the partition scheme.

    Returns ACCESS_GRANTED or ACCESS_DENIED.
    """
    if len(descriptor.user_partitions) == len(get_split_user_partitions(descriptor.user_partitions)):
        # Short-circuit the process, since there are no defined user partitions that are not
        # user_partitions used by the split_test module. The split_test module handles its own access
        # via updating the children of the split_test module.
        return ACCESS_GRANTED

    # Allow staff and instructors roles group access, as they are not masquerading as a student.
    if get_user_role(user, course_key) in ['staff', 'instructor']:
        return ACCESS_GRANTED

    # use merged_group_access which takes group access on the block's
    # parents / ancestors into account
    merged_access = descriptor.merged_group_access
    # check for False in merged_access, which indicates that at least one
    # partition's group list excludes all students.
    if False in merged_access.values():
        # NOTE(review): exc_info=True attaches no traceback here since no
        # exception is active — presumably copied from the handlers below; confirm.
        log.warning("Group access check excludes all students, access will be denied.", exc_info=True)
        return ACCESS_DENIED

    # resolve the partition IDs in group_access to actual
    # partition objects, skipping those which contain empty group directives.
    # If a referenced partition could not be found, it will be denied
    # If the partition is found but is no longer active (meaning it's been disabled)
    # then skip the access check for that partition.
    partitions = []
    for partition_id, group_ids in merged_access.items():
        try:
            partition = descriptor._get_user_partition(partition_id)  # pylint: disable=protected-access
            if partition.active:
                if group_ids is not None:
                    partitions.append(partition)
            else:
                log.debug(
                    "Skipping partition with ID %s in course %s because it is no longer active",
                    partition.id, course_key
                )
        except NoSuchUserPartitionError:
            log.warning("Error looking up user partition, access will be denied.", exc_info=True)
            return ACCESS_DENIED

    # next resolve the group IDs specified within each partition
    partition_groups = []
    try:
        for partition in partitions:
            groups = [
                partition.get_group(group_id)
                for group_id in merged_access[partition.id]
            ]
            if groups:
                partition_groups.append((partition, groups))
    except NoSuchUserPartitionGroupError:
        log.warning("Error looking up referenced user partition group, access will be denied.", exc_info=True)
        return ACCESS_DENIED

    # look up the user's group for each partition
    user_groups = {}
    for partition, groups in partition_groups:
        user_groups[partition.id] = partition.scheme.get_group_for_user(
            course_key,
            user,
            partition,
        )

    # finally: check that the user has a satisfactory group assignment
    # for each partition.
    if not all(user_groups.get(partition.id) in groups for partition, groups in partition_groups):
        return ACCESS_DENIED

    # all checks passed.
    return ACCESS_GRANTED