Code Example #1
def test_nonexistent_course(self):
    """If the course we want to get grades for does not exist, a `Http404`
    should be raised. This is a horrible crossing of abstraction boundaries
    and should be fixed, but for now we're just testing the behavior. :-("""
    with self.assertRaises(Http404):
        gradeset_results = iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
        gradeset_results.next()
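
The call to `gradeset_results.next()` inside the `assertRaises` block matters: `iterate_grades_for` is a generator, so none of its body (including the course lookup that raises `Http404`) runs until the iterator is first advanced. Below is a minimal, self-contained sketch of that laziness using a hypothetical `iterate_widgets_for`, not the real edx-platform function:

def iterate_widgets_for(store, key, items):
    # Nothing in here executes until the generator is advanced, which is
    # why the test above has to call .next() (next() in Python 3) to
    # trigger the lookup and the exception.
    if key not in store:
        raise KeyError(key)
    for item in items:
        yield item, store[key]

gen = iterate_widgets_for({}, "does-not-exist", [])   # no error yet
try:
    next(gen)                                         # lookup runs now and raises
except KeyError:
    print("raised only on first iteration")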
Code Example #2
def ccx_grades_csv(request, course):
    """
    Download grades as CSV.
    """
    # Need course module for overrides to function properly
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, course, depth=2)
    course = get_module_for_descriptor(request.user, request, course,
                                       field_data_cache, course.id)
    ccx = get_ccx_for_coach(course, request.user)
    with ccx_context(ccx):
        # The grading policy for the MOOC is probably already cached.  We need
        # to make sure we have the CCX grading policy loaded.
        course._field_data_cache = {}  # pylint: disable=protected-access
        course.set_grading_policy(course.grading_policy)

        enrolled_students = User.objects.filter(
            ccxmembership__ccx=ccx, ccxmembership__active=1).order_by(
                'username').select_related("profile")
        grades = iterate_grades_for(course, enrolled_students)

        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    header = [
                        section['label'].encode('utf-8')
                        for section in gradeset[u'section_breakdown']
                    ]
                    rows.append(["id", "email", "username", "grade"] + header)

                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }

                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([
                    student.id, student.email, student.username,
                    gradeset['percent']
                ] + row_percents)

        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)

        return HttpResponse(buf.getvalue(), content_type='text/plain')
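
A pattern worth noting in this view (and in the grade-report tasks further down): the CSV header is taken from the first successfully graded student's `section_breakdown`, and every later row is aligned to it with `percents.get(label, 0.0)`, so sections missing from a student's gradeset default to 0.0. A toy, self-contained sketch of that alignment with made-up gradesets:

gradesets = [
    {'percent': 0.80, 'section_breakdown': [{'label': 'HW 01', 'percent': 0.9},
                                            {'label': 'Final', 'percent': 0.7}]},
    {'percent': 0.25, 'section_breakdown': [{'label': 'HW 01', 'percent': 0.5}]},
]

header = None
rows = []
for gradeset in gradesets:
    if header is None:
        # The first graded student defines the column order.
        header = [s['label'] for s in gradeset['section_breakdown']]
        rows.append(['grade'] + header)
    percents = {s['label']: s.get('percent', 0.0)
                for s in gradeset['section_breakdown'] if 'label' in s}
    # Sections the student never saw fall back to 0.0 so columns stay aligned.
    rows.append([gradeset['percent']] + [percents.get(label, 0.0) for label in header])

print(rows)   # [['grade', 'HW 01', 'Final'], [0.8, 0.9, 0.7], [0.25, 0.5, 0.0]]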
Code Example #3
def ccx_grades_csv(request, course, ccx=None):
    """
    Download grades as CSV.
    """
    if not ccx:
        raise Http404

    ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
    with ccx_course(ccx_key) as course:
        prep_course_for_grading(course, request)

        enrolled_students = User.objects.filter(
            courseenrollment__course_id=ccx_key,
            courseenrollment__is_active=1).order_by('username').select_related(
                "profile")
        grades = iterate_grades_for(course, enrolled_students)

        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    header = [
                        section['label'].encode('utf-8')
                        for section in gradeset[u'section_breakdown']
                    ]
                    rows.append(["id", "email", "username", "grade"] + header)

                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }

                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([
                    student.id, student.email, student.username,
                    gradeset['percent']
                ] + row_percents)

        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)

        response = HttpResponse(buf.getvalue(), content_type='text/csv')
        response['Content-Disposition'] = 'attachment'

        return response
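
This version builds the CSV in memory with `StringIO` and `csv.writer`, then returns it as a download by setting `Content-Disposition: attachment` on the response. A framework-free sketch of the buffering half of that pattern (the Django response object itself is left out, and the helper name is made up):

import csv
from io import StringIO   # the view above uses the Python 2 StringIO module

def rows_to_csv(rows):
    """Serialize rows to a CSV string in memory, as the view does before
    wrapping the result in an HttpResponse with content_type='text/csv'."""
    buf = StringIO()
    writer = csv.writer(buf)
    for row in rows:
        writer.writerow(row)
    return buf.getvalue()

print(rows_to_csv([["id", "email", "username", "grade"],
                   [7, "a@example.com", "alice", 0.92]]))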
Code Example #4
File: views.py Project: Cgruppo/edx-platform
def ccx_grades_csv(request, course):
    """
    Download grades as CSV.
    """
    # Need course module for overrides to function properly
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, course, depth=2)
    course = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course.id)
    ccx = get_ccx_for_coach(course, request.user)
    with ccx_context(ccx):
        # The grading policy for the MOOC is probably already cached.  We need
        # to make sure we have the CCX grading policy loaded.
        course._field_data_cache = {}  # pylint: disable=protected-access
        course.set_grading_policy(course.grading_policy)

        enrolled_students = User.objects.filter(
            ccxmembership__ccx=ccx,
            ccxmembership__active=1
        ).order_by('username').select_related("profile")
        grades = iterate_grades_for(course, enrolled_students)

        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    header = [section['label'].encode('utf-8')
                              for section in gradeset[u'section_breakdown']]
                    rows.append(["id", "email", "username", "grade"] + header)

                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }

                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([student.id, student.email, student.username,
                             gradeset['percent']] + row_percents)

        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)

        return HttpResponse(buf.getvalue(), content_type='text/plain')
Code Example #5
    def _gradesets_and_errors_for(self, course_id, students):
        """Simple helper method to iterate through student grades and give us
        two dictionaries -- one that has all students and their respective
        gradesets, and one that has only students that could not be graded and
        their respective error messages."""
        students_to_gradesets = {}
        students_to_errors = {}

        for student, gradeset, err_msg in iterate_grades_for(course_id, students):
            students_to_gradesets[student] = gradeset
            if err_msg:
                students_to_errors[student] = err_msg

        return students_to_gradesets, students_to_errors
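
This helper simply fans the `(student, gradeset, err_msg)` tuples out into two dictionaries. A tiny sketch with stubbed results (no database or course content involved) showing the shape of what comes back; `split_gradesets` and the stub data are illustrative only:

def split_gradesets(results):
    # `results` yields (student, gradeset, err_msg) tuples, the same shape
    # iterate_grades_for produces.
    gradesets, errors = {}, {}
    for student, gradeset, err_msg in results:
        gradesets[student] = gradeset
        if err_msg:
            errors[student] = err_msg
    return gradesets, errors

stub_results = [
    ("alice", {"percent": 0.9}, ""),
    ("bob", None, "ERROR: could not grade"),
]
gradesets, errors = split_gradesets(stub_results)
assert set(gradesets) == {"alice", "bob"}
assert list(errors) == ["bob"]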
Code Example #6
File: views.py Project: andela-ijubril/edx-platform
def ccx_grades_csv(request, course, ccx=None):
    """
    Download grades as CSV.
    """
    if not ccx:
        raise Http404

    ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
    with ccx_course(ccx_key) as course:
        prep_course_for_grading(course, request)

        enrolled_students = User.objects.filter(
            courseenrollment__course_id=ccx_key,
            courseenrollment__is_active=1
        ).order_by('username').select_related("profile")
        grades = iterate_grades_for(course, enrolled_students)

        header = None
        rows = []
        for student, gradeset, __ in grades:
            if gradeset:
                # We were able to successfully grade this student for this
                # course.
                if not header:
                    # Encode the header row in utf-8 encoding in case there are
                    # unicode characters
                    header = [section['label'].encode('utf-8')
                              for section in gradeset[u'section_breakdown']]
                    rows.append(["id", "email", "username", "grade"] + header)

                percents = {
                    section['label']: section.get('percent', 0.0)
                    for section in gradeset[u'section_breakdown']
                    if 'label' in section
                }

                row_percents = [percents.get(label, 0.0) for label in header]
                rows.append([student.id, student.email, student.username,
                             gradeset['percent']] + row_percents)

        buf = StringIO()
        writer = csv.writer(buf)
        for row in rows:
            writer.writerow(row)

        response = HttpResponse(buf.getvalue(), content_type='text/csv')
        response['Content-Disposition'] = 'attachment'

        return response
Code Example #7
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id,
                                _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(),
                                 start_time)

    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys.  It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'),
                              ('username', 'Username')])

    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )

    # Just generate the static fields for now.
    rows = [
        list(header_row.values()) + ['Final Grade'] +
        list(chain.from_iterable(problems.values()))
    ]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}

    for student, gradeset, err_msg in iterate_grades_for(course_id,
                                                         enrolled_students,
                                                         keep_raw_scores=True):
        student_fields = [
            getattr(student, field_name) for field_name in header_row
        ]
        task_progress.attempted += 1

        if err_msg:
            # There was an error grading this student.
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue

        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {
            unicode(score.module_id): score
            for score in gradeset['raw_scores'] if score.graded
        }
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append(
                    [problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem.  For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation.  It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] +
                    list(chain.from_iterable(earned_possible_values)))

        task_progress.succeeded += 1
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id,
                                   start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err',
                                   course_id, start_date)

    return task_progress.update_task_state(
        extra_meta={'step': 'Uploading CSV'})
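
The per-problem columns in this report come from aligning each student's graded raw scores against the ordered problem list and flattening (earned, possible) pairs with `chain.from_iterable`; problems the student was never graded on become 'N/A'. A self-contained sketch with hypothetical problem ids and scores:

from itertools import chain

problems = ["block@p1", "block@p2", "block@p3"]            # ordered problem ids
problem_scores = {"block@p1": (3, 5), "block@p3": (2, 2)}  # (earned, possible), graded only

earned_possible_values = []
for problem_id in problems:
    earned, possible = problem_scores.get(problem_id, ("N/A", "N/A"))
    earned_possible_values.append([earned, possible])

# Flatten [[3, 5], ['N/A', 'N/A'], [2, 2]] into the tail of one CSV row.
row_tail = list(chain.from_iterable(earned_possible_values))
assert row_tail == [3, 5, "N/A", "N/A", 2, 2]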
Code Example #8
    def handle(self, *args, **options):
        def get_detail(course_key, attribute):
            usage_key = course_key.make_usage_key("about", attribute)
            try:
                value = modulestore().get_item(usage_key).data
            except ItemNotFoundError:
                value = None
            return value

        def iso_date(thing):
            if isinstance(thing, datetime.datetime):
                return thing.isoformat()
            return thing

        exclusion_list = []
        inclusion_list = []

        if options["exclude_file"]:
            try:
                with open(options["exclude_file"], "rb") as exclusion_file:
                    data = exclusion_file.readlines()
                exclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError("Could not read exclusion list from '{0}'".format(options["exclude_file"]))

        if options["include_file"]:
            try:
                with open(options["include_file"], "rb") as inclusion_file:
                    data = inclusion_file.readlines()
                inclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError("Could not read inclusion list from '{0}'".format(options["include_file"]))

        store = modulestore()
        epoch = int(time.time())
        blob = {"epoch": epoch, "courses": []}

        for course in store.get_courses():

            course_id_string = course.id.to_deprecated_string()

            if options["single_course"]:
                if course_id_string not in [options["single_course"].strip()]:
                    continue
            elif inclusion_list:
                if not course_id_string in inclusion_list:
                    continue
            elif exclusion_list:
                if course_id_string in exclusion_list:
                    continue

            print "Processing {}".format(course_id_string)

            students = CourseEnrollment.objects.users_enrolled_in(course.id)

            course_block = {
                "id": course_id_string,
                "meta_data": {
                    "about": {"display_name": course.display_name, "media": {"course_image": course_image_url(course)}},
                    # Yes, I'm duplicating them for now, because the about section is shot.
                    "display_name": course.display_name,
                    "banner": course_image_url(course),
                    "id_org": course.org,
                    "id_number": course.number,
                    "graded": course.graded,
                    "hidden": course.visible_to_staff_only,
                    "ispublic": not (course.visible_to_staff_only or False),  # course.ispublic was removed in dogwood.
                    "grading_policy": course.grading_policy,
                    "advanced_modules": course.advanced_modules,
                    "lowest_passing_grade": course.lowest_passing_grade,
                    "start": iso_date(course.start),
                    "advertised_start": iso_date(course.advertised_start),
                    "end": iso_date(course.end),
                    "enrollment_end": iso_date(course.enrollment_end),
                    "enrollment_start": iso_date(course.enrollment_start),
                    "has_started": course.has_started(),
                    "has_ended": course.has_ended(),
                    "overview": get_detail(course.id, "overview"),
                    "short_description": get_detail(course.id, "short_description"),
                    "pre_requisite_courses": get_detail(course.id, "pre_requisite_courses"),
                    "video": get_detail(course.id, "video"),
                },
                "students": [x.username for x in students],
                "global_anonymous_id": {x.username: anonymous_id_for_user(x, None) for x in students},
                "local_anonymous_id": {x.username: anonymous_id_for_user(x, course.id) for x in students},
            }

            if not options["meta_only"]:
                blob["grading_data_epoch"] = epoch
                course_block["grading_data"] = []
                # Grab grades for all students that have ever had anything to do with the course.
                graded_students = User.objects.filter(
                    pk__in=CourseEnrollment.objects.filter(course_id=course.id).values_list("user", flat=True)
                )
                print "{0} graded students in course {1}".format(graded_students.count(), course_id_string)
                if graded_students.count():
                    for student, gradeset, error_message in iterate_grades_for(course.id, graded_students):
                        if gradeset:
                            course_block["grading_data"].append({"username": student.username, "grades": gradeset})
                        else:
                            print error_message

            blob["courses"].append(course_block)
        if options["output"]:
            # Ensure the dump is atomic.
            with tempfile.NamedTemporaryFile("w", dir=os.path.dirname(options["output"]), delete=False) as output_file:
                json.dump(blob, output_file)
                tempname = output_file.name
            os.rename(tempname, options["output"])
        else:
            print "Blob output:"
            print json.dumps(blob, indent=2, ensure_ascii=False)
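
One detail that keeps this dump command working: `json.dump` cannot serialize `datetime` objects, so every date field is passed through `iso_date` first, turning datetimes into ISO-8601 strings and leaving `None` (and anything else) untouched. The same conversion in isolation:

import datetime
import json

def iso_date(thing):
    # Mirrors the helper above: datetimes become ISO-8601 strings,
    # everything else passes through unchanged.
    if isinstance(thing, datetime.datetime):
        return thing.isoformat()
    return thing

meta = {"start": iso_date(datetime.datetime(2015, 6, 1, 12, 30)),
        "end": iso_date(None)}
print(json.dumps(meta))   # {"start": "2015-06-01T12:30:00", "end": null}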
Code Example #9
def test_empty_student_list(self):
    """If we don't pass in any students, it should return a zero-length
    iterator, but it shouldn't error."""
    gradeset_results = list(iterate_grades_for(self.course.id, []))
    self.assertEqual(gradeset_results, [])
Code Example #10
File: tasks_helper.py Project: 6thfdwp/edx-platform
def push_grades_to_s3(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `GradesStore`. Once created, the files can
    be accessed by instantiating another `GradesStore` (via
    `GradesStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in GradesStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = datetime.now(UTC)
    status_interval = 100

    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    num_total = enrolled_students.count()
    num_attempted = 0
    num_succeeded = 0
    num_failed = 0
    curr_step = "Calculating Grades"

    def update_task_progress():
        """Return a dict containing info about current task"""
        current_time = datetime.now(UTC)
        progress = {
            'action_name': action_name,
            'attempted': num_attempted,
            'succeeded': num_succeeded,
            'failed': num_failed,
            'total': num_total,
            'duration_ms': int((current_time - start_time).total_seconds() * 1000),
            'step': curr_step,
        }
        _get_current_task().update_state(state=PROGRESS, meta=progress)

        return progress

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if num_attempted % status_interval == 0:
            update_task_progress()
        num_attempted += 1

        if gradeset:
            # We were able to successfully grade this student for this course.
            num_succeeded += 1
            if not header:
                # Encode the header row in utf-8 encoding in case there are unicode characters
                header = [section['label'].encode('utf-8') for section in gradeset[u'section_breakdown']]
                rows.append(["id", "email", "username", "grade"] + header)

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append([student.id, student.email, student.username, gradeset['percent']] + row_percents)
        else:
            # An empty gradeset means we failed to grade a student.
            num_failed += 1
            err_rows.append([student.id, student.username, err_msg])

    # By this point, we've got the rows we're going to stuff into our CSV files.
    curr_step = "Uploading CSVs"
    update_task_progress()

    # Generate parts of the file name
    timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
    course_id_prefix = urllib.quote(course_id.replace("/", "_"))

    # Perform the actual upload
    grades_store = GradesStore.from_config()
    grades_store.store_rows(
        course_id,
        "{}_grade_report_{}.csv".format(course_id_prefix, timestamp_str),
        rows
    )

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        grades_store.store_rows(
            course_id,
            "{}_grade_report_{}_err.csv".format(course_id_prefix, timestamp_str),
            err_rows
        )

    # One last update before we close out...
    return update_task_progress()
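
The task reports progress through a closure that rebuilds a progress dict and pushes it to the task state every `status_interval` students. Here is a sketch of that counter/interval pattern with the Celery call swapped for a plain callback so it runs standalone; `make_progress_reporter` and the fixed timestamps are assumptions, not edx-platform code:

from datetime import datetime, timedelta

def make_progress_reporter(action_name, total, start_time, report):
    """Closure in the spirit of update_task_progress above; `report` stands in
    for _get_current_task().update_state, which is not shown here."""
    def update(attempted, succeeded, failed, step, now):
        progress = {
            'action_name': action_name,
            'attempted': attempted,
            'succeeded': succeeded,
            'failed': failed,
            'total': total,
            'duration_ms': int((now - start_time).total_seconds() * 1000),
            'step': step,
        }
        report(progress)
        return progress
    return update

start = datetime(2015, 1, 1)
update = make_progress_reporter('graded', total=250, start_time=start, report=print)
for attempted in range(250):
    if attempted % 100 == 0:   # status_interval in the task above
        update(attempted, attempted, 0, 'Calculating Grades', start + timedelta(seconds=attempted))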
Code Example #11
File: tasks_helper.py Project: olexiim/edx-platform
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    course = get_course_by_id(course_id)
    cohorts_header = ["Cohort Name"] if course.is_cohorted else []

    partition_service = LmsPartitionService(user=None, course_id=course_id)
    partitions = partition_service.course_partitions
    group_configs_header = ["Group Configuration Group Name ({})".format(partition.name) for partition in partitions]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {"step": "Calculating Grades"}
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                header = [section["label"] for section in gradeset[u"section_breakdown"]]
                rows.append(["id", "email", "username", "grade"] + header + cohorts_header + group_configs_header)

            percents = {
                section["label"]: section.get("percent", 0.0)
                for section in gradeset[u"section_breakdown"]
                if "label" in section
            }

            cohorts_group_name = []
            if course.is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else "")

            group_configs_group_names = []
            for partition in partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else "")

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset["percent"]]
                + row_percents
                + cohorts_group_name
                + group_configs_group_names
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {"step": "Uploading CSVs"}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, "grade_report", course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, "grade_report_err", course_id, start_date)

    # One last update before we close out...
    return task_progress.update_task_state(extra_meta=current_step)
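
`err_rows` always starts with its header row, which is why the error-report upload at the end is guarded by `len(err_rows) > 1` ("don't count the header"). The same guard in miniature, with a stand-in upload callback:

def maybe_upload(rows, upload):
    # Upload only if something beyond the header row was appended.
    if len(rows) > 1:
        upload(rows)

err_rows = [["id", "username", "error_msg"]]            # header only: nothing uploaded
maybe_upload(err_rows, lambda r: print("uploading {} error rows".format(len(r) - 1)))

err_rows.append([42, "someuser", "ERROR: could not grade"])
maybe_upload(err_rows, lambda r: print("uploading {} error rows".format(len(r) - 1)))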
Code Example #12
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                # Encode the header row in utf-8 encoding in case there are unicode characters
                header = [section['label'].encode('utf-8') for section in gradeset[u'section_breakdown']]
                rows.append(["id", "email", "username", "grade"] + header)

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append([student.id, student.email, student.username, gradeset['percent']] + row_percents)
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    return task_progress.update_task_state(extra_meta=current_step)
Code Example #13
File: tasks_helper.py Project: HowestX/edx-platform
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    Generate a CSV containing all students' problem grades within a given
    `course_id`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    # This struct encapsulates both the display names of each static item in the
    # header row as values as well as the django User field names of those items
    # as the keys.  It is structured in this way to keep the values related.
    header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])

    try:
        course_structure = CourseStructure.objects.get(course_id=course_id)
        blocks = course_structure.ordered_blocks
        problems = _order_problems(blocks)
    except CourseStructure.DoesNotExist:
        return task_progress.update_task_state(
            extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
        )

    # Just generate the static fields for now.
    rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
    error_rows = [list(header_row.values()) + ['error_msg']]
    current_step = {'step': 'Calculating Grades'}

    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
        student_fields = [getattr(student, field_name) for field_name in header_row]
        task_progress.attempted += 1

        if err_msg:
            # There was an error grading this student.
            error_rows.append(student_fields + [err_msg])
            task_progress.failed += 1
            continue

        final_grade = gradeset['percent']
        # Only consider graded problems
        problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
        earned_possible_values = list()
        for problem_id in problems:
            try:
                problem_score = problem_scores[problem_id]
                earned_possible_values.append([problem_score.earned, problem_score.possible])
            except KeyError:
                # The student has not been graded on this problem.  For example,
                # iterate_grades_for skips problems that students have never
                # seen in order to speed up report generation.  It could also be
                # the case that the student does not have access to it (e.g. A/B
                # test or cohorted courseware).
                earned_possible_values.append(['N/A', 'N/A'])
        rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))

        task_progress.succeeded += 1
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)

    # Perform the upload if any students have been successfully graded
    if len(rows) > 1:
        upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
    # If there are any error rows, write them out as well
    if len(error_rows) > 1:
        upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)

    return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
Code Example #14
    def handle(self, *args, **options):
        def get_detail(course_key, attribute):
            usage_key = course_key.make_usage_key('about', attribute)
            try:
                value = modulestore().get_item(usage_key).data
            except ItemNotFoundError:
                value = None
            return value

        def iso_date(thing):
            if isinstance(thing, datetime.datetime):
                return thing.isoformat()
            return thing

        exclusion_list = []
        inclusion_list = []

        if options['exclude_file']:
            try:
                with open(options['exclude_file'], 'rb') as exclusion_file:
                    data = exclusion_file.readlines()
                exclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError(
                    "Could not read exclusion list from '{0}'".format(
                        options['exclude_file']))

        if options['include_file']:
            try:
                with open(options['include_file'], 'rb') as inclusion_file:
                    data = inclusion_file.readlines()
                inclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError(
                    "Could not read inclusion list from '{0}'".format(
                        options['include_file']))

        store = modulestore()
        epoch = int(time.time())
        blob = {
            'epoch': epoch,
            'courses': [],
        }

        # For course TOC we need a user and a request. Find the first superuser defined,
        # that will be our user.
        request_user = User.objects.filter(is_superuser=True).first()
        factory = RequestFactory()

        for course in store.get_courses():

            course_id_string = course.id.to_deprecated_string()

            if options['single_course']:
                if course_id_string not in [options['single_course'].strip()]:
                    continue
            elif inclusion_list:
                if not course_id_string in inclusion_list:
                    continue
            elif exclusion_list:
                if course_id_string in exclusion_list:
                    continue

            print "Processing {}".format(course_id_string)

            students = CourseEnrollment.objects.users_enrolled_in(course.id)

            # The method of getting a table of contents for a course is quite obtuse.
            # We have to go all the way to simulating a request.

            request = factory.get('/')
            request.user = request_user

            raw_blocks = get_blocks(request,
                                    store.make_course_usage_key(course.id),
                                    request_user,
                                    requested_fields=[
                                        'id', 'type', 'display_name',
                                        'children', 'lms_web_url'
                                    ])

            # We got the block structure. Now we need to massage it so we get the proper jump urls without site domain.
            # Because on the test server the site domain is wrong.
            blocks = {}
            for block_key, block in raw_blocks['blocks'].items():
                try:
                    direct_url = '/courses/' + block.get('lms_web_url').split(
                        '/courses/')[1]
                except IndexError:
                    direct_url = ''
                blocks[block_key] = {
                    'id': block.get('id', ''),
                    'display_name': block.get('display_name', ''),
                    'type': block.get('type', ''),
                    'children_ids': block.get('children', []),
                    'url': direct_url
                }

            # Then we need to recursively stitch it into a tree.
            # We're only interested in three layers of the hierarchy for now: 'course', 'chapter', 'sequential', 'vertical'.
            # Everything else is the individual blocks and problems we don't care about right now.

            INTERESTING_BLOCKS = [
                'course', 'chapter', 'sequential', 'vertical'
            ]

            def _get_children(parent):
                children = [
                    blocks.get(n) for n in parent['children_ids']
                    if blocks.get(n)
                ]  # and blocks.get(n)['type'] in INTERESTING_BLOCKS]
                for child in children:
                    child['children'] = _get_children(child)
                parent['children'] = children
                del parent['children_ids']
                return children

            block_tree = _get_children(blocks[raw_blocks['root']])

            course_block = {
                'id': course_id_string,
                'meta_data': {
                    'about': {
                        'display_name': course.display_name,
                        'media': {
                            'course_image': course_image_url(course),
                        }
                    },
                    'block_tree': block_tree,
                    # Yes, I'm duplicating them for now, because the about section is shot.
                    'display_name': course.display_name,
                    'banner': course_image_url(course),
                    'id_org': course.org,
                    'id_number': course.number,
                    'graded': course.graded,
                    'hidden': course.visible_to_staff_only,
                    'ispublic': not (course.visible_to_staff_only or False),  # course.ispublic was removed in dogwood.
                    'grading_policy': course.grading_policy,
                    'advanced_modules': course.advanced_modules,
                    'lowest_passing_grade': course.lowest_passing_grade,
                    'start': iso_date(course.start),
                    'advertised_start': iso_date(course.advertised_start),
                    'end': iso_date(course.end),
                    'enrollment_end': iso_date(course.enrollment_end),
                    'enrollment_start': iso_date(course.enrollment_start),
                    'has_started': course.has_started(),
                    'has_ended': course.has_ended(),
                    'overview': get_detail(course.id, 'overview'),
                    'short_description': get_detail(course.id, 'short_description'),
                    'pre_requisite_courses': get_detail(course.id, 'pre_requisite_courses'),
                    'video': get_detail(course.id, 'video'),
                },
                'students': [x.username for x in students],
                'global_anonymous_id': {x.username: anonymous_id_for_user(x, None) for x in students},
                'local_anonymous_id': {x.username: anonymous_id_for_user(x, course.id) for x in students},
            }

            if not options['meta_only']:
                blob['grading_data_epoch'] = epoch
                course_block['grading_data'] = []
                # Grab grades for all students that have ever had anything to do with the course.
                graded_students = User.objects.filter(
                    pk__in=CourseEnrollment.objects.filter(
                        course_id=course.id).values_list('user', flat=True))
                print "{0} graded students in course {1}".format(
                    graded_students.count(), course_id_string)
                if graded_students.count():
                    for student, gradeset, error_message in iterate_grades_for(course.id, graded_students):
                        if gradeset:
                            course_block['grading_data'].append({
                                'username': student.username,
                                'grades': gradeset,
                            })
                        else:
                            print error_message

            blob['courses'].append(course_block)
        if options['output']:
            # Ensure the dump is atomic.
            with tempfile.NamedTemporaryFile('w',
                                             dir=os.path.dirname(
                                                 options['output']),
                                             delete=False) as output_file:
                json.dump(blob, output_file, default=json_util.default)
                tempname = output_file.name
            os.rename(tempname, options['output'])
        else:
            print "Blob output:"
            print json.dumps(blob,
                             indent=2,
                             ensure_ascii=False,
                             default=json_util.default)
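
When an output path is given, both versions of this command write the blob atomically: the JSON is serialized into a `NamedTemporaryFile` created in the destination directory and then `os.rename`d over the target, so readers never see a half-written dump. The pattern on its own (hypothetical helper name, and without the `json_util` default handler used above):

import json
import os
import tempfile

def atomic_json_dump(blob, output_path):
    """Serialize to a temp file beside the target, then rename into place.
    os.rename replaces the destination atomically on POSIX only when source
    and destination are on the same filesystem, which is why the temp file
    is created with dir= pointing at the output directory."""
    out_dir = os.path.dirname(os.path.abspath(output_path))
    with tempfile.NamedTemporaryFile("w", dir=out_dir, delete=False) as tmp:
        json.dump(blob, tmp)
        temp_name = tmp.name
    os.rename(temp_name, output_path)

atomic_json_dump({"epoch": 0, "courses": []}, "course_dump.json")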
Code Example #15
File: tasks_helper.py Project: Cgruppo/edx-platform
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )
    for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after certain intervals to get a hint that task is in progress
        student_counter += 1
        if student_counter % 1000 == 0:
            TASK_LOG.info(
                u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
                task_info_string,
                action_name,
                current_step,
                student_counter,
                total_enrolled_students
            )

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                header = [section['label'] for section in gradeset[u'section_breakdown']]
                rows.append(
                    ["id", "email", "username", "grade"] + header + cohorts_header + group_configs_header
                )

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']] +
                row_percents + cohorts_group_name + group_configs_group_names
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
Code Example #16
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id,
                      _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    task_progress = TaskProgress(action_name, enrolled_students.count(),
                                 start_time)

    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id')
        if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input)
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution',
                  task_info_string, action_name)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [
        u'Experiment Group ({})'.format(partition.name)
        for partition in experiment_partitions
    ]

    certificate_info_header = [
        'Certificate Eligible', 'Certificate Delivered', 'Certificate Type'
    ]
    certificate_whitelist = CertificateWhitelist.objects.filter(
        course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    current_step = {'step': 'Calculating Grades'}

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string, action_name, current_step, total_enrolled_students)
    for student, gradeset, err_msg in iterate_grades_for(
            course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string, action_name, current_step, student_counter,
            total_enrolled_students)

        if gradeset:
            # We were able to successfully grade this student for this course.
            task_progress.succeeded += 1
            if not header:
                header = [
                    section['label']
                    for section in gradeset[u'section_breakdown']
                ]
                rows.append(["id", "email", "username", "grade"] + header +
                            cohorts_header + group_configs_header +
                            ['Enrollment Track', 'Verification Status'] +
                            certificate_info_header)

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            cohorts_group_name = []
            if course_is_cohorted:
                group = get_cohort(student, course_id, assign=False)
                cohorts_group_name.append(group.name if group else '')

            group_configs_group_names = []
            for partition in experiment_partitions:
                group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
                group_configs_group_names.append(group.name if group else '')

            enrollment_mode = CourseEnrollment.enrollment_mode_for_user(
                student, course_id)[0]
            verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
                student, course_id, enrollment_mode)
            certificate_info = certificate_info_for_user(
                student, course_id, gradeset['grade'],
                student.id in whitelisted_user_ids)

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append(
                [student.id, student.email, student.username, gradeset['percent']]
                + row_percents
                + cohorts_group_name
                + group_configs_group_names
                + [enrollment_mode, verification_status]
                + certificate_info
            )
        else:
            # An empty gradeset means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string, action_name, current_step, student_counter,
        total_enrolled_students)

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string,
                  action_name, current_step)

    # Perform the actual upload
    upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        upload_csv_to_report_store(err_rows, 'grade_report_err', course_id,
                                   start_date)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task',
                  task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
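
The tasks in this collection all turn `iterate_grades_for()` output into CSV rows the same way: the first successfully graded student fixes the column order, and later rows fall back to 0.0 for any assignment label they are missing. Below is a minimal, self-contained sketch of that pattern; the hand-built `sample_gradeset` dict is hypothetical and only stands in for what `iterate_grades_for()` yields.

# Sketch only: `sample_gradeset` is a hypothetical stand-in for the gradeset
# dicts yielded by iterate_grades_for() in the examples above.
sample_gradeset = {
    'percent': 0.85,
    'section_breakdown': [
        {'label': 'HW 01', 'percent': 1.0},
        {'label': 'HW 02', 'percent': 0.7},
        {'label': 'Final', 'percent': 0.9},
    ],
}

# The first graded student fixes the column order for every later row.
header = [section['label'] for section in sample_gradeset['section_breakdown']]

# Labels missing from a student's breakdown default to 0.0, exactly as above.
percents = {
    section['label']: section.get('percent', 0.0)
    for section in sample_gradeset['section_breakdown']
    if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
assert row_percents == [1.0, 0.7, 0.9]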
コード例 #20
0
def push_grades_to_s3(_xmodule_instance_args, _entry_id, course_id,
                      _task_input, action_name):
    """
    For a given `course_id`, generate a grades CSV file for all students that
    are enrolled, and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `link_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.

    As we start to add more CSV downloads, it will probably be worthwhile to
    make a more general CSVDoc class instead of building out the rows like we
    do here.
    """
    start_time = datetime.now(UTC)
    status_interval = 100

    enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
    num_total = enrolled_students.count()
    num_attempted = 0
    num_succeeded = 0
    num_failed = 0
    curr_step = "Calculating Grades"

    def update_task_progress():
        """Return a dict containing info about current task"""
        current_time = datetime.now(UTC)
        progress = {
            'action_name': action_name,
            'attempted': num_attempted,
            'succeeded': num_succeeded,
            'failed': num_failed,
            'total': num_total,
            'duration_ms': int(
                (current_time - start_time).total_seconds() * 1000),
            'step': curr_step,
        }
        _get_current_task().update_state(state=PROGRESS, meta=progress)

        return progress

    # Loop over all our students and build our CSV lists in memory
    header = None
    rows = []
    err_rows = [["id", "username", "error_msg"]]
    for student, gradeset, err_msg in iterate_grades_for(
            course_id, enrolled_students):
        # Periodically update task status (this is a cache write)
        if num_attempted % status_interval == 0:
            update_task_progress()
        num_attempted += 1

        if gradeset:
            # We were able to successfully grade this student for this course.
            num_succeeded += 1
            if not header:
                # Encode the header row in utf-8 encoding in case there are unicode characters
                header = [
                    section['label'].encode('utf-8')
                    for section in gradeset[u'section_breakdown']
                ]
                rows.append(["id", "email", "username", "grade"] + header)

            percents = {
                section['label']: section.get('percent', 0.0)
                for section in gradeset[u'section_breakdown']
                if 'label' in section
            }

            # Not everybody has the same gradable items. If the item is not
            # found in the user's gradeset, just assume it's a 0. The aggregated
            # grades for their sections and overall course will be calculated
            # without regard for the item they didn't have access to, so it's
            # possible for a student to have a 0.0 show up in their row but
            # still have 100% for the course.
            row_percents = [percents.get(label, 0.0) for label in header]
            rows.append([
                student.id,
                student.email.encode('utf-8'), student.username,
                gradeset['percent']
            ] + row_percents)
        else:
            # An empty gradeset means we failed to grade a student.
            num_failed += 1
            err_rows.append([student.id, student.username, err_msg])

    # By this point, we've got the rows we're going to stuff into our CSV files.
    curr_step = "Uploading CSVs"
    update_task_progress()

    # Generate parts of the file name
    timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
    course_id_prefix = urllib.quote(
        course_id.to_deprecated_string().replace("/", "_"))

    # Perform the actual upload
    report_store = ReportStore.from_config()
    report_store.store_rows(
        course_id,
        u"{}_grade_report_{}.csv".format(course_id_prefix, timestamp_str),
        rows)

    # If there are any error rows (don't count the header), write them out as well
    if len(err_rows) > 1:
        report_store.store_rows(
            course_id,
            u"{}_grade_report_{}_err.csv".format(course_id_prefix, timestamp_str),
            err_rows)

    # One last update before we close out...
    return update_task_progress()
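
The report file name built above combines a URL-quoted, slash-flattened course id with a minute-resolution timestamp. A small illustration of that naming logic follows; the course id string and timestamp are hypothetical, standing in for `course_id.to_deprecated_string()` and the task's `start_time`.

# Sketch of the report-file naming used in push_grades_to_s3(); the inputs are
# hypothetical examples, not values taken from a real course.
from datetime import datetime
try:
    from urllib import quote          # Python 2, as in the surrounding examples
except ImportError:
    from urllib.parse import quote    # Python 3 fallback

course_id_string = "edX/Demo_Course/2014T1"     # stand-in for course_id.to_deprecated_string()
start_time = datetime(2014, 1, 31, 12, 30)      # stand-in for the task's start_time

timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
course_id_prefix = quote(course_id_string.replace("/", "_"))
filename = u"{}_grade_report_{}.csv".format(course_id_prefix, timestamp_str)
assert filename == u"edX_Demo_Course_2014T1_grade_report_2014-01-31-1230.csv"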
コード例 #21
0
    def handle(self, *args, **options):

        def get_detail(course_key, attribute):
            """Return the named 'about' attribute for the course, or None if it is not set."""
            usage_key = course_key.make_usage_key('about', attribute)
            try:
                value = modulestore().get_item(usage_key).data
            except ItemNotFoundError:
                value = None
            return value

        def iso_date(thing):
            """Return an ISO 8601 string for datetimes; pass anything else through unchanged."""
            if isinstance(thing, datetime.datetime):
                return thing.isoformat()
            return thing

        exclusion_list = []
        inclusion_list = []

        if options['exclude_file']:
            try:
                with open(options['exclude_file'], 'rb') as exclusion_file:
                    data = exclusion_file.readlines()
                exclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError("Could not read exclusion list from '{0}'".format(options['exclude_file']))

        if options['include_file']:
            try:
                with open(options['include_file'], 'rb') as inclusion_file:
                    data = inclusion_file.readlines()
                inclusion_list = [x.strip() for x in data]
            except IOError:
                raise CommandError("Could not read inclusion list from '{0}'".format(options['include_file']))

        store = modulestore()
        epoch = int(time.time())
        blob = {
            'epoch': epoch,
            'courses': [],
        }

        # For course TOC we need a user and a request. Find the first superuser defined,
        # that will be our user.
        request_user = User.objects.filter(is_superuser=True).first()
        factory = RequestFactory()

        for course in store.get_courses():

            course_id_string = course.id.to_deprecated_string()

            if options['single_course']:
                if course_id_string not in [options['single_course'].strip()]:
                    continue
            elif inclusion_list:
                if course_id_string not in inclusion_list:
                    continue
            elif exclusion_list:
                if course_id_string in exclusion_list:
                    continue

            print "Processing {}".format(course_id_string)

            students = CourseEnrollment.objects.users_enrolled_in(course.id)

            # The method of getting a table of contents for a course is quite obtuse.
            # We have to go all the way to simulating a request.

            request = factory.get('/')
            request.user = request_user

            raw_blocks = get_blocks(
                request, store.make_course_usage_key(course.id), request_user,
                requested_fields=['id', 'type', 'display_name', 'children', 'lms_web_url'])

            # We got the block structure. Now we need to massage it so we get the proper jump urls without site domain.
            # Because on the test server the site domain is wrong.
            blocks = {}
            for block_key, block in raw_blocks['blocks'].items():
                try:
                    direct_url = '/courses/' + block.get('lms_web_url').split('/courses/')[1]
                except IndexError:
                    direct_url = ''
                blocks[block_key] = {
                    'id': block.get('id', ''),
                    'display_name': block.get('display_name', ''),
                    'type': block.get('type', ''),
                    'children_ids': block.get('children', []),
                    'url': direct_url
                }

            # Then we need to recursively stitch it into a tree.
            # We're only interested in four layers of the hierarchy for now: 'course', 'chapter', 'sequential', and 'vertical'.
            # Everything else is the individual blocks and problems we don't care about right now.

            INTERESTING_BLOCKS = ['course', 'chapter', 'sequential', 'vertical']

            def _get_children(parent):
                children = [blocks.get(n) for n in parent['children_ids'] if blocks.get(n)] # and blocks.get(n)['type'] in INTERESTING_BLOCKS]
                for child in children:
                    child['children'] = _get_children(child)
                parent['children'] = children
                del parent['children_ids']
                return children

            block_tree = _get_children(blocks[raw_blocks['root']])

            course_block = {
                'id': course_id_string,
                'meta_data': {
                    'about': {
                        'display_name': course.display_name,
                        'media': {
                            'course_image': course_image_url(course),
                        },
                    },
                    'block_tree': block_tree,
                    # Yes, I'm duplicating them for now, because the about section is shot.
                    'display_name': course.display_name,
                    'banner': course_image_url(course),
                    'id_org': course.org,
                    'id_number': course.number,
                    'graded': course.graded,
                    'hidden': course.visible_to_staff_only,
                    'ispublic': not (course.visible_to_staff_only or False),  # course.ispublic was removed in dogwood.
                    'grading_policy': course.grading_policy,
                    'advanced_modules': course.advanced_modules,
                    'lowest_passing_grade': course.lowest_passing_grade,
                    'start': iso_date(course.start),
                    'advertised_start': iso_date(course.advertised_start),
                    'end': iso_date(course.end),
                    'enrollment_end': iso_date(course.enrollment_end),
                    'enrollment_start': iso_date(course.enrollment_start),
                    'has_started': course.has_started(),
                    'has_ended': course.has_ended(),
                    'overview': get_detail(course.id, 'overview'),
                    'short_description': get_detail(course.id, 'short_description'),
                    'pre_requisite_courses': get_detail(course.id, 'pre_requisite_courses'),
                    'video': get_detail(course.id, 'video'),
                },
                'students': [x.username for x in students],
                'global_anonymous_id': {x.username: anonymous_id_for_user(x, None) for x in students},
                'local_anonymous_id': {x.username: anonymous_id_for_user(x, course.id) for x in students},
            }

            if not options['meta_only']:
                blob['grading_data_epoch'] = epoch
                course_block['grading_data'] = []
                # Grab grades for all students that have ever had anything to do with the course.
                graded_students = User.objects.filter(
                    pk__in=CourseEnrollment.objects.filter(
                        course_id=course.id).values_list('user', flat=True))
                print "{0} graded students in course {1}".format(
                    graded_students.count(), course_id_string)
                if graded_students.count():
                    for student, gradeset, error_message in iterate_grades_for(
                            course.id, graded_students):
                        if gradeset:
                            course_block['grading_data'].append({
                                'username': student.username,
                                'grades': gradeset,
                            })
                        else:
                            print error_message

            blob['courses'].append(course_block)
        if options['output']:
            # Ensure the dump is atomic.
            with tempfile.NamedTemporaryFile('w', dir=os.path.dirname(options['output']), delete=False) as output_file:
                json.dump(blob, output_file, default=json_util.default)
                tempname = output_file.name
            os.rename(tempname, options['output'])
        else:
            print "Blob output:"
            print json.dumps(blob, indent=2, ensure_ascii=False, default=json_util.default)
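
The `--output` branch above relies on a common atomic-write idiom: dump the JSON into a `NamedTemporaryFile` created in the destination directory, then `os.rename()` it over the final path so readers never observe a half-written file. A stripped-down sketch of that idiom follows; the helper name and example path are hypothetical, and json_util-style default handlers are omitted.

# Minimal sketch of the atomic JSON dump used above; atomic_json_dump() and the
# example path are hypothetical.
import json
import os
import tempfile

def atomic_json_dump(blob, output_path):
    """Write `blob` as JSON to `output_path` without ever exposing a partial file."""
    out_dir = os.path.dirname(output_path) or '.'
    # Creating the temp file in the destination directory keeps it on the same
    # filesystem, which is what makes os.rename() an atomic replacement on POSIX.
    with tempfile.NamedTemporaryFile('w', dir=out_dir, delete=False) as output_file:
        json.dump(blob, output_file)
        tempname = output_file.name
    os.rename(tempname, output_path)

atomic_json_dump({'epoch': 0, 'courses': []}, '/tmp/course_dump.json')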
コード例 #22
0
ファイル: tests.py プロジェクト: ovnicraft/edx-gea
    def edx_grade_students(self, users):
        return iterate_grades_for(self.course, users)
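
This wrapper simply forwards to `iterate_grades_for()`, so a caller receives the same (student, gradeset, err_msg) tuples seen throughout this collection. A hypothetical consumption sketch, where `grade_iter` stands for whatever `edx_grade_students(users)` returns:

# Hypothetical usage sketch; collect_grade_percents() is not part of the project above.
def collect_grade_percents(grade_iter):
    """Map username -> overall course percent, skipping students that failed to grade."""
    percents = {}
    for student, gradeset, err_msg in grade_iter:
        if gradeset:
            percents[student.username] = gradeset['percent']
    return percents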