Example #1
def push_student_responses_to_s3(_xmodule_instance_args, _entry_id, course_id,
                                 _task_input, action_name):
    """
    For a given `course_id`, generate a responses CSV file for students that
    have submitted problem responses, and store using a `ReportStore`. Once
    created, the files can be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.
    """
    start_time = datetime.now(UTC)
    try:
        course = get_course_by_id(course_id)
    except ValueError as e:
        TASK_LOG.error(e.message)
        return "failed"
    rows = student_response_rows(course)
    # Generate parts of the file name
    timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
    course_id_prefix = course_filename_prefix_generator(course_id)
    # Perform the actual upload
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(
        course_id,
        u"{course_id_prefix}_responses_report_{timestamp_str}.csv".format(
            course_id_prefix=course_id_prefix,
            timestamp_str=timestamp_str,
        ), rows)
    return "succeeded"
Example #2
def push_student_responses_to_s3(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
    """
    For a given `course_id`, generate a responses CSV file for students that
    have submitted problem responses, and store using a `ReportStore`. Once
    created, the files can be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it. Writes are
    buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
    that are visible in ReportStore will be complete ones.
    """
    start_time = datetime.now(UTC)
    try:
        course = get_course_by_id(course_id)
    except ValueError as e:
        TASK_LOG.error(e.message)
        return "failed"
    rows = student_response_rows(course)
    # Generate parts of the file name
    timestamp_str = start_time.strftime("%Y-%m-%d-%H%M")
    course_id_prefix = course_filename_prefix_generator(course_id)
    # Perform the actual upload
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(
        course_id,
        u"{course_id_prefix}_responses_report_{timestamp_str}.csv".format(
            course_id_prefix=course_id_prefix,
            timestamp_str=timestamp_str,
        ),
        rows
    )
    return "succeeded"
Example #3
def _upload_exec_summary_to_store(data_dict,
                                  report_name,
                                  course_id,
                                  generated_at,
                                  config_name='FINANCIAL_REPORTS'):
    """
    Upload Executive Summary Html file using ReportStore.

    Arguments:
        data_dict: containing executive report data.
        report_name: Name of the resulting Html File.
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)

    # Use the data dict and html template to generate the output buffer
    output_buffer = StringIO(
        render_to_string(
            "instructor/instructor_dashboard_2/executive_summary.html",
            data_dict))

    report_store.store(
        course_id,
        u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
            course_prefix=course_filename_prefix_generator(course_id),
            report_name=report_name,
            timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")),
        output_buffer,
    )
    tracker_emit(report_name)
Example #4
 def test_gradeucursos_post_from_instructor_tab_is_resumen(self):
     """
         Test gradeucursos post from instructor tab normal process with is_resumen params
     """
     self.skipTest("disabled temporarily")
     with mock_get_score(1, 2):
         self.grade_factory.update(self.student, self.course, force_update_subsections=True)
         self.grade_factory.update(self.student_2, self.course, force_update_subsections=True)
     try:
         from uchileedxlogin.models import EdxLoginUser
         EdxLoginUser.objects.create(user=self.student, run='09472337K')
     except ImportError:
         self.skipTest("import error uchileedxlogin")
     task_input = {
         'grade_type': 'seven_scale',
         'course_id': str(self.course.id),
         'instructor_tab': True,
         'assig_type': 'Homework',
         'is_resumen': True
     }
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         result = task_get_data(
             None, None, self.course.id,
             task_input, 'EOL_GRADE_UCURSOS'
         )
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     report_csv_filename = report_store.links_for(self.course.id)[0][0]
     report_path = report_store.path_to(self.course.id, report_csv_filename)
     self.assertTrue('_notas_estudiantes_' in report_csv_filename)
     self.assertTrue('_notas_estudiantes_' in report_path)
Example #5
    def verify_rows_in_csv(self, expected_rows, file_index=0, verify_order=True, ignore_other_columns=False):
        """
        Verify that the last ReportStore CSV contains the expected content.

        Arguments:
            expected_rows (iterable): An iterable of dictionaries,
                where each dict represents a row of data in the last
                ReportStore CSV.  Each dict maps keys from the CSV
                header to values in that row's corresponding cell.
            file_index (int): Describes which report store file to
                open.  Files are ordered by last modified date, and 0
                corresponds to the most recently modified file.
            verify_order (boolean): When True (default), we verify that
                both the content and order of `expected_rows` matches
                the actual csv rows.  When False, we only verify that
                the content matches.
            ignore_other_columns (boolean): When True, we verify that `expected_rows`
                contain data which is the subset of actual csv rows.
        """
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        report_csv_filename = report_store.links_for(self.course.id)[file_index][0]
        report_path = report_store.path_to(self.course.id, report_csv_filename)
        with report_store.storage.open(report_path) as csv_file:
            # Expand the dict reader generator so we don't lose its content
            csv_rows = [row for row in unicodecsv.DictReader(csv_file)]

            if ignore_other_columns:
                csv_rows = [
                    {key: row.get(key) for key in expected_rows[index].keys()} for index, row in enumerate(csv_rows)
                ]

            if verify_order:
                self.assertEqual(csv_rows, expected_rows)
            else:
                self.assertItemsEqual(csv_rows, expected_rows)
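For context, a call to this helper might look like the snippet below: a hypothetical test body whose column names and values are illustrative, not taken from any real report.

    def test_report_contains_expected_students(self):
        # Each dict maps CSV header names to the expected cell values for one
        # row; extra report columns are ignored because ignore_other_columns=True.
        self.verify_rows_in_csv(
            [
                {'Username': 'student1', 'Grade': '0.13'},
                {'Username': 'student2', 'Grade': '1.0'},
            ],
            ignore_other_columns=True,
        )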
Example #6
 def test_xblockcompletion_get_all_data_no_responses(self, report):
     """
          Test xblockcompletion view all data when the xblock doesn't have responses yet
     """
     generated_report_data = {}               
     report.return_value = generated_report_data
     data = {'format': False, 'course': str(self.course.id), 'base_url':'this_is_a_url'}
     task_input = {'data': data }
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         result = generate(
             None, None, self.course.id,
             task_input, 'EOL_Xblock_Completion'
         )
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     header_row = ",".join(['Titulo', 'Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Pregunta', 'Respuesta Estudiante', 'Resp. Correcta', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Pts Total Componente', 'Url', 'block_id'])
     base_student_row = ",".join([
         self.items[0].display_name,
         self.student.username,
         self.student.email,
         '',
         '1.' + self.chapter.display_name,
         '1.1.' + self.section.display_name,
         '1.1.1.' + self.subsection.display_name
     ])
     report_csv_filename = report_store.links_for(self.course.id)[0][0]
     report_path = report_store.path_to(self.course.id, report_csv_filename)
     with report_store.storage.open(report_path) as csv_file:
         csv_file_data = csv_file.read()
         # Removing unicode signature (BOM) from the beginning
         csv_file_data = csv_file_data.decode("utf-8-sig")
         self.assertIn(header_row, csv_file_data)
         self.assertFalse(base_student_row in csv_file_data)
Example #7
    def verify_rows_in_csv(self, expected_rows, file_index=0, verify_order=True, ignore_other_columns=False):
        """
        Verify that the last ReportStore CSV contains the expected content.

        Arguments:
            expected_rows (iterable): An iterable of dictionaries,
                where each dict represents a row of data in the last
                ReportStore CSV.  Each dict maps keys from the CSV
                header to values in that row's corresponding cell.
            file_index (int): Describes which report store file to
                open.  Files are ordered by last modified date, and 0
                corresponds to the most recently modified file.
            verify_order (boolean): When True (default), we verify that
                both the content and order of `expected_rows` matches
                the actual csv rows.  When False, we only verify that
                the content matches.
            ignore_other_columns (boolean): When True, we verify that `expected_rows`
                contain data which is the subset of actual csv rows.
        """
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        report_csv_filename = report_store.links_for(self.course.id)[file_index][0]
        report_path = report_store.path_to(self.course.id, report_csv_filename)
        with report_store.storage.open(report_path) as csv_file:
            # Expand the dict reader generator so we don't lose its content
            csv_rows = [row for row in unicodecsv.DictReader(csv_file, encoding='utf-8-sig')]

            if ignore_other_columns:
                csv_rows = [
                    {key: row.get(key) for key in expected_rows[index].keys()} for index, row in enumerate(csv_rows)
                ]

            if verify_order:
                self.assertEqual(csv_rows, expected_rows)
            else:
                self.assertItemsEqual(csv_rows, expected_rows)
Example #8
 def test_xblockcompletion_get_resumen(self):
     """
     test to generate course survey report
     and then test the report authenticity.
     """
     from lms.djangoapps.courseware.models import StudentModule
     data = {'format': True, 'course': str(self.course.id), 'base_url':'this_is_a_url'}
     task_input = {'data': data }
     module = StudentModule(
         module_state_key=self.items[0].location,
         student=self.student,
         course_id=self.course.id,
         module_type='problem',
         state='{"score": {"raw_earned": 0, "raw_possible": 3}, "seed": 1, "attempts":1}')
     module.save()
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         result = generate(
             None, None, self.course.id,
             task_input, 'EOL_Xblock_Completion'
         )
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     header_row = ",".join(['Titulo', 'Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Url', 'block_id'])
     student1_row = ",".join([
         self.items[0].display_name,
         self.student.username,
         self.student.email,
         '',
         '1.' + self.chapter.display_name,
         '1.1.' + self.section.display_name,
         '1.1.1.' + self.subsection.display_name,
         '1','0','3'
     ])
     expected_data = [header_row, student1_row]
     self._verify_csv_file_report(report_store, expected_data)
Example #9
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first column may be a
            header):
            [
                [row1_colum1, row1_colum2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course

    Returns:
        report_name: string - Name of the generated report
    """
    report_store = ReportStore.from_config(config_name)
    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )

    report_store.store_rows(course_id, report_name, rows)
    tracker_emit(csv_name)
    return report_name
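A hedged sketch of how this helper might be called; the rows, report name, and timestamp are illustrative, course_id is assumed to be a CourseKey already in scope, and UTC is assumed to be the pytz timezone used by the surrounding tasks:

from datetime import datetime
from pytz import UTC

rows = [
    ['Student ID', 'Email', 'Username'],          # header row
    ['42', 'student@example.com', 'student1'],    # one data row
]
# Returns the generated report name, following the
# "<course_prefix>_<csv_name>_<timestamp>.csv" pattern built above.
report_name = upload_csv_to_report_store(rows, 'enrolled_students', course_id, datetime.now(UTC))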
Example #10
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first column may be a
            header):
            [
                [row1_colum1, row1_colum2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)
    report_store.store_rows(
        course_id,
        u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
            course_prefix=course_filename_prefix_generator(course_id),
            csv_name=csv_name,
            timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
        ),
        rows
    )
    tracker_emit(csv_name)
Example #11
def upload_csv_to_report_store(rows,
                               csv_name,
                               course_id,
                               timestamp,
                               config_name='GRADES_DOWNLOAD',
                               parent_dir=''):
    """
    Upload data as a CSV using ReportStore.

    Arguments:
        rows: CSV data in the following format (first column may be a
            header):
            [
                [row1_colum1, row1_colum2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
        parent_dir: Name of the directory where the CSV file will be stored

    Returns:
        report_name: string - Name of the generated report
    """
    report_store = ReportStore.from_config(config_name)
    report_name = "{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M"))

    report_store.store_rows(course_id, report_name, rows, parent_dir)
    tracker_emit(csv_name)
    return report_name
Example #12
 def test_gradeucursos_post_from_instructor_tab_grade_cutoff_not_defined_in_report(self, grade):
     """
         Test gradeucursos post from instructor tab when grade cutoff is not defined
     """
     grade.return_value = None
     try:
         from uchileedxlogin.models import EdxLoginUser
         EdxLoginUser.objects.create(user=self.student, run='09472337K')
     except ImportError:
         self.skipTest("import error uchileedxlogin")
     task_input = {
         'grade_type': 'seven_scale',
         'course_id': str(self.course.id),
         'instructor_tab': True,
         'assig_type': 'gradeucursos_total',
         'is_resumen': False
     }
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         result = task_get_data(
             None, None, self.course.id,
             task_input, 'EOL_GRADE_UCURSOS'
         )
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     report_csv_filename = report_store.links_for(self.course.id)[0][0]
     report_path = report_store.path_to(self.course.id, report_csv_filename)
     self.assertTrue('_Error_notas_estudiantes_' in report_csv_filename)
     self.assertTrue('_Error_notas_estudiantes_' in report_path)
Example #13
def graph_course_forums_usage(request, course_id):
    """
    Generate a d3 graphable csv-string by checking the report store for the clicked_on file
    """
    clicked_text = request.POST.get('clicked_on')
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    graph = None
    if clicked_text:
        for name, url in report_store.links_for(course_key):
            if clicked_text in name and 'course_forums' in name:
                request = urllib2.Request(url)
                request.add_header('Accept-encoding', 'gzip')
                try:
                    url_handle = urllib2.urlopen(request)
                    if url_handle.info().get('Content-Encoding') == 'gzip':
                        file_buffer = StringIO.StringIO(url_handle.read())
                        url_handle = gzip.GzipFile(fileobj=file_buffer)
                    graph = generate_course_forums_d3(url_handle)
                except Exception as error:
                    LOG.error(
                        "Error opening graph_course_forums_usage data report: %s",
                        error,
                    )
                break
    if graph:
        response = JsonResponse({
            'data': graph,
            'filename': clicked_text,
        })
    else:
        response = JsonResponse({
            'data': 'failure',
        })
    return response
Example #14
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore using the old
     S3ReportStore configuration.
     """
     connection = boto.connect_s3()
     connection.create_bucket(settings.GRADES_DOWNLOAD['BUCKET'])
     return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #15
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore using the old
     S3ReportStore configuration.
     """
     connection = boto.connect_s3()
     connection.create_bucket(settings.GRADES_DOWNLOAD['BUCKET'])
     return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #16
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore configured to use the
     local filesystem for storage.
     """
     test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
     test_settings['STORAGE_KWARGS'] = {'location': settings.GRADES_DOWNLOAD['ROOT_PATH']}
     with override_settings(GRADES_DOWNLOAD=test_settings):
         return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #17
 def download_url_for_last_report(self):
     """ Get the URL for the last report, if any """
     # Unfortunately this is a bit inefficient due to the ReportStore API
     if not self.last_export_result or self.last_export_result['error'] is not None:
         return None
     from lms.djangoapps.instructor_task.models import ReportStore
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     course_key = getattr(self.scope_ids.usage_id, 'course_key', None)
     return dict(report_store.links_for(course_key)).get(self.last_export_result['report_filename'])
Example #18
 def download_url_for_last_report(self):
     """ Get the URL for the last report, if any """
     # Unfortunately this is a bit inefficient due to the ReportStore API
     if not self.last_export_result or self.last_export_result['error'] is not None:
         return None
     from lms.djangoapps.instructor_task.models import ReportStore
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     course_key = getattr(self.scope_ids.usage_id, 'course_key', None)
     return dict(report_store.links_for(course_key)).get(self.last_export_result['report_filename'])
Example #19
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore configured to use the
     local filesystem for storage.
     """
     test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
     test_settings['STORAGE_KWARGS'] = {'location': settings.GRADES_DOWNLOAD['ROOT_PATH']}
     with override_settings(GRADES_DOWNLOAD=test_settings):
         return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #20
 def get_csv_row_with_headers(self):
     """
      Helper function to return a list with the column names from the CSV file (the first row)
     """
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     report_csv_filename = report_store.links_for(self.course.id)[0][0]
     report_path = report_store.path_to(self.course.id, report_csv_filename)
     with report_store.storage.open(report_path) as csv_file:
         rows = unicodecsv.reader(csv_file, encoding='utf-8-sig')
         return next(rows)
Example #21
 def get_csv_row_with_headers(self):
     """
      Helper function to return a list with the column names from the CSV file (the first row)
     """
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     report_csv_filename = report_store.links_for(self.course.id)[0][0]
     report_path = report_store.path_to(self.course.id, report_csv_filename)
     with report_store.storage.open(report_path) as csv_file:
         rows = unicodecsv.reader(csv_file, encoding='utf-8')
         return rows.next()
Example #22
def delete_report_download(request, course_id):
    """
    Delete a downloaded report from the Instructor Dashboard
    """
    course_id = SlashSeparatedCourseKey.from_string(course_id)
    filename = request.POST.get('filename')
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.delete_file(course_id, filename)
    message = {
        'status': _('The report was successfully deleted!'),
    }
    return JsonResponse(message)
Example #23
 def test_delete_report(self):
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     task_input = {'features': []}
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 0)
     with patch('lms.djangoapps.instructor_task.tasks_helper._get_current_task'):
         upload_students_csv(None, None, self.course.id, task_input, 'calculated')
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 1)
     filename = links[0][0]
     report_store.delete_file(self.course.id, filename)
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 0)
Example #24
 def save_archive(self,
                  course_id,
                  filename,
                  timestamp,
                  config_name='GRADES_DOWNLOAD'):
     report_store = ReportStore.from_config(config_name)
     zip_file_name = u"{filename}_{course}_{timestamp_str}.zip".format(
         filename=filename,
         course=get_valid_filename(
             unicode("_").join(
                 [course_id.org, course_id.course, course_id.run])),
         timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M"))
     report_store.store(course_id, zip_file_name, self.archive.read())
Example #25
 def test_delete_report(self):
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     task_input = {'features': []}
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 0)
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         upload_students_csv(None, None, self.course.id, task_input, 'calculated')
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 1)
     filename = links[0][0]
     report_store.delete_file(self.course.id, filename)
     links = report_store.links_for(self.course.id)
     self.assertEquals(len(links), 0)
Example #26
 def test_financial_report_overrides(self):
     """
      Test that CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct the file url, instead of the domain
      defined via the AWS_S3_CUSTOM_DOMAIN setting.
     """
     with override_settings(FINANCIAL_REPORTS={
         'STORAGE_TYPE': 's3',
         'BUCKET': 'edx-financial-reports',
         'CUSTOM_DOMAIN': 'edx-financial-reports.s3.amazonaws.com',
         'ROOT_PATH': 'production',
     }):
         report_store = ReportStore.from_config(config_name="FINANCIAL_REPORTS")
         # Make sure CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct file url
         self.assertIn("edx-financial-reports.s3.amazonaws.com", report_store.storage.url(""))
Example #27
def upload_csv_to_report_store(rows,
                               csv_name,
                               course_id,
                               timestamp,
                               config_name='GRADES_DOWNLOAD'):
    report_store = ReportStore.from_config(config_name)
    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M"))

    report_store.store_rows(course_id, report_name, rows)
    tracker_emit(csv_name)
    return report_name
Example #28
    def _get_report_urls(self, context):
        report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
        files_urls_pairs = report_store.links_for(context.course_id)
        get_first = lambda iterable: iterable[0] if len(iterable) else None
        find_by_name = lambda name: get_first(
            [url for filename, url in files_urls_pairs if name in filename])

        report_name = self.REPORT_NAME_TEMPLATE.format(task_id=context.task_id)
        err_report_name = self.REPORT_NAME_TEMPLATE.format(
            task_id=context.task_id)

        csv_url = find_by_name(report_name)
        csv_err_url = find_by_name(err_report_name)
        return csv_url, csv_err_url
Example #29
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore configured to use S3 for
     storage.
     """
     test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
     test_settings['STORAGE_CLASS'] = 'storages.backends.s3boto.S3BotoStorage'
     test_settings['STORAGE_KWARGS'] = {
         'bucket': settings.GRADES_DOWNLOAD['BUCKET'],
         'location': settings.GRADES_DOWNLOAD['ROOT_PATH'],
     }
     with override_settings(GRADES_DOWNLOAD=test_settings):
         self.mocked_connection.create_bucket(settings.GRADES_DOWNLOAD['STORAGE_KWARGS']['bucket'])
         return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #30
 def save_csv(self,
              course_id,
              filename,
              cvs_file,
              timestamp,
              config_name='GRADES_DOWNLOAD'):
     report_store = ReportStore.from_config(config_name)
     csv_filename = u"{filename}_{course}_{timestamp_str}.csv".format(
         filename=filename,
         course=get_valid_filename(
             unicode("_").join(
                 [course_id.org, course_id.course, course_id.run])),
         timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M"))
     report_store.store(course_id, csv_filename, cvs_file)
Example #31
 def test_financial_report_overrides(self):
     """
      Test that CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct the file url, instead of the domain
      defined via the AWS_S3_CUSTOM_DOMAIN setting.
     """
     with override_settings(FINANCIAL_REPORTS={
         'STORAGE_TYPE': 's3',
         'BUCKET': 'edx-financial-reports',
         'CUSTOM_DOMAIN': 'edx-financial-reports.s3.amazonaws.com',
         'ROOT_PATH': 'production',
     }):
         report_store = ReportStore.from_config(config_name="FINANCIAL_REPORTS")
         # Make sure CUSTOM_DOMAIN from FINANCIAL_REPORTS is used to construct file url
         self.assertIn("edx-financial-reports.s3.amazonaws.com", report_store.storage.url(""))
Example #32
def upload_zip_to_report_store(file, zip_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
    """
    Upload given file buffer as a zip file using ReportStore.
    """
    report_store = ReportStore.from_config(config_name)

    report_name = "{course_prefix}_{zip_name}_{timestamp_str}.zip".format(
        course_prefix=course_filename_prefix_generator(course_id),
        zip_name=zip_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
    )

    report_store.store(course_id, report_name, file)
    tracker_emit(zip_name)
    return report_name
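A minimal sketch of passing an in-memory buffer to this helper; the archive content and zip name are illustrative, course_id is again assumed to be a CourseKey in scope, and UTC is assumed to be the pytz timezone used elsewhere:

from io import BytesIO
from datetime import datetime
from pytz import UTC

# The buffer stands in for a real zip archive built elsewhere.
zip_buffer = BytesIO(b'PK...')
report_name = upload_zip_to_report_store(zip_buffer, 'submissions_archive', course_id, datetime.now(UTC))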
Example #33
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore configured to use S3 for
     storage.
     """
     test_settings = copy.deepcopy(settings.GRADES_DOWNLOAD)
     test_settings['STORAGE_CLASS'] = 'openedx.core.storage.S3ReportStorage'
     test_settings['STORAGE_KWARGS'] = {
         'bucket': settings.GRADES_DOWNLOAD['BUCKET'],
         'location': settings.GRADES_DOWNLOAD['ROOT_PATH'],
     }
     with override_settings(GRADES_DOWNLOAD=test_settings):
         connection = boto.connect_s3()
         connection.create_bucket(settings.GRADES_DOWNLOAD['STORAGE_KWARGS']['bucket'])
         return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #34
 def test_xblockcompletion_get_all_data(self, report):
     """
         Test xblockcompletion view all data
     """
     state_1 = {_("Answer ID"): 'answer_id',
         _("Question"): 'question_text',
         _("Answer"): 'answer_text',
         _("Correct Answer") : 'correct_answer_text'
         }
     state_2 = {_("Answer ID"): 'answer_id',
         _("Question"): 'question_text',
         _("Answer"): 'correct_answer_text',
         _("Correct Answer") : 'correct_answer_text'
         }
     generated_report_data = {self.student.username : [state_1,state_2,state_1]}               
     report.return_value = generated_report_data
     from lms.djangoapps.courseware.models import StudentModule
     data = {'format': False, 'course': str(self.course.id), 'base_url':'this_is_a_url'}
     task_input = {'data': data }
     module = StudentModule(
         module_state_key=self.items[0].location,
         student=self.student,
         course_id=self.course.id,
         module_type='problem',
         state='{"score": {"raw_earned": 1, "raw_possible": 3}, "seed": 1, "attempts": 1}')
     module.save()
     with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):
         result = generate(
             None, None, self.course.id,
             task_input, 'EOL_Xblock_Completion'
         )
     report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
     header_row = ",".join(['Titulo', 'Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Pregunta', 'Respuesta Estudiante', 'Resp. Correcta', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Pts Total Componente', 'Url', 'block_id'])
     base_student_row = ",".join([
         self.items[0].display_name,
         self.student.username,
         self.student.email,
         '',
         '1.' + self.chapter.display_name,
         '1.1.' + self.section.display_name,
         '1.1.1.' + self.subsection.display_name
     ])
     student_row = base_student_row + ',question_text,answer_text,correct_answer_text,1,0,1.0,3'
     student_row2 = base_student_row + ',question_text,correct_answer_text,correct_answer_text,1,1.0,1.0,3'
     expected_data = [header_row, student_row, student_row2, student_row]
     self._verify_csv_file_report(report_store, expected_data)
Example #35
def generate(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
    """
    For a given `course_id`, generate a CSV file containing
    all student answers to a given problem, and store using a `ReportStore`.
    """
    start_time = time()
    start_date = datetime.now(UTC)
    num_reports = 1
    task_progress = TaskProgress(action_name, num_reports, start_time)
    current_step = {'step': 'XblockCompletion - Calculating students answers to problem'}
    task_progress.update_task_state(extra_meta=current_step)
    
    data = task_input.get('data')
    filter_types = ['problem']
    students = XblockCompletionView().get_all_enrolled_users(data['course'])
    course_structure = get_data_course(data['course'])

    report_store = ReportStore.from_config('GRADES_DOWNLOAD')
    csv_name = 'Reporte_de_Preguntas'
    if data['format']:
        csv_name = 'Reporte_de_Preguntas_Resumen'

    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=start_date.strftime("%Y-%m-%d-%H%M")
    )
    output_buffer = ContentFile('')
    if six.PY2:
        output_buffer.write(codecs.BOM_UTF8)
    csvwriter = csv.writer(output_buffer)

    student_states = XblockCompletionView().get_all_states(data['course'], filter_types)
    csvwriter = XblockCompletionView()._build_student_data(data, students, course_structure, student_states, filter_types, csvwriter)

    current_step = {'step': 'XblockCompletion - Uploading CSV'}
    task_progress.update_task_state(extra_meta=current_step)

    output_buffer.seek(0)
    report_store.store(course_id, report_name, output_buffer)
    current_step = {
        'step': 'XblockCompletion - CSV uploaded',
        'report_name': report_name,
    }

    return task_progress.update_task_state(extra_meta=current_step)
Example #36
def get_report_info(csv_name,
                    course_id,
                    timestamp,
                    config_name='GRADES_DOWNLOAD'):
    """
    Returns ReportStore and Report Name.

    Arguments:
        csv_name: Name of the resulting CSV
        course_id: ID of the course

    Returns:
        report_store: ReportStore - Instance of report store
        report_name: string - Name of the generated report
    """
    report_store = ReportStore.from_config(config_name)
    report_name = u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
        course_prefix=course_filename_prefix_generator(course_id),
        csv_name=csv_name,
        timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M"))
    return report_store, report_name
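Since the helper returns both the configured store and the computed file name, a hedged usage sketch might unpack the pair and write rows under that name; the csv_name is illustrative, and course_id, rows, and the pytz UTC import are assumptions:

from datetime import datetime
from pytz import UTC

report_store, report_name = get_report_info('grade_report', course_id, datetime.now(UTC))
report_store.store_rows(course_id, report_name, rows)  # rows assumed to be a list of CSV rows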
Example #37
def export_csv_data(block_id, course_id):
    """
    Exports student answers to all supported questions to a CSV file.
    """

    src_block = modulestore().get_item(UsageKey.from_string(block_id))

    start_timestamp = time.time()
    course_key = CourseKey.from_string(course_id)

    filename = src_block.get_filename()

    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(course_key, filename, src_block.prepare_data())

    generation_time_s = time.time() - start_timestamp

    return {
        "error": None,
        "report_filename": filename,
        "start_timestamp": start_timestamp,
        "generation_time_s": generation_time_s,
    }
Example #38
def _upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
    """
    Upload Executive Summary Html file using ReportStore.

    Arguments:
        data_dict: containing executive report data.
        report_name: Name of the resulting Html File.
        course_id: ID of the course
    """
    report_store = ReportStore.from_config(config_name)

    # Use the data dict and html template to generate the output buffer
    output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))

    report_store.store(
        course_id,
        u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
            course_prefix=course_filename_prefix_generator(course_id),
            report_name=report_name,
            timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
        ),
        output_buffer,
    )
    tracker_emit(report_name)
Example #39
def export_data(course_id, source_block_id_str, block_types, user_ids, match_string):
    """
    Exports student answers to all supported questions to a CSV file.
    """
    start_timestamp = time.time()

    logger.debug("Beginning data export")
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(source_block_id_str)
    except InvalidKeyError:
        raise ValueError("Could not find the specified Block ID.")

    src_block = modulestore().get_item(usage_key)
    course_key_str = unicode(course_key)
    type_map = {cls.__name__: cls for cls in [MCQBlock, MRQBlock, RatingBlock, AnswerBlock]}

    if not block_types:
        block_types = tuple(type_map.values())
    else:
        block_types = tuple(type_map[class_name] for class_name in block_types)

    # Build an ordered list of blocks to include in the export
    blocks_to_include = []

    def scan_for_blocks(block):
        """ Recursively scan the course tree for blocks of interest """
        if isinstance(block, block_types):
            blocks_to_include.append(block)
        elif block.has_children:
            for child_id in block.children:
                try:
                    scan_for_blocks(block.runtime.get_block(child_id))
                except ItemNotFoundError:
                    # Blocks may refer to missing children. Don't break in this case.
                    pass

    scan_for_blocks(src_block)

    # Define the header row of our CSV:
    rows = []
    rows.append(
        ["Section", "Subsection", "Unit", "Type", "Question", "Answer", "Username", "User ID", "User E-mail"]
    )

    # Collect results for each block in blocks_to_include
    for block in blocks_to_include:
        if not user_ids:
            results = _extract_data(course_key_str, block, None, match_string)
            rows += results
        else:
            for user_id in user_ids:
                results = _extract_data(course_key_str, block, user_id, match_string)
                rows += results

    # Generate the CSV:
    filename = u"pb-data-export-{}.csv".format(time.strftime("%Y-%m-%d-%H%M%S", time.gmtime(start_timestamp)))
    report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
    report_store.store_rows(course_key, filename, rows)

    generation_time_s = time.time() - start_timestamp
    logger.debug("Done data export - took {} seconds".format(generation_time_s))

    return {
        "error": None,
        "report_filename": filename,
        "start_timestamp": start_timestamp,
        "generation_time_s": generation_time_s,
        "display_data": [] if len(rows) == 1 else rows[1:1001]  # Limit to preview of 1000 items
    }
Example #40
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore using the old
     LocalFSReportStore configuration.
     """
     return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #41
 def _build_student_data(self, data, students, course_structure, student_states, filter_types, csvwriter):
     """
          Create a list of lists to build the csv report
     """
     url_base = data['base_url']
     course_id = data['course']
     is_resumen = data['format']
     course_key = CourseKey.from_string(course_id)
     if is_resumen:
         header = ['Titulo', 'Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Url', 'block_id']
     else:
         header = ['Titulo', 'Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Pregunta', 'Respuesta Estudiante', 'Resp. Correcta', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Pts Total Componente', 'Url', 'block_id']
     csvwriter.writerow(_get_utf8_encoded_rows(header))
     max_count = None
     store = modulestore()
     list_blocks = self.process_data_course(course_structure, filter_types, [], iteri=[1,1,1])
     for block in list_blocks:
         with store.bulk_operations(course_key):
             block_key = UsageKey.from_string(block['block_id'])
             if filter_types is not None and block_key.block_type not in filter_types:
                 continue
             block_item = store.get_item(block_key)
             generated_report_data = defaultdict(list)
             if not is_resumen:
                 generated_report_data = self.get_report_xblock(block_key, student_states[block['block_id']], block_item)
             if generated_report_data is None:
                 continue
             jumo_to_url = url_base + reverse('jump_to',kwargs={
                         'course_id': course_id,
                         'location': block['block_id']})
             for response in student_states[block['block_id']]:
                 if response['username'] not in students:
                     continue
                 if is_resumen:
                     if block_key.block_type != 'problem':
                         pass
                     else:
                         responses = self.set_data_is_resumen(
                             block_item.display_name, 
                             block['block_id'],
                             response,
                             block['section'],
                             block['subsection'],
                             block['unit'],
                             students, jumo_to_url
                             )
                         if responses:
                             csvwriter.writerow(_get_utf8_encoded_rows(responses))
                 else:
                     # A human-readable location for the current block
                     # A machine-friendly location for the current block
                     # A block that has a single state per user can contain multiple responses
                     # within the same state.
                     if block_key.block_type != 'problem':
                         pass
                     else:
                         user_states = generated_report_data.get(response['username'])
                         if user_states:
                             responses = self.set_data_is_all(
                                     block_item.display_name, 
                                     block['block_id'],
                                     response,
                                     block['section'],
                                     block['subsection'],
                                     block['unit'],
                                     students,
                                     user_states, jumo_to_url
                                     )
                             if responses:
                                 csvwriter.writerows(ReportStore()._get_utf8_encoded_rows(responses))
     return csvwriter
Example #42
def upload_user_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, for given usernames generates a grades CSV file,
    and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it.

    Unenrolled users and unknown usernames are stored in *_err_*.csv
    This task is very close to the .upload_grades_csv from instructor_tasks.task_helper
    The difference is that we filter enrolled students against requested usernames and
    we push info about this into PLP
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    extended_kwargs_id = _task_input.get("extended_kwargs_id")
    extended_kwargs = InstructorTaskExtendedKwargs.get_kwargs_for_id(extended_kwargs_id)
    usernames = extended_kwargs.get("usernames", None)

    err_rows = [["id", "username", "error_msg"]]
    if usernames is None:
        message = "Error occured during edx task execution: no usersnames in InstructorTaskExtendedKwargs."
        TASK_LOG.error(u'%s, Task type: %s, ' + message, task_info_string)
        err_rows.append(["-1", "__", message])
        usernames = []

    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    enrolled_students = enrolled_students.filter(username__in=usernames)
    total_enrolled_students = enrolled_students.count()
    requester_id = _task_input.get("requester_id")
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    current_step = {'step': 'Calculating Grades'}

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )
    found_students = User.objects.filter(username__in=usernames)
    # Check invalid usernames
    if len(found_students)!= len(usernames):
        found_students_usernames = [x.username for x in found_students]
        for u in usernames:
            if u not in found_students_usernames:
                err_rows.append([-1, u, "invalid_username"])
    # Check not enrolled requested students
    if found_students != enrolled_students:
        diff = found_students.exclude(id__in=enrolled_students)
        for u in diff:
            if u in diff:
                err_rows.append([u.id, u.username, "enrollment_for_username_not_found"])

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,

        total_enrolled_students
    )

    graded_assignments = course.grading.graded_assignments(course_id)
    grade_header = course.grading.grade_header(graded_assignments)

    rows.append(
        ["Student ID", "Email", "Username", "Last Name", "First Name", "Second Name", "Grade", "Grade Percent"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        certificate_info_header
    )
    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if not course_grade:
            # An empty course_grade means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
            continue

        # We were able to successfully grade this student for this course.
        task_progress.succeeded += 1

        cohorts_group_name = []
        if course_is_cohorted:
            group = get_cohort(student, course_id, assign=False)
            cohorts_group_name.append(group.name if group else '')

        group_configs_group_names = []
        for partition in experiment_partitions:
            group = PartitionService(course_id).get_group(student, partition, assign=False)
            group_configs_group_names.append(group.name if group else '')

        team_name = []
        if teams_enabled:
            try:
                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                team_name.append(membership.team.name)
            except CourseTeamMembership.DoesNotExist:
                team_name.append('')

        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
            student,
            course_id,
            enrollment_mode
        )
        certificate_info = certificate_info_for_user(
            student,
            course_id,
            course_grade.letter_grade,
            student.id in whitelisted_user_ids
        )
        second_name = ''
        try:
            up = UserProfile.objects.get(user=student)
            if up.goals:
                second_name = json.loads(up.goals).get('second_name', '')
        except ValueError:
            pass
        if certificate_info[0] == 'Y':
            TASK_LOG.info(
                u'Student is marked eligible_for_certificate'
                u'(user=%s, course_id=%s, grade_percent=%s gradecutoffs=%s, allow_certificate=%s, is_whitelisted=%s)',
                student,
                course_id,
                course_grade.percent,
                course.grade_cutoffs,
                student.profile.allow_certificate,
                student.id in whitelisted_user_ids
            )

        grade_results = course.grading.grade_results(graded_assignments, course_grade)

        grade_results = list(chain.from_iterable(grade_results))

        rows.append(
            [student.id, student.email, student.username, student.last_name, student.first_name,
             second_name, course_grade.percent, course_grade.percent*100] +
            grade_results + cohorts_group_name + group_configs_group_names + team_name +
            [enrollment_mode] + [verification_status] + certificate_info
        )
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    custom_grades_download = get_custom_grade_config()

    report_hash_unique_hash = hex(random.getrandbits(32))[2:]
    report_name = 'plp_grade_users_report_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    err_report_name = 'plp_grade_users_report_err_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    upload_csv_to_report_store(rows, report_name, course_id, start_date, config_name=custom_grades_download)

    # If there are any error rows (don't count the header), write them out as well
    has_errors = len(err_rows) > 1
    if has_errors:
        upload_csv_to_report_store(err_rows, err_report_name, course_id, start_date, config_name=custom_grades_download)

    callback_url = _task_input.get("callback_url", None)

    if callback_url:
        report_store = ReportStore.from_config(config_name=custom_grades_download)
        files_urls_pairs = report_store.links_for(course_id)
        find_by_name = lambda name: [url for filename, url in files_urls_pairs if name in filename][0]
        try:
            csv_url = find_by_name(report_name)
            csv_err_url = find_by_name(err_report_name) if has_errors else None
            PlpApiClient().push_grade_api_result(callback_url, csv_url, csv_err_url)
        except Exception as e:
            TASK_LOG.error("Failed push to PLP:{}".format(str(e)))

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
Example #43
 def create_report_store(self):
     """
     Create and return a DjangoStorageReportStore using the old
     LocalFSReportStore configuration.
     """
     return ReportStore.from_config(config_name='GRADES_DOWNLOAD')
Example #44
def upload_user_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):  # pylint: disable=too-many-statements
    """
    For a given `course_id`, for given usernames generates a grades CSV file,
    and store using a `ReportStore`. Once created, the files can
    be accessed by instantiating another `ReportStore` (via
    `ReportStore.from_config()`) and calling `links_for()` on it.

    Unenrolled users and unknown usernames are stored in *_err_*.csv
    This task is very close to the .upload_grades_csv from instructor_tasks.task_helper
    The difference is that we filter enrolled students against requested usernames and
    we push info about this into PLP
    """
    start_time = time()
    start_date = datetime.now(UTC)
    status_interval = 100
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(
        task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
        entry_id=_entry_id,
        course_id=course_id,
        task_input=_task_input
    )
    TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)

    extended_kwargs_id = _task_input.get("extended_kwargs_id")
    extended_kwargs = InstructorTaskExtendedKwargs.get_kwargs_for_id(extended_kwargs_id)
    usernames = extended_kwargs.get("usernames", None)

    err_rows = [["id", "username", "error_msg"]]
    if usernames is None:
        message = "Error occured during edx task execution: no usersnames in InstructorTaskExtendedKwargs."
        TASK_LOG.error(u'%s, Task type: %s, ' + message, task_info_string)
        err_rows.append(["-1", "__", message])
        usernames = []

    enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
    enrolled_students = enrolled_students.filter(username__in=usernames)
    total_enrolled_students = enrolled_students.count()
    requester_id = _task_input.get("requester_id")
    task_progress = TaskProgress(action_name, total_enrolled_students, start_time)

    course = get_course_by_id(course_id)
    course_is_cohorted = is_course_cohorted(course.id)
    teams_enabled = course.teams_enabled
    cohorts_header = ['Cohort Name'] if course_is_cohorted else []
    teams_header = ['Team Name'] if teams_enabled else []

    experiment_partitions = get_split_user_partitions(course.user_partitions)
    group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]

    certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
    certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
    whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]

    # Loop over all our students and build our CSV lists in memory
    rows = []
    current_step = {'step': 'Calculating Grades'}

    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students,
    )
    found_students = User.objects.filter(username__in=usernames)
    # Check invalid usernames
    if len(found_students) != len(usernames):
        found_students_usernames = [x.username for x in found_students]
        for u in usernames:
            if u not in found_students_usernames:
                err_rows.append([-1, u, "invalid_username"])
    # Check requested students that are not enrolled in the course.
    # Comparing two QuerySets with `!=` only compares object identity, so compute
    # the difference directly instead.
    not_enrolled = found_students.exclude(id__in=enrolled_students)
    for u in not_enrolled:
        err_rows.append([u.id, u.username, "enrollment_for_username_not_found"])

    total_enrolled_students = enrolled_students.count()
    student_counter = 0
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
        task_info_string,
        action_name,
        current_step,
        total_enrolled_students
    )

    graded_assignments = course.grading.graded_assignments(course_id)
    grade_header = course.grading.grade_header(graded_assignments)

    rows.append(
        ["Student ID", "Email", "Username", "Last Name", "First Name", "Second Name", "Grade", "Grade Percent"] +
        grade_header +
        cohorts_header +
        group_configs_header +
        teams_header +
        ['Enrollment Track', 'Verification Status'] +
        certificate_info_header
    )
    for student, course_grade, err_msg in CourseGradeFactory().iter(course, enrolled_students):
        # Periodically update task status (this is a cache write)
        if task_progress.attempted % status_interval == 0:
            task_progress.update_task_state(extra_meta=current_step)
        task_progress.attempted += 1

        # Now add a log entry after each student is graded to get a sense
        # of the task's progress
        student_counter += 1
        TASK_LOG.info(
            u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
            task_info_string,
            action_name,
            current_step,
            student_counter,
            total_enrolled_students
        )

        if not course_grade:
            # An empty course_grade means we failed to grade a student.
            task_progress.failed += 1
            err_rows.append([student.id, student.username, err_msg])
            continue

        # We were able to successfully grade this student for this course.
        task_progress.succeeded += 1

        cohorts_group_name = []
        if course_is_cohorted:
            group = get_cohort(student, course_id, assign=False)
            cohorts_group_name.append(group.name if group else '')

        group_configs_group_names = []
        for partition in experiment_partitions:
            group = PartitionService(course_id).get_group(student, partition, assign=False)
            group_configs_group_names.append(group.name if group else '')

        team_name = []
        if teams_enabled:
            try:
                membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
                team_name.append(membership.team.name)
            except CourseTeamMembership.DoesNotExist:
                team_name.append('')

        enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
        verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
            student,
            course_id,
            enrollment_mode
        )
        certificate_info = certificate_info_for_user(
            student,
            course_id,
            course_grade.letter_grade,
            student.id in whitelisted_user_ids
        )
        # The "second name" is stored as JSON inside the profile's free-form `goals` field.
        second_name = ''
        try:
            up = UserProfile.objects.get(user=student)
            if up.goals:
                second_name = json.loads(up.goals).get('second_name', '')
        except (UserProfile.DoesNotExist, ValueError):
            # Missing profile or malformed JSON: leave second_name empty.
            pass
        if certificate_info[0] == 'Y':
            TASK_LOG.info(
                u'Student is marked eligible_for_certificate '
                u'(user=%s, course_id=%s, grade_percent=%s gradecutoffs=%s, allow_certificate=%s, is_whitelisted=%s)',
                student,
                course_id,
                course_grade.percent,
                course.grade_cutoffs,
                student.profile.allow_certificate,
                student.id in whitelisted_user_ids
            )

        grade_results = course.grading.grade_results(graded_assignments, course_grade)

        grade_results = list(chain.from_iterable(grade_results))

        rows.append(
            [student.id, student.email, student.username, student.last_name, student.first_name,
             second_name, course_grade.percent, course_grade.percent * 100] +
            grade_results + cohorts_group_name + group_configs_group_names + team_name +
            [enrollment_mode] + [verification_status] + certificate_info
        )
    TASK_LOG.info(
        u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
        task_info_string,
        action_name,
        current_step,
        student_counter,
        total_enrolled_students
    )

    # By this point, we've got the rows we're going to stuff into our CSV files.
    current_step = {'step': 'Uploading CSVs'}
    task_progress.update_task_state(extra_meta=current_step)
    TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)

    # Perform the actual upload
    custom_grades_download = get_custom_grade_config()

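    # A short random hex suffix keeps report names unique across runs, so earlier
    # uploads are not overwritten.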
    report_hash_unique_hash = hex(random.getrandbits(32))[2:]
    report_name = 'plp_grade_users_report_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    err_report_name = 'plp_grade_users_report_err_{}_id_{}'.format(report_hash_unique_hash, requester_id)
    upload_csv_to_report_store(rows, report_name, course_id, start_date, config_name=custom_grades_download)

    # If there are any error rows (don't count the header), write them out as well
    has_errors = len(err_rows) > 1
    if has_errors:
        upload_csv_to_report_store(err_rows, err_report_name, course_id, start_date, config_name=custom_grades_download)

    callback_url = _task_input.get("callback_url", None)

    if callback_url:
        report_store = ReportStore.from_config(config_name=custom_grades_download)
        files_urls_pairs = report_store.links_for(course_id)
        find_by_name = lambda name: [url for filename, url in files_urls_pairs if name in filename][0]
        try:
            csv_url = find_by_name(report_name)
            csv_err_url = find_by_name(err_report_name) if has_errors else None
            PlpApiClient().push_grade_api_result(callback_url, csv_url, csv_err_url)
        except Exception as e:
            TASK_LOG.error(u"Failed to push grade report to PLP: %s", e)

    # One last update before we close out...
    TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
    return task_progress.update_task_state(extra_meta=current_step)
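The callback step above finds the uploaded files by substring-matching the report names against the (filename, url) pairs returned by `links_for()`. A self-contained sketch of that lookup pattern follows; the sample data is made up for illustration, and returning None replaces the IndexError the inline lambda would raise when nothing matches:

def find_report_url(files_urls_pairs, name):
    """Return the URL of the first report whose filename contains `name`, or None."""
    for filename, url in files_urls_pairs:
        if name in filename:
            return url
    return None

# Illustrative data only.
pairs = [
    ("course_plp_grade_users_report_1a2b3c4d_id_7.csv", "https://example.com/report.csv"),
]
assert find_report_url(pairs, "plp_grade_users_report_1a2b3c4d") == "https://example.com/report.csv"
assert find_report_url(pairs, "no_such_report") is None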