Code example #1
File: test_data.py Project: skim-ks/edx-ora2
    def test_many_submissions(self):
        # Create a lot of submissions
        num_submissions = 234
        for index in range(num_submissions):
            student_item = {
                'student_id': "test_user_{}".format(index),
                'course_id': 'test_course',
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            submission_text = "test submission {}".format(index)
            submission = sub_api.create_submission(student_item, submission_text)
            workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

        # Generate a CSV file for the submissions
        output_streams = self._output_streams(['submission'])
        writer = CsvWriter(output_streams)
        writer.write_to_csv('test_course')

        # Parse the generated CSV
        content = output_streams['submission'].getvalue()
        rows = content.split('\n')

        # Remove the first row (header) and last row (blank line)
        rows = rows[1:-1]

        # Check that we have the right number of rows
        self.assertEqual(len(rows), num_submissions)
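
These tests rely on an _output_streams helper that is not shown on this page. A minimal sketch of what it plausibly does, assuming each named output is backed by an in-memory text buffer (consistent with the getvalue() calls above):

    # Module-level import assumed: import io

    def _output_streams(self, names):
        # One StringIO buffer per output name, so tests can inspect
        # the CsvWriter output without touching the filesystem.
        return {name: io.StringIO() for name in names}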
Code example #2
File: test_data.py Project: skim-ks/edx-ora2
    def test_other_course_id(self):
        # Try a course ID with no submissions
        self._load_fixture('db_fixtures/scored.json')
        output_streams = self._output_streams(CsvWriter.MODELS)
        writer = CsvWriter(output_streams)
        writer.write_to_csv('other_course')

        # Expect that each output has only two lines (the header and a blank line)
        # since this course has no submissions
        for output in output_streams.values():
            content = output.getvalue()
            rows = content.split('\n')
            self.assertEqual(len(rows), 2)
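
The _load_fixture helper is likewise not shown. In a Django test suite it would typically wrap the loaddata management command; a hedged sketch, whose path handling may differ from the real helper:

    # Module-level imports assumed: import os.path
    # and: from django.core.management import call_command

    def _load_fixture(self, fixture_relpath):
        # Load a JSON fixture (e.g. 'db_fixtures/scored.json') into the
        # test database so the exporter has data to read.
        fixture_path = os.path.join(os.path.dirname(__file__), fixture_relpath)
        call_command('loaddata', fixture_path, verbosity=0)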
Code example #3
File: upload_oa_data.py Project: guangyawli/edx-ora2
    def _dump_to_csv(self, course_id, csv_dir):
        """
        Create CSV files for submission/assessment data in a directory.

        Args:
            course_id (unicode): The ID of the course to dump data from.
            csv_dir (unicode): The absolute path to the directory in which to create CSV files.

        Returns:
            None
        """
        output_streams = {
            name: open(os.path.join(csv_dir, rel_path), 'w')
            for name, rel_path in self.OUTPUT_CSV_PATHS.items()
        }
        csv_writer = CsvWriter(output_streams, self._progress_callback)
        csv_writer.write_to_csv(course_id)
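
Note that this method opens each output file without ever closing it, so buffered data can be lost if the process exits early. A variant (not the project's actual code) that guarantees the handles are closed, using contextlib.ExitStack:

    # Module-level imports assumed: import contextlib, os

    def _dump_to_csv(self, course_id, csv_dir):
        # Same behavior as above, but every file handle is closed when
        # the with-block exits, even if write_to_csv raises.
        with contextlib.ExitStack() as stack:
            output_streams = {
                name: stack.enter_context(open(os.path.join(csv_dir, rel_path), 'w'))
                for name, rel_path in self.OUTPUT_CSV_PATHS.items()
            }
            csv_writer = CsvWriter(output_streams, self._progress_callback)
            csv_writer.write_to_csv(course_id)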
Code example #4
File: test_data.py Project: skim-ks/edx-ora2
    def test_unicode(self):
        # Flush out unicode errors
        self._load_fixture('db_fixtures/unicode.json')
        output_streams = self._output_streams(CsvWriter.MODELS)
        CsvWriter(output_streams).write_to_csv(u"𝓽𝓮𝓼𝓽_𝓬𝓸𝓾𝓻𝓼𝓮")

        # Check that data ended up in the reports
        for output in output_streams.values():
            content = output.getvalue()
            rows = content.split('\n')
            self.assertGreater(len(rows), 2)
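
The course ID here is built from mathematical alphanumeric symbols (non-BMP code points), the kind of input that broke CSV export under Python 2, where the csv module could not handle unicode directly. Under Python 3 the round trip is clean, as this standalone check shows:

    import csv
    import io

    buf = io.StringIO()
    # Python 3's csv module handles non-BMP characters natively.
    csv.writer(buf).writerow([u"𝓽𝓮𝓼𝓽_𝓬𝓸𝓾𝓻𝓼𝓮", "test submission"])
    assert u"𝓽𝓮𝓼𝓽_𝓬𝓸𝓾𝓻𝓼𝓮" in buf.getvalue()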
Code example #5
File: test_data.py Project: skim-ks/edx-ora2
    def test_write_to_csv(self, data):
        # Create in-memory buffers for the CSV file data
        output_streams = self._output_streams(data['expected_csv'].keys())

        # Load the database fixture
        # We use the database fixture to ensure that this test will
        # catch backwards-compatibility issues even if the Django model
        # implementation or API calls change.
        self._load_fixture(data['fixture'])

        # Write the data to CSV
        writer = CsvWriter(output_streams)
        writer.write_to_csv(data['course_id'])

        # Check that the CSV matches what we expected
        for output_name, expected_csv in data['expected_csv'].items():
            output_buffer = output_streams[output_name]
            output_buffer.seek(0)
            actual_csv = csv.reader(output_buffer)
            for expected_row in expected_csv:
                actual_row = next(actual_csv, None)
                self.assertEqual(
                    actual_row, expected_row,
                    msg="Output name: {}".format(output_name)
                )

            # Check for extra rows
            extra_row = next(actual_csv, None)

            if extra_row is not None:
                self.fail(u"CSV contains extra row: {}".format(extra_row))