Example #1
0
    def run_csv_export_test(
        self, expected_lines, export_options=None, asset=None, user=None
    ):
        """
        Repeat yourself less while writing CSV export tests.

        `expected_lines`: a list of strings *without* trailing newlines whose
                          UTF-8 encoded representation should match the export
                          result
        `export_options`: (optional) extra options merged into
                          `ExportTask.data`. Do not include `source` or `type`
        `asset`: (optional) the asset to export. Defaults to `self.asset`
        `user`: (optional) the user to own the export. Defaults to `self.user`
        """
        if asset is None:
            asset = self.asset
        task = ExportTask()
        task.user = user if user is not None else self.user
        task.data = {
            'source': reverse('asset-detail', args=[asset.uid]),
            'type': 'csv'
        }
        if export_options:
            task.data.update(export_options)
        messages = defaultdict(list)
        task._run_task(messages)
        # The export writes CRLF-terminated, UTF-8 encoded lines
        expected = [
            (line + '\r\n').encode('utf-8') for line in expected_lines
        ]
        self.assertEqual(list(task.result), expected)
        self.assertFalse(messages)
Example #2
0
 def test_export_exceeding_api_submission_limit(self):
     """
     Make sure the limit on count of submissions returned by the API does
     not apply to exports
     """
     excess = 10
     total = settings.SUBMISSION_LIST_LIMIT + excess
     asset = Asset.objects.create(
         name='Lots of submissions',
         owner=self.asset.owner,
         content={'survey': [{'name': 'q', 'type': 'integer'}]},
     )
     asset.deploy(backend='mock', active=True)
     version_uid = asset.latest_deployed_version.uid
     submissions = []
     for i in range(total):
         submissions.append({
             '__version__': version_uid,
             'q': i,
         })
     asset.deployment.mock_submissions(submissions)
     export_task = ExportTask()
     export_task.user = self.user
     export_task.data = {
         'source': reverse('asset-detail', args=[asset.uid]),
         'type': 'csv'
     }
     messages = defaultdict(list)
     export_task._run_task(messages)
     # Don't forget to add one for the header row!
     self.assertEqual(len(list(export_task.result)), total + 1)
Example #3
0
    def run_xls_export_test(self, expected_rows, export_options=None, user=None):
        """
        Repeat yourself less while writing XLS export tests.

        `expected_rows`: a list of rows, each a list of cell values, that
                         should match the rows of the export's first sheet
                         (the original docstring was copy-pasted from the CSV
                         helper and wrongly described this as a list of
                         UTF-8 encoded strings)
        `export_options`: (optional) extra options merged into
                          `ExportTask.data`. Do not include `source` or `type`
        `user`: (optional) the user to own the export. Defaults to `self.user`
        """
        export_task = ExportTask()
        export_task.user = self.user if user is None else user
        export_task.data = {
            'source': reverse('asset-detail', args=[self.asset.uid]),
            'type': 'xls',
        }
        if export_options:
            export_task.data.update(export_options)
        messages = defaultdict(list)
        export_task._run_task(messages)
        # No errors/warnings should have been collected during the export
        self.assertFalse(messages)

        book = xlrd.open_workbook(file_contents=export_task.result.read())
        # The export is written to a single sheet named after the asset
        self.assertEqual(book.sheet_names(), [self.asset.name])
        sheet = book.sheets()[0]
        self.assertEqual(sheet.nrows, len(expected_rows))
        for row_index, expected_row in enumerate(expected_rows):
            result_row = [cell.value for cell in sheet.row(row_index)]
            self.assertEqual(result_row, expected_row)
Example #4
0
    def test_xls_export_english_labels(self):
        """
        Export the asset to XLS with `lang` set to 'English' and verify the
        sheet name and every cell of the resulting workbook.
        """
        export_task = ExportTask()
        export_task.user = self.user
        export_task.data = {
            'source': reverse('asset-detail', args=[self.asset.uid]),
            'type': 'xls',
            'lang': 'English',  # request English labels for headers/choices
        }
        messages = defaultdict(list)
        export_task._run_task(messages)
        # No errors/warnings should have been collected during the export
        self.assertFalse(messages)

        # Rows: header labels, hlabel/tag row, then one row per submission
        expected_rows = [
            ['start', 'end', 'What kind of symmetry do you have?', 'What kind of symmetry do you have?/Spherical', 'What kind of symmetry do you have?/Radial', 'What kind of symmetry do you have?/Bilateral', 'How many segments does your body have?', 'Do you have body fluids that occupy intracellular space?', 'Do you descend from an ancestral unicellular organism?', '_id', '_uuid', '_submission_time', '_index'],
            ['', '', '#symmetry', '#symmetry', '#symmetry', '#symmetry', '#segments', '#fluids', '', '', '', '', ''],
            ['2017-10-23T05:40:39.000-04:00', '2017-10-23T05:41:13.000-04:00', 'Spherical Radial Bilateral', '1', '1', '1', '6', 'Yes, and some extracellular space', 'No', 61.0, '48583952-1892-4931-8d9c-869e7b49bafb', '2017-10-23T09:41:19', 1.0],
            ['2017-10-23T05:41:14.000-04:00', '2017-10-23T05:41:32.000-04:00', 'Radial', '0', '1', '0', '3', 'Yes', 'No', 62.0, '317ba7b7-bea4-4a8c-8620-a483c3079c4b', '2017-10-23T09:41:38', 2.0],
            ['2017-10-23T05:41:32.000-04:00', '2017-10-23T05:42:05.000-04:00', 'Bilateral', '0', '0', '1', '2', 'No / Unsure', 'Yes', 63.0, '3f15cdfe-3eab-4678-8352-7806febf158d', '2017-10-23T09:42:11', 3.0],
        ]
        book = xlrd.open_workbook(file_contents=export_task.result.read())
        # The export is written to a single sheet named after the asset
        self.assertEqual(book.sheet_names(), [self.asset.name])
        sheet = book.sheets()[0]
        self.assertEqual(sheet.nrows, len(expected_rows))
        row_index = 0
        for expected_row in expected_rows:
            result_row = [cell.value for cell in sheet.row(row_index)]
            self.assertEqual(result_row, expected_row)
            row_index += 1
    def test_csv_export(self):
        """
        Ignores the order of the rows and columns
        """

        export_task = ExportTask()
        export_task.user = self.user
        export_task.data = {
            'source': reverse('asset-detail', args=[self.asset.uid]),
            'type': 'csv',
            'lang': '_xml'
        }
        messages = defaultdict(list)
        export_task._run_task(messages)
        result_lines = list(export_task.result)
        # Map each header field name to its column position
        header = self._split_formpack_csv(result_lines[0])
        column_of = {field: index for index, field in enumerate(header)}
        results = {}
        for line in result_lines[1:]:
            values = self._split_formpack_csv(line)
            sub_id = values[column_of[self.submission_id_field]]
            # Collect only the fields under inspection, keyed by field name
            results[sub_id] = {
                field: values[column_of[field]]
                for field in self.fields_to_inspect
            }
        self.assertEqual(results, self.expected_results)
Example #6
0
 def test_log_and_mark_stuck_exports_as_errored(self):
     """
     Exports stuck in CREATED or PROCESSING should be marked as ERROR when
     a subsequent export runs, which invokes the cleanup logic.
     """
     task_data = {
         'source': reverse('asset-detail', args=[self.asset.uid]),
         'type': 'csv',
     }
     # Sanity check: no exports exist yet for this source/user
     self.assertEqual(
         0,
         ExportTask._filter_by_source_kludge(
             ExportTask.objects.filter(
                 user=self.user),
             task_data['source']
         ).count()
     )
     # Simulate a few stuck exports
     for status in (ExportTask.CREATED, ExportTask.PROCESSING):
         export_task = ExportTask()
         export_task.user = self.user
         export_task.data = task_data
         export_task.status = status
         export_task.save()
         # Backdate after the first save so the task looks stale
         # (presumably `date_created` is auto-set on save — confirm)
         export_task.date_created -= datetime.timedelta(days=1)
         export_task.save()
     # Both stuck exports are present with their simulated statuses
     self.assertSequenceEqual(
         [ExportTask.CREATED, ExportTask.PROCESSING],
         ExportTask._filter_by_source_kludge(
             ExportTask.objects.filter(
                 user=self.user),
             task_data['source']
         ).order_by('pk').values_list('status', flat=True)
     )
     # Run another export, which invokes the cleanup logic
     export_task = ExportTask()
     export_task.user = self.user
     export_task.data = task_data
     export_task.save()
     export_task.run()
     # Verify that the stuck exports have been marked
     self.assertSequenceEqual(
         [ExportTask.ERROR, ExportTask.ERROR, ExportTask.COMPLETE],
         ExportTask._filter_by_source_kludge(
             ExportTask.objects.filter(
                 user=self.user),
             task_data['source']
         ).order_by('pk').values_list('status', flat=True)
     )
Example #7
0
 def test_remove_excess_exports(self):
     """
     Only the newest MAXIMUM_EXPORTS_PER_USER_PER_FORM exports per
     user/source should survive the cleanup run by `ExportTask.run()`;
     older exports (and their result files) should be deleted.
     """
     task_data = {
         'source': reverse('asset-detail', args=[self.asset.uid]),
         'type': 'csv',
     }
     # Create and run one export, so we can verify that its `result` file
     # is later deleted
     export_task = ExportTask()
     export_task.user = self.user
     export_task.data = task_data
     export_task.save()
     export_task.run()
     self.assertEqual(export_task.status, ExportTask.COMPLETE)
     result = export_task.result
     self.assertTrue(result.storage.exists(result.name))
     # Make an excessive amount of additional exports
     excess_count = 5 + settings.MAXIMUM_EXPORTS_PER_USER_PER_FORM
     for _ in range(excess_count):
         export_task = ExportTask()
         export_task.user = self.user
         export_task.data = task_data
         export_task.save()
     created_export_tasks = ExportTask._filter_by_source_kludge(
         ExportTask.objects.filter(user=self.user),
         task_data['source']
     )
     self.assertEqual(excess_count + 1, created_export_tasks.count())
     # Identify which exports should be kept
     # NOTE(review): this queryset is lazy, so it is evaluated only in the
     # final assertion, after the cleanup has run
     export_tasks_to_keep = created_export_tasks.order_by('-date_created')[
         :settings.MAXIMUM_EXPORTS_PER_USER_PER_FORM]
     # Call `run()` once more since it invokes the cleanup logic
     export_task.run()
     self.assertEqual(export_task.status, ExportTask.COMPLETE)
     # Verify the cleanup: the first export's result file is gone...
     self.assertFalse(result.storage.exists(result.name))
     # ...and only the newest exports remain in the database
     self.assertListEqual( # assertSequenceEqual isn't working...
         list(export_tasks_to_keep.values_list('pk', flat=True)),
         list(ExportTask._filter_by_source_kludge(
             ExportTask.objects.filter(
                 user=self.user),
             task_data['source']
         ).order_by('-date_created').values_list('pk', flat=True))
     )
Example #8
0
 def _create_export_task(asset):
     """Create, synchronously run, and return a CSV export for `asset`."""
     # NOTE(review): `self` is not a parameter here — this appears to be a
     # closure defined inside a test method that captures `self` from the
     # enclosing scope. Confirm before moving or reusing this helper.
     export_task = ExportTask()
     export_task.user = self.user
     export_task.data = {
         'source': reverse('asset-detail', args=[asset.uid]),
         'type': 'csv'
     }
     messages = defaultdict(list)
     export_task._run_task(messages)
     return export_task
    def _create_export_task(self, asset=None, _type='csv'):
        """
        Create an export task owned by `self.user` for `asset` (defaulting
        to `self.asset`), run it synchronously, and return it.
        """
        if asset is None:
            target_uid = self.asset.uid
        else:
            target_uid = asset.uid

        export_task = ExportTask()
        export_task.user = self.user
        export_task.data = {
            'source': reverse(
                self._get_endpoint('asset-detail'),
                kwargs={'uid': target_uid},
            ),
            'type': _type,
        }
        export_task._run_task(defaultdict(list))

        return export_task
Example #10
0
    def test_export_long_form_title(self):
        """
        An export for an asset whose name exceeds the maximum filename
        length should have its result filename truncated to exactly
        `ExportTask.MAXIMUM_FILENAME_LENGTH`.
        """
        what_a_title = (
            'the quick brown fox jumped over the lazy dog and jackdaws love '
            'my big sphinx of quartz and pack my box with five dozen liquor '
            'jugs dum cornelia legit flavia scribit et laeta est flavia quod '
            'cornelia iam in villa habitat et cornelia et flavia sunt amicae')
        # The fixture title must actually exceed the limit for the test to
        # be meaningful
        assert len(what_a_title) > ExportTask.MAXIMUM_FILENAME_LENGTH
        self.asset.name = what_a_title
        self.asset.save()
        export_task = ExportTask()
        export_task.user = self.user
        export_task.data = {
            'source': reverse('asset-detail', args=[self.asset.uid]),
            'type': 'csv',
        }
        export_task.save()
        export_task.run()

        result_filename = os.path.basename(export_task.result.name)
        assert len(result_filename) == ExportTask.MAXIMUM_FILENAME_LENGTH
Example #11
0
 def test_export_spss_labels(self):
     """
     Export the asset's SPSS label syntax files and verify the zip file
     name and the full content of each per-language `.sps` file.
     """
     export_task = ExportTask()
     export_task.user = self.user
     export_task.data = {
         'source': reverse('asset-detail', args=[self.asset.uid]),
         'type': 'spss_labels',
     }
     messages = defaultdict(list)
     # Set the current date and time artificially to generate a predictable
     # file name for the export
     utcnow = datetime.datetime.utcnow()
     with mock.patch('kpi.models.import_export_task.utcnow') as mock_utcnow:
         mock_utcnow.return_value = utcnow
         export_task._run_task(messages)
     # No errors/warnings should have been collected during the export
     self.assertFalse(messages)
     # The zip file name embeds the asset name and the mocked timestamp
     self.assertEqual(
         os.path.split(export_task.result.name)[-1],
         'Identificaci\xf3n_de_animales_-_all_versions_-_SPSS_Labels_-_'
         '{date:%Y-%m-%d-%H-%M-%S}.zip'.format(date=utcnow)
     )
     # One `.sps` file per translation, with its full expected content
     expected_file_names_and_content_lines = {
         'Identificaci\xf3n de animales - Spanish - SPSS labels.sps': [
             '\ufeffVARIABLE LABELS',
             " start 'start'",
             " /end 'end'",
             " /What_kind_of_symmetry_do_you_have '\xbfQu\xe9 tipo de simetr\xeda tiene?'",
             " /What_kind_of_symmetry_do_you_have_spherical '\xbfQu\xe9 tipo de simetr\xeda tiene? :: Esf\xe9rico'",
             " /What_kind_of_symmetry_do_you_have_radial '\xbfQu\xe9 tipo de simetr\xeda tiene? :: Radial'",
             " /What_kind_of_symmetry_do_you_have_bilateral '\xbfQu\xe9 tipo de simetr\xeda tiene? :: Bilateral'",
             " /How_many_segments_does_your_body_have '\xbfCu\xe1ntos segmentos tiene tu cuerpo?'",
             " /Do_you_have_body_flu_intracellular_space '\xbfTienes fluidos corporales que ocupan espacio intracelular?'",
             " /Do_you_descend_from_unicellular_organism '\xbfDesciende de un organismo unicelular ancestral?'",
             " /_id '_id'",
             " /_uuid '_uuid'",
             " /_submission_time '_submission_time'",
             " /_validation_status '_validation_status'",
             ' .',
             'VALUE LABELS',
             ' Do_you_have_body_flu_intracellular_space',
             " 'yes' 'S\xed'",
             " 'yes__and_some_' 'S\xed, y alg\xfan espacio extracelular'",
             " 'no___unsure' 'No / Inseguro'",
             ' /Do_you_descend_from_unicellular_organism',
             " 'yes' 'S\xed'",
             " 'no' 'No'",
             ' .'
         ],
         'Identificaci\xf3n de animales - English - SPSS labels.sps': [
             '\ufeffVARIABLE LABELS',
             " start 'start'",
             " /end 'end'",
             " /What_kind_of_symmetry_do_you_have 'What kind of symmetry do you have?'",
             " /What_kind_of_symmetry_do_you_have_spherical 'What kind of symmetry do you have? :: Spherical'",
             " /What_kind_of_symmetry_do_you_have_radial 'What kind of symmetry do you have? :: Radial'",
             " /What_kind_of_symmetry_do_you_have_bilateral 'What kind of symmetry do you have? :: Bilateral'",
             " /How_many_segments_does_your_body_have 'How many segments does your body have?'",
             " /Do_you_have_body_flu_intracellular_space 'Do you have body fluids that occupy intracellular space?'",
             " /Do_you_descend_from_unicellular_organism 'Do you descend from an ancestral unicellular organism?'",
             " /_id '_id'",
             " /_uuid '_uuid'",
             " /_submission_time '_submission_time'",
             " /_validation_status '_validation_status'",
             ' .',
             'VALUE LABELS',
             ' Do_you_have_body_flu_intracellular_space',
             " 'yes' 'Yes'",
             " 'yes__and_some_' 'Yes, and some extracellular space'",
             " 'no___unsure' 'No / Unsure'",
             ' /Do_you_descend_from_unicellular_organism',
             " 'yes' 'Yes'",
             " 'no' 'No'",
             ' .'
         ],
     }
     result_zip = zipfile.ZipFile(export_task.result, 'r')
     for name, content_lines in expected_file_names_and_content_lines.items():
         self.assertEqual(
             # we have `unicode_literals` but the rest of the app doesn't
             result_zip.open(name, 'r').read().decode('utf-8'),
             # the `.sps` content uses CRLF line endings
             '\r\n'.join(content_lines)
         )