def test_osm_csv_export(self):
    """CSV export of an OSM form: verify status code, export count,
    response headers/filename, and row contents against the
    ``osm.csv`` fixture; then confirm a second request also succeeds.
    """
    self._publish_osm_with_submission()
    count = Export.objects.all().count()
    view = XFormViewSet.as_view({'get': 'retrieve'})
    request = self.factory.get(
        '/', data={'include_images': False}, **self.extra)
    response = view(request, pk=self.xform.pk, format='csv')
    self.assertEqual(response.status_code, 200)
    # assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual
    self.assertEqual(count + 1, Export.objects.all().count())
    headers = dict(response.items())
    self.assertEqual(headers['Content-Type'], 'application/csv')
    content_disposition = headers['Content-Disposition']
    filename = filename_from_disposition(content_disposition)
    _, ext = os.path.splitext(filename)
    self.assertEqual(ext, '.csv')
    content = get_response_content(response)
    data = list(csv.DictReader(StringIO(content)))
    test_file_path = os.path.join(self.fixtures_dir, 'osm.csv')
    with open(test_file_path, 'r') as test_file:
        expected_csv_reader = csv.DictReader(test_file)
        for index, row in enumerate(expected_csv_reader):
            # each expected row must be a subset of the exported row
            self.assertDictContainsSubset(row, data[index])
    # a follow-up request for the same export must still succeed
    request = self.factory.get('/', **self.extra)
    response = view(request, pk=self.xform.pk, format='csv')
    self.assertEqual(response.status_code, 200)
def _check_xls_export(self):
    """Download the XLS export and compare its first sheet against the
    ``transportation_export.xls`` fixture, cell by cell.
    """
    url = reverse('xls_export', kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string,
    })
    response = self.client.get(url)
    fixture_path = os.path.join(
        self.this_directory, "fixtures", "transportation",
        "transportation_export.xls")
    expected_book = open_workbook(fixture_path)
    body = get_response_content(response, decode=False)
    actual_book = open_workbook(file_contents=body)
    got = actual_book.sheet_by_index(0)
    want = expected_book.sheet_by_index(0)
    # header row must match exactly
    self.assertEqual(got.row_values(0), want.row_values(0))
    # both sheets must have identical dimensions
    self.assertEqual(got.ncols, want.ncols)
    self.assertEqual(got.nrows, want.nrows)
    for row_idx in range(1, got.nrows):
        actual_row = got.row_values(row_idx)
        expected_row = want.row_values(row_idx)
        # remove _id from result set, varies depending on the database
        del actual_row[23]
        del expected_row[23]
        self.assertEqual(actual_row, expected_row)
def test_osm_csv_export(self):
    """Exercise the OSM-form CSV export: a new Export record is
    created, the response carries CSV headers and a ``.csv`` filename,
    the rows match the ``osm.csv`` fixture, and a repeat request
    succeeds.
    """
    self._publish_osm_with_submission()
    count = Export.objects.all().count()
    view = XFormViewSet.as_view({'get': 'retrieve'})
    request = self.factory.get(
        '/', data={'include_images': False}, **self.extra)
    response = view(request, pk=self.xform.pk, format='csv')
    self.assertEqual(response.status_code, 200)
    # replace deprecated assertEquals alias with assertEqual
    self.assertEqual(count + 1, Export.objects.all().count())
    headers = dict(response.items())
    self.assertEqual(headers['Content-Type'], 'application/csv')
    content_disposition = headers['Content-Disposition']
    filename = filename_from_disposition(content_disposition)
    _, ext = os.path.splitext(filename)
    self.assertEqual(ext, '.csv')
    content = get_response_content(response)
    data = list(csv.DictReader(StringIO(content)))
    test_file_path = os.path.join(self.fixtures_dir, 'osm.csv')
    with open(test_file_path, 'r') as test_file:
        expected_csv_reader = csv.DictReader(test_file)
        for index, row in enumerate(expected_csv_reader):
            # fixture row must be a subset of the exported row
            self.assertDictContainsSubset(row, data[index])
    # repeating the request must still return 200
    request = self.factory.get('/', **self.extra)
    response = view(request, pk=self.xform.pk, format='csv')
    self.assertEqual(response.status_code, 200)
def _filter_export_test(self, url, export_format):
    """
    Test filter exports. Use sleep to ensure we don't have unique
    seconds. Number of rows equals number of surveys plus 1, the
    header row.
    """
    def submit(survey):
        # submit one fixture instance for the named survey
        self._make_submission(os.path.join(
            self.this_directory, 'fixtures', 'transportation',
            'instances', survey, survey + '.xml'))

    def timestamp():
        # filter params use this compact timestamp format
        return timezone.now().strftime('%y_%m_%d_%H_%M_%S')

    def row_count(params=None):
        # GET the export (optionally filtered) and count its rows
        if params is None:
            response = self.client.get(url)
        else:
            response = self.client.get(url, params)
        self.assertEqual(response.status_code, 200)
        body = get_response_content(response, decode=False)
        return self._num_rows(body, export_format)

    time.sleep(1)
    # 1 survey exists before this time
    start_time = timestamp()
    time.sleep(1)
    submit(self.surveys[1])
    time.sleep(1)
    # 2 surveys exist before this time
    end_time = timestamp()
    time.sleep(1)
    # 3 surveys exist in total
    submit(self.surveys[2])
    # test restricting to before end time: 2 surveys + header
    self.assertEqual(row_count({'end': end_time}), 3)
    # test restricting to after start time, thus excluding the initial
    # submission: 2 surveys + header
    self.assertEqual(row_count({'start': start_time}), 3)
    # test no time restriction: all 3 surveys + header
    self.assertEqual(row_count(), 4)
    # test restricting to between start time and end time:
    # 1 survey + header
    self.assertEqual(row_count({'start': start_time, 'end': end_time}), 2)
def _get_csv_(self):
    """Fetch the CSV export for the current form and return a
    ``csv.reader`` over its lines."""
    # todo: get the csv.reader to handle unicode as done here:
    # http://docs.python.org/library/csv.html#examples
    export_url = reverse('csv_export', kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string,
    })
    response = self.client.get(export_url)
    self.assertEqual(response.status_code, 200)
    body = get_response_content(response)
    return csv.reader(body.split("\n"))
def _test_csv_response(self, response, csv_file_path):
    """Assert *response* is a CSV attachment whose rows contain, as a
    subset, every row of the fixture file at *csv_file_path*.
    """
    headers = dict(response.items())
    self.assertEqual(headers['Content-Type'], 'application/csv')
    content_disposition = headers['Content-Disposition']
    filename = filename_from_disposition(content_disposition)
    __, ext = os.path.splitext(filename)
    self.assertEqual(ext, '.csv')
    content = get_response_content(response)
    data = list(csv.DictReader(StringIO(content)))
    # open with an explicit encoding so the comparison does not depend
    # on the platform default (matches the utf-8 variant of this helper)
    with open(csv_file_path, 'r', encoding='utf-8') as test_file:
        expected_csv_reader = csv.DictReader(test_file)
        for index, row in enumerate(expected_csv_reader):
            # DictReader stores overflow columns under the None key;
            # drop it before the subset comparison
            if None in row:
                row.pop(None)
            self.assertDictContainsSubset(row, data[index])
def _test_csv_response(self, response, csv_file_path):
    """Check that *response* carries a ``.csv`` attachment whose rows
    include every row of the expected CSV at *csv_file_path*.
    """
    header_map = dict(response.items())
    self.assertEqual(header_map['Content-Type'], 'application/csv')
    disposition = header_map['Content-Disposition']
    attachment_name = filename_from_disposition(disposition)
    extension = os.path.splitext(attachment_name)[1]
    self.assertEqual(extension, '.csv')
    body = get_response_content(response)
    actual_rows = [record for record in csv.DictReader(StringIO(body))]
    with open(csv_file_path, encoding='utf-8') as expected_file:
        for idx, expected_row in enumerate(csv.DictReader(expected_file)):
            # drop the overflow-column (None) key added by DictReader,
            # if present
            expected_row.pop(None, None)
            self.assertDictContainsSubset(expected_row, actual_rows[idx])
def _check_csv_export_second_pass(self):
    """Re-fetch the CSV export and compare each data row against a
    hard-coded set of expected submissions.

    For every CSV row, values equal to "n/a"/"False" and columns in the
    data dictionary's additional headers (plus ``_id``) are ignored;
    the remainder must exactly equal the expected dict, with non-meta
    keys prefixed by ``transport/``.
    """
    url = reverse('csv_export', kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    actual_csv = get_response_content(response)
    actual_lines = actual_csv.split("\n")
    actual_csv = csv.reader(actual_lines)
    # first CSV row is the header row
    headers = next(actual_csv)
    # expected values for the four submissions, in submission order
    data = [{
        "image1": "1335783522563.jpg",
        'meta/instanceID': 'uuid:5b2cc313-fc09-437e-8149-fcd32f695d41',
        '_uuid': '5b2cc313-fc09-437e-8149-fcd32f695d41',
        '_submission_time': '2013-02-14T15:37:21',
        '_tags': '', '_notes': '', '_version': '2014111',
        '_duration': '', '_submitted_by': 'bob',
        '_total_media': '1', '_media_count': '0',
    }, {
        "available_transportation_types_to_referral_facility/ambulance":
            "True",
        "available_transportation_types_to_referral_facility/bicycle":
            "True",
        self.ambulance_key: "daily",
        self.bicycle_key: "weekly",
        "meta/instanceID": "uuid:f3d8dc65-91a6-4d0f-9e97-802128083390",
        '_uuid': 'f3d8dc65-91a6-4d0f-9e97-802128083390',
        '_submission_time': '2013-02-14T15:37:22',
        '_tags': '', '_notes': '', '_version': '2014111',
        '_duration': '', '_submitted_by': 'bob',
        '_total_media': '0', '_media_count': '0',
        '_media_all_received': 'True'
    }, {
        "available_transportation_types_to_referral_facility/ambulance":
            "True",
        self.ambulance_key: "weekly",
        "meta/instanceID": "uuid:9c6f3468-cfda-46e8-84c1-75458e72805d",
        '_uuid': '9c6f3468-cfda-46e8-84c1-75458e72805d',
        '_submission_time': '2013-02-14T15:37:23',
        '_tags': '', '_notes': '', '_version': '2014111',
        '_duration': '', '_submitted_by': 'bob',
        '_total_media': '0', '_media_count': '0',
        '_media_all_received': 'True'
    }, {
        "available_transportation_types_to_referral_facility/taxi":
            "True",
        "available_transportation_types_to_referral_facility/other":
            "True",
        "available_transportation_types_to_referral_facility_other":
            "camel",
        self.taxi_key: "daily",
        "meta/instanceID": "uuid:9f0a1508-c3b7-4c99-be00-9b237c26bcbf",
        '_uuid': '9f0a1508-c3b7-4c99-be00-9b237c26bcbf',
        '_submission_time': '2013-02-14T15:37:24',
        '_tags': '', '_notes': '', '_version': '2014111',
        '_duration': '', '_submitted_by': 'bob',
        '_total_media': '0', '_media_count': '0',
        '_media_all_received': 'True'
    }]
    dd = DataDictionary.objects.get(pk=self.xform.pk)
    # columns injected by the export that vary per run (e.g. _id)
    additional_headers = dd._additional_headers() + ['_id']
    for row, expected_dict in zip(actual_csv, data):
        test_dict = {}
        d = dict(zip(headers, row))
        for (k, v) in iteritems(d):
            # skip unanswered ("n/a"), false multi-selects, and
            # run-varying extra columns
            if not (v in ["n/a", "False"] or k in additional_headers):
                test_dict[k] = v
        this_list = []
        for k, v in expected_dict.items():
            # media and meta/_-prefixed keys appear as-is; survey
            # answers are exported under the transport/ group prefix
            if k in ['image1', 'meta/instanceID'] or k.startswith("_"):
                this_list.append((k, v))
            else:
                this_list.append(("transport/" + k, v))
        self.assertEqual(test_dict, dict(this_list))