def test_organizations_csv(self):
    # Empty EXPORT_CSV_MODELS so the CSV is computed by the view,
    # not served from a pre-exported file.
    self.app.config['EXPORT_CSV_MODELS'] = []
    with self.autoindex():
        orgs = [OrganizationFactory() for _ in range(5)]
        hidden_org = OrganizationFactory(deleted=datetime.now())

    response = self.get(url_for('site.organizations_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'id')
    self.assertIn('name', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('metric.datasets', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    self.assertEqual(len(rows), len(orgs))
    for org in orgs:
        self.assertIn(str(org.id), ids)
    self.assertNotIn(str(hidden_org.id), ids)
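# Every test in this module parses responses through ``csv.get_reader``,
# a project helper around the stdlib reader. A minimal sketch of what such
# a helper might look like (an illustrative assumption, not this project's
# actual implementation):
import csv as stdlib_csv


def _example_get_reader(infile):
    # Sniff the dialect from the first line so callers need not hard-code
    # the delimiter or quoting, then rewind and read from the start.
    dialect = stdlib_csv.Sniffer().sniff(infile.readline())
    infile.seek(0)
    return stdlib_csv.reader(infile, dialect)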
def test_datasets_csv(self):
    with self.autoindex():
        datasets = [DatasetFactory(resources=[ResourceFactory()])
                    for _ in range(5)]
        hidden_dataset = DatasetFactory()

    response = self.get(url_for('site.datasets_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'id')
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('tags', header)
    self.assertIn('metric.reuses', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    self.assertEqual(len(rows), len(datasets))
    for dataset in datasets:
        self.assertIn(str(dataset.id), ids)
    self.assertNotIn(str(hidden_dataset.id), ids)
def test_resources_csv(self):
    with self.autoindex():
        datasets = [DatasetFactory(resources=[ResourceFactory(),
                                              ResourceFactory()])
                    for _ in range(3)]
        # A dataset without resources should contribute no rows.
        hidden_dataset = DatasetFactory()

    response = self.get(url_for('site.resources_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('dataset.url', header)
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('type', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
    for dataset in datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_organizations_csv(self):
    with self.autoindex():
        orgs = [OrganizationFactory() for _ in range(5)]
        hidden_org = OrganizationFactory(deleted=datetime.now())

    response = self.get(url_for('site.organizations_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'id')
    self.assertIn('name', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('metric.datasets', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    self.assertEqual(len(rows), len(orgs))
    for org in orgs:
        self.assertIn(str(org.id), ids)
    self.assertNotIn(str(hidden_org.id), ids)
def test_supplied_datasets_csv(self):
    with self.autoindex():
        org = OrganizationFactory()
        datasets = [DatasetFactory(supplier=org,
                                   resources=[ResourceFactory()])
                    for _ in range(3)]
        not_org_dataset = DatasetFactory(resources=[ResourceFactory()])
        hidden_dataset = DatasetFactory()

    response = self.get(
        url_for('organizations.supplied_datasets_csv', org=org))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'id')
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('tags', header)
    self.assertIn('metric.reuses', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    self.assertEqual(len(rows), len(datasets))
    for dataset in datasets:
        self.assertIn(str(dataset.id), ids)
    self.assertNotIn(str(hidden_dataset.id), ids)
    self.assertNotIn(str(not_org_dataset.id), ids)
def test_datasets_csv(self):
    with self.autoindex():
        datasets = [
            DatasetFactory(resources=[ResourceFactory()])
            for _ in range(5)
        ]
        hidden_dataset = DatasetFactory()

    response = self.get(url_for('site.datasets_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'id')
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('tags', header)
    self.assertIn('metric.reuses', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    self.assertEqual(len(rows), len(datasets))
    for dataset in datasets:
        self.assertIn(str(dataset.id), ids)
    self.assertNotIn(str(hidden_dataset.id), ids)
def test_organizations_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    with self.autoindex():
        filtered_orgs = [OrganizationFactory(public_service=True)
                         for _ in range(6)]
        orgs = [OrganizationFactory() for _ in range(3)]
        hidden_org = OrganizationFactory(deleted=datetime.now())

    response = self.get(url_for('site.organizations_csv',
                                public_services=True,
                                page_size=3,
                                facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'id')
    self.assertIn('name', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('metric.datasets', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    # Should ignore paging
    self.assertEqual(len(rows), len(filtered_orgs))
    # Should pass the filter
    for org in filtered_orgs:
        self.assertIn(str(org.id), ids)
    for org in orgs:
        self.assertNotIn(str(org.id), ids)
    self.assertNotIn(str(hidden_org.id), ids)
def test_resources_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    with self.autoindex():
        filtered_datasets = [
            DatasetFactory(
                resources=[ResourceFactory(), ResourceFactory()],
                tags=['selected'])
            for _ in range(6)
        ]
        # Unfiltered datasets and an empty one: none should be exported.
        [DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
        DatasetFactory()

    response = self.get(
        url_for('site.resources_csv', tag='selected',
                page_size=3, facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('dataset.url', header)
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('filetype', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows),
                     sum(len(d.resources) for d in filtered_datasets))
    for dataset in filtered_datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_stream_unicode(self):
    fake = FakeFactory(title='é\xe9')

    response = self.get(url_for('testcsv.from_adapter'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header, ['title', 'description'])

    row = next(reader)
    self.assertEqual(len(row), len(header))
    self.assertEqual(row[0], fake.title)
    self.assertEqual(row[1], fake.description)
def test_stream_unicode(self):
    fake = FakeFactory(title='é\xe9')

    response = self.get(url_for('testcsv.from_adapter'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header, ['title', 'description'])

    row = reader.next()
    self.assertEqual(len(row), len(header))
    self.assertEqual(row[0], fake.title)
    self.assertEqual(row[1], fake.description)
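# The ``testcsv.from_adapter`` endpoint exercised by both unicode tests is
# not defined in this module. A plausible sketch of it, assuming a
# udata-style ``csv.stream`` helper that serializes a queryset through its
# registered adapter (names here are illustrative assumptions):
from flask import Blueprint

testcsv = Blueprint('testcsv', __name__)


@testcsv.route('/from_adapter')
def from_adapter():
    # Stream every Fake document as one CSV row via its registered adapter.
    return csv.stream(Fake.objects)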
def test_resources_csv(self):
    with self.autoindex():
        org = OrganizationFactory()
        datasets = [
            DatasetFactory(
                organization=org,
                resources=[ResourceFactory(), ResourceFactory()])
            for _ in range(3)
        ]
        not_org_dataset = DatasetFactory(resources=[ResourceFactory()])
        hidden_dataset = DatasetFactory()

    response = self.get(
        url_for('organizations.datasets_resources_csv', org=org))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('dataset.url', header)
    self.assertIn('title', header)
    self.assertIn('filetype', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
    for dataset in datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)

    dataset_ids = set(row[0] for row in rows)
    self.assertNotIn(str(hidden_dataset.id), dataset_ids)
    self.assertNotIn(str(not_org_dataset.id), dataset_ids)
def test_supplied_resources_csv(self):
    with self.autoindex():
        org = OrganizationFactory()
        datasets = [
            DatasetFactory(
                supplier=org,
                resources=[ResourceFactory(), ResourceFactory()])
            for _ in range(3)
        ]
        not_org_dataset = DatasetFactory(resources=[ResourceFactory()])
        hidden_dataset = DatasetFactory()

    response = self.get(
        url_for('organizations.supplied_datasets_resources_csv', org=org))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('title', header)
    self.assertIn('filetype', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
    for dataset in datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)

    dataset_ids = set(row[0] for row in rows)
    self.assertNotIn(str(hidden_dataset.id), dataset_ids)
    self.assertNotIn(str(not_org_dataset.id), dataset_ids)
def test_datasets_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    with self.autoindex():
        filtered_datasets = [
            DatasetFactory(resources=[ResourceFactory()],
                           tags=['selected'])
            for _ in range(6)
        ]
        datasets = [
            DatasetFactory(resources=[ResourceFactory()])
            for _ in range(3)
        ]
        hidden_dataset = DatasetFactory()

    response = self.get(
        url_for('site.datasets_csv', tag='selected',
                page_size=3, facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'id')
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('tags', header)
    self.assertIn('metric.reuses', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    # Should ignore paging
    self.assertEqual(len(rows), len(filtered_datasets))
    # Should pass the filter
    for dataset in filtered_datasets:
        self.assertIn(str(dataset.id), ids)
    for dataset in datasets:
        self.assertNotIn(str(dataset.id), ids)
    self.assertNotIn(str(hidden_dataset.id), ids)
def test_organizations_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    user = self.login()
    with self.autoindex():
        public_service_badge = Badge(
            kind=PUBLIC_SERVICE,
            created_by=user)
        filtered_orgs = [
            OrganizationFactory(badges=[public_service_badge])
            for _ in range(6)
        ]
        orgs = [OrganizationFactory() for _ in range(3)]
        hidden_org = OrganizationFactory(deleted=datetime.now())

    response = self.get(
        url_for('site.organizations_csv', badge=PUBLIC_SERVICE,
                page_size=3, facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'id')
    self.assertIn('name', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('metric.datasets', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    # Should ignore paging
    self.assertEqual(len(rows), len(filtered_orgs))
    # Should pass the filter
    for org in filtered_orgs:
        self.assertIn(str(org.id), ids)
    for org in orgs:
        self.assertNotIn(str(org.id), ids)
    self.assertNotIn(str(hidden_org.id), ids)
def test_resources_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    with self.autoindex():
        filtered_datasets = [
            DatasetFactory(
                resources=[ResourceFactory(), ResourceFactory()],
                tags=['selected'])
            for _ in range(6)
        ]
        # Unfiltered datasets and an empty one: none should be exported.
        [DatasetFactory(resources=[ResourceFactory()]) for _ in range(3)]
        DatasetFactory()

    response = self.get(
        url_for('site.resources_csv', tag='selected',
                page_size=3, facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('dataset.url', header)
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('filetype', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows),
                     sum(len(d.resources) for d in filtered_datasets))
    for dataset in filtered_datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)
def test_datasets_csv_with_filters(self):
    '''Should handle filtering but ignore paging or facets'''
    with self.autoindex():
        filtered_datasets = [
            DatasetFactory(resources=[ResourceFactory()],
                           tags=['selected'])
            for _ in range(6)]
        datasets = [DatasetFactory(resources=[ResourceFactory()])
                    for _ in range(3)]
        hidden_dataset = DatasetFactory()

    response = self.get(
        url_for('site.datasets_csv', tag='selected',
                page_size=3, facets=True))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()

    self.assertEqual(header[0], 'id')
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('created_at', header)
    self.assertIn('last_modified', header)
    self.assertIn('tags', header)
    self.assertIn('metric.reuses', header)

    rows = list(reader)
    ids = [row[0] for row in rows]

    # Should ignore paging
    self.assertEqual(len(rows), len(filtered_datasets))
    # Should pass the filter
    for dataset in filtered_datasets:
        self.assertIn(str(dataset.id), ids)
    for dataset in datasets:
        self.assertNotIn(str(dataset.id), ids)
    self.assertNotIn(str(hidden_dataset.id), ids)
def test_csv(self): Tag.objects.create(name="datasets-only", counts={"datasets": 15}) Tag.objects.create(name="reuses-only", counts={"reuses": 10}) Tag.objects.create(name="both", counts={"reuses": 10, "datasets": 15}) response = self.get(url_for("tags.csv")) self.assert200(response) self.assertEqual(response.mimetype, "text/csv") self.assertEqual(response.charset, "utf-8") csvfile = StringIO.StringIO(response.data) reader = reader = csv.get_reader(csvfile) header = reader.next() rows = list(reader) self.assertEqual(header, ["name", "datasets", "reuses", "total"]) self.assertEqual(len(rows), 3) self.assertEqual(rows[0], ["both", "15", "10", "25"]) self.assertEqual(rows[1], ["datasets-only", "15", "0", "15"]) self.assertEqual(rows[2], ["reuses-only", "0", "10", "10"])
def test_csv(self, client):
    Tag.objects.create(name='datasets-only', counts={'datasets': 15})
    Tag.objects.create(name='reuses-only', counts={'reuses': 10})
    Tag.objects.create(name='both', counts={'reuses': 10, 'datasets': 15})

    response = client.get(url_for('tags.csv'))

    assert200(response)
    assert response.mimetype == 'text/csv'
    assert response.charset == 'utf-8'

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()
    rows = list(reader)

    assert header == ['name', 'datasets', 'reuses', 'total']
    assert len(rows) == 3
    assert rows[0] == ['both', '15', '10', '25']
    assert rows[1] == ['datasets-only', '15', '0', '15']
    assert rows[2] == ['reuses-only', '0', '10', '10']
def test_csv(self, client):
    Tag.objects.create(name='datasets-only', counts={'datasets': 15})
    Tag.objects.create(name='reuses-only', counts={'reuses': 10})
    Tag.objects.create(name='both', counts={'reuses': 10, 'datasets': 15})

    response = client.get(url_for('tags.csv'))

    assert200(response)
    assert response.mimetype == 'text/csv'
    assert response.charset == 'utf-8'

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)
    rows = list(reader)

    assert header == ['name', 'datasets', 'reuses', 'total']
    assert len(rows) == 3
    assert rows[0] == ['both', '15', '10', '25']
    assert rows[1] == ['datasets-only', '15', '0', '15']
    assert rows[2] == ['reuses-only', '0', '10', '10']
def test_csv(self):
    Tag.objects.create(name='datasets-only', counts={'datasets': 15})
    Tag.objects.create(name='reuses-only', counts={'reuses': 10})
    Tag.objects.create(name='both', counts={'reuses': 10, 'datasets': 15})

    response = self.get(url_for('tags.csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()
    rows = list(reader)

    self.assertEqual(header, ['name', 'datasets', 'reuses', 'total'])
    self.assertEqual(len(rows), 3)
    self.assertEqual(rows[0], ['both', '15', '10', '25'])
    self.assertEqual(rows[1], ['datasets-only', '15', '0', '15'])
    self.assertEqual(rows[2], ['reuses-only', '0', '10', '10'])
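# All four variants of test_csv above expect a computed 'total' column equal
# to datasets + reuses, with rows ordered by tag name. A sketch of an adapter
# that would produce this shape, assuming udata-style callable field getters
# (illustrative only, not taken from this file):
@csv.adapter(Tag)
class TagCsvAdapter(csv.Adapter):
    fields = [
        'name',
        ('datasets', lambda t: t.counts.get('datasets', 0)),
        ('reuses', lambda t: t.counts.get('reuses', 0)),
        ('total', lambda t: sum(t.counts.values())),
    ]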
def test_resources_csv(self):
    # Empty EXPORT_CSV_MODELS so the CSV is computed by the view,
    # not served from a pre-exported file.
    self.app.config['EXPORT_CSV_MODELS'] = []
    with self.autoindex():
        datasets = [
            DatasetFactory(
                resources=[ResourceFactory(), ResourceFactory()])
            for _ in range(3)
        ]
        # A dataset without resources should contribute no rows.
        DatasetFactory()

    response = self.get(url_for('site.resources_csv'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)

    self.assertEqual(header[0], 'dataset.id')
    self.assertIn('dataset.title', header)
    self.assertIn('dataset.url', header)
    self.assertIn('title', header)
    self.assertIn('description', header)
    self.assertIn('filetype', header)
    self.assertIn('url', header)
    self.assertIn('created_at', header)
    self.assertIn('modified', header)
    self.assertIn('downloads', header)

    resource_id_index = header.index('id')

    rows = list(reader)
    ids = [(row[0], row[resource_id_index]) for row in rows]

    self.assertEqual(len(rows), sum(len(d.resources) for d in datasets))
    for dataset in datasets:
        for resource in dataset.resources:
            self.assertIn((str(dataset.id), str(resource.id)), ids)
def assert_csv(self, endpoint, objects):
    @csv.adapter(Fake)
    class Adapter(csv.Adapter):
        fields = ['title', 'description']

    response = self.get(url_for(endpoint))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()
    self.assertEqual(header, ['title', 'description'])

    rows = list(reader)
    self.assertEqual(len(rows), len(objects))
    for row, obj in zip(rows, objects):
        self.assertEqual(len(row), len(header))
        self.assertEqual(row[0], obj.title)
        self.assertEqual(row[1], obj.description)

    return response
def test_stream_nested_from_adapter(self):
    fake = FakeFactory.build()
    for i in range(3):
        fake.nested.append(NestedFake(key=faker.word(), value=i))
    fake.save()

    response = self.get(url_for('testcsv.from_nested'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO.StringIO(response.data)
    reader = csv.get_reader(csvfile)
    header = reader.next()
    self.assertEqual(header, ['title', 'description', 'key', 'alias'])

    rows = list(reader)
    self.assertEqual(len(rows), len(fake.nested))
    for row, obj in zip(rows, fake.nested):
        self.assertEqual(len(row), len(header))
        self.assertEqual(row[0], fake.title)
        self.assertEqual(row[1], fake.description)
        self.assertEqual(row[2], obj.key)
        self.assertEqual(row[3], str(obj.value))
def test_stream_nested_from_adapter(self):
    fake = FakeFactory.build()
    for i in range(3):
        fake.nested.append(NestedFake(key=faker.word(), value=i))
    fake.save()

    response = self.get(url_for('testcsv.from_nested'))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)
    self.assertEqual(header, ['title', 'description', 'key', 'alias'])

    rows = list(reader)
    self.assertEqual(len(rows), len(fake.nested))
    for row, obj in zip(rows, fake.nested):
        self.assertEqual(len(row), len(header))
        self.assertEqual(row[0], fake.title)
        self.assertEqual(row[1], fake.description)
        self.assertEqual(row[2], obj.key)
        self.assertEqual(row[3], str(obj.value))
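# Both nested-stream tests expect the columns
# ['title', 'description', 'key', 'alias'], where 'alias' carries the nested
# ``value``. A sketch of an adapter producing that shape, assuming a
# udata-style ``NestedAdapter`` (illustrative, not taken from this file):
@csv.adapter(Fake)
class NestedFakeAdapter(csv.NestedAdapter):
    attribute = 'nested'                         # one row per item in fake.nested
    fields = ['title', 'description']            # parent columns, repeated per row
    nested_fields = ['key', ('alias', 'value')]  # 'alias' column reads obj.value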
def assert_csv(self, endpoint, objects):
    @csv.adapter(Fake)
    class Adapter(csv.Adapter):
        fields = ['title', 'description']

    response = self.get(url_for(endpoint))

    self.assert200(response)
    self.assertEqual(response.mimetype, 'text/csv')
    self.assertEqual(response.charset, 'utf-8')

    csvfile = StringIO(response.data.decode('utf8'))
    reader = csv.get_reader(csvfile)
    header = next(reader)
    self.assertEqual(header, ['title', 'description'])

    rows = list(reader)
    self.assertEqual(len(rows), len(objects))
    for row, obj in zip(rows, objects):
        self.assertEqual(len(row), len(header))
        self.assertEqual(row[0], obj.title)
        self.assertEqual(row[1], obj.description)

    return response
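# A typical call site for the helper above (a hypothetical usage example):
# build a few objects, then check the endpoint streams exactly those rows.
def test_from_adapter_roundtrip(self):
    fakes = [FakeFactory() for _ in range(3)]
    self.assert_csv('testcsv.from_adapter', fakes)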