def test_members(self):
    """Members are listed in full, and a column filter narrows them."""
    # Unfiltered: the entity exposes all five members.
    all_members = list(self.entity.members())
    h.assert_equal(len(all_members), 5)
    # Filtered on the aliased name column: exactly one match.
    matching = list(
        self.entity.members(self.entity.alias.c.name == 'Dept032'))
    h.assert_equal(len(matching), 1)
def test_count_with_query_kwarg_containing_boolean_value(self):
    """count() accepts a boolean-valued query keyword argument."""
    for entry_name, flag in (('one', True), ('two', True), ('three', False)):
        self._make_entry(name=entry_name, foo=flag)
    h.clean_and_reindex_solr()
    # Only the two entries with foo=True should be counted.
    h.assert_equal(logic.entry.count(foo=True), 2)
def test_package_is_importable(self):
    """is_importable() matches the expected verdict per fixture package."""
    expectations = [
        ('foo', False),
        ('bar', True),
        ('baz', False),
        ('missingdata', False),
        ('withmapping', True),
    ]
    for package_name, expected in expectations:
        h.assert_equal(ckan.Package(package_name).is_importable(), expected)
def test_distincts_with_query_kwarg(self):
    """distinct() can be narrowed by an extra query keyword argument."""
    self._make_entry(name='one', region='RegionA')
    self._make_entry(name='two', region='RegionA')
    self._make_entry(name='three', region='RegionB')
    # without a dataset_name it returns the distincts across all datasets;
    # restricting on region drops 'three'.
    names = logic.entry.distinct('name', region=u'RegionA')
    h.assert_equal(names, ['one', 'two'])
def test_facets_with_query_kwarg_containing_boolean_value(self):
    """Facet queries accept boolean keyword filters."""
    for entry_name, flag in (('one', True), ('two', True), ('three', False)):
        self._make_entry(name=entry_name, foo=flag)
    h.clean_and_reindex_solr()
    # Only the foo=True entries contribute to the name facet.
    facets = logic.entry.facets_for_fields(['name'], foo=True)
    h.assert_equal(facets, {u'name': {u'one': 1, u'two': 1}})
def test_get_classifier(self):
    """get_classifier() returns an equal, but distinct, Classifier object."""
    name = 'testname'
    taxonomy = 'testtaxonomy'
    created = logic.classifier.create_classifier(name, taxonomy)
    fetched = logic.classifier.get_classifier(name, taxonomy)
    h.assert_true(isinstance(fetched, Classifier))
    # Value-equal, but not the same object instance.
    h.assert_false(created is fetched)
    h.assert_equal(created, fetched)
def test_count_with_query(self):
    """count() with a field filter counts only matching entries."""
    self._make_entry(name='one', region="A")
    self._make_entry(name='two', region="A")
    self._make_entry(name='three', region="B")
    self._make_entry(name='four')
    h.clean_and_reindex_solr()
    # Two entries carry region "A"; 'three' and 'four' are excluded.
    h.assert_equal(logic.entry.count(region="A"), 2)
def test_facets_with_query_kwarg(self):
    """Facets can be restricted by a query keyword argument."""
    self._make_entry(name='one', region="RegionA")
    self._make_entry(name='two', region="RegionA")
    self._make_entry(name='three', region="Region B")
    h.clean_and_reindex_solr()
    # Restricting on region keeps only 'one' and 'two' in the facet.
    facets = logic.entry.facets_for_fields(['name'], region=u'RegionA')
    h.assert_equal(facets, {u'name': {u'one': 1, u'two': 1}})
def test_count_with_dataset_name(self):
    """count() limited to one dataset ignores entries in others."""
    self._make_entry(name='one')
    self._make_entry(name='two')
    other_dataset = self._make_dataset('other_dataset')
    self._make_entry(name='three', dataset=other_dataset)
    self._make_entry(name='four', dataset=other_dataset)
    h.clean_and_reindex_solr()
    # 'three' and 'four' live in other_dataset; 'one' and 'two' do not.
    h.assert_equal(logic.entry.count(dataset_name='other_dataset'), 2)
def test_members(self):
    """Loaded members are returned in full, and filterable by name."""
    self.ds.generate()
    # Load two members into the entity dimension.
    self.entity.load(self.ds.bind, {'name': 'one', 'label': 'Label One'})
    self.entity.load(self.ds.bind, {'name': 'two', 'label': 'Label Two'})
    loaded = list(self.entity.members())
    h.assert_equal(len(loaded), 2)
    # A name filter narrows the result to the single matching member.
    matching = list(self.entity.members(self.entity.alias.c.name == 'one'))
    h.assert_equal(len(matching), 1)
def test_aggregation(self):
    """Values are aggregated in the cube."""
    cube = self._make_cube()
    collection = cube.db[cube.collection_name]
    # Pull the aggregated cells grouped by the 'from' dimension.
    from_a = list(collection.find({'from.name': 'a'}, as_class=dict))
    from_b = list(collection.find({'from.name': 'b'}, as_class=dict))
    # 'a' collapses into one cell with the summed amount ...
    h.assert_equal(len(from_a), 1)
    h.assert_equal(from_a[0]['amount'], 2000)
    # ... while 'b' keeps two separate cells of 1000 each.
    h.assert_equal(len(from_b), 2)
    h.assert_equal(from_b[0]['amount'], 1000)
    h.assert_equal(from_b[1]['amount'], 1000)
def test_default_dimensons(self):
    """Dimensions of a default cube.

    We exclude 'name', 'label' and 'time', but include 'to', 'from',
    'year', and — when the time axis demands it — 'month'.
    """
    cube = self._make_cube()
    h.assert_equal(sorted(cube.dimensions), ['from', 'to', 'year'])
    # Switching the dataset's time axis to months adds a 'month' dimension.
    dataset = cube.dataset
    dataset['time_axis'] = u'time.from.month'
    reconfigured = Cube.configure_default_cube(dataset)
    h.assert_equal(sorted(reconfigured.dimensions),
                   ['from', 'month', 'to', 'year'])
def test_facets(self):
    """facets_for_fields() returns counts per value for each field."""
    self._make_entry(name='one', region="Region A")
    self._make_entry(name='two', region="Region A")
    self._make_entry(name='three', region="Region B")
    h.clean_and_reindex_solr()
    facets = logic.entry.facets_for_fields(['name', 'region'])
    expected = {
        u'name': {u'one': 1, u'two': 1, u'three': 1},
        u'region': {'Region A': 2, 'Region B': 1},
    }
    h.assert_equal(facets, expected)
def test_facets_with_query_kwarg_and_space(self):
    """Facet query kwargs whose value contains a space (known failure).

    The test is skipped to register a known failure. The skip is raised
    up front: the original version built three fixture entries and
    reindexed solr before skipping, doing expensive setup whose results
    were never asserted (the assertion after the skip was unreachable).
    """
    h.skip("This test has been failing for a long time, commented out. "
           "Skipping to register known failure that needs fixing eventually.")
    # Unreachable until the skip above is removed; kept as the intended
    # body of the test.
    self._make_entry(name='one', region="Region A")
    self._make_entry(name='two', region="Region A")
    self._make_entry(name='three', region="Region B")
    h.clean_and_reindex_solr()
    facets = logic.entry.facets_for_fields(['name'], region='Region A')
    h.assert_equal(facets, {u'name': {u'one': 1, u'two': 1}})
def assert_order(result, keys, expect):
    """Assert that the drilldown cells of *result* are ordered as *expect*.

    *keys* may be a single key or a list of keys; with several keys the
    expected values are tuples in key order.
    """
    if isinstance(keys, basestring):
        keys = [keys]
    # One column of extracted values per key, across all drilldown cells.
    columns = [[deep_get(cell, key) for cell in result['drilldown']]
               for key in keys]
    observed = columns[0] if len(columns) == 1 else zip(*columns)
    h.assert_equal(observed, expect,
                   'Not the expected order. result: %s, expected: %s' %
                   (observed, expect))
def test_metadata_for_resource(self):
    """metadata_for_resource() merges package and resource fields."""
    package = ckan.Package('bar')
    resource = package['resources'][1]
    expected = {
        'currency': 'usd',
        'description': 'Notes for bar',
        'label': 'The Bar dataset',
        'name': 'bar',
        'source_description': 'Some bar data',
        'source_format': 'text/csv',
        'source_id': '456-data',
        'source_url': 'http://example.com/data.csv',
        'temporal_granularity': 'year',
    }
    h.assert_equal(package.metadata_for_resource(resource), expected)
def test_facets_fail_for_solr_textgen_fields(self):
    """Facets on tokenized solr fields count tokens, not whole strings.

    Facets for a solr field return facets for the tokens stored in the
    field. Depending on the field type this may mean the counts are not
    for the original string, but for tokens after splitting, stemming or
    lowercasing.
    """
    self._make_entry(name='one', description="Description One")
    self._make_entry(name='two', description="Description Two")
    h.clean_and_reindex_solr()
    facets = logic.entry.facets_for_fields(['description'])
    # NOT ["Description One", "Description Two"]: the analyzer has
    # lowercased and split the values into tokens.
    h.assert_equal(facets,
                   {u'description': {u'description': 2, u'two': 1,
                                     u'one': 1}})
def test_distincts_by_dataset_name(self):
    """distinct() spans all datasets unless a dataset_name is given."""
    self._make_entry(name='one', region='Region A')
    self._make_entry(name='two', region='Region A')
    self._make_entry(name='three', region='Region B')
    other_dataset = self._make_dataset('other_dataset')
    self._make_entry(name='four', region='Region C', dataset=other_dataset)
    # without a dataset_name it returns the distincts across all datasets
    across_all = logic.entry.distinct('region')
    h.assert_equal(across_all, ['Region A', 'Region B', 'Region C'])
    # we can limit it with dataset_name
    limited = logic.entry.distinct('region', dataset_name='other_dataset')
    h.assert_equal(limited, ['Region C'])
def test_distincts_create_collection(self):
    """Calling distinct() lazily creates the distincts collection."""
    testdataset = self._make_dataset(name='testdataset')
    self._make_entry(name='one', region="Region 1", region2="Region 2",
                     dataset=testdataset)
    self._make_entry(name='two', region="Region 2", region2="Region 3",
                     dataset=testdataset)
    db = model.mongo.db()
    # The server-side javascript helper must be installed.
    h.assert_true('compute_distincts' in db.system_js.list())
    # The collection does not exist yet; the distinct() call creates it.
    h.assert_true('distincts__testdataset' not in db.collection_names())
    distincts = logic.entry.distinct('region', dataset_name='testdataset')
    h.assert_true('distincts__testdataset' in db.collection_names())
    h.assert_equal(sorted(distincts), [u'Region 1', u'Region 2'])
def test_classify_entry(self):
    """classify_entry() attaches a classifier to an entry.

    The classifier info is stored under the given name ('reason'), its
    id is appended to the entry's 'classifiers' list, and a DBRef back
    to the classifier document is included.
    """
    entry = {'name': u'Test Entry', 'amount': 1000.00}
    c_name = u'support-transparency'
    c_taxonomy = u'Good Reasons'
    c_label = u'Support Transparency Initiatives'
    classifier = logic.classifier.create_classifier(name=c_name,
                                                    label=c_label,
                                                    taxonomy=c_taxonomy)
    logic.entry.classify_entry(entry, classifier, name=u'reason')
    # Compare key sets order-independently: the original assertion
    # compared entry.keys() to a list in a specific order, which depends
    # on dict hash ordering (an implementation detail) and is fragile.
    h.assert_equal(sorted(entry.keys()),
                   sorted([u'reason', 'amount', 'name', 'classifiers']))
    h.assert_equal(entry['classifiers'], [classifier['_id']])
    h.assert_equal(entry['reason']['label'], c_label)
    h.assert_equal(entry['reason']['name'], c_name)
    h.assert_equal(entry['reason']['taxonomy'], c_taxonomy)
    h.assert_true(isinstance(entry['reason']['ref'], DBRef))
def test_limit(self):
    """A limit turns a result into a paginated result.

    The pagesize of the paginated result equals the limit.
    """
    cube = self._make_cube()
    result = cube.query(pagesize=2)
    summary = result['summary']
    h.assert_equal(summary['pagesize'], 2)
    h.assert_equal(len(result['drilldown']), 2)
    # Five cells at two per page gives three pages, starting on page 1.
    h.assert_equal(summary['page'], 1)
    h.assert_equal(summary['pages'], 3)
def test_facets_by_dataset(self):
    """Facets span all datasets unless restricted by dataset_name."""
    self._make_entry(name='one')
    self._make_entry(name='two')
    other_dataset = self._make_dataset('other_dataset')
    self._make_entry(name='three', dataset=other_dataset)
    self._make_entry(name='four', dataset=other_dataset)
    h.clean_and_reindex_solr()
    # without a dataset_name it returns the distincts across all datasets
    across_all = logic.entry.facets_for_fields(['name'])
    h.assert_equal(across_all,
                   {u'name': {u'one': 1, u'two': 1, u'three': 1,
                              u'four': 1}})
    # we can limit it with dataset_name
    limited = logic.entry.facets_for_fields(['name'],
                                            dataset_name='other_dataset')
    h.assert_equal(limited, {u'name': {u'three': 1, u'four': 1}})
def test_fallback_for_missing_entity_name(self):
    """The objectid of an entity is the fallback value for 'name'."""
    loader = self._make_loader()
    # Minimal dimensions for a default cube.
    loader.create_dimension('name', 'Name', '')
    loader.create_dimension('label', 'Label', '')
    loader.create_dimension('from', 'From', '')
    # An entity whose name is empty — the fallback case under test.
    nameless = self._make_entity(loader, name="", label='Entity w/o name')
    self._make_entry(loader,
                     name='Entry',
                     label='Entry Label',
                     **{'from': nameless,
                        'time': {'from': {'year': 2009, 'day': 20090101}}})
    cube = Cube.configure_default_cube(loader.dataset)
    cube.compute()
    cube_collection = mongo.db()[cube.collection_name]
    h.assert_equal(cube_collection.find().count(), 1)
    # The cube cell's 'from.name' fell back to the entity's _id.
    cube_from = cube_collection.find_one()['from']
    h.assert_equal(cube_from['name'], cube_from['_id'])
def test_distincts(self):
    """distinct() agrees with the server-side distincts collection."""
    testdataset = self._make_dataset(name='testdataset')
    self._make_entry(name='one', region="Region 1", region2="Region 2",
                     dataset=testdataset)
    self._make_entry(name='two', region="Region 2", region2="Region 3",
                     dataset=testdataset)
    db = model.mongo.db()
    h.assert_true('compute_distincts' in db.system_js.list())
    # compute a distincts collection via the server-side javascript
    db.system_js.compute_distincts('testdataset')
    h.assert_true('distincts__testdataset' in db.collection_names())
    # test the distincts collection manually
    manual = db.distincts__testdataset.find(
        {'value.keys': u'region'}).distinct('_id')
    h.assert_equal(sorted(manual), [u'Region 1', u'Region 2'])
    # the logic layer must return the same values
    via_logic = logic.entry.distinct('region', dataset_name='testdataset')
    h.assert_equal(sorted(via_logic), [u'Region 1', u'Region 2'])
def test_drilldown(self):
    """Drilldowns group cube cells by the requested dimensions."""

    def sorted_extract(drilldown):
        # Flatten each cell into [from.name, to.name, num_entries, amount]
        # and sort for a deterministic comparison.
        keys = ('from.name', 'to.name', 'num_entries', 'amount')
        return sorted([deep_get(cell, key) for key in keys]
                      for cell in drilldown)

    cube = self._make_cube()
    h.assert_equal(cube.db[cube.collection_name].find().count(), 5)

    # drilldown on from and to
    drilldown = cube.query(drilldowns=['from', 'to'])['drilldown']
    h.assert_equal(len(drilldown), 5)
    h.assert_equal(sorted_extract(drilldown),
                   [[u'a', u'b', 2, 2000.0],
                    [u'b', u'b', 1, 1000.0],
                    [u'b', u'c', 1, 1000.0],
                    [u'c', u'a', 1, 1000.0],
                    [u'c', u'b', 1, 1000.0]])

    # drilldown on from (to is not included in the drilldown)
    drilldown = cube.query(drilldowns=['from'])['drilldown']
    h.assert_equal(len(drilldown), 3)
    h.assert_equal(sorted_extract(drilldown),
                   [[u'a', None, 2, 2000.0],
                    [u'b', None, 2, 2000.0],
                    [u'c', None, 2, 2000.0]])

    # drilldown on to (from is not included in the drilldown)
    drilldown = cube.query(drilldowns=['to'])['drilldown']
    h.assert_equal(len(drilldown), 3)
    h.assert_equal(sorted_extract(drilldown),
                   [[None, u'a', 1, 1000.0],
                    [None, u'b', 4, 4000.0],
                    [None, u'c', 1, 1000.0]])
def test_entry_properties(self):
    """The entry fixture exposes its fields as attributes."""
    entry = self.ent
    h.assert_equal(entry.name, 'testentry')
    h.assert_equal(entry.label, 'An Entry')
    h.assert_equal(entry.amount, 123.45)
    h.assert_equal(entry.currency, 'GBP')
def test_facet_dimensions(self):
    """Only the 'to' dimension is marked as a facet dimension."""
    facet_names = [dimension.name for dimension in self.ds.facet_dimensions]
    h.assert_equal(facet_names, ["to"])
def test_classifier_properties(self):
    """The classifier fixture exposes its fields as attributes."""
    classifier = self.cla
    h.assert_equal(classifier.label, 'Foo Classifier')
    h.assert_equal(classifier.level, '1')
    h.assert_equal(classifier.taxonomy, 'class.foo')
    h.assert_equal(classifier.description, 'Denotes the foo property.')
    h.assert_equal(classifier.parent, 'class')
def test_facet_dimensions(self):
    """Only the 'to' dimension is marked as a facet dimension."""
    facet_names = [dimension.name for dimension in self.ds.facet_dimensions]
    h.assert_equal(facet_names, ['to'])
def test_get_resource(self):
    """get_resource() looks a resource up by its id."""
    package = ckan.Package('bar')
    resource = package.get_resource('456-data')
    h.assert_equal(resource['url'], 'http://example.com/data.csv')
def test_paginate(self):
    """Pagination summary and drilldown slice behave consistently."""

    def check(result, drilldown_len, pagesize, page, pages):
        # One helper assertion per (length, pagesize, page, pages) tuple.
        h.assert_equal(len(result['drilldown']), drilldown_len)
        summary = result['summary']
        h.assert_equal(summary['pagesize'], pagesize)
        h.assert_equal(summary['page'], page)
        h.assert_equal(summary['pages'], pages)

    cube = self._make_cube()
    # with pagesize < # of drilldowns
    check(cube.query(page=1, pagesize=2),
          drilldown_len=2, pagesize=2, page=1, pages=3)
    # with pagesize < # of drilldowns, but page > max pages
    check(cube.query(page=5, pagesize=2),
          drilldown_len=0, pagesize=2, page=5, pages=3)
    # with pagesize > # of drilldowns
    check(cube.query(page=1, pagesize=7),
          drilldown_len=5, pagesize=7, page=1, pages=1)
def test_members(self):
    """All five members are listed; a name filter yields one."""
    unfiltered = list(self.entity.members())
    h.assert_equal(len(unfiltered), 5)
    # Restrict to the single department named 'Dept032'.
    filtered = list(
        self.entity.members(self.entity.alias.c.name == 'Dept032'))
    h.assert_equal(len(filtered), 1)