def test_Language(self):
    """A fresh Language has no glottocode/iso code but can be rendered for Solr."""
    from clld.db.models.common import Language

    lang = Language(id='abc')
    assert lang.glottocode is None
    assert lang.iso_code is None
    assert lang.__solr__(None)
def test_Versioning(self):
    """Changing a versioned Language bumps its version and records history."""
    from clld.db.models.common import Language, Identifier, LanguageIdentifier
    from clld.db.meta import VersionedDBSession

    lang = Language(id='abc', name='Old Name', jsondata={'i': 2})
    VersionedDBSession.add(lang)
    VersionedDBSession.flush()
    self.assertEqual(lang.version, 1)

    lang.name = 'New Name'
    lang.description = 'New Description'
    VersionedDBSession.flush()
    self.assertEqual(lang.version, 2)

    # The history mapper gives access to superseded states.
    History = lang.__history_mapper__.class_
    old_states = VersionedDBSession.query(History).filter(
        History.pk == lang.pk).all()
    self.assertEqual(old_states[0].name, 'Old Name')

    assoc = LanguageIdentifier(
        identifier=Identifier(id='asd', type='wals'), language=lang)
    VersionedDBSession.flush()
    VersionedDBSession.delete(assoc)
    VersionedDBSession.delete(lang)
    VersionedDBSession.flush()
def test_Versioning(self):
    """Version numbers increase on update and the old state is queryable."""
    from clld.db.models.common import Language, Identifier, LanguageIdentifier
    from clld.db.meta import VersionedDBSession

    obj = Language(id='abc', name='Old Name', jsondata={'i': 2})
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    self.assertEqual(obj.version, 1)

    obj.name = 'New Name'
    obj.description = 'New Description'
    VersionedDBSession.flush()
    self.assertEqual(obj.version, 2)

    History = obj.__history_mapper__.class_
    history_query = VersionedDBSession.query(History).filter(
        History.pk == obj.pk)
    self.assertEqual(history_query.all()[0].name, 'Old Name')

    link = LanguageIdentifier(
        identifier=Identifier(id='asd', type='wals'), language=obj)
    VersionedDBSession.flush()
    VersionedDBSession.delete(link)
    VersionedDBSession.delete(obj)
    VersionedDBSession.flush()
def test_GeoJson(mocker):
    """feature_iterator yields nothing for None and one feature per language."""
    adapter = geojson.GeoJson(None)
    assert not list(adapter.feature_iterator(None, None))
    assert len(list(adapter.feature_iterator(Language(), None))) == 1
    wrapper = mocker.Mock(languages=[Language()])
    assert len(list(adapter.feature_iterator(wrapper, None))) == 1
def test_Data(db):
    """Key-value data attached to a Language is exposed via datadict()."""
    from clld.db.models.common import Language, Language_data

    lang = Language(id='abc', name='Name')
    lang.data.append(Language_data(key='abstract', value='c'))
    DBSession.add(lang)
    DBSession.flush()
    DBSession.refresh(lang)
    assert lang.datadict()['abstract'] == 'c'
def test_Data(self):
    """Attached key-value data survives a flush/refresh round trip."""
    from clld.db.models.common import Language, Language_data

    obj = Language(id='abc', name='Name')
    obj.data.append(Language_data(key='abstract', value='c'))
    DBSession.add(obj)
    DBSession.flush()
    DBSession.refresh(obj)
    self.assertEqual(obj.datadict()['abstract'], 'c')
def test_Base(db):
    """Objects can be round-tripped through the session and fetched by id."""
    obj = Language(id='abc', name='Name')
    DBSession.add(obj)
    DBSession.flush()
    DBSession.expunge(obj)
    obj = Language.get('abc', session=DBSession)
    assert obj.name == 'Name'
    # Exercise the string representations, too.
    Language().__str__()
    assert repr(obj) == "<Language 'abc'>"
def test_GeoJson(self):
    """feature_iterator yields nothing for None and one feature per language.

    Uses ``assertEqual``; the original ``assertEquals`` is a deprecated alias
    removed in Python 3.12.
    """
    adapter = geojson.GeoJson(None)
    self.assertEqual(len(list(adapter.feature_iterator(None, None))), 0)
    self.assertEqual(
        len(list(adapter.feature_iterator(Language(), None))), 1)
    self.assertEqual(
        len(
            list(
                adapter.feature_iterator(Mock(languages=[Language()]), None))), 1)
def test_Base(db):
    """Versioned base functionality: get by id, empty history, repr."""
    obj = Language(id='abc', name='Name')
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(obj)
    obj = Language.get('abc', session=VersionedDBSession)
    assert obj.name == 'Name'
    # A freshly created object has no superseded versions yet.
    assert not list(obj.history())
    Language().__str__()
    assert repr(obj) == "<Language 'abc'>"
def test_Base(self):
    """Versioned base: fetch by id and check readable representations."""
    obj = Language(id='abc', name='Name')
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(obj)
    obj = Language.get('abc')
    self.assertEqual(obj.name, 'Name')
    assert not list(obj.history())
    # A bit of a hack to test the human readable representations:
    # on py2 string/unicode comparison does type coercion, while on py3
    # the two methods should return the same string anyway.
    self.assertEqual(obj.__str__(), obj.__unicode__())
    Language().__str__()
def get_identifier_objs(self, type_):
    """Return Identifier objects of the given type for this language.

    Glottolog identifiers are synthesized from the language id; any other
    type is delegated to the base class implementation.
    """
    requested = getattr(type_, 'value', type_)
    if requested == IdentifierType.glottolog.value:
        return [Identifier(name=self.id, type=IdentifierType.glottolog.value)]
    return Language.get_identifier_objs(self, type_)
def test_CLLDRequest(self):
    """Exercise custom request properties: purl, db, urls, datatables."""
    req = self.env['request']
    self.assertTrue(isinstance(req.purl, URL))
    contrib = req.db.query(Contribution).first()
    req.resource_url(contrib, ext='geojson')
    self.assertEqual(None, req.ctx_for_url('/some/path/to/nowhere'))
    req.file_url(Language_files(id='1', object=Language.first()))
    assert req.get_datatable('valuesets', ValueSet)
def get_record(self, req, identifier):
    """Return the Language matching an OAI-style identifier.

    :param req: the current request, used to parse the identifier.
    :param identifier: the raw identifier string to resolve.
    :return: the matching Language instance.

    NOTE(review): a missing language trips the ``assert`` rather than a
    proper error; asserts are stripped under ``-O`` — confirm this is only
    used where the identifier has been validated upstream.
    """
    rec = Language.get(self.parse_identifier(req, identifier), default=None)
    assert rec
    return rec
def update(args):
    """Assign languagelandscape URLs to languages identified by glottocode.

    :param args: CLI argument object; ``args.json`` maps URL -> glottocode.

    Languages without a matching glottocode are silently skipped; the number
    of successful assignments is reported at the end.
    """
    count = 0
    for url, glottocode in args.json.items():
        lang = Language.get(glottocode, default=None)
        if lang:
            count += 1
            lang.update_jsondata(languagelandscape=url)
    # print() call instead of the py2-only print statement; the single
    # %-formatted string produces identical output on py2 and py3.
    print('assigned %s languagelandscape urls' % count)
def __init__(self, req, model, language=None, **kw):
    """Initialize the sources table, optionally restricted to one language.

    The language comes from the explicit argument first, then from the
    ``language`` request parameter; otherwise no restriction applies.
    """
    super(Sources, self).__init__(req, model, **kw)
    if not language and 'language' in req.params:
        language = Language.get(req.params['language'])
    self.language = language or None
def test_add_language_codes(env):
    """add_language_codes accepts an iso code plus a glottocode mapping."""
    from clld.db.models.common import Language
    from clld.scripts.util import Data, add_language_codes

    add_language_codes(
        Data(), Language(), 'iso', glottocodes=dict(iso='glot1234'))
def test_CLLDRequest(env):
    """Exercise custom request properties on a configured test app."""
    req = env['request']
    assert isinstance(req.purl, URL)
    contrib = req.db.query(Contribution).first()
    req.resource_url(contrib, ext='geojson')
    assert req.ctx_for_url('/some/path/to/nowhere') is None
    assert req.ctx_for_url('/')
    req.file_url(Language_files(id='1', object=Language.first()))
    assert req.get_datatable('valuesets', ValueSet)
    assert req.blog is None
def __init__(self, req, model, language=None, **kw):
    """Initialize the table with an optional language restriction.

    self.language is assigned before DataTable.__init__ runs, since the
    base initializer may consult it (e.g. when building column defs).
    """
    if not language and 'language' in req.params:
        language = Language.get(req.params['language'])
    self.language = language or None
    DataTable.__init__(self, req, model, **kw)
def test_JSONEncodedDict(db):
    """jsondata survives a database round trip as a real dict."""
    obj = Language(id='abc', name='Name', jsondata={'i': 2})
    DBSession.add(obj)
    DBSession.flush()
    DBSession.expunge(obj)
    for stored in DBSession.query(Language).filter(Language.id == 'abc'):
        assert stored.jsondata['i'] == 2
        break
def test_CLLDRequest(self):
    """Exercise request properties: purl, db, urls, datatables, blog."""
    req = self.env["request"]
    self.assertTrue(isinstance(req.purl, URL))
    contrib = req.db.query(Contribution).first()
    req.resource_url(contrib, ext="geojson")
    self.assertEqual(None, req.ctx_for_url("/some/path/to/nowhere"))
    assert req.ctx_for_url("/")
    req.file_url(Language_files(id="1", object=Language.first()))
    assert req.get_datatable("valuesets", ValueSet)
    assert req.blog is None
def test_CLLDRequest(self):
    """Custom request API smoke test: purl, resource/file urls, datatable."""
    request = self.env['request']
    self.assertTrue(isinstance(request.purl, URL))
    first_contribution = request.db.query(Contribution).first()
    request.resource_url(first_contribution, ext='geojson')
    self.assertEqual(
        None, request.ctx_for_url('/some/path/to/nowhere'))
    request.file_url(
        Language_files(id='1', object=Language.first()))
    assert request.get_datatable('valuesets', ValueSet)
def test_compute_language_sources(self):
    """compute_language_sources runs over a freshly linked sentence reference."""
    from clld.db.models.common import Source, Sentence, Language, SentenceReference
    from clld.db.meta import DBSession
    from clld.db.util import compute_language_sources

    sentence = Sentence(id='sentenced', language=Language(id='newlang'))
    reference = SentenceReference(sentence=sentence, source=Source.first())
    DBSession.add(reference)
    DBSession.flush()
    compute_language_sources()
def test_Base_jsondata(self):
    """update_jsondata merges new keys into the stored JSON dict."""
    obj = Language(id='abc', name='Name')
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    obj.update_jsondata(a=1)
    self.assertTrue('a' in obj.jsondata)
    # Subsequent updates must keep previously set keys.
    obj.update_jsondata(b=1)
    self.assertTrue('b' in obj.jsondata and 'a' in obj.jsondata)
    self.assertTrue('b' in obj.__json__(None)['jsondata'])
def test_Base_jsondata(db):
    """update_jsondata merges keys rather than replacing the dict."""
    obj = Language(id='abc', name='Name')
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    obj.update_jsondata(a=1)
    assert 'a' in obj.jsondata
    obj.update_jsondata(b=1)
    # Both the old and the new key must be present after the second update.
    assert 'b' in obj.jsondata and 'a' in obj.jsondata
    assert 'b' in obj.__json__(None)['jsondata']
def __init__(self, req, *args, **kw):
    """Initialize, resolving the ``languages`` keyword or request parameter.

    One aliased ValueSet per selected language is prepared for later joins.
    """
    Parameters.__init__(self, req, *args, **kw)
    if kw.get('languages'):
        self.languages = kw['languages']
    elif 'languages' in req.params:
        requested_ids = req.params['languages'].split(',')
        self.languages = nfilter(
            [Language.get(id_, default=None) for id_ in requested_ids])
    else:
        self.languages = []
    self._langs = [
        aliased(ValueSet, name='l%s' % index)
        for index in range(len(self.languages))]
def test_Files(self):
    """Files attached to a language are accessible via the files mapping."""
    from clld.db.models.common import Language, Language_files
    if PY3:
        return  # pragma: no cover
    obj = Language(id='abc', name='Name')
    assert obj.iso_code is None
    obj._files.append(Language_files(id='abstract'))
    DBSession.add(obj)
    DBSession.flush()
    DBSession.refresh(obj)
    # Lookup by id must succeed after the refresh.
    f = obj.files['abstract']
def test_Versioning(self):
    """Versioning tracks updates, and data rows can be attached and deleted."""
    from clld.db.models.common import Language, Language_data
    from clld.db.meta import VersionedDBSession

    obj = Language(id='abc', name='Old Name', jsondata={'i': 2})
    VersionedDBSession.add(obj)
    VersionedDBSession.flush()
    self.assertEqual(obj.version, 1)

    obj.name = 'New Name'
    obj.description = 'New Description'
    VersionedDBSession.flush()
    self.assertEqual(obj.version, 2)

    History = obj.__history_mapper__.class_
    superseded = VersionedDBSession.query(History).filter(
        History.pk == obj.pk).all()
    self.assertEqual(superseded[0].name, 'Old Name')

    obj.data.append(Language_data(key='k', value='v'))
    VersionedDBSession.flush()
    assert obj.datadict()
    VersionedDBSession.delete(obj)
    VersionedDBSession.flush()
def test_Versioning(db):
    """Updates bump the version; the superseded state remains queryable."""
    from clld.db.models.common import Language, Language_data
    from clld.db.meta import VersionedDBSession

    lang = Language(id='abc', name='Old Name', jsondata={'i': 2})
    VersionedDBSession.add(lang)
    VersionedDBSession.flush()
    assert lang.version == 1

    lang.name = 'New Name'
    lang.description = 'New Description'
    VersionedDBSession.flush()
    assert lang.version == 2

    History = lang.__history_mapper__.class_
    old = VersionedDBSession.query(History).filter(
        History.pk == lang.pk).all()
    assert old[0].name == 'Old Name'

    lang.data.append(Language_data(key='k', value='v'))
    VersionedDBSession.flush()
    assert lang.datadict()
    VersionedDBSession.delete(lang)
    VersionedDBSession.flush()
def __init__(self, req, *args, **kw):
    """Resolve requested languages and prepare one ValueSet alias each."""
    Parameters.__init__(self, req, *args, **kw)
    if kw.get('languages'):
        self.languages = kw['languages']
    elif 'languages' in req.params:
        self.languages = nfilter([
            Language.get(lid, default=None)
            for lid in req.params['languages'].split(',')])
    else:
        self.languages = []
    self._langs = [
        aliased(ValueSet, name='l%s' % n)
        for n in range(len(self.languages))]
def test_CustomModelMixin_polymorphic(self):
    """Base and custom subclasses load back with the right polymorphic type."""
    from clld.tests.fixtures import CustomLanguage

    base = Language(id='def', name='Name')
    custom = CustomLanguage(id='abc', name='Name', custom='c')
    DBSession.add_all([base, custom])
    DBSession.flush()
    DBSession.expunge_all()
    base = DBSession.query(Language).filter_by(id='def').one()
    custom = DBSession.query(Language).filter_by(id='abc').one()
    self.assertEqual(base.polymorphic_type, 'base')
    self.assertEqual(custom.polymorphic_type, 'custom')
    self.assertIs(type(base), Language)
    self.assertIs(type(custom), CustomLanguage)
def test_CustomModelMixin_polymorphic(db, custom_language):
    """Querying Language yields the correct concrete class per row."""
    base = Language(id='def', name='Name')
    assert repr(base).startswith("<Language ")
    assert is_base(Language)
    assert not is_base(custom_language)
    derived = custom_language(id='abc', name='Name', custom='c')
    DBSession.add_all([base, derived])
    DBSession.flush()
    DBSession.expunge_all()
    base = DBSession.query(Language).filter_by(id='def').one()
    derived = DBSession.query(Language).filter_by(id='abc').one()
    assert base.polymorphic_type == 'base'
    assert derived.polymorphic_type == 'custom'
    assert type(base) is Language
    assert type(derived) is custom_language
def test_Base(db):
    """Fetch by id and check history and human readable representations.

    Fixes an operator-precedence bug in the final assertion: the original
    ``assert repr(l) == "..." if PY3 else "..."`` parsed as
    ``assert (repr(l) == "...") if PY3 else "<Language u'abc'>"``, so on
    py2 it asserted a non-empty string — i.e. it checked nothing.
    """
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    l = Language.get('abc', session=VersionedDBSession)
    assert l.name == 'Name'
    assert not list(l.history())
    # a bit of a hack to test the human readable representations.
    # we exploit the fact, that on py2, string and unicode comparison does type
    # coercion, while on py3, the two methods should actually return the same string.
    assert l.__str__() == l.__unicode__()
    Language().__str__()
    assert repr(l) == ("<Language 'abc'>" if PY3 else "<Language u'abc'>")
def test_CsvMixin(db):
    """Languages serialize to csv rows and parse back, including jsondata."""
    lang = Language(id='abc', name='Name', latitude=12.4, jsondata=dict(a=None))
    DBSession.add(lang)
    DBSession.flush()
    lang = Language.csv_query(DBSession).first()
    cols = lang.csv_head()
    row = lang.to_csv()
    for col, value in zip(cols, row):
        if col == 'jsondata':
            assert 'a' in json.loads(value)
    roundtripped = Language.from_csv(row)
    assert pytest.approx(lang.latitude) == roundtripped.latitude
    # A decimal comma must be parsed as well.
    row[cols.index('latitude')] = '3,5'
    roundtripped = Language.from_csv(row)
    assert roundtripped.latitude < lang.latitude
def test_CsvMixin(self):
    """csv round trip preserves jsondata and parses decimal commas."""
    original = Language(id='abc', name='Name', latitude=12.4, jsondata=dict(a=None))
    DBSession.add(original)
    DBSession.flush()
    original = Language.csv_query(DBSession).first()
    cols = original.csv_head()
    row = original.to_csv()
    for col, value in zip(cols, row):
        if col == 'jsondata':
            self.assertIn('a', json.loads(value))
    parsed = Language.from_csv(row)
    assert_almost_equal(original.latitude, parsed.latitude)
    row[cols.index('latitude')] = '3,5'
    parsed = Language.from_csv(row)
    self.assertLess(parsed.latitude, original.latitude)
def test_Base(self):
    """Fetch a versioned Language by id and exercise its representations.

    Removes a large block of commented-out debugging code (dead code should
    be deleted, not kept) and fixes the 'actualy' typo in the comment.
    """
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    l = Language.get('abc')
    self.assertEqual(l.name, 'Name')
    l.history()
    # a bit of a hack to test the human readable representations.
    # we exploit the fact, that on py2, string and unicode comparison does type
    # coercion, while on py3, the two methods should actually return the same string.
    self.assertEqual(l.__str__(), l.__unicode__())
    Language().__str__()
def test_UnitValue(db):
    """Reassigning a UnitValue's parameter to a mismatching one must fail."""
    from clld.db.models.common import Unit, Language, UnitParameter, UnitValue, UnitDomainElement

    unit = Unit(name='unit', language=Language(name='language'))
    param = UnitParameter()
    other_param = UnitParameter()
    # NOTE: the parameter of UnitValue and UnitDomainElement is assumed to
    # be identical (i.e. we do not enforce/check this here).
    value = UnitValue(
        unit=unit,
        unitparameter=param,
        unitdomainelement=UnitDomainElement(parameter=param, name='ude'))
    assert str(value) == 'ude'
    DBSession.add(value)
    DBSession.add(other_param)
    DBSession.flush()
    try:
        value.unitparameter_pk = other_param.pk
        raise ValueError  # pragma: no cover
    except AssertionError:
        # Expected: the model guards against mismatching parameters.
        pass
    value.unitparameter_pk = param.pk
    DBSession.flush()
def get_query(self, *args, **kw):
    """Return a fixed single-element result: the first Language."""
    first = Language.first()
    return [first]
def test_get_feature(self):
    """get_feature exposes id and name; name can be overridden via kwargs.

    Uses ``assertEqual``; the original ``assertEquals`` is a deprecated
    alias removed in Python 3.12.
    """
    l = Language.first()
    self.assertEqual(geojson.get_feature(l)['id'], l.id)
    self.assertEqual(geojson.get_feature(l)['properties']['name'], l.name)
    self.assertEqual(
        geojson.get_feature(l, name='geo')['properties']['name'], 'geo')
def import_dataset(path, provider):
    """Import one lexibank-style csv dataset into the database.

    :param path: path to the csv value file; ``<path>-metadata.json`` must
        exist next to it.
    :param provider: provider id, used to build value ids.

    NOTE(review): the whole file is assumed to contain a single language —
    see the assert inside the row loop; confirm with callers.
    """
    # look for metadata
    # look for sources
    # then loop over values
    dirpath, fname = os.path.split(path)
    basename, ext = os.path.splitext(fname)
    glottolog = Glottolog()

    mdpath = path + "-metadata.json"
    assert os.path.exists(mdpath)
    md = jsonload(mdpath)
    md, parameters = md["properties"], md["parameters"]

    # Contribution name; the dataset id is appended when present.
    cname = md["name"]
    if "id" in md:
        cname = "%s [%s]" % (cname, md["id"])
    contrib = Wordlist(id=basename, name=cname)
    contributors = md.get("typedby", md.get("contributors"))
    if contributors:
        contributor_name = HumanName(contributors)
        contributor_id = slug(contributor_name.last + contributor_name.first)
        contributor = Contributor.get(contributor_id, default=None)
        if not contributor:
            contributor = Contributor(
                id=contributor_id, name="%s" % contributor_name)
        DBSession.add(ContributionContributor(
            contribution=contrib, contributor=contributor))

    # bibpath = os.path.join(dirpath, basename + '.bib')
    # if os.path.exists(bibpath):
    #     for rec in Database.from_file(bibpath):
    #         if rec['key'] not in data['Source']:
    #             data.add(Source, rec['key'], _obj=bibtex2source(rec))

    data = Data()
    concepts = {p.id: p for p in DBSession.query(Concept)}
    language = None

    for i, row in enumerate(reader(path, dicts=True, delimiter=",")):
        # Skip rows without a value or without a concept reference.
        if not row["Value"] or not row["Feature_ID"]:
            continue
        fid = row["Feature_ID"].split("/")[-1]
        vsid = "%s-%s-%s" % (basename, row["Language_ID"], fid)
        vid = "%s-%s-%s" % (provider, basename, i + 1)
        if language:
            # All rows of one file must belong to the same language.
            assert language.id == row["Language_ID"]
        else:
            language = Language.get(row["Language_ID"], default=None)
            if language is None:
                # query glottolog!
                languoid = glottolog.languoid(row["Language_ID"])
                language = LexibankLanguage(
                    id=row["Language_ID"],
                    name=languoid.name,
                    latitude=languoid.latitude,
                    longitude=languoid.longitude)

        # Create unseen concepts on the fly; reuse cached ones.
        parameter = concepts.get(fid)
        if parameter is None:
            concepts[fid] = parameter = Concept(
                id=fid,
                name=parameters[row["Feature_ID"]],
                concepticon_url=row["Feature_ID"])

        vs = data["ValueSet"].get(vsid)
        if vs is None:
            vs = data.add(
                ValueSet, vsid,
                id=vsid,
                parameter=parameter,
                language=language,
                contribution=contrib,
                source=row.get("Source"))

        counterpart = Counterpart(
            id=vid,
            valueset=vs,
            name=row["Value"],
            description=row.get("Comment"),
            loan=row.get("Loan") == "yes")

        if row.get("Cognate_Set"):
            # Only the first cognate set label is used.
            csid = row["Cognate_Set"].split(",")[0].strip()
            cs = Cognateset.get(csid, key="name", default=None)
            if cs is None:
                cs = Cognateset(name=csid)
            counterpart.cognateset = cs

    # for key, src in data['Source'].items():
    #     if key in vs.source:
    #         ValueSetReference(valueset=vs, source=src, key=key)
    contrib.language = language
def test_get_feature(self):
    """get_feature exposes id and name; name can be overridden via kwargs.

    Uses ``assertEqual``; the original ``assertEquals`` is a deprecated
    alias removed in Python 3.12.
    """
    l = Language.first()
    self.assertEqual(geojson.get_feature(l)["id"], l.id)
    self.assertEqual(geojson.get_feature(l)["properties"]["name"], l.name)
    self.assertEqual(
        geojson.get_feature(l, name="geo")["properties"]["name"], "geo")
def test_Base_get(db):
    """Language.get honours an explicit default and raises without one."""
    assert Language.get('doesntexist', default=42) == 42
    with pytest.raises(NoResultFound):
        Language.get('doesntexist')
def test_Base_get(self):
    """Language.get honours an explicit default and raises without one."""
    self.assertEqual(Language.get('doesntexist', default=42), 42)
    with self.assertRaises(NoResultFound):
        Language.get('doesntexist')
def create(self, req, filename=None, verbose=True, link_callback=None, lang=None):
    """Render the species index for one language as a PDF download.

    :param req: the current request.
    :param filename: unused here; kept for interface compatibility.
    :param verbose: unused here; kept for interface compatibility.
    :param link_callback: passed through to ``pisa.CreatePDF``.
    :param lang: the Language to render; defaults to the language with
        id 'afr'.
    """
    html = []
    lang = lang or Language.get('afr')
    # All value sets of the language, ordered by the full taxonomic
    # hierarchy so that groupby below can nest kingdom > ... > family.
    entries = list(
        DBSession.query(ValueSet).join(ValueSet.parameter)
        .filter(ValueSet.language_pk == lang.pk)
        .order_by(
            Taxon.kingdom,
            Taxon.phylum,
            Taxon.class_,
            Taxon.order,
            Taxon.family,
            Parameter.name)
        .options(contains_eager(ValueSet.parameter), joinedload(ValueSet.values)))
    # Nested groupby works because entries are sorted by the same keys.
    for kingdom, taxa1 in groupby(entries, key=lambda vs: vs.parameter.kingdom):
        html.append('<h2>Kingdom: %s</h2>' % (kingdom or 'other'))
        for phylum, taxa2 in groupby(taxa1, key=lambda vs: vs.parameter.phylum):
            html.append('<h3>Phylum: %s</h3>' % (phylum or 'other'))
            for class_, taxa3 in groupby(taxa2, key=lambda vs: vs.parameter.class_):
                html.append('<h4>Class: %s</h4>' % (class_ or 'other'))
                for order, taxa4 in groupby(taxa3, key=lambda vs: vs.parameter.order):
                    html.append('<h5>Order: %s</h5>' % (order or 'other'))
                    for family, taxa5 in groupby(taxa4, key=lambda vs: vs.parameter.family):
                        html.append('<h6>Family: %s</h6>' % (family or 'other'))
                        for entry in taxa5:
                            # Each entry is rendered via its snippet adapter.
                            adapter = get_adapter(
                                IRepresentation, entry, req, ext='snippet.html')
                            html.append(adapter.render(entry, req))
                            html.append('<p class="separator"> <p>')
    with open(str(download_path('%s.pdf' % lang.id)), 'wb') as fp:
        editors = ''
        if lang.contribution.contributor_assocs:
            editors = 'edited by ' + ' and '.join(
                c.last_first() for c in lang.contribution.primary_contributors)
        pisa.CreatePDF(
            html_tmpl % (
                css_tmpl.format(charis_font_spec_css()),
                req.resource_url(req.dataset),
                """
<h1 style="text-align: center; font-size: 12mm;">%(language)s names for Plants and Animals</h1>
<h2 style="text-align: center; font-size: 8mm;">%(editors)s</h2>
<p style="font-size: 5mm;">
    This document was created from
    <a href="%(url)s">%(dataset)s</a> on %(date)s.
</p>
<p style="font-size: 5mm;">
    %(dataset)s is published under a %(license)s
    and should be cited as
</p>
<blockquote style="font-size: 5mm;"><i>%(citation)s</i></blockquote>
<p style="font-size: 5mm;">
    A full list of contributors is available at
    <a href="%(url)scontributors">%(url)scontributors</a>
</p>
<p style="font-size: 5mm;">
    The list of references cited in this document is available at
    <a href="%(url)ssources">%(url)ssources</a>
</p>
""" % dict(
                    language=lang.name,
                    editors=editors,
                    dataset=req.dataset.name,
                    url=req.resource_url(req.dataset),
                    date=date.today(),
                    citation=text_citation(req, req.dataset),
                    license=req.dataset.jsondata['license_name']),
                ''.join(html)),
            dest=fp,
            link_callback=link_callback,
        )
def test_get_feature(data):
    """get_feature exposes id and name; name can be overridden."""
    lang = Language.first()
    feature = geojson.get_feature(lang)
    assert feature['id'] == lang.id
    assert feature['properties']['name'] == lang.name
    assert geojson.get_feature(lang, name='geo')['properties']['name'] == 'geo'
def test_SolrDoc(self):
    """The SolrDoc adapter renders a language without raising."""
    from clld.web.adapters.base import SolrDoc

    SolrDoc(None).render(Language.first(), self.env['request'])