def get_record(self, req, identifier):
    """Look up the Language matching the parsed identifier."""
    rec = Language.get(self.parse_identifier(req, identifier), default=None)
    assert rec
    return rec
def __init__(self, req, model, language=None, **kw):
    super(Sources, self).__init__(req, model, **kw)
    if language:
        self.language = language
    elif 'language' in req.params:
        self.language = Language.get(req.params['language'])
    else:
        self.language = None
def update(args):
    count = 0
    for url, glottocode in args.json.items():
        lang = Language.get(glottocode, default=None)
        if lang:
            count += 1
            lang.update_jsondata(languagelandscape=url)
    print('assigned', count, 'languagelandscape urls')
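A minimal sketch of how `update` might be driven from the command line; the argparse wiring and the JSON file name are assumptions, only the url-to-glottocode mapping consumed via `args.json` comes from the function above.

# Hypothetical driver for update(); only the args.json mapping is taken from the snippet above.
import argparse
import json

parser = argparse.ArgumentParser()
parser.add_argument('mapping', help='path to a JSON file mapping languagelandscape URLs to glottocodes')
cli_args = parser.parse_args()
with open(cli_args.mapping) as fp:
    # e.g. {"http://languagelandscape.org/language/example": "stan1295"}
    cli_args.json = json.load(fp)
update(cli_args)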
def __init__(self, req, model, language=None, **kw):
    if language:
        self.language = language
    elif 'language' in req.params:
        self.language = Language.get(req.params['language'])
    else:
        self.language = None
    DataTable.__init__(self, req, model, **kw)
def test_Base(db):
    l = Language(id='abc', name='Name')
    DBSession.add(l)
    DBSession.flush()
    DBSession.expunge(l)
    l = Language.get('abc', session=DBSession)
    assert l.name == 'Name'
    Language().__str__()
    assert repr(l) == "<Language 'abc'>"
def test_Base(db):
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    l = Language.get('abc', session=VersionedDBSession)
    assert l.name == 'Name'
    assert not list(l.history())
    Language().__str__()
    assert repr(l) == "<Language 'abc'>"
def __init__(self, req, *args, **kw):
    Parameters.__init__(self, req, *args, **kw)
    if kw.get('languages'):
        self.languages = kw['languages']
    elif 'languages' in req.params:
        self.languages = nfilter([
            Language.get(id_, default=None)
            for id_ in req.params['languages'].split(',')])
    else:
        self.languages = []
    self._langs = [
        aliased(ValueSet, name='l%s' % i) for i in range(len(self.languages))]
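The 'languages' request parameter above is expected as a comma-separated list of Language ids; ids that do not resolve are silently dropped. A minimal illustration of that resolution step (the raw query-string value is made up, and the nfilter import path is an assumption):

# Sketch of how the 'languages' request parameter is resolved; only nfilter()
# and Language.get(..., default=None) come from the snippet above.
from clldutils.misc import nfilter  # import path assumed

raw = 'abc,doesntexist,def'  # e.g. req.params['languages']
languages = nfilter([
    Language.get(id_, default=None) for id_ in raw.split(',')])
# unknown ids resolve to None and are dropped by nfilter, so only existing
# Language objects end up in self.languages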
def test_Base(self):
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    l = Language.get('abc')
    self.assertEqual(l.name, 'Name')
    assert not list(l.history())
    # a bit of a hack to test the human readable representations.
    # we exploit the fact that on py2, string and unicode comparison does type
    # coercion, while on py3, the two methods should actually return the same string.
    self.assertEqual(l.__str__(), l.__unicode__())
    Language().__str__()
def test_Base(db):
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    l = Language.get('abc', session=VersionedDBSession)
    assert l.name == 'Name'
    assert not list(l.history())
    # a bit of a hack to test the human readable representations.
    # we exploit the fact that on py2, string and unicode comparison does type
    # coercion, while on py3, the two methods should actually return the same string.
    assert l.__str__() == l.__unicode__()
    Language().__str__()
    assert repr(l) == ("<Language 'abc'>" if PY3 else "<Language u'abc'>")
def test_Base(self):
    l = Language(id='abc', name='Name')
    VersionedDBSession.add(l)
    VersionedDBSession.flush()
    VersionedDBSession.expunge(l)
    #print('pk: %s' % l.pk)
    #transaction.commit()
    #transaction.begin()
    #l = VersionedDBSession.query(Language).get(1)
    #print(l)
    #l.name = 'New name'
    #print('pk: %s' % l.pk)
    #transaction.commit()
    #transaction.begin()
    l = Language.get('abc')
    #print(l.version)
    self.assertEqual(l.name, 'Name')
    l.history()
    # a bit of a hack to test the human readable representations.
    # we exploit the fact that on py2, string and unicode comparison does type
    # coercion, while on py3, the two methods should actually return the same string.
    self.assertEqual(l.__str__(), l.__unicode__())
    Language().__str__()
def import_dataset(path, provider):
    # look for metadata
    # look for sources
    # then loop over values
    dirpath, fname = os.path.split(path)
    basename, ext = os.path.splitext(fname)
    glottolog = Glottolog()

    mdpath = path + "-metadata.json"
    assert os.path.exists(mdpath)
    md = jsonload(mdpath)
    md, parameters = md["properties"], md["parameters"]

    cname = md["name"]
    if "id" in md:
        cname = "%s [%s]" % (cname, md["id"])
    contrib = Wordlist(id=basename, name=cname)
    contributors = md.get("typedby", md.get("contributors"))
    if contributors:
        contributor_name = HumanName(contributors)
        contributor_id = slug(contributor_name.last + contributor_name.first)
        contributor = Contributor.get(contributor_id, default=None)
        if not contributor:
            contributor = Contributor(id=contributor_id, name="%s" % contributor_name)
        DBSession.add(ContributionContributor(contribution=contrib, contributor=contributor))

    # bibpath = os.path.join(dirpath, basename + '.bib')
    # if os.path.exists(bibpath):
    #     for rec in Database.from_file(bibpath):
    #         if rec['key'] not in data['Source']:
    #             data.add(Source, rec['key'], _obj=bibtex2source(rec))

    data = Data()
    concepts = {p.id: p for p in DBSession.query(Concept)}
    language = None

    for i, row in enumerate(reader(path, dicts=True, delimiter=",")):
        if not row["Value"] or not row["Feature_ID"]:
            continue

        fid = row["Feature_ID"].split("/")[-1]
        vsid = "%s-%s-%s" % (basename, row["Language_ID"], fid)
        vid = "%s-%s-%s" % (provider, basename, i + 1)

        if language:
            assert language.id == row["Language_ID"]
        else:
            language = Language.get(row["Language_ID"], default=None)
            if language is None:
                # query glottolog!
                languoid = glottolog.languoid(row["Language_ID"])
                language = LexibankLanguage(
                    id=row["Language_ID"],
                    name=languoid.name,
                    latitude=languoid.latitude,
                    longitude=languoid.longitude)

        parameter = concepts.get(fid)
        if parameter is None:
            concepts[fid] = parameter = Concept(
                id=fid,
                name=parameters[row["Feature_ID"]],
                concepticon_url=row["Feature_ID"])

        vs = data["ValueSet"].get(vsid)
        if vs is None:
            vs = data.add(
                ValueSet, vsid,
                id=vsid,
                parameter=parameter,
                language=language,
                contribution=contrib,
                source=row.get("Source"))

        counterpart = Counterpart(
            id=vid,
            valueset=vs,
            name=row["Value"],
            description=row.get("Comment"),
            loan=row.get("Loan") == "yes")

        if row.get("Cognate_Set"):
            csid = row["Cognate_Set"].split(",")[0].strip()
            cs = Cognateset.get(csid, key="name", default=None)
            if cs is None:
                cs = Cognateset(name=csid)
            counterpart.cognateset = cs

        # for key, src in data['Source'].items():
        #     if key in vs.source:
        #         ValueSetReference(valueset=vs, source=src, key=key)

    contrib.language = language
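A hedged sketch of how import_dataset might be invoked over a directory of per-wordlist CSV files; the directory layout, file naming, and transaction handling are assumptions, only the (path, provider) signature comes from the function above.

# Hypothetical driver; only import_dataset(path, provider) is taken from the snippet above.
import os
import transaction

def import_all(datadir, provider):
    for fname in sorted(os.listdir(datadir)):
        if fname.endswith('.csv'):
            with transaction.manager:  # commit each wordlist separately
                import_dataset(os.path.join(datadir, fname), provider)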
def create(self, req, filename=None, verbose=True, link_callback=None, lang=None):
    html = []
    lang = lang or Language.get('afr')
    entries = list(
        DBSession.query(ValueSet).join(ValueSet.parameter)
        .filter(ValueSet.language_pk == lang.pk)
        .order_by(
            Taxon.kingdom, Taxon.phylum, Taxon.class_, Taxon.order, Taxon.family,
            Parameter.name)
        .options(contains_eager(ValueSet.parameter), joinedload(ValueSet.values)))

    for kingdom, taxa1 in groupby(entries, key=lambda vs: vs.parameter.kingdom):
        html.append('<h2>Kingdom: %s</h2>' % (kingdom or 'other'))
        for phylum, taxa2 in groupby(taxa1, key=lambda vs: vs.parameter.phylum):
            html.append('<h3>Phylum: %s</h3>' % (phylum or 'other'))
            for class_, taxa3 in groupby(taxa2, key=lambda vs: vs.parameter.class_):
                html.append('<h4>Class: %s</h4>' % (class_ or 'other'))
                for order, taxa4 in groupby(taxa3, key=lambda vs: vs.parameter.order):
                    html.append('<h5>Order: %s</h5>' % (order or 'other'))
                    for family, taxa5 in groupby(taxa4, key=lambda vs: vs.parameter.family):
                        html.append('<h6>Family: %s</h6>' % (family or 'other'))
                        for entry in taxa5:
                            adapter = get_adapter(
                                IRepresentation, entry, req, ext='snippet.html')
                            html.append(adapter.render(entry, req))
                            html.append('<p class="separator"> <p>')

    with open(str(download_path('%s.pdf' % lang.id)), 'wb') as fp:
        editors = ''
        if lang.contribution.contributor_assocs:
            editors = 'edited by ' + ' and '.join(
                c.last_first() for c in lang.contribution.primary_contributors)
        pisa.CreatePDF(
            html_tmpl % (
                css_tmpl.format(charis_font_spec_css()),
                req.resource_url(req.dataset),
                """
<h1 style="text-align: center; font-size: 12mm;">%(language)s names for Plants and Animals</h1>
<h2 style="text-align: center; font-size: 8mm;">%(editors)s</h2>
<p style="font-size: 5mm;">
    This document was created from
    <a href="%(url)s">%(dataset)s</a> on %(date)s.
</p>
<p style="font-size: 5mm;">
    %(dataset)s is published under a %(license)s and should be cited as
</p>
<blockquote style="font-size: 5mm;"><i>%(citation)s</i></blockquote>
<p style="font-size: 5mm;">
    A full list of contributors is available at
    <a href="%(url)scontributors">%(url)scontributors</a>
</p>
<p style="font-size: 5mm;">
    The list of references cited in this document is available at
    <a href="%(url)ssources">%(url)ssources</a>
</p>
""" % dict(
                    language=lang.name,
                    editors=editors,
                    dataset=req.dataset.name,
                    url=req.resource_url(req.dataset),
                    date=date.today(),
                    citation=text_citation(req, req.dataset),
                    license=req.dataset.jsondata['license_name']),
                ''.join(html)),
            dest=fp,
            link_callback=link_callback,
        )
def test_Base_get(self):
    self.assertEqual(42, Language.get('doesntexist', default=42))
    self.assertRaises(NoResultFound, Language.get, 'doesntexist')
def test_Base_get(db):
    assert 42 == Language.get('doesntexist', default=42)
    with pytest.raises(NoResultFound):
        Language.get('doesntexist')
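The default= keyword turns Language.get into a lookup that falls back instead of raising NoResultFound; a minimal get-or-create sketch built on that behaviour, as used throughout the snippets above (the example id and name are made up, the import paths are standard clld locations):

# get-or-create built on Language.get(..., default=None); id/name values are illustrative only.
from clld.db.meta import DBSession
from clld.db.models.common import Language

lang = Language.get('abcd1234', default=None)
if lang is None:
    lang = Language(id='abcd1234', name='Example Language')
    DBSession.add(lang)
    DBSession.flush()  # assign a primary key so the new object can be referenced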