def survey(request): id_ = request.matchdict["id"] md = jsonlib.load(ppath("Surveys", "%s.json" % id_)) html = get_html(ppath("Surveys", "%s.html" % id_)) maps = [] for fname in sorted( ppath("Surveys", processed="maps").glob("%s*.png" % id_.split(".")[1].replace("-", "_")), key=lambda fn: fn.stem ): img = b64encode(open(fname.as_posix(), "rb").read()) if "figure" in fname.stem: html = html.replace("{%s}" % fname.stem, "data:image/png;base64,%s" % img) else: maps.append(img) return { "maps": maps, "md": md, "authors": [Contributor.get(a["id"]) for a in md["authors"]], "html": html, "ctx": ApicsContribution.get(id_.split(".")[0]), }
def survey(request):
    # Same view written against the older path.py API (.files()/.namebase)
    # instead of pathlib; otherwise the logic is identical to the variant above.
    id_ = request.matchdict['id']
    md = jsonload(ppath('Surveys', '%s.json' % id_))
    html = get_html(ppath('Surveys', '%s.html' % id_))
    maps = []
    for fname in sorted(
            ppath('Surveys', processed='maps').files(
                '%s*.png' % id_.split('.')[1].replace('-', '_')),
            key=lambda fn: fn.namebase):
        img = b64encode(open(fname, 'rb').read())
        if 'figure' in fname.namebase:
            html = html.replace('{%s}' % fname.namebase, 'data:image/png;base64,%s' % img)
        else:
            maps.append(img)
    return {
        'maps': maps,
        'md': md,
        'authors': [Contributor.get(a['id']) for a in md['authors']],
        'html': html,
        'ctx': ApicsContribution.get(id_.split('.')[0]),
    }
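A minimal wiring sketch for a dict-returning Pyramid view like survey above; the route name, URL pattern and renderer path are illustrative assumptions, not taken from the original application.

from pyramid.config import Configurator

def make_app(settings):
    # Hypothetical setup: the pattern "/surveys/{id}" is what populates
    # request.matchdict["id"]; the template path is a placeholder.
    config = Configurator(settings=settings)
    config.add_route("survey", "/surveys/{id}")
    config.add_view(survey, route_name="survey", renderer="templates/survey.mako")
    return config.make_wsgi_app()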
def import_dataset(path, provider):
    # Models (Wordlist, Concept, Counterpart, Cognateset, LexibankLanguage, ...) and
    # helpers (jsonload, slug, reader, Glottolog, Data, DBSession) are assumed to be
    # imported from the surrounding clld app and its utility libraries.
    #
    # look for metadata
    # look for sources
    # then loop over values
    dirpath, fname = os.path.split(path)
    basename, ext = os.path.splitext(fname)
    glottolog = Glottolog()

    # Each data file is expected to ship with a sibling <path>-metadata.json.
    mdpath = path + "-metadata.json"
    assert os.path.exists(mdpath)
    md = jsonload(mdpath)
    md, parameters = md["properties"], md["parameters"]

    cname = md["name"]
    if "id" in md:
        cname = "%s [%s]" % (cname, md["id"])
    contrib = Wordlist(id=basename, name=cname)

    contributors = md.get("typedby", md.get("contributors"))
    if contributors:
        contributor_name = HumanName(contributors)
        contributor_id = slug(contributor_name.last + contributor_name.first)
        contributor = Contributor.get(contributor_id, default=None)
        if not contributor:
            contributor = Contributor(id=contributor_id, name="%s" % contributor_name)
        DBSession.add(ContributionContributor(contribution=contrib, contributor=contributor))

    # bibpath = os.path.join(dirpath, basename + '.bib')
    # if os.path.exists(bibpath):
    #     for rec in Database.from_file(bibpath):
    #         if rec['key'] not in data['Source']:
    #             data.add(Source, rec['key'], _obj=bibtex2source(rec))

    data = Data()
    concepts = {p.id: p for p in DBSession.query(Concept)}
    language = None

    for i, row in enumerate(reader(path, dicts=True, delimiter=",")):
        if not row["Value"] or not row["Feature_ID"]:
            continue

        fid = row["Feature_ID"].split("/")[-1]
        vsid = "%s-%s-%s" % (basename, row["Language_ID"], fid)
        vid = "%s-%s-%s" % (provider, basename, i + 1)

        if language:
            # A dataset is assumed to describe a single language.
            assert language.id == row["Language_ID"]
        else:
            language = Language.get(row["Language_ID"], default=None)
            if language is None:
                # query glottolog!
                languoid = glottolog.languoid(row["Language_ID"])
                language = LexibankLanguage(
                    id=row["Language_ID"],
                    name=languoid.name,
                    latitude=languoid.latitude,
                    longitude=languoid.longitude)

        parameter = concepts.get(fid)
        if parameter is None:
            concepts[fid] = parameter = Concept(
                id=fid, name=parameters[row["Feature_ID"]], concepticon_url=row["Feature_ID"])

        vs = data["ValueSet"].get(vsid)
        if vs is None:
            vs = data.add(
                ValueSet, vsid,
                id=vsid,
                parameter=parameter,
                language=language,
                contribution=contrib,
                source=row.get("Source"))

        counterpart = Counterpart(
            id=vid,
            valueset=vs,
            name=row["Value"],
            description=row.get("Comment"),
            loan=row.get("Loan") == "yes")

        if row.get("Cognate_Set"):
            csid = row["Cognate_Set"].split(",")[0].strip()
            cs = Cognateset.get(csid, key="name", default=None)
            if cs is None:
                cs = Cognateset(name=csid)
            counterpart.cognateset = cs

        # for key, src in data['Source'].items():
        #     if key in vs.source:
        #         ValueSetReference(valueset=vs, source=src, key=key)

    contrib.language = language
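A hedged driver sketch for import_dataset: the directory layout, the .csv filter and the use of transaction.manager (the usual clld/zope.sqlalchemy setup) are assumptions for illustration, not part of the original script.

import os
import transaction

def import_provider(datadir, provider):
    # Hypothetical driver: every <name>.csv in datadir is imported in one
    # transaction; import_dataset() itself asserts that the matching
    # <name>.csv-metadata.json exists.
    with transaction.manager:
        for fname in sorted(os.listdir(datadir)):
            if fname.endswith(".csv"):
                import_dataset(os.path.join(datadir, fname), provider)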