def test_CustomModelMixin_polymorphic(self):
    """A base row and a customized row round-trip with the right polymorphic type."""
    from clld.tests.fixtures import CustomLanguage

    base = Language(id='def', name='Name')
    custom = CustomLanguage(id='abc', name='Name', custom='c')
    DBSession.add_all([base, custom])
    DBSession.flush()
    # Drop all in-memory instances so the queries below rehydrate from the db.
    DBSession.expunge_all()

    base = DBSession.query(Language).filter_by(id='def').one()
    custom = DBSession.query(Language).filter_by(id='abc').one()

    self.assertEqual(base.polymorphic_type, 'base')
    self.assertEqual(custom.polymorphic_type, 'custom')
    # The polymorphic discriminator must restore the concrete classes.
    self.assertIs(type(base), Language)
    self.assertIs(type(custom), CustomLanguage)
def test_CustomModelMixin_polymorphic(db, custom_language):
    """A base row and a customized row round-trip with the right polymorphic type."""
    base = Language(id='def', name='Name')
    assert repr(base).startswith("<Language ")
    assert is_base(Language)
    assert not is_base(custom_language)

    sub = custom_language(id='abc', name='Name', custom='c')
    DBSession.add_all([base, sub])
    DBSession.flush()
    # Drop all in-memory instances so the queries below rehydrate from the db.
    DBSession.expunge_all()

    base = DBSession.query(Language).filter_by(id='def').one()
    sub = DBSession.query(Language).filter_by(id='abc').one()

    assert base.polymorphic_type == 'base'
    assert sub.polymorphic_type == 'custom'
    # The polymorphic discriminator must restore the concrete classes.
    assert type(base) is Language
    assert type(sub) is custom_language
def prime_cache(args):
    """If data needs to be denormalized for lookup, do that here.

    This procedure should be separate from the db initialization, because
    it will have to be run periodically whenever data has been updated.
    """
    _render_markdown_intros()
    _add_glottolog_info()
    _assign_value_icons()


def _render_markdown_intros():
    """Render each contribution's raw markdown description to HTML markup."""
    print('Parsing markdown intros...')
    for contrib in DBSession.query(models.Contribution):
        contrib.markup_description = (
            markdown(contrib.description) if contrib.description else None)
    print('... done')


def _add_glottolog_info():
    """Pull names, coordinates and identifiers from a local glottolog clone."""
    print('Retrieving language data from glottolog...')
    catconf = cldfcatalog.Config.from_file()
    glottolog_path = catconf.get_clone('glottolog')
    glottolog = Glottolog(glottolog_path)

    lang_ids = [lang.id for lang in DBSession.query(common.Language)]
    languoids = {lg.id: lg for lg in glottolog.languoids(lang_ids)}

    # One Identifier per distinct code.  Sorting on the code (key=t[0], so
    # Identifier objects are never compared) keeps insertion deterministic;
    # the dict deduplicates iso codes shared by several languoids.
    glottocodes = OrderedDict(sorted(
        ((lg.id, common.Identifier(id=lg.id, name=lg.id, type='glottolog'))
         for lg in languoids.values()),
        key=lambda t: t[0]))
    isocodes = OrderedDict(sorted(
        ((lg.iso, common.Identifier(id=lg.iso, name=lg.iso, type='iso639-3'))
         for lg in languoids.values() if lg.iso),
        key=lambda t: t[0]))

    DBSession.add_all(glottocodes.values())
    DBSession.add_all(isocodes.values())
    # Flush so the identifiers get primary keys we can link against below.
    DBSession.flush()

    for lang in DBSession.query(common.Language):
        languoid = languoids.get(lang.id)
        if languoid is None:
            # No glottolog record for this language; leave it untouched.
            continue
        lang.name = languoid.name
        lang.latitude = languoid.latitude
        lang.longitude = languoid.longitude
        lang.macroarea = (
            languoid.macroareas[0].name if languoid.macroareas else '')
        DBSession.add(
            common.LanguageIdentifier(
                language=lang, identifier_pk=glottocodes[languoid.id].pk))
        if languoid.iso in isocodes:
            DBSession.add(
                common.LanguageIdentifier(
                    language=lang, identifier_pk=isocodes[languoid.iso].pk))

    DBSession.flush()
    print('... done')


def _assign_value_icons():
    """Cycle the icon inventory per parameter so its codes get distinct map dots."""
    print('Making pretty colourful dots for parameter values...')
    all_icons = [icon.name for icon in ORDERED_ICONS]
    # groupby requires its input ordered by the grouping key, hence the
    # order_by on parameter_pk (id is a tie-breaker for stable icon assignment).
    code_query = DBSession.query(common.DomainElement)\
        .order_by(common.DomainElement.parameter_pk, common.DomainElement.id)
    for _, param_codes in groupby(code_query, lambda c: c.parameter_pk):
        icons = cycle(all_icons)
        for code in param_codes:
            code.update_jsondata(icon=next(icons))
    DBSession.flush()
    print('... done')