def test_crud(self):
    """Round-trip insert/select/update/delete via a migration Connection."""
    from clld.db.migration import Connection

    conn = Connection(DBSession)
    assert not list(conn.select(common.Identifier))

    pk = conn.insert(
        common.Identifier,
        id='iso-csw', name='csw', type=common.IdentifierType.iso.value)
    assert conn.pk(common.Identifier, 'iso-csw') == pk
    assert len(list(conn.select(common.Identifier))) == 1

    # Fetch the row back, undeferring all columns so the bookkeeping
    # attributes are loaded.
    obj = DBSession.query(common.Identifier)\
        .options(undefer('*')).get(pk)
    assert obj.active
    assert obj.version == 1
    assert obj.created
    assert obj.updated

    conn.update(common.Identifier, [('name', 'cea')], pk=pk)
    DBSession.refresh(obj)
    assert obj.name == 'cea'

    # After deletion, refreshing the stale instance must fail.
    conn.delete(common.Identifier, pk=pk)
    self.assertRaises(InvalidRequestError, DBSession.refresh, obj)
def test_crud(db):
    """Round-trip insert/select/update/delete via a migration Connection."""
    conn = Connection(DBSession)
    assert not list(conn.select(common.Identifier))

    pk = conn.insert(
        common.Identifier,
        id='iso-csw',
        name='csw',
        type=common.IdentifierType.iso.value)
    assert conn.pk(common.Identifier, 'iso-csw') == pk
    assert len(list(conn.select(common.Identifier))) == 1

    # Load the row back with every deferred column populated.
    obj = DBSession.query(common.Identifier)\
        .options(undefer('*')).get(pk)
    assert obj.active
    assert obj.version == 1
    assert obj.created
    assert obj.updated

    conn.update(common.Identifier, [('name', 'cea')], pk=pk)
    DBSession.refresh(obj)
    assert obj.name == 'cea'

    # Refreshing the instance after deletion must raise.
    conn.delete(common.Identifier, pk=pk)
    with pytest.raises(InvalidRequestError):
        DBSession.refresh(obj)
def test_crud(self):
    """Round-trip insert/select/update/delete via a migration Connection."""
    from clld.db.migration import Connection

    conn = Connection(DBSession)
    assert not list(conn.select(common.Identifier))

    pk = conn.insert(
        common.Identifier,
        id='iso-csw',
        name='csw',
        type=common.IdentifierType.iso.value)
    assert conn.pk(common.Identifier, 'iso-csw') == pk
    assert len(list(conn.select(common.Identifier))) == 1

    obj = DBSession.query(common.Identifier).get(pk)
    assert obj.active
    assert obj.version == 1
    assert obj.created
    assert obj.updated

    conn.update(common.Identifier, [('name', 'cea')], pk=pk)
    DBSession.refresh(obj)
    assert obj.name == 'cea'

    # Refreshing a deleted instance must raise.
    conn.delete(common.Identifier, pk=pk)
    self.assertRaises(InvalidRequestError, DBSession.refresh, obj)
def test_Data(self):
    """Key/value data attached to a Language is exposed via datadict()."""
    from clld.db.models.common import Language, Language_data

    lang = Language(id='abc', name='Name')
    lang.data.append(Language_data(key='abstract', value='c'))
    DBSession.add(lang)
    DBSession.flush()
    DBSession.refresh(lang)
    self.assertEqual(lang.datadict()['abstract'], 'c')
def test_Data(db):
    """Key/value data attached to a Language is exposed via datadict()."""
    from clld.db.models.common import Language, Language_data

    lang = Language(id='abc', name='Name')
    lang.data.append(Language_data(key='abstract', value='c'))
    DBSession.add(lang)
    DBSession.flush()
    DBSession.refresh(lang)
    assert lang.datadict()['abstract'] == 'c'
def test_Files(self):
    """A Language_files record becomes reachable via Language.files."""
    from clld.db.models.common import Language, Language_files

    if PY3:
        return  # pragma: no cover

    lang = Language(id='abc', name='Name')
    assert lang.iso_code is None
    lang._files.append(Language_files(id='abstract'))
    DBSession.add(lang)
    DBSession.flush()
    DBSession.refresh(lang)
    f = lang.files['abstract']
def test_Files(db, tmppath):
    """A created Sentence_files is reachable via .files and .audio."""
    from clld.db.models.common import Sentence, Sentence_files

    sentence = Sentence(id='abc', name='Name')
    file_ = Sentence_files(object=sentence, id='abstract', mime_type='audio/mpeg')
    created = file_.create(Path(tmppath), 'content')
    assert Path(created).exists()

    sentence._files.append(file_)
    DBSession.add(sentence)
    DBSession.flush()
    DBSession.refresh(sentence)
    assert sentence.files
    assert sentence.audio
def test_Files(db, tmppath):
    """A created Sentence_files is reachable via .files and .audio."""
    from clld.db.models.common import Sentence, Sentence_files

    sentence = Sentence(id='abc', name='Name')
    file_ = Sentence_files(object=sentence, id='abstract', mime_type='audio/mpeg')
    created = file_.create(tmppath, 'content')
    assert Path(created).exists()

    sentence._files.append(file_)
    DBSession.add(sentence)
    DBSession.flush()
    DBSession.refresh(sentence)
    assert sentence.files
    assert sentence.audio
def add_file(self, type_, checksum, file_cls, obj, attrs=None):
    """Create and persist a ``file_cls`` record for ``obj`` from cdstar metadata.

    :param type_: key into ``self.props`` selecting default jsondata for this \
    file type; also used in the "missing" log message.
    :param checksum: key into ``self.cdstar``; if unknown, the file is \
    reported as missing and nothing is persisted.
    :param file_cls: model class to instantiate for the file record.
    :param obj: object the file belongs to (its ``id`` and ``pk`` are used).
    :param attrs: optional dict of extra jsondata entries, applied last so \
    they override both the type defaults and the cdstar metadata (matches \
    the sibling ``add_file`` variant that already accepts ``attrs``).
    :return: ``None`` in all cases (unchanged from the original behavior).
    """
    # Guard clause: without cdstar metadata we cannot build the record.
    if checksum not in self.cdstar:
        print('{0} file missing: {1}'.format(type_, checksum))
        return

    # Start from a copy of the per-type defaults, then layer cdstar
    # metadata and caller-supplied overrides on top.
    jsondata = dict(self.props.get(type_, {}))
    jsondata.update(self.cdstar[checksum])
    if attrs:
        jsondata.update(attrs)

    f = file_cls(
        id='%s-%s' % (obj.id, checksum),
        name=self.cdstar[checksum]['original'],
        object_pk=obj.pk,
        mime_type=self.cdstar[checksum]['mimetype'],
        jsondata=jsondata)
    DBSession.add(f)
    DBSession.flush()
    DBSession.refresh(f)
    return
def test_Files(self):
    """A created Sentence_files is reachable via .files and .audio."""
    from clld.db.models.common import Sentence, Sentence_files

    if PY3:
        return  # pragma: no cover

    sentence = Sentence(id='abc', name='Name')
    file_ = Sentence_files(object=sentence, id='abstract', mime_type='audio/mpeg')
    target = Path(mkdtemp()).joinpath('clldtest').as_posix()
    created = file_.create(target, 'content')
    assert os.path.exists(created)
    # Clean up the temporary directory created above.
    rmtree(Path(created).parent.parent)

    sentence._files.append(file_)
    DBSession.add(sentence)
    DBSession.flush()
    DBSession.refresh(sentence)
    assert sentence.files
    assert sentence.audio
def test_Files(self):
    """A created Sentence_files is reachable via .files and .audio."""
    from clld.db.models.common import Sentence, Sentence_files

    if PY3:
        return  # pragma: no cover

    s = Sentence(id='abc', name='Name')
    sfile = Sentence_files(object=s, id='abstract', mime_type='audio/mpeg')
    out = sfile.create(
        Path(mkdtemp()).joinpath('clldtest').as_posix(), 'content')
    assert os.path.exists(out)
    # Remove the whole temporary tree again.
    rmtree(Path(out).parent.parent)

    s._files.append(sfile)
    DBSession.add(s)
    DBSession.flush()
    DBSession.refresh(s)
    assert s.files
    assert s.audio
def add_file(self, type_, checksum, file_cls, obj, attrs=None):
    """Create and persist a ``file_cls`` record for ``obj`` from cdstar metadata.

    If ``checksum`` is not present in ``self.cdstar``, only a "missing"
    message is printed; otherwise a file record is added, flushed and
    refreshed. Always returns ``None``.
    """
    if checksum not in self.cdstar:
        print('{0} file missing: {1}'.format(type_, checksum))
        return

    # Per-type defaults first, then cdstar metadata, then caller overrides.
    jsondata = {k: v for k, v in self.props.get(type_, {}).items()}
    jsondata.update(self.cdstar[checksum])
    if attrs:
        jsondata.update(attrs)

    record = file_cls(
        id='%s-%s' % (obj.id, checksum),
        name=self.cdstar[checksum]['original'],
        object_pk=obj.pk,
        mime_type=self.cdstar[checksum]['mimetype'],
        jsondata=jsondata)
    DBSession.add(record)
    DBSession.flush()
    DBSession.refresh(record)
    return
def test_Files(self):
    """A created Sentence_files is reachable via .files and .audio."""
    from clld.db.models.common import Sentence, Sentence_files
    from path import path

    if PY3:
        return  # pragma: no cover

    sentence = Sentence(id='abc', name='Name')
    file_ = Sentence_files(object=sentence, id='abstract', mime_type='audio/mpeg')
    created = file_.create(path(gettempdir()), 'content')
    assert os.path.exists(created)
    # Remove the file written into the temp dir.
    os.remove(created)

    sentence._files.append(file_)
    DBSession.add(sentence)
    DBSession.flush()
    DBSession.refresh(sentence)
    assert sentence.files
    assert sentence.audio
def load(id_, data, files_dir, data_dir):
    """Load a Toolbox lexicon into the database for one language/dictionary.

    Python 2 code (uses the ``print`` statement). Reads
    ``Hoocak_lex_ld100.lex`` from ``data_dir``, then for every entry with an
    ``lx`` (lexeme) marker creates a Word plus associated unit data, media
    files, semantic-domain values, part-of-speech values and meaning
    counterparts. ``data`` appears to be a caching factory (``data.add`` /
    ``data["..."]``) and ``files_dir`` the target for file payloads.
    """
    d = Dictionary(data_dir.joinpath("Hoocak_lex_ld100.lex"))
    # Keep only entries that have a lexeme marker.
    d.entries = filter(lambda r: r.get("lx"), d.entries)
    lang = data["Language"][id_]
    vocab = data["Dictionary"][id_]

    # One UnitParameter for semantic domains, flushed so sd.pk is available.
    sd = common.UnitParameter(id="sd", name="semantic domain")
    DBSession.add(sd)
    DBSession.flush()
    # Register every distinct semantic-domain value as a domain element;
    # values starting with "??" are treated as unknown and skipped.
    for name in d.values("sd"):
        if name.startswith("??"):
            continue
        p = data.add(common.UnitDomainElement, name, id="sd-" + slug(name), name=name)
        p.unitparameter_pk = sd.pk
    DBSession.flush()

    for i, row in enumerate(d.entries):
        w = data.add(
            models.Word,
            row.get("lx"),
            id="%s-%s" % (id_, i),
            name=row.get("lx"),
            description="; ".join(row.getall("me")),
            dictionary=vocab,
        )
        w.language = lang
        # Homonym number; NOTE(review): bare except silently tolerates
        # non-numeric hm values, only echoing them to stdout.
        if row.get("hm"):
            try:
                w.number = int(row.get("hm"))
            except:
                print "---->", row.get("hm")
        DBSession.flush()

        # Simple key/value markers stored as Unit_data rows.
        for marker, label in [
            ("al", "alternative form"),
            ("cf", "conjugated form"),
            ("cc", "conjugation class"),
            ("mp", "metaphony"),
            ("is", "internal structure"),
        ]:
            for k, name in enumerate(row.getall(marker)):
                DBSession.add(common.Unit_data(key=label, value=name, ord=k, object_pk=w.pk))

        # Media markers: pc = picture, sf = sound file. A spec may be a
        # (path, mimetype) pair; NOTE(review): the bare except falls back
        # to a default mimetype when unpacking fails.
        for marker in ["pc", "sf"]:
            for l, spec in enumerate(row.getall(marker)):
                try:
                    p, mimetype = spec
                except:
                    p = spec
                    mimetype = "image/jpeg" if marker == "pc" else "audio/mpeg"
                # Paths in the lexicon use Windows separators.
                p = data_dir.joinpath(*p.split("\\"))
                with open(p, "rb") as fp:
                    f = common.Unit_files(
                        name=p.basename(), id=mimetype.split("/")[0], mime_type=mimetype, ord=l + 1, object_pk=w.pk
                    )
                    DBSession.add(f)
                    DBSession.flush()
                    DBSession.refresh(f)
                    # Copy the payload into the application's files dir.
                    f.create(files_dir, fp.read())

        # Semantic-domain values for this word.
        for j, name in enumerate(row.getall("sd")):
            if name.startswith("??"):
                continue
            DBSession.add(
                common.UnitValue(
                    id="sd-%s-%s" % (i, j),
                    unit=w,
                    unitparameter=sd,
                    unitdomainelement=data["UnitDomainElement"][name],
                    contribution=vocab,
                )
            )

        # Part of speech determines the gloss prefix used for meanings
        # ("to " for verbs, "the " for nouns).
        meaning_prefix = ""
        for j, name in enumerate(row.getall("ps")):
            if name.startswith("??"):
                continue
            # Normalize "v.inact" to "v.inact." — presumably to match the
            # keys of POS_MAP; verify against that mapping.
            if name == "v.inact":
                name += "."
            if name.startswith("v."):
                meaning_prefix = "to "
            elif name.startswith("n."):
                meaning_prefix = "the "
            if j > 0:
                # only one part-of-speech value per entry!
                raise ValueError
            DBSession.add(
                common.UnitValue(
                    id="pos-%s-%s" % (id_, i),
                    unit=w,
                    unitparameter=data["UnitParameter"]["pos"],
                    unitdomainelement=data["UnitDomainElement"][POS_MAP[name]],
                    contribution=vocab,
                )
            )

        # Link meanings to comparison-meaning parameters via counterparts.
        for j, name in enumerate(row.getall("me")):
            key = "%s%s" % (meaning_prefix, name.lower())
            if key in data["Meaning"]:
                meaning = data["Meaning"][key]
                # NOTE(review): the trailing comma makes vsid a 1-tuple,
                # so ValueSet caching is keyed by tuples here — looks
                # accidental; confirm against how data["ValueSet"] is
                # populated elsewhere.
                vsid = ("%s-%s" % (key, id_),)
                if vsid in data["ValueSet"]:
                    vs = data["ValueSet"][vsid]
                else:
                    vs = data.add(
                        common.ValueSet,
                        vsid,
                        id="%s-%s" % (id_, meaning.id),
                        language=lang,
                        contribution=vocab,
                        parameter=meaning,
                    )
                DBSession.add(models.Counterpart(id="%s-%s-%s" % (id_, i, j), name=row.get("lx"), valueset=vs, word=w))

    DBSession.flush()