Example #1
    def test_CsvwJsonAdapter(self):
        from clld.web.adapters.csv import CsvmJsonAdapter

        adapter = CsvmJsonAdapter(None)
        res = adapter.render(
            datatables.Languages(self.env['request'], Language), self.env['request'])
        self.assertIn('tableSchema', json.loads(res))
        res = adapter.render(
            datatables.Valuesets(self.env['request'], ValueSet), self.env['request'])
        self.assertIn('foreignKeys', json.loads(res)['tableSchema'])
        adapter.render_to_response(
            datatables.Valuesets(self.env['request'], ValueSet), self.env['request'])
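
The render calls above return CSVW (CSV on the Web) metadata as a JSON string. Below is a minimal, standalone sketch of inspecting such a document with the standard json module; the sample payload is invented, only the tableSchema/columns layout is taken from the assertions above.

import json

# Hypothetical CSVW metadata payload, shaped like what the assertions above expect.
metadata_json = """
{
  "@context": "http://www.w3.org/ns/csvw",
  "url": "languages.csv",
  "tableSchema": {
    "columns": [
      {"name": "id", "datatype": "string"},
      {"name": "name", "datatype": "string"}
    ]
  }
}
"""

doc = json.loads(metadata_json)
assert 'tableSchema' in doc
print([col['name'] for col in doc['tableSchema']['columns']])  # ['id', 'name']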
Example #2
def freeze_func(args, dataset=None, with_history=True):
    dataset = dataset or args.env['request'].dataset
    dump_dir = args.data_file('dumps')
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    with dump_dir.joinpath('README.txt').open('w', encoding='utf8') as fp:
        fp.write(freeze_readme(dataset, args.env['request']))

    db_version = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv = dump_dir.joinpath('%s.csv' % table.name)
        if with_history or not table.name.endswith('_history'):
            _freeze(table, csv)

        if csv.exists():
            csvm = '%s.%s' % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(csvm, args.env['request'],
                                           [(col.name, col)
                                            for col in table.columns])
            if db_version:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = db_version
            jsonlib.dump(doc, dump_dir.joinpath(csvm))

    with ZipFile(as_posix(args.data_file('..', 'data.zip')), 'w',
                 ZIP_DEFLATED) as zipfile:
        for f in dump_dir.iterdir():
            if f.is_file():
                with f.open('rb') as fp:
                    zipfile.writestr(f.name, fp.read())
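
The final with-block bundles every file in the dump directory into a single zip archive. A self-contained sketch of that pattern follows; the directory and archive names are invented for illustration.

from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED

dump_dir = Path('dumps')      # hypothetical directory holding the CSV/JSON dumps
archive = Path('data.zip')    # hypothetical target archive

with ZipFile(str(archive), 'w', ZIP_DEFLATED) as zipfile:
    for f in dump_dir.iterdir():
        if f.is_file():
            # store each file under its bare name, without the directory prefix
            zipfile.writestr(f.name, f.read_bytes())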
Example #3
def freeze_func(args, dataset=None, with_history=True):
    dataset = dataset or args.env['request'].dataset
    dump_dir = args.data_file('dumps')
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    with dump_dir.joinpath('README.txt').open('w', encoding='utf8') as fp:
        fp.write(freeze_readme(dataset, args.env['request']))

    db_version = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv = dump_dir.joinpath('%s.csv' % table.name)
        if with_history or not table.name.endswith('_history'):
            _freeze(table, csv)

        if csv.exists():
            csvm = '%s.%s' % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(
                csvm, args.env['request'], [(col.name, col) for col in table.columns])
            if db_version:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = db_version  # pragma: no cover
            jsonlib.dump(doc, dump_dir.joinpath(csvm))

    with ZipFile(
            as_posix(args.data_file('..', 'data.zip')), 'w', ZIP_DEFLATED) as zipfile:
        for f in dump_dir.iterdir():
            if f.is_file():
                with f.open('rb') as fp:
                    zipfile.writestr(f.name, fp.read())
Example #4
def freeze_func(args, dataset=None, with_history=True):
    dataset = dataset or args.env["request"].dataset
    dump_dir = args.data_file("dumps")
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    with dump_dir.joinpath("README.txt").open("w", encoding="utf8") as fp:
        fp.write(freeze_readme(dataset, args.env["request"]))

    db_version = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv = dump_dir.joinpath("%s.csv" % table.name)
        if with_history or not table.name.endswith("_history"):
            _freeze(table, csv)

        if csv.exists():
            csvm = "%s.%s" % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(csvm, args.env["request"], [(col.name, col) for col in table.columns])
            if db_version:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = db_version
            jsonlib.dump(doc, dump_dir.joinpath(csvm))

    with ZipFile(as_posix(args.data_file("..", "data.zip")), "w", ZIP_DEFLATED) as zipfile:
        for f in dump_dir.iterdir():
            if f.is_file():
                with f.open("rb") as fp:
                    zipfile.writestr(f.name, fp.read())
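
Each dumped table gets a sidecar CSVW metadata file; when an alembic revision is known, it is smuggled into the document as dc:identifier so the unfreeze script can compare database versions. Here is a sketch of that metadata-writing step using the standard library json module in place of clld's jsonlib; the file name and revision value are made up.

import json
from pathlib import Path

doc = {"@context": "http://www.w3.org/ns/csvw", "url": "language.csv"}
db_version = "a1b2c3d4e5f6"  # hypothetical alembic revision id

if db_version:
    # carry the database revision along for the unfreeze script to check
    doc["dc:identifier"] = db_version

Path("language.csv-metadata.json").write_text(
    json.dumps(doc, indent=4), encoding="utf8")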
Example #5
    def test_CsvwJsonAdapter(self):
        from clld.web.adapters.csv import CsvmJsonAdapter

        adapter = CsvmJsonAdapter(None)
        res = adapter.render(
            datatables.Languages(self.env['request'], Language),
            self.env['request'])
        self.assertIn('tableSchema', json.loads(res))
        res = adapter.render(
            datatables.Valuesets(self.env['request'], ValueSet),
            self.env['request'])
        self.assertIn('foreignKeys', json.loads(res)['tableSchema'])
        adapter.render_to_response(
            datatables.Valuesets(self.env['request'], ValueSet),
            self.env['request'])
Example #6
def test_CsvwJsonAdapter(request_factory, env):
    from clld.web.adapters.csv import CsvmJsonAdapter

    adapter = CsvmJsonAdapter(None)
    res = json.loads(
        adapter.render(datatables.Languages(env['request'], Language),
                       env['request']))
    assert res['tableSchema']['columns'] != []

    res = adapter.render(datatables.Valuesets(env['request'], ValueSet),
                         env['request'])
    assert 'foreignKeys' in json.loads(res)['tableSchema']
    adapter.render_to_response(datatables.Valuesets(env['request'], ValueSet),
                               env['request'])

    with request_factory(params={'sSearch_0': 'xyz'}) as req:
        res = json.loads(
            adapter.render(datatables.Languages(req, Language), req))
        assert res['tableSchema']['columns'] == []
Example #7
def test_CsvwJsonAdapter(request_factory, env):
    from clld.web.adapters.csv import CsvmJsonAdapter

    adapter = CsvmJsonAdapter(None)
    res = json.loads(adapter.render(
        datatables.Languages(env['request'], Language), env['request']))
    assert res['tableSchema']['columns'] != []

    res = adapter.render(
        datatables.Valuesets(env['request'], ValueSet), env['request'])
    assert 'foreignKeys' in json.loads(res)['tableSchema']
    adapter.render_to_response(
        datatables.Valuesets(env['request'], ValueSet), env['request'])

    with request_factory(params={'sSearch_0': 'xyz'}) as req:
        res = json.loads(adapter.render(datatables.Languages(req, Language), req))
        assert res['tableSchema']['columns'] == []
Example #8
    def create(self, req, filename=None, verbose=True):
        p = self.abspath(req)
        if not p.parent.exists():  # pragma: no cover
            p.parent.mkdir()
        tmp = Path('%s.tmp' % p)

        language_url_pattern = self.route_url_pattern(req, 'language')

        with ZipFile(tmp.as_posix(), 'w', ZIP_DEFLATED) as zipfile:
            tables = []
            for param in DBSession.query(Parameter).options(joinedload(Parameter.domain)):
                fname = '%s-%s.csv' % (req.dataset.id, param.id)
                zipfile.writestr(fname, self.get_values(param, language_url_pattern))
                tables.append({
                    '@type': 'Table',
                    'url': fname,
                    'notes': [
                        {
                            '@id': req.resource_url(param),
                            'dc:identifier': param.id,
                            'dc:title': param.name,
                            'dc:description': param.description or ''}] + [
                        {
                            '@type': 'DomainElement',
                            'name': de.name,
                            'description': de.description,
                            'numeric': de.number
                        } for de in param.domain
                    ],
                })

            md = CsvmJsonAdapter.csvm_basic_doc(req, tables=tables)
            md.update({
                '@type': 'TableGroup',
                'dc:language': list(self.get_languages(req, language_url_pattern)),
                'tableSchema': {
                    "columns": [
                        {
                            "name": "ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Language_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Parameter_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Contribution_ID",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Value",
                            "datatype": "string",
                            "required": True
                        },
                        {
                            "name": "Source",
                            "datatype": "string",
                        },
                        {
                            "name": "Comment",
                            "datatype": "string",
                        },
                    ],
                    "primaryKey": "ID",
                    'aboutUrl': self.route_url_pattern(req, 'value', '{ID}'),
                },
            })
            zipfile.writestr(
                '%s.csv-metadata.json' % req.dataset.id, json.dumps(md, indent=4))
            bib = Database([
                rec.bibtex() for rec in DBSession.query(Source).order_by(Source.name)])
            zipfile.writestr('%s.bib' % req.dataset.id, ('%s' % bib).encode('utf8'))
            zipfile.writestr(
                'README.txt',
                README.format(
                    req.dataset.name,
                    '=' * (
                        len(req.dataset.name)
                        + len(' data download')),
                    req.dataset.license,
                    TxtCitation(None).render(req.dataset, req)).encode('utf8'))
        if p.exists():  # pragma: no cover
            remove(p)
        move(tmp, p)
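
The column specifications in the tableSchema above are written out by hand. If that block grew, it could be generated with a small helper like the one below; the helper name is hypothetical and only the column names mirror the example.

def csvw_column(name, required=False, datatype="string"):
    """Build one CSVW column specification dict."""
    spec = {"name": name, "datatype": datatype}
    if required:
        spec["required"] = True
    return spec

columns = (
    [csvw_column(n, required=True)
     for n in ("ID", "Language_ID", "Parameter_ID", "Contribution_ID", "Value")]
    + [csvw_column(n) for n in ("Source", "Comment")]
)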