Example no. 1
def freeze_func(args, dataset=None, with_history=True):
    """Dump each database table to CSV plus CSVW metadata and zip the dumps."""
    dataset = dataset or args.env['request'].dataset
    dump_dir = args.data_file('dumps')
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    with dump_dir.joinpath('README.txt').open('w', encoding='utf8') as fp:
        fp.write(freeze_readme(dataset, args.env['request']))

    db_version = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv = dump_dir.joinpath('%s.csv' % table.name)
        if with_history or not table.name.endswith('_history'):
            _freeze(table, csv)

        if csv.exists():
            csvm = '%s.%s' % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(csvm, args.env['request'],
                                           [(col.name, col)
                                            for col in table.columns])
            if db_version:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = db_version
            jsonlib.dump(doc, dump_dir.joinpath(csvm))

    with ZipFile(as_posix(args.data_file('..', 'data.zip')), 'w',
                 ZIP_DEFLATED) as zipfile:
        for f in dump_dir.iterdir():
            if f.is_file():
                with f.open('rb') as fp:
                    zipfile.writestr(f.name, fp.read())
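The `dc:identifier` trick above only pays off if the unfreeze side reads the revision back. Below is a minimal sketch of how that could look, assuming the metadata files sit next to the CSV dumps; the helper name and the `.csv-metadata.json` suffix are assumptions for illustration (the real suffix is whatever `CsvmJsonAdapter.extension` yields), not the actual clld unfreeze code.

import json
from pathlib import Path

def read_dump_db_version(dump_dir, suffix='.csv-metadata.json'):
    # Hypothetical helper: scan the CSVW metadata files written next to the
    # CSV dumps and return the Alembic revision stored as "dc:identifier".
    for meta in Path(dump_dir).glob('*' + suffix):
        doc = json.loads(meta.read_text(encoding='utf8'))
        if 'dc:identifier' in doc:
            return doc['dc:identifier']
    return None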
Example no. 2 (File: views.py, Project: clld/wals3)
def changes(request):
    """Collect the values changed per edition (2011, 2013, 2014), grouped by parameter."""
    # changes in the 2011 edition: values with an updated date after the 2009
    # cut-off and before the 2012 cut-off defined below
    E2009 = utc.localize(datetime(2009, 1, 1))
    E2012 = utc.localize(datetime(2012, 1, 1))
    E2014 = utc.localize(datetime(2014, 6, 30))
    E2015 = utc.localize(datetime(2015, 6, 30))

    history = inspect(Value.__history_mapper__).class_
    query = DBSession.query(Value)\
        .outerjoin(history, Value.pk == history.pk)\
        .join(ValueSet)\
        .order_by(ValueSet.parameter_pk, ValueSet.language_pk)\
        .options(joinedload_all(Value.valueset, ValueSet.language),
                 joinedload_all(Value.valueset, ValueSet.parameter))

    changes2011 = query.join(ValueSet.parameter)\
        .filter(Parameter.id.contains('A'))\
        .filter(Parameter.id != '143A')\
        .filter(Parameter.id != '144A')\
        .filter(or_(
            and_(E2009 < Value.updated, Value.updated < E2012),
            and_(history.updated != None,
                 E2009 < history.updated, history.updated < E2012)))

    changes2013 = query.filter(
        or_(and_(E2012 < Value.updated, Value.updated < E2014),
            and_(E2012 < history.updated, history.updated < E2014)))

    changes2014 = query.filter(
        or_(and_(E2014 < Value.updated, Value.updated < E2015),
            and_(E2014 < history.updated, history.updated < E2015)))

    #
    # TODO:
    #
    # history = inspect(ValueSet.__history_mapper__).class_
    # current = DBSession.query(ValueSet.pk).subquery()
    # removals2013 = DBSession.query(Parameter.id, Parameter.name, count(history.pk))\
    # .filter(Parameter.pk == history.parameter_pk)\
    # .filter(not_(history.pk.in_(current)))\
    # .group_by(Parameter.pk, Parameter.id, Parameter.name)\
    # .order_by(Parameter.pk)

    # itertools.groupby relies on the ORDER BY parameter_pk above to keep
    # valuesets belonging to the same parameter adjacent.
    grouped = lambda changes: groupby([v.valueset for v in changes],
                                      lambda vs: vs.parameter)
    return {
        'db_version': get_alembic_version(DBSession),
        'changes2011': grouped(changes2011),
        'changes2013': grouped(changes2013),
        'changes2014': grouped(changes2014),
        'removals2013': []
    }
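The values returned under `changes2011` etc. are `itertools.groupby` generators keyed by `Parameter`. A minimal sketch of how a caller (a template helper or a test, both assumed here) might walk them:

def summarize_changes(grouped_changes):
    # Each item is a (Parameter, iterator-of-ValueSets) pair produced by
    # itertools.groupby; count the changed valuesets per parameter.
    return [(parameter.id, sum(1 for _ in valuesets))
            for parameter, valuesets in grouped_changes]

# e.g.: summarize_changes(changes(request)['changes2011'])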
Example no. 3
def test_alembic_version(db):
    from clld.db.util import set_alembic_version, get_alembic_version

    assert get_alembic_version(db) != '1234'
    set_alembic_version(db, '1234')
    assert get_alembic_version(db) == '1234'
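For orientation, a rough sketch of what helpers like `get_alembic_version` and `set_alembic_version` typically do against Alembic's bookkeeping table; this illustrates the standard `alembic_version` table convention and is not the actual `clld.db.util` implementation.

from sqlalchemy import text

def get_alembic_version_sketch(session):
    # Alembic records the current migration revision in a single-row table
    # named alembic_version with one column, version_num.
    return session.execute(
        text("SELECT version_num FROM alembic_version")).scalar()

def set_alembic_version_sketch(session, version):
    # Overwrite whatever revision is currently recorded.
    session.execute(text("DELETE FROM alembic_version"))
    session.execute(
        text("INSERT INTO alembic_version (version_num) VALUES (:v)"),
        {"v": version})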