def freeze_func(args, dataset=None, with_history=True):
    """Dump all database tables to CSV and bundle the dump into ``data.zip``.

    :param args: CLI/environment object providing ``env['request']`` and a \
    ``data_file`` path factory.
    :param dataset: Dataset to describe in the dump's README; defaults to the \
    request's dataset.
    :param with_history: If ``False``, tables whose names end in ``_history`` \
    are not dumped.
    """
    dataset = dataset or args.env['request'].dataset
    dump_dir = args.data_file('dumps')
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    # Human-readable description of the dump, alongside the CSV files.
    with dump_dir.joinpath('README.txt').open('w', encoding='utf8') as fp:
        fp.write(freeze_readme(dataset, args.env['request']))

    db_version = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv = dump_dir.joinpath('%s.csv' % table.name)
        if with_history or not table.name.endswith('_history'):
            _freeze(table, csv)

        if csv.exists():
            # Write a CSVW metadata document next to each dumped CSV file.
            csvm = '%s.%s' % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(
                csvm, args.env['request'], [(col.name, col) for col in table.columns])
            if db_version:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = db_version  # pragma: no cover
            jsonlib.dump(doc, dump_dir.joinpath(csvm))

    # Bundle everything (README, CSVs, metadata) into a sibling data.zip.
    with ZipFile(
            as_posix(args.data_file('..', 'data.zip')), 'w', ZIP_DEFLATED) as zipfile:
        for f in dump_dir.iterdir():
            if f.is_file():
                with f.open('rb') as fp:
                    zipfile.writestr(f.name, fp.read())
def freeze_func(args, dataset=None, with_history=True):
    """Export every ORM table as CSV plus CSVW metadata and zip the result.

    The dump is written to ``args.data_file('dumps')`` and archived as
    ``data.zip`` one directory above it.

    :param args: Environment object exposing ``env['request']`` and ``data_file``.
    :param dataset: Dataset used for the README text; falls back to the request's.
    :param with_history: When ``False``, skip dumping ``*_history`` tables.
    """
    dataset = dataset or args.env['request'].dataset
    dump_dir = args.data_file('dumps')
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    readme_path = dump_dir.joinpath('README.txt')
    with readme_path.open('w', encoding='utf8') as readme:
        readme.write(freeze_readme(dataset, args.env['request']))

    alembic_rev = get_alembic_version(DBSession)

    for tbl in Base.metadata.sorted_tables:
        csv_path = dump_dir.joinpath('%s.csv' % tbl.name)
        # History tables are dumped only when requested.
        skip = tbl.name.endswith('_history') and not with_history
        if not skip:
            _freeze(tbl, csv_path)

        # The CSV may also pre-exist from an earlier run; describe it either way.
        if csv_path.exists():
            meta_name = '%s.%s' % (tbl.name, CsvmJsonAdapter.extension)
            columns = [(col.name, col) for col in tbl.columns]
            meta = CsvmJsonAdapter.csvm_doc(meta_name, args.env['request'], columns)
            if alembic_rev:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                meta["dc:identifier"] = alembic_rev
            jsonlib.dump(meta, dump_dir.joinpath(meta_name))

    archive_path = as_posix(args.data_file('..', 'data.zip'))
    with ZipFile(archive_path, 'w', ZIP_DEFLATED) as archive:
        for entry in dump_dir.iterdir():
            if not entry.is_file():
                continue
            with entry.open('rb') as stream:
                archive.writestr(entry.name, stream.read())
def freeze_func(args, dataset=None, with_history=True):
    """Freeze the database contents: per-table CSV dumps zipped into data.zip.

    :param args: Environment object with ``env["request"]`` and a ``data_file``
        helper returning paths under the app's data directory.
    :param dataset: Dataset for the README; defaults to the request's dataset.
    :param with_history: Dump ``*_history`` tables only if truthy.
    """
    request = args.env["request"]
    dataset = dataset or request.dataset

    dump_dir = args.data_file("dumps")
    if not dump_dir.exists():
        dump_dir.mkdir()
    dump_dir = dump_dir.resolve()

    with (dump_dir / "README.txt").open("w", encoding="utf8") as out:
        out.write(freeze_readme(dataset, request))

    revision = get_alembic_version(DBSession)

    for table in Base.metadata.sorted_tables:
        csv_file = dump_dir / ("%s.csv" % table.name)
        if with_history or not table.name.endswith("_history"):
            _freeze(table, csv_file)

        if csv_file.exists():
            # Emit a CSVW description for the dumped table.
            meta_filename = "%s.%s" % (table.name, CsvmJsonAdapter.extension)
            doc = CsvmJsonAdapter.csvm_doc(
                meta_filename,
                request,
                [(column.name, column) for column in table.columns])
            if revision:
                # We (ab)use a dc:identifier property to pass the alembic revision of the
                # database to the unfreeze script.
                doc["dc:identifier"] = revision
            jsonlib.dump(doc, dump_dir / meta_filename)

    target = as_posix(args.data_file("..", "data.zip"))
    with ZipFile(target, "w", ZIP_DEFLATED) as bundle:
        for item in dump_dir.iterdir():
            if item.is_file():
                with item.open("rb") as source:
                    bundle.writestr(item.name, source.read())