def test_relationships(db):
    # commented-out dependencies are the dependencies that aren't tracked directly by postgres
    with S(db) as s:
        s.execute(CREATES)

    with S(db) as s:
        i = get_inspector(s)

        dependencies_by_name = {
            k: v.dependent_on for k, v in i.selectables.items() if v.dependent_on
        }

        assert dependencies_by_name == {
            # '"public"."depends_on_vvv"(t text)': [
            #     '"public"."vvv"'
            # ],
            '"public"."depends_on_fff"': ['"public"."fff"(t text)'],
            '"public"."doubledep"': [
                '"public"."depends_on_fff"',
                '"public"."depends_on_vvv"(t text)',
            ],
        }

        dependents_by_name = {
            k: v.dependents for k, v in i.selectables.items() if v.dependents
        }

        assert dependents_by_name == {
            # '"public"."vvv"': ['"public"."depends_on_vvv"(t text)'],
            '"public"."fff"(t text)': ['"public"."depends_on_fff"'],
            '"public"."depends_on_fff"': ['"public"."doubledep"'],
            '"public"."depends_on_vvv"(t text)': ['"public"."doubledep"'],
        }

        # testing recursive deps
        dependencies_by_name = {
            k: v.dependent_on_all for k, v in i.selectables.items() if v.dependent_on_all
        }

        assert dependencies_by_name == {
            # '"public"."depends_on_vvv"(t text)': [
            #     '"public"."vvv"'
            # ],
            '"public"."depends_on_fff"': ['"public"."fff"(t text)'],
            '"public"."doubledep"': [
                '"public"."depends_on_fff"',
                '"public"."depends_on_vvv"(t text)',
                '"public"."fff"(t text)',
            ],
            # '"public"."vvv"'
        }

        dependents_by_name = {
            k: v.dependents_all for k, v in i.selectables.items() if v.dependents_all
        }

        assert dependents_by_name == {
            # '"public"."vvv"': ['"public"."depends_on_vvv"(t text)', '"public"."doubledep"'],
            '"public"."fff"(t text)': [
                '"public"."depends_on_fff"',
                '"public"."doubledep"',
            ],
            '"public"."depends_on_fff"': ['"public"."doubledep"'],
            '"public"."depends_on_vvv"(t text)': ['"public"."doubledep"'],
        }

def db_differences(db_url_a, db_url_b, **kwargs):
    with S(db_url_a, poolclass=NullPool) as a, S(db_url_b, poolclass=NullPool) as b:
        m = Migration(a, b, **kwargs)
        m.set_safety(False)
        m.add_all_changes()
        return m.sql

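# A minimal usage sketch for db_differences above, assuming two reachable
# Postgres databases; the URLs below are hypothetical placeholders, not
# databases defined elsewhere in this repo.
if __name__ == '__main__':
    diff_sql = db_differences(
        'postgresql:///example_current',  # hypothetical "current" database
        'postgresql:///example_target',   # hypothetical "target" database
    )
    print(diff_sql or '-- no differences found')
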
def pending(write_to_file=False):
    with temporary_db() as CURRENT_DB_URL, temporary_db() as TARGET_DB_URL:
        load_current_production_state(CURRENT_DB_URL)
        load_current_staging_state(TARGET_DB_URL)

        with S(CURRENT_DB_URL) as s_current, S(TARGET_DB_URL) as s_target:
            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            print('-- Pending (prod -> staging):\n\n{}'.format(m.sql))

            if write_to_file:
                with io.open('DB/migration/pending/pending.sql', 'w') as w:
                    w.write(m.sql)

    with temporary_db() as CURRENT_DB_URL, temporary_db() as TARGET_DB_URL:
        load_current_staging_state(CURRENT_DB_URL)
        load_from_app_model(TARGET_DB_URL)

        with S(CURRENT_DB_URL) as s_current, S(TARGET_DB_URL) as s_target:
            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            print('-- Pending (staging -> models):\n\n{}'.format(m.sql))

            if write_to_file:
                with io.open('DB/migration/pending/pending.sql', 'w') as w:
                    w.write(m.sql)

def sync(database):
    if os.getenv('HOSTINGENV') == 'DEV':
        db_url_format = 'postgresql://*****:*****@postgres:5432/%s'
    else:
        db_url_format = 'postgresql://*****:*****@/%s?host=/var/run/postgresql'

    temp_db_url = db_url_format % f'{database}temp'
    db_url = db_url_format % database

    with temp_db(temp_db_url) as s_target_temp:
        create_database(db_url)

        with S(db_url) as s_current, S(s_target_temp) as s_target:
            run_all(f'{database}/migrations', s_current)
            run_all(f'{database}/schemas', s_target)
            run_all(f'{database}/tables', s_target)

            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            if m.statements:
                print('THE FOLLOWING CHANGES ARE PENDING:', end='\n\n')
                print(m.sql)
                print()

                if (len(sys.argv) > 1 and sys.argv[1] == 'noconfirm') or input('Apply these changes? (y/n) ') == 'y':
                    print('Applying...')
                    m.apply()
                else:
                    print('Not applying.')
            else:
                print('Already synced.')

def dump():
    from sqlbag import S, temporary_database as temporary_db, load_sql_from_file
    from migra import Migration

    DB_URL_FUTURE = 'postgresql:///cuedev'

    with temporary_db() as DB_URL_CURRENT:
        with S(DB_URL_FUTURE) as s_current, S(DB_URL_CURRENT) as s_target:
            load_sql_from_file(s_target, './host.sql')

            m = Migration(s_target, s_current)
            m.set_safety(False)
            m.add_all_changes()

            if m.statements:
                print('THE FOLLOWING CHANGES ARE NOT IMMORTALISED:', end='\n\n')
                print(m.sql)
                print()

                # only overwrite host.sql once the dump has been confirmed
                if input('Dump Dev to host.sql?') == 'yes':
                    print('Dumping...')
                    os.system(
                        'pg_dump -s --no-comments --no-owner --no-acl --no-tablespaces postgresql:///cuedev | sed -e "/^--/d" > host.sql'
                    )
                else:
                    print('Not applying.')
            else:
                print('Already synced.')

def sync(DB_URL: str = "postgresql://orflaedi:@localhost/orflaedi"):
    from sqlbag import S, temporary_database as temporary_db

    with temporary_db() as TEMP_DB_URL:
        os.environ["DATABASE_URL"] = TEMP_DB_URL

        from orflaedi.database import engine, Base
        # importing the models registers them on Base.metadata for create_all
        from orflaedi.models import Model, Retailer, VehicleClassEnum, TagEnum

        Base.metadata.create_all(engine)

        with S(DB_URL) as s_current, S(TEMP_DB_URL) as s_target:
            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            if m.statements:
                print("THE FOLLOWING CHANGES ARE PENDING:", end="\n\n")
                print(m.sql)
                print()

                if input("Apply these changes?") == "yes":
                    print("Applying...")
                    m.apply()
                else:
                    print("Not applying.")
            else:
                print("Already synced.")

def main():
    with S(DB, echo=False) as s:
        s.execute("""
            drop table if exists single;
        """)
        s.execute("""
            create table if not exists single(id serial, title text, year int, peak_position int)
        """)

    with S(DB, echo=False) as s:
        for line in SINGLES.splitlines():
            title, year, peak = line.rsplit(' ', 2)
            single = Single(title=title, year=year, peak_position=peak)
            s.add(single)

    with S(DB, echo=False) as s:
        q = s.query(Single).order_by(
            Single.peak_position, desc(Single.year), Single.title, desc(Single.id))

        bookmark = None

        while True:
            p = get_page(q, per_page=PER_PAGE, page=bookmark)
            print_page(p)
            bookmark = p.paging.bookmark_next
            if not p.paging.has_next:
                break

def test_create_all_and_migrations_produce_same_result(two_databases, pytestconfig):
    """
    Verify that running all migrations from top to bottom produces the same
    result as setting the database up from scratch using metadata.create_all().
    """
    create_all_url, migrations_url = two_databases

    create_all_engine = sa.create_engine(create_all_url)
    Base.metadata.create_all(create_all_engine)
    create_all_engine.dispose()

    create_config = get_alembic_config(pytestconfig.rootdir, create_all_url)
    stamp(create_config, "head")

    config = get_alembic_config(pytestconfig.rootdir, migrations_url)
    upgrade(config, "head")

    with S(migrations_url) as s_migrations, S(create_all_url) as s_create_all:
        m = migra.Migration(s_migrations, s_create_all)
        m.set_safety(False)
        m.add_all_changes()

        if m.statements:
            sql = m.sql.strip().replace("\n\n", "\n")
            sql = textwrap.indent(sql, 4 * " ")
            pytest.fail(fail_message.format(sql))

def run(args, out=None, err=None):
    if not out:
        out = sys.stdout  # pragma: no cover
    if not err:
        err = sys.stderr  # pragma: no cover

    with S(args.dburl_from) as s0, S(args.dburl_target) as s1:
        m = Migration(s0, s1)

        if args.unsafe:
            m.set_safety(False)

        m.add_all_changes()

        try:
            print(m.sql, file=out)
        except UnsafeMigrationException:
            print(
                '-- ERROR: destructive statements generated. Use the --unsafe flag to suppress this error.',
                file=err)
            return 3

        if not m.statements:
            return 0
        else:
            return 2

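# A sketch of wiring run() above into a command-line entry point. The argument
# names mirror the attributes run() reads (dburl_from, dburl_target, unsafe);
# the argparse setup itself is an assumption, not taken from the original code.
def parse_args(argv):
    import argparse
    parser = argparse.ArgumentParser(description='Print schema differences between two databases.')
    parser.add_argument('dburl_from', help='URL of the database to bring up to date')
    parser.add_argument('dburl_target', help='URL of the database to match')
    parser.add_argument('--unsafe', action='store_true',
                        help='allow destructive statements to be generated')
    return parser.parse_args(argv)


def cli():
    args = parse_args(sys.argv[1:])
    return run(args)
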
def test_errors_and_messages(db):
    assert pg_errorname_lookup(22005) == "ERROR_IN_ASSIGNMENT"

    with S(db) as s:
        s.execute("drop table if exists x")
        assert pg_notices(s) == ['NOTICE: table "x" does not exist, skipping\n']
        assert pg_notices(s, wipe=True) == ['NOTICE: table "x" does not exist, skipping\n']
        assert pg_notices(s) == []

        out = io.StringIO()
        s.execute("drop table if exists x")
        pg_print_notices(s, out=out)
        assert out.getvalue() == 'NOTICE: table "x" does not exist, skipping'

        out = io.StringIO()
        pg_print_notices(s, out=out)
        assert out.getvalue() == ""

        s.execute("create table x(id text)")

    try:
        with S(db) as s:
            s.execute("create table x(id text)")
    except DB_ERROR_TUPLE as e:
        assert errorcode_from_error(e) == "42P07"

def get_migrations():  # no test coverage
    setup_test()
    base = "postgresql+psycopg2://"
    with S(base, creator=connect) as current, S(base, creator=connect_test) as target:
        m = migra.Migration(current, target)
        return m

def test_collations(db):
    with S(db) as s:
        s.execute("""
            CREATE TABLE measurement (
                city_id int not null,
                logdate date not null,
                peaktemp int,
                unitsales int
            );

            create schema x;

            CREATE COLLATION x.german (provider = icu, locale = 'de-DE-x-icu');
            CREATE COLLATION naturalsort (provider = icu, locale = 'en-u-kn-true');
        """)

        i = get_inspector(s)

        assert list(i.collations) == ['"public"."naturalsort"', '"x"."german"']

        gc = i.collations['"x"."german"']
        assert (
            gc.create_statement
            == """create collation if not exists "x"."german" (provider = 'icu', locale = 'de-DE-x-icu');"""
        )

        nc = i.collations['"public"."naturalsort"']
        assert (
            nc.create_statement
            == """create collation if not exists "public"."naturalsort" (provider = 'icu', locale = 'en-u-kn-true');"""
        )

        assert gc == gc
        assert gc != nc

    with S(db) as s:
        s.execute("""
            CREATE TABLE tt (
                id int,
                t text,
                tde text collate "POSIX"
            );
        """)

        i = get_inspector(s)

        tt = i.tables['"public"."tt"']

        t = tt.columns["t"]
        tde = tt.columns["tde"]

        assert t.collation is None
        assert tde.collation == "POSIX"

        assert (
            t.alter_data_type_clause
            == 'alter column "t" set data type text using "t"::text'
        )
        assert (
            tde.alter_data_type_clause
            == 'alter column "tde" set data type text collate "POSIX" using "tde"::text'
        )

def databases_are_equal(dburl_a, dburl_b):
    with S(dburl_a) as s0, S(dburl_b) as s1:
        m = Migration(s0, s1)
        m.set_safety(False)
        m.add_all_changes()

        if m.statements:
            print('DIFFERENCES FOUND:')
            print(m.sql)

        return not m.statements

def _get_schema_diff(
    from_db_url: str, target_db_url: str
) -> Generator[Tuple[str, Migration], Any, Any]:
    """Get schema diff between two databases using djrobstep/migra."""
    with S(from_db_url) as from_schema_session, \
            S(target_db_url) as target_schema_session:
        migration = Migration(from_schema_session, target_schema_session)
        migration.set_safety(False)
        migration.add_all_changes()
        yield migration.sql, migration

def check_migration_result():
    with temporary_db() as CURRENT_DB_URL, temporary_db() as TARGET_DB_URL:
        load_post_migration_state(CURRENT_DB_URL)
        load_from_app_model(TARGET_DB_URL)

        with S(CURRENT_DB_URL) as s_current, S(TARGET_DB_URL) as s_target:
            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            print('Differences:\n{}'.format(m.sql))

def staging_errors():
    with temporary_db() as CURRENT_DB_URL, temporary_db() as TARGET_DB_URL:
        load_current_staging_state(CURRENT_DB_URL)
        load_current_production_state(TARGET_DB_URL)

        with S(CURRENT_DB_URL) as s_current, S(TARGET_DB_URL) as s_target:
            m = Migration(s_current, s_target)
            m.set_safety(False)
            m.add_all_changes()

            print('Differences:\n{}'.format(m.sql))

def load_from_app_model(dburl):
    def create_tables(include=None, exclude=None):
        tables = set(
            t for t in db.metadata.tables.values()
            if ((include is None) or t.name in include)
            and ((exclude is None) or t.name not in exclude)
        )
        db.metadata.create_all(s.bind.engine, tables=tables)

    with S(dburl) as s:
        load_sql_from_file(s, 'DB/migration/setup-pre.sql')
        create_tables()

    with S(dburl) as s:
        load_sql_from_file(s, 'DB/migration/setup-post.sql')

def fixture_setup(dburl):
    COUNT = 10

    with S(dburl) as s:
        Base.metadata.create_all(s.connection())

    with S(dburl) as s:
        for x in range(COUNT):
            b = Book(name='Book {}'.format(x), a=x, b=x % 2, c=COUNT - x, d=99)

            if x == 1:
                b.a = None
                b.author = Author(name='Willy Shakespeare')

            s.add(b)

def do_migration(REAL_DB_URL):
    PENDING = sql_from_folder(PENDING_FOLDER)

    with tempfolder() as tempf:
        outfile = os.path.join(tempf, 'schemadump.sql')
        do_schema_dump(REAL_DB_URL, outfile)

        for i in range(len(PENDING) + 1):
            ATTEMPTING = list(reversed(PENDING))[:i]
            ATTEMPTING.reverse()

            print("TESTING MIGRATION USING LAST {} MIGRATION FILES".format(i))

            with temporary_db() as dummy_db_url, temporary_db() as target_db_url:
                with S(dummy_db_url) as s_dummy:
                    load_sql_from_file(s_dummy, outfile)

                    try:
                        for migration_sql in ATTEMPTING:
                            raw_execute(s_dummy, migration_sql)
                    except DB_ERROR_TUPLE as e:
                        print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS FAILED, MOVING TO NEXT'.format(i))
                        continue

                load_from_app_model(target_db_url)

                if databases_are_equal(dummy_db_url, target_db_url):
                    print('APPLYING LAST {} PENDING FILES'.format(i))

                    with S(REAL_DB_URL) as s_real:
                        for migration_sql in ATTEMPTING:
                            if not DRY_RUN:
                                print("EXECUTING:")
                                print(migration_sql)
                                raw_execute(s_real, migration_sql)
                            else:
                                print('DRY RUN, would apply:')
                                print(migration_sql)

                    print('SUCCESS: DATABASE UP TO DATE.')
                    return 0
                else:
                    print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS DOES NOT GIVE A CORRECT OUTCOME, MOVING TO NEXT'.format(i))

    print('COULD NOT FIND A CORRECT MIGRATION PATH :(')
    return 1

def sync(args: List[str]) -> None:
    """
    Compare live database to application schema & apply changes to database.

    Uses running database specified for application via `DB_[USER|PASS|HOST|NAME]`
    environment variables & compares to application schema defined at
    `./src/models/**/*.sql`.
    """
    # define if prompts are needed or not
    no_prompt = False
    if 'noprompt' in args:
        no_prompt = True

    # create temp database for app schema
    with _temp_db(host=DB_HOST, user=DB_USER, password=DB_PASS) as temp_db_url:
        print(f'db url: {DB_URL}')
        print(f'temp url: {temp_db_url}')

        # create sessions for current db state & target schema
        with S(DB_URL) as from_schema_session, \
                S(temp_db_url) as target_schema_session:
            # load target schema to temp db
            _load_from_app(target_schema_session)

            # diff target db & current db
            migration = Migration(from_schema_session, target_schema_session)
            migration.set_safety(False)
            migration.add_all_changes()

            # handle changes
            if migration.statements:
                print('\nTHE FOLLOWING CHANGES ARE PENDING:', end='\n\n')
                print(migration.sql)

                if no_prompt:
                    print('Applying...')
                    migration.apply()
                    print('Changes applied.')
                else:
                    if _prompt('Apply these changes?'):
                        print('Applying...')
                        migration.apply()
                        print('Changes applied.')
                    else:
                        print('Not applying.')
            else:
                print('Already synced.')

def test_dynamic_timeout(db):
    def get_timeout():
        return -1

    for n in await_pg_notifications(
        db,
        ["hello", "hello2"],
        timeout=get_timeout,
        yield_on_timeout=True,
        notifications_as_list=True,
        handle_signals=SIGNALS_TO_HANDLE,
    ):
        if n is None:
            with S(db) as s:
                s.execute("notify hello, 'here is my message'")
        elif isinstance(n, int):
            sig = signal.Signals(n)
            assert sig.name == "SIGINT"
            assert n == signal.SIGINT
            break
        else:
            assert len(n) == 1
            _n = n[0]
            assert _n.channel == "hello"
            assert _n.payload == "here is my message"
            os.kill(os.getpid(), signal.SIGINT)

    with raises(KeyboardInterrupt):
        for n in await_pg_notifications(db, "hello", timeout=0.1, yield_on_timeout=True):
            os.kill(os.getpid(), signal.SIGINT)

def test_dep_order(db):
    with S(db) as s:
        i = get_inspector(s)

        if i.pg_version <= 10:
            return

        # s.execute(CREATES)
        s.execute(CREATES_FK)

        i = get_inspector(s)

        # dependency_order doesn't work in py2
        if sys.version_info < (3, 0):
            return

        create_order = i.dependency_order(include_fk_deps=True)

        drop_order = i.dependency_order(
            drop_order=True,
            include_fk_deps=True,
        )

        for x in drop_order:
            thing = i.get_dependency_by_signature(x)
            drop = thing.drop_statement
            s.execute(drop)

        for x in create_order:
            thing = i.get_dependency_by_signature(x)
            create = thing.create_statement
            s.execute(create)

def test_basic_schemainspect():
    a = ColumnInfo('a', 'text', str)
    a2 = ColumnInfo('a', 'text', str)
    b = ColumnInfo('b', 'varchar', str, dbtypestr='varchar(10)')
    b2 = ColumnInfo('b', 'text', str, dbtypestr='text', default="'d'::text", not_null=True)

    assert a == a2
    assert a == a
    assert a != b
    assert b != b2

    alter = b2.alter_table_statements(b, 't')
    assert alter == [
        'alter table t alter column "b" set default \'d\'::text;',
        'alter table t alter column "b" set not null;',
        'alter table t alter column "b" set data type text;',
    ]

    alter = b.alter_table_statements(b2, 't')
    assert alter == [
        'alter table t alter column "b" drop default;',
        'alter table t alter column "b" drop not null;',
        'alter table t alter column "b" set data type varchar(10);',
    ]

    # note: the two comparisons below are not asserted, so they have no effect
    b.add_column_clause == 'add column "b"'
    b.drop_column_clause == 'drop column "b"'

    with temporary_database('sqlite') as dburl:
        with raises(NotImplementedError):
            with S(dburl) as s:
                get_inspector(s)

@contextmanager
def arg_context(x):
    # yields None for the special value "EMPTY", otherwise an open session for
    # the given database URL; the @contextmanager decorator is assumed here,
    # since the function yields exactly once and is meant for use in a with-block
    if x == "EMPTY":
        yield None
    else:
        with S(x) as s:
            yield s

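# A short usage sketch for arg_context above: the literal "EMPTY" yields None
# (compare against nothing), any other value is treated as a database URL and
# yields an open session. describe_connection is a hypothetical helper.
def describe_connection(url_or_empty):
    with arg_context(url_or_empty) as s:
        if s is None:
            print('-- comparing against an empty schema')
        else:
            print('-- connected to:', s.bind.url)
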
def test_postgres_inspect_singleschema(db):
    with S(db) as s:
        setup_pg_schema(s)

        i = get_inspector(s, schema='otherschema')
        asserts_pg_singleschema(i, 'otherschema')

        i = get_inspector(s, schema='public')
        asserts_pg_singleschema(i, 'public')

def test_orm_aggregated(dburl):
    count = func.count().label("count")
    spec = [desc(count), desc(Author.name), Author.id]

    with S(dburl, echo=ECHO) as s:
        q = s.query(Author, count).join(Author.books).group_by(Author).order_by(*spec)
        check_paging_orm(q=q)

def test_migration(shelf_filename, cld_host, dbexport_host, username, password, service_binding):
    PENDING = sql_from_folder(PENDING_FOLDER)

    with tempfolder() as tempf:
        outfile = os.path.join(tempf, 'schemadump.sql')
        do_schema_dump(outfile, cld_host, dbexport_host, username, password, service_binding)

        for i in range(len(PENDING) + 1):
            ATTEMPTING = list(reversed(PENDING))[:i]
            ATTEMPTING.reverse()

            print("TESTING MIGRATION USING LAST {} MIGRATION FILES".format(i))

            with temporary_db() as dummy_db_url, temporary_db() as target_db_url:
                with S(dummy_db_url) as s_dummy:
                    load_sql_from_file(s_dummy, outfile)

                    try:
                        for migration_sql in ATTEMPTING:
                            raw_execute(s_dummy, migration_sql)
                    except DB_ERROR_TUPLE:
                        print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS FAILED, MOVING TO NEXT'.format(i))
                        continue

                load_from_app_model(target_db_url)

                if databases_are_equal(dummy_db_url, target_db_url):
                    print('SUCCESS WITH LAST {} PENDING FILES'.format(i))
                    shelve_result(shelf_filename, ATTEMPTING)
                    return 0
                else:
                    print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS DOES NOT GIVE A CORRECT OUTCOME, MOVING TO NEXT'.format(i))

    print('COULD NOT FIND A CORRECT MIGRATION PATH :(')
    return 1

def test_orm_recursive_cte(pg_only_dburl):
    with S(pg_only_dburl, echo=ECHO) as s:
        # Start with "origins": books that don't have prequels
        seed = s.query(Book.id.label("id"), Book.id.label("origin")).filter(
            Book.prequel_id.is_(None)
        )

        # Recurse by picking up sequels
        sequel = aliased(Book, name="sequel")
        recursive = seed.cte(recursive=True)
        recursive = recursive.union(
            s.query(sequel.id, recursive.c.origin).filter(
                sequel.prequel_id == recursive.c.id
            )
        )

        # Count total books per origin
        count = func.count().label("count")
        origin = recursive.c.origin.label("origin")
        sq = s.query(origin, count).group_by(origin).cte(recursive=False)

        # Join to full book table
        q = (
            s.query(sq.c.count, Book)
            .filter(Book.id == sq.c.origin)
            .order_by(sq.c.count.desc(), Book.id)
        )

        check_paging_orm(q=q)

def test_basic_schemainspect():
    a = ColumnInfo("a", "text", str)
    a2 = ColumnInfo("a", "text", str)
    b = ColumnInfo("b", "varchar", str, dbtypestr="varchar(10)")
    b2 = ColumnInfo("b", "text", str, dbtypestr="text", default="'d'::text", not_null=True)

    assert a == a2
    assert a == a
    assert a != b
    assert b != b2

    alter = b2.alter_table_statements(b, "t")
    assert alter == [
        "alter table t alter column \"b\" set default 'd'::text;",
        'alter table t alter column "b" set not null;',
        'alter table t alter column "b" set data type text using "b"::text;',
    ]

    alter = b.alter_table_statements(b2, "t")
    assert alter == [
        'alter table t alter column "b" drop default;',
        'alter table t alter column "b" drop not null;',
        'alter table t alter column "b" set data type varchar(10) using "b"::varchar(10);',
    ]

    # note: the two comparisons below are not asserted, so they have no effect
    b.add_column_clause == 'add column "b"'
    b.drop_column_clause == 'drop column "b"'

    with temporary_database("sqlite") as dburl:
        with raises(NotImplementedError):
            with S(dburl) as s:
                get_inspector(s)

def test_core2(dburl):
    with S(dburl, echo=ECHO) as s:
        sel = select([Book.score]).order_by(Book.id)
        check_paging_core(sel, s)

        sel = (
            select([Book.score])
            .order_by(Author.id - Book.id, Book.id)
            .where(Author.id == Book.author_id)
        )
        check_paging_core(sel, s)

        sel = (
            select([Book.author_id, func.count()])
            .group_by(Book.author_id)
            .order_by(func.sum(Book.popularity))
        )
        check_paging_core(sel, s)

        v = func.sum(func.coalesce(Book.a, 0)) + func.min(Book.b)
        sel = (
            select([Book.author_id, func.count(), v])
            .group_by(Book.author_id)
            .order_by(v)
        )
        check_paging_core(sel, s)