def teardown():
    aggregates.manager.reset()
    close_all_sessions()
    Base.metadata.drop_all(connection)
    remove_composite_listeners()
    connection.close()
    engine.dispose()

def tables(engine):
    for m in metadata:
        m.create_all(engine)
    yield
    close_all_sessions()
    for m in metadata:
        m.drop_all(engine)

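# A hedged usage sketch: with the generator above registered as a pytest
# fixture (e.g. decorated with @pytest.fixture), a test only has to request
# it by name. `metadata` and `engine` are the names the snippet assumes;
# the test itself is illustrative, not from the source.
from sqlalchemy import inspect

def test_schema_created(tables, engine):
    inspector = inspect(engine)
    for m in metadata:
        for table in m.sorted_tables:
            assert inspector.has_table(table.name)
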
def cope_with_db_failover():
    max_attempts = 10
    for attempt in range(1, max_attempts + 1):
        try:
            standby = db.engine.execute(
                'SELECT pg_is_in_recovery()').fetchall()[0][0]
            if standby:
                # This is a hot standby, we need to fail over
                current_app.logger.warning(
                    'Connected to a standby db, reconnecting. '
                    'Attempt number %s/%s', attempt, max_attempts)
                current_app.update_db_uri()
                close_all_sessions()
                time.sleep(0.2)
            else:
                break
        except OperationalError as err:
            current_app.logger.warning(
                'Database reconnection occurred. This is expected to happen '
                'when there has been a recent failover or DB proxy restart. '
                'Attempt number %s/%s. Error was: %s',
                attempt, max_attempts, err,
            )

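# A minimal sketch of where such a guard could run, assuming a Flask app
# object named `app`; the route is hypothetical. Calling the helper first
# means a connection to a hot standby is detected and retried before the
# request touches the database.
@app.route('/health/db')
def db_health():
    cope_with_db_failover()
    db.engine.execute('SELECT 1')
    return {'status': 'ok'}
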
async def test_real_btc_wallet_identifier(tmpdir):
    """Test the identifier of a Bitcoin wallet."""
    wallet = BitcoinWallet(tmpdir)
    assert wallet.get_identifier() == 'BTC'
    await wallet.shutdown_task_manager()
    db_session.close_all_sessions()

async def test_real_btc_wallet_name(tmpdir):
    """Test the name of a Bitcoin wallet."""
    wallet = BitcoinWallet(tmpdir)
    assert wallet.get_name() == 'Bitcoin'
    await wallet.shutdown_task_manager()
    db_session.close_all_sessions()

def teardown():
    session.execute('DROP TABLE account')
    session.execute('DROP TYPE money_type')
    session.commit()
    close_all_sessions()
    connection.close()
    remove_composite_listeners()
    engine.dispose()

def tear_down():
    import fastapi_sqla

    yield
    close_all_sessions()
    # Reload fastapi_sqla to clear the sqla deferred reflection mapping
    # stored in Base.
    importlib.reload(fastapi_sqla)

def db():
    _configuration = '''
    db:
      test_url: postgresql://postgres:postgres@localhost/restfulpy_test
      administrative_url: postgresql://postgres:postgres@localhost/postgres
    '''
    configure(force=True)
    settings.merge(_configuration)

    # Override the db url because this is a test session, so db.test_url
    # will be used instead of db.url.
    settings.db.url = settings.db.test_url

    # Drop the previously created db if it exists.
    with DBManager(url=settings.db.test_url) as m:
        m.drop_database()
        m.create_database()

    # An engine to create the db schema and bind future sessions.
    engine = create_engine()

    # A session factory that stores every created session so it can be
    # closed on teardown.
    sessions = []

    def _connect(*a, expire_on_commit=False, **kw):
        new_session = session_factory(
            bind=engine, *a, expire_on_commit=expire_on_commit, **kw
        )
        sessions.append(new_session)
        return new_session

    session = _connect(expire_on_commit=True)

    # Create the database objects.
    setup_schema(session)
    session.commit()

    # Close the session to free the connection for future sessions.
    session.close()

    # Prepare and bind the application's shared scoped session, to avoid
    # errors when a model tries to use that session internally.
    init_model(engine)

    yield _connect

    # Close all sessions created by the test writer.
    for s in sessions:
        s.close()
    close_all_sessions()
    engine.dispose()

    # Drop the previously created database.
    with DBManager(url=settings.db.test_url) as m:
        m.drop_database()

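# Hypothetical test built on the fixture above: `db` yields the `_connect`
# factory, so each call opens a tracked session that the fixture closes on
# teardown before the database is dropped. `Member` is an assumed model.
def test_fresh_database_is_empty(db):
    session = db()
    assert session.query(Member).count() == 0
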
def tearDown(self):
    if self.session.lm.wallets['BTC'].wallet:
        # Close the database session so the wallet file can be removed
        del self.session.lm.wallets['BTC'].wallet

    # Close all bitcoinlib Wallet DB sessions if any exist
    db_session.close_all_sessions()

    yield super(TestWalletsEndpoint, self).tearDown()

def teardown_method(self, method):
    search_manager.processed_columns = []
    self.session.expunge_all()
    close_all_sessions()
    vectorizer.clear()
    self.drop_tables()
    self.engine.dispose()
    remove_listeners(self.Base.metadata)

def db_session():
    db.engine = create_engine("sqlite:///:memory:", echo=False)
    # db.engine = create_engine("mysql://*****:*****@127.0.0.1:33060/qisittest")
    # db.engine = create_engine("postgresql+psycopg2://qisit:[email protected]:54320/qisittest")
    db.Session.configure(bind=db.engine)
    the_session = db.Session()
    initialize_db(the_session, load_data=False)
    yield the_session
    session.close_all_sessions()

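# Hypothetical test using the `db_session` fixture above; `Recipe` stands in
# for any mapped model from the application.
def test_database_starts_empty(db_session):
    assert db_session.query(Recipe).count() == 0
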
def client():
    _app = create_app()
    _app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(
        'TEST_SQLALCHEMY_DATABASE_URI')
    client = _app.test_client()
    with _app.app_context():
        db.create_all()
        yield client
        close_all_sessions()
        db.drop_all()

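# A minimal request against the `client` fixture above. The route and the
# accepted status codes are assumptions; the point is that schema creation
# and teardown are entirely handled by the fixture.
def test_client_responds(client):
    response = client.get('/')
    assert response.status_code in (200, 404)
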
def destroy(self) -> None:
    instance = self.get_instance()

    if self.app:
        with self.app.app_context():
            sql = text("SELECT 1")
            instance.db.engine.execute(sql)
            instance.db.session.remove()
            close_all_sessions()
            # massive destruction
            log.critical("Destroy current SQL data")
            instance.db.drop_all()

def _reset_database(request, app):
    db_name = "test_freight"
    session.close_all_sessions()

    # 9.1 does not support --if-exists
    if subprocess.call(f"psql -l | grep '{db_name}'", shell=True) == 0:
        engine = db.engine
        engine.connect().close()
        engine.dispose()
        subprocess.check_call(f"dropdb {db_name}", shell=True)

    subprocess.check_call(f"createdb -E utf-8 {db_name}", shell=True)
    command.upgrade(ALEMBIC_CONFIG, "head")

    return lambda: reset_database(request, app)

def tearDown(self) -> None:
    close_all_sessions()

    if self.mock_bq_tables:
        # Execute each statement one at a time for resilience.
        for dataset_id, table_id in self.mock_bq_tables:
            self._execute_statement(
                f"DROP TABLE {self._to_postgres_table_name(dataset_id, table_id)}"
            )
        for type_name in self.type_name_generator.all_names_generated():
            self._execute_statement(f"DROP TYPE {type_name}")
        self._execute_statement(_DROP_ARRAY_CONCAT_AGG_FUNC)

    if self.postgres_engine is not None:
        self.postgres_engine.dispose()
        self.postgres_engine = None

def base_app(base_config_app):
    app = base_config_app
    db.init_app(app)
    with app.app_context():
        db.drop_all()
        db.create_all()
    close_all_sessions()
    engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    connection = engine.connect()
    app.config['TESTING_CONNECTION'] = connection
    app.register_blueprint(api_bp, url_prefix='/api')
    return app

def init_database(request):
    """Initializes the database."""
    DB.drop_all()
    DB.create_all()

    base_dir = join(abspath(dirname(__file__)), '..')
    for fixture_file in glob(join(base_dir, 'seed', '*.json')):
        fixtures = JSONLoader().load(fixture_file)
        load_fixtures(DB, fixtures)

    for fixture_file in sorted(glob(join(base_dir, 'seed', 'demo', '*.json'))):
        fixtures = JSONLoader().load(fixture_file)
        load_fixtures(DB, fixtures)

    request.cls.DB = DB

    yield DB

    close_all_sessions()

def insertTradeSummary(self, trade):
    '''
    Create a new record from the trade object
    :param trade: DataFrame that uses the column names defined in SumReqFields
    '''
    sf = self.sf
    rc = self.rc
    # tcols = sf.tcols
    newts = dict()

    ModelBase.session.rollback()
    close_all_sessions()
    ModelBase.connect(new_session=True)
    session = ModelBase.session

    tradesum = TradeSum()
    tradesum.name = trade[sf.name].unique()[0]
    tradesum.strategy = trade[sf.strat].unique()[0]
    tradesum.link1 = trade[sf.link1].unique()[0]
    tradesum.account = trade[sf.acct].unique()[0]
    tradesum.pnl = trade[sf.pl].unique()[0]
    tradesum.start = formatTime(trade[sf.start].unique()[0])
    tradesum.date = formatDate(trade[sf.date].unique()[0])
    tradesum.duration = trade[sf.dur].unique()[0]
    tradesum.shares = trade[sf.shares].unique()[0]
    tradesum.mktval = getFloat(trade[sf.mktval].unique()[0])
    tradesum.target = getFloat(trade[sf.targ].unique()[0])
    tradesum.targdiff = getFloat(trade[sf.targdiff].unique()[0])
    tradesum.stoploss = getFloat(trade[sf.stoploss].unique()[0])
    tradesum.sldiff = getFloat(trade[sf.sldiff].unique()[0])
    tradesum.rr = trade[sf.rr].unique()[0]
    tradesum.realrr = trade[sf.realrr].unique()[0]
    tradesum.maxloss = getFloat(trade[sf.maxloss].unique()[0])
    tradesum.mstkval = getFloat(trade[sf.mstkval].unique()[0])
    tradesum.mstknote = trade[sf.mstknote].unique()[0]
    tradesum.explain = trade[sf.explain].unique()[0]
    tradesum.notes = trade[sf.notes].unique()[0]
    tradesum.clean = ''

    session.add(tradesum)
    session.commit()
    # session.close()
    return tradesum

def teardown_on_disk_postgresql_database(
        declarative_base: DeclarativeMeta) -> None:
    """Clears state in an on-disk postgres database for a given schema, for
    use once a single test has completed. As an optimization, does not
    actually drop tables, just clears them. As a best practice, you should
    call stop_and_clear_on_disk_postgresql_database() once all tests in a
    test class are complete to actually drop the tables.
    """
    # Ensure all sessions are closed, otherwise the below may hang.
    close_all_sessions()

    session = SessionFactory.for_schema_base(declarative_base)
    try:
        for table in reversed(declarative_base.metadata.sorted_tables):
            session.execute(table.delete())
        session.commit()
    except Exception as e:
        session.rollback()
        raise e
    finally:
        session.close()

    SQLAlchemyEngineManager.teardown_engine_for_schema(declarative_base)

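# Sketch of calling the helper from a unittest-style tearDown, assuming
# `schema_base` is the same DeclarativeMeta the test class used to set the
# database up.
import unittest

class PersistenceTestCase(unittest.TestCase):
    def tearDown(self) -> None:
        teardown_on_disk_postgresql_database(schema_base)
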
def destroy_db(self):
    session.close_all_sessions()
    self.engine.dispose()
    self._db_exec('drop database ' + config.get('db')['database'])

def tearDown(self):
    close_all_sessions()
    self.drop_schema()

async def tearDown(self):
    db_session.close_all_sessions()
    await super().tearDown()

def tearDown(self):
    close_all_sessions()
    db.drop_all()

async def tearDown(self):
    # Close all bitcoinlib Wallet DB sessions if any exist
    db_session.close_all_sessions()
    await super(TestWalletsEndpoint, self).tearDown()

def tearDownClass(cls):
    call_finalizers()
    close_all_sessions()
    db.drop_all()
    db.engine.dispose()
    cls.app_context.pop()

def _tearDown():
    cfg.applogger.info('Purging database...[!n]')
    session.close_all_sessions()
    Base.metadata.drop_all(dbengine)
    cfg.applogger.info('[OK][!p]')

def teardown_method(self, method):
    translation_manager.pending_classes = []
    close_all_sessions()
    self.Model.metadata.drop_all(self.connection)
    self.connection.close()
    self.engine.dispose()

def shutdown(self):
    """Shuts down the database engine."""
    close_all_sessions()
    self.engine.dispose()
    # clear all tracked global variables
    clear_tracked_globals()

def tearDown(self):
    # Close all bitcoinlib Wallet DB sessions
    db_session.close_all_sessions()
    yield super(TestBtcWallet, self).tearDown()

def postgres_session_factory(postgres_db: engine.Engine):
    # Deliver the session factory to the next fixture.
    yield sessionmaker(bind=postgres_db, autoflush=False)
    close_all_sessions()

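# Hypothetical companion fixture: opens one session per test from the
# factory yielded above and rolls back whatever the test did before the
# factory fixture finally calls close_all_sessions().
import pytest

@pytest.fixture
def postgres_session(postgres_session_factory):
    session = postgres_session_factory()
    yield session
    session.rollback()
    session.close()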