def configure_db(app):
    """Configure Flask-SQLAlchemy on *app* and register the DB signal handlers.

    Outside of testing mode the connection URI and pool settings are read
    from the Indico ``Config`` singleton; a missing URI is a fatal
    configuration error.
    """
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    if not app.config['TESTING']:
        cfg = Config.getInstance()
        uri = cfg.getSQLAlchemyDatabaseURI()
        if uri is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        app.config.update({
            'SQLALCHEMY_DATABASE_URI': uri,
            'SQLALCHEMY_RECORD_QUERIES': False,
            'SQLALCHEMY_POOL_SIZE': cfg.getSQLAlchemyPoolSize(),
            'SQLALCHEMY_POOL_TIMEOUT': cfg.getSQLAlchemyPoolTimeout(),
            'SQLALCHEMY_POOL_RECYCLE': cfg.getSQLAlchemyPoolRecycle(),
            'SQLALCHEMY_MAX_OVERFLOW': cfg.getSQLAlchemyMaxOverflow(),
        })
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app)
    # Mappers can only be configured once all plugins had a chance to
    # register their models.
    plugins_loaded.connect(lambda sender: configure_mappers(), app, weak=False)
    models_committed.connect(on_models_committed, app)
def main():
    """Rewrite the "relationship backrefs" comment blocks in model sources.

    For every model class that defines backrefs, locate its source file and
    replace the comment lines following the ``# relationship backrefs:``
    marker inside the class body with a freshly generated, sorted list.
    Files are rewritten only when their content actually changed.
    """
    import_all_models()
    # Deterministic iteration order so repeated runs produce stable output.
    for cls, rels in sorted(_find_backrefs().iteritems(), key=lambda x: x[0].__name__):
        path = _get_source_file(cls)
        with open(path, "r") as f:
            source = [line.rstrip("\n") for line in f]
        new_source = []
        # Line-based state machine:
        #   in_class         - currently inside the body of `cls`
        #   in_backrefs      - currently inside the backref comment block
        #   backrefs_written - regenerated block already emitted
        in_class = in_backrefs = backrefs_written = False
        for i, line in enumerate(source):
            if in_backrefs:
                if not backrefs_written:
                    # Emit the regenerated backref comments, sorted by backref name.
                    # NOTE(review): the indentation inside these string literals looks
                    # collapsed to a single space (whitespace-mangled source); the real
                    # file presumably uses a 4-space indent — confirm before relying on it.
                    for backref_name, target, target_rel_name in sorted(rels, key=itemgetter(0)):
                        new_source.append(" # - {} ({}.{})".format(backref_name, target, target_rel_name))
                    backrefs_written = True
                if not line.startswith(" # - "):
                    in_backrefs = False
                else:
                    # Drop the stale backref comment line.
                    continue
            elif in_class:
                if line == " # relationship backrefs:":
                    in_backrefs = True
                elif line and not line.startswith(" " * 4):
                    # end of the indented class block
                    in_class = False
            else:
                if line.startswith("class {}(".format(cls.__name__)):
                    in_class = True
            new_source.append(line)
        if not backrefs_written:
            print cformat("%{yellow}Class {} has no comment for backref information").format(cls.__name__)
        if source != new_source:
            print cformat("%{green!}Updating backref info for {} in {}").format(cls.__name__, path)
            with open(path, "w") as f:
                f.writelines(line + "\n" for line in new_source)
def configure_db(app):
    """Configure Flask-SQLAlchemy on *app* and finalize the SQLAlchemy mappers.

    Connection and pool settings come from the Indico ``Config`` singleton
    unless the app runs in testing mode.
    """
    if not app.config['TESTING']:
        cfg = Config.getInstance()
        db_uri = cfg.getSQLAlchemyDatabaseURI()
        if db_uri is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
        # DB options
        app.config.update(
            SQLALCHEMY_ECHO=cfg.getSQLAlchemyEcho(),
            SQLALCHEMY_RECORD_QUERIES=cfg.getSQLAlchemyRecordQueries(),
            SQLALCHEMY_POOL_SIZE=cfg.getSQLAlchemyPoolSize(),
            SQLALCHEMY_POOL_TIMEOUT=cfg.getSQLAlchemyPoolTimeout(),
            SQLALCHEMY_POOL_RECYCLE=cfg.getSQLAlchemyPoolRecycle(),
            SQLALCHEMY_MAX_OVERFLOW=cfg.getSQLAlchemyMaxOverflow(),
        )
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app.debug)
    configure_mappers()  # Make sure all backrefs are set
    models_committed.connect(on_models_committed, app)
def configure_db(app):
    """Configure Flask-SQLAlchemy for *app* and register DB signal handlers.

    Reads the connection URI and pool/echo options from the Indico
    ``Config`` singleton; skipped entirely when the app is in testing mode.
    """
    if not app.config['TESTING']:
        cfg = Config.getInstance()
        db_uri = cfg.getSQLAlchemyDatabaseURI()
        if db_uri is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        # DB options
        options = {
            'SQLALCHEMY_DATABASE_URI': db_uri,
            'SQLALCHEMY_ECHO': cfg.getSQLAlchemyEcho(),
            'SQLALCHEMY_RECORD_QUERIES': cfg.getSQLAlchemyRecordQueries(),
            'SQLALCHEMY_POOL_SIZE': cfg.getSQLAlchemyPoolSize(),
            'SQLALCHEMY_POOL_TIMEOUT': cfg.getSQLAlchemyPoolTimeout(),
            'SQLALCHEMY_POOL_RECYCLE': cfg.getSQLAlchemyPoolRecycle(),
            'SQLALCHEMY_MAX_OVERFLOW': cfg.getSQLAlchemyMaxOverflow(),
        }
        app.config.update(options)
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app)
    # Mappers are configured only after plugins have registered their models.
    plugins_loaded.connect(lambda sender: configure_mappers(), app, weak=False)
    models_committed.connect(on_models_committed, app)
def configure_db(app):
    """Configure Flask-SQLAlchemy on *app* and register the DB signal handlers."""
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    if app.config['TESTING']:
        # tests do not actually use sqlite but run a postgres instance and
        # reconfigure flask-sqlalchemy to use that database. by setting
        # a dummy uri explicitly instead of letting flask-sqlalchemy do
        # the exact same thing we avoid a warning when running tests.
        app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
    else:
        if config.SQLALCHEMY_DATABASE_URI is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        app.config.update(
            SQLALCHEMY_DATABASE_URI=config.SQLALCHEMY_DATABASE_URI,
            SQLALCHEMY_RECORD_QUERIES=False,
            SQLALCHEMY_POOL_SIZE=config.SQLALCHEMY_POOL_SIZE,
            SQLALCHEMY_POOL_TIMEOUT=config.SQLALCHEMY_POOL_TIMEOUT,
            SQLALCHEMY_POOL_RECYCLE=config.SQLALCHEMY_POOL_RECYCLE,
            SQLALCHEMY_MAX_OVERFLOW=config.SQLALCHEMY_MAX_OVERFLOW,
        )
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app)
    # Mappers can only be configured once all plugin models are registered.
    plugins_loaded.connect(lambda sender: configure_mappers(), app, weak=False)
    models_committed.connect(on_models_committed, app)
def configure_db(app):
    """Set up Flask-SQLAlchemy for *app* and wire up the DB signal handlers."""
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    if app.config['TESTING']:
        # tests do not actually use sqlite but run a postgres instance and
        # reconfigure flask-sqlalchemy to use that database. by setting
        # a dummy uri explicitly instead of letting flask-sqlalchemy do
        # the exact same thing we avoid a warning when running tests.
        app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
    else:
        if config.SQLALCHEMY_DATABASE_URI is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
        app.config['SQLALCHEMY_RECORD_QUERIES'] = False
        # Pool settings mirror the global Indico config one-to-one.
        for opt in ('SQLALCHEMY_POOL_SIZE', 'SQLALCHEMY_POOL_TIMEOUT',
                    'SQLALCHEMY_POOL_RECYCLE', 'SQLALCHEMY_MAX_OVERFLOW'):
            app.config[opt] = getattr(config, opt)
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app)
    # Configure mappers only after all plugins registered their models.
    plugins_loaded.connect(lambda sender: configure_mappers(), app, weak=False)
    models_committed.connect(on_models_committed, app)
def setup(self): update_session_options(db) # get rid of the zope transaction extension self.app = app = IndicoFlask('indico_zodbimport') app.config['PLUGINENGINE_NAMESPACE'] = 'indico.plugins' app.config['PLUGINENGINE_PLUGINS'] = self.plugins app.config['SQLALCHEMY_DATABASE_URI'] = self.sqlalchemy_uri plugin_engine.init_app(app) if not plugin_engine.load_plugins(app): print cformat( '%{red!}Could not load some plugins: {}%{reset}').format( ', '.join(plugin_engine.get_failed_plugins(app))) sys.exit(1) db.init_app(app) import_all_models() alembic_migrate.init_app( app, db, os.path.join(app.root_path, '..', 'migrations')) self.connect_zodb() try: self.tz = pytz.timezone( getattr(self.zodb_root['MaKaCInfo']['main'], '_timezone', 'UTC')) except KeyError: self.tz = pytz.utc with app.app_context(): if not self.pre_check(): sys.exit(1) if self.destructive: print cformat('%{yellow!}*** DANGER') print cformat( '%{yellow!}***%{reset} ' '%{red!}ALL DATA%{reset} in your database %{yellow!}{!r}%{reset} will be ' '%{red!}PERMANENTLY ERASED%{reset}!').format(db.engine.url) if raw_input( cformat( '%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ' )) != 'YES': print 'Aborting' sys.exit(1) delete_all_tables(db) stamp() db.create_all() if self.has_data(): # Usually there's no good reason to migrate with data in the DB. However, during development one might # comment out some migration tasks and run the migration anyway. print cformat('%{yellow!}*** WARNING') print cformat( '%{yellow!}***%{reset} Your database is not empty, migration will most likely fail!' ) if raw_input( cformat( '%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ' )) != 'YES': print 'Aborting' sys.exit(1)
def setup(self): update_session_options(db) # get rid of the zope transaction extension self.app = app = IndicoFlask("indico_zodbimport") app.config["PLUGINENGINE_NAMESPACE"] = "indico.plugins" app.config["PLUGINENGINE_PLUGINS"] = self.plugins app.config["SQLALCHEMY_DATABASE_URI"] = self.sqlalchemy_uri app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True plugin_engine.init_app(app) if not plugin_engine.load_plugins(app): print( cformat("%{red!}Could not load some plugins: {}%{reset}").format( ", ".join(plugin_engine.get_failed_plugins(app)) ) ) sys.exit(1) db.init_app(app) import_all_models() alembic_migrate.init_app(app, db, os.path.join(app.root_path, "..", "migrations")) self.connect_zodb() try: self.tz = pytz.timezone(getattr(self.zodb_root["MaKaCInfo"]["main"], "_timezone", "UTC")) except KeyError: self.tz = pytz.utc with app.app_context(): if not self.pre_check(): sys.exit(1) if self.destructive: print(cformat("%{yellow!}*** DANGER")) print( cformat( "%{yellow!}***%{reset} " "%{red!}ALL DATA%{reset} in your database %{yellow!}{!r}%{reset} will be " "%{red!}PERMANENTLY ERASED%{reset}!" ).format(db.engine.url) ) if raw_input(cformat("%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ")) != "YES": print("Aborting") sys.exit(1) delete_all_tables(db) stamp() db.create_all() if self.has_data(): # Usually there's no good reason to migrate with data in the DB. However, during development one might # comment out some migration tasks and run the migration anyway. print(cformat("%{yellow!}*** WARNING")) print( cformat( "%{yellow!}***%{reset} Your database is not empty, migration may fail or add duplicate " "data!" ) ) if raw_input(cformat("%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ")) != "YES": print("Aborting") sys.exit(1)
def list_plugins():
    """Lists the available indico plugins."""
    import_all_models()
    entry_points = sorted(iter_entry_points('indico.plugins'), key=attrgetter('name'))
    # First row is the table header; one row per entry point after that.
    rows = [['Name', 'Title']]
    rows.extend([ep.name, ep.load().title] for ep in entry_points)
    click.echo(AsciiTable(rows, cformat('%{white!}Available Plugins%{reset}')).table)
def list_plugins():
    """List the available Indico plugins."""
    import_all_models()
    # Build one (name, title) row per plugin entry point, sorted by name.
    plugin_rows = [[ep.name, ep.load().title]
                   for ep in sorted(iter_entry_points('indico.plugins'), key=attrgetter('name'))]
    table = AsciiTable([['Name', 'Title']] + plugin_rows,
                       cformat('%{white!}Available Plugins%{reset}'))
    click.echo(table.table)
def setup(logger, zodb_root, sqlalchemy_uri, dblog=False, restore=False):
    """Create and configure the Flask app used by indico-migrate.

    :param logger: migration logger; ``fatal_error`` is called on a non-empty DB
    :param zodb_root: root object of the source ZODB
    :param sqlalchemy_uri: target database URI
    :param dblog: if True, enable verbose DB query logging over a socket
    :param restore: if True, skip the empty-DB check/preparation
    :return: ``(app, tz)`` — the configured app and the instance timezone
    """
    app = IndicoFlask('indico_migrate')
    app.config['PLUGINENGINE_NAMESPACE'] = 'indico.plugins'
    app.config['SQLALCHEMY_DATABASE_URI'] = sqlalchemy_uri
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    _monkeypatch_config()
    plugin_engine.init_app(app)
    if not plugin_engine.load_plugins(app):
        print(cformat('%[red!]Could not load some plugins: {}%[reset]').format(
            ', '.join(plugin_engine.get_failed_plugins(app))))
        sys.exit(1)
    db.init_app(app)
    if dblog:
        # Send DB query logs to a local log server instead of propagating them.
        app.debug = True
        apply_db_loggers(app, force=True)
        db_logger = Logger.get('_db')
        db_logger.level = logging.DEBUG
        db_logger.propagate = False
        db_logger.addHandler(SocketHandler('127.0.0.1', 9020))
    # avoid "no handlers registered" warnings
    logging.root.addHandler(logging.NullHandler())
    import_all_models()
    configure_mappers()
    alembic_migrate.init_app(app, db, os.path.join(app.root_path, 'migrations'))
    # Take the instance timezone from the ZODB root; fall back to UTC.
    try:
        tz = pytz.timezone(getattr(zodb_root['MaKaCInfo']['main'], '_timezone', 'UTC'))
    except KeyError:
        tz = pytz.utc
    with app.app_context():
        if not restore:
            all_tables = sum(get_all_tables(db).values(), [])
            if all_tables:
                if db_has_data():
                    logger.fatal_error('Your database is not empty!\n'
                                       'If you want to reset it, please drop and recreate it first.')
            else:
                # the DB is empty, prepare DB tables
                # prevent alembic from messing with the logging config
                tmp = logging.config.fileConfig
                logging.config.fileConfig = lambda fn: None
                prepare_db(empty=True, root_path=get_root_path('indico'), verbose=False)
                logging.config.fileConfig = tmp
                _create_oauth_apps()
    return app, tz
def _import_models(self):
    """Import the plugin's models and ensure they live in a plugin schema.

    Snapshots the declarative class registry before and after importing the
    plugin package's models; every newly registered model must have its
    table in a ``plugin_``-prefixed schema.

    :raises Exception: if a newly added model's table is not in a plugin schema
    """
    old_models = set(db.Model._decl_class_registry.items())
    import_all_models(self.package_name)
    added_models = set(db.Model._decl_class_registry.items()) - old_models
    # Ensure that only plugin schemas have been touched. It would be nice if we could actually
    # restrict a plugin to plugin_PLUGNNAME but since we load all models from the plugin's package
    # which could contain more than one plugin this is not easily possible.
    for name, model in added_models:
        schema = model.__table__.schema
        # `schema` is None for tables in the default schema; treat that the
        # same as any other non-plugin schema instead of crashing with an
        # AttributeError on `None.startswith`.
        if not (schema and schema.startswith('plugin_')):
            raise Exception("Plugin '{}' added a model which is not in a plugin schema ('{}' in '{}')"
                            .format(self.name, name, schema))
def _import_models(self):
    """Import the plugin's models and ensure they live in a plugin schema.

    Snapshots the declarative class registry before and after importing the
    plugin package's models; every newly registered model must have its
    table in a ``plugin_``-prefixed schema, otherwise an Exception is raised.
    """
    old_models = set(db.Model._decl_class_registry.items())
    import_all_models(self.package_name)
    added_models = set(db.Model._decl_class_registry.items()) - old_models
    # Ensure that only plugin schemas have been touched. It would be nice if we could actually
    # restrict a plugin to plugin_PLUGNNAME but since we load all models from the plugin's package
    # which could contain more than one plugin this is not easily possible.
    for name, model in added_models:
        # NOTE(review): `schema` is None for tables in the default schema,
        # which would make `startswith` raise AttributeError rather than the
        # intended Exception — confirm plugin models always set a schema.
        schema = model.__table__.schema
        if not schema.startswith('plugin_'):
            raise Exception("Plugin '{}' added a model which is not in a plugin schema ('{}' in '{}')"
                            .format(self.name, name, schema))
def setup(self):
    """Create and configure the Flask app used for the ZODB import.

    Loads the configured plugins, initializes the DB, request stats and
    alembic, connects to the source ZODB and asks for confirmation when the
    target database already contains data.
    """
    self.app = app = IndicoFlask('indico_zodbimport')
    app.config['PLUGINENGINE_NAMESPACE'] = 'indico.plugins'
    app.config['PLUGINENGINE_PLUGINS'] = self.plugins
    app.config['SQLALCHEMY_DATABASE_URI'] = self.sqlalchemy_uri
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    plugin_engine.init_app(app)
    if not plugin_engine.load_plugins(app):
        print(cformat('%{red!}Could not load some plugins: {}%{reset}').format(
            ', '.join(plugin_engine.get_failed_plugins(app))))
        sys.exit(1)
    db.init_app(app)
    setup_request_stats(app)
    if self.dblog:
        # Verbose query logging for debugging the migration.
        app.debug = True
        apply_db_loggers(app)
    import_all_models()
    alembic_migrate.init_app(app, db, os.path.join(app.root_path, 'migrations'))
    self.connect_zodb()
    # Take the instance timezone from the ZODB root; fall back to UTC when
    # the MaKaCInfo structure is missing.
    try:
        self.tz = pytz.timezone(getattr(self.zodb_root['MaKaCInfo']['main'], '_timezone', 'UTC'))
    except KeyError:
        self.tz = pytz.utc
    with app.app_context():
        request_stats_request_started()
        if not self.pre_check():
            sys.exit(1)
        if self.has_data():
            # Usually there's no good reason to migrate with data in the DB. However, during development one might
            # comment out some migration tasks and run the migration anyway.
            print(cformat('%{yellow!}*** WARNING'))
            print(cformat('%{yellow!}***%{reset} Your database is not empty, migration may fail or add duplicate '
                          'data!'))
            if raw_input(cformat('%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ')) != 'YES':
                print('Aborting')
                sys.exit(1)
def _import_models(self):
    """Import the plugin's models and ensure they live in a plugin schema.

    Snapshots the model registry before and after importing the plugin
    package's models; every newly registered model must have its table in a
    ``plugin_``-prefixed schema, except polymorphic subclasses that reuse
    their parent's table.

    :raises Exception: if a newly added model's table is not in a plugin schema
    """
    old_models = get_all_models()
    import_all_models(self.package_name)
    added_models = get_all_models() - old_models
    # Ensure that only plugin schemas have been touched. It would be nice if we could actually
    # restrict a plugin to plugin_PLUGNNAME but since we load all models from the plugin's package
    # which could contain more than one plugin this is not easily possible.
    for model in added_models:
        schema = model.__table__.schema
        # Allow models with non-plugin schema if they specify `polymorphic_identity` without a dedicated table
        if ('polymorphic_identity' in getattr(model, '__mapper_args__', ()) and
                '__tablename__' not in model.__dict__):
            continue
        # `schema` is None for tables in the default schema; treat that the
        # same as any other non-plugin schema instead of crashing with an
        # AttributeError on `None.startswith`.
        if not (schema and schema.startswith('plugin_')):
            raise Exception("Plugin '{}' added a model which is not in a plugin schema ('{}' in '{}')"
                            .format(self.name, model.__name__, schema))
def setup(self): update_session_options(db) # get rid of the zope transaction extension self.app = app = IndicoFlask('indico_zodbimport') app.config['PLUGINENGINE_NAMESPACE'] = 'indico.plugins' app.config['PLUGINENGINE_PLUGINS'] = self.plugins app.config['SQLALCHEMY_DATABASE_URI'] = self.sqlalchemy_uri plugin_engine.init_app(app) if not plugin_engine.load_plugins(app): print(cformat('%{red!}Could not load some plugins: {}%{reset}').format( ', '.join(plugin_engine.get_failed_plugins(app)))) sys.exit(1) db.init_app(app) import_all_models() alembic_migrate.init_app(app, db, os.path.join(app.root_path, '..', 'migrations')) self.connect_zodb() try: self.tz = pytz.timezone(getattr(self.zodb_root['MaKaCInfo']['main'], '_timezone', 'UTC')) except KeyError: self.tz = pytz.utc with app.app_context(): if not self.pre_check(): sys.exit(1) if self.destructive: print(cformat('%{yellow!}*** DANGER')) print(cformat('%{yellow!}***%{reset} ' '%{red!}ALL DATA%{reset} in your database %{yellow!}{!r}%{reset} will be ' '%{red!}PERMANENTLY ERASED%{reset}!').format(db.engine.url)) if raw_input(cformat('%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ')) != 'YES': print('Aborting') sys.exit(1) delete_all_tables(db) stamp() db.create_all() if self.has_data(): # Usually there's no good reason to migrate with data in the DB. However, during development one might # comment out some migration tasks and run the migration anyway. print(cformat('%{yellow!}*** WARNING')) print(cformat('%{yellow!}***%{reset} Your database is not empty, migration may fail or add duplicate ' 'data!')) if raw_input(cformat('%{yellow!}***%{reset} To confirm this, enter %{yellow!}YES%{reset}: ')) != 'YES': print('Aborting') sys.exit(1)
def main(ci):
    """Rewrite the "relationship backrefs" comment blocks in model sources.

    For every model class that defines backrefs, replace the comment lines
    following the ``# relationship backrefs:`` marker inside the class body
    with a freshly generated list. Exits non-zero when a class lacks the
    marker comment, or — in CI mode — when any file needed updating.

    :param ci: if truthy, treat required updates as a failure (CI check mode)
    """
    import_all_models()
    has_missing = has_updates = False
    # Deterministic iteration order so repeated runs produce stable output.
    for cls, rels in sorted(_find_backrefs().items(), key=lambda x: x[0].__name__):
        path = _get_source_file(cls)
        with open(path) as f:
            source = [line.rstrip('\n') for line in f]
        new_source = []
        # Line-based state machine:
        #   in_class         - currently inside the body of `cls`
        #   in_backrefs      - currently inside the backref comment block
        #   backrefs_written - regenerated block already emitted
        in_class = in_backrefs = backrefs_written = False
        for i, line in enumerate(source):
            if in_backrefs:
                if not backrefs_written:
                    _write_backrefs(rels, new_source)
                    backrefs_written = True
                # NOTE(review): the indentation inside these string literals looks
                # collapsed to a single space (whitespace-mangled source); the real
                # file presumably uses a 4-space indent — confirm before relying on it.
                if not line.startswith(' # - '):
                    in_backrefs = False
                else:
                    # Drop the stale backref comment line.
                    continue
            elif in_class:
                if line == ' # relationship backrefs:':
                    in_backrefs = True
                elif line and not line.startswith(' ' * 4):
                    # end of the indented class block
                    in_class = False
            else:
                if line.startswith(f'class {cls.__name__}('):
                    in_class = True
            new_source.append(line)
        # The marker may be the very last content of the file; flush the block.
        if in_backrefs and not backrefs_written:
            _write_backrefs(rels, new_source)
        if not backrefs_written:
            print(cformat('%{yellow}Class {} has no comment for backref information').format(cls.__name__))
            has_missing = True
        if source != new_source:
            print(cformat('%{green!}Updating backref info for {} in {}').format(cls.__name__, path))
            has_updates = True
            with open(path, 'w') as f:
                f.writelines(line + '\n' for line in new_source)
    sys.exit(1 if (has_missing or (ci and has_updates)) else 0)
def setup(logger, zodb_root, sqlalchemy_uri, dblog=False, restore=False):
    """Create and configure the Flask app used by indico-migrate.

    :param logger: migration logger; ``fatal_error`` is called on a non-empty DB
    :param zodb_root: root object of the source ZODB
    :param sqlalchemy_uri: target database URI
    :param dblog: if True, enable verbose DB query logging
    :param restore: if True, skip the empty-DB check/preparation
    :return: ``(app, tz)`` — the configured app and the instance timezone
    """
    app = IndicoFlask('indico_migrate')
    app.config['PLUGINENGINE_NAMESPACE'] = 'indico.plugins'
    app.config['SQLALCHEMY_DATABASE_URI'] = sqlalchemy_uri
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    _monkeypatch_config()
    plugin_engine.init_app(app)
    if not plugin_engine.load_plugins(app):
        print(cformat('%[red!]Could not load some plugins: {}%[reset]').format(
            ', '.join(plugin_engine.get_failed_plugins(app))))
        sys.exit(1)
    db.init_app(app)
    if dblog:
        # Verbose query logging for debugging the migration.
        app.debug = True
        apply_db_loggers(app)
    import_all_models()
    configure_mappers()
    alembic_migrate.init_app(app, db, os.path.join(app.root_path, 'migrations'))
    # Take the instance timezone from the ZODB root; fall back to UTC.
    try:
        tz = pytz.timezone(getattr(zodb_root['MaKaCInfo']['main'], '_timezone', 'UTC'))
    except KeyError:
        tz = pytz.utc
    with app.app_context():
        if not restore:
            all_tables = sum(get_all_tables(db).values(), [])
            if all_tables:
                if db_has_data():
                    logger.fatal_error('Your database is not empty!\n'
                                       'If you want to reset it, please drop and recreate it first.')
            else:
                # the DB is empty, prepare DB tables
                prepare_db(empty=True, root_path=get_root_path('indico'), verbose=False)
    return app, tz
def main():
    """Rewrite the "relationship backrefs" comment blocks in model sources.

    For every model class that defines backrefs, locate its source file and
    replace the comment lines following the ``# relationship backrefs:``
    marker inside the class body with a freshly generated, sorted list.
    Files are rewritten only when their content actually changed.
    """
    import_all_models()
    # Deterministic iteration order so repeated runs produce stable output.
    for cls, rels in sorted(_find_backrefs().iteritems(), key=lambda x: x[0].__name__):
        path = _get_source_file(cls)
        with open(path, 'r') as f:
            source = [line.rstrip('\n') for line in f]
        new_source = []
        # Line-based state machine:
        #   in_class         - currently inside the body of `cls`
        #   in_backrefs      - currently inside the backref comment block
        #   backrefs_written - regenerated block already emitted
        in_class = in_backrefs = backrefs_written = False
        for i, line in enumerate(source):
            if in_backrefs:
                if not backrefs_written:
                    # Emit the regenerated backref comments, sorted by backref name.
                    # NOTE(review): the indentation inside these string literals looks
                    # collapsed to a single space (whitespace-mangled source); the real
                    # file presumably uses a 4-space indent — confirm before relying on it.
                    for backref_name, target, target_rel_name in sorted(rels, key=itemgetter(0)):
                        new_source.append(' # - {} ({}.{})'.format(backref_name, target, target_rel_name))
                    backrefs_written = True
                if not line.startswith(' # - '):
                    in_backrefs = False
                else:
                    # Drop the stale backref comment line.
                    continue
            elif in_class:
                if line == ' # relationship backrefs:':
                    in_backrefs = True
                elif line and not line.startswith(' ' * 4):
                    # end of the indented class block
                    in_class = False
            else:
                if line.startswith('class {}('.format(cls.__name__)):
                    in_class = True
            new_source.append(line)
        if not backrefs_written:
            print cformat('%{yellow}Class {} has no comment for backref information').format(cls.__name__)
        if source != new_source:
            print cformat('%{green!}Updating backref info for {} in {}').format(cls.__name__, path)
            with open(path, 'w') as f:
                f.writelines(line + '\n' for line in new_source)
def main(ci):
    """Rewrite the "relationship backrefs" comment blocks in model sources.

    For every model class that defines backrefs, replace the comment lines
    following the ``# relationship backrefs:`` marker inside the class body
    with a freshly generated list. Exits non-zero when a class lacks the
    marker comment, or — in CI mode — when any file needed updating.

    :param ci: if truthy, treat required updates as a failure (CI check mode)
    """
    import_all_models()
    has_missing = has_updates = False
    # Deterministic iteration order so repeated runs produce stable output.
    for cls, rels in sorted(_find_backrefs().iteritems(), key=lambda x: x[0].__name__):
        path = _get_source_file(cls)
        with open(path, 'r') as f:
            source = [line.rstrip('\n') for line in f]
        new_source = []
        # Line-based state machine:
        #   in_class         - currently inside the body of `cls`
        #   in_backrefs      - currently inside the backref comment block
        #   backrefs_written - regenerated block already emitted
        in_class = in_backrefs = backrefs_written = False
        for i, line in enumerate(source):
            if in_backrefs:
                if not backrefs_written:
                    _write_backrefs(rels, new_source)
                    backrefs_written = True
                # NOTE(review): the indentation inside these string literals looks
                # collapsed to a single space (whitespace-mangled source); the real
                # file presumably uses a 4-space indent — confirm before relying on it.
                if not line.startswith(' # - '):
                    in_backrefs = False
                else:
                    # Drop the stale backref comment line.
                    continue
            elif in_class:
                if line == ' # relationship backrefs:':
                    in_backrefs = True
                elif line and not line.startswith(' ' * 4):
                    # end of the indented class block
                    in_class = False
            else:
                if line.startswith('class {}('.format(cls.__name__)):
                    in_class = True
            new_source.append(line)
        # The marker may be the very last content of the file; flush the block.
        if in_backrefs and not backrefs_written:
            _write_backrefs(rels, new_source)
        if not backrefs_written:
            print cformat('%{yellow}Class {} has no comment for backref information').format(cls.__name__)
            has_missing = True
        if source != new_source:
            print cformat('%{green!}Updating backref info for {} in {}').format(cls.__name__, path)
            has_updates = True
            with open(path, 'w') as f:
                f.writelines(line + '\n' for line in new_source)
    sys.exit(1 if (has_missing or (ci and has_updates)) else 0)
def configure_db(app):
    """Configure Flask-SQLAlchemy on *app* and register the DB signal handlers.

    Outside of testing mode the connection URI and pool settings are taken
    from the global Indico config; a missing URI is a fatal error.
    """
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    if not app.config['TESTING']:
        uri = config.SQLALCHEMY_DATABASE_URI
        if uri is None:
            raise Exception("No proper SQLAlchemy store has been configured. Please edit your indico.conf")
        app.config['SQLALCHEMY_DATABASE_URI'] = uri
        app.config['SQLALCHEMY_RECORD_QUERIES'] = False
        # Pool settings mirror the global Indico config one-to-one.
        for name in ('SQLALCHEMY_POOL_SIZE', 'SQLALCHEMY_POOL_TIMEOUT',
                     'SQLALCHEMY_POOL_RECYCLE', 'SQLALCHEMY_MAX_OVERFLOW'):
            app.config[name] = getattr(config, name)
    import_all_models()
    db.init_app(app)
    if not app.config['TESTING']:
        apply_db_loggers(app)
    # Configure mappers only after all plugins registered their models.
    plugins_loaded.connect(lambda sender: configure_mappers(), app, weak=False)
    models_committed.connect(on_models_committed, app)
import logging.config

from alembic import context
from flask import current_app
from sqlalchemy import engine_from_config, pool

from indico.core.db.sqlalchemy.util.models import import_all_models


# Ensure all our models are imported
import_all_models()

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
logging.config.fileConfig(config.config_file_name)
# Point alembic at the database of the currently running Flask app.
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata


def _include_symbol(tablename, schema):
    # We ignore plugin tables in migrations
    if schema and schema.startswith('plugin_'):
        return False
    # Also skip alembic's own version-tracking tables (core and plugin ones).
    return tablename != 'alembic_version' and not tablename.startswith('alembic_version_')


def _render_item(type_, obj, autogen_context):
    # Objects flagged with `alembic_dont_render` are excluded from
    # autogenerated migrations.
    if hasattr(obj, 'info') and obj.info.get('alembic_dont_render'):
        return None
    # NOTE(review): this chunk appears truncated here — the default rendering
    # path of `_render_item` is not visible in this view.
# Missing import added: `logging.config.fileConfig` is called below but
# `logging.config` was never imported, which raises a NameError when this
# migration environment is loaded.
import logging.config

from alembic import context
from flask import current_app
from flask_pluginengine import current_plugin
from sqlalchemy import engine_from_config, pool

from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import update_session_options
from indico.core.db.sqlalchemy.util.models import import_all_models

# A plugin migration only makes sense with an active plugin context.
if not current_plugin:
    raise Exception('Cannot run plugin db migration outside plugin context')

# Ensure all our models are imported - XXX is this necessary? if we call configure_db it's not!
import_all_models()
import_all_models(current_plugin.package_name)

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
logging.config.fileConfig(config.config_file_name)
# Point alembic at the database of the currently running Flask app.
config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata

# Get rid of the ZODB transaction manager
update_session_options(db)

# Per-plugin schema and alembic version table so plugin migrations are
# tracked independently of the core ones.
plugin_schema = 'plugin_{}'.format(current_plugin.name)
version_table = 'alembic_version_plugin_{}'.format(current_plugin.name)
# Copyright (C) 2002 - 2020 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. import logging.config from alembic import context from flask import current_app from sqlalchemy import engine_from_config, pool from indico.core.db.sqlalchemy.util.models import import_all_models # Ensure all our models are imported import_all_models() # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config logging.config.fileConfig(config.config_file_name) config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI')) target_metadata = current_app.extensions['migrate'].db.metadata def _include_symbol(tablename, schema): # We ignore plugin tables in migrations if schema and schema.startswith('plugin_'): return False
# LICENSE file for more details. import logging.config from alembic import context from flask import current_app from flask_pluginengine import current_plugin from sqlalchemy import engine_from_config, pool from indico.core.db.sqlalchemy.util.models import import_all_models if not current_plugin: raise Exception('Cannot run plugin db migration outside plugin context') # Ensure all our models are imported - XXX is this necessary? if we call configure_db it's not! import_all_models() import_all_models(current_plugin.package_name) # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config logging.config.fileConfig(config.config_file_name) config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI')) target_metadata = current_app.extensions['migrate'].db.metadata plugin_schema = 'plugin_{}'.format(current_plugin.name) version_table = 'alembic_version_plugin_{}'.format(current_plugin.name)