def __init__(self, file, repository, echoresults):
    """Open the database at *file* and migrate it to schema version 2.

    file -- an SQLAlchemy URL, or a bare path (treated as sqlite for
            backward compatibility).
    repository -- sqlalchemy-migrate repository path.
    echoresults -- accepted for backward compatibility; currently unused
            (engine echo is forced off).
    """
    # Backward compatibility: a bare filename becomes an sqlite URL.
    # Fixed: compare with `is None` instead of `== None`.
    if re.match(r'^\w+://', file) is None:
        file = 'sqlite:///' + file
    self.version = 2
    self.dbfile = file
    self.repository = repository
    # Migrate the schema up or down to self.version as needed.
    try:
        dbversion = api.db_version(file, self.repository)
    except Exception:
        # Fixed: bare `except:` narrowed to Exception. An uncontrolled
        # database has no version yet; stamp it at 0 so the upgrade below
        # replays every migration.
        dbversion = 0
        api.version_control(file, self.repository, dbversion)
    if dbversion < self.version:
        api.upgrade(file, self.repository, self.version)
    elif dbversion > self.version:
        api.downgrade(file, self.repository, self.version)
    engine = create_engine(file, echo=False)
    metadata = Base.metadata
    metadata.create_all(engine)
    Session = sessionmaker(engine)
    self.session = Session()
def _setup(config):
    """Create or upgrade the schema, then run the entity setup.

    NOTE: this is called from tests, so it may have side effects.
    FIXME: delayed execution was disabled here (amqp host); redo with rq.
    """
    db_url = config.get('sqlalchemy.url')
    repo_dir = os.path.join(os.path.dirname(__file__), 'migration')
    head_version = migrateapi.version(repo_dir)
    # Destructive reset, guarded by a deliberately scary config value.
    if config.get('adhocracy.setup.drop', "OH_NOES") == "KILL_EM_ALL":
        meta.data.drop_all(bind=meta.engine)
        meta.engine.execute("DROP TABLE IF EXISTS migrate_version")
    try:
        current_version = migrateapi.db_version(db_url, repo_dir)
        if current_version < head_version:
            migrateapi.upgrade(db_url, repo_dir)
        fresh_install = False
    except DatabaseNotControlledError:
        # Brand-new database: build all tables and stamp the head version.
        meta.data.create_all(bind=meta.engine)
        migrateapi.version_control(db_url, repo_dir, version=head_version)
        fresh_install = True
    install.setup_entities(config, fresh_install)
def initialize_startup():
    """ Force DB tables create, in case no data is already found."""
    # Probe the schema: no tables at all means a brand-new database.
    session = SA_SESSIONMAKER()
    inspector = reflection.Inspector.from_engine(session.connection())
    is_db_empty = len(inspector.get_table_names()) < 1
    if is_db_empty:
        LOGGER.debug("Database access exception, maybe DB is empty")
    session.close()
    if is_db_empty:
        LOGGER.info("Initializing Database")
        # Rebuild the versioning repository from scratch.
        if os.path.exists(cfg.DB_VERSIONING_REPO):
            shutil.rmtree(cfg.DB_VERSIONING_REPO)
        migratesqlapi.create(cfg.DB_VERSIONING_REPO,
                             os.path.split(cfg.DB_VERSIONING_REPO)[1])
        _update_sql_scripts()
        migratesqlapi.version_control(cfg.DB_URL, cfg.DB_VERSIONING_REPO,
                                      version=cfg.DB_CURRENT_VERSION)
        session = SA_SESSIONMAKER()
        model.Base.metadata.create_all(bind=session.connection())
        session.commit()
        session.close()
        LOGGER.info("Database Default Tables created successfully!")
    else:
        # Existing data: only run pending migrations.
        _update_sql_scripts()
        migratesqlapi.upgrade(cfg.DB_URL, cfg.DB_VERSIONING_REPO,
                              version=cfg.DB_CURRENT_VERSION)
        LOGGER.info("Database already has some data, will not be re-created!")
    return is_db_empty
def create_database():
    """Create all model tables and put the DB under migrate version control."""
    db.create_all()
    uri = c.SQLALCHEMY_DATABASE_URI
    repo = c.SQLALCHEMY_MIGRATE_REPO
    if os.path.exists(repo):
        # Existing repository: stamp the DB with the repo's current version.
        api.version_control(uri, repo, api.version(repo))
    else:
        # First run: create the repository, then start versioning from zero.
        api.create(repo, 'database repository')
        api.version_control(uri, repo)
def run(self):
    """Build the tables and attach sqlalchemy-migrate version control."""
    db.create_all()
    repo_present = os.path.exists(SQLALCHEMY_MIGRATE_REPO)
    if repo_present:
        # Stamp the database with the repository's current version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
    else:
        # No repository yet: create one, then start versioning.
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
def create_db():
    """Create tables, set up migrate versioning, and seed default modules.

    The 21 input modules (slots 1-7, channels A-C) were previously built by
    one hand-written constructor call each; they are now generated from a
    single (slot -> GPIO pins) table so the data lives in one place.
    """
    Base.metadata.create_all(engine)
    if not path.exists(SQLALCHEMY_MIGRATE_REPO):
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    else:
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
    # GPIO pins for channels A, B, C of each slot; values and insertion
    # order match the original hand-written list exactly.
    gpio_map = [
        (1, [8, 7, 11]),
        (2, [9, 10, 5]),
        (3, [6, 12, 13]),
        (4, [0, 1, 16]),
        (5, [17, 18, 19]),
        (6, [20, 21, 22]),
        (7, [23, 24, 25]),
    ]
    for slot, pins in gpio_map:
        for channel, gpio in zip('ABC', pins):
            session.add(Module(name=u'M%d%s' % (slot, channel), slot=slot,
                               gpio=gpio, io_type='input', rpull=False,
                               status=False, device_id=''))
    session.commit()
def db_sync(version=None):
    """Place a database under migration control and perform an upgrade.

    version -- accepted for API compatibility; currently unused.
    """
    try:
        versioning_api.version_control(
            CFG.db.sql_connection, get_migrate_repo_path())
    except versioning_exceptions.DatabaseAlreadyControlledError:
        # Fixed: dropped the unused `, e` binding (Python-2-only syntax).
        # Already under version control -- nothing to do.
        pass
def _init_database(self, url):
    """Build an engine for *url* and put it under migrate version control."""
    LOG.debug('Building Engine')
    db_engine = sqlalchemy.create_engine(url)
    LOG.debug('Initializing database')
    versioning_api.version_control(db_engine, repository=self.REPOSITORY)
    return db_engine
def createdb(): """ Creates a database with all of the tables defined in the SQLAlchemy models. Creates and initializes an SQLAlchemy-migrate repository if none exists. """ # Create New DB Reflecting SQLAlchemy Data Models db.create_all(app=app) admin = User('admin', '/home/admin', 'password') db.session.add(admin) db.session.commit() # Create SQLAlchemy-migrate Versioning Repository If Absent if not os.path.exists(app.config['SQLALCHEMY_MIGRATE_REPO']): api.create(app.config['SQLALCHEMY_MIGRATE_REPO'], 'database repository') api.version_control(app.config['SQLALCHEMY_DATABASE_URI'], app.config['SQLALCHEMY_MIGRATE_REPO']) print "SQLAlchemy-migrate Versioning Repository Created in: " +\ app.config['SQLALCHEMY_MIGRATE_REPO'] else: api.version_control(app.config['SQLALCHEMY_DATABASE_URI'], app.config['SQLALCHEMY_MIGRATE_REPO'], api.version( app.config['SQLALCHEMY_MIGRATE_REPO'])) print "Database created in: " + app.config['SQLALCHEMY_DATABASE_URI']
def db_sync():
    """Upgrade the DB to the latest migration, stamping it first if migrate
    has never seen it."""
    repo_path = os.path.abspath(os.path.dirname(migrate_repo.__file__))
    connection = CONF.database.connection
    try:
        versioning_api.upgrade(connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        # Fresh database: put it under version control, then retry.
        versioning_api.version_control(connection, repo_path)
        versioning_api.upgrade(connection, repo_path)
def _walk_versions(self, engine=None):
    """Exercise every migration script in the repository.

    Snake-walks upward from INIT_VERSION to the latest revision and back
    down again, verifying that each upgrade and downgrade applies cleanly
    against an empty database (schema only, no data).
    """
    # Put the database under version control at the initial revision.
    migration_api.version_control(engine, self.REPOSITORY, self.INIT_VERSION)
    assert_equal(self.INIT_VERSION,
                 migration_api.db_version(engine, self.REPOSITORY))
    LOG.debug('Latest version is %s' % self.REPOSITORY.latest)
    revisions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
    # Upward pass: upgrade -> downgrade -> upgrade at each revision.
    for rev in revisions:
        self._migrate_up(engine, rev)
        self._migrate_down(engine, rev - 1)
        self._migrate_up(engine, rev)
    # Downward pass: downgrade -> upgrade -> downgrade at each revision.
    for rev in reversed(revisions):
        self._migrate_down(engine, rev - 1)
        self._migrate_up(engine, rev)
        self._migrate_down(engine, rev - 1)
def db_create():
    """Create the database"""
    # Stamp the database under migrate control, then bring its schema to
    # the latest revision. db_upgrade() is deliberately inside the try so
    # its DatabaseAlreadyControlledError (if any) is caught too.
    try:
        migrate_api.version_control(url=db_url, repository=db_repo)
        db_upgrade()
    except DatabaseAlreadyControlledError:
        print 'ERROR: Database is already version controlled.'
def db_version_control(engine, abs_path, version=None):
    """
    Mark a database as under this repository's version control.
    """
    # Resolve the repository and stamp the database in one step.
    versioning_api.version_control(engine, _find_migrate_repo(abs_path),
                                   version)
    return version
def _memorydb_migrate_db(**kwargs):
    """ This is crazy crackheaded, and abusive to sqlalchemy. We'll take out
    dispose so the migrate stuff doesn't kill it, and push through the
    migrate. This makes a bunch of assumptions that are likely stupid, but
    since this is done on a memory-backed db for testing, it's probably okay.
    Just don't run this on a real db. """
    # No-op replacement for Engine.dispose so migrate cannot tear down the
    # in-memory database mid-run.
    def dispose_patch(*args, **kwargs):
        pass
    global engine
    Base.metadata.create_all(bind=engine)
    # Empty every table, children first, so FK constraints are respected.
    for table in reversed(Base.metadata.sorted_tables):
        session.execute(table.delete())
    session.commit()
    # Temporarily monkeypatch dispose for the duration of the migration.
    old_dispose = engine.dispose
    engine.dispose = dispose_patch
    repo_path = repo.Repository(
        os.path.abspath(os.path.dirname(opencenter_repo.__file__)))
    migrate_api.version_control(engine, repo_path)
    migrate_api.upgrade(engine, repo_path)
    # Restore the real dispose once migrations have run.
    engine.dispose = old_dispose
def setup_db(settings):
    """
    Create (or recreate) the sqlite test database and migrate it to the
    latest revision with the SA-Migrations API.

    If the db file already exists it is removed first, so every call yields
    a fresh database. In theory this API could also be used for
    version-specific testing.
    """
    from migrate.versioning import api as mig
    sa_url = settings['sqlalchemy.url']
    migrate_repository = 'migrations'
    # We're hackish here: assume the test db file is whatever follows the
    # last slash of the SA url, e.g. sqlite:///somedb.db -> somedb.db
    db_name = sa_url[sa_url.rindex('/') + 1:]
    try:
        os.remove(db_name)
    except OSError:
        # Fixed: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt) to OSError -- the file simply wasn't there.
        pass
    # Touch an empty file so sqlite starts from a clean slate.
    open(db_name, 'w').close()
    mig.version_control(sa_url, migrate_repository)
    mig.upgrade(sa_url, migrate_repository)
def init_db():
    """Create all model tables and register the DB with sqlalchemy-migrate."""
    import models
    Base.metadata.create_all(bind=engine)
    repo_exists = os.path.exists(SQLALCHEMY_MIGRATE_REPO)
    if repo_exists:
        # Stamp the database at the repository's current version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
    else:
        # No repository yet: create one, then start version control.
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
def create_db_and_update(db_uri, migrate_repo):
    """Initialise (or re-stamp) the migrate repository for *db_uri*."""
    if os.path.exists(migrate_repo):
        # Repository already exists: stamp the DB at its current version.
        api.version_control(db_uri, migrate_repo, api.version(migrate_repo))
    else:
        # First run: build the repository, then start version control.
        api.create(migrate_repo, 'database repository')
        api.version_control(db_uri, migrate_repo)
def initdb():
    """Creates all database tables."""
    db.create_all()
    uri = app.config.get('SQLALCHEMY_DATABASE_URI')
    repo = app.config.get('SQLALCHEMY_MIGRATE_REPO')
    if os.path.exists(repo):
        # Repository already present: stamp the DB at its current version.
        api.version_control(uri, repo, api.version(repo))
    else:
        # First run: build the repository and start version control.
        api.create(repo, 'database repository')
        api.version_control(uri, repo)
def __init__(self):
    """Build all tables and attach the migrate version-control container."""
    database.create_all()
    if os.path.exists(SQLALCHEMY_MIGRATE_CONT):
        # Container already exists: stamp the DB at its current version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_CONT,
                            api.version(SQLALCHEMY_MIGRATE_CONT))
    else:
        # First run: create the container, then start version control.
        api.create(SQLALCHEMY_MIGRATE_CONT, "database container")
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_CONT)
def _db_version_control(cls):
    """Put the configured database under migrate version control.

    Returns a short human-readable status string; does not raise when the
    database is already controlled.
    """
    try:
        versioning_api.version_control(
            CONF.db.sql_connection, cls._get_repo_path())
    except migrate.exceptions.DatabaseAlreadyControlledError:
        # Fixed typo in the status message ("my" -> "by").
        return 'DB is already controlled by migrate'
    return 'Success'
def db_create():
    """Create all tables and attach sqlalchemy-migrate version control."""
    pgdb.create_all()
    uri = Config.SQLALCHEMY_DATABASE_URI
    repo = Config.SQLALCHEMY_MIGRATE_REPO
    if os.path.exists(repo):
        # Stamp the database at the repository's current version.
        api.version_control(uri, repo, api.version(repo))
    else:
        # First run: create the repository, then start version control.
        api.create(repo, 'database repository')
        api.version_control(uri, repo)
def create():
    """Set up the sqlalchemy-migrate repository for the configured database."""
    # NOTE(review): the engine object is created but immediately discarded
    # and no metadata.create_all() is issued, so this call does not build
    # any tables -- it only touches/creates the db file (on connect) and
    # wires up the migrate repository. Confirm this is intended.
    create_engine(SQLALCHEMY_DATABASE_URI, echo=True)
    if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    else:
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
def create(): print '{} is being instantiated now!'.format(SQLALCHEMY_DATABASE_URI) orm_db.create_all() if not os.path.exists(SQLALCHEMY_MIGRATE_REPO): api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository') api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO) else: api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
def test_passing_engine(self):
    """version_control/upgrade accept an Engine object in place of a URL."""
    repo_path = self.tmp_repos()
    api.create(repo_path, 'temp')
    api.script('First Version', repo_path)
    eng = construct_engine('sqlite:///:memory:')
    api.version_control(eng, repo_path)
    api.upgrade(eng, repo_path)
def sync(conf):
    """Upgrade conf's database to the latest migration, stamping it first
    if it has never been version controlled."""
    register_conf_opts(conf)
    repo_path = os.path.abspath(os.path.dirname(migrate_repo.__file__))
    connection = conf.sql.connection
    try:
        versioning_api.upgrade(connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        # Uncontrolled database: stamp it, then rerun the upgrade.
        versioning_api.version_control(connection, repo_path)
        versioning_api.upgrade(connection, repo_path)
def create_db(self): self.metadata.create_all(bind=self.metadata.bind) # sqlalchemy migrate hack from migrate.versioning.api import version_control, version import shakespeare.migration.versions v = version(shakespeare.migration.__path__[0]) log.info( "Setting current version to '%s'" % v ) version_control(self.metadata.bind.url, shakespeare.migration.__path__[0], v)
def init(self):
    """Version-control the configured database; fail if already initialized."""
    url = cfg.CONF['storage:sqlalchemy'].database_connection
    try:
        LOG.info('Attempting to initialize database')
        versioning_api.version_control(url=url, repository=REPOSITORY)
        LOG.info('Database initialized successfully')
    except DatabaseAlreadyControlledError:
        # Surface a plain error rather than the migrate-specific one.
        raise Exception('Database already initialized')
def create_db():
    """Rebuild the schema from scratch and (re)attach migrate versioning."""
    # Drop everything first so the new schema starts clean.
    db.session.rollback()
    db.drop_all()
    db.create_all()
    if os.path.exists(SQLALCHEMY_MIGRATE_REPO):
        # Existing repository: stamp the DB at its current version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
    else:
        # First run: create the repository, then start version control.
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database_repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
def main(parser, options, args):
    """Version-control the database if needed, then upgrade it to the
    repository head and exit."""
    engine_url = config["sqlalchemy.url"]
    target_version = version(migrate_repository)
    try:
        version_control(engine_url, migrate_repository,
                        version=initial_version, echo=DEBUG)
    except DatabaseAlreadyControlledError:
        # Already stamped -- go straight to the upgrade.
        pass
    upgrade(engine_url, migrate_repository, version=target_version, echo=DEBUG)
    sys.exit(0)
def setup_migration_version_control(self, version=None):
    """Put the bound database under migrate version control.

    A no-op if the database is already controlled.
    """
    import migrate.exceptions
    import migrate.versioning.api as mig
    # set up db version control (if not already)
    try:
        mig.version_control(self.metadata.bind, self.migrate_repository, version)
    except migrate.exceptions.DatabaseAlreadyControlledError:
        # Already controlled -- nothing to do.
        pass
def version(self, initial_version):
    """Initialize the database with migrate metadata.

    initial_version -- the version number to stamp the database with.
    Raises DatabaseAlreadyControlledError if the DB is already initialized.
    """
    version_control(self._db_url, self._repository_path, initial_version)
# datetime: 2013-07-19 16:34 #********************************** ''' 创建好一个数据库之后,执行该文件, 同步所有的数据表 ''' import os.path import sys sys.path.append('../') from migrate.versioning import api from app.models import app, db, Permissions db.create_all() if not os.path.exists(app.config['SQLALCHEMY_MIGRATE_REPO']): api.create(app.config['SQLALCHEMY_MIGRATE_REPO'], 'database repository') api.version_control(app.config['SQLALCHEMY_DATABASE_URI'], \ app.config['SQLALCHEMY_MIGRATE_REPO']) else: api.version_control(app.config['SQLALCHEMY_DATABASE_URI'], app.config\ ['SQLALCHEMY_MIGRATE_REPO'], api.version(app.config['SQLALCHEMY_MIGRATE_REPO'])) print 'database has complated.'
# @Software: PyCharm
# Removed the superseded commented-out variant that used the old
# MigrateConfig/UserConfig split; the live code below uses the unified
# Config object.
from migrate.versioning import api
from config import Config
from app import db
import os.path

# Build every table defined on the models.
db.create_all()
# Attach sqlalchemy-migrate: create the repository on first run, otherwise
# stamp the database at the repository's current version.
if not os.path.exists(Config.SQLALCHEMY_MIGRATE_JEFF):
    api.create(Config.SQLALCHEMY_MIGRATE_JEFF, 'database repository')
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_JEFF)
else:
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_JEFF,
                        api.version(Config.SQLALCHEMY_MIGRATE_JEFF))
#!flask/bin/python
# Utilized with changes from:
# https://github.com/miguelgrinberg/microblog/blob/6b193afe4748f25018fe086bc7faee452e024828/db_create.py
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATIONS
from app import db
import os.path

# Build every table defined on the models.
db.create_all()

# Attach sqlalchemy-migrate: when the migrations repository already exists,
# stamp the database at its current version; otherwise create the
# repository first and start version control from scratch.
if os.path.exists(SQLALCHEMY_MIGRATIONS):
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATIONS,
                        api.version(SQLALCHEMY_MIGRATIONS))
else:
    api.create(SQLALCHEMY_MIGRATIONS, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATIONS)
#!bin/python
# Create all of the tables for the SQLAlchemy models.
# You need to create the database first, though.
from os import environ, path
environ["PPT_ENVIRONMENT"] = "test"
from flask_sqlalchemy import SQLAlchemy
from migrate.versioning import api
from flask_ppt2 import app, db

repo_uri = app.config["SQLALCHEMY_DATABASE_URI"]
repo = app.config["SQLALCHEMY_MIGRATE_REPO"]

# Materialise the model tables.
db.create_all()
db.session.commit()

# Existing repository -> stamp the DB at its version; otherwise create the
# repository first, then start version control.
if path.exists(repo):
    api.version_control(repo_uri, repo, api.version(repo))
else:
    api.create(repo, 'database repository')
    api.version_control(repo_uri, repo)
def db_version_control(version=None, package=None):
    """Stamp the configured DB with *version* using *package*'s migrate repo."""
    versioning_api.version_control(
        CONF.database.connection,
        migration_helpers.find_migrate_repo(package=package),
        version)
    return version
#!flask/bin/python
import os.path
import sys
sys.path.append('../')
from app import app, db
from migrate.versioning import api

db.init_app(app)
with app.test_request_context():
    db.create_all()
uri = app.config['SQLALCHEMY_DATABASE_URI']
repo = app.config['SQLALCHEMY_MIGRATE_REPO']
if not os.path.exists(repo):
    api.create(repo, 'database repository')
    api.version_control(uri, repo)
else:
    # Bug fix: api.version(repo) is the documented call for the repository's
    # current version; api.version.Collection.version(repo) raises
    # AttributeError because api.version is a plain function.
    api.version_control(uri, repo, api.version(repo))
def _setup(self, url):
    """Create a temp repository, put *url* under version control, and keep
    the returned schema object for the tests."""
    super(TestSchemaAPI, self)._setup(url)
    self.repo = self.tmp_repos()
    api.create(self.repo, 'temp')
    self.schema = api.version_control(url, self.repo)
def runCouchPotato(options, base_path, args):
    """Bootstrap and run the CouchPotato web application.

    Sets up directories, logging, the sqlite database (with sqlalchemy-migrate
    schema upgrades) and the Flask app, then blocks in app.run().
    """
    # Load settings
    from couchpotato.environment import Env
    settings = Env.get('settings')
    settings.setFile(options.config_file)

    # Create data dir if needed; fall back to <base_path>/_data.
    data_dir = os.path.expanduser(Env.setting('data_dir'))
    if data_dir == '':
        data_dir = os.path.join(base_path, '_data')
    if not os.path.isdir(data_dir):
        os.makedirs(data_dir)

    # Create logging dir
    log_dir = os.path.join(data_dir, 'logs')
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)

    # Daemonize app
    if options.daemonize:
        createDaemon()

    # Register environment settings
    Env.set('uses_git', not options.git)
    Env.set('app_dir', base_path)
    Env.set('data_dir', data_dir)
    Env.set('log_path', os.path.join(log_dir, 'CouchPotato.log'))
    Env.set('db_path', 'sqlite:///' + os.path.join(data_dir, 'couchpotato.db'))
    Env.set('cache_dir', os.path.join(data_dir, 'cache'))
    Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python')))
    Env.set('quiet', options.quiet)
    Env.set('daemonize', options.daemonize)
    Env.set('args', args)

    # Determine debug
    debug = options.debug or Env.setting('debug', default=False)
    Env.set('debug', debug)

    # Only run once when debugging (Werkzeug reloader spawns a child process).
    if os.environ.get('WERKZEUG_RUN_MAIN') or not debug:

        # Root logger: DEBUG when debugging, INFO otherwise.
        logger = logging.getLogger()
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
        level = logging.DEBUG if debug else logging.INFO
        logger.setLevel(level)

        # To screen (only in interactive debug runs)
        if debug and not options.quiet and not options.daemonize:
            hdlr = logging.StreamHandler(sys.stderr)
            hdlr.setFormatter(formatter)
            logger.addHandler(hdlr)

        # To file (rotating, 10 x 500 kB)
        hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10)
        hdlr2.setFormatter(formatter)
        logger.addHandler(hdlr2)

        # Disable server access log
        server_log = logging.getLogger('werkzeug')
        server_log.disabled = True

    # Start logging
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s' % options)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root=base_path)
    loader.run()

    # Load migrations
    from migrate.versioning.api import version_control, db_version, version, upgrade
    db = Env.get('db_path')
    repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')
    logging.getLogger('migrate').setLevel(logging.WARNING)  # Disable logging for migration

    latest_db_version = version(repo)
    initialize = True
    try:
        current_db_version = db_version(db, repo)
        initialize = False
    except:
        # NOTE(review): bare except -- assumes any failure means the DB is
        # not version-controlled yet; stamp it at the latest version.
        version_control(db, repo, version=latest_db_version)
        current_db_version = db_version(db, repo)

    # Upgrades are skipped in debug mode.
    if current_db_version < latest_db_version and not debug:
        log.info('Doing database upgrade. From %d to %d' % (current_db_version, latest_db_version))
        upgrade(db, repo)

    # Configure Database
    from couchpotato.core.settings.model import setup
    setup()

    fireEventAsync('app.load')
    # Only fire app.initialize when the database was just created.
    if initialize:
        fireEventAsync('app.initialize')

    # Create app
    from couchpotato import app
    api_key = Env.setting('api_key')
    url_base = '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else ''
    reloader = debug and not options.daemonize

    # Basic config
    app.secret_key = api_key
    config = {
        'use_reloader': reloader,
        'host': Env.setting('host', default='0.0.0.0'),
        'port': Env.setting('port', default=5000)
    }

    # Static path
    web.add_url_rule(url_base + '/static/<path:filename>', endpoint='static', view_func=app.send_static_file)

    # Register modules
    app.register_blueprint(web, url_prefix='%s/' % url_base)
    app.register_blueprint(api, url_prefix='%s/%s/' % (url_base, api_key))

    # Go go go!
    app.run(**config)
def test_version_control(self):
    """version_control accepts the repo path as both bytes/str and text."""
    repo_path = self.tmp_repos()
    api.create(repo_path, 'temp')
    # Exercise both spellings of the repository argument, in order.
    for repo_arg in (repo_path, six.text_type(repo_path)):
        api.version_control('sqlite:///', repo_arg)
def db_version_control(version=None):
    """Stamp the FLAGS-configured database with *version*."""
    versioning_api.version_control(FLAGS.sql_connection, _find_migrate_repo(),
                                   version)
    return version
def db_version_control(version=None, repo_path=None):
    """Place the configured DB under migrate version control at *version*.

    repo_path defaults to the standard migrate repository location.
    """
    repository = repo_path if repo_path is not None else find_migrate_repo()
    versioning_api.version_control(CONF.database.connection, repository,
                                   version)
    return version
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):
    """Bootstrap and run the CouchPotato Tornado application.

    Backs up the sqlite database, configures logging, runs migrate schema
    upgrades, builds the Tornado Application with all handlers, and blocks
    in the IOLoop (retrying the listen on port conflicts).
    """
    # Work out the platform's preferred text encoding, falling back to UTF-8.
    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = toUnicode(os.path.join(data_dir, 'couchpotato.db'))

    # Backup before start and cleanup old databases; backups are named by
    # their unix timestamp.
    new_backup = toUnicode(os.path.join(data_dir, 'db_backup', str(int(time.time()))))

    if not os.path.isdir(new_backup):
        os.makedirs(new_backup)

    # Remove older backups, keep backups 3 days or at least 3
    backups = []
    for directory in os.listdir(os.path.dirname(new_backup)):
        backup = toUnicode(os.path.join(os.path.dirname(new_backup), directory))
        if os.path.isdir(backup):
            backups.append(backup)

    # Only take a new backup when the newest one is over an hour old.
    latest_backup = tryInt(os.path.basename(sorted(backups)[-1])) if len(backups) > 0 else 0
    if latest_backup < time.time() - 3600:
        # Create path and copy
        src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal']
        for src_file in src_files:
            if os.path.isfile(src_file):
                dst_file = toUnicode(os.path.join(new_backup, os.path.basename(src_file)))
                shutil.copyfile(src_file, dst_file)

                # Try and copy stats seperately
                try:
                    shutil.copystat(src_file, dst_file)
                except:
                    # NOTE(review): bare except -- stat copy is best-effort.
                    pass

    # Prune: keep at least 3 backups, drop ones older than 3 days (259200 s).
    total_backups = len(backups)
    for backup in backups:
        if total_backups > 3:
            if tryInt(os.path.basename(backup)) < time.time() - 259200:
                for the_file in os.listdir(backup):
                    file_path = os.path.join(backup, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.remove(file_path)
                    except:
                        raise
                os.rmdir(backup)
                total_backups -= 1

    # Register environment settings
    Env.set('app_dir', toUnicode(base_path))
    Env.set('data_dir', toUnicode(data_dir))
    Env.set('log_path', toUnicode(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db_path', toUnicode('sqlite:///' + db_path))
    Env.set('cache_dir', toUnicode(os.path.join(data_dir, 'cache')))
    Env.set('cache', FileSystemCache(toUnicode(os.path.join(Env.get('cache_dir'), 'python'))))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default = False, type = 'bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default = False, type = 'bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp', 'migrate']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader (only in non-daemon, non-desktop development debug runs)
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file (rotating, 10 x 500 kB)
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    # Route Python warnings through our logger.
    def customwarn(message, category, filename, lineno, file = None, line = None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Check if database exists
    db = Env.get('db_path')
    db_exists = os.path.isfile(toUnicode(db_path))

    # Load migrations (only for a pre-existing database; a new one is
    # created below by setup() and seeded via app.initialize).
    if db_exists:

        from migrate.versioning.api import version_control, db_version, version, upgrade
        repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')

        latest_db_version = version(repo)
        try:
            current_db_version = db_version(db, repo)
        except:
            # NOTE(review): bare except -- assumes any failure means the DB
            # is not version-controlled yet; stamp it at the latest version.
            version_control(db, repo, version = latest_db_version)
            current_db_version = db_version(db, repo)

        if current_db_version < latest_db_version:
            if development:
                log.error('There is a database migration ready, but you are running development mode, so it won\'t be used. If you see this, you are stupid. Please disable development mode.')
            else:
                log.info('Doing database upgrade. From %d to %d', (current_db_version, latest_db_version))
                upgrade(db, repo)

    # Configure Database
    from couchpotato.core.settings.model import setup
    setup()

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    # Generate an API key on first run and persist it back to settings.
    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value = api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default = '0.0.0.0')
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default = 5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'ssl_cert': Env.setting('ssl_cert', default = None),
        'ssl_key': Env.setting('ssl_key', default = None),
    }

    # Load the app
    application = Application([],
        log_function = lambda x : None,
        debug = config['use_reloader'],
        gzip = True,
        cookie_secret = api_key,
        login_url = '%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(".*$", [
        (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

        # API handlers
        (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
        (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
        (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}),  # API docs

        # Login handlers
        (r'%slogin(/?)' % web_base, LoginHandler),
        (r'%slogout(/?)' % web_base, LogoutHandler),

        # Catch all webhandlers
        (r'%s(.*)(/?)' % web_base, WebHandler),
        (r'(.*)', WebHandler),
    ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(".*$", [
            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': toUnicode(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
        ])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root = toUnicode(base_path))
    loader.run()

    # Fill database with needed stuff
    if not db_exists:
        fireEvent('app.initialize', in_order = True)

    # Go go go!
    from tornado.ioloop import IOLoop
    loop = IOLoop.current()

    # Some logging and fire load event
    try:
        log.info('Starting server on port %(port)s', config)
    except:
        pass
    fireEventAsync('app.load')

    # HTTPS when both cert and key are configured, plain HTTP otherwise.
    if config['ssl_cert'] and config['ssl_key']:
        server = HTTPServer(application, no_keep_alive = True, ssl_options = {
            "certfile": config['ssl_cert'],
            "keyfile": config['ssl_key'],
        })
    else:
        server = HTTPServer(application, no_keep_alive = True)

    # Retry on "address already in use" (errno 48) a few times, then give up.
    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])
            loop.start()
        except Exception, e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e
                if nr == 48:
                    log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except:
                # Exception wasn't an (errno, msg) pair -- fall through.
                pass

            raise

        try_restart = False
import os, sys
import migrate.versioning.api as dbm

# Fixed: build the helper sys.path entries with os.path.join instead of
# hard-coded Windows backslash concatenation, so the script also works on
# POSIX systems.
for _sub in ("UI", "DB", "models"):
    sys.path.append(os.path.join(os.curdir, "src", _sub))

repoName = "DB_REPO"
# Create the migrate repository, put the sqlite DB under version control,
# and generate a model from the existing schema.
dbm.create(repoName, "tn")
dbm.version_control("sqlite:///src/DB/Data/stocks.db", repoName)
dbm.create_model("sqlite:///src/DB/Data/stocks.db", repoName)
def db_version_control(version=None, database='main', context=None):
    """Place *database* under migrate's version control.

    Looks up the migration repository for the named database, then registers
    its engine with sqlalchemy-migrate at the given version (or the
    repository default when ``version`` is None).

    Returns the ``version`` argument unchanged for caller convenience.
    """
    repo = _find_migrate_repo(database)
    engine = get_engine(database, context=context)
    versioning_api.version_control(engine, repo, version)
    return version
def runCouchPotato(options, base_path, args, data_dir=None, log_dir=None, Env=None, desktop=None):
    """Start the CouchPotato application.

    Backs up and migrates the SQLite database, configures the environment and
    logging, loads plugins, registers the Flask blueprints, and runs the
    Tornado event loop (retrying a few times if the port is busy).
    Note: Python 2 syntax (``except Exception, e``).
    """
    # Determine the platform's preferred text encoding, falling back to UTF-8.
    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    # Do db stuff
    db_path = os.path.join(data_dir, 'couchpotato.db')

    # Backup before start and cleanup old databases
    # Each backup directory is named by its creation epoch timestamp.
    new_backup = os.path.join(data_dir, 'db_backup', str(int(time.time())))

    # Create path and copy
    if not os.path.isdir(new_backup):
        os.makedirs(new_backup)
    src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal']
    for src_file in src_files:
        if os.path.isfile(src_file):
            shutil.copy2(src_file, os.path.join(new_backup, os.path.basename(src_file)))

    # Remove older backups, keep backups 3 days or at least 3
    backups = []
    for directory in os.listdir(os.path.dirname(new_backup)):
        backup = os.path.join(os.path.dirname(new_backup), directory)
        if os.path.isdir(backup):
            backups.append(backup)
    total_backups = len(backups)
    for backup in backups:
        if total_backups > 3:
            # 259200 s == 3 days; directory name is the backup's epoch time.
            if int(os.path.basename(backup)) < time.time() - 259200:
                for src_file in src_files:
                    b_file = os.path.join(backup, os.path.basename(src_file))
                    if os.path.isfile(b_file):
                        os.remove(b_file)
                os.rmdir(backup)
                total_backups -= 1

    # Register environment settings
    Env.set('encoding', encoding)
    Env.set('app_dir', base_path)
    Env.set('data_dir', data_dir)
    Env.set('log_path', os.path.join(log_dir, 'CouchPotato.log'))
    Env.set('db_path', 'sqlite:///' + db_path)
    Env.set('cache_dir', os.path.join(data_dir, 'cache'))
    Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python')))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default=False, type='bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default=False, type='bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)
    for logger_name in ['gntp', 'migrate']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader only in interactive development runs.
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file (rotating: 500 kB per file, 10 files kept)
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10)
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    # Route Python warnings through the application logger.
    def customwarn(message, category, filename, lineno, file=None, line=None):
        # NOTE(review): a single tuple is passed for four %s placeholders —
        # this likely renders incorrectly or raises at format time; confirm
        # against CPLog's formatting semantics.
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Check if database exists
    db = Env.get('db_path')
    db_exists = os.path.isfile(db_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root=base_path)
    loader.run()

    # Load migrations: put a pre-existing DB under version control if needed,
    # then upgrade it to the latest repository version.
    if db_exists:
        from migrate.versioning.api import version_control, db_version, version, upgrade
        repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')
        latest_db_version = version(repo)
        try:
            current_db_version = db_version(db, repo)
        except:
            # DB not yet under version control; register it at latest and re-read.
            version_control(db, repo, version=latest_db_version)
            current_db_version = db_version(db, repo)
        # Upgrades are skipped in debug mode to protect a dev database.
        if current_db_version < latest_db_version and not debug:
            log.info('Doing database upgrade. From %d to %d', (current_db_version, latest_db_version))
            upgrade(db, repo)

    # Configure Database
    from couchpotato.core.settings.model import setup
    setup()

    # Fill database with needed stuff
    if not db_exists:
        fireEvent('app.initialize', in_order=True)

    # Create app
    from couchpotato import app
    api_key = Env.setting('api_key')
    url_base = '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else ''

    # Basic config
    app.secret_key = api_key
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'host': Env.setting('host', default='0.0.0.0'),
        'port': tryInt(Env.setting('port', default=5000))
    }

    # Static path
    app.static_folder = os.path.join(base_path, 'couchpotato', 'static')
    web.add_url_rule('api/%s/static/<path:filename>' % api_key, endpoint='static', view_func=app.send_static_file)

    # Register modules
    app.register_blueprint(web, url_prefix='%s/' % url_base)
    app.register_blueprint(api, url_prefix='%s/api/%s/' % (url_base, api_key))

    # Some logging and fire load event
    try:
        log.info('Starting server on port %(port)s', config)
    except:
        pass
    fireEventAsync('app.load')

    # Go go go!  Wrap the Flask WSGI app in Tornado.
    web_container = WSGIContainer(app)
    web_container._log = _log
    loop = IOLoop.instance()

    application = Application([
        (r'%s/api/%s/nonblock/(.*)/' % (url_base, api_key), NonBlockHandler),
        (r'.*', FallbackHandler, dict(fallback=web_container)),
    ], log_function=lambda x: None, debug=config['use_reloader'])

    # Retry listening a few times: errno 48 is EADDRINUSE (port busy).
    try_restart = True
    restart_tries = 5
    while try_restart:
        try:
            application.listen(config['port'], config['host'], no_keep_alive=True)
            loop.start()
        except Exception, e:
            try:
                nr, msg = e
                if nr == 48:
                    log.info('Already in use, try %s more time after few seconds', restart_tries)
                    time.sleep(1)
                    restart_tries -= 1
                    if restart_tries > 0:
                        continue
                    else:
                        return
            except:
                pass
            raise
        try_restart = False
def commit_version_control(db_commit):
    """Ensure the database is registered with the migration repository.

    On first run (no repository on disk) the repository is created and the
    database placed under version control from scratch.  When the repository
    already exists, the database is registered at the repository's current
    version — but only when an actual commit object was supplied.
    """
    repo_missing = not os.path.exists(SQLALCHEMY_MIGRATE_REPO)
    if repo_missing:
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    elif db_commit is not None:
        current = api.version(SQLALCHEMY_MIGRATE_REPO)
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            current)
#!flask/bin/python
"""Create all tables and place the main database plus every configured bind
under sqlalchemy-migrate version control."""
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_BINDS
from config import SQLALCHEMY_MIGRATE_REPO
from config import SQLALCHEMY_BINDS_MIGRATE_REPO
from app import db
import os.path


def _ensure_version_control(uri, repo):
    """Create *repo* on first run, otherwise register *uri* at repo's version.

    The original script repeated this if/else block verbatim for the main
    database and for each bind; extracting it removes the duplication.
    """
    if not os.path.exists(repo):
        api.create(repo, 'database repository')
        api.version_control(uri, repo)
    else:
        api.version_control(uri, repo, api.version(repo))


db.create_all()

# Main database.
_ensure_version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)

# Every additional bind gets its own migration repository.
for name, uri in SQLALCHEMY_BINDS.items():
    _ensure_version_control(uri, SQLALCHEMY_BINDS_MIGRATE_REPO[name])
"""Create the flaskprj schema, seed roles, and version-control the database."""
import os
from config import config
from flaskprj.models import db
from migrate.versioning import api
from flaskprj import create_app
from flaskprj.models import Role
from sqlite3 import OperationalError

# FIX: the app was created and its context pushed twice (once here and once
# again inside the try block); a single application context is sufficient.
app = create_app()
app.app_context().push()
db.create_all()
try:
    Role.insert_roles()
except OperationalError:
    # Best-effort seeding: ignore the error when the table isn't ready.
    # NOTE(review): sqlite3.OperationalError may not be the exception
    # SQLAlchemy actually raises (sqlalchemy.exc.OperationalError) — confirm.
    pass

# Hoisted: the active config was looked up twice with the same expression.
_cfg = config[os.getenv('FLASK_CONFIG') or 'default']
sqluri = _cfg.SQLALCHEMY_DATABASE_URI
sqlmr = _cfg.SQLALCHEMY_MIGRATE_REPO
if not os.path.exists(sqlmr):
    api.create(sqlmr, 'database_repository')
    api.version_control(sqluri, sqlmr)
else:
    api.version_control(sqluri, sqlmr, api.version(sqlmr))
# Report the generated admin credentials and the target database.
# (`password` and `app` are defined earlier in this script, outside this view.)
print("AnnotateIt admin password: %s" % password)
email = os.environ.get('ANNOTATEIT_EMAIL', "*****@*****.**")
print("AnnotateIt admin email: %s" % email)
db_url = app.config['SQLALCHEMY_DATABASE_URI']
print("AnnotateIt database URL: %s" % db_url)

print("\nCreating ElasticSearch indices... ")
annotateit.create_indices(app)
print("done.\n")

# NOTE(review): debug is the *string* 'False', not the bool False —
# presumably migrate's shell-style API expects strings; confirm.
migrate_args = dict(url=db_url, debug='False', repository='migration')
try:
    print("Creating SQLite database... ")
    migrate.version_control(**migrate_args)
    print("done.\n")
except:
    # Bare except: treats any failure as "already under version control".
    # NOTE(review): this also swallows unrelated errors — consider narrowing
    # to DatabaseAlreadyControlledError.
    print(" ...already created\n")

# Apply any pending schema migrations.
print("Migrating database... ")
migrate.upgrade(**migrate_args)
print("done.")

# OAuth consumer credentials, overridable via the environment.
ckey = os.environ.get('CONSUMER_KEY', 'annotateit')
csecret = os.environ.get('CONSUMER_SECRET', 'annotate.it.secret')

# Querying requires an application/request context.
with app.test_request_context():
    users_count = User.query.count()
    print("Users in DB: " + str(users_count))
def setup_app(command, conf, vars):
    """Called by ``paster setup-app``.

    This script is responsible for:

        * Creating the initial database schema and loading default data.
        * Executing any migrations necessary to bring an existing database
          up-to-date. Your data should be safe but, as always, be sure to
          make backups before using this.
        * Re-creating the default database for every run of the test suite.

    XXX: All your data will be lost IF you run the test suite with a
         config file named 'test.ini'. Make sure you have this configured
         to a different database than in your usual deployment.ini or
         development.ini file because all database tables are dropped a
         and recreated every time this script runs.

    XXX: If you are upgrading from MediaCore v0.7.2 or v0.8.0, run whichever
         one of these that applies:
           ``python batch-scripts/upgrade/upgrade_from_v072.py deployment.ini``
           ``python batch-scripts/upgrade/upgrade_from_v080.py deployment.ini``

    XXX: For search to work, we depend on a number of MySQL triggers which
         copy the data from our InnoDB tables to a MyISAM table for its
         fulltext indexing capability. Triggers can only be installed with
         a mysql superuser like root, so you must run the setup_triggers.sql
         script yourself.
    """
    if pylons.test.pylonsapp:
        # NOTE: This extra filename check may be unnecessary, the example it is
        # from did not check for pylons.test.pylonsapp. Leaving it in for now
        # to make it harder for someone to accidentally delete their database.
        filename = os.path.split(conf.filename)[-1]
        if filename == 'test.ini':
            log.info('Dropping existing tables...')
            metadata.drop_all(checkfirst=True)
            drop_version_control(conf.local_conf['sqlalchemy.url'],
                                 migrate_repository)
    else:
        # Don't reload the app if it was loaded under the testing environment
        config = load_environment(conf.global_conf, conf.local_conf)

    # Create the migrate_version table if it doesn't exist.
    # If the table doesn't exist, we assume the schema was just setup
    # by this script and therefore must be the latest version.
    latest_version = version(migrate_repository)
    try:
        # Succeeds only on a database that is not yet under version control.
        version_control(conf.local_conf['sqlalchemy.url'],
                        migrate_repository,
                        version=latest_version)
    except DatabaseAlreadyControlledError:
        # Existing, already-controlled database: just bring it up to date.
        log.info('Running any new migrations, if there are any')
        upgrade(conf.local_conf['sqlalchemy.url'],
                migrate_repository,
                version=latest_version)
    else:
        # Fresh database: create the full schema and load default data.
        log.info('Initializing new database with version %r' % latest_version)
        metadata.create_all(bind=DBSession.bind, checkfirst=True)
        add_default_data()
        cleanup_players_table(enabled=True)

    # Save everything, along with the dummy data if applicable
    DBSession.commit()

    log.info('Generating appearance.css from your current settings')
    settings = DBSession.query(Setting.key, Setting.value)
    generate_appearance_css(settings, cache_dir=conf['cache_dir'])

    log.info('Successfully setup')
def add_db(self, version_=None):
    """Register this instance's database with its migration repository.

    Delegates to migrate's ``version_control`` using the URL and repository
    configured on the instance; ``version_`` defaults to the repository's
    notion of "latest" when None.  Returns whatever ``version_control``
    returns.
    """
    return version_control(url=self.db_url,
                           repository=self.repository,
                           version=version_)
def test_version_control(self):
    """version_control must accept the repo path as both str and unicode."""
    repo_path = self.tmp_repos()
    api.create(repo_path, 'temp')
    # Exercise both path flavours, in the same order as before.
    for path_variant in (repo_path, unicode(repo_path)):
        api.version_control('sqlite:///', path_variant)
#! /usr/bin/env python
"""Create the schema and register the database with its migration repository."""
from migrate.versioning import api
from settings import settings
from app import db
import os.path

db.create_all()

repo = settings.sqlalchemy_migrations_repo
uri = settings.database.url
if os.path.exists(repo):
    # Repository already on disk: record the db at the repo's latest version.
    api.version_control(uri, repo, api.version(repo))
else:
    # First run: create the repository, then put the db under version control.
    api.create(repo, 'database repository')
    api.version_control(uri, repo)
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
import app
from models import db, pick_words, word_table, query_all_words
import os.path


@app.before_first_request
def create_database():
    """Create all tables and version-control the db before the first request."""
    db.create_all()
    if os.path.exists(SQLALCHEMY_MIGRATE_REPO):
        # Repository exists: register the db at its current latest version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                            api.version(SQLALCHEMY_MIGRATE_REPO))
    else:
        # First run: create the repository, then register the db with it.
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
"""Create the schema and place the database under migrate version control."""
from migrate.versioning import api
from config import Config
from app import db
import os.path

db.create_all()
if not os.path.exists(Config.SQLALCHEMY_MIGRATE_REPO):
    api.create(Config.SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_REPO)
else:
    # BUG FIX: the original passed the *function object* ``api.version``
    # (never calling it) as the version argument, so migrate received a
    # callable instead of a version number.  Call it with the repo path,
    # matching the sibling setup scripts.
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_REPO,
                        api.version(Config.SQLALCHEMY_MIGRATE_REPO))
"""Create the schema and place the database under migrate version control."""
from migrate.versioning import api
from config import Config
from app import db
import os.path

db.create_all()

# BUG FIX: the original used SQLALCHEMY_DATABASE_URI in every position where
# the *migration repository path* belongs (the os.path.exists check,
# api.create, and the repository argument of version_control), so it tested
# for the database URI on disk and would create a repository named after the
# URI.  Use the migrate repository setting instead.
# NOTE(review): assumes Config defines SQLALCHEMY_MIGRATE_REPO like the other
# setup scripts in this codebase — confirm.
if not os.path.exists(Config.SQLALCHEMY_MIGRATE_REPO):
    api.create(Config.SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_REPO)
else:
    api.version_control(Config.SQLALCHEMY_DATABASE_URI,
                        Config.SQLALCHEMY_MIGRATE_REPO,
                        api.version(Config.SQLALCHEMY_MIGRATE_REPO))
def db_version_control(version=None):
    """Put the default database under migrate's version control.

    Registers the engine with the migration repository at ``version``
    (repository default when None) and returns ``version`` unchanged.
    """
    repo = _find_migrate_repo()
    engine = get_engine()
    versioning_api.version_control(engine, repo, version)
    return version
#!/usr/bin/env python2
"""Create the schema and register the database with its migration repository."""
import os.path
from migrate.versioning import api
from flask_app import db, config

db.create_all()

repo = config['SQLALCHEMY_MIGRATE_REPO']
uri = config['SQLALCHEMY_DATABASE_URI']
if os.path.exists(repo):
    # Repository already on disk: record the db at the repo's latest version.
    api.version_control(uri, repo, api.version(repo))
else:
    # First run: create the repository, then put the db under version control.
    api.create(repo, 'database repository')
    api.version_control(uri, repo)
def _version_control(conf): try: version_control(conf.get("DEFAULT", "db_conn"), conf.get("DEFAULT", "repo")) except DatabaseAlreadyControlledError as e: print e