Code example #1
File: fixtures.py Project: paulorobertocruz/blitzdb
def finalizer():
    del backend.connection
    print("Dropping schema...")
    meta = MetaData(engine)
    meta.reflect()
    meta.drop_all()
    print("Done...")
Code example #2
def test_create_table(engine, bigquery_dataset):
    meta = MetaData()
    Table(
        f"{bigquery_dataset}.test_table_create",
        meta,
        Column("integer_c", sqlalchemy.Integer, doc="column description"),
        Column("float_c", sqlalchemy.Float),
        Column("decimal_c", sqlalchemy.DECIMAL),
        Column("string_c", sqlalchemy.String),
        Column("text_c", sqlalchemy.Text),
        Column("boolean_c", sqlalchemy.Boolean),
        Column("timestamp_c", sqlalchemy.TIMESTAMP),
        Column("datetime_c", sqlalchemy.DATETIME),
        Column("date_c", sqlalchemy.DATE),
        Column("time_c", sqlalchemy.TIME),
        Column("binary_c", sqlalchemy.BINARY),
        bigquery_description="test table description",
        bigquery_friendly_name="test table name",
    )
    meta.create_all(engine)
    meta.drop_all(engine)

    # Test creating tables with declarative_base
    Base = declarative_base()

    class TableTest(Base):
        __tablename__ = f"{bigquery_dataset}.test_table_create2"
        integer_c = Column(sqlalchemy.Integer, primary_key=True)
        float_c = Column(sqlalchemy.Float)

    Base.metadata.create_all(engine)
    Base.metadata.drop_all(engine)
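
The test above receives engine and bigquery_dataset as pytest fixtures; with the sqlalchemy-bigquery dialect, tables are addressed as "dataset.table". A hedged sketch of how such fixtures might look (the project and dataset names here are hypothetical, not from the source):

import pytest
from sqlalchemy import create_engine

@pytest.fixture
def engine():
    return create_engine('bigquery://some-project')  # hypothetical project id

@pytest.fixture
def bigquery_dataset():
    return 'test_dataset'  # hypothetical dataset name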
Code example #3
File: conftest.py Project: VarekBoettcher/namex
def db(app, request):
    """
    Returns session-wide initialised database.
    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:
                _db.engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()
        _db.drop_all()

        # ############################################
        # There are 2 approaches: an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in NAMEX!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        migrate = Migrate(app, _db)
        upgrade()
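
Dropping the foreign-key constraints before drop_all(), as this fixture does, is what allows tables with circular references to be removed on PostgreSQL. A self-contained sketch of the same idea outside Flask, assuming SQLAlchemy 1.4+ (unnamed constraints are skipped, since DROP CONSTRAINT needs a name):

from sqlalchemy import MetaData
from sqlalchemy.schema import DropConstraint

def drop_everything(engine):
    meta = MetaData()
    meta.reflect(bind=engine)
    with engine.begin() as conn:  # commits on success
        for table in meta.tables.values():
            for fk in table.foreign_keys:
                if fk.constraint.name:
                    conn.execute(DropConstraint(fk.constraint))
    meta.drop_all(bind=engine)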
Code example #4
File: fixtures.py Project: asampat3090/blitzdb
    def _sql_backend(request, engine, **kwargs):

        meta = MetaData(engine)
        meta.reflect()
        meta.drop_all()
        # we enable foreign key checks for SQLite
        if str(engine.url).startswith('sqlite://'):
            engine.connect().execute('pragma foreign_keys=ON')

        if 'ondelete' not in kwargs:
            kwargs['ondelete'] = 'CASCADE'
        backend = SqlBackend(engine=engine, **kwargs)
        backend.init_schema()
        backend.create_schema()

        def finalizer():
            backend.rollback()
            del backend.connection
            print("Dropping schema...")
            # we disable foreign key checks for SQLite (dropping tables with circular foreign keys won't work otherwise)
            if str(engine.url).startswith('sqlite://'):
                engine.connect().execute('pragma foreign_keys=OFF')
            meta = MetaData(engine)
            meta.reflect()
            meta.drop_all()
            print("Done...")

        request.addfinalizer(finalizer)

        return backend
Code example #5
File: db.py Project: acmiyaguchi/buildbotve
    def _clean_database(self):
        log.msg("cleaning database %s" % self.db_url)
        engine = sqlalchemy.create_engine(self.db_url)

        meta = MetaData()
        
        # there are some tables for which reflection sometimes fails, but since
        # we're just dropping them, we don't need actual schema - a fake
        # table will do the trick
        for table in [ 'buildrequests', 'builds',
                'buildset_properties', 'buildsets', 'change_properties',
                'change_files', 'change_links',
                'changes', 'patches', 'sourcestamp_changes', 'sourcestamps',
                'scheduler_changes', 'scheduler_upstream_buildsets',
                'schedulers' ]:
            sqlalchemy.Table(table, meta,
                    sqlalchemy.Column('tmp', sqlalchemy.Integer))

        # load any remaining tables
        meta.reflect(bind=engine)

        # and drop them, if they exist
        meta.drop_all(bind=engine, checkfirst=True)

        engine.dispose()
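
The fake-table trick works because drop_all() only needs a table's name, not its real schema. A minimal sketch of the trick in isolation, assuming an existing engine; checkfirst=True skips names that aren't actually present:

import sqlalchemy

meta = sqlalchemy.MetaData()
sqlalchemy.Table('legacy_table', meta,
                 sqlalchemy.Column('tmp', sqlalchemy.Integer))
meta.drop_all(bind=engine, checkfirst=True)  # assumes `engine` exists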
Code example #6
File: conftest.py Project: stikks/Feature-Request
def app(request):
    """
    flask test application
    :param request:
    :return:
    """
    # initialize flask application
    app = create_app('test_app', config.TestConfig)

    # initialize database
    with app.app_context():
        metadata = MetaData(db.engine)
        metadata.reflect()

        # drop existing tables
        metadata.drop_all()

        # create tables based on the models
        db.create_all()

        # insert test data
        create_dummy_data()

        app = FlaskPytest(app)

        yield app

        # remove temporary tables in database
        request.addfinalizer(metadata.drop_all)
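
This fixture mixes yield with request.addfinalizer; in modern pytest everything after the yield already runs as teardown, so the explicit finalizer is redundant. A hedged rewrite using only yield (create_app, config, db, FlaskPytest, and create_dummy_data are names taken from the example above):

import pytest
from sqlalchemy import MetaData

@pytest.fixture
def app():
    app = create_app('test_app', config.TestConfig)
    with app.app_context():
        metadata = MetaData()
        metadata.reflect(bind=db.engine)
        metadata.drop_all(bind=db.engine)
        db.create_all()
        create_dummy_data()
        yield FlaskPytest(app)
        metadata.drop_all(bind=db.engine)  # runs as teardown after the test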
Code example #7
def test_create_table(engine):
    meta = MetaData()
    table = Table('test_pybigquery.test_table_create',
                  meta,
                  Column('integer_c',
                         sqlalchemy.Integer,
                         doc="column description"),
                  Column('float_c', sqlalchemy.Float),
                  Column('decimal_c', sqlalchemy.DECIMAL),
                  Column('string_c', sqlalchemy.String),
                  Column('text_c', sqlalchemy.Text),
                  Column('boolean_c', sqlalchemy.Boolean),
                  Column('timestamp_c', sqlalchemy.TIMESTAMP),
                  Column('datetime_c', sqlalchemy.DATETIME),
                  Column('date_c', sqlalchemy.DATE),
                  Column('time_c', sqlalchemy.TIME),
                  Column('binary_c', sqlalchemy.BINARY),
                  bigquery_description="test table description",
                  bigquery_friendly_name="test table name")
    meta.create_all(engine)
    meta.drop_all(engine)

    # Test creating tables with declarative_base
    Base = declarative_base()

    class TableTest(Base):
        __tablename__ = 'test_pybigquery.test_table_create2'
        integer_c = Column(sqlalchemy.Integer, primary_key=True)
        float_c = Column(sqlalchemy.Float)

    Base.metadata.create_all(engine)
    Base.metadata.drop_all(engine)
Code example #8
File: __init__.py Project: gitgovgithub/ekklesia
    def open_db(self, engine, mode='open'):
        "mode: create, open, drop"
        from sqlalchemy.ext.declarative import declarative_base, DeferredReflection
        from sqlalchemy.schema import MetaData, DropConstraint
        from sqlalchemy import event, Table
        if mode != 'open':
            # Clear out any existing tables
            metadata = MetaData(engine)
            metadata.reflect()
            if engine.name != 'sqlite':
                for table in metadata.tables.values():
                    for fk in table.foreign_keys:
                        engine.execute(DropConstraint(fk.constraint))
            metadata.drop_all(engine)
            if mode == 'drop': return
        self.Base = declarative_base(cls=DeferredReflection)
        self.Base.metadata.bind = engine
        if mode == 'open':

            @event.listens_for(Table, "column_reflect")
            def column_reflect(inspector, table, column_info):
                if table.metadata is self.Base.metadata:
                    if self.column_map and table.name in self.column_map:
                        column_info['key'] = self.column_map[table.name][
                            column_info['name']]
                    else:
                        column_info['key'] = column_info['name']

        self.declare(reflect=mode == 'open')
        if mode == 'create': self.Base.metadata.create_all(engine)
        from sqlalchemy.orm import sessionmaker
        self.Base.prepare(engine)
        self.reflect_classes()
        Session = sessionmaker(bind=engine)
        self.session = Session()
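
The column_reflect listener above remaps reflected column names to ORM attribute keys via a per-table column_map. A minimal standalone sketch of the same hook, with a hypothetical prefix mapping in place of the lookup:

from sqlalchemy import Table, event

@event.listens_for(Table, "column_reflect")
def _rename_column(inspector, table, column_info):
    # map the raw database column name to a friendlier attribute key
    column_info['key'] = 'attr_' + column_info['name']  # hypothetical rule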
Code example #9
def setupPackage():
    os.environ['MONGO_URI'] = 'mongodb://localhost'
    os.environ['MONGO_DB_NAME'] = 'royal_example'
    os.environ['MONGO_DB_PREFIX'] = ''

    # sqla extension setup.
    global engine

    alembic_config = Config()
    alembic_config.set_main_option('script_location', 'example/ext/sqla/db')
    alembic_config.set_main_option('sqlalchemy.url', mysql_uri)

    engine = create_engine(mysql_uri)

    try:
        command.downgrade(alembic_config, 'base')
    except Exception:
        log.exception("Migration downgrade failed, clearing all tables")
        metadata = MetaData(engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:
                engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()

    command.upgrade(alembic_config, 'head')
Code example #10
File: __init__.py Project: hadrien/pyramid_royal
def setupPackage():
    os.environ['MONGO_URI'] = 'mongodb://localhost'
    os.environ['MONGO_DB_NAME'] = 'royal_example'
    os.environ['MONGO_DB_PREFIX'] = ''

    # sqla extension setup.
    global engine

    alembic_config = Config()
    alembic_config.set_main_option('script_location',
                                   'example/ext/sqla/db')
    alembic_config.set_main_option('sqlalchemy.url', mysql_uri)

    engine = create_engine(mysql_uri)

    try:
        command.downgrade(alembic_config, 'base')
    except Exception:
        log.exception("Migration downgrade failed, clearing all tables")
        metadata = MetaData(engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:
                engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()

    command.upgrade(alembic_config, 'head')
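
If the reflect-and-drop fallback above were to leave Alembic's alembic_version table behind, a stale revision row could block the subsequent upgrade. A hedged sketch of resetting the bookkeeping first, reusing the alembic_config built above:

from alembic import command

command.stamp(alembic_config, 'base')    # clear the recorded revision
command.upgrade(alembic_config, 'head')  # rebuild the schema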
Code example #11
File: db.py Project: allannss/buildbot
    def __thd_clean_database(self, conn):
        # drop the known tables
        model.Model.metadata.drop_all(bind=conn, checkfirst=True)

        # see if we can find any other tables to drop
        meta = MetaData(bind=conn)
        meta.reflect()
        meta.drop_all()
Code example #12
File: db.py Project: paroga/buildbot
    def __thd_clean_database(self, conn):
        # drop the known tables
        model.Model.metadata.drop_all(bind=conn, checkfirst=True)

        # see if we can find any other tables to drop
        meta = MetaData(bind=conn)
        meta.reflect()
        meta.drop_all()
Code example #13
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:  # pylint: disable=invalid-name
                _db.engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()
        _db.drop_all()

        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name, ) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # pylint: disable=broad-except
                print(f'Error: {err}')
        sess.commit()

        # ############################################
        # There are 2 approaches: an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in selfservice_api!!

        # even though this isn't referenced directly,
        # it sets up the internal configs that upgrade needs
        Migrate(app, _db)
        upgrade()

        admin_user = {
            'sub': '65a62428-6713-4e7d-8f12-99e56de58386',
            'given_name': 'admin',
            'family_name': 'ss',
            'email': '*****@*****.**'
        }
        sess.execute(
            text(
                'INSERT INTO public."user"(' +
                'created, modified, email, phone, first_name, last_name, oauth_id)'
                + "VALUES (now(), null, '" + admin_user['email'] + "', " +
                "'123456789', '" + admin_user['given_name'] + "', " + " '" +
                admin_user['family_name'] + "', '" + admin_user['sub'] +
                "'); "))
        sess.commit()

        return _db
Code example #14
def drop_all_tables():
    """
    remove all tables, not just those with metadata present in
    this package
    """
    Base.metadata.drop_all(bind=engine)
    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(bind=engine)
Code example #15
File: conftest.py Project: sumesh-aot/sbc-auth
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # clear all custom views
        views_sql = """select table_name from INFORMATION_SCHEMA.views 
                    WHERE table_schema = ANY (current_schemas(false))

                    """
        sess = _db.session()
        for view in [name for (name, ) in sess.execute(text(views_sql))]:
            try:
                sess.execute(text('DROP VIEW public.%s ;' % view))
                print('DROP VIEW public.%s ' % view)
            except Exception as err:  # pylint: disable=broad-except
                print(f'Error: {err}')
        sess.commit()

        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:  # pylint: disable=invalid-name
                _db.engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()
        _db.drop_all()

        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name, ) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # pylint: disable=broad-except
                print(f'Error: {err}')
        sess.commit()

        # ############################################
        # There are 2 approaches: an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in auth_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        Migrate(app, _db)
        upgrade()

        return _db
Code example #16
File: fixtures.py Project: asampat3090/blitzdb
def finalizer():
    backend.rollback()
    del backend.connection
    print("Dropping schema...")
    # we disable foreign key checks for SQLite (dropping tables with circular
    # foreign keys won't work otherwise)
    if str(engine.url).startswith('sqlite://'):
        engine.connect().execute('pragma foreign_keys=OFF')
    meta = MetaData(engine)
    meta.reflect()
    meta.drop_all()
    print("Done...")
Code example #17
    def __thd_clean_database(self, conn):
        # drop the known tables, although sometimes this misses dependencies
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # see if we can find any other tables to drop
        meta = MetaData(bind=conn)
        meta.reflect()
        meta.drop_all()
Code example #18
File: db.py Project: Acidburn0zzz/buildbot
    def __thd_clean_database(self, conn):
        # drop the known tables, although sometimes this misses dependencies
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # see if we can find any other tables to drop
        meta = MetaData(bind=conn)
        meta.reflect()
        meta.drop_all()
Code example #19
def initialize_db(config_uri, options={}):

    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    # delete all tables

    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)

    upgrade_db(config_uri)
Code example #20
def setup_clean_db(app):
    # Clear out any existing tables
    from subscity.models.base import DB
    # db_name = os.environ.get('DN_NAME')
    engine = DB.get_engine(app)
    metadata = MetaData(engine)
    metadata.reflect()
    # for table in metadata.tables.values():
    #     for fk in table.foreign_keys:
    #         engine.execute(DropConstraint(fk.constraint))
    metadata.drop_all()
    apply_migrations()
Code example #21
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:  # pylint: disable=invalid-name
                _db.engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()
        _db.drop_all()

        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name, ) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # NOQA pylint: disable=broad-except
                print(f'Error: {err}')
        sess.commit()

        # ############################################
        # There are 2 approaches: an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in legal_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        import sys
        migrations_path = [
            folder for folder in sys.path if 'pay-api/pay-api' in folder
        ]
        if len(migrations_path) > 0:
            migrations_path = migrations_path[0].replace(
                '/pay-api/src', '/pay-api/migrations')
        else:
            # Fix for Windows.
            migrations_path = os.path.abspath('../../pay-api/migrations')

        Migrate(app, _db, directory=migrations_path)
        upgrade()

        return _db
Code example #22
def initialize_db(config_uri, options={}):

    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    # delete all tables

    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)


    upgrade_db(config_uri)
Code example #23
File: test_dao.py Project: jaunis/xivo-dao
def _init_tables(engine):
    global _tables
    logger.debug("Cleaning tables")
    metadata = MetaData(bind=engine)
    metadata.reflect()
    logger.debug("drop all tables")
    metadata.drop_all()
    logger.debug("create all tables")
    Base.metadata.create_all(bind=engine)
    engine.dispose()
    logger.debug("Tables cleaned")
    metadata = MetaData(bind=engine)
    metadata.reflect()
    _tables = [table for table in metadata.tables.keys()]
Code example #24
def test_create_table(engine, inspector):
    meta = MetaData()
    Table('test_pybigquery.test_table_create',
          meta,
          Column('integer_c', sqlalchemy.Integer, doc="column description"),
          Column('float_c', sqlalchemy.Float),
          Column('decimal_c', sqlalchemy.DECIMAL),
          Column('string_c', sqlalchemy.String),
          Column('text_c', sqlalchemy.Text),
          Column('boolean_c', sqlalchemy.Boolean),
          Column('timestamp_c', sqlalchemy.TIMESTAMP),
          Column('datetime_c', sqlalchemy.DATETIME),
          Column('date_c', sqlalchemy.DATE),
          Column('time_c', sqlalchemy.TIME),
          Column('binary_c', sqlalchemy.BINARY),
          bigquery_description="test table description",
          bigquery_friendly_name="test table name",
          bigquery_cluster_by=["integer_c", "string_c"],
          bigquery_partition_by="DATE(timestamp_c)",
          bigquery_require_partition_filtering=True)
    meta.create_all(engine)

    # Validate index creation
    indexes = inspector.get_indexes('test_pybigquery.test_table_create')
    assert len(indexes) == 2
    assert indexes[0] == {
        'name': 'partition',
        'column_names': ['timestamp_c'],
        'unique': False
    }
    assert indexes[1] == {
        'name': 'clustering',
        'column_names': ['integer_c', 'string_c'],
        'unique': False
    }

    meta.drop_all(engine)

    # Test creating tables with declarative_base
    Base = declarative_base()

    class TableTest(Base):
        __tablename__ = 'test_pybigquery.test_table_create2'
        integer_c = Column(sqlalchemy.Integer, primary_key=True)
        float_c = Column(sqlalchemy.Float)

    Base.metadata.create_all(engine)
    Base.metadata.drop_all(engine)
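
The inspector fixture used above is presumably derived from the engine; with SQLAlchemy that is a single call. A sketch, assuming the engine fixture from the same module:

import sqlalchemy

inspector = sqlalchemy.inspect(engine)  # returns an Inspector for the engine
indexes = inspector.get_indexes('test_pybigquery.test_table_create')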
Code example #25
File: sqlalchemy.py Project: spoqa/ormeasy
def test_connection(
    ctx: object,
    metadata: MetaData,
    engine: Engine,
    real_transaction: bool = False,
    ctx_connection_attribute_name: str = '_test_fx_connection',
) -> typing.Generator:
    """Joining a SQLAlchemy session into an external transaction for test suit.

    :param object ctx: Context object to inject test connection into attribute
    :param MetaData metadata: SQLAlchemy schema metadata
    :param bool real_transaction: (Optional) Whether to use engine as connection directly
                                  or make separate connection. Default: `False`
    :param str ctx_connection_attribute_name: (Optional) Attribute name for injecting
                                              test connection to the context object
                                              Default: `'_test_fx_connection'`

    .. seealso::

       Documentation of the SQLAlchemy session used in test suites.
          <http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#joining-a-session-into-an-external-transaction-such-as-for-test-suites>

    """  # noqa
    if real_transaction:
        metadata.create_all(engine)
        try:
            yield engine
        finally:
            metadata.drop_all(engine, checkfirst=True)
        return
    connection = engine.connect()
    try:
        metadata.drop_all(connection, checkfirst=True)
        transaction = connection.begin()
        try:
            metadata.create_all(bind=connection)
            setattr(ctx, ctx_connection_attribute_name, connection)
            try:
                yield connection
            finally:
                delattr(ctx, ctx_connection_attribute_name)
        finally:
            transaction.rollback()
    finally:
        connection.close()
    engine.dispose()
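
Because test_connection is a generator, the caller has to drive it; one plausible way is delegating to it from a pytest fixture with yield from. A hedged usage sketch (the ctx, metadata, and engine fixtures are assumed to be defined elsewhere):

import pytest

@pytest.fixture
def connection(ctx, metadata, engine):
    # each test runs inside a transaction that is rolled back afterwards
    yield from test_connection(ctx, metadata, engine)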
Code example #26
File: ivc_deploy.py Project: dulton/IVR
def initialize_db(config_uri):

    # setup_logging(config_uri)
    # settings = get_appsettings(config_uri, options=options)

    config = configparser.ConfigParser()
    config.read(config_uri)
    settings = dict(config.items("alembic"))

    engine = engine_from_config(settings, 'sqlalchemy.')
    # delete all tables

    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)

    upgrade_db(config_uri)
Code example #27
File: ivc_deploy.py Project: OpenSight/IVR
def initialize_db(config_uri):

    # setup_logging(config_uri)
    # settings = get_appsettings(config_uri, options=options)

    config = configparser.ConfigParser()
    config.read(config_uri)
    settings = dict(config.items("alembic"))

    engine = engine_from_config(settings, 'sqlalchemy.')
    # delete all tables

    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)


    upgrade_db(config_uri)
Code example #28
File: conftest.py Project: vysakh-menon-aot/lear
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        for table in metadata.tables.values():
            for fk in table.foreign_keys:  # pylint: disable=invalid-name
                with suppress(Exception):
                    _db.engine.execute(DropConstraint(fk.constraint))
        with suppress(Exception):
            metadata.drop_all()
        with suppress(Exception):
            _db.drop_all()

        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name, ) in sess.execute(text(sequence_sql))]:
            with suppress(Exception):
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
        sess.commit()

        # ############################################
        # There are 2 approaches: an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in legal_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        Migrate(app, _db)
        upgrade()

        return _db
Code example #29
File: fixtures.py Project: paulorobertocruz/blitzdb
    def _sql_backend(request, engine):

        meta = MetaData(engine)
        meta.reflect()
        meta.drop_all()

        backend = SqlBackend(engine=engine)
        backend.init_schema()
        backend.create_schema()

        def finalizer():
            del backend.connection
            print("Dropping schema...")
            meta = MetaData(engine)
            meta.reflect()
            meta.drop_all()
            print("Done...")

        request.addfinalizer(finalizer)

        return backend
Code example #30
    def __thd_clean_database(self, conn):
        # drop the known tables, although sometimes this misses dependencies
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # see if we can find any other tables to drop
        try:
            meta = MetaData(bind=conn)
            meta.reflect()
            meta.drop_all()
        except Exception:
            # sometimes this goes badly wrong; being able to see the schema
            # can be a big help
            if conn.engine.dialect.name == 'sqlite':
                r = conn.execute("select sql from sqlite_master "
                                 "where type='table'")
                log.msg("Current schema:")
                for row in r.fetchall():
                    log.msg(row.sql)
            raise
Code example #31
File: db.py Project: AnyBucket/buildbot
    def __thd_clean_database(self, conn):
        # drop the known tables, although sometimes this misses dependencies
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # see if we can find any other tables to drop
        try:
            meta = MetaData(bind=conn)
            meta.reflect()
            meta.drop_all()
        except Exception as e:
            # sometimes this goes badly wrong; being able to see the schema
            # can be a big help
            if conn.engine.dialect.name == 'sqlite':
                r = conn.execute("select sql from sqlite_master "
                                 "where type='table'")
                log.msg("Current schema:")
                for row in r.fetchall():
                    log.msg(row.sql)
            raise e
Code example #32
    def do_cleardb(self, s):
        '''
        cleardb

        Clears out the main database (not the settings db).  This should only
        be done while dofler is not running!
        '''
        from sqlalchemy.engine import reflection
        from sqlalchemy.schema import (MetaData, Table, DropTable,
                                       ForeignKeyConstraint, DropConstraint)
        conn = db.engine.connect()
        trans = conn.begin()
        inspector = reflection.Inspector.from_engine(db.engine)
        metadata = MetaData(db.engine)
        metadata.reflect()
        metadata.drop_all()
        trans.commit()
        conn.close()
        vuln_db = '/opt/pvs/var/pvs/db/reports.db'
        if os.path.exists(vuln_db):
            os.system('service pvs stop')
            os.remove(vuln_db)
            os.system('service pvs start')
Code example #33
File: cli.py Project: blha303/DoFler
    def do_cleardb(self, s):
        '''
        cleardb

        Clears out the main database (not the settings db).  This should only
        be done while dofler is not running!
        '''
        from sqlalchemy.engine import reflection
        from sqlalchemy.schema import (MetaData, Table, DropTable, 
                                       ForeignKeyConstraint, DropConstraint)
        conn = db.engine.connect()
        trans = conn.begin()
        inspector = reflection.Inspector.from_engine(db.engine)
        metadata = MetaData(db.engine)
        metadata.reflect()
        metadata.drop_all()
        trans.commit()
        conn.close()
        vuln_db = '/opt/pvs/var/pvs/db/reports.db'
        if os.path.exists(vuln_db):
            os.system('service pvs stop')
            os.remove(vuln_db)
            os.system('service pvs start')
Code example #34
File: db.py Project: LeadsPlus/OpenClimateGIS
class IndexTime(NcBase, Base):
#    __tablename__ = 'nc_index_time'
    dataset_id = Column(ForeignKey(Dataset.id))
    index = Column(Integer)
    lower = Column(DateTime)
    value = Column(DateTime)
    upper = Column(DateTime)
    
    @declared_attr
    def dataset(cls):
        return(relationship(Dataset,backref=cls.__tablename__))
    
    
class IndexSpatial(IndexBase,Base):
#    __tablename__ = 'nc_index_spatial'
#    id = Column(Integer,primary_key=True)
#    dataset_id = Column(ForeignKey(Dataset.id))
    row = Column(Integer)
    col = Column(Integer)
    geom = GeometryColumn(Polygon)
    centroid = GeometryColumn(Point)
    
#    dataset = relationship(Dataset,backref=__tablename__)
GeometryDDL(IndexSpatial.__table__)
    

try:
    metadata.drop_all()
finally:
    metadata.create_all()
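
The trailing try/finally above recreates the schema even when the drop fails. A slightly more defensive sketch of the same reset, assuming metadata is bound to an engine as in this example's vintage of SQLAlchemy, with the existence checks made explicit:

metadata.drop_all(checkfirst=True)    # don't fail on tables that never existed
metadata.create_all(checkfirst=True)  # create only what is missing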
Code example #35
def analyze(data,drop=False,load=False):
    
    engine = create_engine('sqlite:////home/bkoziol/tmp/profiling.sqlite')
    metadata = MetaData(bind=engine)
    Base = declarative_base(metadata=metadata)
    Session = sessionmaker(bind=engine)
    
    class SqlBase(object):
    
        @classmethod
        def get_or_create(cls,s,kwds,commit=False):
            qq = s.query(cls).filter_by(**kwds)
            try:
                obj = qq.one()
            except NoResultFound:
                obj = cls(**kwds)
                s.add(obj)
            if commit: s.commit()
            return(obj)
    
    class Scenario(SqlBase,Base):
        __tablename__ = 'scenario'
        sid = Column(Integer,primary_key=True)
        name = Column(String,nullable=False)
       
    class Function(SqlBase,Base):
        __tablename__ = 'function'
        fid = Column(Integer,primary_key=True)
        name = Column(String,nullable=False)
        
    class FileName(SqlBase,Base):
        __tablename__ = 'filename'
        fnid = Column(Integer,primary_key=True)
        name = Column(String,nullable=False)
        
        
    class Profile(SqlBase,Base):
        __tablename__ = 'profile'
        id = Column(Integer,primary_key=True)
        sid = Column(Integer,ForeignKey(Scenario.sid))
        fid = Column(Integer,ForeignKey(Function.fid))
        fnid = Column(Integer,ForeignKey(FileName.fnid),nullable=True)
        ncalls = Column(Integer,nullable=False)
        tottime = Column(Float,nullable=False)
        percall = Column(Float,nullable=False)
        cumtime = Column(Float,nullable=False)

        filename = relationship(FileName)
        scenario = relationship(Scenario)
        function = relationship(Function)
        
        def report(self,total):
            msg = [self.scenario.name]
            try:
                msg.append(self.filename.name)
            except AttributeError:
                msg.append('')
            msg.append(self.function.name)
            msg.append(str(self.tottime))
            msg.append(str(self.tottime/float(total)))
            msg.append(str(total))
            return(','.join(msg))
        
        @staticmethod
        def report_headers():
            return('scenario,filename,function,tottime,perctime,exetime')
    
    if load:
        if drop: metadata.drop_all(checkfirst=True)
        metadata.create_all(checkfirst=True)
        
        s = Session()
        for kwds in data:
            with open(os.path.join('/home/bkoziol/tmp',kwds['name']+'.txt'),'r') as out:
                txt = out.read()
            profiles = re.split('finished ::.*',txt)
            profiles = profiles[0:-1]
            for profile in profiles:
                profile = profile.strip()
                scenario_name = re.match('starting :: (.*)',profile).group(1)
                scenario = Scenario.get_or_create(s,dict(name=scenario_name))
                table = re.match('.*lineno\(function\)(.*)',profile,flags=re.DOTALL).group(1).strip()
                lines = re.split('\n',table)
                for line in lines:
                    line = line.strip()
        #            print line
                    elements = re.split(' {2,}',line)
                    if '{' in line and '}' in line:
                        filename = None
                    else:
                        try:
                            filename_name = re.match('.* (.*):.*',elements[4]).group(1)
                        except:
                            import ipdb;ipdb.set_trace()
                        filename = FileName.get_or_create(s,dict(name=filename_name))
                    rm = re.match('.*\((.*)\)|.*{(.*)}',elements[4])
                    if rm.group(1) is None:
                        function_name = rm.group(2)
                    else:
                        function_name = rm.group(1)
                    function = Function.get_or_create(s,dict(name=function_name))
                    obj = Profile()
                    obj.ncalls = elements[0]
                    obj.tottime = elements[1]
                    obj.percall = elements[2]
                    obj.cumtime = elements[3]
                    obj.filename = filename
                    obj.scenario = scenario
                    obj.function = function
                    s.add(obj)
        s.commit()
    else:
        s = Session()
        print(Profile.report_headers())
        for scenario in s.query(Scenario):
            ## get the total time
            total = s.query(func.sum(Profile.tottime)).filter_by(scenario=scenario)
            total = total.one()[0]
            ## get the top ten time things
            top = s.query(Profile).filter_by(scenario=scenario)
            top = top.order_by(Profile.tottime.desc())
            top = top.limit(10)
            for obj in top:
                print(obj.report(total))
        import ipdb;ipdb.set_trace()
Code example #36
File: db.py Project: wlmgithub/buildbot
    def __thd_clean_database(self, conn):
        # In general it's nearly impossible to do "bullet proof" database
        # cleanup with SQLAlchemy that will work on a range of databases
        # and their configurations.
        #
        # Following approaches were considered.
        #
        # 1. Drop Buildbot Model schema:
        #
        #     model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        #
        # Dropping the schema from the model is a correct, working operation
        # only if the database schema corresponds exactly to the model schema.
        #
        # If it does not (e.g. a migration script failed, or migration
        # produced an old version of the model), then some tables outside the
        # model schema may be present, and they may reference tables in the
        # model schema. In that case either dropping the model schema will
        # fail (if the database enforces referential integrity, e.g.
        # PostgreSQL), or dropping the leftover tables in the code below will
        # fail (if the database allows removing tables that other tables
        # still reference, e.g. SQLite).
        #
        # 2. Introspect database contents and drop found tables.
        #
        #     meta = MetaData(bind=conn)
        #     meta.reflect()
        #     meta.drop_all()
        #
        # May fail if the schema contains reference cycles (and the Buildbot
        # schema has them). Reflection loses the metadata describing how
        # reference cycles can be torn apart (e.g. use_alter=True).
        # Introspection may also fail if the schema has invalid references
        # (e.g. possible in SQLite).
        #
        # 3. What is actually needed here is accurate code, for each engine
        # and each engine configuration, that will drop all tables, indexes,
        # constraints, etc. in the proper order or in a proper way (using
        # table alteration, or DROP TABLE ... CASCADE, etc.).
        #
        # Conclusion: use approach 2, manually tearing apart the known
        # reference cycles.

        # pylint: disable=too-many-nested-blocks

        try:
            meta = MetaData(bind=conn)

            # Reflect database contents. May fail, e.g. if table references
            # non-existent table in SQLite.
            meta.reflect()

            # Table.foreign_key_constraints introduced in SQLAlchemy 1.0.
            if sa_version()[:2] >= (1, 0):
                # Restore `use_alter` settings to break known reference cycles.
                # Main goal of this part is to remove SQLAlchemy warning
                # about reference cycle.
                # Looks like it's OK to do it only with SQLAlchemy >= 1.0.0,
                # since it's not issued in SQLAlchemy == 0.8.0

                # List of reference links (table_name, ref_table_name) that
                # should be broken by adding use_alter=True.
                table_referenced_table_links = [('buildsets', 'builds'),
                                                ('builds', 'buildrequests')]
                for table_name, ref_table_name in table_referenced_table_links:
                    if table_name in meta.tables:
                        table = meta.tables[table_name]
                        for fkc in table.foreign_key_constraints:
                            if fkc.referred_table.name == ref_table_name:
                                fkc.use_alter = True

            # Drop all reflected tables and indices. May fail, e.g. if
            # SQLAlchemy wouldn't be able to break circular references.
            # SQLAlchemy's FK support with SQLite is not yet perfect, so we
            # must deactivate foreign keys during this operation, even though
            # we did our best to use use_alter.
            with withoutSqliteForeignKeys(conn.engine, conn):
                meta.drop_all()

        except Exception:
            # sometimes this goes badly wrong; being able to see the schema
            # can be a big help
            if conn.engine.dialect.name == 'sqlite':
                r = conn.execute("select sql from sqlite_master "
                                 "where type='table'")
                log.msg("Current schema:")
                for row in r.fetchall():
                    log.msg(row.sql)
            raise
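
withoutSqliteForeignKeys is a Buildbot helper whose implementation isn't shown here; judging only by how it is used, something like the following context manager would fit (a sketch under that assumption, using the old-style raw conn.execute seen in this example):

from contextlib import contextmanager

@contextmanager
def without_sqlite_foreign_keys(engine, conn):
    is_sqlite = engine.dialect.name == 'sqlite'
    if is_sqlite:
        conn.execute('pragma foreign_keys=OFF')
    try:
        yield
    finally:
        if is_sqlite:
            conn.execute('pragma foreign_keys=ON')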
Code example #37
File: __init__.py Project: Kozea/pypet
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        engine = create_engine('postgresql://postgres@localhost/pypet')
        self.metadata = MetaData(bind=engine)

        self.store_table = Table('store', self.metadata,
                Column('store_id', types.Integer, primary_key=True),
                Column('store_name', types.String),
                Column('country_id', types.Integer,
                    ForeignKey('country.country_id')))

        self.country_table = Table('country', self.metadata,
                Column('country_id', types.Integer, primary_key=True),
                Column('country_name', types.String),
                Column('region_id', types.Integer,
                    ForeignKey('region.region_id')))

        self.region_table = Table('region', self.metadata,
                Column('region_id', types.Integer, primary_key=True),
                Column('region_name', types.String))

        self.product_table = Table('product', self.metadata,
                Column('product_id', types.Integer, primary_key=True),
                Column('product_name', types.String),
                Column('product_category_id', types.Integer,
                   ForeignKey('product_category.product_category_id')))

        self.product_category_table = Table('product_category', self.metadata,
                Column('product_category_id', types.Integer, primary_key=True),
                Column('product_category_name', types.String))

        self.facts_table = Table('facts_table', self.metadata,
                Column('store_id', types.Integer,
                    ForeignKey('store.store_id')),
                Column('date', types.Date),
                Column('product_id', types.Integer,
                    ForeignKey('product.product_id')),
                Column('price', types.Float),
                Column('qty', types.Integer))

        agg_name = ('agg_time_month_product_product_store_store'
                    '_Unit Price_Quantity')
        self.agg_by_month_table = Table(agg_name,
                self.metadata,
                Column('store_store', types.Integer,
                    ForeignKey('store.store_id')),
                Column('time_month', types.Date),
                Column('product_product', types.Integer,
                    ForeignKey('product.product_id')),
                Column('Unit Price', types.Float),
                Column('Quantity', types.Integer),
                Column('fact_count', types.Integer))
        agg_name = ('agg_time_year_store_country_product_product'
                    '_Unit Price_Quantity')

        self.agg_by_year_country_table = Table(agg_name,
                self.metadata,
                Column('store_country', types.Integer,
                    ForeignKey('country.country_id')),
                Column('time_year', types.Date),
                Column('product_product', types.Integer,
                    ForeignKey('product.product_id')),
                Column('Unit Price', types.Float),
                Column('Quantity', types.Integer),
                Column('fact_count', types.Integer))

        self.metadata.create_all()

        self.store_dim = Dimension('store', [
            Hierarchy('default', [
                Level('region', self.region_table.c.region_id,
                    self.region_table.c.region_name),
                Level('country', self.country_table.c.country_id,
                    self.country_table.c.country_name),
                Level('store', self.store_table.c.store_id,
                    self.store_table.c.store_name)])])

        self.product_dim = Dimension('product', [
            Hierarchy('default', [
                Level('category',
                    self.product_category_table.c.product_category_id,
                    self.product_category_table.c
                    .product_category_name),
                Level('product', self.product_table.c.product_id,
                    self.product_table.c.product_name)])])

        self.time_dim = TimeDimension('time', self.facts_table.c.date,
                ['year', 'month', 'day'])

        unit_price = Measure('Unit Price', self.facts_table.c.price,
                aggregates.avg)
        quantity = Measure('Quantity', self.facts_table.c.qty, aggregates.sum)
        price = ((unit_price.aggregate_with(None) *
                quantity.aggregate_with(None))
                .aggregate_with(aggregates.sum).label('Price'))

        self.cube = Cube(self.metadata, self.facts_table, [self.store_dim,
            self.product_dim, self.time_dim], [unit_price, quantity, price],
            fact_count_column=self.facts_table.c.qty)

        self.region_table.insert({'region_id': 1, 'region_name':
            'Europe'}).execute()

        self.country_table.insert({'region_id': 1, 'country_name':
            'France', 'country_id': 1}).execute()

        self.country_table.insert({'region_id': 1, 'country_name':
            'Germany', 'country_id': 2}).execute()

        self.region_table.insert({'region_id': 2, 'region_name':
            'America'}).execute()

        self.country_table.insert({'region_id': 2, 'country_name':
            'USA', 'country_id': 3}).execute()

        self.country_table.insert({'region_id': 2, 'country_name':
            'Canada', 'country_id': 4}).execute()

        self.store_table.insert({
            'store_id': 1,
            'store_name': 'ACME.fr',
            'country_id': 1}).execute()

        self.store_table.insert({
            'store_id': 2,
            'store_name': 'ACME.de',
            'country_id': 2}).execute()

        self.store_table.insert({
            'store_id': 3,
            'store_name': 'Food Mart.fr',
            'country_id': 1}).execute()

        self.store_table.insert({
            'store_id': 4,
            'store_name': 'Food Mart.de',
            'country_id': 2}).execute()

        self.store_table.insert({
            'store_id': 5,
            'store_name': 'ACME.us',
            'country_id': 3}).execute()

        self.store_table.insert({
            'store_id': 6,
            'store_name': 'Food Mart.us',
            'country_id': 3}).execute()

        self.store_table.insert({
            'store_id': 7,
            'store_name': 'ACME.ca',
            'country_id': 4}).execute()

        self.store_table.insert({
            'store_id': 8,
            'store_name': 'Food Mart.ca',
            'country_id': 4}).execute()

        self.product_category_table.insert({
            'product_category_id': 1,
            'product_category_name': 'Vegetables'}).execute()

        self.product_category_table.insert({
            'product_category_id': 2,
            'product_category_name': 'Shoes'}).execute()

        self.product_table.insert({
            'product_id': 1,
            'product_category_id': 1,
            'product_name': 'Carrots'}).execute()
        self.product_table.insert({
            'product_id': 2,
            'product_category_id': 1,
            'product_name': 'Bananas'}).execute()
        self.product_table.insert({
            'product_id': 3,
            'product_category_id': 2,
            'product_name': 'Red shoes'}).execute()
        self.product_table.insert({
            'product_id': 4,
            'product_category_id': 2,
            'product_name': 'Green shoes'}).execute()
        self.product_table.insert({
            'product_id': 5,
            'product_category_id': 2,
            'product_name': 'Blue shoes'}).execute()

        years = cycle([2009, 2010, 2011])
        months = cycle([1, 5, 8, 9, 11])
        days = cycle([3, 12, 21, 29])
        prices = iter(cycle([100, 500, 1000]))
        quantities = iter(cycle([1, 5, 1, 2, 3, 20, 8]))
        values = iter((date(*value) for value in izip(years, months, days)))
        for value in self.product_table.select().with_only_columns([
            self.product_table.c.product_id,
            self.store_table.c.store_id]).execute():
            self.facts_table.insert({
                'product_id': value.product_id,
                'store_id': value.store_id,
                'date': next(values),
                'qty': next(quantities),
                'price': next(prices)}).execute()
        results = (self.facts_table.select().with_only_columns([
                (func.sum(self.facts_table.c.price *
                    self.facts_table.c.qty) /
                    func.sum(self.facts_table.c.qty))
                    .label('Unit Price'),
                func.sum(self.facts_table.c.qty).label('Quantity'),
                func.sum(self.facts_table.c.qty).label('fact_count'),
                self.facts_table.c.product_id.label('product_product'),
                self.facts_table.c.store_id.label('store_store'),
                func.date_trunc('month',
                    self.facts_table.c.date).label('time_month')])
            .group_by(func.date_trunc('month', self.facts_table.c.date),
                self.facts_table.c.product_id,
                self.facts_table.c.store_id)
            .execute())
        for res in results:
            self.agg_by_month_table.insert().execute(dict(res))
        second_agg = (self.facts_table.select().with_only_columns([
            (func.sum(self.facts_table.c.price *
                    self.facts_table.c.qty) /
                    func.sum(self.facts_table.c.qty))
                    .label('Unit Price'),
            func.sum(self.facts_table.c.qty).label('Quantity'),
            func.sum(self.facts_table.c.qty).label('fact_count'),
            self.facts_table.c.product_id.label('product_product'),
            self.store_table.c.country_id.label('store_country'),
            func.date_trunc('year',
                self.facts_table.c.date).label('time_year')])
            .where(self.facts_table.c.store_id == self.store_table.c.store_id)
            .group_by(self.facts_table.c.product_id.label('product_product'),
            self.store_table.c.country_id.label('store_country'),
            func.date_trunc('year',
                self.facts_table.c.date).label('time_year'))
            .execute())
        for res in second_agg:
            self.agg_by_year_country_table.insert().execute(dict(res))

    def tearDown(self):
        self.metadata.drop_all()
Code example #38
File: db.py Project: Apogeya/buildbot
    def __thd_clean_database(self, conn):
        # In general it's nearly impossible to do "bullet proof" database
        # cleanup with SQLAlchemy that will work on a range of databases
        # and their configurations.
        #
        # Following approaches were considered.
        #
        # 1. Drop Buildbot Model schema:
        #
        #     model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        #
        # Dropping the schema from the model is a correct, working operation
        # only if the database schema corresponds exactly to the model schema.
        #
        # If it does not (e.g. a migration script failed, or migration
        # produced an old version of the model), then some tables outside the
        # model schema may be present, and they may reference tables in the
        # model schema. In that case either dropping the model schema will
        # fail (if the database enforces referential integrity, e.g.
        # PostgreSQL), or dropping the leftover tables in the code below will
        # fail (if the database allows removing tables that other tables
        # still reference, e.g. SQLite).
        #
        # 2. Introspect database contents and drop found tables.
        #
        #     meta = MetaData(bind=conn)
        #     meta.reflect()
        #     meta.drop_all()
        #
        # May fail if the schema contains reference cycles (and the Buildbot
        # schema has them). Reflection loses the metadata describing how
        # reference cycles can be torn apart (e.g. use_alter=True).
        # Introspection may also fail if the schema has invalid references
        # (e.g. possible in SQLite).
        #
        # 3. What is actually needed here is accurate code, for each engine
        # and each engine configuration, that will drop all tables, indexes,
        # constraints, etc. in the proper order or in a proper way (using
        # table alteration, or DROP TABLE ... CASCADE, etc.).
        #
        # Conclusion: use approach 2, manually tearing apart the known
        # reference cycles.

        try:
            meta = MetaData(bind=conn)

            # Reflect database contents. May fail, e.g. if table references
            # non-existent table in SQLite.
            meta.reflect()

            # Table.foreign_key_constraints introduced in SQLAlchemy 1.0.
            if sa_version()[:2] >= (1, 0):
                # Restore `use_alter` settings to break known reference cycles.
                # Main goal of this part is to remove SQLAlchemy warning
                # about reference cycle.
                # Looks like it's OK to do it only with SQLAlchemy >= 1.0.0,
                # since it's not issued in SQLAlchemy == 0.8.0

                # List of reference links (table_name, ref_table_name) that
                # should be broken by adding use_alter=True.
                table_referenced_table_links = [
                    ('buildsets', 'builds'), ('builds', 'buildrequests')]
                for table_name, ref_table_name in table_referenced_table_links:
                    if table_name in meta.tables:
                        table = meta.tables[table_name]
                        for fkc in table.foreign_key_constraints:
                            if fkc.referred_table.name == ref_table_name:
                                fkc.use_alter = True

            # Drop all reflected tables and indices. May fail, e.g. if
            # SQLAlchemy wouldn't be able to break circular references.
            # SQLAlchemy's FK support with SQLite is not yet perfect, so we
            # must deactivate foreign keys during this operation, even though
            # we did our best to use use_alter.
            with withoutSqliteForeignKeys(conn.engine, conn):
                meta.drop_all()

        except Exception:
            # sometimes this goes badly wrong; being able to see the schema
            # can be a big help
            if conn.engine.dialect.name == 'sqlite':
                r = conn.execute("select sql from sqlite_master "
                                 "where type='table'")
                log.msg("Current schema:")
                for row in r.fetchall():
                    log.msg(row.sql)
            raise
Code example #39
class DbClient:
    def __init__(self, db_url=environ.get("DB_URL"), new=False, echo=False):

        self.engine = create_engine(db_url, echo=echo)
        self.session_maker = sessionmaker(bind=self.engine)
        self.metadata = MetaData(bind=self.engine)

        spider_loader = spiderloader.SpiderLoader.from_settings(settings())
        s_names = spider_loader.list()
        self.spiders = tuple(spider_loader.load(name) for name in s_names)

        # todo consider wrapping sqlalchemy.exc.OperationalError instead of using new parameter
        if new:
            create_database(self.engine.url)
            self.create_tables(Base)
        else:
            self.metadata.reflect()

    @staticmethod
    def current_date():
        # finds the latest day based on the mastercard definition
        now = datetime.datetime.now(timezone('US/Eastern'))

        today = now.date()

        if now.hour < 14:
            today -= datetime.timedelta(days=1)

        return today

    @contextmanager
    def session_scope(self, commit=True):
        """Provide a transactional scope around a series of operations."""

        session = self.session_maker()
        try:
            yield session
            if commit:
                session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    def create_tables(self, base):
        base.metadata.create_all(self.engine)

        with self.session_scope() as s:
            providers = [s.provider for s in self.spiders]
            for pid, p_name in enumerate(providers):
                s.add(Provider(id=pid + 1, name=p_name))
                self.update_currencies(p_name)

    # todo differentiate between card currencies and transaction currencies
    def missing(self, provider, end=None, num_days=363, currs=None):
        with self.session_scope(commit=False) as s:

            if not end:
                end = self.current_date()

            start = end - datetime.timedelta(days=num_days - 1)

            spider = next(spider for spider in self.spiders
                          if spider.provider == provider)

            if not currs:
                currs = set(spider.fetch_avail_currs().keys())

            avail_dates = (end - datetime.timedelta(days=x)
                           for x in range(num_days))

            all_combos = ((x, y, z)
                          for x, y, z in product(currs, currs, avail_dates)
                          if x != y)

            # noinspection PyUnresolvedReferences
            not_missing = set(
                s.query(Rate.card_code, Rate.trans_code, Rate.date).filter(
                    Rate.provider.has(name=provider)).filter(
                        Rate.date <= end).filter(Rate.date >= start).filter(
                            Rate.card_code.in_(currs)).filter(
                                Rate.trans_code.in_(currs)))

        return (x for x in all_combos if x not in not_missing)

    # todo multiprocessing to be implemented
    @staticmethod
    def combos_to_csv(file_count, results, out_path):

        out_path = Path(out_path)

        try:
            out_path.mkdir()
        except FileExistsError:
            pass

        paths = tuple(out_path / f'{i}.csv' for i in range(file_count))

        for p in paths:
            p.touch()

        fs = []
        try:
            fs = tuple(p.open(mode='w') for p in paths)
            for i, (card_c, trans_c, date) in enumerate(results):
                std_date = date.strftime(std_date_fmt)
                fs[i % file_count].write(f'{card_c},{trans_c},{std_date}\n')

        finally:
            for f in fs:
                f.close()

    def rates_from_csv(self, provider, in_path):

        with self.session_scope() as s:

            provider_id = (s.query(
                Provider.id).filter_by(name=provider).first()[0])

            for file in Path(in_path).glob('*.csv'):
                print(file)
                with file.open() as f:
                    data = csv.reader(f)
                    next(data)  # skip header row #
                    rates = [
                        Rate(card_code=card_code,
                             trans_code=trans_code,
                             date=strpdate(date, fmt='%m/%d/%Y'),
                             provider_id=provider_id,
                             rate=rate)
                        for card_code, trans_code, date, rate in data
                    ]
                    s.bulk_save_objects(rates)
                    s.commit()

    def update_currencies(self, provider):
        spider = next(s for s in self.spiders if s.provider == provider)
        with self.session_scope() as s:
            for alpha_code, name in spider.fetch_avail_currs().items():
                try:
                    s.add(CurrencyCode(alpha_code=alpha_code, name=name))
                    s.commit()
                except IntegrityError:
                    s.rollback()

    def drop_all_tables(self):
        self.metadata.drop_all()

    def drop_database(self):
        drop_database(self.engine.url)
Code example #40
def db_session():
    metadata = MetaData(ssms.app.engine)
    metadata.reflect()
    session = ssms.app.Session()
    yield session
    metadata.drop_all()
Code example #41
File: db.py Project: mapping/OpenClimateGIS
class IndexTime(NcBase, Base):
    #    __tablename__ = 'nc_index_time'
    dataset_id = Column(ForeignKey(Dataset.id))
    index = Column(Integer)
    lower = Column(DateTime)
    value = Column(DateTime)
    upper = Column(DateTime)

    @declared_attr
    def dataset(cls):
        return (relationship(Dataset, backref=cls.__tablename__))


class IndexSpatial(IndexBase, Base):
    #    __tablename__ = 'nc_index_spatial'
    #    id = Column(Integer,primary_key=True)
    #    dataset_id = Column(ForeignKey(Dataset.id))
    row = Column(Integer)
    col = Column(Integer)
    geom = GeometryColumn(Polygon)
    centroid = GeometryColumn(Point)


#    dataset = relationship(Dataset,backref=__tablename__)
GeometryDDL(IndexSpatial.__table__)

try:
    metadata.drop_all()
finally:
    metadata.create_all()