コード例 #1
0
ファイル: sqlalchemy_utils.py プロジェクト: AlinaKay/Flexget
def drop_tables(names, session):
    """Takes a list of table names and drops them from the database if they exist.

    Args:
        names: iterable of table names to drop.
        session: SQLAlchemy session whose bind points at the target database.
    """
    metadata = MetaData()
    metadata.reflect(bind=session.bind)
    # Drop in reverse dependency order (children before parents) so foreign
    # key constraints cannot block the drop.
    for table in reversed(metadata.sorted_tables):
        if table.name in names:
            # Pass the bind explicitly: the freshly created MetaData is
            # unbound, so a bare table.drop() would raise
            # UnboundExecutionError.
            table.drop(bind=session.bind)
コード例 #2
0
ファイル: __init__.py プロジェクト: petrushev/ideasphere
def reflect(engine, models, schema=None):
    """Reflect the database schema and map tables onto classes in *models*.

    Args:
        engine: SQLAlchemy engine connected to the database.
        models: module (or object) holding model classes named after tables
            (snake_case table name -> CamelCase class name).
        schema: optional schema name to reflect from.

    Returns:
        (mappers, tables, Session) — dict of created mappers keyed by model
        name, dict of reflected tables, and a configured sessionmaker.
    """
    metadata = MetaData()
    metadata.bind = engine

    with warnings.catch_warnings():
        # Reflection can emit SAWarning for constructs SQLAlchemy cannot
        # fully represent; they are irrelevant here.
        warnings.simplefilter("ignore", category=SAWarning)
        metadata.reflect(schema=schema, views=False)

    if schema is not None:
        # Reflected table keys are prefixed "schema."; strip the prefix so
        # lookups by bare table name work.
        # .items() replaces the Python-2-only .iteritems().
        tables = dict((table_name.replace(str(schema) + ".", ""), table)
                      for table_name, table in metadata.tables.items())
    else:
        tables = metadata.tables

    clear_mappers()

    mappers = {}
    for table_name, table in tables.items():
        # snake_case table name -> CamelCase model class name.
        modelname = "".join(word.capitalize() for word in table_name.split("_"))

        try:
            model = getattr(models, modelname)
        except AttributeError:
            stderr.write("Missing model for table %s\n" % table_name)
        else:
            mappers[modelname] = mapper(model, table)

    Session = sessionmaker(bind=engine, autocommit=False, autoflush=True)

    return mappers, tables, Session
コード例 #3
0
ファイル: fixtures.py プロジェクト: cwoebker/blitzdb
    def _sql_backend(request, engine, **kwargs):
        """Build a SqlBackend on a freshly wiped database for one test.

        Registers a finalizer on *request* that rolls back and drops the
        schema again when the test finishes.
        """
        # Drop anything left over from a previous run.
        meta = MetaData(engine)
        meta.reflect()
        meta.drop_all()
        # we enable foreign key checks for SQLITE
        if str(engine.url).startswith('sqlite://'):
            engine.connect().execute('pragma foreign_keys=ON')

        # `'x' not in d` is the idiomatic spelling of `not 'x' in d`.
        if 'ondelete' not in kwargs:
            kwargs['ondelete'] = 'CASCADE'
        backend = SqlBackend(engine=engine, **kwargs)
        backend.init_schema()
        backend.create_schema()

        def finalizer():
            """Roll back and drop the whole schema after the test."""
            backend.rollback()
            del backend.connection
            print("Dropping schema...")
            # we disable foreign key checks for SQLITE (as dropping tables
            # with circular foreign keys won't work otherwise...)
            if str(engine.url).startswith('sqlite://'):
                engine.connect().execute('pragma foreign_keys=OFF')
            meta = MetaData(engine)
            meta.reflect()
            meta.drop_all()
            print("Done...")

        request.addfinalizer(finalizer)

        return backend
コード例 #4
0
ファイル: fixtures.py プロジェクト: paulorobertocruz/blitzdb
 def finalizer():
     """Teardown hook: disconnect the backend and drop every table."""
     # NOTE(review): `backend` and `engine` are closed over from the
     # enclosing fixture scope — confirm against the surrounding function.
     del backend.connection
     print("Dropping schema...")
     # Reflect the current schema so drop_all sees every existing table.
     meta = MetaData(engine)
     meta.reflect()
     meta.drop_all()
     print("Done...")
コード例 #5
0
ファイル: __init__.py プロジェクト: hadrien/pyramid_royal
def setupPackage():
    """Prepare the test environment: set Mongo env vars and migrate the
    MySQL database to alembic head (clearing it first if downgrade fails).
    """
    os.environ['MONGO_URI'] = 'mongodb://localhost'
    os.environ['MONGO_DB_NAME'] = 'royal_example'
    os.environ['MONGO_DB_PREFIX'] = ''

    # sqla extension setup.
    global engine

    alembic_config = Config()
    alembic_config.set_main_option('script_location',
                                   'example/ext/sqla/db')
    alembic_config.set_main_option('sqlalchemy.url', mysql_uri)

    engine = create_engine(mysql_uri)

    try:
        command.downgrade(alembic_config, 'base')
    except Exception:
        # Catch Exception, not a bare `except:`, so SystemExit and
        # KeyboardInterrupt are not swallowed. Fall back to dropping
        # everything by hand.
        log.exception("Migration downgrade failed, clearing all tables")
        metadata = MetaData(engine)
        metadata.reflect()
        # Drop foreign key constraints first so drop_all cannot fail on
        # inter-table dependencies.
        for table in metadata.tables.values():
            for fk in table.foreign_keys:
                engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()

    command.upgrade(alembic_config, 'head')
コード例 #6
0
ファイル: database.py プロジェクト: geekygirldawn/sortinghat
def reflect_table(engine, klass):
    """Reflect the schema and map the first table supported by *klass*.

    Raises DatabaseError when the database is unreachable or when none of
    the supported tables exists in the reflected schema.
    """
    try:
        meta = MetaData()
        meta.reflect(bind=engine)
    except OperationalError as e:
        raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0])

    # Pick the first supported table that is present in the schema.
    table = next(
        (meta.tables[tb] for tb in klass.tables() if tb in meta.tables),
        None)

    if table is None:
        raise DatabaseError(error="Invalid schema. Table not found",
                            code="-1")

    # Attach the table schema to klass via classical mapping.
    mapper(klass, table,
           column_prefix=klass.column_prefix())

    return table
コード例 #7
0
ファイル: util.py プロジェクト: pacificclimate/pycds
def create_reflected_test_database(read_engine, write_engine):
    """Reflect the schema from *read_engine* (Postgres) and create an
    SQLite-compatible subset of it on *write_engine*.
    """
    meta = MetaData(bind=write_engine)
    meta.reflect(bind=read_engine)

    # These objects cannot be recreated on the target; drop them from the
    # in-memory metadata (not from the source database).
    for tablename in ('matviews', 'stats_station_var'):
        meta.remove(meta.tables[tablename])

    logger.info("Overriding PG types that are unknown to sqlite")
    meta.tables['meta_history'].columns['tz_offset'].type = Integer()
    meta.tables['obs_raw'].columns['mod_time'].server_default = None
    meta.tables['meta_history'].columns['the_geom'].type = Integer()
    # These are all BIGINT in postgres
    meta.tables['obs_raw'].columns['obs_raw_id'].type = Integer()
    meta.tables['obs_raw_native_flags'].columns['obs_raw_id'].type = Integer()
    meta.tables['obs_raw_pcic_flags'].columns['obs_raw_id'].type = Integer()

    logger.info("Unsetting all of the sequence defaults")
    # .items() replaces the Python-2-only .iteritems() for py3 compatibility.
    for tablename, table in meta.tables.items():
        if hasattr(table, 'primary_key'):
            for column in table.primary_key.columns.values():
                if column.server_default:
                    column.server_default = None

    logger.info("Creating a subset of the tables")
    to_search = [
        'obs_raw', 'meta_history', 'meta_station', 'meta_network', 'meta_vars',
        'meta_contact'
    ]
    to_create = [
        table for tablename, table in meta.tables.items()
        if tablename in to_search
    ]
    # Don't have contact in the postgres database yet 2013.12.04
    meta.tables['meta_network'].append_column(Column('contact_id', Integer))
    meta.create_all(tables=to_create)
コード例 #8
0
ファイル: db.py プロジェクト: acmiyaguchi/buildbotve
    def _clean_database(self):
        """Drop every table in the database at ``self.db_url``."""
        log.msg("cleaning database %s" % self.db_url)
        engine = sqlalchemy.create_engine(self.db_url)

        meta = MetaData()

        # Reflection sometimes fails for a handful of tables; since they are
        # only being dropped, a stub definition with a single dummy column is
        # enough — no real schema needed.
        stub_tables = (
            'buildrequests', 'builds',
            'buildset_properties', 'buildsets', 'change_properties',
            'change_files', 'change_links',
            'changes', 'patches', 'sourcestamp_changes', 'sourcestamps',
            'scheduler_changes', 'scheduler_upstream_buildsets',
            'schedulers',
        )
        for name in stub_tables:
            sqlalchemy.Table(name, meta,
                             sqlalchemy.Column('tmp', sqlalchemy.Integer))

        # Pull in whatever other tables exist in the live database.
        meta.reflect(bind=engine)

        # Drop everything that is actually present.
        meta.drop_all(bind=engine, checkfirst=True)

        engine.dispose()
コード例 #9
0
class Database:
    """Reflect every table of the database at *url* and wrap query execution
    with a retry on MySQL lock-wait timeouts."""

    def __init__(self, url, config=None):
        self.url = url
        self.engine = self.make_engine(url)
        self.metadata = MetaData(bind=self.engine)
        self.metadata.reflect()

        self.config = config

        # parallel table init: one worker thread per reflected table
        self.tables_lock = threading.Lock()
        self.tables = {}
        threads = []
        for table in self.metadata.sorted_tables:
            t = threading.Thread(target=self.make_dsm_table, args=(table,))
            t.start()
            threads.append(t)
        # A plain loop, not a throwaway list comprehension, for side effects.
        for t in threads:
            t.join()

    def execute(self, qry):
        """Execute *qry*, retrying recursively on lock-wait timeouts.

        NOTE(review): the retry has no depth limit; a persistently locked
        table would recurse until the timeout stops occurring.
        """
        try:
            res = self.engine.execute(qry)
        except Exception as e:
            # `except Exception, e` / `print e` were Python-2-only syntax;
            # `.message` does not exist on py3 exceptions, so fall back to
            # str(e) there.
            msg = getattr(e, 'message', str(e))
            if msg == "(OperationalError) (1205, 'Lock wait timeout exceeded; try restarting transaction')":
                print(e)
                res = self.execute(qry)
            else:
                print(e)
                raise e

        return res
コード例 #10
0
ファイル: sql.py プロジェクト: bxhunter/pandas
def read_sql_table(table_name, con, index_col=None, coerce_float=True,
                   parse_dates=None, columns=None):
    """Read SQL database table into a DataFrame.

    Given a table name and an SQLAlchemy engine, returns a DataFrame.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
    index_col : string, optional
        Column to set as index
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table

    Returns
    -------
    DataFrame

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql

    """
    if not _is_sqlalchemy_engine(con):
        raise NotImplementedError("read_sql_table only supported for "
                                  "SQLAlchemy engines.")
    import sqlalchemy
    from sqlalchemy.schema import MetaData
    meta = MetaData(con)
    try:
        # Reflect just the requested table; avoids reflecting the whole DB.
        meta.reflect(only=[table_name])
    except sqlalchemy.exc.InvalidRequestError:
        raise ValueError("Table %s not found" % table_name)

    pandas_sql = PandasSQLAlchemy(con, meta=meta)
    table = pandas_sql.read_table(
        table_name, index_col=index_col, coerce_float=coerce_float,
        parse_dates=parse_dates, columns=columns)

    if table is not None:
        return table
    else:
        # read_table returned nothing — surface it as a missing table.
        raise ValueError("Table %s not found" % table_name, con)
コード例 #11
0
ファイル: main.py プロジェクト: rflynn/sqlacodegen
def main():
    """Command-line entry point: reflect a database and emit model code."""
    parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
    parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
    parser.add_argument('--version', action='store_true', help="print the version number and exit")
    parser.add_argument('--schema', help='load tables from an alternate schema')
    parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
    parser.add_argument('--noviews', action='store_true', help="ignore views")
    parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
    parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
    parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
    parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
    parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
    parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
    parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
    parser.add_argument('--outfile', help='file to write output to (default: stdout)')
    args = parser.parse_args()

    if args.version:
        print(sqlacodegen.version)
        return
    if not args.url:
        print('You must supply a url\n', file=sys.stderr)
        parser.print_help()
        return

    # Reflect the requested tables (or all of them) into the metadata.
    engine = create_engine(args.url)
    metadata = MetaData(engine)
    tables = args.tables.split(',') if args.tables else None
    metadata.reflect(engine, args.schema, not args.noviews, tables)

    outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
    generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
                              args.noclasses, args.alwaysclasses, args.nosequences)
    try:
        generator.render(outfile)
    finally:
        # Close the output file (but never stdout) so the data is flushed;
        # the original leaked the codecs.open() handle.
        if outfile is not sys.stdout:
            outfile.close()
コード例 #12
0
ファイル: model.py プロジェクト: tsionyx/pydbv
def main(sqlalchemy_connection, show_type=False):
    """Generate a database schema description in the dot language.

    Args:
        sqlalchemy_connection: SQLAlchemy URL of the database to reflect.
        show_type: when True, column types are shown in the rendered nodes
            and edge head ports get a '_type' suffix.

    Returns:
        The graph serialized as a dot-language string.
    """
    engine = create_engine(sqlalchemy_connection)

    meta = MetaData()
    meta.reflect(bind=engine)

    # Jinja2 environment for rendering each table as an HTML-like label.
    env = Environment(
        loader=FileSystemLoader(THIS_DIR),
        trim_blocks=True,
        lstrip_blocks=True)
    node_template = env.get_template('node-template.html')

    g = pgv.AGraph(strict=False,
                   directed=True,
                   name=engine.url.database,
                   graph_type='digraph',
                   compound='true',
                   rankdir='RL')
    tables = meta.tables.values()
    # tables = engine.table_names()

    # One node per table; skip sqlite's internal bookkeeping tables.
    for table in tables:
        if table.name.lower().startswith('sqlite_'):
            continue
        g.add_node(
            table.name,
            label=node_template.render(
                title=table.name,
                columns=table.columns.values(),
                show_type=show_type),
            shape='plaintext'
        )

    # One edge per foreign key, drawn child -> parent.
    for table in tables:
        for fk in table.foreign_keys:
            # Skip FKs that point at tables excluded from the reflection.
            if fk.column.table not in tables:
                # print fk.column.table
                continue

            # A PK/unique child column means a 1:1-style relationship; this
            # picks the arrow tail glyph below.
            uniq_child = fk.parent.primary_key or fk.parent.unique
            g.add_edge(
                table.name,
                fk.column.table.name,
                tailport="{0}:w".format(fk.parent.name),
                headport="{0}:e".format(fk.column.name +
                                        ('_type' if show_type else '')),
                # headlabel="+ %s"%fk.column.name,
                # taillabel='+ %s'%fk.parent.name,
                # arrowhead='odottee' if fk.parent.nullable else 'teetee',
                # dir='both',
                arrowhead='none', dir='back',
                arrowtail='teeodot' if uniq_child else 'crowodot'
                # samehead=fk.column.name, sametail=fk.parent.name,
            )
    return g.to_string()
コード例 #13
0
ファイル: db.py プロジェクト: allannss/buildbot
    def __thd_clean_database(self, conn):
        """Drop every table reachable through *conn*."""
        # First drop the tables the model knows about.
        model.Model.metadata.drop_all(bind=conn, checkfirst=True)

        # Then reflect whatever is left in the database and drop that too.
        leftovers = MetaData(bind=conn)
        leftovers.reflect()
        leftovers.drop_all()
コード例 #14
0
ファイル: sql.py プロジェクト: boqiulu/pandas
    def __init__(self, engine, meta=None):
        """Store the engine and metadata, reflecting the connected schema
        when no metadata is supplied."""
        self.engine = engine
        if not meta:
            # Build the metadata lazily by reflecting the database.
            from sqlalchemy.schema import MetaData
            reflected = MetaData(self.engine)
            reflected.reflect(self.engine)
            meta = reflected

        self.meta = meta
コード例 #15
0
def create_connection(url, schema_name, datasource_name=''):
    """Create an engine for *url*, reflect *schema_name*, and register the
    resulting DbUtil as a named IDbUtil utility."""
    engine = create_engine(url)

    metadata = MetaData(schema=schema_name, bind=engine)
    metadata.reflect(schema=schema_name, bind=engine)

    db_util = DbUtil(engine=engine, metadata=metadata)
    provideUtility(db_util, IDbUtil, name=datasource_name)
コード例 #16
0
ファイル: migratedb.py プロジェクト: viiru-/pytrainer
 def is_empty(self):
     """Check if the DB schema is empty.

     An empty schema indicates a new uninitialised database."""
     md = MetaData()
     md.bind = sqlalchemy.create_engine(self._db_url)
     md.reflect()
     # No reflected tables at all means the schema is empty.
     return not md.tables
コード例 #17
0
ファイル: db.py プロジェクト: Acidburn0zzz/buildbot
    def __thd_clean_database(self, conn):
        """Drop all tables reachable through *conn*."""
        # Drop the model-declared tables; this can trip over dependency
        # ordering, in which case the error is ignored and the reflected
        # pass below finishes the job.
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # Reflect whatever remains and drop it as well.
        remaining = MetaData(bind=conn)
        remaining.reflect()
        remaining.drop_all()
コード例 #18
0
ファイル: sqlachemy_test_case.py プロジェクト: Uberi/PyHive
    def test_get_table_names(self, engine, connection):
        """Reflection and inspection both expose the expected tables."""
        reflected = MetaData()
        reflected.reflect(bind=engine)
        for expected in ('one_row', 'one_row_complex'):
            self.assertIn(expected, reflected.tables)

        inspector = sqlalchemy.inspect(engine)
        self.assertIn(
            'dummy_table',
            inspector.get_table_names(schema='pyhive_test_database'),
        )
コード例 #19
0
ファイル: fixtures.py プロジェクト: cwoebker/blitzdb
 def finalizer():
     """Teardown hook: roll back, disconnect, and drop the whole schema."""
     # NOTE(review): `backend` and `engine` are closed over from the
     # enclosing fixture scope — confirm against the surrounding function.
     backend.rollback()
     del backend.connection
     print("Dropping schema...")
     #we disable foreign key checks for SQLITE (as dropping tables with circular foreign keys won't work otherwise...)
     if str(engine.url).startswith('sqlite://'):
         engine.connect().execute('pragma foreign_keys=OFF')
     # Reflect the live schema so drop_all sees every existing table.
     meta = MetaData(engine)
     meta.reflect()
     meta.drop_all()
     print("Done...")
コード例 #20
0
def setup_clean_db(app):
    """Drop every table in the app's database, then re-apply all migrations."""
    # Clear out any existing tables
    from subscity.models.base import DB
    engine = DB.get_engine(app)
    metadata = MetaData(engine)
    metadata.reflect()
    # (Removed stale commented-out FK-constraint dropping; drop_all on the
    # reflected metadata handles dependency ordering here.)
    metadata.drop_all()
    apply_migrations()
コード例 #21
0
ファイル: sqla_orm.py プロジェクト: djrobstep/sqlbag
def metadata_from_session(s):
    """
    Args:
        s: an SQLAlchemy :class:`Session`
    Returns:
        The metadata.

    Get the metadata associated with the schema.
    """
    # Reflect against whatever engine/connection the session is bound to.
    reflected = MetaData()
    reflected.reflect(bind=s.bind)
    return reflected
コード例 #22
0
    def __init__(self):
        """Connect to the MISP MySQL database and to Redis.

        NOTE(review): `user`, `password`, `host`, `dbname` and `redis_socket`
        come from an enclosing/module scope — confirm where they are set.
        """
        Base = automap_base()
        engine = create_engine("mysql://{}:{}@{}/{}".format(user, password, host, dbname))

        # reflect the tables
        Base.prepare(engine, reflect=True)
        # A separate MetaData reflection backs the Table() lookups below.
        metadata = MetaData()
        metadata.reflect(bind=engine)
        self.connection = engine.connect()
        self.events = Table("events", metadata, autoload=True)
        self.attributes = Table("attributes", metadata, autoload=True)

        self.r = StrictRedis(unix_socket_path=redis_socket)
コード例 #23
0
def initialize_db(config_uri, options=None):
    """Drop every table in the configured database, then upgrade it again.

    Args:
        config_uri: path to the ini file with logging/sqlalchemy settings.
        options: optional mapping forwarded to ``get_appsettings``.
    """
    # Default to None, not {}: a mutable default argument is shared across
    # calls and is a classic Python pitfall.
    if options is None:
        options = {}

    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = engine_from_config(settings, 'sqlalchemy.')
    # delete all tables

    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)


    upgrade_db(config_uri)
コード例 #24
0
ファイル: test_dao.py プロジェクト: jaunis/xivo-dao
def _init_tables(engine):
    """Drop every reflected table, recreate the declarative schema, and
    record the resulting table names in the module-global ``_tables``."""
    global _tables
    logger.debug("Cleaning tables")
    metadata = MetaData(bind=engine)
    metadata.reflect()
    logger.debug("drop all tables")
    metadata.drop_all()
    logger.debug("create all tables")
    Base.metadata.create_all(bind=engine)
    engine.dispose()
    logger.debug("Tables cleaned")
    # Re-reflect so _tables matches what actually exists now.
    metadata = MetaData(bind=engine)
    metadata.reflect()
    # list(mapping) iterates its keys; replaces the Python-2-only .iterkeys().
    _tables = list(metadata.tables)
コード例 #25
0
ファイル: dbproxy_server.py プロジェクト: zhoujun/simplerpc
class DBProxyServer(BaseServer):
    """RPC server exposing a MySQL database through a reflected schema."""

    def __init__(self):
        # NOTE(review): credentials are '******' placeholders — presumably
        # substituted at deployment time; confirm the configuration source.
        self.user = '******'
        self.password = '******'
        self.host = '127.0.0.1'
        self.db = 'demo'
        self.url = 'mysql://%s:%s@%s/%s' % (self.user, self.password, self.host, self.db)
        # Pooled engine; recycle connections hourly (MySQL closes idle ones).
        self.engine = create_engine(self.url, echo=True, encoding='utf-8', pool_size=100, pool_recycle=3600)
        self.meta = MetaData(self.engine)
        self.meta.reflect()

        # Start the RPC endpoint; blocks or backgrounds per RPCServer.start().
        server = RPCServer(12000)
        server.add_avatar('admin', '123456', DBProxyAdminAvatar, (self,))
        server.start()

    def shutdown(self):
        # NOTE(review): `stop` is not defined in this class — presumably
        # inherited from BaseServer; confirm.
        self.stop()
コード例 #26
0
ファイル: _database.py プロジェクト: chaoflow/dicttree.sql
class Database(object):
    """Dict-like wrapper over a reflected database: keys are table names,
    values are :class:`Table` wrappers bound to one shared session."""

    def __init__(self, *args, **kw):
        # *args/**kw are forwarded verbatim to create_engine (URL, options).
        self.engine = create_engine(*args, **kw)
        self.metadata = MetaData(bind=self.engine)
        self.metadata.reflect(bind=self.engine)
        Session = sessionmaker(bind=self.engine)
        self.session = Session()

    def __contains__(self, tablename):
        # True if a table with this name was reflected.
        return tablename in self.metadata.tables.keys()

    def __getitem__(self, tablename):
        # Wrap the reflected table together with the shared session.
        return Table(self.metadata.tables[tablename], self.session)

    def __setitem__(self, dn, node):
        # Creating tables through item assignment is intentionally unsupported.
        raise NotImplementedError()

    def __delitem__(self, tablename):
        # Drop the table from the database, then forget it in the metadata.
        table = self[tablename]
        table._table.drop(self.engine)
        self.metadata.remove(table._table)

    def __iter__(self):
        for tablename in self.metadata.tables:
            yield tablename

    def __len__(self):
        return sum(1 for table in iter(self))

    # Python-2-style alias: iterating keys is the same as iterating self.
    iterkeys = __iter__

    def itervalues(self):
        for table in self.metadata.tables.values():
            yield Table(table, self.session)

    def iteritems(self):
        # NOTE(review): ValuesView is called positionally here but with
        # `dictionary=` below — presumably the same project-local view
        # classes; confirm their constructor signature.
        return ((table.name, table) for table in ValuesView(self))

    def items(self):
        return ItemsView(dictionary=self)

    def keys(self):
        return KeysView(dictionary=self)

    def values(self):
        return ValuesView(dictionary=self)
コード例 #27
0
def get_options_from_database(config):
    """Load and deserialize the plugin's options row from the WordPress
    options table.

    Returns:
        dict of option values with the "dropbox_music_" key prefix removed.
    """
    table_name = "%soptions" % config['wordpress']['prefix']
    engine = create_engine(config['sqlalchemy']['engine'])
    meta = MetaData()
    meta.reflect(bind=engine)

    options_table = meta.tables[table_name]
    query = select([options_table.c.option_value],
        options_table.c.option_name==config['wordpress']['option'])

    # Close the connection when done instead of leaking it.
    connection = engine.connect()
    try:
        result = connection.execute(query)
        row = result.fetchone()
    finally:
        connection.close()

    # The option value is PHP-serialized; decode it.
    values = phpload(row[0])

    # Strip the "dropbox_music_" prefix from every key.
    return dict([(k.replace("dropbox_music_", ""), v)
                  for k,v in values.items()])
コード例 #28
0
ファイル: declarative.py プロジェクト: Kozea/pypet
    def _make_instance(mcs, class_, classdict):
        """Build a :class:`pypet.Cube` from a declarative cube class.

        Reads dunder attributes (__metadata__/__connection__, __fact_table__,
        __aggregates__, __fact_count_column__) from *classdict* and collects
        Dimension/Measure attributes from *class_*.
        """
        metadata = classdict.get('__metadata__', None)
        if not metadata:
            # Without explicit metadata, build it by reflecting the
            # database named by __connection__.
            connection = classdict.get('__connection__', None)
            if not connection:
                raise ValueError(
                    'Cube must have at least a __metadata__'
                    ' or a __connection__ attribute')
            metadata = MetaData(bind=create_engine(connection))
            metadata.reflect()
        fact_table = classdict.get('__fact_table__', None)
        if fact_table is None:
                raise ValueError(
                    'Cube must have a __fact_table__ attribute')
        fact_table = table(fact_table, metadata)

        # Collect declared dimensions and measures, naming each after the
        # attribute it was assigned to.
        dimensions = []
        measures = []
        for key in dir(class_):
            value = getattr(class_, key)
            if isinstance(value, pypet.Dimension):
                value.name = key
                dimensions.append(value)
            elif isinstance(value, pypet.Measure):
                value.name = key
                expr = getattr(value, 'expression', None)
                # UNKNOWN_VALUE means "use the attribute name as the column";
                # any other non-None expression is resolved against the fact
                # table.
                if expr is UNKNOWN_VALUE:
                    value.expression = key
                if expr is not None:
                    value.expression = column(value.expression, fact_table)
                measures.append(value)

        # Preserve declaration order via the _count counter (undeclared
        # items sort last).
        dimensions = sorted(dimensions,
                        key=lambda x: getattr(x, '_count', float('inf')))
        measures = sorted(measures,
                        key=lambda x: getattr(x, '_count', float('inf')))
        cube = pypet.Cube(
            metadata, fact_table, dimensions, measures,
            aggregates=classdict.get('__aggregates__', None),
            fact_count_column=column(
                classdict.get('__fact_count_column__', None), fact_table))

        # Expose dimensions/measures as attributes on the cube itself,
        # without clobbering anything the Cube already defines.
        for thing in dimensions + measures:
            if not hasattr(cube, thing.name):
                setattr(cube, thing.name, thing)
        return cube
コード例 #29
0
ファイル: __init__.py プロジェクト: edemocracy/ekklesia
 def open_db(self,engine,mode='open'):
     """open the database with engine URL. possible modes:
     drop - drop own tables
     dropall - drop all tables
     create - create the tables
     open - reflect the existing tables
     """
     from sqlalchemy.ext.declarative import declarative_base, DeferredReflection
     from sqlalchemy.schema import MetaData
     from sqlalchemy import event, Table
     metadata = None
     # Remap reflected column names through self.column_map; the listener
     # only acts on tables belonging to the metadata created below (the
     # closure reads `metadata`, rebound later in this function).
     @event.listens_for(Table, "column_reflect")
     def column_reflect(inspector, table, column_info):
         if table.metadata is metadata:
             if self.column_map and table.name in self.column_map:
                 column_info['key'] = self.column_map[table.name][column_info['name']]
             else:
                 column_info['key'] = column_info['name']
     # Naming convention so constraint/index names are deterministic.
     convention = dict(
       ix='ix_%(column_0_label)s',
       uq="uq_%(table_name)s_%(column_0_name)s",
       ck="ck_%(table_name)s_%(column_0_name)s",
       fk="fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
       pk="pk_%(table_name)s"
     )
     if mode=='dropall':
         # Clear out any existing tables
         metadata = MetaData(engine, naming_convention=convention)
         metadata.reflect()
         _drop_all(metadata)
         return
     metadata = MetaData(naming_convention=convention)
     self.Base = declarative_base(bind=engine, cls=DeferredReflection, metadata=metadata)
     metadata = self.Base.metadata
     # Declare the model classes; reflect them only when just opening.
     self.declare(reflect= mode=='open')
     if mode=='drop':
         _drop_all(metadata)
         return
     if mode=='create':
         metadata.create_all(engine,checkfirst=False)
     from sqlalchemy.orm import sessionmaker
     # Resolve the deferred reflection and set up the working session.
     self.Base.prepare(engine)
     self.reflect_classes()
     Session = sessionmaker(bind=engine)
     self.session = Session()
コード例 #30
0
ファイル: ivc_deploy.py プロジェクト: OpenSight/IVR
def initialize_db(config_uri):
    """Wipe all tables from the configured database, then migrate to head."""
    # Read the [alembic] section of the ini file for sqlalchemy settings.
    parser = configparser.ConfigParser()
    parser.read(config_uri)
    settings = dict(parser.items("alembic"))

    engine = engine_from_config(settings, 'sqlalchemy.')

    # Reflect the live schema and drop everything it contains.
    meta = MetaData()
    meta.reflect(bind=engine)
    meta.drop_all(engine)

    upgrade_db(config_uri)
コード例 #31
0
ファイル: conftest.py プロジェクト: sumesh-aot/sbc-auth
def db(app):  # pylint: disable=redefined-outer-name, invalid-name
    """Return a session-wide initialised database.

    Drops all existing tables - Meta follows Postgres FKs
    """
    with app.app_context():
        # Clear out any existing tables
        metadata = MetaData(_db.engine)
        metadata.reflect()
        # Drop FK constraints first so drop_all cannot fail on dependencies.
        for table in metadata.tables.values():
            for fk in table.foreign_keys:  # pylint: disable=invalid-name
                _db.engine.execute(DropConstraint(fk.constraint))
        metadata.drop_all()
        _db.drop_all()

        # Sequences are not dropped with the tables; remove them explicitly.
        sequence_sql = """SELECT sequence_name FROM information_schema.sequences
                          WHERE sequence_schema='public'
                       """

        sess = _db.session()
        for seq in [name for (name,) in sess.execute(text(sequence_sql))]:
            try:
                sess.execute(text('DROP SEQUENCE public.%s ;' % seq))
                print('DROP SEQUENCE public.%s ' % seq)
            except Exception as err:  # pylint: disable=broad-except
                # Best-effort cleanup: report and continue with remaining
                # sequences.
                print(f'Error: {err}')
        sess.commit()

        # ############################################
        # There are 2 approaches, an empty database, or the same one that the app will use
        #     create the tables
        #     _db.create_all()
        # or
        # Use Alembic to load all of the DB revisions including supporting lookup data
        # This is the path we'll use in auth_api!!

        # even though this isn't referenced directly, it sets up the internal configs that upgrade needs
        Migrate(app, _db)
        upgrade()

        return _db
コード例 #32
0
 def import_schema(self, schema, srv_options, options,
                   restriction_type, restricts):
     """
     Reflects the remote schema.

     Returns a list of TableDefinition objects (one per reflected table)
     for the foreign-data-wrapper IMPORT SCHEMA machinery.
     """
     metadata = MetaData()
     url = _parse_url_from_options(srv_options)
     engine = create_engine(url)
     dialect = PGDialect()
     # 'limit' reflects only the listed tables; 'except' reflects all others.
     if restriction_type == 'limit':
         only = restricts
     elif restriction_type == 'except':
         only = lambda t, _: t not in restricts
     else:
         only = None
     metadata.reflect(bind=engine,
                      schema=schema,
                      only=only)
     to_import = []
     # Sort by table name for deterministic output order.
     for _, table in sorted(metadata.tables.items()):
         ftable = TableDefinition(table.name)
         ftable.options['schema'] = schema
         ftable.options['tablename'] = table.name
         for c in table.c:
             # Force collation to None to prevent incompatibilities
             setattr(c.type, "collation", None)
             # If the type is specialized, call the generic
             # superclass method
             if type(c.type) in CONVERSION_MAP:
                 class_name = CONVERSION_MAP[type(c.type)]
                 old_args = c.type.__dict__
                 c.type = class_name()
                 c.type.__dict__.update(old_args)
             if c.primary_key:
                 # NOTE(review): with a composite key only the last PK column
                 # survives here — confirm that single-column keys are assumed.
                 ftable.options['primary_key'] = c.name
             ftable.columns.append(ColumnDefinition(
                 c.name,
                 type_name=c.type.compile(dialect)))
         to_import.append(ftable)
     return to_import
コード例 #33
0
ファイル: main.py プロジェクト: xuru/sqlacodegen
def main():
    """Command-line entry point: reflect a database and write model code."""
    parser = argparse.ArgumentParser(
        description='Generates SQLAlchemy model code from an existing database.')
    parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
    parser.add_argument('--version', action='store_true', help="print the version number and exit")
    parser.add_argument('--schema', help='load tables from an alternate schema')
    parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
    parser.add_argument('--noviews', action='store_true', help="ignore views")
    parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
    parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
    parser.add_argument('--nojoined', action='store_true',
                        help="don't autodetect joined table inheritance")
    parser.add_argument('--noinflect', action='store_true',
                        help="don't try to convert tables names to singular form")
    parser.add_argument('--noclasses', action='store_true',
                        help="don't generate classes, only tables")
    parser.add_argument('--outfile', help='file to write output to (default: stdout)')
    args = parser.parse_args()

    if args.version:
        version = pkg_resources.get_distribution('sqlacodegen').parsed_version
        print(version.public)
        return
    if not args.url:
        print('You must supply a url\n', file=sys.stderr)
        parser.print_help()
        return

    # Use reflection to fill in the metadata
    engine = create_engine(args.url)
    import_dialect_specificities(engine)
    metadata = MetaData(engine)
    tables = args.tables.split(',') if args.tables else None
    metadata.reflect(engine, args.schema, not args.noviews, tables)

    # Write the generated model code to the specified file or standard output
    outfile = io.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
    generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined,
                              args.noinflect, args.noclasses)
    try:
        generator.render(outfile)
    finally:
        # Close the output file (but never stdout) so the data is flushed;
        # the original leaked the io.open() handle.
        if outfile is not sys.stdout:
            outfile.close()
コード例 #34
0
def ioc_mysql():
    """Pull IDS-flagged attributes from a MISP MySQL database into IOCs.

    Verifies that the configured MISP token matches the authkey stored for
    the configured user email, then appends every attribute row that has
    to_ids == 1 (with a derived 'date' and combined 'value') to the
    module-level IOCs list.
    """
    printv("Connection to mysql database")
    Base = automap_base()
    mysql_cfg = conf['mysql']
    engine = create_engine('mysql://{}:{}@{}/{}'.format(
        mysql_cfg['user'], mysql_cfg['password'],
        mysql_cfg['host'], mysql_cfg['dbname']))

    Base.prepare(engine, reflect=True)
    metadata = MetaData()
    metadata.reflect(bind=engine)
    connection = engine.connect()
    attributes_table = Table("attributes", metadata, autoload=True)
    users_table = Table("users", metadata, autoload=True)

    # MISP token must be the same as the authkey
    printv("Check authentication key (token)")
    query = select([users_table.c.authkey]).where(
        users_table.c.email == conf['misp']['email'])
    for authkey in connection.execute(query):
        if conf['misp']['token'] != authkey[0]:
            sys.exit(
                "Your misp_token must be your authentication key. Please check your configuration file"
            )

    # Get all ids attributes
    printv("Get Attributes")
    for attr in connection.execute(select([attributes_table])):
        dic_attr = OrderedDict(attr.items())
        if dic_attr['to_ids'] != 1:
            continue
        # Render the epoch timestamp as YYYYMMDD for downstream consumers.
        stamp = dic_attr['timestamp']
        dic_attr['date'] = datetime.datetime.fromtimestamp(
            int(stamp)).strftime("%Y%m%d")
        dic_attr['value'] = dic_attr['value1']
        if attr['value2']:
            dic_attr['value'] = dic_attr['value'] + '|' + dic_attr['value2']
        IOCs.append(dic_attr)
コード例 #35
0
    def __thd_clean_database(self, conn):
        """Drop every table reachable through *conn*.

        First drops the tables the model metadata knows about, then
        reflects the live schema and drops anything that remains.
        """
        # Known tables first; dependency ordering occasionally trips this
        # up, so a ProgrammingError here is tolerated.
        try:
            model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        except sa.exc.ProgrammingError:
            pass

        # Reflect whatever is still present and drop that too.
        try:
            leftover = MetaData(bind=conn)
            leftover.reflect()
            leftover.drop_all()
        except Exception:
            # When this goes badly wrong on SQLite, dump the current schema
            # before re-raising -- seeing it is a big help when debugging.
            if conn.engine.dialect.name == 'sqlite':
                rows = conn.execute("select sql from sqlite_master "
                                    "where type='table'")
                log.msg("Current schema:")
                for schema_row in rows.fetchall():
                    log.msg(schema_row.sql)
            raise
コード例 #36
0
def init():
    """Initialise the module-level engine, connection and the
    ``translations`` table handle, retrying until PostgreSQL is reachable.

    Blocks in a 1-second retry loop: transient connection or reflection
    failures are printed and retried forever; unexpected errors are also
    logged via LOGGER before retrying.
    """
    global engine, connection, translations
    LOGGER.info('PostgreSQL at ' + POSTGRESQL_URL)
    while True:
        try:
            engine = create_engine(POSTGRESQL_URL, poolclass=StaticPool)
            connection = engine.connect()

            meta = MetaData()
            meta.reflect(bind=engine)
            translations = meta.tables['translations']
            break
        # The two expected transient failures (server not up yet, schema
        # not created yet) share one handler instead of duplicated bodies.
        except (sqlalchemy.exc.OperationalError,
                sqlalchemy.exc.NoSuchTableError) as e:
            time.sleep(1.0)
            print(e)
        except Exception as e:
            # Unexpected failure: record it, but keep retrying anyway.
            LOGGER.error(e)
            print(e)
            time.sleep(1.0)
コード例 #37
0
ファイル: main.py プロジェクト: BlueWhale0-0/KYXL
def main():
    """Command-line entry point: reflect an existing database and write the
    generated SQLAlchemy model code to a file or stdout."""
    parser = argparse.ArgumentParser(
        description='Generates SQLAlchemy model code from an existing database.')
    parser.add_argument('url', nargs='?',
                        help='SQLAlchemy url to the database')
    parser.add_argument('--version', action='store_true',
                        help="print the version number and exit")
    parser.add_argument('--schema',
                        help='load tables from an alternate schema')
    parser.add_argument('--tables',
                        help='tables to process (comma-separated, default: all)')
    parser.add_argument('--noviews', action='store_true',
                        help="ignore views")
    parser.add_argument('--noindexes', action='store_true',
                        help='ignore indexes')
    parser.add_argument('--noconstraints', action='store_true',
                        help='ignore constraints')
    parser.add_argument('--nojoined', action='store_true',
                        help="don't autodetect joined table inheritance")
    parser.add_argument('--noinflect', action='store_true',
                        help="don't try to convert tables names to singular form")
    parser.add_argument('--noclasses', action='store_true',
                        help="don't generate classes, only tables")
    parser.add_argument('--outfile',
                        help='file to write output to (default: stdout)')
    parser.add_argument('--nobackrefs', action='store_true',
                        help="don't include backrefs")
    parser.add_argument('--flask', action='store_true',
                        help="use Flask-SQLAlchemy columns")
    parser.add_argument('--ignore-cols',
                        help="Don't check foreign key constraints on specified columns (comma-separated)")
    parser.add_argument('--nocomments', action='store_true',
                        help="don't render column comments")
    opts = parser.parse_args()

    if opts.version:
        print(sqlacodegen.version)
        return
    if not opts.url:
        print('You must supply a url\n', file=sys.stderr)
        parser.print_help()
        return

    # Reflect the requested tables (or all of them) into a MetaData object.
    engine = create_engine(opts.url)
    import_dialect_specificities(engine)
    metadata = MetaData(engine)
    table_names = opts.tables.split(',') if opts.tables else None
    ignored_cols = opts.ignore_cols.split(',') if opts.ignore_cols else None
    metadata.reflect(engine, opts.schema, not opts.noviews, table_names)

    # Render the generated model code to the chosen file, or stdout.
    out = codecs.open(opts.outfile, 'w', encoding='utf-8') if opts.outfile else sys.stdout
    code_gen = CodeGenerator(metadata, opts.noindexes, opts.noconstraints,
                             opts.nojoined, opts.noinflect, opts.nobackrefs,
                             opts.flask, ignored_cols, opts.noclasses,
                             opts.nocomments)
    code_gen.render(out)
コード例 #38
0
ファイル: Outils.py プロジェクト: nantodevison/Outils
def check_colonne_in_table_bdd(bdd, schema_r, table_r, *colonnes):
    """Check that a database table contains the targeted columns.

    Parameters
    ----------
    bdd : str
        Database descriptor, see the id_connexion modules.
    schema_r : str
        Name of the schema holding the table.
    table_r : str
        Name of the table.
    *colonnes : str
        Names of the columns expected in the table.

    Returns
    -------
    flag : bool
        True if every column is in the table, False otherwise (also False
        when the table itself is not found and columns were requested).
    missing : list
        The requested columns that are absent.
    """
    # Default to "no columns found": previously, a missing table left
    # `columns` unbound and the final check raised a NameError.
    columns = []
    with ct.ConnexionBdd(bdd) as c:
        m = MetaData(bind=c.engine, schema=schema_r)
        m.reflect()
        inspector = inspect(c.engine)
    for t in m.tables.keys():
        if t == f'{schema_r}.{table_r}':
            columns = [col['name']
                       for col in inspector.get_columns(table_r, schema=schema_r)]
            break  # table keys are unique; no need to keep scanning
    if all(e in columns for e in colonnes):
        return True, []
    return False, [e for e in colonnes if e not in columns]
コード例 #39
0
    def do_cleardb(self, s):
        '''
        cleardb

        Clears out the main database (not the settings db).  This should only
        be done while dofler is not running!
        '''
        # Only MetaData is actually needed here; the previously imported
        # reflection Inspector and DDL constructs were never used.
        from sqlalchemy.schema import MetaData
        conn = db.engine.connect()
        trans = conn.begin()
        # Reflect the live schema and drop everything it contains.
        metadata = MetaData(db.engine)
        metadata.reflect()
        metadata.drop_all()
        trans.commit()
        conn.close()
        # The PVS report database lives outside SQLAlchemy; remove the file
        # directly, stopping the service around the deletion.
        vuln_db = '/opt/pvs/var/pvs/db/reports.db'
        if os.path.exists(vuln_db):
            os.system('service pvs stop')
            os.remove(vuln_db)
            os.system('service pvs start')
コード例 #40
0
ファイル: __init__.py プロジェクト: NiklasMM/alembic_test
class Database():
    """Wrap an alembic-managed test database.

    On construction the schema is upgraded to *before_rev*; calling
    migrate() advances it to *after_rev* and re-reflects the metadata.
    """

    def __init__(self, config, database_url, before_rev, after_rev):
        self.before_rev = before_rev
        self.after_rev = after_rev
        self.alembic_cfg = Config(config)
        self.alembic_cfg.set_main_option("sqlalchemy.url", database_url)
        command.upgrade(self.alembic_cfg, before_rev)
        self.engine = create_engine(
            self.alembic_cfg.get_main_option("sqlalchemy.url"))
        self.metadata = MetaData()
        self.metadata.reflect(bind=self.engine)

    def insert(self, data):
        """Insert rows into the database.

        *data* maps table name -> list of row dicts.  Any non-nullable
        column a row does not supply is filled with generated fake data.
        """
        connection = self.engine.connect()
        for table_name, rows in data.items():
            table = self.metadata.tables[table_name]
            for row in rows:
                # Fill required columns the caller left out.
                for column in table.c:
                    if not column.nullable and column.name not in row:
                        row[column.name.split(".")[-1]] = generate_fake_data(
                            column.type)
                print(row)
                connection.execute(table.insert().values(**row))
        connection.close()

    def migrate(self):
        """Upgrade the schema to after_rev and refresh the reflected metadata."""
        command.upgrade(self.alembic_cfg, self.after_rev)
        self.metadata = MetaData()
        self.metadata.reflect(bind=self.engine)

    def get_table(self, table_name):
        """Return the reflected Table named *table_name*."""
        return self.metadata.tables[table_name]

    def connection(self):
        """Open and return a fresh connection to the engine."""
        return self.engine.connect()
コード例 #41
0
def upgrade():
    # Migration: drop the folder_id columns from the EAS tables and widen
    # easfoldersyncstatus.name.  NOTE(review): this is Python 2 code
    # (print statements) and must stay that way in this snippet.
    from inbox.ignition import main_engine
    engine = main_engine(pool_size=1, max_overflow=0)

    # Check affected tables present
    if not engine.has_table('easaccount'):
        return

    meta = MetaData()
    meta.reflect(bind=engine)
    easuid = meta.tables['easuid']

    # Check this migration hasn't run before
    # (folder_id is the column this migration removes, so its absence
    # means the migration already ran).
    if 'folder_id' not in [c.name for c in easuid.columns]:
        print 'This migration has been run, skipping.'
        return

    print 'Running migration'
    conn = op.get_bind()

    # Raw DDL: widen the name column, then drop folder_id from both tables.
    conn.execute('''ALTER TABLE easfoldersyncstatus
                    CHANGE name name varchar(191) NOT NULL''')
    conn.execute('''ALTER TABLE easuid DROP COLUMN folder_id''')
    conn.execute('''ALTER TABLE easfoldersyncstatus DROP COLUMN folder_id''')
コード例 #42
0
def codegen(args):
    """Reflect the database at args.url and return the generated model code.

    The generated source is rendered into an in-memory buffer, passed
    through fix_generated(), and also written to args.outfile when given.
    """
    # Use reflection to fill in the metadata
    engine = create_engine(args.url)

    metadata = MetaData(engine)
    tables = args.tables.split(",") if args.tables else None
    metadata.reflect(engine, args.schema, not args.noviews, tables)
    if "sqlite" in args.url: # db.session.bind.dialect.name == "sqlite":   FIXME review
        # dirty hack for sqlite
        engine.execute("""PRAGMA journal_mode = OFF""")

    # Render into an in-memory buffer, then post-process the text.
    capture = StringIO()
    generator = CodeGenerator(metadata, args.noindexes, args.noconstraints,
                              args.nojoined, args.noinflect, args.noclasses)
    generator.render(capture)
    generated = fix_generated(capture.getvalue(), args)
    if args.outfile:
        # Previously the file object was opened and never closed; use a
        # context manager so the handle is released even on write errors.
        with io.open(args.outfile, "w", encoding="utf-8") as outfile:
            outfile.write(generated)
    return generated
コード例 #43
0
def create_links_for_cluster_collection(session,
                                        entity,
                                        minimum_similarity=None):
    """Main entry point to cluster creation.

    Assumes the similarity and importance tables already exist (via
    setup_company_similarities) and fills the '<entity>_standardization'
    table mapping each cluster member to its standard name.
    """
    metadata = MetaData(bind=session.get_bind())

    similarity_table = SIMILAR_ENTITY_TABLE_MAP[entity]
    importance_table = entity + '_importance'
    link_table_name = entity + '_standardization'

    initialize_link_table(session, link_table_name)
    metadata.reflect()
    # Exactly one reflected table must carry this name; the unpacking
    # asserts that.
    [link_table] = [t for t in metadata.sorted_tables
                    if t.name == link_table_name]

    clusters = bucket_by_trigram_signature(
        session, similarity_table, minimum_similarity=minimum_similarity)
    for cluster in clusters.values():
        standard = select_standard_name(session, cluster, importance_table)
        link_members_to_standard(session, cluster, standard, link_table)
コード例 #44
0
 def test_get_columns_exclude_geo_point(self):
     """
     SQLAlchemy: Test get_columns exclude geo point (odelasticsearch only)
     """
     # Skip under the plain "elasticsearch" driver; the geo-point
     # exclusion behaviour under test applies to odelasticsearch only.
     if self.driver_name == "elasticsearch":
         return
     # Reflect the live schema and read the columns of the "data1" index.
     metadata = MetaData()
     metadata.reflect(bind=self.engine)
     source_cols = [c.name for c in metadata.tables["data1"].c]
     # Expected reflected columns -- note no geo_point field appears here.
     expected_columns = [
         "field_array",
         "field_array.keyword",
         "field_boolean",
         "field_float",
         "field_nested.c1",
         "field_nested.c1.keyword",
         "field_nested.c2",
         "field_number",
         "field_str",
         "field_str.keyword",
         "timestamp",
     ]
     self.assertEqual(source_cols,
                      self.make_columns_compliant(expected_columns))
コード例 #45
0
def reflect_table(engine, klass):
    """Reflect the database schema and map *klass* onto its table.

    Picks the first name from klass.tables() that exists in the reflected
    schema, maps the class onto that table with the class's column prefix,
    and returns the table.  Raises DatabaseError when reflection fails or
    when none of the supported tables is present.
    """
    meta = MetaData()
    try:
        meta.reflect(bind=engine)
    except OperationalError as exc:
        raise DatabaseError(error=exc.orig.args[1], code=exc.orig.args[0])

    # First supported table present in the reflected schema, if any.
    table = next((meta.tables[name] for name in klass.tables()
                  if name in meta.tables), None)

    if table is None:
        raise DatabaseError(error="Invalid schema. Table not found", code="-1")

    # Map table schema into klass
    mapper(klass, table, column_prefix=klass.column_prefix())

    return table
コード例 #46
0
# -*- coding: utf-8 -*-

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy import inspect
from models import db, Models, app
from faker import Faker
from random import randint
from sqlalchemy.schema import MetaData
import re

# Accumulator for model constructor keyword arguments -- presumably filled
# in later by the generation code; verify against the full source.
arglist = {}

# Reflect the full schema of the Flask-SQLAlchemy database at import time
# so foreign keys and tables are available to the helpers below.
meta = MetaData()
meta.reflect(bind=db.engine)

# Models already handled -- presumably used to avoid revisiting them.
processed = []

# def is_foreign_key(table_name, col_name):
#     return table_name+"."+col_name in [e.target_fullname for e in meta.tables[table_name].foreign_keys]


def add_model_instance(model_class, amount=10, parent=None):
    """Create fake instances of *model_class*.

    NOTE(review): presumably creates *amount* instances, optionally linked
    to *parent*; the body appears truncated here -- confirm against the
    full source.
    """
    # print(model_class.__name__)
    with app.app_context():
        insp = inspect(model_class)
        insp = inspect(model_class)  # NOTE(review): duplicate of the line above
        children = []
        child = {}
        has_parent = False
コード例 #47
0
class Database(object):
    """A SQL database wrapped with reflected metadata and Table helpers.

    Connections and transactions are tracked per thread via a
    threading.local, and schema modifications are serialised with an RLock.
    The transaction bookkeeping (_acquire/_release/_dispose_transaction) is
    order-sensitive; read begin()/commit()/rollback() together.
    """

    def __init__(self,
                 url,
                 schema=None,
                 reflect_metadata=True,
                 engine_kwargs=None,
                 reflect_views=True,
                 row_type=row_type):
        # Avoid a shared mutable default for the engine kwargs.
        if engine_kwargs is None:
            engine_kwargs = {}

        self.lock = threading.RLock()
        self.local = threading.local()
        if '?' in url:
            # Strip the query string from the URL; a 'schema' or
            # 'searchpath' parameter overrides the schema argument when
            # that argument was not given.  Remaining parameters are put
            # back onto the URL.
            url, query = url.split('?', 1)
            query = parse_qs(query)
            if schema is None:
                # Pop the schema out of the query string (searchpath is an
                # accepted alias).
                schema_qs = query.pop('schema', query.pop('searchpath', []))
                if len(schema_qs):
                    schema = schema_qs.pop()
            if len(query):
                url = url + '?' + urlencode(query, doseq=True)
        self.schema = schema
        self.engine = create_engine(url, **engine_kwargs)
        self.url = url
        self.metadata = MetaData(schema=schema)
        self.metadata.bind = self.engine
        if reflect_metadata:
            self.metadata.reflect(self.engine, views=reflect_views)
        self.row_type = row_type
        # Tables created/loaded through this object, keyed by name.
        self._tables = {}

    @property
    def executable(self):
        """ The current connection or engine against which statements
        will be executed. """
        if hasattr(self.local, 'connection'):
            return self.local.connection
        return self.engine

    @property
    def op(self):
        # Alembic operations object for schema changes on this engine.
        ctx = MigrationContext.configure(self.engine)
        return Operations(ctx)

    def _acquire(self):
        # Take the schema-modification lock.
        self.lock.acquire()

    def _release(self):
        # Release the lock immediately, unless a transaction is open on
        # this thread -- then defer the release to _release_internal,
        # which runs when the transaction is disposed.
        if not hasattr(self.local, 'tx'):
            self.lock.release()
            self.local.must_release = False
        else:
            self.local.must_release = True

    def _release_internal(self):
        # Perform a release that _release deferred while a transaction
        # was open.
        if getattr(self.local, 'must_release', None):
            self.lock.release()
            self.local.must_release = False

    def _dispose_transaction(self):
        # Drop the innermost transaction; when the stack empties, close
        # and forget this thread's connection and release any deferred lock.
        self.local.tx.remove(self.local.tx[-1])
        if not self.local.tx:
            del self.local.tx
            self.local.connection.close()
            del self.local.connection
            self._release_internal()

    def begin(self):
        """ Enter a transaction explicitly. No data will be written
        until the transaction has been committed.

        **NOTICE:** Schema modification operations, such as the creation
        of tables or columns will not be part of the transactional context."""
        if not hasattr(self.local, 'connection'):
            self.local.connection = self.engine.connect()
        if not hasattr(self.local, 'tx'):
            self.local.tx = []
        # Transactions nest: each begin() pushes onto this thread's stack.
        self.local.tx.append(self.local.connection.begin())

    def commit(self):
        """ Commit the current transaction, making all statements executed
        since the transaction was begun permanent. """
        if hasattr(self.local, 'tx') and self.local.tx:
            self.local.tx[-1].commit()
            self._dispose_transaction()

    def rollback(self):
        """ Roll back the current transaction, discarding all statements
        executed since the transaction was begun. """
        if hasattr(self.local, 'tx') and self.local.tx:
            self.local.tx[-1].rollback()
            self._dispose_transaction()

    def __enter__(self):
        # `with db:` opens a transaction ...
        self.begin()
        return self

    def __exit__(self, error_type, error_value, traceback):
        # ... committed on clean exit, rolled back on error.  If the
        # commit itself fails, roll back while re-raising the original
        # failure (bare except is deliberate here: any failure mode of
        # commit must trigger the rollback).
        if error_type is None:
            try:
                self.commit()
            except:
                with safe_reraise():
                    self.rollback()
        else:
            self.rollback()

    @property
    def tables(self):
        """
        Get a listing of all tables that exist in the database.
        """
        # Union of reflected tables and tables created through this object.
        return list(
            set(self.metadata.tables.keys()) | set(self._tables.keys()))

    def __contains__(self, member):
        return member in self.tables

    def _valid_table_name(self, table_name):
        """ Check if the table name is obviously invalid. """
        if table_name is None or not len(table_name.strip()):
            raise ValueError("Invalid table name: %r" % table_name)
        return table_name.strip()

    def create_table(self,
                     table_name,
                     primary_id='id',
                     primary_type='Integer'):
        """
        Creates a new table. The new table will automatically have an `id` column
        unless specified via optional parameter primary_id, which will be used
        as the primary key of the table. Automatic id is set to be an
        auto-incrementing integer, while the type of custom primary_id can be a
        String or an Integer as specified with primary_type flag. The default
        length of String is 255. The caller can specify the length.
        The caller will be responsible for the uniqueness of manual primary_id.

        This custom id feature is only available via direct create_table call.

        Returns a :py:class:`Table <dataset.Table>` instance.
        ::

            table = db.create_table('population')

            # custom id and type
            table2 = db.create_table('population2', 'age')
            table3 = db.create_table('population3', primary_id='race', primary_type='String')
            # custom length of String
            table4 = db.create_table('population4', primary_id='race', primary_type='String(50)')
        """
        table_name = self._valid_table_name(table_name)
        self._acquire()
        try:
            log.debug("Creating table: %s on %r" % (table_name, self.engine))
            # Parse the primary_type spec: 'Integer', 'String' or 'String(n)'.
            match = re.match(r'^(Integer)$|^(String)(\(\d+\))?$', primary_type)
            if match:
                if match.group(1) == 'Integer':
                    # Only the default 'id' column auto-increments.
                    auto_flag = False
                    if primary_id == 'id':
                        auto_flag = True
                    col = Column(primary_id,
                                 Integer,
                                 primary_key=True,
                                 autoincrement=auto_flag)
                elif not match.group(3):
                    col = Column(primary_id, String(255), primary_key=True)
                else:
                    # Explicit String length, capped at 255.
                    len_string = int(match.group(3)[1:-1])
                    len_string = min(len_string, 255)
                    col = Column(primary_id,
                                 String(len_string),
                                 primary_key=True)
            else:
                raise DatasetException(
                    "The primary_type has to be either 'Integer' or 'String'.")

            table = SQLATable(table_name, self.metadata, schema=self.schema)
            table.append_column(col)
            table.create(self.engine)
            self._tables[table_name] = table
            return Table(self, table)
        finally:
            self._release()

    def load_table(self, table_name):
        """
        Loads a table. This will fail if the tables does not already
        exist in the database. If the table exists, its columns will be
        reflected and are available on the :py:class:`Table <dataset.Table>`
        object.

        Returns a :py:class:`Table <dataset.Table>` instance.
        ::

            table = db.load_table('population')
        """
        table_name = self._valid_table_name(table_name)
        self._acquire()
        try:
            log.debug("Loading table: %s on %r" % (table_name, self))
            table = SQLATable(table_name,
                              self.metadata,
                              schema=self.schema,
                              autoload=True)
            self._tables[table_name] = table
            return Table(self, table)
        finally:
            self._release()

    def update_table(self, table_name):
        # Re-reflect the whole schema and refresh the cached handle for
        # *table_name* (e.g. after an out-of-band schema change).
        table_name = self._valid_table_name(table_name)
        self.metadata = MetaData(schema=self.schema)
        self.metadata.bind = self.engine
        self.metadata.reflect(self.engine)
        self._tables[table_name] = SQLATable(table_name,
                                             self.metadata,
                                             schema=self.schema)
        return self._tables[table_name]

    def get_table(self, table_name, primary_id='id', primary_type='Integer'):
        """
        Smart wrapper around *load_table* and *create_table*. Either loads a table
        or creates it if it doesn't exist yet.
        For short-hand to create a table with custom id and type using [], where
        table_name, primary_id, and primary_type are specified as a tuple

        Returns a :py:class:`Table <dataset.Table>` instance.
        ::

            table = db.get_table('population')
            # you can also use the short-hand syntax:
            table = db['population']

        """
        if table_name in self._tables:
            return Table(self, self._tables[table_name])
        self._acquire()
        try:
            if self.engine.has_table(table_name, schema=self.schema):
                return self.load_table(table_name)
            else:
                return self.create_table(table_name, primary_id, primary_type)
        finally:
            self._release()

    def __getitem__(self, table_name):
        return self.get_table(table_name)

    def query(self, query, **kw):
        """
        Run a statement on the database directly, allowing for the
        execution of arbitrary read/write queries. A query can either be
        a plain text string, or a `SQLAlchemy expression <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_.
        If a plain string is passed in, it will be converted to an expression automatically.

        Keyword arguments will be used for parameter binding. See the `SQLAlchemy
        documentation <http://docs.sqlalchemy.org/en/rel_0_9/core/connections.html#sqlalchemy.engine.Connection.execute>`_ for details.

        The returned iterator will yield each result sequentially.
        ::

            res = db.query('SELECT user, COUNT(*) c FROM photos GROUP BY user')
            for row in res:
                print(row['user'], row['c'])
        """
        if isinstance(query, six.string_types):
            query = text(query)
        return ResultIter(self.executable.execute(query, **kw),
                          row_type=self.row_type)

    def __repr__(self):
        return '<Database(%s)>' % self.url
コード例 #48
0
ファイル: context.py プロジェクト: vamshi9666/graphql-engine
class HGECtx:
    """Test-harness context for a Hasura GraphQL Engine instance.

    Bundles the HTTP session, auth material read from pytest options, a
    SQLAlchemy engine for the backing Postgres, and a websocket client.
    Construction has side effects: it runs get-version.sh and, unless
    disabled, clears the database schema via the HGE API.
    """

    def __init__(self, hge_url, pg_url, config):

        self.http = requests.Session()
        # Auth / connection material, mostly from pytest command-line options.
        self.hge_key = config.getoption('--hge-key')
        self.hge_url = hge_url
        self.pg_url = pg_url
        self.hge_webhook = config.getoption('--hge-webhook')
        hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
        if hge_jwt_key_file is None:
            self.hge_jwt_key = None
        else:
            with open(hge_jwt_key_file) as f:
                self.hge_jwt_key = f.read()
        self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
        if self.hge_jwt_conf is not None:
            self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
        self.webhook_insecure = config.getoption('--test-webhook-insecure')
        self.metadata_disabled = config.getoption('--test-metadata-disabled')
        self.may_skip_test_teardown = False
        self.function_permissions = config.getoption(
            '--test-function-permissions')

        # Postgres handle; metadata is only populated by reflect_tables().
        self.engine = create_engine(self.pg_url)
        self.meta = MetaData()

        self.ws_read_cookie = config.getoption('--test-ws-init-cookie')

        self.hge_scale_url = config.getoption('--test-hge-scale-url')
        self.avoid_err_msg_checks = config.getoption(
            '--avoid-error-message-checks')
        self.inherited_roles_tests = config.getoption('--test-inherited-roles')

        self.ws_client = GQLWsClient(self, '/v1/graphql')

        self.backend = config.getoption('--backend')

        # HGE version
        # The VERSION environment variable takes precedence over the
        # get-version.sh script output.
        result = subprocess.run(['../../scripts/get-version.sh'],
                                shell=False,
                                stdout=subprocess.PIPE,
                                check=True)
        env_version = os.getenv('VERSION')
        self.version = env_version if env_version else result.stdout.decode(
            'utf-8').strip()
        # Clear the database through the HGE API unless schema setup is
        # disabled; tear down on any request failure.
        if not self.metadata_disabled and not config.getoption(
                '--skip-schema-setup'):
            try:
                st_code, resp = self.v2q_f("queries/" +
                                           self.backend_suffix("clear_db") +
                                           ".yaml")
            except requests.exceptions.RequestException as e:
                self.teardown()
                raise HGECtxError(repr(e))
            assert st_code == 200, resp

        # Postgres version
        if self.backend == 'postgres':
            pg_version_text = self.sql(
                'show server_version_num').fetchone()['server_version_num']
            self.pg_version = int(pg_version_text)

    def reflect_tables(self):
        # Populate self.meta from the live database schema.
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h, b=None, v=None):
        # Generic HGE request: the verb *v* selects the HTTP method; the
        # default (no verb) POSTs *q* as JSON.
        resp = None
        if v == 'GET':
            resp = self.http.get(self.hge_url + u, headers=h)
        elif v == 'POST' and b:
            # TODO: Figure out why the requests are failing with a byte object passed in as `data`
            resp = self.http.post(self.hge_url + u, data=b, headers=h)
        elif v == 'PATCH' and b:
            resp = self.http.patch(self.hge_url + u, data=b, headers=h)
        elif v == 'PUT' and b:
            resp = self.http.put(self.hge_url + u, data=b, headers=h)
        elif v == 'DELETE':
            resp = self.http.delete(self.hge_url + u, headers=h)
        else:
            resp = self.http.post(self.hge_url + u, json=q, headers=h)
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        # Returning response headers to get the request id from response
        return resp.status_code, resp.json(
            object_pairs_hook=OrderedDict), resp.headers

    def sql(self, q):
        # Run raw SQL against the backing database on a short-lived
        # connection and return the result proxy.
        conn = self.engine.connect()
        res = conn.execute(q)
        conn.close()
        return res

    def execute_query(self, q, url_path, headers={}):
        # NOTE(review): mutable default `headers={}` is safe here only
        # because it is copied before modification.
        h = headers.copy()
        if self.hge_key is not None:
            h['X-Hasura-Admin-Secret'] = self.hge_key
        resp = self.http.post(self.hge_url + url_path, json=q, headers=h)
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        return resp.status_code, resp.json(object_pairs_hook=OrderedDict)

    def v1q(self, q, headers={}):
        # POST *q* to the v1 query endpoint.
        return self.execute_query(q, "/v1/query", headers)

    def v1q_f(self, fn):
        # v1q with the query loaded from a YAML file.
        with open(fn) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1q(yml.load(f))

    def v2q(self, q, headers={}):
        # POST *q* to the v2 query endpoint.
        return self.execute_query(q, "/v2/query", headers)

    def v2q_f(self, fn):
        # v2q with the query loaded from a YAML file.
        with open(fn) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v2q(yml.load(f))

    def backend_suffix(self, filename):
        # Postgres fixtures use the bare name; other backends get a suffix.
        if self.backend == 'postgres':
            return filename
        else:
            return filename + "_" + self.backend

    def v1metadataq(self, q, headers={}):
        # POST *q* to the v1 metadata endpoint.
        return self.execute_query(q, "/v1/metadata", headers)

    def v1metadataq_f(self, fn):
        # v1metadataq with the query loaded from a YAML file.
        with open(fn) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1metadataq(yml.load(f))

    def teardown(self):
        # Release the HTTP session and all pooled database connections.
        self.http.close()
        self.engine.dispose()
コード例 #49
0
def read_sql_table(table_name,
                   con,
                   schema=None,
                   column_dtypes=None,
                   coerce_float=True,
                   parse_dates=None,
                   columns=None,
                   chunksize=None):
    """Read SQL database table into an afw table.

    Given a table name and an SQLAlchemy connectable, returns an Table.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy connectable (or database string URI)
    schema : string, default: None
        Name of SQL schema in database to query (if database flavor
        supports this). If None, use default schema (default).
    column_dtypes : dict, default: None
        Dict of ``{column_name: dtype}`` where dtype is the target
        dtype of the column. This is used to harmonize types and downcast
        from doubles and longs.
    coerce_float : boolean, default True
        Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of Precision.
    parse_dates : list or dict, default: None
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
    columns : list, default: None
        List of column names to select from sql table
    chunksize : int, default: None
        If specified, return an iterator where ``chunksize`` is the number
        of rows to include in each chunk.

    Returns
    -------
    afw table

    Raises
    ------
    ValueError
        If ``table_name`` cannot be reflected or read from the database.

    Notes
    -----
    Any datetime values with time zone information will be converted to UTC

    See also
    --------
    read_sql_query : Read SQL query into a afw table.
    read_sql

    """

    con = _engine_builder(con)
    meta = MetaData(con, schema=schema)
    try:
        # Reflect only the requested table; include views so they can be
        # read like ordinary tables.
        meta.reflect(only=[table_name], views=True)
    except sqlalchemy.exc.InvalidRequestError as exc:
        # Chain the original reflection error for easier debugging.
        raise ValueError("Table %s not found" % table_name) from exc

    sql_io = SQLDatabase(con, meta=meta)
    table = sql_io.read_table(table_name,
                              column_dtypes=column_dtypes,
                              coerce_float=coerce_float,
                              parse_dates=parse_dates,
                              columns=columns,
                              chunksize=chunksize)

    # read_table returns None when the table could not be read; raise a
    # consistent error message (the original also passed `con` as a second
    # ValueError argument, which garbled the message).
    if table is None:
        raise ValueError("Table %s not found" % table_name)
    return table
コード例 #50
0
ファイル: context.py プロジェクト: zeta1999/graphql-engine
class HGECtx:
    """Test-harness context for a Hasura GraphQL Engine instance.

    On construction this starts a webhook HTTP server and a GraphQL
    websocket listener in background threads, connects to Postgres via
    SQLAlchemy, reads the project version, and resets the database using
    queries/clear_db.yaml.
    """

    def __init__(self, hge_url, pg_url):
        server_address = ('0.0.0.0', 5592)

        # Queues hand events from the webhook/websocket threads back to
        # the test thread.
        self.resp_queue = queue.Queue(maxsize=1)
        self.error_queue = queue.Queue()
        self.ws_queue = queue.Queue(maxsize=-1)
        self.httpd = WebhookServer(self.resp_queue, self.error_queue,
                                   server_address)
        self.web_server = threading.Thread(target=self.httpd.serve_forever)
        self.web_server.start()

        self.pg_url = pg_url
        self.engine = create_engine(self.pg_url)
        self.meta = MetaData()

        self.http = requests.Session()
        self.hge_url = hge_url

        # Derive the websocket endpoint from the HTTP url.
        self.ws_url = urlparse(hge_url)
        self.ws_url = self.ws_url._replace(scheme='ws')
        self.ws_url = self.ws_url._replace(path='/v1alpha1/graphql')
        self.ws = websocket.WebSocketApp(self.ws_url.geturl(),
                                         on_message=self._on_message)
        self.wst = threading.Thread(target=self.ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

        # BUG FIX: the command is an argv list, so it must be run with
        # shell=False; a list combined with shell=True only works by
        # accident on POSIX (remaining items become shell arguments).
        # The sibling HGECtx implementations already use shell=False.
        result = subprocess.run(['../../scripts/get-version.sh'],
                                shell=False,
                                stdout=subprocess.PIPE,
                                check=True)
        self.version = result.stdout.decode('utf-8').strip()
        try:
            st_code, resp = self.v1q_f('queries/clear_db.yaml')
        except requests.exceptions.RequestException as e:
            # Tear down background threads/servers before propagating.
            self.teardown()
            raise HGECtxError(repr(e))
        assert st_code == 200, resp

    def _on_message(self, message):
        # Drop keep-alive ('ka') frames; queue everything else.
        my_json = json.loads(message)
        if my_json['type'] != 'ka':
            self.ws_queue.put(message)

    def get_event(self, timeout):
        """Return the next webhook event (blocks up to ``timeout`` seconds)."""
        return self.resp_queue.get(timeout=timeout)

    def get_error_queue_size(self):
        """Drain the error queue and return how many items it held."""
        sz = 0
        while not self.error_queue.empty():
            self.error_queue.get()
            sz = sz + 1
        return sz

    def get_ws_event(self, timeout):
        """Return the next websocket event, decoded from JSON."""
        return json.loads(self.ws_queue.get(timeout=timeout))

    def reflect_tables(self):
        """Refresh SQLAlchemy metadata from the live database."""
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h):
        """POST ``q`` to an arbitrary endpoint ``u`` with headers ``h``."""
        resp = self.http.post(self.hge_url + u, json=q, headers=h)
        return resp.status_code, resp.json()

    def v1q(self, q):
        """POST ``q`` to /v1/query and return (status_code, json_body)."""
        resp = self.http.post(self.hge_url + "/v1/query", json=q)
        return resp.status_code, resp.json()

    def v1q_f(self, fn):
        """Run the /v1/query request stored in YAML file ``fn``."""
        with open(fn) as f:
            # BUG FIX: yaml.load without an explicit Loader can construct
            # arbitrary Python objects; use safe_load (as the sibling
            # HGECtx implementation already does).
            return self.v1q(yaml.safe_load(f))

    def teardown(self):
        """Shut down HTTP session, DB engine, webhook server and websocket."""
        self.http.close()
        self.engine.dispose()
        self.httpd.shutdown()
        self.httpd.server_close()
        self.ws.close()
        self.web_server.join()
        self.wst.join()
コード例 #51
0
class HGECtx:
    """Test-harness context for a Hasura GraphQL Engine instance.

    Starts a webhook HTTP server, a GraphQL websocket listener and a stub
    GraphQL server in background threads, connects to Postgres via
    SQLAlchemy, and (unless metadata is disabled) resets the database
    using queries/clear_db.yaml.
    """

    def __init__(self, hge_url, pg_url, hge_key, hge_webhook, webhook_insecure,
                 hge_jwt_key_file, hge_jwt_conf, metadata_disabled):
        server_address = ('0.0.0.0', 5592)

        # Queues hand events from the webhook/websocket threads back to
        # the test thread.
        self.resp_queue = queue.Queue(maxsize=1)
        self.error_queue = queue.Queue()
        self.ws_queue = queue.Queue(maxsize=-1)
        self.httpd = WebhookServer(self.resp_queue, self.error_queue,
                                   server_address)
        self.web_server = threading.Thread(target=self.httpd.serve_forever)
        self.web_server.start()

        self.pg_url = pg_url
        self.engine = create_engine(self.pg_url)
        self.meta = MetaData()

        self.http = requests.Session()
        self.hge_url = hge_url
        self.hge_key = hge_key
        self.hge_webhook = hge_webhook
        if hge_jwt_key_file is None:
            self.hge_jwt_key = None
        else:
            with open(hge_jwt_key_file) as f:
                self.hge_jwt_key = f.read()
        self.hge_jwt_conf = hge_jwt_conf
        self.webhook_insecure = webhook_insecure
        self.metadata_disabled = metadata_disabled
        self.may_skip_test_teardown = False

        # Derive the websocket endpoint from the HTTP url.
        self.ws_url = urlparse(hge_url)
        self.ws_url = self.ws_url._replace(scheme='ws')
        self.ws_url = self.ws_url._replace(path='/v1alpha1/graphql')
        self.ws = websocket.WebSocketApp(self.ws_url.geturl(),
                                         on_message=self._on_message)
        self.wst = threading.Thread(target=self.ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

        # start the graphql server
        self.graphql_server = graphql_server.create_server('127.0.0.1', 5000)
        self.gql_srvr_thread = threading.Thread(
            target=self.graphql_server.serve_forever)
        self.gql_srvr_thread.start()

        result = subprocess.run(['../../scripts/get-version.sh'],
                                shell=False,
                                stdout=subprocess.PIPE,
                                check=True)
        self.version = result.stdout.decode('utf-8').strip()
        if not self.metadata_disabled:
            try:
                st_code, resp = self.v1q_f('queries/clear_db.yaml')
            except requests.exceptions.RequestException as e:
                # Tear down background threads/servers before propagating.
                self.teardown()
                raise HGECtxError(repr(e))
            assert st_code == 200, resp

    def _on_message(self, message):
        # Drop keep-alive ('ka') frames; queue everything else.
        my_json = json.loads(message)
        if my_json['type'] != 'ka':
            self.ws_queue.put(message)

    def get_event(self, timeout):
        """Return the next webhook event (blocks up to ``timeout`` seconds)."""
        return self.resp_queue.get(timeout=timeout)

    def get_error_queue_size(self):
        """Drain the error queue and return how many items it held."""
        sz = 0
        while not self.error_queue.empty():
            self.error_queue.get()
            sz = sz + 1
        return sz

    def get_ws_event(self, timeout):
        """Return the next websocket event, decoded from JSON."""
        return json.loads(self.ws_queue.get(timeout=timeout))

    def reflect_tables(self):
        """Refresh SQLAlchemy metadata from the live database."""
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h):
        """POST ``q`` to an arbitrary endpoint ``u`` with headers ``h``."""
        resp = self.http.post(self.hge_url + u, json=q, headers=h)
        return resp.status_code, resp.json()

    def sql(self, q):
        """Execute raw SQL ``q`` on a short-lived connection."""
        conn = self.engine.connect()
        res = conn.execute(q)
        conn.close()
        return res

    def v1q(self, q, headers=None):
        """POST ``q`` to /v1/query, adding the admin-secret header if set.

        ``headers`` defaults to None (not a shared mutable ``{}``).
        """
        h = dict(headers) if headers else {}
        if self.hge_key is not None:
            h['X-Hasura-Admin-Secret'] = self.hge_key
        resp = self.http.post(self.hge_url + "/v1/query", json=q, headers=h)
        return resp.status_code, resp.json()

    def v1q_f(self, fn):
        """Run the /v1/query request stored in YAML file ``fn``."""
        with open(fn) as f:
            # BUG FIX: yaml.load without an explicit Loader can construct
            # arbitrary Python objects; use safe_load (as the sibling
            # HGECtx implementation already does).
            return self.v1q(yaml.safe_load(f))

    def teardown(self):
        """Shut down every server/thread/session started in __init__."""
        self.http.close()
        self.engine.dispose()
        self.httpd.shutdown()
        self.httpd.server_close()
        self.ws.close()
        self.web_server.join()
        self.wst.join()
        graphql_server.stop_server(self.graphql_server)
        self.gql_srvr_thread.join()
コード例 #52
0
def main() -> None:
    """CLI entry point: reflect an existing database and emit model code."""
    available = {
        ep.name: ep
        for ep in entry_points()['sqlacodegen.generators']
    }

    parser = argparse.ArgumentParser(
        description='Generates SQLAlchemy model code from an existing database.'
    )
    parser.add_argument('url', nargs='?',
                        help='SQLAlchemy url to the database')
    parser.add_argument('--option', nargs='*',
                        help="options passed to the generator class")
    parser.add_argument('--version', action='store_true',
                        help="print the version number and exit")
    parser.add_argument('--schemas',
                        help='load tables from the given schemas (comma separated)')
    parser.add_argument('--generator', choices=available, default='declarative',
                        help="generator class to use")
    parser.add_argument('--tables',
                        help='tables to process (comma-separated, default: all)')
    parser.add_argument('--noviews', action='store_true', help="ignore views")
    parser.add_argument('--outfile',
                        help='file to write output to (default: stdout)')
    args = parser.parse_args()

    # Early exits: version query, or missing url.
    if args.version:
        print(version('sqlacodegen'))
        return
    if not args.url:
        print('You must supply a url\n', file=sys.stderr)
        parser.print_help()
        return

    # Use reflection to fill in the metadata, one schema at a time.
    engine = create_engine(args.url)
    meta = MetaData()
    only_tables = args.tables.split(',') if args.tables else None
    schema_list = args.schemas.split(',') if args.schemas else [None]
    for schema in schema_list:
        meta.reflect(engine, schema, not args.noviews, only_tables)

    # Instantiate the chosen generator.
    generator_cls = available[args.generator].load()
    generator = generator_cls(meta, engine, set(args.option or ()))

    # Write the generated model code to the chosen file or stdout.
    if args.outfile:
        with open(args.outfile, 'w', encoding='utf-8') as outfile:
            outfile.write(generator.generate())
    else:
        sys.stdout.write(generator.generate())
コード例 #53
0
ファイル: test_migration.py プロジェクト: thebent/ichnaea
 def inspect_db(self):
     """Reflect the current database schema into a fresh MetaData and return it."""
     reflected = MetaData()
     reflected.reflect(bind=self.db.engine)
     return reflected
コード例 #54
0
ファイル: db.py プロジェクト: wlmgithub/buildbot
    def __thd_clean_database(self, conn):
        """Best-effort removal of every table in the test database.

        Reflects the live schema into a fresh MetaData, manually marks the
        known buildsets<->builds<->buildrequests reference cycles with
        use_alter=True so they can be broken, then drops everything.  See
        the discussion below for why simpler approaches don't work.
        """
        # In general it's nearly impossible to do "bullet proof" database
        # cleanup with SQLAlchemy that will work on a range of databases
        # and they configurations.
        #
        # Following approaches were considered.
        #
        # 1. Drop Buildbot Model schema:
        #
        #     model.Model.metadata.drop_all(bind=conn, checkfirst=True)
        #
        # Dropping schema from model is correct and working operation only
        # if database schema is exactly corresponds to the model schema.
        #
        # If it is not (e.g. migration script failed or migration results in
        # old version of model), then some tables outside model schema may be
        # present, which may reference tables in the model schema.
        # In this case either dropping model schema will fail (if database
        # enforces referential integrity, e.g. PostgreSQL), or
        # dropping left tables in the code below will fail (if database allows
        # removing of tables on which other tables have references,
        # e.g. SQLite).
        #
        # 2. Introspect database contents and drop found tables.
        #
        #     meta = MetaData(bind=conn)
        #     meta.reflect()
        #     meta.drop_all()
        #
        # May fail if schema contains reference cycles (and Buildbot schema
        # has them). Reflection looses metadata about how reference cycles
        # can be teared up (e.g. use_alter=True).
        # Introspection may fail if schema has invalid references
        # (e.g. possible in SQLite).
        #
        # 3. What is actually needed here is accurate code for each engine
        # and each engine configuration that will drop all tables,
        # indexes, constraints, etc in proper order or in a proper way
        # (using tables alternation, or DROP TABLE ... CASCADE, etc).
        #
        # Conclusion: use approach 2 with manually teared apart known
        # reference cycles.

        # pylint: disable=too-many-nested-blocks

        try:
            meta = MetaData(bind=conn)

            # Reflect database contents. May fail, e.g. if table references
            # non-existent table in SQLite.
            meta.reflect()

            # Table.foreign_key_constraints introduced in SQLAlchemy 1.0.
            if sa_version()[:2] >= (1, 0):
                # Restore `use_alter` settings to break known reference cycles.
                # Main goal of this part is to remove SQLAlchemy warning
                # about reference cycle.
                # Looks like it's OK to do it only with SQLAlchemy >= 1.0.0,
                # since it's not issued in SQLAlchemy == 0.8.0

                # List of reference links (table_name, ref_table_name) that
                # should be broken by adding use_alter=True.
                table_referenced_table_links = [('buildsets', 'builds'),
                                                ('builds', 'buildrequests')]
                for table_name, ref_table_name in table_referenced_table_links:
                    if table_name in meta.tables:
                        table = meta.tables[table_name]
                        for fkc in table.foreign_key_constraints:
                            if fkc.referred_table.name == ref_table_name:
                                fkc.use_alter = True

            # Drop all reflected tables and indices. May fail, e.g. if
            # SQLAlchemy wouldn't be able to break circular references.
            # Sqlalchemy fk support with sqlite is not yet perfect, so we must deactivate fk during that
            # operation, even though we made our possible to use use_alter
            with withoutSqliteForeignKeys(conn.engine, conn):
                meta.drop_all()

        except Exception:
            # sometimes this goes badly wrong; being able to see the schema
            # can be a big help
            if conn.engine.dialect.name == 'sqlite':
                r = conn.execute("select sql from sqlite_master "
                                 "where type='table'")
                log.msg("Current schema:")
                for row in r.fetchall():
                    log.msg(row.sql)
            raise
コード例 #55
0
class HGECtx:
    """Test-harness context wrapping an HGE endpoint, its Postgres database
    and a GraphQL websocket client.

    Unless metadata is disabled, the database is reset on construction via
    queries/clear_db.yaml.
    """

    def __init__(self, hge_url, pg_url, hge_key, hge_webhook, webhook_insecure,
                 hge_jwt_key_file, hge_jwt_conf, metadata_disabled, ws_read_cookie, hge_scale_url):

        # Plain configuration attributes.
        self.http = requests.Session()
        self.hge_key = hge_key
        self.hge_url = hge_url
        self.pg_url = pg_url
        self.hge_webhook = hge_webhook
        self.hge_jwt_conf = hge_jwt_conf
        self.webhook_insecure = webhook_insecure
        self.metadata_disabled = metadata_disabled
        self.may_skip_test_teardown = False

        # Load the JWT key from file when one was supplied.
        self.hge_jwt_key = None
        if hge_jwt_key_file is not None:
            with open(hge_jwt_key_file) as jwt_file:
                self.hge_jwt_key = jwt_file.read()

        self.engine = create_engine(self.pg_url)
        self.meta = MetaData()

        self.ws_read_cookie = ws_read_cookie

        self.hge_scale_url = hge_scale_url

        self.ws_client = GQLWsClient(self, '/v1/graphql')

        proc = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
        self.version = proc.stdout.decode('utf-8').strip()

        if not self.metadata_disabled:
            try:
                status, body = self.v1q_f('queries/clear_db.yaml')
            except requests.exceptions.RequestException as exc:
                self.teardown()
                raise HGECtxError(repr(exc))
            assert status == 200, body

    def reflect_tables(self):
        """Refresh SQLAlchemy metadata from the live database."""
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h):
        """POST ``q`` to an arbitrary endpoint ``u`` with headers ``h``."""
        response = self.http.post(self.hge_url + u, json=q, headers=h)
        return response.status_code, response.json()

    def sql(self, q):
        """Execute raw SQL ``q`` on a short-lived connection."""
        connection = self.engine.connect()
        result = connection.execute(q)
        connection.close()
        return result

    def v1q(self, q, headers = {}):
        """POST ``q`` to /v1/query, adding the admin-secret header if set."""
        hdrs = headers.copy()
        if self.hge_key is not None:
            hdrs['X-Hasura-Admin-Secret'] = self.hge_key
        response = self.http.post(self.hge_url + "/v1/query", json=q, headers=hdrs)
        return response.status_code, response.json()

    def v1q_f(self, fn):
        """Run the /v1/query request stored in YAML file ``fn``."""
        with open(fn) as query_file:
            payload = yaml.safe_load(query_file)
        return self.v1q(payload)

    def teardown(self):
        """Release the shared HTTP session and all pooled DB connections."""
        self.http.close()
        self.engine.dispose()
コード例 #56
0
 def _get_reflected_tabname_to_colnames(self):
     """Reflect the old database and map each table name to its column names."""
     reflected = MetaData()
     # Reflection emits irrelevant warnings on stderr; silence them.
     with self.suppressed_stderr():
         reflected.reflect(bind=self.old_db_engine)
     return self._get_tabname_to_colnames(reflected)
コード例 #57
0
class MainWindow(mainwindow.Ui_MainWindow, QtWidgets.QMainWindow):
    """Main application window.

    Opens a SQLite database, displays its first table and lets the user
    filter rows either with a hand-written SQL query or with composable
    column conditions combined via AND/OR.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.orRadioButton.setChecked(True)
        self.orRadioButton.setEnabled(False)
        self.andRadioButton.setEnabled(False)
        self.AddToDBButton.setEnabled(False)
        self.removeFromDBButton.setEnabled(False)
        self.removeFromDBButton2.setEnabled(False)

        # Filtering state: the chain of chosen conditions, the pool of all
        # created conditions, and the column types of the open table.
        self.conditionChain = ConditionChain()
        self.conditions = []
        self.columnTypes = []
        self.engine = None
        self.session = None
        self.table = None

        self.initSignals()
        self.initActions()

    def initSignals(self):
        """Connect widget signals to their handlers."""
        self.openButton.clicked.connect(self.openDb)
        self.searchByQueryButton.clicked.connect(self.executeSqlQueryByText)
        self.addConditionButton.clicked.connect(self.addCondition)
        self.addToConditionsComboBoxButton.clicked.connect(
            self.chooseCondition)
        self.removeFromResultConditionsButton.clicked.connect(
            self.removeFromChosenCondition)
        self.selectColumnComboBox.currentIndexChanged.connect(
            self.updateOperatorComboBox)
        self.showQueryButton.clicked.connect(self.showQuery)
        self.clearQueryTextButton.clicked.connect(self.clearQuery)
        self.searchByConditionsButton.clicked.connect(self.searchByConditions)

    def initActions(self):
        """Connect menu actions."""
        self.actionExit.triggered.connect(sys.exit)
        self.actionOpen.triggered.connect(self.openDb)

    def initColumnKeysComboBox(self):
        """Populate the column combo box from the open table's columns."""
        if self.table is None:
            raise NoDataBaseException

        for key in self.table.columns.keys():
            self.selectColumnComboBox.addItem(str(key))

    def updateOperatorComboBox(self):
        """Enable only '=' style operators for VARCHAR columns."""
        if self.table is None:
            return

        currentColumn = self.selectColumnComboBox.currentIndex()
        operatorsCount = self.selectOperatorComboBox.count()

        if not str(self.columnTypes[currentColumn]).startswith("VARCHAR"):
            for i in range(operatorsCount):
                self.selectOperatorComboBox.model().item(i).setEnabled(True)
        else:
            # String columns only support the first (equality) operator.
            self.selectOperatorComboBox.setCurrentIndex(0)
            for i in range(1, operatorsCount):
                self.selectOperatorComboBox.model().item(i).setEnabled(False)

    def showQuery(self):
        """Render the current condition chain as SQL into the query widget."""
        if not self.conditionChain or self.table is None:
            return

        query = QueryCreator.createQuery(self.conditionChain, self.table.name)
        self.queryTextWidget.setText(query)

    def clearQuery(self):
        """Clear the query text widget."""
        self.queryTextWidget.setText("")

    def addCondition(self):
        """Create a Condition from the current widget values and store it."""
        if self.table is None:
            return

        columnStr = self.selectColumnComboBox.currentText()
        compareOperatorStr = self.selectOperatorComboBox.currentText()
        valueStr = self.selectValueLine.text()

        # Quote the value for string-typed columns.
        currentIndex = self.selectColumnComboBox.currentIndex()
        if str(self.columnTypes[currentIndex]).startswith("VARCHAR"):
            valueStr = "\"{0}\"".format(valueStr)

        condition = Condition(columnStr, compareOperatorStr, valueStr)

        self.conditions.append(condition)
        self.allConditionsListWidget.addItem(str(condition))

        self.selectValueLine.setText("")

    def chooseCondition(self):
        """Add the selected condition to the chain and re-run the search."""
        if not self.conditions or self.table is None:
            return

        selectedConditionIndex = self.allConditionsListWidget.currentRow()
        currentCondition = self.conditions[selectedConditionIndex]
        currentOperator = Operator.OR if self.orRadioButton.isChecked(
        ) else Operator.AND
        self.conditionChain.addCondition(currentCondition, currentOperator)

        self.resultConditionsListWidget.addItem(str(currentCondition))

        self.orRadioButton.setEnabled(True)
        self.andRadioButton.setEnabled(True)

        print(str(self.conditionChain))
        query = QueryCreator.createQuery(self.conditionChain, self.table.name)
        print(query)
        self.executeSqlQueryByConditions(query)

    def removeFromChosenCondition(self):
        """Remove the selected condition from the chain and re-run the search."""
        selectedIndex = self.resultConditionsListWidget.currentRow()
        if selectedIndex == -1:
            return

        self.resultConditionsListWidget.model().removeRow(selectedIndex)
        self.conditionChain.popCondition(selectedIndex)

        query = QueryCreator.createQuery(self.conditionChain, self.table.name)
        print(query)
        self.executeSqlQueryByConditions(query)

        if not self.conditionChain.conditions:
            self.orRadioButton.setEnabled(False)
            self.andRadioButton.setEnabled(False)

    def initMainTableWidget(self):
        """Fill the main and result table widgets from the open table."""
        if self.table is None or self.engine is None:
            raise NoDataBaseException

        columnKeys = self.table.columns.keys()
        columnsCount = len(columnKeys)

        self.mainTableWidget.setColumnCount(columnsCount)
        self.mainTableWidget.setHorizontalHeaderLabels(columnKeys)
        self.resultTableWidget.setColumnCount(columnsCount)
        self.resultTableWidget.setHorizontalHeaderLabels(columnKeys)

        data = self.session.query(self.table).all()
        for rowNumber, row in enumerate(data):
            self.mainTableWidget.insertRow(rowNumber)
            for columnNumber, value in enumerate(row):
                cell = QtWidgets.QTableWidgetItem(str(value))
                self.mainTableWidget.setItem(rowNumber, columnNumber, cell)

    def initResultTableWidget(self, data):
        """Replace the result table contents with ``data`` rows."""
        self.resultTableWidget.setRowCount(0)

        for rowNumber, row in enumerate(data):
            self.resultTableWidget.insertRow(rowNumber)
            for columnNumber, value in enumerate(row):
                cell = QtWidgets.QTableWidgetItem(str(value))
                self.resultTableWidget.setItem(rowNumber, columnNumber, cell)

    def openDb(self):
        """Ask for a .db file, open it and display its first table."""
        fileName = QtWidgets.QFileDialog.getOpenFileName(
            self, "Open DB", "", "*.db")[0]
        engineName = "sqlite:///{0}".format(fileName)
        self.engine = create_engine(engineName, echo=True)
        engine = self.engine
        Session = sessionmaker(bind=engine)
        self.session = Session()

        # NOTE(review): debug dump that assumes a 'students' table exists
        # in every opened database — confirm this is intentional.
        with engine.connect() as connection:
            result = connection.execute("select * from students")
            for row in result:
                print(row)

        self.meta = MetaData()
        self.meta.reflect(bind=engine)

        # BUG FIX: the original passed a Table object (sorted_tables[0]) to
        # Table(), which expects a table *name*.  The reflected Table can
        # be used directly.
        self.table = self.meta.sorted_tables[0]

        self.columnTypes = []
        for column in self.table.columns:
            self.columnTypes.append(column.type)
        print(self.columnTypes)

        self.initMainTableWidget()
        self.initColumnKeysComboBox()

    def addToDb(self):
        # Not implemented yet (original comment: "haven't had time to
        # figure out how to implement this").

        if self.table is None:
            raise NoDataBaseException

    def removeFromDb(self):
        # Not implemented yet (original comment: "haven't had time to
        # figure out how to implement this").
        # NOTE(review): session.delete() is given a raw result proxy here,
        # not a mapped object — this will fail if ever wired up; verify
        # before enabling the button.

        if self.table is None:
            raise NoDataBaseException

        currentIndex = self.mainTableWidget.currentRow()
        currentObj = self.engine.execute(
            "select * from {0} where id = {1}".format(self.table.name,
                                                      currentIndex))
        self.session.delete(currentObj)
        self.session.commit()
        self.initMainTableWidget()

    def executeSqlQueryByText(self):
        """Run the SQL typed into the query widget and show the rows."""
        if self.table is None:
            return

        query = self.queryTextWidget.toPlainText()
        if query == "":
            return

        data = self.engine.execute(query)
        self.initResultTableWidget(data)

    def searchByConditions(self):
        """Run the search described by the current condition chain."""
        if self.table is None:
            return

        query = QueryCreator.createQuery(self.conditionChain, self.table.name)
        self.executeSqlQueryByConditions(query)

    def executeSqlQueryByConditions(self, query):
        """Run ``query`` (built from conditions) and show the rows."""
        print(query)
        if self.table is None or not self.conditionChain:
            return

        if query == "":
            self.resultTableWidget.setRowCount(0)
            return

        data = self.engine.execute(query)
        self.initResultTableWidget(data)
コード例 #58
0
class DbClient:
    """Database access layer for scraped exchange-rate data.

    Wraps an SQLAlchemy engine/session, the Scrapy spiders that provide
    rates, and helpers for loading/exporting rate data as CSV.
    """

    def __init__(self, db_url=environ.get("DB_URL"), new=False, echo=False):
        # NOTE(review): the db_url default is evaluated once at import
        # time, so changes to the DB_URL environment variable after import
        # are not picked up — confirm this is intended.

        self.engine = create_engine(db_url, echo=echo)
        self.session_maker = sessionmaker(bind=self.engine)
        self.metadata = MetaData(bind=self.engine)

        # Load every spider registered in the Scrapy project settings.
        spider_loader = spiderloader.SpiderLoader.from_settings(settings())
        s_names = spider_loader.list()
        self.spiders = tuple(spider_loader.load(name) for name in s_names)

        # todo consider wrapping sqlalchemy.exc.OperationalError instead of using new parameter
        if new:
            # Fresh database: create it and the model tables.
            create_database(self.engine.url)
            self.create_tables(Base)
        else:
            # Existing database: reflect the current schema.
            self.metadata.reflect()

    @staticmethod
    def current_date():
        # finds the latest day based on the mastercard definition
        now = datetime.datetime.now(timezone('US/Eastern'))

        today = now.date()

        # Before 14:00 Eastern the current day's data is not published yet.
        if now.hour < 14:
            today -= datetime.timedelta(days=1)

        return today

    @contextmanager
    def session_scope(self, commit=True):
        """Provide a transactional scope around a series of operations."""

        session = self.session_maker()
        try:
            yield session
            if commit:
                session.commit()
        except Exception:
            # Roll back on any error, then re-raise for the caller.
            session.rollback()
            raise
        finally:
            session.close()

    def create_tables(self, base):
        """Create all model tables and seed the provider/currency rows."""
        base.metadata.create_all(self.engine)

        with self.session_scope() as s:
            # The comprehension variable `s` does not leak in Python 3, so
            # `s` below is still the session.
            providers = [s.provider for s in self.spiders]
            for pid, p_name in enumerate(providers):
                s.add(Provider(id=pid + 1, name=p_name))
                self.update_currencies(p_name)

    # todo differentiate between card currencies and transaction currencies
    def missing(self, provider, end=None, num_days=363, currs=None):
        """Yield (card, trans, date) combos not yet stored for `provider`.

        NOTE(review): num_days=363 is presumably a ~1-year window — confirm.
        """
        with self.session_scope(commit=False) as s:

            if not end:
                end = self.current_date()

            start = end - datetime.timedelta(days=num_days - 1)

            spider = next(spider for spider in self.spiders
                          if spider.provider == provider)

            if not currs:
                currs = set(spider.fetch_avail_currs().keys())

            avail_dates = (end - datetime.timedelta(days=x)
                           for x in range(num_days))

            # All ordered currency pairs (x != y) across the date range.
            all_combos = ((x, y, z)
                          for x, y, z in product(currs, currs, avail_dates)
                          if x != y)

            # noinspection PyUnresolvedReferences
            not_missing = set(
                s.query(Rate.card_code, Rate.trans_code, Rate.date).filter(
                    Rate.provider.has(name=provider)).filter(
                        Rate.date <= end).filter(Rate.date >= start).filter(
                            Rate.card_code.in_(currs)).filter(
                                Rate.trans_code.in_(currs)))

        # `not_missing` is materialized above, so the generator may safely
        # be consumed after the session scope closes.
        return (x for x in all_combos if x not in not_missing)

    # todo multiprocessing to be implemented
    @staticmethod
    def combos_to_csv(file_count, results, out_path):
        """Round-robin the (card, trans, date) combos into `file_count` CSVs."""

        out_path = Path(out_path)

        try:
            out_path.mkdir()
        except FileExistsError:
            pass

        paths = tuple(out_path / f'{i}.csv' for i in range(file_count))

        for p in paths:
            p.touch()

        fs = []
        try:
            fs = tuple(p.open(mode='w') for p in paths)
            for i, (card_c, trans_c, date) in enumerate(results):
                std_date = date.strftime(std_date_fmt)
                fs[i % file_count].write(f'{card_c},{trans_c},{std_date}\n')

        finally:
            for f in fs:
                f.close()

    def rates_from_csv(self, provider, in_path):
        """Bulk-load Rate rows for `provider` from every CSV in `in_path`."""

        with self.session_scope() as s:

            provider_id = (s.query(
                Provider.id).filter_by(name=provider).first()[0])

            for file in Path(in_path).glob('*.csv'):
                print(file)
                with file.open() as f:
                    data = csv.reader(f)
                    next(data)  # skip header row #
                    rates = [
                        Rate(card_code=card_code,
                             trans_code=trans_code,
                             date=strpdate(date, fmt='%m/%d/%Y'),
                             provider_id=provider_id,
                             rate=rate)
                        for card_code, trans_code, date, rate in data
                    ]
                    s.bulk_save_objects(rates)
                    s.commit()

    def update_currencies(self, provider):
        """Insert any currencies reported by `provider`'s spider, skipping duplicates."""
        spider = next(s for s in self.spiders if s.provider == provider)
        with self.session_scope() as s:
            for alpha_code, name in spider.fetch_avail_currs().items():
                try:
                    s.add(CurrencyCode(alpha_code=alpha_code, name=name))
                    s.commit()
                except IntegrityError:
                    # Currency already present; keep the session usable.
                    s.rollback()

    def drop_all_tables(self):
        """Drop every table known to the reflected metadata."""
        self.metadata.drop_all()

    def drop_database(self):
        """Drop the whole database this client is bound to."""
        drop_database(self.engine.url)
コード例 #59
0
    return my_args


# Interactive Brokers gateway port; 4003 is presumably the default IB
# gateway port used by this deployment — TODO confirm.
IB_PORT = os.environ.get('IB_PORT')

if not IB_PORT:
    IB_PORT = '4003'
# IB_PORT = '7496'
# Module-level DB setup: one engine, one connection and one session shared
# by the functions below (credentials redacted in this snapshot).
engine = create_engine(
    'postgresql://*****:*****@localhost:2345/option_price_tracking')
connection = engine.connect()
Session = sessionmaker(bind=engine)
session = Session()

# Reflect the existing schema so table objects can be used without models.
meta = MetaData()
meta.reflect(bind=engine)
contract_timestamp_table = meta.tables["contract_ib_first_timestamp"]
contracts = meta.tables["contracts"]

# Incremented/used elsewhere to track timed-out IB requests.
timeout_retry_flag = 0


def set_timeout_flag(flag_value: bool, conId):
    """Persist the timed-out state for the contract identified by *conId*.

    Sets ``timestampReqTimedout`` to *flag_value* and stamps
    ``timestampLoadAttemptDate`` with the current UTC time, committing on
    the module-level session.
    """
    now_utc = datetime.datetime.now(datetime.timezone.utc)
    stmt = (
        update(contracts)
        .where(contracts.c.conId == conId)
        .values(timestampReqTimedout=flag_value,
                timestampLoadAttemptDate=now_utc)
    )
    session.execute(stmt)
    session.commit()


def onError(reqId, errorCode, errorString, contract):
コード例 #60
0
def read_sql_table(table_name,
                   con,
                   schema=None,
                   index_col=None,
                   coerce_float=True,
                   parse_dates=None,
                   columns=None):
    """Read SQL database table into a DataFrame.

    Given a table name and an SQLAlchemy engine, returns a DataFrame.
    This function does not support DBAPI connections.

    Parameters
    ----------
    table_name : string
        Name of SQL table in database
    con : SQLAlchemy engine
        Sqlite DBAPI connection mode not supported
    schema : string, default None
        Name of SQL schema in database to query (if database flavor
        supports this). If None, use default schema (default).
    index_col : string, optional
        Column to set as index
    coerce_float : boolean, default True
        Attempt to convert values to non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of Precision.
    parse_dates : list or dict
        - List of column names to parse as dates
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps
        - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
          to the keyword arguments of :func:`pandas.to_datetime`
          Especially useful with databases without native Datetime support,
          such as SQLite
    columns : list
        List of column names to select from sql table

    Returns
    -------
    DataFrame

    Raises
    ------
    NotImplementedError
        If *con* is not an SQLAlchemy engine.
    ValueError
        If the table cannot be reflected or yields no result.

    See also
    --------
    read_sql_query : Read SQL query into a DataFrame.
    read_sql

    """
    if not _is_sqlalchemy_engine(con):
        raise NotImplementedError("read_sql_table only supported for "
                                  "SQLAlchemy engines.")
    import sqlalchemy
    from sqlalchemy.schema import MetaData
    meta = MetaData(con, schema=schema)
    try:
        # Reflect only the requested table rather than the whole schema.
        meta.reflect(only=[table_name])
    except sqlalchemy.exc.InvalidRequestError as err:
        # Chain the reflection error so the underlying cause is preserved
        # (previously it was silently discarded).
        raise ValueError("Table %s not found" % table_name) from err

    pandas_sql = PandasSQLAlchemy(con, meta=meta)
    table = pandas_sql.read_table(table_name,
                                  index_col=index_col,
                                  coerce_float=coerce_float,
                                  parse_dates=parse_dates,
                                  columns=columns)

    if table is not None:
        return table
    # BUG FIX: the original passed ``con`` as a second positional argument
    # to ValueError, producing a confusing two-element args tuple instead
    # of a plain error message.
    raise ValueError("Table %s not found" % table_name)