Example #1
    def _expect_default(self, c_expected, col, seq=None):
        Table('t', self.metadata, col)

        self.autogen_context.metadata = self.metadata

        if seq:
            seq._set_metadata(self.metadata)
        self.metadata.create_all(config.db)

        insp = Inspector.from_engine(config.db)

        uo = ops.UpgradeOps(ops=[])
        _compare_tables(
            set([(None, 't')]), set([]),
            insp, uo, self.autogen_context)
        diffs = uo.as_diffs()
        tab = diffs[0][1]

        eq_(_render_server_default_for_compare(
            tab.c.x.server_default, tab.c.x, self.autogen_context),
            c_expected)

        insp = Inspector.from_engine(config.db)
        uo = ops.UpgradeOps(ops=[])
        m2 = MetaData()
        Table('t', m2, Column('x', BigInteger()))
        self.autogen_context.metadata = m2
        _compare_tables(
            set([(None, 't')]), set([(None, 't')]),
            insp, uo, self.autogen_context)
        diffs = uo.as_diffs()
        server_default = diffs[0][0][4]['existing_server_default']
        eq_(_render_server_default_for_compare(
            server_default, tab.c.x, self.autogen_context),
            c_expected)
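Example #1 drives alembic's private autogenerate internals. For comparison, here is a minimal public-API sketch of reading a reflected server default with the Inspector alone; the in-memory sqlite URL and the table are assumptions for illustration, and the pre-2.0 Engine.execute() call matches the era of these examples:

from sqlalchemy import create_engine
from sqlalchemy.engine.reflection import Inspector

engine = create_engine('sqlite://')                    # assumed throwaway DB
engine.execute("CREATE TABLE t (x BIGINT DEFAULT 5)")  # pre-2.0 execute API
insp = Inspector.from_engine(engine)
for col in insp.get_columns('t'):
    print(col['name'], col['default'])                 # -> x 5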
Example #2
 def _test_get_foreign_keys(self, schema=None):
     meta = MetaData(testing.db)
     (users, addresses) = createTables(meta, schema)
     meta.create_all()
     insp = Inspector(meta.bind)
     try:
         expected_schema = schema
         # users
         users_fkeys = insp.get_foreign_keys(users.name,
                                             schema=schema)
         fkey1 = users_fkeys[0]
         self.assert_(fkey1['name'] is not None)
         eq_(fkey1['referred_schema'], expected_schema)
         eq_(fkey1['referred_table'], users.name)
         eq_(fkey1['referred_columns'], ['user_id', ])
         eq_(fkey1['constrained_columns'], ['parent_user_id'])
         #addresses
         addr_fkeys = insp.get_foreign_keys(addresses.name,
                                            schema=schema)
         fkey1 = addr_fkeys[0]
         self.assert_(fkey1['name'] is not None)
         eq_(fkey1['referred_schema'], expected_schema)
         eq_(fkey1['referred_table'], users.name)
         eq_(fkey1['referred_columns'], ['user_id', ])
         eq_(fkey1['constrained_columns'], ['remote_user_id'])
     finally:
         addresses.drop()
         users.drop()
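For reference, each element of the list returned by get_foreign_keys() is a dict of roughly this shape; the values below are illustrative, chosen to match the assertions above:

fkey = {
    'name': 'user_id_fkey',            # may be None on some backends
    'constrained_columns': ['parent_user_id'],
    'referred_schema': None,           # None for the default schema
    'referred_table': 'users',
    'referred_columns': ['user_id'],
}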
Example #3
 def _test_get_foreign_keys(self, schema=None):
     meta = MetaData(testing.db)
     (users, addresses) = createTables(meta, schema)
     meta.create_all()
     insp = Inspector(meta.bind)
     try:
         expected_schema = schema
         if schema is None:
             try:
                 expected_schema = meta.bind.dialect.get_default_schema_name(
                                 meta.bind)
             except NotImplementedError:
                 expected_schema = None
         # users
         users_fkeys = insp.get_foreign_keys(users.name,
                                             schema=schema)
         fkey1 = users_fkeys[0]
         self.assert_(fkey1['name'] is not None)
         self.assertEqual(fkey1['referred_schema'], expected_schema)
         self.assertEqual(fkey1['referred_table'], users.name)
         self.assertEqual(fkey1['referred_columns'], ['user_id', ])
         self.assertEqual(fkey1['constrained_columns'], ['parent_user_id'])
         #addresses
         addr_fkeys = insp.get_foreign_keys(addresses.name,
                                            schema=schema)
         fkey1 = addr_fkeys[0]
         self.assert_(fkey1['name'] is not None)
         self.assertEqual(fkey1['referred_schema'], expected_schema)
         self.assertEqual(fkey1['referred_table'], users.name)
         self.assertEqual(fkey1['referred_columns'], ['user_id', ])
         self.assertEqual(fkey1['constrained_columns'], ['remote_user_id'])
     finally:
         addresses.drop()
         users.drop()
Example #4
def find_pending(db):
    engine = sa.create_engine(db)
    inspector = Inspector(engine)
    # Newer buildbot has a "buildrequest_claims" table
    if "buildrequest_claims" in inspector.get_table_names():
        query = sa.text("""
        SELECT buildername, count(*) FROM
               buildrequests WHERE
               complete=0 AND
               submitted_at > :yesterday AND
               submitted_at < :toonew AND
               (select count(*) from buildrequest_claims where brid=id) = 0

               GROUP BY buildername""")
    # Older buildbot doesn't
    else:
        query = sa.text("""
        SELECT buildername, count(*) FROM
               buildrequests WHERE
               complete=0 AND
               claimed_at=0 AND
               submitted_at > :yesterday AND
               submitted_at < :toonew

               GROUP BY buildername""")

    result = engine.execute(
        query,
        yesterday=time.time() - 86400,
        toonew=time.time() - 60
    )
    retval = result.fetchall()
    return retval
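A hypothetical call site for find_pending(); the database URL is an assumption:

for buildername, count in find_pending('sqlite:///state.sqlite'):
    print('%s: %d pending' % (buildername, count))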
Example #5
    def _test_get_indexes(self, schema=None):
        meta = MetaData(testing.db)
        (users, addresses) = createTables(meta, schema)
        meta.create_all()
        createIndexes(meta.bind, schema)
        try:
            # The database may decide to create indexes for foreign keys, etc.
            # so there may be more indexes than expected.
            insp = Inspector(meta.bind)
            indexes = insp.get_indexes('users', schema=schema)
            indexes.sort(key=lambda idx: idx['name'])
            if testing.against('oracle'):
                expected_indexes = [
                    {'unique': False,
                     'column_names': ['TEST1', 'TEST2'],
                     'name': 'USERS_T_IDX'}]
            else:
                expected_indexes = [
                    {'unique': False,
                     'column_names': ['test1', 'test2'],
                     'name': 'users_t_idx'}]
            index_names = [d['name'] for d in indexes]
            for e_index in expected_indexes:
                self.assertTrue(e_index['name'] in index_names)
                index = indexes[index_names.index(e_index['name'])]
                for key in e_index:
                    self.assertEqual(e_index[key], index[key])

        finally:
            addresses.drop()
            users.drop()
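Each element returned by get_indexes() is a dict like the expected_indexes entries above; for reference, with illustrative values:

index = {
    'name': 'users_t_idx',
    'column_names': ['test1', 'test2'],
    'unique': False,
}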
Example #6
 def _test_get_table_names(self, schema=None, table_type='table',
                           order_by=None):
     meta = MetaData(testing.db)
     (users, addresses) = createTables(meta, schema)
     meta.create_all()
     createViews(meta.bind, schema)
     try:
         insp = Inspector(meta.bind)
         if table_type == 'view':
             table_names = insp.get_view_names(schema)
             table_names.sort()
             answer = ['email_addresses_v', 'users_v']
         else:
             table_names = insp.get_table_names(schema,
                                                order_by=order_by)
             table_names.sort()
             if order_by == 'foreign_key':
                 answer = ['users', 'email_addresses']
             else:
                 answer = ['email_addresses', 'users']
         self.assertEqual(table_names, answer)
     finally:
         dropViews(meta.bind, schema)
         addresses.drop()
         users.drop()
Example #7
    def _test_selfref_fk(self, recreate):
        bar = Table(
            'bar', self.metadata,
            Column('id', Integer, primary_key=True),
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('data', String(50)),
            mysql_engine='InnoDB'
        )
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'data': 'x', 'bar_id': None})
        self.conn.execute(bar.insert(), {'id': 2, 'data': 'y', 'bar_id': 1})

        with self.op.batch_alter_table("bar", recreate=recreate) as batch_op:
            batch_op.alter_column(
                'data', new_column_name='newdata', existing_type=String(50))

        insp = Inspector.from_engine(self.conn)
        eq_(
            [(key['referred_table'],
             key['referred_columns'], key['constrained_columns'])
             for key in insp.get_foreign_keys('bar')],
            [('bar', ['id'], ['bar_id'])]
        )
Example #8
    def _expect_default(self, c_expected, col, seq=None):
        Table('t', self.metadata, col)

        if seq:
            seq._set_metadata(self.metadata)
        self.metadata.create_all(config.db)

        insp = Inspector.from_engine(config.db)
        diffs = []
        _compare_tables(
            set([(None, 't')]), set([]),
            [],
            insp, self.metadata, diffs, self.autogen_context)
        tab = diffs[0][1]
        eq_(_render_server_default_for_compare(
            tab.c.x.server_default, tab.c.x, self.autogen_context),
            c_expected)

        insp = Inspector.from_engine(config.db)
        diffs = []
        m2 = MetaData()
        Table('t', m2, Column('x', BigInteger()))
        _compare_tables(
            set([(None, 't')]), set([(None, 't')]),
            [],
            insp, m2, diffs, self.autogen_context)
        server_default = diffs[0][0][4]['existing_server_default']
        eq_(_render_server_default_for_compare(
            server_default, tab.c.x, self.autogen_context),
            c_expected)
Example #9
    def test_autogen(self):
        m = sa.MetaData()
        sa.Table('t', m, sa.Column('x', sa.Integer))

        def process_revision_directives(context, rev, generate_revisions):
            existing_upgrades = generate_revisions[0].upgrade_ops
            existing_downgrades = generate_revisions[0].downgrade_ops

            # model1 will run the upgrades, e.g. create the table,
            # model2 will run the downgrades as upgrades, e.g. drop
            # the table again

            generate_revisions[:] = [
                ops.MigrationScript(
                    util.rev_id(),
                    existing_upgrades,
                    ops.DowngradeOps(),
                    version_path=os.path.join(
                        _get_staging_directory(), "model1"),
                    head="model1@head"
                ),
                ops.MigrationScript(
                    util.rev_id(),
                    existing_downgrades,
                    ops.DowngradeOps(),
                    version_path=os.path.join(
                        _get_staging_directory(), "model2"),
                    head="model2@head"
                )
            ]

        with self._env_fixture(process_revision_directives, m):
            command.upgrade(self.cfg, "heads")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version"]
            )

            command.revision(
                self.cfg, message="some message",
                autogenerate=True)

            command.upgrade(self.cfg, "model1@head")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version", "t"]
            )

            command.upgrade(self.cfg, "model2@head")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version"]
            )
Example #10
def add_municipality_domain(context):
    # Rename the columns
    renames = (
        ('elections', 'total_municipalities', 'total_entities'),
        ('elections', 'counted_municipalities', 'counted_entities'),
        ('election_results', 'municipality_id', 'entity_id'),
        ('ballot_results', 'municipality_id', 'entity_id'),
    )

    for table, old, new in renames:
        if context.has_column(table, old):
            context.operations.alter_column(table, old, new_column_name=new)

    # Add the new domain, see http://stackoverflow.com/a/14845740
    table_names = []
    inspector = Inspector(context.operations_connection)
    if 'elections' in inspector.get_table_names(context.schema):
        table_names.append('elections')
    if 'election_compounds' in inspector.get_table_names(context.schema):
        table_names.append('election_compounds')
    if 'votes' in inspector.get_table_names(context.schema):
        table_names.append('votes')
    if 'archived_results' in inspector.get_table_names(context.schema):
        table_names.append('archived_results')

    old_type = Enum('federation', 'canton', name='domain_of_influence')
    new_type = Enum('federation', 'canton', 'municipality',
                    name='domain_of_influence')
    tmp_type = Enum('federation', 'canton', 'municipality',
                    name='_domain_of_influence')

    tmp_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE _domain_of_influence '
                'USING domain::text::_domain_of_influence'
            ).format(table_name)
        )

    old_type.drop(context.operations.get_bind(), checkfirst=False)

    new_type.create(context.operations.get_bind(), checkfirst=False)

    for table_name in table_names:
        context.operations.execute(
            (
                'ALTER TABLE {} ALTER COLUMN domain TYPE domain_of_influence '
                'USING domain::text::domain_of_influence'
            ).format(table_name)
        )

    tmp_type.drop(context.operations.get_bind(), checkfirst=False)
Example #11
 def test_dont_reflect_autoindex(self):
     meta = self.metadata
     Table('foo', meta, Column('bar', String, primary_key=True))
     meta.create_all()
     inspector = Inspector(testing.db)
     eq_(inspector.get_indexes('foo'), [])
     eq_(
         inspector.get_indexes('foo', include_auto_indexes=True),
         [{
             'unique': 1,
             'name': 'sqlite_autoindex_foo_1',
             'column_names': ['bar']}])
Example #12
    def test_create_index_with_schema(self):
        """Test creation of index with explicit schema"""

        meta = self.metadata
        Table(
            'foo', meta, Column('bar', String, index=True),
            schema='main')
        meta.create_all()
        inspector = Inspector(testing.db)
        eq_(
            inspector.get_indexes('foo', schema='main'),
            [{'unique': 0, 'name': u'ix_main_foo_bar',
              'column_names': [u'bar']}])
Example #13
 def test_dont_reflect_autoindex(self):
     meta = MetaData(testing.db)
     t = Table('foo', meta, Column('bar', String, primary_key=True))
     meta.create_all()
     from sqlalchemy.engine.reflection import Inspector
     try:
         inspector = Inspector(testing.db)
         eq_(inspector.get_indexes('foo'), [])
         eq_(inspector.get_indexes('foo', include_auto_indexes=True),
             [{'unique': 1, 'name': 'sqlite_autoindex_foo_1',
               'column_names': ['bar']}])
     finally:
         meta.drop_all()
Example #14
 def test_get_unique_constraints(self):
     meta = self.metadata
     Table(
         'foo', meta, Column('f', Integer),
         UniqueConstraint('f', name='foo_f'))
     Table(
         'bar', meta, Column('b', Integer),
         UniqueConstraint('b', name='bar_b'),
         prefixes=['TEMPORARY'])
     meta.create_all()
     inspector = Inspector(testing.db)
     eq_(inspector.get_unique_constraints('foo'),
         [{'column_names': [u'f'], 'name': u'foo_f'}])
     eq_(inspector.get_unique_constraints('bar'),
         [{'column_names': [u'b'], 'name': u'bar_b'}])
Example #15
    def test_dont_reflect_autoindex(self):
        meta = MetaData(testing.db)
        t = Table("foo", meta, Column("bar", String, primary_key=True))
        meta.create_all()
        from sqlalchemy.engine.reflection import Inspector

        try:
            inspector = Inspector(testing.db)
            eq_(inspector.get_indexes("foo"), [])
            eq_(
                inspector.get_indexes("foo", include_auto_indexes=True),
                [{"unique": 1, "name": "sqlite_autoindex_foo_1", "column_names": ["bar"]}],
            )
        finally:
            meta.drop_all()
Example #16
    def handle(self, options, global_options, *args):
        
        output_dir = os.path.join(options.output_dir, options.engine)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        
        engine = get_engine(options, global_options)

        if not args:
            print("Failed! You should pass one or more table names.")
            sys.exit(1)
            
        inspector = Inspector.from_engine(engine)
        
        tables = get_tables(global_options.apps_dir, tables=args,
            engine=options.engine, settings_file=global_options.settings, 
            local_settings_file=global_options.local_settings)

        for tablename, t in tables.items():
            if global_options.verbose:
                print('[%s] Dumping %s...' % (options.engine, tablename))
            filename = os.path.join(output_dir, tablename+'.txt')
            if options.text:
                format = 'txt'
            else:
                format = None
            dump_table(t, filename, engine, delimiter=options.delimiter, 
                format=format, encoding=options.encoding, inspector=inspector)
Example #17
def create_table(engine, tool):
    """Check whether a table exists and create it if it doesn't."""

    inspector = Inspector.from_engine(engine)
    tables = set(inspector.get_table_names())
    if tool.__tablename__ not in tables:
        Base.metadata.create_all(engine)
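A minimal sketch of calling create_table(); Tool stands in for any declarative model with a __tablename__, and the sqlite URL is an assumption:

engine = create_engine('sqlite:///tools.db')
create_table(engine, Tool)  # runs create_all() only when Tool's table is absent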
Example #18
    def handle(self, options, global_options, *args):
        from sqlalchemy import Table
        from uliweb.orm import reflect_table_model

        engine = get_engine(options, global_options)

        insp = Inspector.from_engine(engine)
        if not args:
            tables = insp.get_table_names()
        else:
            tables = args

        mapping = {}
        print('#coding=utf8')
        print('from uliweb.orm import *')
        print('from uliweb.i18n import ugettext_lazy as _')
        print('from uliweb.utils.common import get_var')
        if options.oracle:
            print('from sqlalchemy.dialects.oracle import VARCHAR2')
            mapping = {'str': 'VARCHAR2'}
        print('\n')

        meta = engine.metadata
        for name in tables:
            table = Table(name, meta)
            try:
                insp.reflecttable(table, None)
                print(reflect_table_model(table, mapping, without_id=not options.auto_id))
                print('\n')
            except Exception as e:
                import traceback
                traceback.print_exc()
Example #19
def delete_all_tables(db):
    """Drops all tables in the database"""
    conn = db.engine.connect()
    transaction = conn.begin()
    inspector = Inspector.from_engine(db.engine)
    metadata = MetaData()

    all_schema_tables = get_all_tables(db)
    tables = []
    all_fkeys = []
    for schema, schema_tables in all_schema_tables.items():
        for table_name in schema_tables:
            fkeys = [ForeignKeyConstraint((), (), name=fk['name'])
                     for fk in inspector.get_foreign_keys(table_name, schema=schema)
                     if fk['name']]
            tables.append(Table(table_name, metadata, *fkeys, schema=schema))
            all_fkeys.extend(fkeys)

    for fkey in all_fkeys:
        conn.execute(DropConstraint(fkey))
    for table in tables:
        conn.execute(DropTable(table))
    for schema in all_schema_tables:
        if schema != 'public':
            conn.execute(DropSchema(schema))
    transaction.commit()
Example #20
def setup():

    if harvest_source_table is None:
        define_harvester_tables()
        log.debug('Harvest tables defined in memory')

    if model.package_table.exists():
        if not harvest_source_table.exists():

            # Create each table individually rather than
            # using metadata.create_all()
            harvest_source_table.create()
            harvest_job_table.create()
            harvest_object_table.create()
            harvest_gather_error_table.create()
            harvest_object_error_table.create()

            log.debug('Harvest tables created')
        else:
            from ckan.model.meta import engine
            log.debug('Harvest tables already exist')
            # Check if existing tables need to be updated
            inspector = Inspector.from_engine(engine)
            columns = inspector.get_columns('harvest_source')
            if 'title' not in [column['name'] for column in columns]:
                log.debug('Harvest tables need to be updated')
                migrate_v2()

    else:
        log.debug('Harvest table creation deferred')
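The column-existence check above generalizes into a small helper; a hedged sketch (the helper name is an assumption):

def has_column(inspector, table_name, column_name):
    return column_name in [c['name'] for c in inspector.get_columns(table_name)]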
Example #21
def delete_all_tables(db):
    """Drops all tables in the database"""
    conn = db.engine.connect()
    transaction = conn.begin()
    inspector = Inspector.from_engine(db.engine)
    metadata = MetaData()

    all_schema_tables = get_all_tables(db)
    tables = []
    all_fkeys = []
    for schema, schema_tables in all_schema_tables.items():
        for table_name in schema_tables:
            fkeys = [ForeignKeyConstraint((), (), name=fk['name'])
                     for fk in inspector.get_foreign_keys(table_name, schema=schema)
                     if fk['name']]
            tables.append(Table(table_name, metadata, *fkeys, schema=schema))
            all_fkeys.extend(fkeys)

    for fkey in all_fkeys:
        conn.execute(DropConstraint(fkey))
    for table in tables:
        conn.execute(DropTable(table))
    for schema in all_schema_tables:
        if schema != 'public':
            row = conn.execute("""
                SELECT 'DROP FUNCTION ' || ns.nspname || '.' || proname || '(' || oidvectortypes(proargtypes) || ')'
                FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid)
                WHERE ns.nspname = '{}'  order by proname;
            """.format(schema))
            for stmt, in row:
                conn.execute(stmt)
            conn.execute(DropSchema(schema))
    transaction.commit()
Example #22
    def _compare_default(self, t1, t2, col, rendered):
        t1.create(self.bind, checkfirst=True)
        insp = Inspector.from_engine(self.bind)
        cols = insp.get_columns(t1.name)
        ctx = self.autogen_context.migration_context

        return ctx.impl.compare_server_default(None, col, rendered, cols[0]["default"])
Example #23
    def handle(self, options, global_options, *args):
        from sqlalchemy import Table
        from uliweb.orm import reflect_model

        engine = get_engine(options, global_options)

        insp = Inspector.from_engine(engine)
        if not args:
            tables = insp.get_table_names()
        else:
            tables = args

        print('#coding=utf8')
        print('from uliweb.orm import *')
        print('from uliweb.i18n import ugettext_lazy as _')
        print('from uliweb.utils.common import get_var')
        print('\n')

        meta = engine.metadata
        for name in tables:
            table = Table(name, meta)
            try:
                insp.reflecttable(table, None)
                print(reflect_model(table))
                print('\n')
            except Exception as e:
                import traceback
                traceback.print_exc()
Example #24
  def __initializeConnection( self, dbPath ):
    """
    Collects from the CS all the info needed to connect to the DB.
    This should be in a base class eventually
    """

    result = getDBParameters( dbPath )
    if not result[ 'OK' ]:
      raise Exception( 'Cannot get database parameters: %s' % result['Message'] )

    dbParameters = result[ 'Value' ]
    self.log.debug("db parameters: %s" % dbParameters)
    self.host = dbParameters[ 'Host' ]
    self.port = dbParameters[ 'Port' ]
    self.user = dbParameters[ 'User' ]
    self.password = dbParameters[ 'Password' ]
    self.dbName = dbParameters[ 'DBName' ]

    self.engine = create_engine( 'mysql://%s:%s@%s:%s/%s' % ( self.user,
                                                              self.password,
                                                              self.host,
                                                              self.port,
                                                              self.dbName ),
                                 pool_recycle = 3600,
                                 echo_pool = True,
                                 echo = self.log.getLevel() == 'DEBUG')
    self.sessionMaker_o = sessionmaker( bind = self.engine )
    self.inspector = Inspector.from_engine( self.engine )
Example #25
def upgrade():
    c = get_context()
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        insp = Inspector.from_engine(c.connection.engine)
        for t in ['groups_permissions', 'groups_resources_permissions',
                  'users_groups', 'resources']:
            for constraint in insp.get_foreign_keys(t):
                if constraint['referred_columns'] == ['group_name']:
                    op.drop_constraint(constraint['name'], t,
                                       type='foreignkey')

    op.drop_column('groups', 'id')
    op.alter_column('groups', 'group_name',
                    type_=sa.String(128),
                    existing_type=sa.String(50),
                    )
    op.create_primary_key('groups_pkey', 'groups', cols=['group_name'])

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(None, 'groups_permissions', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'groups_resources_permissions', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'users_groups', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'resources', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['owner_group_name'], onupdate='CASCADE',
                              ondelete='SET NULL')
Example #26
def _produce_net_changes(connection, metadata, diffs, autogen_context,
                            object_filters=(),
                            include_schemas=False):
    inspector = Inspector.from_engine(connection)
    # TODO: don't hardcode alembic_version here?
    conn_table_names = set()
    if include_schemas:
        schemas = set(inspector.get_schema_names())
        # filter out the system schema
        schemas.discard("information_schema")
        # represent the default schema as None
        schemas.add(None)
        schemas.discard(connection.dialect.default_schema_name)
    else:
        schemas = [None]

    for s in schemas:
        tables = set(inspector.get_table_names(schema=s)).\
                difference(['alembic_version'])
        conn_table_names.update(zip([s] * len(tables), tables))

    metadata_table_names = OrderedSet([(table.schema, table.name)
                                for table in metadata.sorted_tables])

    _compare_tables(conn_table_names, metadata_table_names,
                    object_filters,
                    inspector, metadata, diffs, autogen_context)
Example #27
 def init_db(self):
     engine = self.session.get_bind(mapper=None, clause=None)        
     
     inspector = Inspector.from_engine(engine)
     if 'ab_user' not in inspector.get_table_names():
          print("Security DB not found. Creating...")
         Base.metadata.create_all(engine)
          print("Security DB Created")
         self.migrate_db()
     if self.session.query(Role).filter_by(name = self.auth_role_admin).first() is None:
         role = Role()
         role.name = self.auth_role_admin
         self.session.add(role)
         self.session.commit()
          print("Inserted Role for admin access", self.auth_role_admin)
     if not self.session.query(Role).filter_by(name = self.auth_role_public).first():
         role = Role()
         role.name = self.auth_role_public
         self.session.add(role)
         self.session.commit()
          print("Inserted Role for public access", self.auth_role_public)
     if not self.session.query(User).all():
         user = User()
         user.first_name = 'Admin'
         user.last_name = 'User'
         user.username = '******'
         user.password = '******'
         user.active = True
         user.role = self.session.query(Role).filter_by(name = self.auth_role_admin).first()
         self.session.add(user)
         self.session.commit()
          print("Inserted initial Admin user")
          print("Login using Admin/general")
Example #28
    def _test_get_primary_keys(self, schema=None):
        meta = MetaData(testing.db)
        (users, addresses) = createTables(meta, schema)
        meta.create_all()
        insp = Inspector(meta.bind)
        try:
            users_pkeys = insp.get_primary_keys(users.name,
                                                schema=schema)
            self.assertEqual(users_pkeys,  ['user_id'])
            addr_pkeys = insp.get_primary_keys(addresses.name,
                                               schema=schema)
            self.assertEqual(addr_pkeys,  ['address_id'])

        finally:
            addresses.drop()
            users.drop()
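get_primary_keys() was later deprecated in favor of get_pk_constraint(), which returns a dict rather than a bare column list; a sketch of the newer spelling under the same fixtures:

pk = insp.get_pk_constraint(users.name, schema=schema)
assert pk['constrained_columns'] == ['user_id']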
Example #29
    def handle(self, options, global_options, *args):
        
        engine = get_engine(options, global_options)

        if len(args) != 2:
            print(self.print_help(self.prog_name, 'dumptablefile'))
            sys.exit(1)
            
        inspector = Inspector.from_engine(engine)

        name = args[0]
        tables = get_tables(global_options.apps_dir, tables=[name],
            engine_name=options.engine, settings_file=global_options.settings, 
            local_settings_file=global_options.local_settings)
        t = tables[name]
        if global_options.verbose:
            print('[%s] Dumping %s...' % (options.engine, show_table(name, t, 0, 1)), end='')
        if options.text:
            format = 'txt'
        else:
            format = None
        t = dump_table(t, args[1], engine, delimiter=options.delimiter, 
            format=format, encoding=options.encoding, inspector=inspector,
            engine_name=engine.engine_name)
        if global_options.verbose:
            print(t)
Example #30
 def __init__(self, sqla_conn, args, schema=None):
     self.args = args
     self.sqla_conn = sqla_conn
     self.schema = schema
     self.engine = sa.create_engine(sqla_conn)
     self.meta = sa.MetaData(bind=self.engine) # excised schema=schema to prevent errors
     self.meta.reflect(schema=self.schema)
     self.inspector = Inspector(bind=self.engine)
     self.conn = self.engine.connect()
     self.tables = OrderedDict()
     for tbl in self.meta.sorted_tables:
         if any(fnmatch.fnmatch(tbl.name, each) for each in args.exclude_tables):
             continue
         tbl.db = self
          # TODO: Replace all these monkeypatches with an instance assignment
         tbl.find_n_rows = types.MethodType(_find_n_rows, tbl)
         tbl.random_row_func = types.MethodType(_random_row_func, tbl)
         tbl.fks = self.inspector.get_foreign_keys(tbl.name, schema=tbl.schema)
         tbl.pk = self.inspector.get_primary_keys(tbl.name, schema=tbl.schema)
         if not tbl.pk:
             tbl.pk = [d['name'] for d in self.inspector.get_columns(tbl.name)]
         tbl.filtered_by = types.MethodType(_filtered_by, tbl)
         tbl.by_pk = types.MethodType(_by_pk, tbl)
         tbl.pk_val = types.MethodType(_pk_val, tbl)
         tbl.child_fks = []
         estimate_rows = not(any(fnmatch.fnmatch(tbl.name, each) 
                                 for each in self.args.full_tables))
         tbl.find_n_rows(estimate=estimate_rows)
         self.tables[(tbl.schema, tbl.name)] = tbl
     for ((tbl_schema, tbl_name), tbl) in self.tables.items():
         constraints = args.config.get('constraints', {}).get(tbl_name, [])
         for fk in (tbl.fks + constraints):
             fk['constrained_schema'] = tbl_schema
             fk['constrained_table'] = tbl_name  # TODO: check against constrained_table
             self.tables[(fk['referred_schema'], fk['referred_table'])].child_fks.append(fk)
Example #31
 def enumerate_tables(self, **tokens):
     engine, sessions = self._acquire_engine(tokens)
     inspector = Inspector.from_engine(engine)
     return inspector.get_table_names()
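For reference, the Inspector accessors these examples lean on, collected in one sketch; a 'users' table is assumed to exist:

insp = Inspector.from_engine(engine)
insp.get_table_names()           # table names in the default schema
insp.get_view_names()            # view names
insp.get_columns('users')        # list of column dicts
insp.get_pk_constraint('users')  # primary key info
insp.get_foreign_keys('users')   # list of foreign key dicts
insp.get_indexes('users')        # list of index dicts
insp.get_schema_names()          # schemas visible to the connection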
Example #32
    def _process_view(
        self,
        dataset_name: str,
        inspector: Inspector,
        schema: str,
        view: str,
        sql_config: SQLAlchemyConfig,
    ) -> Iterable[Union[SqlWorkUnit, MetadataWorkUnit]]:
        try:
            columns = inspector.get_columns(view, schema)
        except KeyError:
            # For certain types of views, we are unable to fetch the list of columns.
            self.report.report_warning(dataset_name,
                                       "unable to get schema for this view")
            schema_metadata = None
        else:
            schema_fields = self.get_schema_fields(dataset_name, columns)
            schema_metadata = get_schema_metadata(
                self.report,
                dataset_name,
                self.platform,
                columns,
                canonical_schema=schema_fields,
            )
        try:
            # SQLAlchemy stubs are incomplete and missing this method.
            # PR: https://github.com/dropbox/sqlalchemy-stubs/pull/223.
            view_info: dict = inspector.get_table_comment(
                view, schema)  # type: ignore
        except NotImplementedError:
            description: Optional[str] = None
            properties: Dict[str, str] = {}
        else:
            description = view_info["text"]

            # The "properties" field is a non-standard addition to SQLAlchemy's interface.
            properties = view_info.get("properties", {})
        try:
            view_definition = inspector.get_view_definition(view, schema)
            if view_definition is None:
                view_definition = ""
            else:
                # Some dialects return a TextClause instead of a raw string,
                # so we need to convert them to a string.
                view_definition = str(view_definition)
        except NotImplementedError:
            view_definition = ""
        properties["view_definition"] = view_definition
        properties["is_view"] = "True"
        dataset_urn = make_dataset_urn_with_platform_instance(
            self.platform,
            dataset_name,
            self.config.platform_instance,
            self.config.env,
        )
        dataset_snapshot = DatasetSnapshot(
            urn=dataset_urn,
            aspects=[StatusClass(removed=False)],
        )
        db_name = self.get_db_name(inspector)
        yield from self.add_table_to_schema_container(dataset_urn, db_name,
                                                      schema)
        if self.is_stateful_ingestion_configured():
            cur_checkpoint = self.get_current_checkpoint(
                self.get_default_ingestion_job_id())
            if cur_checkpoint is not None:
                checkpoint_state = cast(BaseSQLAlchemyCheckpointState,
                                        cur_checkpoint.state)
                checkpoint_state.add_view_urn(dataset_urn)
        dataset_properties = DatasetPropertiesClass(
            name=view,
            description=description,
            customProperties=properties,
        )
        dataset_snapshot.aspects.append(dataset_properties)
        if schema_metadata:
            dataset_snapshot.aspects.append(schema_metadata)
        mce = MetadataChangeEvent(proposedSnapshot=dataset_snapshot)
        wu = SqlWorkUnit(id=dataset_name, mce=mce)
        self.report.report_workunit(wu)
        yield wu
        dpi_aspect = self.get_dataplatform_instance_aspect(
            dataset_urn=dataset_urn)
        if dpi_aspect:
            yield dpi_aspect
        subtypes_aspect = MetadataWorkUnit(
            id=f"{view}-subtypes",
            mcp=MetadataChangeProposalWrapper(
                entityType="dataset",
                changeType=ChangeTypeClass.UPSERT,
                entityUrn=dataset_urn,
                aspectName="subTypes",
                aspect=SubTypesClass(typeNames=["view"]),
            ),
        )
        yield subtypes_aspect
        if "view_definition" in properties:
            view_definition_string = properties["view_definition"]
            view_properties_aspect = ViewPropertiesClass(
                materialized=False,
                viewLanguage="SQL",
                viewLogic=view_definition_string)
            yield MetadataWorkUnit(
                id=f"{view}-viewProperties",
                mcp=MetadataChangeProposalWrapper(
                    entityType="dataset",
                    changeType=ChangeTypeClass.UPSERT,
                    entityUrn=dataset_urn,
                    aspectName="viewProperties",
                    aspect=view_properties_aspect,
                ),
            )

        yield from self._get_domain_wu(
            dataset_name=dataset_name,
            entity_urn=dataset_urn,
            entity_type="dataset",
            sql_config=sql_config,
        )
Example #33
def get_tables(apps_dir, apps=None, engine_name=None, tables=None,
    settings_file='settings.ini', local_settings_file='local_settings.ini',
    all=False):
    from uliweb.core.SimpleFrame import get_apps, get_app_dir
    from uliweb import orm
    
    engine = orm.engine_manager[engine_name]
    e = engine.options['connection_string']

    dispatch.get(None, 'load_models')

    old_models = orm.__models__.keys()
    tables_map = {}
    tables_mapping_only = {}
    try:
        for tablename, m in engine.models.items():
            try:
                x = orm.get_model(tablename, engine_name)
                #convert dynamic model to mapping_only model
                if hasattr(x, '__dynamic__') and getattr(x, '__dynamic__'):
                    x.__mapping_only__ = True
                if hasattr(x, '__mapping_only__') and getattr(x, '__mapping_only__'):
                    tables_mapping_only[x.tablename] = True
            except:
                print("Error on Model [%s]" % tablename)
                raise
            tables_map[x.tablename] = tablename
    except:
        print("Problem models may include:", list(set(old_models) ^ set(orm.__models__.keys())))
        raise

    all_meta = MetaData()
    meta = engine.metadata
    insp = Inspector.from_engine(engine.engine)

    def get_table(tablename):
        table = Table(tablename, all_meta)
        try:
            insp.reflecttable(table, None)
        except NoSuchTableError:
            return
        return table

    all_tables = insp.get_table_names() + list(meta.tables.keys())
    if apps:
        t = {}
        for tablename in all_tables:
            if tablename in meta.tables:
                table = meta.tables[tablename]
                if hasattr(table, '__appname__') and table.__appname__ in apps:
                    t[tables_map.get(tablename, tablename)] = table
                    table.__mapping_only__ = tables_mapping_only.get(tablename, False)
    elif tables:
        t = {}
        for tablename in tables:
            if tablename in meta.tables:
                table = meta.tables[tablename]
            else:
                try:
                    table = get_table(tablename)
                    if not table:
                        print("Table [%s] can't be found, it'll be skipped." % tablename)
                        continue
                    table.__appname__ = 'UNKNOWN'
                except Exception as e:
                    import traceback
                    traceback.print_exc()
                    continue
            t[tables_map.get(tablename, tablename)] = table
            table.__mapping_only__ = tables_mapping_only.get(tablename, False)

    else:
        t = {}
        if not all:
            all_meta = engine.metadata
            all_tables = meta.tables.keys()
        for tablename in all_tables:
            if tablename in meta.tables:
                table = meta.tables[tablename]
            else:
                try:
                    table = get_table(tablename)
                    table.__appname__ = 'UNKNOWN'
                except Exception as e:
                    import traceback
                    traceback.print_exc()
                    continue
            t[tables_map.get(tablename, tablename)] = table
            table.__mapping_only__ = tables_mapping_only.get(tablename, False)
    return t
Example #34
 def inspector(self):
     return Inspector.from_engine(self.connection)
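In SQLAlchemy 1.4 and later, Inspector.from_engine() is deprecated in favor of sqlalchemy.inspect(); assuming a 1.4+ install, the property above could read:

from sqlalchemy import inspect

def inspector(self):
    return inspect(self.connection)  # returns an Inspector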
Example #35
        def verify_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn

            # Verify database contents.

            # 'workers' table contents.
            workers = sautils.Table('workers', metadata, autoload=True)
            c = workers.c
            q = sa.select(
                [c.id, c.name, c.info]
            ).order_by(c.id)
            self.assertEqual(
                q.execute().fetchall(), [
                    (30, u'worker-1', u'{}'),
                    (31, u'worker-2', u'{"a": 1}'),
                ])

            # 'builds' table contents.
            builds = sautils.Table('builds', metadata, autoload=True)
            c = builds.c
            q = sa.select(
                [c.id, c.number, c.builderid, c.buildrequestid, c.workerid,
                 c.masterid, c.started_at, c.complete_at, c.state_string,
                 c.results]
            ).order_by(c.id)
            self.assertEqual(
                q.execute().fetchall(), [
                    (40, 1, None, 20, 30, 10, 1000, None, u'state', None),
                    (41, 2, 50, 21, None, 11, 2000, 3000, u'state 2', 9),
                ])

            # 'configured_workers' table contents.
            configured_workers = sautils.Table(
                'configured_workers', metadata, autoload=True)
            c = configured_workers.c
            q = sa.select(
                [c.id, c.buildermasterid, c.workerid]
            ).order_by(c.id)
            self.assertEqual(
                q.execute().fetchall(), [
                    (60, 70, 30),
                    (61, 71, 31),
                ])

            # 'connected_workers' table contents.
            connected_workers = sautils.Table(
                'connected_workers', metadata, autoload=True)
            c = connected_workers.c
            q = sa.select(
                [c.id, c.masterid, c.workerid]
            ).order_by(c.id)
            self.assertEqual(
                q.execute().fetchall(), [
                    (80, 10, 30),
                    (81, 11, 31),
                ])

            # Verify that there is no "slave"-named items in schema.
            inspector = Inspector(conn)

            def check_name(name, table_name, item_type):
                if not name:
                    return
                self.assertTrue(
                    u"slave" not in name.lower(),
                    msg=u"'slave'-named {type} in table '{table}': "
                        u"'{name}'".format(
                        type=item_type, table=table_name,
                        name=name))

            # Check every table.
            for table_name in inspector.get_table_names():
                # Check table name.
                check_name(table_name, table_name, u"table name")

                # Check column names.
                for column_info in inspector.get_columns(table_name):
                    check_name(column_info['name'], table_name, u"column")

                # Check foreign key names.
                for fk_info in inspector.get_foreign_keys(table_name):
                    check_name(fk_info['name'], table_name, u"foreign key")

                # Check indexes names.
                for index_info in inspector.get_indexes(table_name):
                    check_name(index_info['name'], table_name, u"index")

                # Check primary keys constraints names.
                pk_info = inspector.get_pk_constraint(table_name)
                check_name(pk_info.get('name'), table_name, u"primary key")

            # Test that no "slave"-named items present in schema
            for name in inspector.get_schema_names():
                self.assertTrue(u"slave" not in name.lower())
Example #36
def get_apps():
    inspector = Inspector.from_engine(engine)
    schemas = set(inspector.get_schema_names())
    schemas.discard('information_schema')
    return schemas
Example #37
def downgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'shops_has_storages' in tables:
        op.drop_table('shops_has_storages')
Example #38
from sqlalchemy import create_engine
from resources import dbs
dbd = 'mysql'
tdb = dbs[dbd]
engine = create_engine('%s://%s:%s@%s:%s/%s' % (
    dbd, tdb['user'], tdb['pw'], tdb['host'], tdb['port'], tdb['db']))
#conn=engine.connect()
from sqlalchemy.engine.reflection import Inspector
insp = Inspector(engine)  # an Engine or a Connection can serve as the bind
from sqlalchemy import MetaData
md = MetaData(engine)
md.reflect()


def get_table_info(tbl_nm):
    """returns a dict with keys 'attrib' for each col"""
    for ti in insp.get_columns(tbl_nm):
        yield ti


from sqlalchemy.sql import and_, select
from sqlalchemy import Table
flights = md.tables['flights']
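A hypothetical use of get_table_info() against the reflected 'flights' table:

for col in get_table_info('flights'):
    print(col['name'], col['type'], col['nullable'])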
Example #39
def upgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()

    if 'connection' not in tables:
        op.create_table(
            'connection', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('conn_id', sa.String(length=250), nullable=True),
            sa.Column('conn_type', sa.String(length=500), nullable=True),
            sa.Column('host', sa.String(length=500), nullable=True),
            sa.Column('schema', sa.String(length=500), nullable=True),
            sa.Column('login', sa.String(length=500), nullable=True),
            sa.Column('password', sa.String(length=500), nullable=True),
            sa.Column('port', sa.Integer(), nullable=True),
            sa.Column('extra', sa.String(length=5000), nullable=True),
            sa.PrimaryKeyConstraint('id'))
    if 'dag' not in tables:
        op.create_table(
            'dag', sa.Column('dag_id', sa.String(length=250), nullable=False),
            sa.Column('is_paused', sa.Boolean(), nullable=True),
            sa.Column('is_subdag', sa.Boolean(), nullable=True),
            sa.Column('is_active', sa.Boolean(), nullable=True),
            sa.Column('last_scheduler_run', sa.DateTime(), nullable=True),
            sa.Column('last_pickled', sa.DateTime(), nullable=True),
            sa.Column('last_expired', sa.DateTime(), nullable=True),
            sa.Column('scheduler_lock', sa.Boolean(), nullable=True),
            sa.Column('pickle_id', sa.Integer(), nullable=True),
            sa.Column('fileloc', sa.String(length=2000), nullable=True),
            sa.Column('owners', sa.String(length=2000), nullable=True),
            sa.PrimaryKeyConstraint('dag_id'))
    if 'dag_pickle' not in tables:
        op.create_table(
            'dag_pickle', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('pickle', sa.PickleType(), nullable=True),
            sa.Column('created_dttm', sa.DateTime(), nullable=True),
            sa.Column('pickle_hash', sa.BigInteger(), nullable=True),
            sa.PrimaryKeyConstraint('id'))
    if 'import_error' not in tables:
        op.create_table(
            'import_error', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('timestamp', sa.DateTime(), nullable=True),
            sa.Column('filename', sa.String(length=1024), nullable=True),
            sa.Column('stacktrace', sa.Text(), nullable=True),
            sa.PrimaryKeyConstraint('id'))
    if 'job' not in tables:
        op.create_table(
            'job', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('dag_id', sa.String(length=250), nullable=True),
            sa.Column('state', sa.String(length=20), nullable=True),
            sa.Column('job_type', sa.String(length=30), nullable=True),
            sa.Column('start_date', sa.DateTime(), nullable=True),
            sa.Column('end_date', sa.DateTime(), nullable=True),
            sa.Column('latest_heartbeat', sa.DateTime(), nullable=True),
            sa.Column('executor_class', sa.String(length=500), nullable=True),
            sa.Column('hostname', sa.String(length=500), nullable=True),
            sa.Column('unixname', sa.String(length=1000), nullable=True),
            sa.PrimaryKeyConstraint('id'))
        op.create_index('job_type_heart',
                        'job', ['job_type', 'latest_heartbeat'],
                        unique=False)
    if 'known_event_type' not in tables:
        op.create_table(
            'known_event_type', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('know_event_type', sa.String(length=200), nullable=True),
            sa.PrimaryKeyConstraint('id'))
    if 'log' not in tables:
        op.create_table(
            'log', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('dttm', sa.DateTime(), nullable=True),
            sa.Column('dag_id', sa.String(length=250), nullable=True),
            sa.Column('task_id', sa.String(length=250), nullable=True),
            sa.Column('event', sa.String(length=30), nullable=True),
            sa.Column('execution_date', sa.DateTime(), nullable=True),
            sa.Column('owner', sa.String(length=500), nullable=True),
            sa.PrimaryKeyConstraint('id'))
    if 'sla_miss' not in tables:
        op.create_table(
            'sla_miss',
            sa.Column('task_id', sa.String(length=250), nullable=False),
            sa.Column('dag_id', sa.String(length=250), nullable=False),
            sa.Column('execution_date', sa.DateTime(), nullable=False),
            sa.Column('email_sent', sa.Boolean(), nullable=True),
            sa.Column('timestamp', sa.DateTime(), nullable=True),
            sa.Column('description', sa.Text(), nullable=True),
            sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date'))
    if 'slot_pool' not in tables:
        op.create_table('slot_pool',
                        sa.Column('id', sa.Integer(), nullable=False),
                        sa.Column('pool', sa.String(length=50), nullable=True),
                        sa.Column('slots', sa.Integer(), nullable=True),
                        sa.Column('description', sa.Text(), nullable=True),
                        sa.PrimaryKeyConstraint('id'),
                        sa.UniqueConstraint('pool'))
    if 'task_instance' not in tables:
        op.create_table(
            'task_instance',
            sa.Column('task_id', sa.String(length=250), nullable=False),
            sa.Column('dag_id', sa.String(length=250), nullable=False),
            sa.Column('execution_date', sa.DateTime(), nullable=False),
            sa.Column('start_date', sa.DateTime(), nullable=True),
            sa.Column('end_date', sa.DateTime(), nullable=True),
            sa.Column('duration', sa.Integer(), nullable=True),
            sa.Column('state', sa.String(length=20), nullable=True),
            sa.Column('try_number', sa.Integer(), nullable=True),
            sa.Column('hostname', sa.String(length=1000), nullable=True),
            sa.Column('unixname', sa.String(length=1000), nullable=True),
            sa.Column('job_id', sa.Integer(), nullable=True),
            sa.Column('pool', sa.String(length=50), nullable=True),
            sa.Column('queue', sa.String(length=50), nullable=True),
            sa.Column('priority_weight', sa.Integer(), nullable=True),
            sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date'))
        op.create_index('ti_dag_state',
                        'task_instance', ['dag_id', 'state'],
                        unique=False)
        op.create_index('ti_pool',
                        'task_instance', ['pool', 'state', 'priority_weight'],
                        unique=False)
        op.create_index('ti_state_lkp',
                        'task_instance',
                        ['dag_id', 'task_id', 'execution_date', 'state'],
                        unique=False)

    if 'user' not in tables:
        op.create_table(
            'user', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('username', sa.String(length=250), nullable=True),
            sa.Column('email', sa.String(length=500), nullable=True),
            sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('username'))
    if 'variable' not in tables:
        op.create_table('variable',
                        sa.Column('id', sa.Integer(), nullable=False),
                        sa.Column('key', sa.String(length=250), nullable=True),
                        sa.Column('val', sa.Text(), nullable=True),
                        sa.PrimaryKeyConstraint('id'),
                        sa.UniqueConstraint('key'))
    if 'chart' not in tables:
        op.create_table(
            'chart', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('label', sa.String(length=200), nullable=True),
            sa.Column('conn_id', sa.String(length=250), nullable=False),
            sa.Column('user_id', sa.Integer(), nullable=True),
            sa.Column('chart_type', sa.String(length=100), nullable=True),
            sa.Column('sql_layout', sa.String(length=50), nullable=True),
            sa.Column('sql', sa.Text(), nullable=True),
            sa.Column('y_log_scale', sa.Boolean(), nullable=True),
            sa.Column('show_datatable', sa.Boolean(), nullable=True),
            sa.Column('show_sql', sa.Boolean(), nullable=True),
            sa.Column('height', sa.Integer(), nullable=True),
            sa.Column('default_params', sa.String(length=5000), nullable=True),
            sa.Column('x_is_date', sa.Boolean(), nullable=True),
            sa.Column('iteration_no', sa.Integer(), nullable=True),
            sa.Column('last_modified', sa.DateTime(), nullable=True),
            sa.ForeignKeyConstraint(
                ['user_id'],
                ['user.id'],
            ), sa.PrimaryKeyConstraint('id'))
    if 'known_event' not in tables:
        op.create_table(
            'known_event', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('label', sa.String(length=200), nullable=True),
            sa.Column('start_date', sa.DateTime(), nullable=True),
            sa.Column('end_date', sa.DateTime(), nullable=True),
            sa.Column('user_id', sa.Integer(), nullable=True),
            sa.Column('known_event_type_id', sa.Integer(), nullable=True),
            sa.Column('description', sa.Text(), nullable=True),
            sa.ForeignKeyConstraint(
                ['known_event_type_id'],
                ['known_event_type.id'],
            ), sa.ForeignKeyConstraint(
                ['user_id'],
                ['user.id'],
            ), sa.PrimaryKeyConstraint('id'))
    if 'xcom' not in tables:
        op.create_table(
            'xcom', sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('key', sa.String(length=512), nullable=True),
            sa.Column('value', sa.PickleType(), nullable=True),
            sa.Column('timestamp',
                      sa.DateTime(),
                      default=func.now(),
                      nullable=False),
            sa.Column('execution_date', sa.DateTime(), nullable=False),
            sa.Column('task_id', sa.String(length=250), nullable=False),
            sa.Column('dag_id', sa.String(length=250), nullable=False),
            sa.PrimaryKeyConstraint('id'))
Example #40
def upgrade():
    c = get_context()
    insp = Inspector.from_engine(c.connection.engine)

    # existing migration
    # pre naming convention keys
    groups_permissions_pkey = "groups_permissions_pkey"
    groups_pkey = "groups_pkey"
    groups_resources_permissions_pkey = "groups_resources_permissions_pkey"
    users_groups_pkey = "users_groups_pkey"
    users_permissions_pkey = "users_permissions_pkey"
    users_resources_permissions_pkey = "users_resources_permissions_pkey"

    # inspected keys
    groups_permissions_pkey = insp.get_pk_constraint(
        "groups_permissions")["name"]
    groups_pkey = insp.get_pk_constraint("groups")["name"]
    groups_resources_permissions_pkey = insp.get_pk_constraint(
        "groups_resources_permissions")["name"]
    users_groups_pkey = insp.get_pk_constraint("users_groups")["name"]
    users_permissions_pkey = insp.get_pk_constraint(
        "users_permissions")["name"]
    users_resources_permissions_pkey = insp.get_pk_constraint(
        "users_resources_permissions")["name"]

    op.drop_constraint("groups_pkey", "groups", type_="primary")

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column(
            "groups",
            sa.Column("id", sa.Integer, primary_key=True, autoincrement=False))
        op.create_primary_key(groups_pkey, "groups", cols=["id"])
        op.alter_column(
            "groups",
            "id",
            type_=sa.Integer,
            existing_type=sa.Integer,
            autoincrement=True,
            existing_autoincrement=False,
            nullable=False,
        )
    else:
        op.add_column(
            "groups",
            sa.Column("id", sa.Integer, primary_key=True, autoincrement=True))
        op.create_primary_key(groups_pkey, "groups", cols=["id"])

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        for t in [
                "groups_permissions", "groups_resources_permissions",
                "users_groups"
        ]:
            for constraint in insp.get_foreign_keys(t):
                if constraint["referred_columns"] == ["group_name"]:
                    op.drop_constraint(constraint["name"],
                                       t,
                                       type_="foreignkey")

        for t in [
                "users_resources_permissions", "users_permissions",
                "users_groups"
        ]:
            for constraint in insp.get_foreign_keys(t):
                if constraint["referred_columns"] == ["user_name"]:
                    op.drop_constraint(constraint["name"],
                                       t,
                                       type_="foreignkey")

        for constraint in insp.get_foreign_keys("resources"):
            if constraint["referred_columns"] in [["user_name"],
                                                  ["group_name"]]:
                op.drop_constraint(constraint["name"],
                                   "resources",
                                   type_="foreignkey")

    op.add_column(
        "resources",
        sa.Column(
            "owner_user_id",
            sa.Integer(),
            sa.ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
        ),
    )
    op.add_column(
        "resources",
        sa.Column(
            "owner_group_id",
            sa.Integer(),
            sa.ForeignKey("groups.id", onupdate="CASCADE",
                          ondelete="SET NULL"),
        ),
    )
    # update the data
    op.execute("""update resources set owner_user_id =
                (select id from users where users.user_name=owner_user_name)"""
               )  # noqa
    op.execute("""update resources set owner_group_id =
                (select id from users where users.user_name=owner_group_name)"""
               )  # noqa

    # MySQL cannot add the FKEY while the PKEY is still missing,
    # so create the PKEY first and only then attach the FKEY
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("groups_permissions", sa.Column("group_id",
                                                      sa.Integer()))
    else:
        op.add_column(
            "groups_permissions",
            sa.Column(
                "group_id",
                sa.Integer(),
                sa.ForeignKey(
                    "groups.id",
                    onupdate="CASCADE",
                    ondelete="CASCADE"  # noqa  # noqa
                ),
            ),
        )  # noqa

    op.execute("""update groups_permissions set group_id =
    (select id from groups where groups.group_name=groups_permissions.group_name)"""
               )  # noqa

    op.drop_constraint(groups_permissions_pkey,
                       "groups_permissions",
                       type_="primary")
    op.create_primary_key(groups_permissions_pkey,
                          "groups_permissions",
                          cols=["group_id", "perm_name"])
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            None,
            "groups_permissions",
            "groups",
            remote_cols=["id"],
            local_cols=["group_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("groups_resources_permissions",
                      sa.Column("group_id", sa.Integer()))
    else:
        op.add_column(
            "groups_resources_permissions",
            sa.Column(
                "group_id",
                sa.Integer(),
                sa.ForeignKey("groups.id",
                              onupdate="CASCADE",
                              ondelete="CASCADE"),
            ),
        )

    op.execute("""update groups_resources_permissions set group_id =
    (select id from groups where groups.group_name=groups_resources_permissions.group_name)"""
               )  # noqa
    op.drop_constraint(
        groups_resources_permissions_pkey,
        "groups_resources_permissions",
        type_="primary",
    )
    op.create_primary_key(
        groups_resources_permissions_pkey,
        "groups_resources_permissions",
        cols=["group_id", "resource_id", "perm_name"],
    )

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            None,
            "groups_resources_permissions",
            "groups",
            remote_cols=["id"],
            local_cols=["group_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("users_groups", sa.Column("group_id", sa.Integer()))
    else:
        op.add_column(
            "users_groups",
            sa.Column(
                "group_id",
                sa.Integer(),
                sa.ForeignKey(
                    "groups.id",
                    onupdate="CASCADE",
                    ondelete="CASCADE"  # noqa
                ),
            ),
        )  # noqa
    op.execute("""update users_groups set group_id =
    (select id from groups where groups.group_name=users_groups.group_name)""")

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("users_groups", sa.Column("user_id", sa.Integer()))
    else:
        op.add_column(
            "users_groups",
            sa.Column(
                "user_id",
                sa.Integer(),
                sa.ForeignKey(
                    "users.id",
                    onupdate="CASCADE",
                    ondelete="CASCADE"  # noqa
                ),
            ),
        )  # noqa
    op.execute("""update users_groups set user_id =
    (select id from users where users.user_name=users_groups.user_name)""")
    op.drop_constraint(users_groups_pkey, "users_groups", type_="primary")
    op.create_primary_key(users_groups_pkey,
                          "users_groups",
                          cols=["user_id", "group_id"])
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            None,
            "users_groups",
            "groups",
            remote_cols=["id"],
            local_cols=["group_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )
        op.create_foreign_key(
            None,
            "users_groups",
            "users",
            remote_cols=["id"],
            local_cols=["user_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("users_permissions", sa.Column("user_id", sa.Integer()))
    else:
        op.add_column(
            "users_permissions",
            sa.Column(
                "user_id",
                sa.Integer(),
                sa.ForeignKey(
                    "users.id",
                    onupdate="CASCADE",
                    ondelete="CASCADE"  # noqa
                ),
            ),
        )  # noqa
    op.execute("""update users_permissions set user_id =
    (select id from groups where groups.group_name=users_permissions.user_name)"""
               )  # noqa
    op.drop_constraint(users_permissions_pkey,
                       "users_permissions",
                       type_="primary")
    op.create_primary_key(users_permissions_pkey,
                          "users_permissions",
                          cols=["user_id", "perm_name"])
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            None,
            "users_permissions",
            "users",
            remote_cols=["id"],
            local_cols=["user_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.add_column("users_resources_permissions",
                      sa.Column("user_id", sa.Integer()))
    else:
        op.add_column(
            "users_resources_permissions",
            sa.Column(
                "user_id",
                sa.Integer(),
                sa.ForeignKey("users.id",
                              onupdate="CASCADE",
                              ondelete="CASCADE"),
            ),
        )

    op.execute("""update users_resources_permissions set user_id =
    (select id from users where users.user_name=users_resources_permissions.user_name)"""
               )  # noqa
    op.drop_constraint(users_resources_permissions_pkey,
                       "users_resources_permissions",
                       type_="primary")
    op.create_primary_key(
        users_resources_permissions_pkey,
        "users_resources_permissions",
        cols=["user_id", "resource_id", "perm_name"],
    )
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            None,
            "users_resources_permissions",
            "users",
            remote_cols=["id"],
            local_cols=["user_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )

    op.drop_column("resources", "owner_user_name")
    op.drop_column("resources", "owner_group_name")
    op.drop_column("groups_permissions", "group_name")
    op.drop_column("groups_resources_permissions", "group_name")
    op.drop_column("users_resources_permissions", "user_name")
    op.drop_column("users_groups", "group_name")
    op.drop_column("users_groups", "user_name")
    op.drop_column("users_permissions", "user_name")
Example #41
0
def get_all_tables(db):
    """Returns a dict containing all tables grouped by schema"""
    inspector = Inspector.from_engine(db.engine)
    schemas = sorted(set(inspector.get_schema_names()) - {'information_schema'})
    return dict(zip(schemas, (inspector.get_table_names(schema=schema) for schema in schemas)))
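A hedged usage sketch for the helper above, assuming get_all_tables is importable and that db exposes a live .engine attribute the way a Flask-SQLAlchemy object does:

from sqlalchemy import create_engine

class FakeDb:  # stand-in for the Flask-SQLAlchemy db object
    engine = create_engine('sqlite://')

for schema, tables in get_all_tables(FakeDb()).items():
    print(schema, tables)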
Example #42
0
def downgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'products_has_associates' in tables:
        op.drop_table('products_has_associates')
Example #43
0
 def get_columns(
     cls, inspector: Inspector, table_name: str, schema: Optional[str]
 ) -> List[Dict[str, Any]]:
     return inspector.get_columns(table_name, schema)
Example #44
0
def exists_table(table_name):
    """check if table exists"""
    engine = create_engine(get_db_url())
    inspector = Inspector.from_engine(engine)
    table = inspector.get_table_names()
    return table_name in table
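A possible call site for exists_table, e.g. as a startup sanity check; the table name here is hypothetical:

# minimal sketch: fail fast when an expected table is absent
if not exists_table('variable'):
    raise RuntimeError("expected table 'variable' is missing")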
Example #45
0
def upgrade():
    # Define columns that need to be added/deleted
    user_columns = [Column('gid', String),
                    Column('display_name', String)]
    burst_columns = [Column('range1', String), Column('range2', String), Column('fk_simulation', Integer),
                     Column('fk_operation_group', Integer), Column('fk_metric_operation_group', Integer)]
    op_column = Column('view_model_disk_size', Integer)

    # Get a connection and inspect the existing tables
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    table_names = inspector.get_table_names()
    tables = Base.metadata.tables

    try:
        op.rename_table('BURST_CONFIGURATIONS', 'BurstConfiguration')

        # Dropping tables which don't exist in the new version
        op.drop_table('MAPPED_LOOK_UP_TABLE_DATA')
        op.drop_table('MAPPED_DATATYPE_MEASURE_DATA')
        op.drop_table('MAPPED_SPATIAL_PATTERN_VOLUME_DATA')
        op.drop_table('MAPPED_SIMULATION_STATE_DATA')
        op.drop_table('WORKFLOW_STEPS')
        op.drop_table('WORKFLOW_VIEW_STEPS')

        # Dropping tables which will be repopulated from the H5 files
        op.drop_table('MAPPED_COHERENCE_SPECTRUM_DATA')
        op.drop_table('MAPPED_COMPLEX_COHERENCE_SPECTRUM_DATA')
        op.drop_table('MAPPED_CONNECTIVITY_ANNOTATIONS_DATA')
        op.drop_table('MAPPED_CONNECTIVITY_MEASURE_DATA')
        op.drop_table('MAPPED_CONNECTIVITY_DATA')
        op.drop_table('MAPPED_CORRELATION_COEFFICIENTS_DATA')
        op.drop_table('MAPPED_COVARIANCE_DATA')
        op.drop_table('MAPPED_CROSS_CORRELATION_DATA')
        op.drop_table('MAPPED_FCD_DATA')
        op.drop_table('MAPPED_FOURIER_SPECTRUM_DATA')
        op.drop_table('MAPPED_INDEPENDENT_COMPONENTS_DATA')
        op.drop_table('MAPPED_LOCAL_CONNECTIVITY_DATA')
        op.drop_table('MAPPED_PRINCIPAL_COMPONENTS_DATA')
        op.drop_table('MAPPED_PROJECTION_MATRIX_DATA')
        op.drop_table('MAPPED_REGION_MAPPING_DATA')
        op.drop_table('MAPPED_REGION_VOLUME_MAPPING_DATA')
        op.drop_table('MAPPED_TIME_SERIES_REGION_DATA')
        op.drop_table('MAPPED_TIME_SERIES_EEG_DATA')
        op.drop_table('MAPPED_TIME_SERIES_MEG_DATA')
        op.drop_table('MAPPED_TIME_SERIES_SEEG_DATA')
        op.drop_table('MAPPED_TIME_SERIES_SURFACE_DATA')
        op.drop_table('MAPPED_TIME_SERIES_VOLUME_DATA')
        op.drop_table('MAPPED_TIME_SERIES_DATA')
        op.drop_table('MAPPED_SENSORS_DATA')
        op.drop_table('MAPPED_TRACTS_DATA')
        op.drop_table('MAPPED_STIMULI_REGION_DATA')
        op.drop_table('MAPPED_STIMULI_SURFACE_DATA')
        op.drop_table('MAPPED_STRUCTURAL_MRI_DATA')
        op.drop_table('MAPPED_SURFACE_DATA')
        op.drop_table('MAPPED_VALUE_WRAPPER_DATA')
        op.drop_table('MAPPED_VOLUME_DATA')
        op.drop_table('MAPPED_WAVELET_COEFFICIENTS_DATA')
        op.drop_table('DATA_TYPES_GROUPS')
        op.drop_table('MAPPED_ARRAY_DATA')
        op.drop_table('MAPPED_SPATIO_TEMPORAL_PATTERN_DATA')
        op.drop_table('MAPPED_SPATIAL_PATTERN_DATA')
        op.drop_table('WORKFLOWS')

        # Delete migrate_version if exists
        if 'migrate_version' in table_names:
            op.drop_table('migrate_version')
    except Exception as excep:
        LOGGER.exception(excep)

    # Migrating USERS table
    if TvbProfile.current.db.SELECTED_DB == 'postgres':
        op.add_column('USERS', user_columns[0])
        op.add_column('USERS', user_columns[1])
        op.create_unique_constraint('USERS_gid_key', 'USERS', ['gid'])
    else:
        with op.batch_alter_table('USERS', table_args=(UniqueConstraint('gid'),)) as batch_op:
            batch_op.add_column(user_columns[0])
            batch_op.add_column(user_columns[1])

    users_table = tables['USERS']
    user_ids = conn.execute("""SELECT U.id FROM "USERS" U""").fetchall()
    for id in user_ids:
        conn.execute(users_table.update().where(users_table.c.id == id[0]).
                     values({"gid": uuid.uuid4().hex, "display_name": users_table.c.username}))
    conn.execute('COMMIT')

    # Migrating BurstConfiguration table
    burst_config_table = tables['BurstConfiguration']
    for column in burst_columns:
        op.add_column('BurstConfiguration', column)

    try:
        op.alter_column('BurstConfiguration', '_dynamic_ids', new_column_name='dynamic_ids')
        op.alter_column('BurstConfiguration', '_simulator_configuration', new_column_name='simulator_gid')
        conn.execute(burst_config_table.delete().where(burst_config_table.c.status == 'error'))

        ranges = conn.execute("""SELECT OG.id, OG.range1, OG.range2 from "OPERATION_GROUPS" OG """).fetchall()

        ranges_1 = []
        ranges_2 = []

        for r in ranges:
            ranges_1.append(str(r[1]))
            ranges_2.append(str(r[2]))

        new_ranges_1 = _migrate_range_params(ranges_1)
        new_ranges_2 = _migrate_range_params(ranges_2)

        # Migrating Operation Groups
        operation_groups_table = tables['OPERATION_GROUPS']
        operation_groups = conn.execute("""SELECT * FROM "OPERATION_GROUPS" """).fetchall()

        for op_g in operation_groups:
            operation = conn.execute("""SELECT fk_operation_group, parameters, meta_data FROM "OPERATIONS" O """
                                     """WHERE O.fk_operation_group = """ + str(op_g[0])).fetchone()
            burst_id = eval(operation[2])['Burst_Reference']

            # Find if operation refers to an operation group or a metric operation group
            if 'time_series' in operation[1]:
                conn.execute(burst_config_table.update().where(burst_config_table.c.id == burst_id).
                             values({"fk_metric_operation_group": operation[0]}))
            else:
                conn.execute(burst_config_table.update().where(burst_config_table.c.id == burst_id).
                             values({"fk_operation_group": operation[0]}))

        for i in range(len(ranges_1)):
            range1 = str(new_ranges_1[i]).replace('\'', '')
            range2 = str(new_ranges_2[i]).replace('\'', '')
            _update_range_parameters(burst_config_table, operation_groups_table, range1, range2, ranges[i][0])

        conn.execute('COMMIT')
    except Exception as excep:
        LOGGER.exception(excep)

    # Finish BurstConfiguration migration by deleting unused column and adding foreign keys
    with op.batch_alter_table('BurstConfiguration') as batch_op:
        batch_op.drop_column('workflows_number')
        batch_op.create_foreign_key('bc_fk_simulation', 'OPERATIONS', ['fk_simulation'], ['id'])
        batch_op.create_foreign_key('bc_fk_operation_group', 'OPERATION_GROUPS', ['fk_operation_group'], ['id'])
        batch_op.create_foreign_key('bc_metric_operation_group', 'OPERATION_GROUPS',
                                    ['fk_metric_operation_group'], ['id'])
    conn.execute('COMMIT')

    # Migrating OPERATIONS table
    op_table = tables['OPERATIONS']
    try:
        burst_ref_metadata = conn.execute("""SELECT id, meta_data FROM "OPERATIONS" """
                                          """WHERE meta_data like '%%Burst_Reference%%' """).fetchall()
        op.alter_column('OPERATIONS', 'parameters', new_column_name='view_model_gid')

        for metadata in burst_ref_metadata:
            metadata_dict = eval(str(metadata[1]))
            conn.execute(op_table.update().where(op_table.c.id == metadata[0]).
                         values({'view_model_gid': json.dumps(metadata_dict['Burst_Reference'])}))

        op.rename_table('BurstConfiguration', 'BURST_CONFIGURATION')
        conn.execute('COMMIT')
    except Exception as excep:
        LOGGER.exception(excep)

    with op.batch_alter_table('OPERATIONS') as batch_op:
        batch_op.add_column(op_column)
        batch_op.drop_column('meta_data')
    conn.execute('COMMIT')

    try:
        op.drop_table('ALGORITHMS')
        op.drop_table('ALGORITHM_CATEGORIES')
        op.drop_table('DATA_TYPES')
    except Exception as excep:
        try:
            conn.execute("""DROP TABLE if exists "ALGORITHMS" cascade; """)
            conn.execute("""DROP TABLE if exists "ALGORITHM_CATEGORIES" cascade; """)
            conn.execute("""DROP TABLE if exists "DATA_TYPES" cascade; """)
        except Exception as inner_excep:
            # distinct name: rebinding excep here would shadow (and, in
            # Python 3, unbind) the outer exception logged below
            LOGGER.exception(inner_excep)
        LOGGER.exception(excep)
Example #46
0
def reflect_table(engine, tablename):
    meta = MetaData()
    table = Table(tablename, meta)
    insp = Inspector.from_engine(engine)
    insp.reflecttable(table, None)
    return table
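One caveat: Inspector.reflecttable() was deprecated in SQLAlchemy 1.4 (renamed to reflect_table()). On modern versions the same helper can be written without touching the Inspector at all; a sketch:

from sqlalchemy import MetaData, Table

def reflect_table_modern(engine, tablename):
    # autoload_with performs the same reflection directly from the engine
    return Table(tablename, MetaData(), autoload_with=engine)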
Example #47
0
def get_engine(uri):
    engine = create_engine(uri)
    inspector = Inspector.from_engine(engine)
    if not inspector.get_table_names():
        create_database(engine)
    return engine
    level = Column("level", String(32), nullable=False)
    term = Column("term", String(32), nullable=False)
    type_of_match = Column("type_of_match", String(32), nullable=False)


# Now that we've created the classes for the database, associate them with the database and create any missing tables


Messages.metadata.create_all(database)
if backup:
    Messages.metadata.create_all(database_backup)


# the database is initialized; now check whether the FTS table is present

inspector = Inspector.from_engine(database)
if "messages_fts" not in inspector.get_table_names():
    import sys

    acarshub_logging.log(
        "Missing FTS TABLE! Aborting!", "database", level=LOG_LEVEL["ERROR"]
    )
    sys.exit(1)

# messages_idx = Table(
#     "messages_fts",
#     Messages.metadata,
#     Column("rowid", Integer(), key="id", primary_key=True),
#     Column("depa", String(32)),
#     Column("dsta", String(32)),
Example #49
0
    def _process_table(
        self,
        dataset_name: str,
        inspector: Inspector,
        schema: str,
        table: str,
        sql_config: SQLAlchemyConfig,
    ) -> Iterable[Union[SqlWorkUnit, MetadataWorkUnit]]:
        columns = self._get_columns(dataset_name, inspector, schema, table)
        dataset_urn = make_dataset_urn_with_platform_instance(
            self.platform,
            dataset_name,
            self.config.platform_instance,
            self.config.env,
        )
        dataset_snapshot = DatasetSnapshot(
            urn=dataset_urn,
            aspects=[StatusClass(removed=False)],
        )
        if self.is_stateful_ingestion_configured():
            cur_checkpoint = self.get_current_checkpoint(
                self.get_default_ingestion_job_id())
            if cur_checkpoint is not None:
                checkpoint_state = cast(BaseSQLAlchemyCheckpointState,
                                        cur_checkpoint.state)
                checkpoint_state.add_table_urn(dataset_urn)

        description, properties, location_urn = self.get_table_properties(
            inspector, schema, table)
        dataset_properties = DatasetPropertiesClass(
            name=table,
            description=description,
            customProperties=properties,
        )
        dataset_snapshot.aspects.append(dataset_properties)

        if location_urn:
            external_upstream_table = UpstreamClass(
                dataset=location_urn,
                type=DatasetLineageTypeClass.COPY,
            )
            lineage_mcpw = MetadataChangeProposalWrapper(
                entityType="dataset",
                changeType=ChangeTypeClass.UPSERT,
                entityUrn=dataset_snapshot.urn,
                aspectName="upstreamLineage",
                aspect=UpstreamLineage(upstreams=[external_upstream_table]),
            )
            lineage_wu = MetadataWorkUnit(
                id=f"{self.platform}-{lineage_mcpw.entityUrn}-{lineage_mcpw.aspectName}",
                mcp=lineage_mcpw,
            )
            yield lineage_wu

        pk_constraints: dict = inspector.get_pk_constraint(table, schema)
        foreign_keys = self._get_foreign_keys(dataset_urn, inspector, schema,
                                              table)
        schema_fields = self.get_schema_fields(dataset_name, columns,
                                               pk_constraints)
        schema_metadata = get_schema_metadata(
            self.report,
            dataset_name,
            self.platform,
            columns,
            pk_constraints,
            foreign_keys,
            schema_fields,
        )
        dataset_snapshot.aspects.append(schema_metadata)
        db_name = self.get_db_name(inspector)
        yield from self.add_table_to_schema_container(dataset_urn, db_name,
                                                      schema)
        mce = MetadataChangeEvent(proposedSnapshot=dataset_snapshot)
        wu = SqlWorkUnit(id=dataset_name, mce=mce)
        self.report.report_workunit(wu)
        yield wu
        dpi_aspect = self.get_dataplatform_instance_aspect(
            dataset_urn=dataset_urn)
        if dpi_aspect:
            yield dpi_aspect
        subtypes_aspect = MetadataWorkUnit(
            id=f"{dataset_name}-subtypes",
            mcp=MetadataChangeProposalWrapper(
                entityType="dataset",
                changeType=ChangeTypeClass.UPSERT,
                entityUrn=dataset_urn,
                aspectName="subTypes",
                aspect=SubTypesClass(typeNames=["table"]),
            ),
        )
        yield subtypes_aspect

        yield from self._get_domain_wu(
            dataset_name=dataset_name,
            entity_urn=dataset_urn,
            entity_type="dataset",
            sql_config=sql_config,
        )
Example #50
0
"""
Revision ID: 03_add_genre_table
Revises: 02_add_venue_fields_exc_genres
Create Date: 2020-12-24 12:37:52.424887

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector

# revision identifiers, used by Alembic.
revision = '03_add_genre_table'
down_revision = '02_add_venue_fields_exc_genres'
branch_labels = None
depends_on = None

conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    if 'genres' not in tables:
        genre = op.create_table(
            'genres',
            sa.Column('id', sa.Integer, nullable=False, primary_key=True),
            sa.Column('name', sa.String, nullable=False),
            sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name'))

    res = conn.execute("select * from genres")
    results = res.fetchall()
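Note that executing a plain string is deprecated in SQLAlchemy 1.4 and removed in 2.0, so on newer versions the last two lines would need text(); a sketch reusing the module-level conn from this example:

from sqlalchemy import text

res = conn.execute(text("select * from genres"))
results = res.fetchall()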
Example #51
0
def register_schema(providers: List[str],
                    db_name: str,
                    schema_base: DeclarativeMeta,
                    entity_type: str = 'stock'):
    """
    function for registering a schema; declare the schemas before registering them

    :param providers: the supported providers for the schema
    :type providers:
    :param db_name: database name for the schema
    :type db_name:
    :param schema_base:
    :type schema_base:
    :param entity_type: the schema related entity_type
    :type entity_type:
    :return:
    :rtype:
    """
    schemas = []
    for item in schema_base._decl_class_registry.items():
        cls = item[1]
        if type(cls) == DeclarativeMeta:
            # register provider to the schema
            for provider in providers:
                if issubclass(cls, Mixin):
                    cls.register_provider(provider)

            if zvt_context.dbname_map_schemas.get(db_name):
                schemas = zvt_context.dbname_map_schemas[db_name]
            zvt_context.schemas.append(cls)
            add_to_map_list(the_map=zvt_context.entity_map_schemas, key=entity_type, value=cls)
            schemas.append(cls)

    zvt_context.dbname_map_schemas[db_name] = schemas

    for provider in providers:
        # track it in _providers
        if provider not in zvt_context.providers:
            zvt_context.providers.append(provider)

        if not zvt_context.provider_map_dbnames.get(provider):
            zvt_context.provider_map_dbnames[provider] = []
        zvt_context.provider_map_dbnames[provider].append(db_name)
        zvt_context.dbname_map_base[db_name] = schema_base

        # create the db & table
        engine = get_db_engine(provider, db_name=db_name)
        schema_base.metadata.create_all(engine)

        session_fac = get_db_session_factory(provider, db_name=db_name)
        session_fac.configure(bind=engine)

    for provider in providers:
        engine = get_db_engine(provider, db_name=db_name)
        inspector = Inspector.from_engine(engine)

        # create indexes for 'timestamp', 'entity_id', 'code', 'report_period', 'updated_timestamp'
        for table_name, table in iter(schema_base.metadata.tables.items()):
            index_column_names = [index['name'] for index in inspector.get_indexes(table_name)]

            logger.debug('engine:{},table:{},index:{}'.format(engine, table_name, index_column_names))

            for col in ['timestamp', 'entity_id', 'code', 'report_period', 'created_timestamp', 'updated_timestamp']:
                if col in table.c:
                    column = table.c[col]  # equivalent to the eval(), without executing strings
                    index = sqlalchemy.schema.Index('{}_{}_index'.format(table_name, col), column)
                    if index.name not in index_column_names:
                        index.create(engine)
            for cols in [('timestamp', 'entity_id'), ('timestamp', 'code')]:
                if (cols[0] in table.c) and (cols[1] in table.c):
                    column0 = table.c[cols[0]]
                    column1 = table.c[cols[1]]
                    index = sqlalchemy.schema.Index('{}_{}_{}_index'.format(table_name, cols[0], cols[1]),
                                                    column0, column1)
                    if index.name not in index_column_names:
                        index.create(engine)
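An aside on the index bookkeeping above: on SQLAlchemy 1.4+ the pre-read of inspector.get_indexes() can be replaced by Index.create(..., checkfirst=True), which asks the dialect whether the index already exists. A self-contained sketch with an illustrative table:

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
metadata = sa.MetaData()
prices = sa.Table('prices', metadata,
                  sa.Column('id', sa.Integer, primary_key=True),
                  sa.Column('timestamp', sa.DateTime))
metadata.create_all(engine)

index = sa.Index('prices_timestamp_index', prices.c.timestamp)
index.create(engine, checkfirst=True)  # no-op when the index already exists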
Example #52
0
def init():
    inspector = Inspector.from_engine(engine)
    if Quote.__tablename__ not in inspector.get_table_names():
        create()
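The explicit inspector lookup above can also be skipped, since table creation itself supports checkfirst; assuming Quote is the declarative model and engine is the same module-level engine:

# minimal alternative: emit CREATE TABLE only when the table is missing
Quote.__table__.create(engine, checkfirst=True)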
Example #53
0
def init():
    # Models are imported here to prevent a circular import where we would
    # import models and the models would import that db object just above us.

    # They're also imported here in this function because they implicitly
    # monkey-patch the threading module, and we might not need that if all we want
    # from the namespace is something like app.version, like in repl.py for example.
    from models import APIKey
    from models import FeedGroup
    from models import Feed
    from models import Article
    from models import Event

    from resources import api_key
    from resources import feeds
    from resources import feedgroups
    from resources import articles

    api.add_resource(api_key.KeyCollection, "/keys")
    api.add_resource(api_key.KeyResource, "/keys/<string:name>")

    api.add_resource(feedgroups.FeedGroupCollection, "/feeds")
    api.add_resource(feedgroups.FeedGroupResource, "/feeds/<string:groupname>")
    api.add_resource(feedgroups.FeedGroupStop,
                     "/feeds/<string:groupname>/stop")
    api.add_resource(feedgroups.FeedGroupStart,
                     "/feeds/<string:groupname>/start")
    api.add_resource(feedgroups.FeedGroupArticles,
                     "/feeds/<string:groupname>/articles")
    api.add_resource(feedgroups.FeedGroupSearch,
                     "/feeds/<string:groupname>/search/<string:terms>")
    api.add_resource(feedgroups.FeedGroupCount,
                     "/feeds/<string:groupname>/count")

    api.add_resource(feeds.FeedResource,
                     "/feeds/<string:groupname>/<string:name>")
    api.add_resource(feeds.FeedArticleCollection,
                     "/feeds/<string:groupname>/<string:name>/articles")
    api.add_resource(
        feeds.FeedSearch,
        "/feeds/<string:groupname>/<string:name>/search/<string:terms>")
    api.add_resource(feeds.FeedStartResource,
                     "/feeds/<string:groupname>/<string:name>/start")
    api.add_resource(feeds.FeedStopResource,
                     "/feeds/<string:groupname>/<string:name>/stop")

    api.add_resource(articles.ArticleCollection, "/articles")
    api.add_resource(articles.ArticleResource, "/articles/<string:uid>")
    api.add_resource(articles.ArticleSearch, "/articles/search/<string:terms>")
    api.add_resource(articles.ArticleCount, "/articles/count")

    # Create the database schema if it's not already laid out.
    inspector = Inspector.from_engine(db.engine)
    tables = inspector.get_table_names()

    if 'api_keys' not in tables:
        db.create_all()
        master = models.APIKey(name=app.config['MASTER_KEY_NAME'])
        if app.config['MASTER_KEY']:
            master.key = app.config['MASTER_KEY']
        else:
            master.key = master.generate_key_str()
        print master.key
        master.active = True
        db.session.add(master)
        db.session.commit()
Example #54
0
 def test_get_current_revision_doesnt_create_version_table(self):
     context = self.make_one(connection=self.connection,
                             opts={'version_table': 'version_table'})
     eq_(context.get_current_revision(), None)
     insp = Inspector(self.connection)
     assert ('version_table' not in insp.get_table_names())
Example #55
0
class Db(object):
    def __init__(self, sqla_conn, args, schema=None):
        self.args = args
        self.sqla_conn = sqla_conn
        self.schema = schema
        self.engine = sa.create_engine(sqla_conn)
        self.meta = sa.MetaData(
            bind=self.engine)  # excised schema=schema to prevent errors
        self.meta.reflect(schema=self.schema)
        self.inspector = Inspector(bind=self.engine)
        self.conn = self.engine.connect()
        self.tables = OrderedDict()
        for tbl in self.meta.sorted_tables:
            tbl.db = self
            # TODO: Replace all these monkeypatches with an instance assignment
            tbl.find_n_rows = types.MethodType(_find_n_rows, tbl)
            tbl.random_row_func = types.MethodType(_random_row_func, tbl)
            tbl.fks = self.inspector.get_foreign_keys(tbl.name,
                                                      schema=tbl.schema)
            tbl.pk = self.inspector.get_primary_keys(tbl.name,
                                                     schema=tbl.schema)
            tbl.filtered_by = types.MethodType(_filtered_by, tbl)
            tbl.by_pk = types.MethodType(_by_pk, tbl)
            tbl.pk_val = types.MethodType(_pk_val, tbl)
            tbl.exists = types.MethodType(_exists, tbl)
            tbl.child_fks = []
            tbl.find_n_rows(estimate=True)
            self.tables[(tbl.schema, tbl.name)] = tbl
        for ((tbl_schema, tbl_name), tbl) in self.tables.items():
            for fk in tbl.fks:
                fk['constrained_schema'] = tbl_schema
                fk['constrained_table'] = tbl_name  # TODO: check against constrained_table
                self.tables[(fk['referred_schema'],
                             fk['referred_table'])].child_fks.append(fk)

    def __repr__(self):
        return "Db('%s')" % self.sqla_conn

    def assign_target(self, target_db):
        for ((tbl_schema, tbl_name), tbl) in self.tables.items():
            tbl._random_row_gen_fn = types.MethodType(_random_row_gen_fn, tbl)
            tbl.random_rows = tbl._random_row_gen_fn()
            tbl.next_row = types.MethodType(_next_row, tbl)
            target = target_db.tables[(tbl_schema, tbl_name)]
            target.requested = deque()
            target.required = deque()
            if tbl.n_rows:
                if self.args.logarithmic:
                    target.n_rows_desired = int(
                        math.pow(
                            10,
                            math.log10(tbl.n_rows) * self.args.fraction)) or 1
                else:
                    target.n_rows_desired = int(
                        tbl.n_rows * self.args.fraction) or 1
            else:
                target.n_rows_desired = 0
            target.source = tbl
            tbl.target = target
            target.completeness_score = types.MethodType(
                _completeness_score, target)
            logging.debug("assigned methods to %s" % target.name)

    def confirm(self):
        message = []
        for (tbl_schema, tbl_name) in sorted(self.tables, key=lambda t: t[1]):
            tbl = self.tables[(tbl_schema, tbl_name)]
            message.append("Create %d rows from %d in %s.%s" %
                           (tbl.target.n_rows_desired, tbl.n_rows, tbl_schema
                            or '', tbl_name))
        print("\n".join(sorted(message)))
        if self.args.yes:
            return True
        response = input("Proceed? (Y/n) ").strip().lower()
        return (not response) or (response[0] == 'y')

    def create_row_in(self, source_row, target_db, target, prioritized=False):
        logging.debug('create_row_in %s:%s ' %
                      (target.name, target.pk_val(source_row)))

        row_exists = target.exists(**(dict(source_row)))
        logging.debug("Row exists? %s" % str(row_exists))
        if row_exists and not prioritized:
            return

        if not row_exists:
            # make sure that all required rows are in parent table(s)
            for fk in target.fks:
                target_parent = target_db.tables[(fk['referred_schema'],
                                                  fk['referred_table'])]
                slct = sa.sql.select([
                    target_parent,
                ])
                any_non_null_key_columns = False
                for (parent_col, child_col) in zip(fk['referred_columns'],
                                                   fk['constrained_columns']):
                    slct = slct.where(
                        target_parent.c[parent_col] == source_row[child_col])
                    if source_row[child_col] is not None:
                        any_non_null_key_columns = True
                if any_non_null_key_columns:
                    target_parent_row = target_db.conn.execute(slct).first()
                    if not target_parent_row:
                        source_parent_row = self.conn.execute(slct).first()
                        self.create_row_in(source_parent_row, target_db,
                                           target_parent)

            ins = target.insert().values(**source_row)
            target_db.conn.execute(ins)
            target.n_rows += 1

        for child_fk in target.child_fks:
            child = self.tables[(child_fk['constrained_schema'],
                                 child_fk['constrained_table'])]
            slct = sa.sql.select([
                child,
            ])
            for (child_col, this_col) in zip(child_fk['constrained_columns'],
                                             child_fk['referred_columns']):
                slct = slct.where(child.c[child_col] == source_row[this_col])
            if not prioritized:
                slct = slct.limit(self.args.children)
            for (n, desired_row) in enumerate(self.conn.execute(slct)):
                if prioritized:
                    child.target.required.append((desired_row, prioritized))
                elif (n == 0):
                    child.target.requested.appendleft(
                        (desired_row, prioritized))
                else:
                    child.target.requested.append((desired_row, prioritized))

    def create_subset_in(self, target_db):

        for (tbl_name, pks) in self.args.force_rows.items():
            if '.' in tbl_name:
                (tbl_schema, tbl_name) = tbl_name.split('.', 1)
            else:
                tbl_schema = None
            source = self.tables[(tbl_schema, tbl_name)]
            for pk in pks:
                source_row = source.by_pk(pk)
                if source_row:
                    self.create_row_in(source_row,
                                       target_db,
                                       source.target,
                                       prioritized=True)
                else:
                    logging.warn("requested %s:%s not found in source db,"
                                 "could not create" % (source.name, pk))

        while True:
            targets = sorted(target_db.tables.values(),
                             key=lambda t: t.completeness_score())
            try:
                target = targets.pop(0)
                while not target.source.n_rows:
                    target = targets.pop(0)
            except IndexError:  # pop failure, no more tables
                return
            logging.debug("total n_rows in target: %d" % sum(
                (t.n_rows for t in target_db.tables.values())))
            logging.debug(
                "target tables with 0 n_rows: %s" %
                ", ".join(t.name
                          for t in target_db.tables.values() if not t.n_rows))
            logging.info("lowest completeness score (in %s) at %f" %
                         (target.name, target.completeness_score()))
            if target.completeness_score() > 0.97:
                return
            (source_row, prioritized) = target.source.next_row()
            self.create_row_in(source_row,
                               target_db,
                               target,
                               prioritized=prioritized)
Example #56
0
    def handle(self, options, global_options, *args):
        from zipfile import ZipFile, ZIP_DEFLATED
        from StringIO import StringIO

        output_dir = os.path.join(options.output_dir, options.engine)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        engine = get_engine(options, global_options)

        if not args:
            print "Failed! You should pass one or more tables name."
            sys.exit(1)

        zipfile = None
        if options.zipfile:
            zipfile = ZipFile(options.zipfile, 'w', compression=ZIP_DEFLATED)

        inspector = Inspector.from_engine(engine)

        tables = get_sorted_tables(
            get_tables(global_options.apps_dir,
                       tables=args,
                       engine_name=options.engine,
                       settings_file=global_options.settings,
                       local_settings_file=global_options.local_settings))
        _len = len(tables)

        for i, (name, t) in enumerate(tables):
            if global_options.verbose:
                print '[%s] Dumping %s...' % (options.engine,
                                               show_table(name, t, i, _len)),
            filename = os.path.join(output_dir, name + '.txt')
            if options.text:
                format = 'txt'
            else:
                format = None
            # process zipfile
            if options.zipfile:
                fileobj = StringIO()
                filename = os.path.basename(filename)
            else:
                fileobj = filename

            t = dump_table(t,
                           fileobj,
                           engine,
                           delimiter=options.delimiter,
                           format=format,
                           encoding=options.encoding,
                           inspector=inspector,
                           engine_name=engine.engine_name)

            # write zip content
            if options.zipfile and zipfile:
                zipfile.writestr(filename, fileobj.getvalue())
            if global_options.verbose:
                print t

        if zipfile:
            zipfile.close()
Example #57
0
 def get_table_names(cls, database: "Database", inspector: Inspector,
                     schema: Optional[str]) -> List[str]:
     """Need to disregard the schema for Sqlite"""
     return sorted(inspector.get_table_names())
Example #58
0
 def get_columns(
     cls, inspector: Inspector, table_name: str, schema: str
 ) -> List[dict]:
     return inspector.get_columns(table_name, schema)
Example #59
0
def downgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    if 'sensor_instance' in tables:
        op.drop_table('sensor_instance')
Example #60
0
def inspector_under_test(engine_under_test):
    from sqlalchemy.engine.reflection import Inspector

    return Inspector.from_engine(engine_under_test)
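This reads like a pytest fixture; a hedged sketch of how the pieces might pair up in a test module, assuming inspector_under_test above is registered with @pytest.fixture and engine_under_test yields a fresh engine:

import pytest
from sqlalchemy import create_engine

@pytest.fixture
def engine_under_test():
    return create_engine('sqlite://')

def test_starts_empty(inspector_under_test):
    # a brand-new in-memory SQLite database has no user tables yet
    assert inspector_under_test.get_table_names() == []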