Example No. 1
    def _expect_default(self, c_expected, col, seq=None):
        Table('t', self.metadata, col)

        if seq:
            seq._set_metadata(self.metadata)
        self.metadata.create_all(config.db)

        insp = Inspector.from_engine(config.db)
        diffs = []
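        # first pass: 't' exists only in the database, so _compare_tables
        # reports it as a removal; diffs[0][1] is the reflected Table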
        _compare_tables(
            set([(None, 't')]), set([]),
            [],
            insp, self.metadata, diffs, self.autogen_context)
        tab = diffs[0][1]
        eq_(_render_server_default_for_compare(
            tab.c.x.server_default, tab.c.x, self.autogen_context),
            c_expected)

        insp = Inspector.from_engine(config.db)
        diffs = []
        m2 = MetaData()
        Table('t', m2, Column('x', BigInteger()))
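        # second pass: the table exists on both sides but the model column
        # differs, so the diff carries the reflected existing_server_default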
        _compare_tables(
            set([(None, 't')]), set([(None, 't')]),
            [],
            insp, m2, diffs, self.autogen_context)
        server_default = diffs[0][0][4]['existing_server_default']
        eq_(_render_server_default_for_compare(
            server_default, tab.c.x, self.autogen_context),
            c_expected)
Example No. 2
    def _test_selfref_fk(self, recreate):
        bar = Table(
            'bar', self.metadata,
            Column('id', Integer, primary_key=True),
            Column('bar_id', Integer, ForeignKey('bar.id')),
            Column('data', String(50)),
            mysql_engine='InnoDB'
        )
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'data': 'x', 'bar_id': None})
        self.conn.execute(bar.insert(), {'id': 2, 'data': 'y', 'bar_id': 1})

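        # rename the column in batch mode; when the table is copied
        # (recreate), the self-referential FK must survive the copy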
        with self.op.batch_alter_table("bar", recreate=recreate) as batch_op:
            batch_op.alter_column(
                'data', new_column_name='newdata', existing_type=String(50))

        insp = Inspector.from_engine(self.conn)
        eq_(
            [(key['referred_table'],
             key['referred_columns'], key['constrained_columns'])
             for key in insp.get_foreign_keys('bar')],
            [('bar', ['id'], ['bar_id'])]
        )
Example No. 3
    def _expect_default(self, c_expected, col, seq=None):
        Table('t', self.metadata, col)

        self.autogen_context.metadata = self.metadata

        if seq:
            seq._set_metadata(self.metadata)
        self.metadata.create_all(config.db)

        insp = Inspector.from_engine(config.db)

        uo = ops.UpgradeOps(ops=[])
        _compare_tables(
            set([(None, 't')]), set([]),
            insp, uo, self.autogen_context)
        diffs = uo.as_diffs()
        tab = diffs[0][1]

        eq_(_render_server_default_for_compare(
            tab.c.x.server_default, tab.c.x, self.autogen_context),
            c_expected)

        insp = Inspector.from_engine(config.db)
        uo = ops.UpgradeOps(ops=[])
        m2 = MetaData()
        Table('t', m2, Column('x', BigInteger()))
        self.autogen_context.metadata = m2
        _compare_tables(
            set([(None, 't')]), set([(None, 't')]),
            insp, uo, self.autogen_context)
        diffs = uo.as_diffs()
        server_default = diffs[0][0][4]['existing_server_default']
        eq_(_render_server_default_for_compare(
            server_default, tab.c.x, self.autogen_context),
            c_expected)
Example No. 4
    def test_autogen(self):
        m = sa.MetaData()
        sa.Table('t', m, sa.Column('x', sa.Integer))

        def process_revision_directives(context, rev, generate_revisions):
            existing_upgrades = generate_revisions[0].upgrade_ops
            existing_downgrades = generate_revisions[0].downgrade_ops

            # model1 will run the upgrades, e.g. create the table,
            # model2 will run the downgrades as upgrades, e.g. drop
            # the table again

            generate_revisions[:] = [
                ops.MigrationScript(
                    util.rev_id(),
                    existing_upgrades,
                    ops.DowngradeOps(),
                    version_path=os.path.join(
                        _get_staging_directory(), "model1"),
                    head="model1@head"
                ),
                ops.MigrationScript(
                    util.rev_id(),
                    existing_downgrades,
                    ops.DowngradeOps(),
                    version_path=os.path.join(
                        _get_staging_directory(), "model2"),
                    head="model2@head"
                )
            ]

        with self._env_fixture(process_revision_directives, m):
            command.upgrade(self.cfg, "heads")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version"]
            )

            command.revision(
                self.cfg, message="some message",
                autogenerate=True)

            command.upgrade(self.cfg, "model1@head")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version", "t"]
            )

            command.upgrade(self.cfg, "model2@head")

            eq_(
                Inspector.from_engine(self.engine).get_table_names(),
                ["alembic_version"]
            )
Example No. 5
def setup():

    if application_table is None:
        define_apps_tables()
        log.debug('Apps tables defined in memory')

    if model.repo.are_tables_created():
        if not application_table.exists():

            # Create each table individually rather than
            # using metadata.create_all()
            application_table.create()
            application_tag_table.create()
            application_image_table.create()
            idea_table.create()
            idea_tag_table.create()

            log.debug('Apps tables created')
        else:
            log.debug('Apps tables already exist')
            from ckan.model.meta import engine
            # Check if existing tables need to be updated
            inspector = Inspector.from_engine(engine)
            columns = inspector.get_columns('application_tag')
            if 'id' not in [column['name'] for column in columns]:
                log.debug('Apps tables need to be updated')
                migrate_v2()

    else:
        log.debug('Apps table creation deferred')
Example No. 6
    def handle(self, options, global_options, *args):
        
        output_dir = os.path.join(options.output_dir, options.engine)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        
        engine = get_engine(options, global_options)

        if not args:
            print "Failed! You should pass one or more tables name."
            sys.exit(1)
            
        inspector = Inspector.from_engine(engine)
        
        tables = get_tables(global_options.apps_dir, tables=args,
            engine=options.engine, settings_file=global_options.settings, 
            local_settings_file=global_options.local_settings)

        for tablename, t in tables.items():
            if global_options.verbose:
                print '[%s] Dumping %s...' % (options.engine, tablename)
            filename = os.path.join(output_dir, tablename+'.txt')
            if options.text:
                format = 'txt'
            else:
                format = None
            dump_table(t, filename, engine, delimiter=options.delimiter, 
                format=format, encoding=options.encoding, inspector=inspector)
Example No. 7
def downgrade():
    """Downgrade database."""
    ctx = op.get_context()
    insp = Inspector.from_engine(ctx.connection.engine)

    for fk in insp.get_foreign_keys('b2share_block_schema'):
        if fk['referred_table'] == 'b2share_community':
            op.drop_constraint(
                op.f(fk['name']),
                'b2share_block_schema',
                type_='foreignkey'
            )

    op.drop_table('b2share_block_schema_version')

    for fk in insp.get_foreign_keys('b2share_community_schema_version'):
        if fk['referred_table'] == 'b2share_community':
            op.drop_constraint(
                op.f(fk['name']),
                'b2share_community_schema_version',
                type_='foreignkey'
            )

    op.drop_table('b2share_community_schema_version')
    op.drop_table('b2share_block_schema')
    op.drop_table('b2share_root_schema_version')
Example No. 8
    def __init__(self, db, verbose=False, regen=False, regenjson=False):
        self.dbpath = pathlib.Path(db)
        try:
            self.dbpath = self.dbpath.resolve()
        except FileNotFoundError:
            self.dbpath.touch()
            self.dbpath = self.dbpath.resolve()

        self.verbose = verbose
        self.dataengine = create_engine("sqlite:///{}".format(self.dbpath))
        self.inspect = Inspector.from_engine(self.dataengine)
        if regen:
            print(Mood.happy("Deleting existing videoinfo table."))
            VideoInfo.__table__.drop(self.dataengine)  # pylint: disable=e1101
            if regenjson:
                print(Mood.happy("Deleting existing videojson table."))
                VideoJSON.__table__.drop(self.dataengine)   # pylint: disable=e1101
        print(Mood.happy("Creating database tables."))
        SQLBase.metadata.create_all(self.dataengine)
        if verbose:
            print(Mood.happy("Tables List:\n{}".format(self.inspect.get_table_names())))
            if "videoinfo" in self.inspect.get_table_names():
                print(Mood.happy("Column Names:\n{}".format(self.inspect.get_columns("videoinfo"))))
        sessionbase = sessionmaker(bind=self.dataengine)
        self.session = sessionbase()  # pylint: disable=c0103
Example No. 9
    def handle(self, options, global_options, *args):
        
        engine = get_engine(options, global_options)

        if len(args) != 2:
            print self.print_help(self.prog_name, 'dumptablefile')
            sys.exit(1)
            
        inspector = Inspector.from_engine(engine)

        name = args[0]
        tables = get_tables(global_options.apps_dir, tables=[name],
            engine_name=options.engine, settings_file=global_options.settings, 
            local_settings_file=global_options.local_settings)
        t = tables[name]
        if global_options.verbose:
            print '[%s] Dumping %s...' % (options.engine, show_table(name, t, 0, 1)),
        if options.text:
            format = 'txt'
        else:
            format = None
        t = dump_table(t, args[1], engine, delimiter=options.delimiter, 
            format=format, encoding=options.encoding, inspector=inspector,
            engine_name=engine.engine_name)
        if global_options.verbose:
            print t
Example No. 10
def upgrade():
    c = get_context()
    # drop foreign keys for mysql
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        insp = Inspector.from_engine(c.connection.engine)
        for t in [
            "groups_resources_permissions",
            "users_resources_permissions",
            "resources",
        ]:
            for constraint in insp.get_foreign_keys(t):
                if constraint["referred_columns"] == ["resource_id"]:
                    op.drop_constraint(constraint["name"], t, type="foreignkey")

    op.alter_column(
        "resources",
        "resource_id",
        type_=sa.Integer(),
        existing_type=sa.BigInteger(),
        autoincrement=True,
        nullable=False,
    )
    op.alter_column(
        "resources", "parent_id", type_=sa.Integer(), existing_type=sa.BigInteger()
    )
    op.alter_column(
        "users_resources_permissions",
        "resource_id",
        type_=sa.Integer(),
        existing_type=sa.BigInteger(),
        nullable=False,
    )
    op.alter_column(
        "groups_resources_permissions",
        "resource_id",
        type_=sa.Integer(),
        existing_type=sa.BigInteger(),
        nullable=False,
    )

    # recreate foreign keys for mysql
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(
            "groups_resources_permissions_resource_fk",
            "groups_resources_permissions",
            "resources",
            ["resource_id"],
            ["resource_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )
        op.create_foreign_key(
            "users_resources_permissions_fk",
            "users_resources_permissions",
            "resources",
            ["resource_id"],
            ["resource_id"],
            onupdate="CASCADE",
            ondelete="CASCADE",
        )
Example No. 11
def _produce_net_changes(connection, metadata, diffs, autogen_context,
                         object_filters=(),
                         include_schemas=False):
    inspector = Inspector.from_engine(connection)
    # TODO: don't hardcode alembic_version here?
    conn_table_names = set()
    if include_schemas:
        schemas = set(inspector.get_schema_names())
        # drop information_schema and represent the default schema as None
        schemas.discard("information_schema")
        schemas.add(None)
        schemas.discard(connection.dialect.default_schema_name)
    else:
        schemas = [None]

    for s in schemas:
        tables = set(inspector.get_table_names(schema=s)).\
                difference(['alembic_version'])
        conn_table_names.update(zip([s] * len(tables), tables))

    metadata_table_names = OrderedSet([(table.schema, table.name)
                                for table in metadata.sorted_tables])

    _compare_tables(conn_table_names, metadata_table_names,
                    object_filters,
                    inspector, metadata, diffs, autogen_context)
Example No. 12
def get_unique_fields(session, instance):
    table_name = get_object_type_for(instance)
    if table_name != 'vulnerability':
        engine = session.connection().engine
        insp = Inspector.from_engine(engine)
        unique_constraints = insp.get_unique_constraints(table_name)
    else:
        # Vulnerability unique index can't be retrieved via reflection.
        # If the unique index changes we need to update here.
        # A test should fail when the unique index changes
        unique_constraints = []
        unique_constraints.append({
            'column_names': [
                'name',
                'description',
                'type',
                'host_id',
                'service_id',
                'method',
                'parameter_name',
                'path',
                'website',
                'workspace_id',
            ]
        })
    if unique_constraints:
        for unique_constraint in unique_constraints:
            yield unique_constraint['column_names']
Example No. 13
 def connect(self):
     engine = create_engine('postgresql://*****:*****@localhost/search_engine', echo=True)
     engine.connect()
     inspector = Inspector.from_engine(engine)
     table_names = ["records", "domains", "trackers"]
     for table_name in table_names:
         if table_name not in inspector.get_table_names():
             print(table_name + " does not exist")
             print("Creating " + table_name)
             metadata = MetaData(bind=engine)
             if table_name == "records":
                 main_table = Table(table_name, metadata,
                                    Column('id', Integer, primary_key=True, autoincrement='ignore_fk'),
                                    Column('title', String(100)),
                                    Column('meta_data', String(200)),
                                    Column('text', Text),
                                    Column('snapshot', Text),
                                    Column('url', Text),
                                    Column('updated_at', DateTime))
                 metadata.create_all()
             elif table_name == "domains":
                 main_table = Table(table_name, metadata,
                                    Column('id', Integer, primary_key=True, autoincrement='ignore_fk'),
                                    Column('name', String(100)),
                                    Column('disabled', Boolean),
                                    Column('Blocked', Boolean))
                 metadata.create_all()
             elif table_name == "trackers":
                 main_table = Table(table_name, metadata,
                                    Column('id', Integer, primary_key=True, autoincrement='ignore_fk'),
                                    Column('last_url', Text))
                 metadata.create_all()
     engine = create_engine('postgresql://*****:*****@localhost/search_engine', echo=True)
     return engine.connect()
Example No. 14
    def handle(self, options, global_options, *args):
        from sqlalchemy import Table
        from uliweb.orm import reflect_model

        engine = get_engine(options, global_options)

        insp = Inspector.from_engine(engine)
        if not args:
            tables = insp.get_table_names()
        else:
            tables = args

        print '#coding=utf8'
        print 'from uliweb.orm import *'
        print 'from uliweb.i18n import ugettext_lazy as _'
        print 'from uliweb.utils.common import get_var'
        print '\n'

        meta = engine.metadata
        for name in tables:
            table = Table(name, meta)
            try:
                insp.reflecttable(table, None)
                print reflect_model(table)
                print '\n'
            except Exception as e:
                import traceback
                traceback.print_exc()
Example No. 15
    def _compare_default_roundtrip(
            self, type_, orig_default, alternate=None, diff_expected=None):
        diff_expected = diff_expected \
            if diff_expected is not None \
            else alternate is not None
        if alternate is None:
            alternate = orig_default

        t1 = Table("test", self.metadata,
                   Column("somecol", type_, server_default=orig_default))
        t2 = Table("test", MetaData(),
                   Column("somecol", type_, server_default=alternate))

        t1.create(self.bind)

        insp = Inspector.from_engine(self.bind)
        cols = insp.get_columns(t1.name)
        insp_col = Column("somecol", cols[0]['type'],
                          server_default=text(cols[0]['default']))
        op = ops.AlterColumnOp("test", "somecol")
        _compare_server_default(
            self.autogen_context, op,
            None, "test", "somecol", insp_col, t2.c.somecol)

        diffs = op.to_diff_tuple()
        eq_(bool(diffs), diff_expected)
Example No. 16
def upgrade():
    c = get_context()
    # drop foreign keys for mysql
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        insp = Inspector.from_engine(c.connection.engine)
        for t in ['groups_resources_permissions', 'users_resources_permissions', 'resources']:
            for constraint in insp.get_foreign_keys(t):
                if constraint['referred_columns'] == ['resource_id']:
                    op.drop_constraint(constraint['name'], t, type='foreignkey')

    with op.batch_alter_table('resources', schema=None) as batch_op:
        batch_op.alter_column('resource_id',
                    type_=sa.Integer(), existing_type=sa.BigInteger(),
                    autoincrement=True)
    with op.batch_alter_table('resources', schema=None) as batch_op:
        batch_op.alter_column('parent_id',
                    type_=sa.Integer(), existing_type=sa.BigInteger())
    with op.batch_alter_table('users_resources_permissions', schema=None) as batch_op:
        batch_op.alter_column('resource_id',
                        type_=sa.Integer(), existing_type=sa.BigInteger())
    with op.batch_alter_table('groups_resources_permissions', schema=None) as batch_op:
        batch_op.alter_column('resource_id',
                        type_=sa.Integer(), existing_type=sa.BigInteger())

    # recreate foreign keys for mysql
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key("groups_resources_permissions_resource_fk",
                              'groups_resources_permissions',
                              "resources", ["resource_id"], ["resource_id"],
                              onupdate='CASCADE', ondelete='CASCADE')
        op.create_foreign_key("users_resources_permissions_fk",
                              'users_resources_permissions',
                              "resources", ["resource_id"], ["resource_id"],
                              onupdate='CASCADE', ondelete='CASCADE')
Example No. 17
 def init_db(self):
     engine = self.session.get_bind(mapper=None, clause=None)        
     
     inspector = Inspector.from_engine(engine)
     if 'ab_user' not in inspector.get_table_names():
         print "Security DB not found Creating..."
         Base.metadata.create_all(engine)
         print "Security DB Created"
         self.migrate_db()
     if self.session.query(Role).filter_by(name = self.auth_role_admin).first() is None:
         role = Role()
         role.name = self.auth_role_admin
         self.session.add(role)
         self.session.commit()
         print "Inserted Role for public access", self.auth_role_admin            
     if not self.session.query(Role).filter_by(name = self.auth_role_public).first():
         role = Role()
         role.name = self.auth_role_public
         self.session.add(role)
         self.session.commit()
         print "Inserted Role for public access", self.auth_role_public
     if not self.session.query(User).all():
         user = User()
         user.first_name = 'Admin'
         user.last_name = 'User'
         user.username = '******'
         user.password = '******'
         user.active = True
         user.role = self.session.query(Role).filter_by(name = self.auth_role_admin).first()
         self.session.add(user)
         self.session.commit()
         print "Inserted initial Admin user"
         print "Login using Admin/general"
Example No. 18
  def __initializeConnection( self, dbPath ):
    """
    Collects from the CS all the info needed to connect to the DB.
    This should be in a base class eventually
    """

    result = getDBParameters( dbPath )
    if not result[ 'OK' ]:
      raise Exception( 'Cannot get database parameters: %s' % result['Message'] )

    dbParameters = result[ 'Value' ]
    self.log.debug("db parameters: %s" % dbParameters)
    self.host = dbParameters[ 'Host' ]
    self.port = dbParameters[ 'Port' ]
    self.user = dbParameters[ 'User' ]
    self.password = dbParameters[ 'Password' ]
    self.dbName = dbParameters[ 'DBName' ]

    self.engine = create_engine( 'mysql://%s:%s@%s:%s/%s' % ( self.user,
                                                              self.password,
                                                              self.host,
                                                              self.port,
                                                              self.dbName ),
                                 pool_recycle = 3600,
                                 echo_pool = True,
                                 echo = self.log.getLevel() == 'DEBUG')
    self.sessionMaker_o = sessionmaker( bind = self.engine )
    self.inspector = Inspector.from_engine( self.engine )
Example No. 19
def create_table(engine, tool):
    """ checks if a table  exists and create one if it doesn't """

    inspector = Inspector.from_engine(engine)
    tables = set(inspector.get_table_names())
    if tool.__tablename__ not in tables:
        Base.metadata.create_all(engine)
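
A minimal usage sketch for the helper above; Base and the mapped Tool class are hypothetical stand-ins, assumed to live in the same module as create_table:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Tool(Base):
    __tablename__ = 'tools'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine('sqlite://')
create_table(engine, Tool)  # creates 'tools' because it is missing
create_table(engine, Tool)  # 'tools' now exists, so this is a no-op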
Example No. 20
    def handle(self, options, global_options, *args):
        from sqlalchemy import Table
        from uliweb.orm import reflect_table_model

        engine = get_engine(options, global_options)

        insp = Inspector.from_engine(engine)
        if not args:
            tables = insp.get_table_names()
        else:
            tables = args

        mapping = {}
        print '#coding=utf8'
        print 'from uliweb.orm import *'
        print 'from uliweb.i18n import ugettext_lazy as _'
        print 'from uliweb.utils.common import get_var'
        if options.oracle:
            print 'from sqlalchemy.dialects.oracle import VARCHAR2'
            mapping = {'str': 'VARCHAR2'}
        print '\n'

        meta = engine.metadata
        for name in tables:
            table = Table(name, meta)
            try:
                insp.reflecttable(table, None)
                print reflect_table_model(table, mapping, without_id=not options.auto_id)
                print '\n'
            except Exception as e:
                import traceback
                traceback.print_exc()
Example No. 21
def delete_all_tables(db):
    """Drops all tables in the database"""
    conn = db.engine.connect()
    transaction = conn.begin()
    inspector = Inspector.from_engine(db.engine)
    metadata = MetaData()

    all_schema_tables = get_all_tables(db)
    tables = []
    all_fkeys = []
    for schema, schema_tables in all_schema_tables.iteritems():
        for table_name in schema_tables:
            fkeys = [ForeignKeyConstraint((), (), name=fk['name'])
                     for fk in inspector.get_foreign_keys(table_name, schema=schema)
                     if fk['name']]
            tables.append(Table(table_name, metadata, *fkeys, schema=schema))
            all_fkeys.extend(fkeys)

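    # drop all FK constraints first so the tables themselves can be dropped
    # in any order, even with circular dependencies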
    for fkey in all_fkeys:
        conn.execute(DropConstraint(fkey))
    for table in tables:
        conn.execute(DropTable(table))
    for schema in all_schema_tables:
        if schema != 'public':
            conn.execute(DropSchema(schema))
    transaction.commit()
Example No. 22
def _produce_net_changes(connection, metadata, diffs, autogen_context,
                         object_filters=(),
                         include_schemas=False):
    inspector = Inspector.from_engine(connection)
    conn_table_names = set()

    default_schema = connection.dialect.default_schema_name
    if include_schemas:
        schemas = set(inspector.get_schema_names())
        # drop information_schema and represent the default schema as None
        schemas.discard("information_schema")
        schemas.add(None)
        schemas.discard(default_schema)
    else:
        schemas = [None]

    version_table_schema = autogen_context['context'].version_table_schema
    version_table = autogen_context['context'].version_table

    for s in schemas:
        tables = set(inspector.get_table_names(schema=s))
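        # skip alembic's own version table on the reflected side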
        if s == version_table_schema:
            tables = tables.difference(
                [autogen_context['context'].version_table]
            )
        conn_table_names.update(zip([s] * len(tables), tables))

    metadata_table_names = OrderedSet(
        [(table.schema, table.name) for table in metadata.sorted_tables]
    ).difference([(version_table_schema, version_table)])

    _compare_tables(conn_table_names, metadata_table_names,
                    object_filters,
                    inspector, metadata, diffs, autogen_context)
Example No. 23
def upgrade():
    c = get_context()
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        insp = Inspector.from_engine(c.connection.engine)
        for t in ['groups_permissions', 'groups_resources_permissions',
                  'users_groups', 'resources']:
            for constraint in insp.get_foreign_keys(t):
                if constraint['referred_columns'] == ['group_name']:
                    op.drop_constraint(constraint['name'], t,
                                       type='foreignkey')

    op.drop_column('groups', 'id')
    op.alter_column('groups', 'group_name',
                    type_=sa.String(128),
                    existing_type=sa.String(50),
                    )
    op.create_primary_key('groups_pkey', 'groups', cols=['group_name'])

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(None, 'groups_permissions', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'groups_resources_permissions', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'users_groups', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'], onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None, 'resources', 'groups',
                              remote_cols=['group_name'],
                              local_cols=['owner_group_name'], onupdate='CASCADE',
                              ondelete='SET NULL')
Example No. 24
def setup():

    if harvest_source_table is None:
        define_harvester_tables()
        log.debug('Harvest tables defined in memory')

    if model.package_table.exists():
        if not harvest_source_table.exists():

            # Create each table individually rather than
            # using metadata.create_all()
            harvest_source_table.create()
            harvest_job_table.create()
            harvest_object_table.create()
            harvest_gather_error_table.create()
            harvest_object_error_table.create()

            log.debug('Harvest tables created')
        else:
            from ckan.model.meta import engine
            log.debug('Harvest tables already exist')
            # Check if existing tables need to be updated
            inspector = Inspector.from_engine(engine)
            columns = inspector.get_columns('harvest_source')
            if 'title' not in [column['name'] for column in columns]:
                log.debug('Harvest tables need to be updated')
                migrate_v2()

    else:
        log.debug('Harvest table creation deferred')
Example No. 25
def delete_all_tables(db):
    """Drops all tables in the database"""
    conn = db.engine.connect()
    transaction = conn.begin()
    inspector = Inspector.from_engine(db.engine)
    metadata = MetaData()

    all_schema_tables = get_all_tables(db)
    tables = []
    all_fkeys = []
    for schema, schema_tables in all_schema_tables.iteritems():
        for table_name in schema_tables:
            fkeys = [ForeignKeyConstraint((), (), name=fk['name'])
                     for fk in inspector.get_foreign_keys(table_name, schema=schema)
                     if fk['name']]
            tables.append(Table(table_name, metadata, *fkeys, schema=schema))
            all_fkeys.extend(fkeys)

    for fkey in all_fkeys:
        conn.execute(DropConstraint(fkey))
    for table in tables:
        conn.execute(DropTable(table))
    for schema in all_schema_tables:
        if schema != 'public':
            row = conn.execute("""
                SELECT 'DROP FUNCTION ' || ns.nspname || '.' || proname || '(' || oidvectortypes(proargtypes) || ')'
                FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid)
                WHERE ns.nspname = '{}'  order by proname;
            """.format(schema))
            for stmt, in row:
                conn.execute(stmt)
            conn.execute(DropSchema(schema))
    transaction.commit()
Example No. 26
    def migrate_db(self):
        """
            Migrate security tables from Flask-AppBuilder 0.2.X to 0.3.X
        """
        engine = self.session.get_bind(mapper=None, clause=None)
        inspector = Inspector.from_engine(engine)
        if 'user' in inspector.get_table_names() and 'role' in inspector.get_table_names() and 'permission' in inspector.get_table_names():
            log.info("Found previous security tables, migrating...")

            metadata = MetaData(engine)

            old_user = Table('user', metadata, autoload=True)
            old_role = Table('role', metadata, autoload=True)
            old_permission = Table('permission', metadata, autoload=True)
            old_permission_view = Table('permission_view', metadata, autoload=True)
            old_view_menu = Table('view_menu', metadata, autoload=True)
            old_permission_view_role = Table('permission_view_role', metadata, autoload=True)

            log.info("Migrating Views and Menus")
            self.migrate_obj(old_view_menu, ViewMenu)

            log.info("Migrating Permissions")
            self.migrate_obj(old_permission, Permission)

            log.info("Migrating Permissions on Views")
            self.migrate_obj(old_permission_view, PermissionView)

            log.info("Migrating Roles")
            self.migrate_obj(old_role, Role)

            log.info("Migrating Roles to Permissions on Views")
            self.migrate_obj(old_permission_view_role, self.quick_mapper(assoc_permissionview_role))

            log.info("Migrating Users")
            self.migrate_obj(old_user, User)
Example No. 27
    def _assert_fk_diff(
            self, diff, type_, source_table, source_columns,
            target_table, target_columns, name=None, conditional_name=None,
            source_schema=None):
        # the public API for ForeignKeyConstraint was not very rich
        # in 0.7, 0.8, so here we use the well-known but slightly
        # private API to get at its elements
        (fk_source_schema, fk_source_table,
         fk_source_columns, fk_target_schema, fk_target_table,
         fk_target_columns) = _fk_spec(diff[1])

        eq_(diff[0], type_)
        eq_(fk_source_table, source_table)
        eq_(fk_source_columns, source_columns)
        eq_(fk_target_table, target_table)
        eq_(fk_source_schema, source_schema)

        eq_([elem.column.name for elem in diff[1].elements],
            target_columns)
        if conditional_name is not None:
            if config.requirements.no_fk_names.enabled:
                eq_(diff[1].name, None)
            elif conditional_name == 'servergenerated':
                fks = Inspector.from_engine(self.bind).\
                    get_foreign_keys(source_table)
                server_fk_name = fks[0]['name']
                eq_(diff[1].name, server_fk_name)
            else:
                eq_(diff[1].name, conditional_name)
        else:
            eq_(diff[1].name, name)
Example No. 28
    def db_worker(cls, manager):
        b = cls(manager)
        settings = manager.settings
        drop = settings.get('SQLALCHEMYBACKEND_DROP_ALL_TABLES')
        clear_content = settings.get('SQLALCHEMYBACKEND_CLEAR_CONTENT')
        inspector = Inspector.from_engine(b.engine)

        metadata_m = b.models['MetadataModel']
        queue_m = b.models['QueueModel']
        if drop:
            existing = inspector.get_table_names()
            if metadata_m.__table__.name in existing:
                metadata_m.__table__.drop(bind=b.engine)
            if queue_m.__table__.name in existing:
                queue_m.__table__.drop(bind=b.engine)
        metadata_m.__table__.create(bind=b.engine)
        queue_m.__table__.create(bind=b.engine)

        if clear_content:
            session = b.session_cls()
            session.execute(metadata_m.__table__.delete())
            session.execute(queue_m.__table__.delete())
            session.close()

        b._metadata = Metadata(b.session_cls, metadata_m,
                               settings.get('SQLALCHEMYBACKEND_CACHE_SIZE'))
        b._queue = Queue(b.session_cls, queue_m, settings.get('SPIDER_FEED_PARTITIONS'))
        return b
Example No. 29
def init():

    # Create schema
    inspector = Inspector.from_engine(db.engine)
    tables = [table_name for table_name in inspector.get_table_names()]
    if 'synchrony_domains' not in tables:
        db.create_all()

    init_user_groups()

    # Attach HTTP endpoints
    from synchrony.resources import users
    from synchrony.resources import groups
    from synchrony.resources import privs
    from synchrony.resources import peers
    from synchrony.resources import domains
    from synchrony.resources import networks
    from synchrony.resources import revisions
    from synchrony.resources import config

    api.add_resource(networks.NetworkCollection,            "/networks")
    api.add_resource(networks.NetworkResource,              "/networks/<string:network>")
    api.add_resource(networks.NetworkPeerCollection,        "/networks/<string:network>/peers")

    api.add_resource(domains.DomainCollection,              "/domains")
    api.add_resource(domains.DomainResource,                "/domains/<domain>")
    api.add_resource(domains.DomainCountResource,           "/domains/count")

    api.add_resource(revisions.RevisionCollection,          "/revisions")
    api.add_resource(revisions.RevisionResource,            "/revisions/<string:hash>")
    api.add_resource(revisions.RevisionContentResource,     "/revisions/<string:hash>/content")
    api.add_resource(revisions.RevisionDownloadsCollection, "/revisions/downloads")
    api.add_resource(revisions.RevisionDownloadsResource,   "/revisions/downloads/<string:network>")
    api.add_resource(revisions.RevisionSearchResource,      "/revisions/search/<string:query>")
#    api.add_resource(revisions.RevisionFeedbackResource,    "/revisions/downloads/<string:network>/<path:url>")

    api.add_resource(users.UserCollection,                  "/users")
    api.add_resource(users.UserResource,                    "/users/<string:username>")
    api.add_resource(users.UserSessionsResource,            "/users/<string:username>/sessions")
    api.add_resource(users.UserFriendsCollection,           "/users/<string:username>/friends")
    api.add_resource(users.UserRevisionCollection,          "/users/<string:username>/revisions")
    api.add_resource(users.UserRevisionCountResource,       "/users/<string:username>/revisions/count")
    api.add_resource(users.UserAvatarResource,              "/users/<string:username>/avatar")


    api.add_resource(groups.UserGroupCollection,            "/groups")
    api.add_resource(groups.UserGroupResource,              "/groups/<string:name>")

    api.add_resource(privs.PrivCollection,                  "/privs")

    api.add_resource(peers.PeerCollection,                  "/peers")
    api.add_resource(peers.PeerNetworkResource,             "/peers/<string:network>")
    api.add_resource(peers.PeerResource,                    "/peers/<string:network>/<int:node_id>")
    api.add_resource(peers.PublicRevisionCollection,        "/peers/revisions")
    api.add_resource(peers.PublicRevisionResource,          "/peers/revisions/<string:content_hash>")

    api.add_resource(peers.PeerTestSet,                     "/peers/test")
    api.add_resource(peers.PeerTestGet,                     "/peers/test/<path:url>")

    api.add_resource(config.ConfigCollection,               "/config")
Example No. 30
    def _compare_default(self, t1, t2, col, rendered):
        t1.create(self.bind, checkfirst=True)
        insp = Inspector.from_engine(self.bind)
        cols = insp.get_columns(t1.name)
        ctx = self.autogen_context.migration_context

        return ctx.impl.compare_server_default(None, col, rendered, cols[0]["default"])
Example No. 31
def make_risk_id_column_nullable(context):
    """ Make the risk_id column of the Risk table nullable.
        This is so that the user can create custom risks. These risks don't
        have dexterity counterparts in the survey, so we don't have a value for
        risk_id.
    """
    session = Session()
    inspector = Inspector.from_engine(session.bind)
    log.info('Making the risk_id column of Risk table nullable')
    session.execute("ALTER TABLE %s ALTER COLUMN risk_id DROP NOT NULL;" %
                    model.Risk.__table__.name)
    datamanager.mark_changed(session)
Example No. 32
 def _all_tables_present(self):
     from sqlalchemy.engine.reflection import Inspector
     inspector = Inspector.from_engine(self.engine)
     all_tables = set(inspector.get_table_names())
     zult = True
     for table_name in (Resource.__tablename__, Product.__tablename__,
                        ProductKeyValue.__tablename__, SymbolKeyValue.__tablename__,
                        Content.__tablename__, ContentKeyValue.__tablename__, PRODUCTS_FROM_RESOURCES_TABLE_NAME):
         present = table_name in all_tables
         LOG.debug("table {} {} present in database".format(table_name, "is" if present else "is not"))
         zult = False if not present else zult
     return zult
Example No. 33
    def test_model_creation(self):
        """
            Test Model creation
        """
        from sqlalchemy.engine.reflection import Inspector

        engine = self.db.session.get_bind(mapper=None, clause=None)
        inspector = Inspector.from_engine(engine)
        # Check if tables exist
        ok_('model1' in inspector.get_table_names())
        ok_('model2' in inspector.get_table_names())
        ok_('model_with_enums' in inspector.get_table_names())
Example No. 34
    def test_create_with_FK(self):
        # create and keep FK
        b = create_book(uri_conn=db_sqlite_uri,
                        keep_foreign_keys=True,
                        overwrite=True)
        b.session.close()

        insp = Inspector.from_engine(create_engine(db_sqlite_uri))
        fk_total = []
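        # each entry is the list of FKs for one table, so the assertion
        # below effectively counts the reflected tables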
        for tbl in insp.get_table_names():
            fk_total.append(insp.get_foreign_keys(tbl))
        assert len(fk_total) == 25
Example No. 35
def db_wipe(db_engines, request):
    """Cleans up the database after a test run"""
    inspector = Inspector.from_engine(db_engines['common'])
    tables = inspector.get_table_names()
    # Assume that if db_wipe is used it means we want to start fresh as well.
    if 'modules' in tables:
        _wipe_db(db_engines['super'])

    def finalize():
        _wipe_db(db_engines['super'])

    request.addfinalizer(finalize)
Example No. 36
def upgrade():

    bind = op.get_bind()

    inspector = insp.from_engine(bind)
    pk_constraint = inspector.get_pk_constraint(ML2_NEXUS_MAPPING_TABLE)
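    # the existing PK name varies by backend, so look it up via reflection
    # before dropping it; the key is then re-created over the full column set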
    op.drop_constraint(pk_constraint.get('name'),
                       ML2_NEXUS_MAPPING_TABLE,
                       type_='primary')
    op.create_primary_key(op.f('pk_cisco_ml2_nexus_host_interface_mapping'),
                          ML2_NEXUS_MAPPING_TABLE,
                          ['host_id', 'switch_ip', 'if_id'])
Example No. 37
def upgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()

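    # create each table only if it is not present yet, so the migration is
    # safe to re-run against a partially initialised database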
    if "scripts" not in tables:
        op.create_table(
            "scripts",
            sa.Column("name", sa.String(), nullable=True),
            sa.Column("uid", sa.String(), nullable=False),
            sa.Column("enabled", sa.Boolean(), nullable=True),
            sa.Column("triggers", sa.JSON(), nullable=True),
            sa.PrimaryKeyConstraint("uid"),
        )

    if "settings" not in tables:
        op.create_table(
            "settings",
            sa.Column("key", sa.String(), nullable=False),
            sa.Column("value", sa.String(), nullable=True),
            sa.PrimaryKeyConstraint("key"),
        )

    if "tasks" not in tables:
        op.create_table(
            "tasks",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("task_id", sa.String(), nullable=True),
            sa.Column("task_arguments", sa.String(), nullable=True),
            sa.Column("priority", sa.String(), nullable=True),
            sa.Column("status", sa.String(), nullable=True),
            sa.Column("output", sa.String(), nullable=True),
            sa.Column("time_scheduled", sa.DateTime(), nullable=True),
            sa.Column("time_started", sa.DateTime(), nullable=True),
            sa.Column("time_finished", sa.DateTime(), nullable=True),
            sa.PrimaryKeyConstraint("id"),
        )

    if "logs" not in tables:
        op.create_table(
            "logs",
            sa.Column("id", sa.Integer(), nullable=False),
            sa.Column("script", sa.Integer(), nullable=True),
            sa.Column("text", sa.String(), nullable=True),
            sa.Column("error", sa.String(), nullable=True),
            sa.Column("date", sa.DateTime(), nullable=True),
            sa.Column("exitcode", sa.Integer(), nullable=True),
            sa.ForeignKeyConstraint(
                ["script"],
                ["scripts.uid"],
            ),
            sa.PrimaryKeyConstraint("id"),
        )
Example No. 38
 def create_db(self):
     try:
         engine = self.get_session.get_bind(mapper=None, clause=None)
         inspector = Inspector.from_engine(engine)
         if "ab_user" not in inspector.get_table_names():
             log.info(c.LOGMSG_INF_SEC_NO_DB)
             Base.metadata.create_all(engine)
             log.info(c.LOGMSG_INF_SEC_ADD_DB)
         super(SecurityManager, self).create_db()
     except Exception as e:
         log.error(c.LOGMSG_ERR_SEC_CREATE_DB.format(str(e)))
         exit(1)
Example No. 39
def is_empty_database(driver):
    '''
    check if the database is empty or not
    Args:
        driver (object): an alias or index driver instance

    Returns:
        Boolean
    '''
    table_list = Inspector.from_engine(driver.engine).get_table_names()

    return len(table_list) == 0
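
A quick, illustrative way to exercise the helper above: the driver argument only needs an .engine attribute, so a namedtuple stands in here (not part of the original example):

from collections import namedtuple
from sqlalchemy import create_engine

FakeDriver = namedtuple('FakeDriver', ['engine'])
driver = FakeDriver(engine=create_engine('sqlite://'))
assert is_empty_database(driver)  # a fresh in-memory database has no tables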
Example No. 40
def gen_columns_info(engine: Engine,
                     tablename: str) -> Generator[SqlaColumnInspectionInfo,
                                                  None, None]:
    """
    For the specified table, generate column information as
    :class:`SqlaColumnInspectionInfo` objects.
    """
    # Dictionary structure: see
    # http://docs.sqlalchemy.org/en/latest/core/reflection.html#sqlalchemy.engine.reflection.Inspector.get_columns  # noqa
    insp = Inspector.from_engine(engine)
    for d in insp.get_columns(tablename):
        yield SqlaColumnInspectionInfo(d)
Example No. 41
    def _assert_fk_diff(
        self,
        diff,
        type_,
        source_table,
        source_columns,
        target_table,
        target_columns,
        name=None,
        conditional_name=None,
        source_schema=None,
        onupdate=None,
        ondelete=None,
        initially=None,
        deferrable=None,
    ):
        # the public API for ForeignKeyConstraint was not very rich
        # in 0.7, 0.8, so here we use the well-known but slightly
        # private API to get at its elements
        (
            fk_source_schema,
            fk_source_table,
            fk_source_columns,
            fk_target_schema,
            fk_target_table,
            fk_target_columns,
            fk_onupdate,
            fk_ondelete,
            fk_deferrable,
            fk_initially,
        ) = _fk_spec(diff[1])

        eq_(diff[0], type_)
        eq_(fk_source_table, source_table)
        eq_(fk_source_columns, source_columns)
        eq_(fk_target_table, target_table)
        eq_(fk_source_schema, source_schema)
        eq_(fk_onupdate, onupdate)
        eq_(fk_ondelete, ondelete)
        eq_(fk_initially, initially)
        eq_(fk_deferrable, deferrable)

        eq_([elem.column.name for elem in diff[1].elements], target_columns)
        if conditional_name is not None:
            if conditional_name == "servergenerated":
                fks = Inspector.from_engine(
                    self.bind).get_foreign_keys(source_table)
                server_fk_name = fks[0]["name"]
                eq_(diff[1].name, server_fk_name)
            else:
                eq_(diff[1].name, conditional_name)
        else:
            eq_(diff[1].name, name)
Example No. 42
def upgrade(op, tables, tester):
    inspector = Inspector.from_engine(op.get_bind())
    manifestblob_indexes = inspector.get_indexes("manifestblob")

    if not "manifestblob_repository_id_blob_id" in [
            i["name"] for i in manifestblob_indexes
    ]:
        op.create_index(
            "manifestblob_repository_id_blob_id",
            "manifestblob",
            ["repository_id", "blob_id"],
        )
Example No. 43
 def test_dont_barf_on_already_reflected(self):
     from sqlalchemy.util import OrderedSet
     inspector = Inspector.from_engine(self.bind)
     uo = ops.UpgradeOps(ops=[])
     autogenerate.compare._compare_tables(
         OrderedSet([(None, 'extra'), (None, 'user')]), OrderedSet(),
         inspector, uo, self.autogen_context)
     eq_([(rec[0], rec[1].name) for rec in uo.as_diffs()], [
         ('remove_table', 'extra'),
         ('remove_index', 'pw_idx'),
         ('remove_table', 'user'),
     ])
Example No. 44
    def test_ix_existing(self):
        self._table_w_index_fixture()

        with self.op.batch_alter_table("t_w_ix") as batch_op:
            batch_op.alter_column('data', type_=String(30))
            batch_op.create_index("ix_data", ["data"])

        insp = Inspector.from_engine(config.db)
        eq_(
            set((ix['name'], tuple(ix['column_names']))
                for ix in insp.get_indexes('t_w_ix')),
            set([('ix_data', ('data', )), ('ix_thing', ('thing', ))]))
Example No. 45
    def handle(self, options, global_options, *args):
        from zipfile import ZipFile, ZIP_DEFLATED
        from StringIO import StringIO

        output_dir = os.path.join(options.output_dir, options.engine)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        engine = get_engine(options, global_options)

        zipfile = None
        if options.zipfile:
            zipfile = ZipFile(options.zipfile, 'w', compression=ZIP_DEFLATED)

        inspector = Inspector.from_engine(engine)

        tables = get_sorted_tables(
            get_tables(global_options.apps_dir,
                       args,
                       engine_name=options.engine,
                       settings_file=global_options.settings,
                       local_settings_file=global_options.local_settings))
        _len = len(tables)
        for i, (name, t) in enumerate(tables):
            if global_options.verbose:
                print 'Dumping %s...' % show_table(name, t, i, _len),
            filename = os.path.join(output_dir, name + '.txt')
            if options.text:
                format = 'txt'
            else:
                format = None
            #process zipfile
            if options.zipfile:
                fileobj = StringIO()
                filename = os.path.basename(filename)
            else:
                fileobj = filename
            t = dump_table(t,
                           fileobj,
                           engine,
                           delimiter=options.delimiter,
                           format=format,
                           encoding=options.encoding,
                           inspector=inspector,
                           engine_name=engine.engine_name)
            #write zip content
            if options.zipfile and zipfile:
                zipfile.writestr(filename, fileobj.getvalue())
            if global_options.verbose:
                print t

        if zipfile:
            zipfile.close()
Example No. 46
 def check_and_create_tables(self, is_drop, is_clear, models):
     inspector = Inspector.from_engine(self.engine)
     for model in models:
         if is_drop:
             if model.__table__.name in inspector.get_table_names():
                 model.__table__.drop(bind=self.engine)
         if model.__table__.name not in inspector.get_table_names():
             model.__table__.create(bind=self.engine)
         if is_clear:
             session = self.session_cls()
             session.execute(model.__table__.delete())
             session.close()
Example No. 47
def test_init(db_env_vars, db_engines):
    from cnxdb.cli.main import main
    args = ['init']
    return_code = main(args)

    assert return_code == 0

    inspector = Inspector.from_engine(db_engines['common'])
    tables = inspector.get_table_names()

    assert 'modules' in tables
    assert 'pending_documents' in tables
Example No. 48
def add_column_for_custom_risks(context):
    session = Session()
    inspector = Inspector.from_engine(session.bind)
    columns = [
        c['name'] for c in inspector.get_columns(model.Risk.__table__.name)
    ]
    if 'is_custom_risk' not in columns:
        log.info('Adding is_custom_risk column for risks')
        session.execute(
            "ALTER TABLE %s ADD is_custom_risk BOOL NOT NULL DEFAULT FALSE" %
            model.Risk.__table__.name)
        datamanager.mark_changed(session)
Example No. 49
def upgrade():

    connection = op.get_bind()

    inspector = insp.from_engine(connection)

    pk_constraint = (inspector.get_pk_constraint(ATTRIBUTES).get('name')
                     or naming_convention['pk'] % {
                         'table_name': ATTRIBUTES
                     })
    fk_constraint = (inspector.get_foreign_keys(ATTRIBUTES)[0].get('name')
                     or naming_convention['fk'] % {
                         'table_name': ATTRIBUTES
                     })

    columns_meta = inspector.get_columns(ATTRIBUTES)
    name_type = {
        meta.get('type')
        for meta in columns_meta if meta['name'] == NAME
    }.pop()
    value_type = {
        meta.get('type')
        for meta in columns_meta if meta['name'] == VALUE
    }.pop()

    node_uuid_column = sa.Column(NODE_UUID, sa.String(36))
    op.add_column(ATTRIBUTES, node_uuid_column)

    attributes = sa.table(ATTRIBUTES, sa.Column(NODE_UUID, sa.String(36)),
                          sa.Column(UUID, sa.String(36)))

    with op.batch_alter_table(ATTRIBUTES,
                              naming_convention=naming_convention) as batch_op:
        batch_op.drop_constraint(fk_constraint, type_='foreignkey')

    rows = connection.execute(
        sa.select([attributes.c.uuid, attributes.c.node_uuid]))

    for row in rows:
        # move uuid to node_uuid, reuse uuid as a new primary key
        connection.execute(
            attributes.update().where(attributes.c.uuid == row.uuid).values(
                node_uuid=row.uuid, uuid=uuidutils.generate_uuid()))

    with op.batch_alter_table(ATTRIBUTES,
                              naming_convention=naming_convention) as batch_op:
        batch_op.drop_constraint(pk_constraint, type_='primary')
        batch_op.create_primary_key(pk_constraint, [UUID])
        batch_op.create_foreign_key('fk_node_attribute', NODES, [NODE_UUID],
                                    [UUID])
        batch_op.alter_column('name', nullable=False, type_=name_type)
        batch_op.alter_column('value', nullable=True, type_=value_type)
Example No. 50
def upgrade():
    from nbexchange.models import Feedback as FeedbackNew

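    # re-declare the old "feedback" layout locally so existing rows can be
    # read and copied into the new table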
    class FeedbackOld(Base):
        __tablename__ = "feedback"
        id = Column(Integer(), primary_key=True, autoincrement=True)
        notebook = None
        notebook_id = Column(Integer(),
                             ForeignKey("notebook.id", ondelete="CASCADE"),
                             index=True)
        instructor = None
        instructor_id = Column(Integer,
                               ForeignKey("user.id", ondelete="CASCADE"),
                               index=True)
        student = None
        student_id = Column(Integer,
                            ForeignKey("user.id", ondelete="CASCADE"),
                            index=True)
        location = Column(
            Unicode(200),
            nullable=True)  # Location for the file of this action
        checksum = Column(Unicode(200),
                          nullable=True)  # Checksum for the feedback file
        timestamp = Column(Unicode(12), nullable=False)
        created_at = Column(DateTime, default=datetime.utcnow)

    bind = op.get_bind()

    inspector = Inspector.from_engine(bind)

    tables = inspector.get_table_names()

    if "feedback_2" not in tables:
        FeedbackNew.__table__.create(bind)

    session = orm.Session(bind=bind)

    if "feedback" in tables:
        feedbacks = [
            FeedbackNew(
                notebook_id=feedback.notebook_id,
                instructor_id=feedback.instructor_id,
                student_id=feedback.student_id,
                location=feedback.location,
                checksum=feedback.checksum,
                timestamp=try_convert(feedback.timestamp, feedback.created_at),
                created_at=feedback.created_at,
            ) for feedback in session.query(FeedbackOld)
        ]
        session.add_all(feedbacks)

        session.commit()
Example No. 51
def upgrade():
    c = get_context()
    if isinstance(c.connection.engine.dialect, MySQLDialect):
        insp = Inspector.from_engine(c.connection.engine)
        for t in [
                'groups_permissions', 'groups_resources_permissions',
                'users_groups', 'resources'
        ]:
            for constraint in insp.get_foreign_keys(t):
                if constraint['referred_columns'] == ['group_name']:
                    op.drop_constraint(constraint['name'],
                                       t,
                                       type='foreignkey')

    op.drop_column('groups', 'id')
    op.alter_column(
        'groups',
        'group_name',
        type_=sa.Unicode(128),
        existing_type=sa.Unicode(50),
    )
    op.create_primary_key('groups_pkey', 'groups', cols=['group_name'])

    if isinstance(c.connection.engine.dialect, MySQLDialect):
        op.create_foreign_key(None,
                              'groups_permissions',
                              'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'],
                              onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None,
                              'groups_resources_permissions',
                              'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'],
                              onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None,
                              'users_groups',
                              'groups',
                              remote_cols=['group_name'],
                              local_cols=['group_name'],
                              onupdate='CASCADE',
                              ondelete='CASCADE')
        op.create_foreign_key(None,
                              'resources',
                              'groups',
                              remote_cols=['group_name'],
                              local_cols=['owner_group_name'],
                              onupdate='CASCADE',
                              ondelete='SET NULL')
Example No. 52
def init_db():
    inspector = Inspector.from_engine(engine)
    if "alembic_version" in inspector.get_table_names():
        raise Exception(
            "Database has already been initialised. Use \"alembic upgrade head\" instead."
        )

    engine.echo = True
    Base.metadata.create_all(bind=engine)

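    # stamp the fresh schema as "head" so future alembic upgrades start
    # from the current revision instead of replaying every migration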
    alembic_cfg = Config(
        os.path.dirname(os.path.realpath(__file__)) + "/alembic.ini")
    command.stamp(alembic_cfg, "head")
Example No. 53
 def tearDown(self):
     delete_stmt = niamoto_db_meta.raster_registry.delete()
     with Connector.get_connection() as connection:
         inspector = Inspector.from_engine(connection)
         tables = inspector.get_table_names(
             schema=settings.NIAMOTO_RASTER_SCHEMA
         )
         for tb in tables:
             if tb != niamoto_db_meta.raster_registry.name:
                 connection.execute("DROP TABLE IF EXISTS {};".format(
                     "{}.{}".format(settings.NIAMOTO_RASTER_SCHEMA, tb)
                 ))
         connection.execute(delete_stmt)
Example No. 54
 def tearDown(self):
     unittest.TestCase.tearDown(self)
     self.app.close_app()
     tasks.app = self._app
     # Drop testing schemas
     engine = create_engine(self.app.conf['SQLALCHEMY_URL'], echo=False)
     connection = engine.connect()
     existing_schema_names = Inspector.from_engine(
         engine).get_schema_names()
     existing_schema_names = filter(
         lambda x: x.startswith(self.schema_prefix), existing_schema_names)
     for schema_name in existing_schema_names:
         connection.execute("drop schema {0} cascade;".format(schema_name))
Example No. 55
 def test_dont_barf_on_already_reflected(self):
     diffs = []
     from sqlalchemy.util import OrderedSet
     inspector = Inspector.from_engine(self.bind)
     autogenerate.compare._compare_tables(
         OrderedSet([(None, 'extra'), (None, 'user')]),
         OrderedSet(), [], inspector,
             MetaData(), diffs, self.autogen_context
     )
     eq_(
         [(rec[0], rec[1].name) for rec in diffs],
         [('remove_table', 'extra'), ('remove_table', 'user')]
     )
Example No. 56
    def __initializeConnection(self):
        """
        This should be in a base class eventually
        """

        self.engine = create_engine(
            "mysql://%s:%s@%s:%s/%s" % (self.dbUser, self.dbPass, self.dbHost, self.dbPort, self.dbName),
            pool_recycle=3600,
            echo_pool=True,
            echo=self.log.getLevel() == "DEBUG",
        )
        self.sqlalchemySession = scoped_session(sessionmaker(bind=self.engine))
        self.inspector = Inspector.from_engine(self.engine)
Example No. 57
    def test_change_type_boolean_to_int(self):
        self._boolean_fixture()
        with self.op.batch_alter_table("hasbool") as batch_op:
            batch_op.alter_column('x',
                                  type_=Integer,
                                  existing_type=Boolean(create_constraint=True,
                                                        name='ck1'))
        insp = Inspector.from_engine(config.db)

        eq_([
            c['type']._type_affinity
            for c in insp.get_columns('hasbool') if c['name'] == 'x'
        ], [Integer])
Example No. 58
def downgrade():
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    tables = inspector.get_table_names()
    metadata = MetaData(conn, reflect=True)
    table_mapping_reversed = {y: x for x, y in table_mapping.items()}
    table: str
    for table in tables:
        if table in skip_table or table.endswith(VERSION):
            continue
        print('<<<<Processing starting for table', table)
        _rename_obj(inspector, metadata, table, table_mapping_reversed, tables)
        print('Processing ended for table>>>', table)
Example No. 59
 def test_base_dimension(self):
     dim_1 = TestDimension()
     self.assertEqual(dim_1._exists, False)
     self.assertFalse(dim_1.is_created())
     dim_1.create_dimension()
     self.assertTrue(dim_1.is_created())
     with Connector.get_connection() as connection:
         inspector = Inspector.from_engine(connection)
         tables = inspector.get_table_names(
             schema=settings.NIAMOTO_DIMENSIONS_SCHEMA)
         self.assertIn("test_dimension", tables)
     dim_2 = TestDimension()
     self.assertEqual(dim_2._exists, True)
Example No. 60
 def test_delete_vector(self):
     VectorManager.add_vector('ncl_adm1', SHP_TEST)
     VectorManager.delete_vector('ncl_adm1')
     df = VectorManager.get_vector_list()
     self.assertNotIn('ncl_adm1', list(df['name']))
     engine = Connector.get_engine()
     inspector = Inspector.from_engine(engine)
     self.assertNotIn(
         'ncl_adm1',
         inspector.get_table_names(schema=settings.NIAMOTO_VECTOR_SCHEMA),
     )
     self.assertRaises(NoRecordFoundError, VectorManager.delete_vector,
                       'ncl_adm1')