def test_insert_table(engine_testaccount):
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, Sequence('user_id_seq'),
                         primary_key=True),
                  Column('name', String),
                  Column('fullname', String),
                  )
    metadata.create_all(engine_testaccount)

    data = [{
        'id': 1,
        'name': 'testname1',
        'fullname': 'fulltestname1',
    }, {
        'id': 2,
        'name': 'testname2',
        'fullname': 'fulltestname2',
    }]
    conn = engine_testaccount.connect()
    try:
        # using multivalue insert
        conn.execute(users.insert(data))
        results = conn.execute(select([users]).order_by('id'))
        row = results.fetchone()
        assert row['name'] == 'testname1'

    finally:
        conn.close()
        users.drop(engine_testaccount)
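A note on the pattern above: passing a list of dicts to users.insert(...) renders one multi-VALUES statement. A hedged, equivalent sketch in executemany style (same users table and data as above) passes the parameter list to execute() instead:

# executemany form: one prepared statement, parameters bound once per row
conn.execute(users.insert(), data)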
Example #2
 def test_reflect_select(self, engine, connection):
     """reflecttable should be able to fill in a table from the name"""
     one_row_complex = Table('one_row_complex', MetaData(bind=engine), autoload=True)
     # Presto ignores the union and decimal columns
     self.assertEqual(len(one_row_complex.c), 15 - 2)
     self.assertIsInstance(one_row_complex.c.string, Column)
     rows = one_row_complex.select().execute().fetchall()
     self.assertEqual(len(rows), 1)
     self.assertEqual(list(rows[0]), [
         True,
         127,
         32767,
         2147483647,
         9223372036854775807,
         0.5,
         0.25,
         'a string',
         '1970-01-01 00:00:00.000',
         '123',
         [1, 2],
         {"1": 2, "3": 4},  # Presto converts all keys to strings so that they're valid JSON
         [1, 2],  # struct is returned as a list of elements
         #'{0:1}',
         #0.1,
     ])
Example #3
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    tasks_table = Table('tasks', meta, autoload=True)
    task_info_table = Table('task_info', meta, autoload=True)

    for col_name in TASKS_MIGRATE_COLUMNS:
        column = Column(col_name, Text())
        column.create(tasks_table)

    task_info_records = task_info_table.select().execute().fetchall()

    for task_info in task_info_records:
        values = {
            'input': task_info.input,
            'result': task_info.result,
            'message': task_info.message
        }

        tasks_table\
            .update(values=values)\
            .where(tasks_table.c.id == task_info.task_id)\
            .execute()

    drop_tables([task_info_table])
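TASKS_MIGRATE_COLUMNS is a module-level constant not shown in this excerpt; judging from the values dict above, it is presumably along these lines:

# Assumed definition (not shown in the source excerpt); the names mirror
# the keys of the values dict built in the loop above
TASKS_MIGRATE_COLUMNS = ['input', 'result', 'message']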
Example #4
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    ip_blocks = Table('ip_blocks', meta, autoload=True)
    max_allocation = Column('max_allocation', Integer())
    ip_blocks.create_column(max_allocation)
Example #5
def upgrade(migrate_engine):
    # 'meta' and 'default_pool_id' are module-level globals in the original
    # migration (Example #47 shows the same pattern being set up)
    meta.bind = migrate_engine

    # Load the database tables
    servers_table = Table('servers', meta, autoload=True)
    pool_attrib_table = Table('pool_attributes', meta, autoload=True)

    # Read in all the servers to migrate to pool_attributes table
    servers = select(
        columns=[
            servers_table.c.id,
            servers_table.c.created_at,
            servers_table.c.updated_at,
            servers_table.c.version,
            servers_table.c.name
        ]
    ).execute().fetchall()

    for server in servers:
        pool_attrib_table.insert().execute(
            id=server.id,
            created_at=server.created_at,
            updated_at=server.updated_at,
            version=server.version,
            key='name_server',
            value=server.name,
            pool_id=default_pool_id
        )
Example #6
File: test_utils.py  Project: aaratn/heat
    def test_clone_table_adds_or_deletes_columns(self):
        meta = MetaData()
        meta.bind = self.engine

        table = Table('dummy',
                      meta,
                      Column('id', String(36), primary_key=True,
                             nullable=False),
                      Column('A', Boolean, default=False)
                      )
        table.create()

        newcols = [
            Column('B', Boolean, default=False),
            Column('C', String(255), default='foobar')
        ]
        ignorecols = [
            table.c.A.name
        ]
        new_table = migrate_utils.clone_table('new_dummy', table, meta,
                                              newcols=newcols,
                                              ignorecols=ignorecols)

        col_names = [c.name for c in new_table.columns]

        self.assertEqual(3, len(col_names))
        self.assertIsNotNone(new_table.c.B)
        self.assertIsNotNone(new_table.c.C)
        self.assertNotIn('A', col_names)
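migrate_utils.clone_table itself isn't shown here; a minimal sketch of the behaviour this test expects (copy the parent's columns, drop the ignored ones, append the new ones) might look like the following — an illustration, not heat's actual implementation:

def clone_table(name, parent, meta, newcols=(), ignorecols=()):
    # copy every column except the ignored ones, then append the new ones
    cols = [c.copy() for c in parent.columns if c.name not in ignorecols]
    cols.extend(newcols)
    new_table = Table(name, meta, *cols)
    new_table.create()
    return new_table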
Example #7
    def test_reflect_select(self, engine, connection):
        """reflecttable should be able to fill in a table from the name"""
        one_row_complex = Table('one_row_complex', MetaData(bind=engine), autoload=True)
        self.assertEqual(len(one_row_complex.c), 15)
        self.assertIsInstance(one_row_complex.c.string, Column)
        rows = one_row_complex.select().execute().fetchall()
        self.assertEqual(len(rows), 1)
        self.assertEqual(list(rows[0]), _ONE_ROW_COMPLEX_CONTENTS)

        try:
            from sqlalchemy.types import BigInteger
        except ImportError:
            from sqlalchemy.databases.mysql import MSBigInteger as BigInteger

        # TODO some of these types could be filled in better
        self.assertIsInstance(one_row_complex.c.boolean.type, types.Boolean)
        self.assertIsInstance(one_row_complex.c.tinyint.type, types.Integer)
        self.assertIsInstance(one_row_complex.c.smallint.type, types.Integer)
        self.assertIsInstance(one_row_complex.c.int.type, types.Integer)
        self.assertIsInstance(one_row_complex.c.bigint.type, BigInteger)
        self.assertIsInstance(one_row_complex.c.float.type, types.Float)
        self.assertIsInstance(one_row_complex.c.double.type, types.Float)
        self.assertIsInstance(one_row_complex.c.string.type, types.String)
        self.assertIsInstance(one_row_complex.c.timestamp.type, HiveTimestamp)
        self.assertIsInstance(one_row_complex.c.binary.type, types.String)
        self.assertIsInstance(one_row_complex.c.array.type, types.String)
        self.assertIsInstance(one_row_complex.c.map.type, types.String)
        self.assertIsInstance(one_row_complex.c.struct.type, types.String)
        self.assertIsInstance(one_row_complex.c.union.type, types.String)
        self.assertIsInstance(one_row_complex.c.decimal.type, HiveDecimal)
Example #8
File: meta.py  Project: pwdtnx/ouroboros
    def _exclude_columns_table(table):
        new_table = Table(table.name, MetaData())

        for c in table.columns:
            if c.name not in columns:
                new_table.append_column(c.copy())
        return new_table
Example #9
    def create_table(self, table_name, columns, ctypes, keys):
        """
        This method is used to create new table in the database.

        Input:
        Database configuration object and table name

        Output:
        New table created
        """
        
        # before creating a new table, check whether it already exists
        self.table_name = table_name
        table_flag = self.check_if_table_exists(table_name)
        if table_flag:
            print('Table already exists in the database. No need to create a new table')
        else:
            # create a new table since it does not exist
            print('Table - %s does not exist. Create a new table' % table_name)
            # get the description of the table
            table_columns = self.get_table_desc(columns, ctypes, keys)
            try:
                new_table = Table(
                        self.table_name,
                        self.metadata,
                        *table_columns
                        )
                # create the new table
                new_table.create(checkfirst=True)
                print("Table '%s' created" % self.table_name)

            except Exception:
                print('Error while creating the table %s' % self.table_name)
                raise
Example #10
File: table.py  Project: smnorris/pgdb
 def __init__(self, db, schema, table, columns=None):
     self.db = db
     self.schema = schema
     self.name = table
     self.engine = create_engine(db.url)
     self.metadata = MetaData(schema=schema)
     self.metadata.bind = self.engine
     # http://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html
     # if provided columns (SQLAlchemy columns), create the table
     if table:
         if columns:
             self.table = SQLATable(
                 table, self.metadata, schema=self.schema, *columns
             )
             self.table.create()
         # otherwise just load from db
         else:
             self.table = SQLATable(
                 table, self.metadata, schema=self.schema, autoload=True
             )
         self.indexes = dict((i.name, i) for i in self.table.indexes)
         self._is_dropped = False
     else:
         self._is_dropped = True
         self.table = None
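Hypothetical usage of this wrapper (it assumes a db object exposing a .url attribute): passing SQLAlchemy columns creates the table, omitting them reflects an existing one.

from sqlalchemy import Column, Integer, Text

t = Table(db, 'public', 'events',
          columns=[Column('id', Integer, primary_key=True),
                   Column('payload', Text)])  # creates public.events
same = Table(db, 'public', 'events')          # second call reflects it via autoload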
Example #11
def upgrade(migrate_engine):
    """Create the share_snapshots table."""
    meta = MetaData()
    meta.bind = migrate_engine

    shares = Table('shares', meta, autoload=True)
    share_snapshots = Table(
        'share_snapshots', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('share_id', String(36), ForeignKey('shares.id'),
               nullable=False),
        Column('size', Integer),
        Column('status', String(length=255)),
        Column('progress', String(length=255)),
        Column('display_name', String(length=255)),
        Column('display_description', String(length=255)),
        Column('share_size', Integer),
        Column('share_proto', String(length=255)),
        Column('export_location', String(255)),
        mysql_engine='InnoDB')

    try:
        share_snapshots.create()
    except Exception:
        LOG.error(_("Table %r not created!"), share_snapshots)
        raise
Example #12
def downgrade(migrate_engine):
    # 'meta', ZONE_ATTRIBUTE_KEYS and ZONE_TYPES are module-level globals in
    # the original migration
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()

    for dom in domains:
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()

    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
Example #13
def update_item_saved_info(item):
    # get_onitsuka_db_engine() and the 'metaData' object used below are
    # module-level helpers in the original source
    engine = get_onitsuka_db_engine()
    item_owner_id = item['owner_id']
    item_id = item['item_id']
    
    user_following = Table('user_following', metaData, autoload=True, autoload_with = engine)
    s = select([user_following.c.user_id], (user_following.c.following_id==item_owner_id))
    
    result = engine.execute(s)
    
    user_feed_update_list = list()
    for follower in result:
        
        item_owner_follower_id = follower['user_id']
        print(item_owner_follower_id)
        
        user_feed_update_item = {}
        user_feed_update_item['user_id']  = item_owner_follower_id
        user_feed_update_item['owner_id'] = item_owner_id
        user_feed_update_item['item_id'] = item_id
        user_feed_update_list.append(user_feed_update_item)

    result.close()

    user_feed_table = Table('user_feed', metaData, autoload=True, autoload_with = engine)
    ins = user_feed_table.insert().values(user_id=bindparam('user_id'), owner_id=bindparam('owner_id'), item_id=bindparam('item_id'))
    engine.execute(ins, user_feed_update_list)
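Since the bindparam() names above simply mirror the keys of each dict in user_feed_update_list, the explicit .values(...) call can be dropped; a sketch of the equivalent shorthand:

# when the parameter keys match the column names, bindparams stay implicit
engine.execute(user_feed_table.insert(), user_feed_update_list)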
Example #14
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    ip_blocks = Table('ip_blocks', meta, autoload=True)
    network_name = Column('network_name', String(255))
    ip_blocks.create_column(network_name)
Example #15
        def _table(table):
            src_table = table
            new_table = Table(src_table.name, MetaData())

            for c in src_table.columns:
                if c.name not in columns:
                    new_table.append_column(c.copy())
            return new_table
Example #16
def create_table(engine, table_name):
    log.debug("Creating table: %s on %r" % (table_name, engine))
    table = Table(table_name, engine._metadata)
    col = Column('id', Integer, primary_key=True)
    table.append_column(col)
    table.create(engine)
    TABLES[engine][table_name] = table
    return table
Example #17
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    ip_addresses = Table('ip_addresses', meta, autoload=True)
    allocated = Column('allocated', Boolean(), default=False)
    ip_addresses.create_column(allocated)
    migrate_engine.execute(ip_addresses.update().values(allocated=True))
Example #18
File: entities.py  Project: tomster/stasis
 def add_aspect(self, name, *columns):
     table = Table(
         name, self.metadata,
         Column('id', types.Integer, ForeignKey("entity.id")),
         *columns)
     table.create()
     self.aspects[name] = [x.name for x in columns]
     self.aspect_names = set(self.aspects.keys())
Example #19
File: db.py  Project: vuuvv/vweb.old
		def wrap(fn):
			table_definition = TableDefinition()
			fn(table_definition)
			table = Table(name, self.meta)
			for attrname in table_definition.fields.keys():
				args, kw = table_definition.fields[attrname]
				table.append_column(Column(attrname, *args, **kw))
			table.create()
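TableDefinition isn't shown in this snippet; judging from how wrap() consumes it, a minimal sketch of the assumed interface (the field() helper name is hypothetical) could be:

class TableDefinition:
    # collects field definitions as name -> (args, kwargs) for Column(...)
    def __init__(self):
        self.fields = {}

    def field(self, name, *args, **kw):  # hypothetical helper name
        self.fields[name] = (args, kw)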
Example #20
 def test_reflect_select(self, engine, connection):
     """reflecttable should be able to fill in a table from the name"""
     one_row_complex = Table('one_row_complex', MetaData(bind=engine), autoload=True)
     self.assertEqual(len(one_row_complex.c), 15)
     self.assertIsInstance(one_row_complex.c.string, Column)
     rows = one_row_complex.select().execute().fetchall()
     self.assertEqual(len(rows), 1)
     self.assertEqual(list(rows[0]), _ONE_ROW_COMPLEX_CONTENTS)
Example #21
 def _create_table(self, table_name):
     table_name = validate_name(table_name)
     log.debug("Creating table: %s on %r" % (table_name, self.engine))
     table = Table(table_name, self.meta)
     col = Column(ID_COLUMN, Integer, primary_key=True)
     table.append_column(col)
     table.create(self.engine)
     return table
Example #22
File: schema.py  Project: jljs/sqlaload
def create_table(engine, table_name):
    with lock:
        log.debug("Creating table: %s on %r" % (table_name, engine))
        table = Table(table_name, engine._metadata)
        col = Column("id", Integer, primary_key=True)
        table.append_column(col)
        table.create(engine)
        engine._tables[table_name] = table
        return table
Example #23
 def __init__(self, registry=None):
     self.registry = registry
     self.engine = create_engine('sqlite://')
     self.metadata = MetaData(self.engine)
     entity = Table(
         'entity', self.metadata,
         Column('id', types.Integer, primary_key=True))
     entity.create()
     self.aspects = propdict()
Example #24
 def test_reserved_words(self, engine, connection):
     """Hive uses backticks"""
     # Use keywords for the table/column name
     fake_table = Table('select', MetaData(bind=engine), Column('map', sqlalchemy.types.String))
     query = str(fake_table.select(fake_table.c.map == 'a'))
     self.assertIn('`select`', query)
     self.assertIn('`map`', query)
     self.assertNotIn('"select"', query)
     self.assertNotIn('"map"', query)
Example #25
def copy_table(table):
    """
     渡されたテーブルをコピーします
    """
    ret_table = Table(table.name, MetaData())
    for c in table.columns:
        ret_table.append_column(copy_column(c))

    return ret_table
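copy_column() isn't included in the snippet; since a SQLAlchemy Column can only belong to one Table, it presumably clones each column, along these lines:

def copy_column(column):
    # Column.copy() returns an unbound clone that can be attached to
    # another Table
    return column.copy()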
Example #26
 def test_reserved_words(self, engine, connection):
     """Presto uses double quotes, not backticks"""
     # Use keywords for the table/column name
     fake_table = Table('select', MetaData(bind=engine), Column('current_timestamp', String))
     query = str(fake_table.select(fake_table.c.current_timestamp == 'a'))
     self.assertIn('"select"', query)
     self.assertIn('"current_timestamp"', query)
     self.assertNotIn('`select`', query)
     self.assertNotIn('`current_timestamp`', query)
Example #27
File: mapping.py  Project: rlugojr/aleph
 def query(self):
     query = self.config.get('query')
     if not query:
         table_name = self.config.get('table')
         table = Table(table_name, self.meta, autoload=True)
         query = table.select()
     else:
         query = sql_text(query)
     log.info("Query: %s", query)
     return query
Example #28
File: meta.py  Project: pwdtnx/ouroboros
    def _rename_columns_table(table):
        new_table = Table(table.name, MetaData())

        for c in table.columns:
            renamed_column = c.copy()
            if c.name in src_columns:
                renamed_column.name = maps[c.name]
            new_table.append_column(renamed_column)

        return new_table
Example #29
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    share_snapshots = Table('share_snapshots', meta, autoload=True)
    try:
        share_snapshots.drop()
    except Exception:
        LOG.error(_("share_snapshots table not dropped"))
        raise
Example #30
 def add_aspect(self, name, *columns):
     table = Table(
         name, self.metadata,
         Column('id', types.Integer, ForeignKey("entity.id")),
         *columns)
     table.create()
     self.aspects[name] = propdict(
         ((x.name, x) for x in columns),
         _name=name,
         _table=table)
     self._aspect_names = set(self.aspects.keys())
Example #31
from sqlalchemy import Table, Column, Integer, String, ForeignKey
from sqlalchemy.dialects.postgresql import UUID

from aiida.utils import timezone
from aiida.backends.sqlalchemy.models.base import Base
from aiida.backends.sqlalchemy.models.utils import uuid_func

__copyright__ = u"Copyright (c), This file is part of the AiiDA platform. For further information please visit http://www.aiida.net/. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file."
__authors__ = "The AiiDA team."
__version__ = "0.7.1"

table_groups_nodes = Table(
    'db_dbgroup_dbnodes', Base.metadata, Column('id',
                                                Integer,
                                                primary_key=True),
    Column('dbnode_id', Integer,
           ForeignKey('db_dbnode.id', deferrable=True, initially="DEFERRED")),
    Column('dbgroup_id', Integer,
           ForeignKey('db_dbgroup.id', deferrable=True, initially="DEFERRED")))


class DbGroup(Base):
    __tablename__ = "db_dbgroup"

    id = Column(Integer, primary_key=True)

    uuid = Column(UUID(as_uuid=True), default=uuid_func)
    name = Column(String(255), index=True)

    type = Column(String(255), default="", index=True)
Example #32
def _gen_array_simple(cls, props, k, child_cust, p):
    table_name = cls.Attributes.table_name
    metadata = cls.Attributes.sqla_metadata

    # get left (fk) column info
    _gen_col = _get_col_o2m(cls, p.left)
    col_info = next(_gen_col)  # gets the column name
    # FIXME: Add support for multi-column primary keys.
    p.left, child_left_col_type = col_info[0]
    child_left_col_name = p.left

    # get right(data) column info
    child_right_col_type = get_sqlalchemy_type(child_cust)
    child_right_col_name = p.right  # this is the data column
    if child_right_col_name is None:
        child_right_col_name = k

    # get table name
    child_table_name = child_cust.Attributes.table_name
    if child_table_name is None:
        child_table_name = '_'.join([table_name, k])

    if child_table_name in metadata.tables:
        child_t = metadata.tables[child_table_name]

        # if the table already exists, make sure it has the right data column
        assert child_right_col_type.__class__ is \
               child_t.c[child_right_col_name].type.__class__, "%s.%s: %r != %r" % \
                   (cls, child_right_col_name, child_right_col_type.__class__,
                               child_t.c[child_right_col_name].type.__class__)

        if child_left_col_name in child_t.c:
            assert child_left_col_type.__class__ is \
                child_t.c[child_left_col_name].type.__class__, "%r != %r" % \
                   (child_left_col_type.__class__,
                               child_t.c[child_left_col_name].type.__class__)
        else:
            # table exists but our own foreign key column doesn't; add it
            child_left_col = next(_gen_col)
            _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)
            child_t.append_column(child_left_col)

    else:
        # table does not exist, generate table
        child_right_col = Column(child_right_col_name, child_right_col_type)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_right_col)

        child_left_col = next(_gen_col)
        _sp_attrs_to_sqla_constraints(cls, child_cust, col=child_left_col)

        child_t = Table(
            child_table_name,
            metadata,
            Column('id', sqlalchemy.Integer, primary_key=True),
            child_left_col,
            child_right_col,
        )
        _gen_index_info(child_t, child_right_col, child_right_col_name,
                        child_cust)

    # generate a temporary CamelCase class for the association proxy
    cls_name = ''.join(x.capitalize() or '_'
                       for x in child_table_name.split('_'))

    def _i(self, *args):
        setattr(self, child_right_col_name, args[0])

    cls_ = type("_" + cls_name, (object, ), {'__init__': _i})
    own_mapper(cls_)(cls_, child_t)
    props["_" + k] = relationship(cls_)

    # generate association proxy
    setattr(cls, k, association_proxy("_" + k, child_right_col_name))
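The pattern this function automates — a child table hidden behind an association proxy — can be written out by hand. A runnable, self-contained sketch of that pattern (all names here are illustrative, not Spyne's generated ones):

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

class ParentTag(Base):              # plays the role of the generated child class
    __tablename__ = 'parent_tags'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))  # the generated fk column
    tags = Column(String)           # the data column

    def __init__(self, value):      # mirrors the generated _i initializer
        self.tags = value

class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    _tags = relationship(ParentTag)
    tags = association_proxy('_tags', 'tags')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
p = Parent()
p.tags.append('red')                # transparently creates a ParentTag row
session.add(p)
session.commit()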
Example #33
    def get_user(self):
        if self.user_id is None:
            return None
        try:
            user = Users.query().filter(Users.id == self.user_id).one()
        except SQLAlchemyError as e:
            logger.exception(e)
            raise DoesNotExist("Key does not exist")
        return user


register_table(AuthKeys)

permissions_to_groups = Table(
    'permissions_to_groups', metadata,
    Column('permissions_id', Integer, ForeignKey('permissions.id')),
    Column('permission_groups_id', Integer,
           ForeignKey('permission_groups.id')))

permissions_to_users = Table(
    'permissions_to_users', metadata,
    Column('permissions_id', Integer, ForeignKey('permissions.id')),
    Column('users_id', Integer, ForeignKey('users.id')))

users_to_groups = Table(
    'users_to_groups', metadata,
    Column('users_id', Integer, ForeignKey('users.id')),
    Column('permission_groups_id', Integer,
           ForeignKey('permission_groups.id')))
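These are plain many-to-many association tables; a hedged sketch of how one of them would typically be wired into a mapped class (assuming Users and a declarative Base exist in this module, as the surrounding code suggests):

# hypothetical mapped class using the users_to_groups table above
class PermissionGroup(Base):
    __tablename__ = 'permission_groups'
    id = Column(Integer, primary_key=True)
    users = relationship('Users', secondary=users_to_groups,
                         backref='permission_groups')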

Example #34
    def load(self, schema: str, table: str, load_plan: RecordsLoadPlan,
             directory: RecordsDirectory) -> Optional[int]:
        if not isinstance(load_plan.records_format, DelimitedRecordsFormat):
            raise NotImplementedError(
                'Teach me how to load '
                f'{load_plan.records_format.format_type} format')

        if directory.scheme != 's3':
            with self.temporary_s3_directory_loc() as temp_s3_loc:
                s3_directory = directory.copy_to(temp_s3_loc)
                return self.load(schema=schema,
                                 table=table,
                                 load_plan=load_plan,
                                 directory=s3_directory)

        to = Table(table, self.meta, schema=schema)  # no autoload
        unhandled_hints = set(load_plan.records_format.hints.keys())
        processing_instructions = load_plan.processing_instructions
        validated_hints = load_plan.records_format.\
            validate(fail_if_cant_handle_hint=processing_instructions.fail_if_cant_handle_hint)
        redshift_options = redshift_copy_options(
            unhandled_hints, validated_hints,
            processing_instructions.fail_if_cant_handle_hint,
            processing_instructions.fail_if_row_invalid,
            processing_instructions.max_failure_rows)
        logger.info(f"Copying to Redshift with options: {redshift_options}")
        complain_on_unhandled_hints(
            processing_instructions.fail_if_dont_understand, unhandled_hints,
            load_plan.records_format.hints)
        # http://sqlalchemy-redshift.readthedocs.io/en/latest/commands.html
        loc = directory.loc
        if not callable(getattr(loc, 'aws_creds', None)):
            raise NotImplementedError(
                'Redshift can only load from an S3 bucket')
        else:
            aws_creds: Optional[Credentials] = directory.loc.aws_creds(
            )  # type: ignore
            if aws_creds is None:
                raise CredsDoNotSupportS3Import(
                    'Please provide AWS credentials '
                    '(run "aws configure")')
            #
            # Upon error, an exception is raised with the full SQL -
            # including the AWS creds inside.  Let's register those
            # with the logger so they get redacted.
            #
            register_secret(aws_creds.token)
            register_secret(aws_creds.secret_key)

            copy = CopyCommand(
                to=to,
                data_location=directory.loc.url + '_manifest',
                access_key_id=aws_creds.access_key,
                secret_access_key=aws_creds.secret_key,
                session_token=aws_creds.token,
                manifest=True,
                region=directory.loc.region,  # type: ignore
                empty_as_null=True,
                **redshift_options)  # type: ignore
            logger.info(f"Starting Redshift COPY from {directory}...")
            redshift_pid: int = self.db.execute(
                "SELECT pg_backend_pid();").scalar()
            try:
                self.db.execute(copy)
            except sqlalchemy.exc.InternalError:
                # Upon a load error, we receive:
                #
                #  sqlalchemy.exc.InternalError:
                #  (psycopg2.errors.InternalError_) Load into table 'tablename'
                #    failed.  Check 'stl_load_errors' system table for details.
                logger.warning(
                    "Caught load error - "
                    "for details, run this query: "
                    f"SELECT * FROM stl_load_errors WHERE session={redshift_pid}"
                )
                raise
            logger.info("Redshift COPY complete.")
            return None  # redshift doesn't give reliable info on load results
Example #35
def gen_sqla_info(cls, cls_bases=()):
    """Return SQLAlchemy table object corresponding to the passed Spyne object.
    Also maps given class to the returned table.
    """

    metadata = cls.Attributes.sqla_metadata
    table_name = cls.Attributes.table_name

    inc = [] # include_properties

    # check inheritance
    inheritance = None
    base_class = getattr(cls, '__extends__', None)
    if base_class is None:
        for b in cls_bases:
            if getattr(b, '_type_info', None) is not None and b.__mixin__:
                base_class = b

    if base_class is not None:
        base_table_name = base_class.Attributes.table_name
        if base_table_name is not None:
            if base_table_name == table_name:
                inheritance = _SINGLE
            else:
                inheritance = _JOINED
                raise NotImplementedError("Joined table inheritance is not yet "
                                          "implemented.")
            inc_prop = base_class.Attributes.sqla_mapper.include_properties
            if inc_prop is not None:
                inc.extend(inc_prop)

            exc_prop = base_class.Attributes.sqla_mapper.exclude_properties
            if exc_prop is not None:
                inc = [_p for _p in inc if not _p in exc_prop]

    # check whether the object already has a table
    table = None
    if table_name in metadata.tables:
        table = metadata.tables[table_name]
    else:
        # We need FakeTable because table_args can contain all sorts of stuff
        # that can require a fully-constructed table, and we don't have that
        # information here yet.
        table = _FakeTable()

    # check whether the base classes are already mapped
    base_mapper = None
    if base_class is not None:
        base_mapper = base_class.Attributes.sqla_mapper

    if base_mapper is None:
        for b in cls_bases:
            bm = _mapper_registry.get(b, None)
            if bm is not None:
                assert base_mapper is None, "There can be only one base mapper."
                base_mapper = bm
                inheritance = _SINGLE

    props = {}

    # For each Spyne field
    for k, v in cls._type_info.items():
        if v.Attributes.exc_table:
            continue

        col_args, col_kwargs = sanitize_args(v.Attributes.sqla_column_args)
        _sp_attrs_to_sqla_constraints(cls, v, col_kwargs)

        t = get_sqlalchemy_type(v)

        if t is None:
            p = getattr(v.Attributes, 'store_as', None)
            if p is not None and issubclass(v, Array) and isinstance(p, c_table):
                child_cust, = v._type_info.values()
                if child_cust.__orig__ is not None:
                    child = child_cust.__orig__
                else:
                    child = child_cust

                if p.multi != False: # many to many
                    col_own, col_child = _get_cols_m2m(cls, k, v, p.left, p.right)

                    p.left = col_own.key
                    p.right = col_child.key

                    if p.multi == True:
                        rel_table_name = '_'.join([cls.Attributes.table_name, k])
                    else:
                        rel_table_name = p.multi

                    # FIXME: Handle the case where the table already exists.
                    rel_t = Table(rel_table_name, metadata, *(col_own, col_child))

                    props[k] = relationship(child, secondary=rel_t, backref=p.backref)

                else: # one to many
                    assert p.left is None, "'left' is ignored in one-to-many " \
                                            "relationships. You probably meant " \
                                            "to use 'right'."

                    child_t = child.__table__
                    _gen_col = _get_col_o2m(cls, p.right)

                    col_info = next(_gen_col) # gets the column name
                    p.right, col_type = col_info[0] # FIXME: Add support for multi-column primary keys.

                    if p.right in child_t.c:
                        # FIXME: This branch MUST be tested.
                        assert col_type == child_t.c[p.right].type

                        # if the column is there, the decision about whether
                        # it should be in child's mapper should also have been
                        # made.
                        #
                        # so, not adding the child column to the child mapper
                        # here.

                    else:
                        col = next(_gen_col)

                        _sp_attrs_to_sqla_constraints(cls, child_cust, col=col)

                        child_t.append_column(col)
                        child.__mapper__.add_property(col.name, col)

                    props[k] = relationship(child)

            elif p is not None and issubclass(v, ComplexModelBase):
                # v has the Attribute values we need whereas real_v is what the
                # user instantiates (thus what sqlalchemy needs)
                if v.__orig__ is None: # vanilla class
                    real_v = v
                else: # customized class
                    real_v = v.__orig__

                if isinstance(p, c_table):
                    assert not getattr(p, 'multi', False), (
                                        'Storing a single element-type using a '
                                        'relation table is pointless.')

                    assert p.right is None, "'right' is ignored in a one-to-one " \
                                            "relationship"

                    col = _get_col_o2o(cls, k, v, p.left)
                    rel = relationship(real_v, uselist=False)

                    p.left = col.key
                    props[k] = rel

                elif isinstance(p, c_xml):
                    if k in table.c:
                        col = table.c[k]
                    else:
                        col = Column(k, PGObjectXml(v, p.root_tag, p.no_ns),
                                                        *col_args, **col_kwargs)

                elif isinstance(p, c_json):
                    if k in table.c:
                        col = table.c[k]
                    else:
                        col = Column(k, PGObjectJson(v, p.skip_depth),
                                                        *col_args, **col_kwargs)

                elif isinstance(p, c_msgpack):
                    raise NotImplementedError()

                else:
                    raise ValueError(p)

                props[col.name] = col
                if not k in table.c:
                    table.append_column(col)

            else:
                logger.debug("Skipping %s.%s.%s: %r, store_as: %r" % (
                                                cls.get_namespace(),
                                                cls.get_type_name(), k, v, p))

        else:
            unique = v.Attributes.unique
            index = v.Attributes.index
            if unique and not index:
                index = True

            try:
                index_name, index_method = v.Attributes.index
            except (TypeError, ValueError):
                index_name = "%s_%s%s" % (table_name, k, '_unique' if unique else '')
                index_method = v.Attributes.index

            if k in table.c:
                col = table.c[k]

            else:
                col = Column(k, t, *col_args, **col_kwargs)
                table.append_column(col)

                if index in (False, None):
                    pass
                else:
                    if index == True:
                        index_args = (index_name, col), dict(unique=unique)
                    else:
                        index_args = (index_name, col), dict(unique=unique,
                                                postgresql_using=index_method)

                    if isinstance(table, _FakeTable):
                        table.indexes.append(index_args)
                    else:
                        Index(*index_args[0], **index_args[1])

            if not v.Attributes.exc_mapper:
                props[k] = col

    if isinstance(table, _FakeTable):
        _table = table
        table_args, table_kwargs = sanitize_args(cls.Attributes.sqla_table_args)
        table = Table(table_name, metadata,
                           *(tuple(table.columns) + table_args), **table_kwargs)

        for index_args, index_kwargs in _table.indexes:
            Index(*index_args, **index_kwargs)
        del _table


    # Map the table to the object
    mapper_args, mapper_kwargs = sanitize_args(cls.Attributes.sqla_mapper_args)

    _props = mapper_kwargs.get('properties', None)
    if _props is None:
        mapper_kwargs['properties'] = props
    else:
        props.update(_props)
        mapper_kwargs['properties'] = props

    _inc = mapper_kwargs.get('include_properties', None)
    if _inc is None:
        mapper_kwargs['include_properties'] = inc + list(props.keys())

    po = mapper_kwargs.get('polymorphic_on', None)
    if po is not None:
        if not isinstance(po, Column):
            mapper_kwargs['polymorphic_on'] = table.c[po]
        else:
            del mapper_kwargs['polymorphic_on']

    if base_mapper is not None:
        mapper_kwargs['inherits'] = base_mapper

    if inheritance is not _SINGLE:
        mapper_args = (table,) + mapper_args

    cls_mapper = mapper(cls, *mapper_args, **mapper_kwargs)

    cls.__tablename__ = cls.Attributes.table_name
    cls.Attributes.sqla_mapper = cls.__mapper__ = cls_mapper
    cls.Attributes.sqla_table = cls.__table__ = table

    return table
Example #36
def _gen_array_simple(cls, props, subname, arrser_cust, storage):
    """Generate an array of simple objects.

    :param cls: The class that owns this field
    :param props: SQLAlchemy Mapper properties
    :param subname: Field name
    :param arrser_cust: Array serializer, ie the class itself inside the Array
                        object
    :param storage: The storage configuration object passed to the store_as
    """

    table_name = cls.Attributes.table_name
    metadata = cls.Attributes.sqla_metadata

    # get left (fk) column info
    _gen_col = _get_col_o2m(cls,
                            storage.left,
                            ondelete=storage.fk_left_ondelete,
                            onupdate=storage.fk_left_onupdate,
                            deferrable=storage.fk_left_deferrable,
                            initially=storage.fk_left_initially)

    col_info = next(_gen_col)  # gets the column name
    # FIXME: Add support for multi-column primary keys.
    storage.left, child_left_col_type = col_info[0]
    child_left_col_name = storage.left

    # get right(data) column info
    child_right_col_type = _get_sqlalchemy_type(arrser_cust)
    child_right_col_name = storage.right  # this is the data column
    if child_right_col_name is None:
        child_right_col_name = subname

    # get table name
    child_table_name = arrser_cust.Attributes.table_name
    if child_table_name is None:
        child_table_name = '_'.join([table_name, subname])

    if child_table_name in metadata.tables:
        child_t = metadata.tables[child_table_name]

        # if the table already exists, make sure it has the right data column
        assert child_right_col_type.__class__ is \
               child_t.c[child_right_col_name].type.__class__, "%s.%s: %r != %r" % \
                   (cls, child_right_col_name, child_right_col_type.__class__,
                               child_t.c[child_right_col_name].type.__class__)

        if child_left_col_name in child_t.c:
            assert child_left_col_type is \
                child_t.c[child_left_col_name].type.__class__, "%r != %r" % \
                   (child_left_col_type,
                               child_t.c[child_left_col_name].type.__class__)
        else:
            # Table exists but our own foreign key doesn't.
            child_left_col = next(_gen_col)
            _sp_attrs_to_sqla_constraints(cls, arrser_cust, col=child_left_col)
            child_t.append_column(child_left_col)

    else:
        # table does not exist, generate table
        child_right_col = Column(child_right_col_name, child_right_col_type)
        _sp_attrs_to_sqla_constraints(cls, arrser_cust, col=child_right_col)

        child_left_col = next(_gen_col)
        _sp_attrs_to_sqla_constraints(cls, arrser_cust, col=child_left_col)

        child_t = Table(
            child_table_name,
            metadata,
            Column('id', sqlalchemy.Integer, primary_key=True),
            child_left_col,
            child_right_col,
        )
        _gen_index_info(child_t, child_right_col, child_right_col_name,
                        arrser_cust)

    # generate a temporary CamelCase class for the association proxy
    cls_name = ''.join(x.capitalize() or '_'
                       for x in child_table_name.split('_'))

    def _i(self, *args):
        setattr(self, child_right_col_name, args[0])

    cls_ = type("_" + cls_name, (object, ), {'__init__': _i})
    mapper(cls_, child_t)
    props["_" + subname] = relationship(cls_)

    # generate association proxy
    setattr(cls, subname, association_proxy("_" + subname,
                                            child_right_col_name))
Example #37
def create_translation_table(_table_name,
                             foreign_class,
                             relation_name,
                             language_class,
                             relation_lazy='select',
                             **kwargs):
    """Creates a table that represents some kind of data attached to the given
    foreign class, but translated across several languages.  Returns the new
    table's mapped class.  It won't be declarative, but it will have a
    `__table__` attribute so you can retrieve the Table object.

    `foreign_class` must have a `__singlename__`, currently only used to create
    the name of the foreign key column.

    Also supports the notion of a default language, which is attached to the
    session.  This is English by default, for historical and practical reasons.

    Usage looks like this:

        class Foo(Base): ...

        create_translation_table('foo_bars', Foo, 'bars',
            name = Column(...),
        )

        # Now you can do the following:
        foo.name
        foo.name_map['en']
        foo.foo_bars['en']

        foo.name_map['en'] = "new name"
        del foo.name_map['en']

        q.options(joinedload(Foo.bars_local))
        q.options(joinedload(Foo.bars))

    The following properties are added to the passed class:

    - `(relation_name)`, a relation to the new table.  It uses a dict-based
      collection class, where the keys are language identifiers and the values
      are rows in the created tables.
    - `(relation_name)_local`, a relation to the row in the new table that
      matches the current default language.
    - `(relation_name)_table`, the class created by this function.

    Note that these are distinct relations.  Even though the former necessarily
    includes the latter, SQLAlchemy doesn't treat them as linked; loading one
    will not load the other.  Modifying both within the same transaction has
    undefined behavior.

    For each column provided, the following additional attributes are added to
    Foo:

    - `(column)_map`, an association proxy onto `foo_bars`.
    - `(column)`, an association proxy onto `foo_bars_local`.

    Pardon the naming disparity, but the grammar suffers otherwise.

    Modifying these directly is not likely to be a good idea.

    For Markdown-formatted columns, `(column)_map` and `(column)` will give
    Markdown objects.
    """
    # n.b.: language_class only exists for the sake of tests, which sometimes
    # want to create tables entirely separate from the pokedex metadata

    foreign_key_name = foreign_class.__singlename__ + '_id'

    Translations = type(
        _table_name, (object, ), {
            '_language_identifier':
            association_proxy('local_language', 'identifier'),
            'relation_name':
            relation_name,
            '__tablename__':
            _table_name,
        })

    # Create the table object
    table = Table(
        _table_name,
        foreign_class.__table__.metadata,
        Column(foreign_key_name,
               Integer,
               ForeignKey(foreign_class.id),
               primary_key=True,
               nullable=False,
               doc=u"ID of the %s these texts relate to" %
               foreign_class.__singlename__),
        Column('local_language_id',
               Integer,
               ForeignKey(language_class.id),
               primary_key=True,
               nullable=False,
               doc=u"Language these texts are in"),
    )
    Translations.__table__ = table

    # Add ye columns
    # Column objects have a _creation_order attribute in ascending order; use
    # this to get the (unordered) kwargs sorted correctly
    kwitems = sorted(kwargs.items(), key=lambda kv: kv[1]._creation_order)
    for name, column in kwitems:
        column.name = name
        table.append_column(column)

    # Construct ye mapper
    mapper(Translations,
           table,
           properties={
               'foreign_id':
               synonym(foreign_key_name),
               'local_language':
               relationship(
                   language_class,
                   primaryjoin=table.c.local_language_id == language_class.id,
                   innerjoin=True),
           })

    # Add full-table relations to the original class
    # Foo.bars_table
    setattr(foreign_class, relation_name + '_table', Translations)
    # Foo.bars
    setattr(
        foreign_class, relation_name,
        relationship(
            Translations,
            primaryjoin=foreign_class.id == Translations.foreign_id,
            collection_class=attribute_mapped_collection('local_language'),
        ))
    # Foo.bars_local
    # This is a bit clever; it uses bindparam() to make the join clause
    # modifiable on the fly.  db sessions know the current language and
    # populate the bindparam.
    # The 'dummy' value is to trick SQLA; without it, SQLA thinks this
    # bindparam is just its own auto-generated clause and everything gets
    # f****d up.
    local_relation_name = relation_name + '_local'
    setattr(
        foreign_class, local_relation_name,
        relationship(
            Translations,
            primaryjoin=and_(
                Translations.foreign_id == foreign_class.id,
                Translations.local_language_id == bindparam(
                    '_default_language_id',
                    value='dummy',
                    type_=Integer,
                    required=True),
            ),
            foreign_keys=[
                Translations.foreign_id, Translations.local_language_id
            ],
            uselist=False,
            lazy=relation_lazy,
        ))

    # Add per-column proxies to the original class
    for name, column in kwitems:
        getset_factory = None
        string_getter = column.info.get('string_getter')
        if string_getter:
            getset_factory = _getset_factory_factory(column.name,
                                                     string_getter)

        # Class.(column) -- accessor for the default language's value
        setattr(
            foreign_class, name,
            LocalAssociationProxy(local_relation_name,
                                  name,
                                  getset_factory=getset_factory))

        # Class.(column)_map -- accessor for the language dict
        # Need a custom creator since Translations doesn't have an init, and
        # these are passed as *args anyway
        def creator(language, value):
            row = Translations()
            row.local_language = language
            setattr(row, name, value)
            return row

        setattr(
            foreign_class, name + '_map',
            association_proxy(relation_name,
                              name,
                              creator=creator,
                              getset_factory=getset_factory))

    # Add to the list of translation classes
    foreign_class.translation_classes.append(Translations)

    # Done
    return Translations
Example #38
    name = Column(String(255))

    symbol = Column(String(16))
    title = Column(String(255))
    rank = Column(Integer, default=0)
    display = Column(Boolean)

    users = relationship("User", secondary='usergroup')

    @property
    def id(self):
        return self.gid


usergroup = Table('usergroup', Base.metadata,
                  Column('uid', Integer, ForeignKey('users.uid')),
                  Column('gid', Integer, ForeignKey('groups.gid')))


class Category(Base):
    __tablename__ = 'categories'

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(255))
    position = Column(Integer)

    group_id = Column(Integer, ForeignKey('groups.gid'))
    group = relationship("Group", backref='categories')

    @property
    def url(self):
Example #39
 def test_reflect_with_schema(self, engine, connection):
     dummy = Table('dummy_table', MetaData(bind=engine), schema='pyhive_test_database',
                   autoload=True)
     self.assertEqual(len(dummy.c), 1)
     self.assertIsNotNone(dummy.c.a)
Example #40
 def test_char_length(self, engine, connection):
     one_row_complex = Table('one_row_complex', MetaData(bind=engine), autoload=True)
     result = sqlalchemy.select([
         sqlalchemy.func.char_length(one_row_complex.c.string)
     ]).execute().scalar()
     self.assertEqual(result, len('a string'))
Example #41
class Pagamento(Base):
    __table__ = Table(os.getenv('PAGAMENTO_TABLE'),
                      metadata_siai,
                      autoload=True)
Example #42
class Orgao(Base):
    __table__ = Table(os.getenv('ORGAO_TABLE'), metadata_siai, autoload=True)
    __mapper_args__ = {'primary_key': [__table__.c.IdOrgao]}
Example #43
def _gen_array_m2m(cls, props, subname, arrser, storage):
    """Generates a relational many-to-many array.

    :param cls: The class that owns the field
    :param props: SQLAlchemy Mapper properties
    :param subname: Field name
    :param arrser: Array serializer, ie the __orig__ of the class inside the
                   Array object
    :param storage: The storage configuration object passed to the store_as
    attribute.
    """

    metadata = cls.Attributes.sqla_metadata

    col_own, col_child = _get_cols_m2m(
        cls, subname, arrser, storage.left, storage.right,
        storage.fk_left_deferrable, storage.fk_left_initially,
        storage.fk_right_deferrable, storage.fk_right_initially,
        storage.fk_left_ondelete, storage.fk_left_onupdate,
        storage.fk_right_ondelete, storage.fk_right_onupdate)

    storage.left = col_own.key
    storage.right = col_child.key

    # noinspection PySimplifyBooleanCheck because literal True means
    # "generate table name automatically" here
    if storage.multi is True:
        rel_table_name = '_'.join([cls.Attributes.table_name, subname])
    else:
        rel_table_name = storage.multi

    if rel_table_name in metadata.tables:
        rel_t = metadata.tables[rel_table_name]

        assert col_own.type.__class__ == rel_t.c[col_own.key].type.__class__
        assert col_child.type.__class__ == rel_t.c[
            col_child.key].type.__class__

    else:
        rel_t = Table(rel_table_name, metadata, *(col_own, col_child))

    own_t = cls.Attributes.sqla_table
    if storage.explicit_join:
        # Specify primaryjoin and secondaryjoin when requested.
        # There are special cases where SQLAlchemy can't figure them out by
        # itself; this is where we help it when we can.
        # e.g.: http://sqlalchemy.readthedocs.org/en/rel_1_0/orm/join_conditions.html#self-referential-many-to-many-relationship

        assert own_t is not None and len(get_pk_columns(cls)) > 0

        # FIXME: support more than one pk
        (col_pk_key, _), = get_pk_columns(cls)
        col_pk = own_t.c[col_pk_key]

        rel_kwargs = dict(
            lazy=storage.lazy,
            backref=storage.backref,
            cascade=storage.cascade,
            order_by=storage.order_by,
            secondary=rel_t,
            primaryjoin=(col_pk == rel_t.c[col_own.key]),
            secondaryjoin=(col_pk == rel_t.c[col_child.key]),
            back_populates=storage.back_populates,
        )

        if storage.single_parent is not None:
            rel_kwargs['single_parent'] = storage.single_parent

        props[subname] = relationship(arrser, **rel_kwargs)

    else:
        rel_kwargs = dict(secondary=rel_t,
                          backref=storage.backref,
                          back_populates=storage.back_populates,
                          cascade=storage.cascade,
                          lazy=storage.lazy,
                          order_by=storage.order_by)

        if storage.single_parent is not None:
            rel_kwargs['single_parent'] = storage.single_parent

        props[subname] = relationship(arrser, **rel_kwargs)
Example #44
 def table(self, schema, table):
     return Table(table,
                  self.meta,
                  schema=schema,
                  autoload=True,
                  autoload_with=self.engine)
Example #45
                               echo=False,
                               encoding="utf-8")
    else:
        raise ValueError("DB_INFO_DIC['type']=%s", DB_INFO_DIC['type'])
    logger.debug("Database connected")
    return engine


if __name__ == "__main__":
    engine = create_engine('sqlite:///:memory:', echo=True)
    # engine = get_db_engine()
    metadata = MetaData()
    #创建user表,继承metadata类
    #Engine使用Schama Type创建一个特定的结构对象
    testonly_tableobj = Table("testonly", metadata,
                              Column("id", Integer, primary_key=True),
                              Column("name", String(20)),
                              Column('bytes', VARBINARY(20)))

    # color = Table("color", metadata,
    #               Column("id", Integer, primary_key=True),
    #               Column("name", String(20)))
    metadata.create_all(engine)  # create the table structures

    conn = engine.connect()
    try:
        # reflect the table structure from the database via metadata, then insert rows
        metadata = MetaData(engine)
        testonly_tableobj_autoload = Table('testonly', metadata, autoload=True)
        conn.execute(testonly_tableobj_autoload.insert(), {
            'id': 2,
            "name": "koka",
Example #46

from __future__ import \
    absolute_import, division, print_function, \
    unicode_literals

from sqlalchemy.schema import Column, ForeignKey, \
    Table, UniqueConstraint

from sqlalchemy.types import Float, Integer

from ...db.meta import metadata, table_opts

example_request = Table(
    'example_request', metadata,
    Column('id', Integer, primary_key=True),
    Column('proposal_id', None,
           ForeignKey('proposal.id', onupdate='RESTRICT', ondelete='RESTRICT'),
           nullable=False),
    Column('instrument', Integer, nullable=False),
    Column('time', Float, nullable=False),
    UniqueConstraint('proposal_id', 'instrument'),
    **table_opts)
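Note the None passed as the type of proposal_id: when a Column's type is None and a ForeignKey is present, SQLAlchemy copies the type from the referenced column once it is resolvable. A small self-contained sketch of the same pattern (the proposal table here is hypothetical):

from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

md = MetaData()
proposal = Table('proposal', md, Column('id', Integer, primary_key=True))
req = Table('req', md,
            Column('proposal_id', None, ForeignKey('proposal.id')))
# req.c.proposal_id adopts Integer from proposal.id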
Example #47
meta = MetaData()

# Get the default pool_id from the config file
default_pool_id = cfg.CONF['service:central'].default_pool_id.replace('-', '')

pool_ns_records_table = Table('pool_ns_records',
                              meta,
                              Column('id',
                                     UUID(),
                                     default=utils.generate_uuid,
                                     primary_key=True),
                              Column('created_at', DateTime()),
                              Column('updated_at', DateTime()),
                              Column('version',
                                     Integer(),
                                     default=1,
                                     nullable=False),
                              Column('pool_id', UUID(), nullable=False),
                              Column('priority', Integer(), nullable=False),
                              Column('hostname', String(255), nullable=False),
                              ForeignKeyConstraint(['pool_id'], ['pools.id'],
                                                   ondelete='CASCADE'),
                              mysql_engine='INNODB',
                              mysql_charset='utf8')


def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Load the pool_attributes_table table schema
Example #48
def resource_role_class(declarative_base, user_model, resource_model, roles):
    """Create a :ref:`resource-specific role<resource-specific-roles>` Mixin
    for SQLAlchemy models. Returns the role mixin, which must then be mixed
    into a SQLAlchemy model for the role. E.g.,

    .. code-block:: python

        OrganizationRoleMixin = oso_roles.resource_role_class(
            Base, User, Organization, ["OWNER", "MEMBER", "BILLING"]
        )

        class OrganizationRole(Base, OrganizationRoleMixin):
            pass


    :param declarative_base: The SQLAlchemy declarative base model that \
    the role model and all related models are mapped to.

    :param user_model: The SQLAlchemy model representing users that the \
    resource-specific roles can be assigned to. The generated Role mixin will \
    have a many-to-many relationship with this user model. The primary key of \
    the ``user_model`` must be named ``id``. E.g.,

        .. code-block:: python

            class User(Base):
                __tablename__ = "users"

                id = Column(Integer, primary_key=True)
                email = Column(String())

    :param resource_model: The SQLAlchemy model representing resources that \
    the generated Role mixin will be scoped to. The Role mixin will \
    have a many-to-one (ForeignKey) relationship with this resource model. \
    The primary key of the ``resource_model`` must be named ``id``. E.g.,

        .. code-block:: python

            class Repository(Base):
                __tablename__ = "repositories"

                id = Column(Integer, primary_key=True)
                name = Column(String(256))

    :param roles: An order-independent list of the built-in roles for this resource-specific role type.
    :type roles: List[str]

        .. code-block:: python

            class Team(Base):
                __tablename__ = "teams"

                id = Column(Integer, primary_key=True)
                name = Column(String(256))


    """
    global ROLE_CLASSES
    ROLE_CLASSES.append({
        "user_model": user_model,
        "resource_model": resource_model,
        # @NOTE: Must name role model like this for now.
        "role_model": resource_model.__name__ + "Role",
    })

    # many-to-many relationship with users
    user_join_table = Table(
        f"{resource_model.__name__.lower()}_roles_users",
        declarative_base.metadata,
        Column(
            f"{resource_model.__name__.lower()}_role_id",
            Integer,
            ForeignKey(f"{resource_model.__name__.lower()}_roles.id"),
            primary_key=True,
        ),
        Column(
            "user_id",
            Integer,
            ForeignKey(f"{user_model.__tablename__}.id"),
            primary_key=True,
        ),
    )

    class ResourceRoleMixin:
        # TODO: enforce that classes are named with the ResourceRole convention, e.g. RepositoryRole
        choices = roles

        __tablename__ = f"{resource_model.__name__.lower()}_roles"
        id = Column(Integer, primary_key=True)
        name = Column(String())

        # many-to-many relationship with users
        @declared_attr
        def users(cls):
            return relationship(
                f"{user_model.__name__}",
                secondary=user_join_table,
                lazy="subquery",
                backref=backref(f"{resource_model.__name__.lower()}_roles",
                                lazy=True),
            )

    @declared_attr
    def resource_id(cls):
        table_name = resource_model.__tablename__
        return Column(Integer, ForeignKey(f"{table_name}.id"))

    @declared_attr
    def resource(cls):
        return relationship(resource_model.__name__,
                            backref="roles",
                            lazy=True)

    setattr(ResourceRoleMixin, f"{resource_model.__name__.lower()}_id",
            resource_id)
    setattr(ResourceRoleMixin, resource_model.__name__.lower(), resource)

    return ResourceRoleMixin
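
# Usage sketch (assumed, mirroring the docstring above; `RepositoryRole`,
# `repo` and `user` are hypothetical names): after mixing the returned class
# into a model, roles are assigned through the generated relationships:
#
#     role = RepositoryRole(name="OWNER", repository=repo)
#     role.users.append(user)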
Example #49
    def _import_schema(self, schema_name):
        def fkey(target):
            return ForeignKey(schema_name + '.' + target)

        def make_uuid():
            return str(uuid4())

        metadata = self.metadata[schema_name]
        tables = self.tables[schema_name]
        tables.append(
            Table("ealgis_metadata",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(256), nullable=False),
                  Column('family', String(256), nullable=True),
                  Column('uuid', String(36), nullable=False,
                         default=make_uuid),
                  Column('description', Text(), nullable=False),
                  Column('date_created',
                         DateTime(timezone=True),
                         default=datetime.datetime.utcnow,
                         nullable=False),
                  Column('date_published',
                         DateTime(timezone=True),
                         nullable=False),
                  schema=schema_name))
        tables.append(
            Table("dependencies",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(256), nullable=False),
                  Column('uuid', String(36), nullable=False),
                  schema=schema_name))
        tables.append(
            Table("table_info",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(256)),
                  Column('metadata_json', JSON()),
                  schema=schema_name))
        tables.append(
            Table("column_info",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('table_info_id',
                         Integer,
                         fkey('table_info.id'),
                         nullable=False),
                  Column('name', String(256)),
                  Column('schema_name', String(256)),
                  Column('metadata_json', JSON()),
                  schema=schema_name))
        tables.append(
            Table("geometry_source",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('table_info_id',
                         Integer,
                         fkey('table_info.id'),
                         nullable=False),
                  Column('gid_column', String(256)),
                  Column('geometry_type', String(256)),
                  schema=schema_name))
        tables.append(
            Table("geometry_source_projection",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('geometry_source_id',
                         Integer,
                         fkey('geometry_source.id'),
                         nullable=False),
                  Column('geometry_column', String(256)),
                  Column('srid', Integer),
                  schema=schema_name))

        tables.append(
            Table(
                "geometry_linkage",
                metadata,
                Column('id', Integer, primary_key=True),
                # in the source schema: may not be the same schema as this Table instance
                Column('geometry_source_schema_name', String, nullable=False),
                Column('geometry_source_id', Integer, nullable=False),
                # these must be in this schema
                Column('attr_table_id',
                       Integer,
                       fkey('table_info.id'),
                       nullable=False),
                Column('attr_column', String(256)),
                schema=schema_name))
        tables.append(
            Table("mailbox",
                  metadata,
                  Column('id', Integer, primary_key=True),
                  Column('from', String(256)),
                  Column('to', String(256)),
                  Column('message', JSON()),
                  schema=schema_name))
Example #50
class Liquidacao(Base):
    __table__ = Table(os.getenv('LIQUIDACAO_TABLE'),
                      metadata_siai,
                      autoload=True)
Example #51
                    '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc',
                    '.fli', '.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264',
                    '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e', '.m4v', '.mjp',
                    '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd',
                    '.movie', '.movx', '.mp4', '.mpe', '.mpeg', '.mpg', '.mpv',
                    '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.omf',
                    '.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts',
                    '.vfw', '.vid', '.video', '.viv', '.vivo', '.vob', '.vro',
                    '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264',
                    '.xvid')

SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass',
                       '.mpl')  # Borrowed from Subliminal

association_table = Table(
    'association', Base.metadata,
    Column('sub_queue_id', Integer, ForeignKey('subtitle_queue.id')),
    Column('lang_id', Integer, ForeignKey('subtitle_language.id')))
Base.register_table(association_table)


def normalize_path(path):
    return os.path.normcase(os.path.abspath(path)) if path else None


class SubtitleLanguages(Base):
    __tablename__ = 'subtitle_language'

    id = Column(Integer, primary_key=True)
    language = Column(String, unique=True, index=True)

    def __init__(self, language):
Example #52
class Empenho(Base):
    __table__ = Table(os.getenv('EMPENHO_TABLE'), metadata_siai, autoload=True)
Example #53
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)



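    # `SORT_SUPPORT` is referenced below but defined elsewhere in this module;
    # a plausible shape (assumed here, since the definition is not shown) maps
    # each dialect name to its default NULL placement and whether it supports
    # explicit NULLS FIRST / NULLS LAST:
    #
    #     SORT_SUPPORT = {
    #         'postgresql': {'default': 'higher', 'support': True},
    #         'mysql': {'default': 'lower', 'support': False},
    #     }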
    def _need_explicit_null_ordering(self, key):
        support = SORT_SUPPORT[self.engine.dialect.name]
        default = support['default']
        no = None
        if key.is_reversed:
            no = nullsfirst if default == 'higher' else nullslast
        else:
            no = nullslast if default == 'higher' else nullsfirst
        if key.nulls_first:
            if no != nullsfirst:
                return nullsfirst
            return None
        else:
            if no != nullslast:
                return nullslast
            return None

    def can_sort(self, sortkeys):
        if SORT_SUPPORT.get(self.engine.dialect.name) is None:
            # We have no idea about defaults
            return []
        can_order_null = SORT_SUPPORT[self.engine.dialect.name]['support']
        if (any((self._need_explicit_null_ordering(x) is not None
                 for x in sortkeys)) and not can_order_null):
            return []
        return sortkeys

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        return [str(statement)]

    def _build_statement(self, quals, columns, sortkeys):
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        orders = []
        for sortkey in sortkeys:
            column = self.table.c[sortkey.attname]
            if sortkey.is_reversed:
                column = column.desc()
            if sortkey.collate:
                column = column.collate('"%s"' % sortkey.collate)
            null_ordering = self._need_explicit_null_ordering(sortkey)
            if null_ordering:
                column = null_ordering(column)
            statement = statement.order_by(column)
        return statement


    def execute(self, quals, columns, sortkeys=None):
        """
        The quals are turned into an and'ed where clause.
        """
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)

        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype
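
    # For illustration (behaviour implied by _get_column_type above): a remote
    # column declared as 'character varying(20)' maps to VARCHAR(20),
    # 'numeric(10,2)' to NUMERIC(10, 2), and 'integer[]' to ARRAY(INTEGER()).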

    @classmethod
    def import_schema(cls, schema, srv_options, options,
                      restriction_type, restricts):
        """
        Reflects the remote schema.
        """
        metadata = MetaData()
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine,
                         schema=schema,
                         only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(ColumnDefinition(
                    c.name,
                    type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
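
    # Usage sketch (assumed): from PostgreSQL this method backs the standard
    # IMPORT FOREIGN SCHEMA statement, e.g.
    #
    #     IMPORT FOREIGN SCHEMA remote_schema
    #         LIMIT TO (users, orders)
    #         FROM SERVER alchemy_srv INTO public;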
Example #54
File: db.py Project: bbodek/pyhcup
def process_raw_table(
        eng,
        table,
        meta_df,
        state,
        year,
        index_fields=['key', 'state', 'year'],
        #replace_sentinels=True, # deprecated option
        table_name=None):
    """
    Uses SQLAlchemy to split raw load table into columns, scrub missing
    data placeholders, and load sliced data into a new table.

    Returns tuple: (created SQLAlchemy Table object, SQL used)

    Parameters
    ==========
    eng: required
        Must be a SQLAlchemy engine object.
    
    table: required
        SQLAlchemy table object, usually created with raw_load().

    meta_df: required
        Must be a pandas DataFrame with meta data on the file in question. I.e., returned by pyhcup.meta.get().

    state: required
        Should be the two letter abbreviation for the state where the data are from, like 'NE' for Nebraska. Used to fill in the state value explicitly in the table.

    year: required
        Should be the four digit year where the data are from, like 2009. Used to fill in the year value explicitly in the table.
    
    index_fields: required (list or None)
        Will create an index on these fields.
    
    table_name: optional (default: None)
        Table name for the load. Will be generated automatically if not provided.
    """
    raw_table_name = table.name
    stmt = sqla_select_processed_from_raw(eng, table, meta_df, state, year)
    compiled = stmt.compile(bind=eng, compile_kwargs={"literal_binds": True})

    # Create the table
    if table_name is None:
        # derive a name for this staging table
        # prefer to have it match the raw table name, but with staging instead of raw
        # failing that, just tack on '_staging' to the other name
        if 'raw' in raw_table_name:
            table_name = raw_table_name.replace('raw', 'staging')
        else:
            table_name = raw_table_name + '_staging'

    sql = text("CREATE TABLE %s AS (%s)" % (table_name, unicode(compiled)))
    result = eng.execute(sql)
    t = Table(table_name, MetaData(bind=eng), autoload=True)

    # Really, the steps below may be unnecessary.
    # If this table will soon be selected into a master table with
    # constraints already in place, then this is a redundant (and
    # slow!) step to perform twice.
    # It is good to put in for now for validating that this approach
    # is not truncating fields, and as a comparison of overall
    # loading speed.

    # TODO: Failing as of 2015-06-25
    #if isinstance(index_fields, list):
    #    if len(index_fields) > 0:
    #        for col in index_fields:
    #            idx = create_index(Column(col))
    #            idx.create(eng)
    #else:
    #    raise TypeError("index_fields must either be None or a non-zero length list (got %s)." % type(index_fields))

    return t, sql
Example #55
File: db.py Project: bbodek/pyhcup
def get_cols(eng, tbl_name):
    table = Table(tbl_name, MetaData(), autoload_with=eng)
    return table.c.keys()
Example #56
File: db.py Project: bbodek/pyhcup
def drop_table(eng, tbl_name):
    table = Table(tbl_name, MetaData(), autoload_with=eng)
    if table.exists(bind=eng):
        table.drop(bind=eng)
        return True
    return False
Example #57
import shortuuid
from sqlalchemy.schema import Table, Column
from sqlalchemy.sql import func, select
from sqlalchemy import Integer, String, DateTime

from coupon import db

Coupon = Table(
    'coupon', db.MetaData,
    Column('id', Integer, nullable=False, primary_key=True,
           autoincrement=True),
    Column('time', DateTime, nullable=False, default=func.now(), index=True),
    Column('uuid', String(255), nullable=False, unique=True),
    Column('name', String(255), nullable=False, default=''),
    Column('img', String(255), nullable=False),
    Column('title', String(255), nullable=False, default=''),
    Column('receive_text', String(255), nullable=True, default=None),
    Column('custom_style', String(65535), nullable=True, default=None),
    Column('view_count', Integer, nullable=False, default=0),
    Column('submit_count', Integer, nullable=False, default=0))


def add(img: str,
        name: str = '',
        title: str = '',
        receive_text: str = None,
        custom_style: str = None):
    uuid = shortuuid.uuid()
    coupon = {
        'uuid': uuid,
        'name': name,
Example #58
class ArquivoXML(Base):
    __table__ = Table(os.getenv('ARQUIVOXML_TABLE'),
                      metadata_siai,
                      autoload=True)
Example #59
 def test_has_table(self, engine, connection):
     self.assertTrue(Table('one_row', MetaData(bind=engine)).exists())
     self.assertFalse(Table('this_table_does_not_exist', MetaData(bind=engine)).exists())
Example #60
File: table.py Project: wwyys/untitled0
class Table(object):
    """Represents a table in a database and exposes common operations."""
    PRIMARY_DEFAULT = 'id'

    def __init__(self,
                 database,
                 table_name,
                 primary_id=None,
                 primary_type=None,
                 auto_create=False):
        """Initialise the table from database schema."""
        self.db = database
        self.name = normalize_table_name(table_name)
        self._table = None
        self._columns = None
        self._indexes = []
        self._primary_id = primary_id if primary_id is not None \
            else self.PRIMARY_DEFAULT
        self._primary_type = primary_type if primary_type is not None \
            else Types.integer
        self._auto_create = auto_create

    @property
    def exists(self):
        """Check to see if the table currently exists in the database."""
        if self._table is not None:
            return True
        return self.name in self.db

    @property
    def table(self):
        """Get a reference to the table, which may be reflected or created."""
        if self._table is None:
            self._sync_table(())
        return self._table

    @property
    def _column_keys(self):
        """Get a dictionary of all columns and their case mapping."""
        if not self.exists:
            return {}
        if self._columns is None:
            # Initialise the table if it doesn't exist
            table = self.table
            self._columns = {}
            for column in table.columns:
                name = normalize_column_name(column.name)
                key = normalize_column_key(name)
                if key in self._columns:
                    log.warning("Duplicate column: %s", name)
                self._columns[key] = name
        return self._columns

    @property
    def columns(self):
        """Get a listing of all columns that exist in the table."""
        return list(self._column_keys.values())

    def has_column(self, column):
        """Check if a column with the given name exists on this table."""
        key = normalize_column_key(normalize_column_name(column))
        return key in self._column_keys

    def _get_column_name(self, name):
        """Find the best column name with case-insensitive matching."""
        name = normalize_column_name(name)
        key = normalize_column_key(name)
        return self._column_keys.get(key, name)

    def insert(self, row, ensure=None, types=None):
        """Add a ``row`` dict by inserting it into the table.

        If ``ensure`` is set, any keys of the row that are not
        table columns will be created automatically.

        During column creation, ``types`` will be checked for a key
        matching the name of a column to be created, and the given
        SQLAlchemy column type will be used. Otherwise, the type is
        guessed from the row value, defaulting to a simple unicode
        field.
        ::

            data = dict(title='I am a banana!')
            table.insert(data)

        Returns the inserted row's primary key.
        """
        row = self._sync_columns(row, ensure, types=types)
        res = self.db.executable.execute(self.table.insert(row))
        if len(res.inserted_primary_key) > 0:
            return res.inserted_primary_key[0]
        return True

    def insert_ignore(self, row, keys, ensure=None, types=None):
        """Add a ``row`` dict into the table if the row does not exist.

        If rows with matching ``keys`` exist no change is made.

        Setting ``ensure`` results in automatically creating missing columns,
        i.e., keys of the row that are not table columns.

        During column creation, ``types`` will be checked for a key
        matching the name of a column to be created, and the given
        SQLAlchemy column type will be used. Otherwise, the type is
        guessed from the row value, defaulting to a simple unicode
        field.
        ::

            data = dict(id=10, title='I am a banana!')
            table.insert_ignore(data, ['id'])
        """
        row = self._sync_columns(row, ensure, types=types)
        if self._check_ensure(ensure):
            self.create_index(keys)
        args, _ = self._keys_to_args(row, keys)
        if self.count(**args) == 0:
            return self.insert(row, ensure=False)
        return False

    def insert_many(self, rows, chunk_size=1000, ensure=None, types=None):
        """Add many rows at a time.

        This is significantly faster than adding them one by one. By default
        the rows are processed in chunks of 1000 per commit, unless you specify
        a different ``chunk_size``.

        See :py:meth:`insert() <dataset.Table.insert>` for details on
        the other parameters.
        ::

            rows = [dict(name='Dolly')] * 10000
            table.insert_many(rows)
        """
        # Sync table before inputting rows.
        sync_row = {}
        for row in rows:
            # Only get non-existing columns.
            sync_keys = list(sync_row.keys())
            for key in [k for k in row.keys() if k not in sync_keys]:
                # Get a sample of the new column(s) from the row.
                sync_row[key] = row[key]
        self._sync_columns(sync_row, ensure, types=types)

        # Get columns name list to be used for padding later.
        columns = sync_row.keys()

        chunk = []
        for index, row in enumerate(rows):
            chunk.append(row)

            # Insert when chunk_size is fulfilled or this is the last row
            if len(chunk) == chunk_size or index == len(rows) - 1:
                chunk = pad_chunk_columns(chunk, columns)
                self.table.insert().execute(chunk)
                chunk = []

    def update(self, row, keys, ensure=None, types=None, return_count=False):
        """Update a row in the table.

        The update is managed via the set of column names stated in ``keys``:
        they will be used as filters for the data to be updated, using the
        values in ``row``.
        ::

            # update all entries with id matching 10, setting their title
            # columns
            data = dict(id=10, title='I am a banana!')
            table.update(data, ['id'])

        If keys in ``row`` update columns not present in the table, they will
        be created based on the settings of ``ensure`` and ``types``, matching
        the behavior of :py:meth:`insert() <dataset.Table.insert>`.
        """
        row = self._sync_columns(row, ensure, types=types)
        args, row = self._keys_to_args(row, keys)
        clause = self._args_to_clause(args)
        if not len(row):
            return self.count(clause)
        stmt = self.table.update(whereclause=clause, values=row)
        rp = self.db.executable.execute(stmt)
        if rp.supports_sane_rowcount():
            return rp.rowcount
        if return_count:
            return self.count(clause)

    def update_many(self,
                    rows,
                    keys,
                    chunk_size=1000,
                    ensure=None,
                    types=None):
        """Update many rows in the table at a time.

        This is significantly faster than updating them one by one. By default
        the rows are processed in chunks of 1000 per commit, unless you specify
        a different ``chunk_size``.

        See :py:meth:`update() <dataset.Table.update>` for details on
        the other parameters.
        """
        # Convert keys to a list if not a list or tuple.
        keys = keys if type(keys) in (list, tuple) else [keys]

        chunk = []
        columns = []
        for index, row in enumerate(rows):
            chunk.append(row)
            for col in row.keys():
                if col not in columns:
                    columns.append(col)

            # bindparam requires names to not conflict (cannot be "id" for id)
            for key in keys:
                row['_%s' % key] = row[key]

            # Update when chunk_size is fulfilled or this is the last row
            if len(chunk) == chunk_size or index == len(rows) - 1:
                cl = [self.table.c[k] == bindparam('_%s' % k) for k in keys]
                stmt = self.table.update(whereclause=and_(*cl),
                                         values={
                                             col: bindparam(col,
                                                            required=False)
                                             for col in columns
                                         })
                self.db.executable.execute(stmt, chunk)
                chunk = []

    def upsert(self, row, keys, ensure=None, types=None):
        """An UPSERT is a smart combination of insert and update.

        If rows with matching ``keys`` exist they will be updated, otherwise a
        new row is inserted in the table.
        ::

            data = dict(id=10, title='I am a banana!')
            table.upsert(data, ['id'])
        """
        row = self._sync_columns(row, ensure, types=types)
        if self._check_ensure(ensure):
            self.create_index(keys)
        row_count = self.update(row, keys, ensure=False, return_count=True)
        if row_count == 0:
            return self.insert(row, ensure=False)
        return True

    def upsert_many(self,
                    rows,
                    keys,
                    chunk_size=1000,
                    ensure=None,
                    types=None):
        """
        Sorts multiple input rows into inserts and updates: rows that do not
        yet exist are passed to insert_many, and existing rows are updated.

        See :py:meth:`upsert() <dataset.Table.upsert>` and
        :py:meth:`insert_many() <dataset.Table.insert_many>`.
        """
        # Convert keys to a list if not a list or tuple.
        keys = keys if type(keys) in (list, tuple) else [keys]

        to_insert = []
        to_update = []
        for row in rows:
            if self.find_one(**{key: row.get(key) for key in keys}):
                # Row exists - update it.
                to_update.append(row)
            else:
                # Row doesn't exist - insert it.
                to_insert.append(row)

        # Insert non-existing rows.
        self.insert_many(to_insert, chunk_size, ensure, types)

        # Update existing rows.
        self.update_many(to_update, keys, chunk_size, ensure, types)

    def delete(self, *clauses, **filters):
        """Delete rows from the table.

        Keyword arguments can be used to add column-based filters. The filter
        criterion will always be equality:
        ::

            table.delete(place='Berlin')

        If no arguments are given, all records are deleted.
        """
        if not self.exists:
            return False
        clause = self._args_to_clause(filters, clauses=clauses)
        stmt = self.table.delete(whereclause=clause)
        rp = self.db.executable.execute(stmt)
        return rp.rowcount > 0

    def _reflect_table(self):
        """Load the tables definition from the database."""
        with self.db.lock:
            self._columns = None
            try:
                self._table = SQLATable(self.name,
                                        self.db.metadata,
                                        schema=self.db.schema,
                                        autoload=True)
            except NoSuchTableError:
                self._table = None

    def _threading_warn(self):
        if self.db.in_transaction and threading.active_count() > 1:
            warnings.warn(
                "Changing the database schema inside a transaction "
                "in a multi-threaded environment is likely to lead "
                "to race conditions and synchronization issues.",
                RuntimeWarning)

    def _sync_table(self, columns):
        """Lazy load, create or adapt the table structure in the database."""
        self._columns = None
        if self._table is None:
            # Load an existing table from the database.
            self._reflect_table()
        if self._table is None:
            # Create the table with an initial set of columns.
            if not self._auto_create:
                raise DatasetException("Table does not exist: %s" % self.name)
            # Keep the lock scope small because this is run very often.
            with self.db.lock:
                self._threading_warn()
                self._table = SQLATable(self.name,
                                        self.db.metadata,
                                        schema=self.db.schema)
                if self._primary_id is not False:
                    # This can go wrong on DBMS like MySQL and SQLite where
                    # tables cannot have no columns.
                    primary_id = self._primary_id
                    primary_type = self._primary_type
                    increment = primary_type in [Types.integer, Types.bigint]
                    column = Column(primary_id,
                                    primary_type,
                                    primary_key=True,
                                    autoincrement=increment)
                    self._table.append_column(column)
                for column in columns:
                    if not column.name == self._primary_id:
                        self._table.append_column(column)
                self._table.create(self.db.executable, checkfirst=True)
        elif len(columns):
            with self.db.lock:
                self._reflect_table()
                self._threading_warn()
                for column in columns:
                    if not self.has_column(column.name):
                        self.db.op.add_column(self.name, column,
                                              self.db.schema)
                self._reflect_table()

    def _sync_columns(self, row, ensure, types=None):
        """Create missing columns (or the table) prior to writes.

        If automatic schema generation is disabled (``ensure`` is ``False``),
        this will remove any keys from the ``row`` for which there is no
        matching column.
        """
        ensure = self._check_ensure(ensure)
        types = types or {}
        types = {self._get_column_name(k): v for (k, v) in types.items()}
        out = {}
        sync_columns = {}
        for name, value in row.items():
            name = self._get_column_name(name)
            if self.has_column(name):
                out[name] = value
            elif ensure:
                _type = types.get(name)
                if _type is None:
                    _type = self.db.types.guess(value)
                sync_columns[name] = Column(name, _type)
                out[name] = value
        self._sync_table(sync_columns.values())
        return out

    def _check_ensure(self, ensure):
        if ensure is None:
            return self.db.ensure_schema
        return ensure

    def _generate_clause(self, column, op, value):
        if op in ('like', ):
            return self.table.c[column].like(value)
        if op in ('ilike', ):
            return self.table.c[column].ilike(value)
        if op in ('>', 'gt'):
            return self.table.c[column] > value
        if op in ('<', 'lt'):
            return self.table.c[column] < value
        if op in ('>=', 'gte'):
            return self.table.c[column] >= value
        if op in ('<=', 'lte'):
            return self.table.c[column] <= value
        if op in ('=', '==', 'is'):
            return self.table.c[column] == value
        if op in ('!=', '<>', 'not'):
            return self.table.c[column] != value
        if op in ('in',):
            return self.table.c[column].in_(value)
        if op in ('between', '..'):
            start, end = value
            return self.table.c[column].between(start, end)
        return false()
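
    # For illustration (matching the dataset library's documented filter
    # syntax): these operators are selected by passing a dict as the filter
    # value, e.g.
    #
    #     table.find(year={'gt': 1980})
    #     table.find(year={'between': [1980, 1990]})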

    def _args_to_clause(self, args, clauses=()):
        clauses = list(clauses)
        for column, value in args.items():
            column = self._get_column_name(column)
            if not self.has_column(column):
                clauses.append(false())
            elif isinstance(value, (list, tuple, set)):
                clauses.append(self._generate_clause(column, 'in', value))
            elif isinstance(value, dict):
                for op, op_value in value.items():
                    clauses.append(self._generate_clause(column, op, op_value))
            else:
                clauses.append(self._generate_clause(column, '=', value))
        return and_(*clauses)

    def _args_to_order_by(self, order_by):
        orderings = []
        for ordering in ensure_tuple(order_by):
            if ordering is None:
                continue
            column = ordering.lstrip('-')
            column = self._get_column_name(column)
            if not self.has_column(column):
                continue
            if ordering.startswith('-'):
                orderings.append(self.table.c[column].desc())
            else:
                orderings.append(self.table.c[column].asc())
        return orderings

    def _keys_to_args(self, row, keys):
        keys = ensure_tuple(keys)
        keys = [self._get_column_name(k) for k in keys]
        row = row.copy()
        args = {k: row.pop(k, None) for k in keys}
        return args, row

    def create_column(self, name, type, **kwargs):
        """Create a new column ``name`` of a specified type.
        ::

            table.create_column('created_at', db.types.datetime)

        `type` corresponds to an SQLAlchemy type as described by
        `dataset.db.Types`. Additional keyword arguments are passed
        to the constructor of `Column`, so that default values, and
        options like `nullable` and `unique` can be set.
        ::

            table.create_column('key', db.types.text, unique=True,
                                nullable=False)
            table.create_column('food', db.types.text, default='banana')
        """
        name = self._get_column_name(name)
        if self.has_column(name):
            log.debug("Column exists: %s" % name)
            return
        self._sync_table((Column(name, type, **kwargs), ))

    def create_column_by_example(self, name, value):
        """
        Explicitly create a new column ``name`` with a type that is appropriate
        to store the given example ``value``.  The type is guessed in the same
        way as for the insert method with ``ensure=True``.
        ::

            table.create_column_by_example('length', 4.2)

        If a column of the same name already exists, no action is taken, even
        if it is not of the type we would have created.
        """
        type_ = self.db.types.guess(value)
        self.create_column(name, type_)

    def drop_column(self, name):
        """Drop the column ``name``.
        ::

            table.drop_column('created_at')
        """
        if self.db.engine.dialect.name == 'sqlite':
            raise RuntimeError("SQLite does not support dropping columns.")
        name = self._get_column_name(name)
        with self.db.lock:
            if not self.exists or not self.has_column(name):
                log.debug("Column does not exist: %s", name)
                return

            self._threading_warn()
            self.db.op.drop_column(self.table.name, name, self.table.schema)
            self._reflect_table()

    def drop(self):
        """Drop the table from the database.

        Deletes both the schema and all the contents within it.
        """
        with self.db.lock:
            if self.exists:
                self._threading_warn()
                self.table.drop(self.db.executable, checkfirst=True)
                self._table = None
                self._columns = None

    def has_index(self, columns):
        """Check if an index exists to cover the given ``columns``."""
        if not self.exists:
            return False
        columns = set([self._get_column_name(c) for c in columns])
        if columns in self._indexes:
            return True
        for column in columns:
            if not self.has_column(column):
                return False
        indexes = self.db.inspect.get_indexes(self.name, schema=self.db.schema)
        for index in indexes:
            if columns == set(index.get('column_names', [])):
                self._indexes.append(columns)
                return True
        return False

    def create_index(self, columns, name=None, **kw):
        """Create an index to speed up queries on a table.

        If no ``name`` is given a random name is created.
        ::

            table.create_index(['name', 'country'])
        """
        columns = [self._get_column_name(c) for c in ensure_tuple(columns)]
        with self.db.lock:
            if not self.exists:
                raise DatasetException("Table has not been created yet.")

            for column in columns:
                if not self.has_column(column):
                    return

            if not self.has_index(columns):
                self._threading_warn()
                name = name or index_name(self.name, columns)
                columns = [self.table.c[c] for c in columns]
                idx = Index(name, *columns, **kw)
                idx.create(self.db.executable)

    def find(self, *_clauses, **kwargs):
        """Perform a simple search on the table.

        Simply pass keyword arguments as ``filter``.
        ::

            results = table.find(country='France')
            results = table.find(country='France', year=1980)

        Using ``_limit``::

            # just return the first 10 rows
            results = table.find(country='France', _limit=10)

        You can sort the results by single or multiple columns. Append a minus
        sign to the column name for descending order::

            # sort results by a column 'year'
            results = table.find(country='France', order_by='year')
            # return all rows sorted by multiple columns (descending by year)
            results = table.find(order_by=['country', '-year'])

        To perform complex queries with advanced filters or to perform
        aggregation, use :py:meth:`db.query() <dataset.Database.query>`
        instead.
        """
        if not self.exists:
            return iter([])

        _limit = kwargs.pop('_limit', None)
        _offset = kwargs.pop('_offset', 0)
        order_by = kwargs.pop('order_by', None)
        _streamed = kwargs.pop('_streamed', False)
        _step = kwargs.pop('_step', QUERY_STEP)
        if _step is False or _step == 0:
            _step = None

        order_by = self._args_to_order_by(order_by)
        args = self._args_to_clause(kwargs, clauses=_clauses)
        query = self.table.select(whereclause=args,
                                  limit=_limit,
                                  offset=_offset)
        if len(order_by):
            query = query.order_by(*order_by)

        conn = self.db.executable
        if _streamed:
            conn = self.db.engine.connect()
            conn = conn.execution_options(stream_results=True)

        return ResultIter(conn.execute(query),
                          row_type=self.db.row_type,
                          step=_step)

    def find_one(self, *args, **kwargs):
        """Get a single result from the table.

        Works just like :py:meth:`find() <dataset.Table.find>` but returns one
        result, or ``None``.
        ::

            row = table.find_one(country='United States')
        """
        if not self.exists:
            return None

        kwargs['_limit'] = 1
        kwargs['_step'] = None
        resiter = self.find(*args, **kwargs)
        try:
            for row in resiter:
                return row
        finally:
            resiter.close()

    def count(self, *_clauses, **kwargs):
        """Return the count of results for the given filter set."""
        # NOTE: this does not have support for limit and offset since I can't
        # see how this is useful. Still, there might be compatibility issues
        # with people using these flags. Let's see how it goes.
        if not self.exists:
            return 0

        args = self._args_to_clause(kwargs, clauses=_clauses)
        query = select([func.count()], whereclause=args)
        query = query.select_from(self.table)
        rp = self.db.executable.execute(query)
        return rp.fetchone()[0]

    def __len__(self):
        """Return the number of rows in the table."""
        return self.count()

    def distinct(self, *args, **_filter):
        """Return all the unique (distinct) values for the given ``columns``.
        ::

            # returns only one row per year, ignoring the rest
            table.distinct('year')
            # works with multiple columns, too
            table.distinct('year', 'country')
            # you can also combine this with a filter
            table.distinct('year', country='China')
        """
        if not self.exists:
            return iter([])

        columns = []
        clauses = []
        for column in args:
            if isinstance(column, ClauseElement):
                clauses.append(column)
            else:
                if not self.has_column(column):
                    raise DatasetException("No such column: %s" % column)
                columns.append(self.table.c[column])

        clause = self._args_to_clause(_filter, clauses=clauses)
        if not len(columns):
            return iter([])

        q = expression.select(columns,
                              distinct=True,
                              whereclause=clause,
                              order_by=[c.asc() for c in columns])
        return self.db.query(q)

    # Legacy methods for running find queries.
    all = find

    def __iter__(self):
        """Return all rows of the table as simple dictionaries.

        Allows for iterating over all rows in the table without explicitly
        calling :py:meth:`find() <dataset.Table.find>`.
        ::

            for row in table:
                print(row)
        """
        return self.find()

    def __repr__(self):
        """Get table representation."""
        return '<Table(%s)>' % self.table.name
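
# A minimal end-to-end sketch (assuming the published `dataset` package that
# this module belongs to):
#
#     import dataset
#     db = dataset.connect('sqlite:///:memory:')
#     table = db['people']           # table is created lazily on first write
#     pk = table.insert(dict(name='Dolly', year=1980))
#     row = table.find_one(id=pk)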