def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn("Domain '%s' needs to be synchronised" %
                         record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
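
The migration above relies on a module-level _build_hash helper that the
excerpt does not show. A minimal sketch, assuming the String(32) column
stores an MD5 hexdigest over the fields that make a record unique (the
helper name comes from the call site; the exact field choice is an
assumption):

import hashlib

def _build_hash(record):
    # Assumption: duplicate records should collide on the new unique
    # 'hash' column, so hash the distinguishing fields. 32 characters
    # matches an MD5 hexdigest.
    md5 = hashlib.md5()
    md5.update(('%s:%s:%s' % (record.domain_id, record.data,
                              record.priority)).encode('utf-8'))
    return md5.hexdigest()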
Example #2
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Load the pool_attributes_table table schema
    pool_attributes_table = Table('pool_attributes', meta, autoload=True)

    # Create the pool_ns_records DB table
    pool_ns_records_table.create()

    # Find the existing name server entries
    pool_ns_records = select(columns=[
        pool_attributes_table.c.id, pool_attributes_table.c.key,
        pool_attributes_table.c.value, pool_attributes_table.c.created_at,
        pool_attributes_table.c.updated_at, pool_attributes_table.c.version
    ]).where(
        pool_attributes_table.c.key == 'name_server').execute().fetchall()

    # Create matching entries in the new table.
    for pool_ns_record in pool_ns_records:
        pool_ns_records_table.insert().execute(
            id=pool_ns_record.id,
            created_at=pool_ns_record.created_at,
            updated_at=pool_ns_record.updated_at,
            version=pool_ns_record.version,
            pool_id=default_pool_id,
            priority=1,
            hostname=pool_ns_record.value,
        )

    # Delete the old nameserver attr rows from the Database
    pool_attributes_table.delete()\
                         .where(pool_attributes_table.c.key == 'name_server')\
                         .execute()
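
This migration (and its duplicate later in the section) calls
pool_ns_records_table.create() without showing the table definition. A rough
sketch of the schema implied by the insert it performs; the column types and
lengths are assumptions inferred from the values being copied:

pool_ns_records_table = Table(
    'pool_ns_records', meta,
    Column('id', String(36), primary_key=True),  # UUIDs, as elsewhere
    Column('created_at', DateTime),
    Column('updated_at', DateTime),
    Column('version', Integer, nullable=False),
    Column('pool_id', String(36), nullable=False),
    Column('priority', Integer, nullable=False),
    Column('hostname', String(255), nullable=False),
)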
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warning(_LW("Domain '%s' needs to be synchronised") %
                            record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
Example #4
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    # servers_table = Table('servers', meta, autoload=True)
    pool_attrib_table = Table('pool_attributes', meta, autoload=True)

    # Delete the name_server attribute rows for the default pool
    pool_attrib_table.delete().\
        where(pool_attrib_table.c.pool_id == default_pool_id).\
        where(pool_attrib_table.c.key == 'name_server').\
        execute()
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    zone_attibutes_table = Table('zone_attributes', meta, autoload=True)

    connection = migrate_engine.connect()

    transaction = connection.begin()
    try:

        zone_masters_table.create()

        masters = select([
            zone_attibutes_table.c.id, zone_attibutes_table.c.version,
            zone_attibutes_table.c.created_at,
            zone_attibutes_table.c.updated_at, zone_attibutes_table.c.value,
            zone_attibutes_table.c.zone_id
        ]).where(zone_attibutes_table.c.key == 'master').execute().fetchall()

        masters_input = []

        for master in masters:
            host, port = utils.split_host_port(
                master[zone_attibutes_table.c.value])
            masters_input.append({
                'id': master[zone_attibutes_table.c.id],
                'version': master[zone_attibutes_table.c.version],
                'created_at': master[zone_attibutes_table.c.created_at],
                'updated_at': master[zone_attibutes_table.c.updated_at],
                'zone_id': master[zone_attibutes_table.c.zone_id],
                'host': host,
                'port': port
            })

        # Copy the converted rows into the new table and remove the old
        # 'master' attribute rows.
        if masters_input:
            zone_masters_table.insert().execute(masters_input)

        zone_attibutes_table.delete().where(
            zone_attibutes_table.c.key == 'master').execute()

        zone_attibutes_table.c.key.alter(type=String(50))
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    zone_attibutes_table = Table('zone_attributes', meta, autoload=True)

    connection = migrate_engine.connect()

    transaction = connection.begin()
    try:

        zone_masters_table.create()

        masters = select(
            [
                zone_attibutes_table.c.id,
                zone_attibutes_table.c.version,
                zone_attibutes_table.c.created_at,
                zone_attibutes_table.c.updated_at,
                zone_attibutes_table.c.value,
                zone_attibutes_table.c.zone_id
            ]
        ).where(
            zone_attibutes_table.c.key == 'master'
        ).execute().fetchall()

        masters_input = []

        for master in masters:
            host, port = utils.split_host_port(
                master[zone_attibutes_table.c.value])
            masters_input.append({
                'id': master[zone_attibutes_table.c.id],
                'version': master[zone_attibutes_table.c.version],
                'created_at': master[zone_attibutes_table.c.created_at],
                'updated_at': master[zone_attibutes_table.c.updated_at],
                'zone_id': master[zone_attibutes_table.c.zone_id],
                'host': host,
                'port': port
            })

        # Copy the converted rows into the new table and remove the old
        # 'master' attribute rows.
        if masters_input:
            zone_masters_table.insert().execute(masters_input)

        zone_attibutes_table.delete().where(
            zone_attibutes_table.c.key == 'master').execute()

        zone_attibutes_table.c.key.alter(type=String(50))
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise
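
Both variants above split the stored 'host:port' value with
utils.split_host_port, which the excerpts do not define. A minimal sketch;
the default port of 53 and the lack of IPv6 handling are assumptions:

def split_host_port(string, default_port=53):
    # Split 'host:port' into (host, port); fall back to the default port
    # when the stored value is a bare hostname.
    host, _, port = string.partition(':')
    return host, int(port) if port else default_port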
Example #7
def downgrade(migrate_engine):
    meta.bind = migrate_engine
    # servers_table = Table('servers', meta, autoload=True)
    pool_attrib_table = Table('pool_attributes', meta, autoload=True)

    # Delete the name_server attribute rows for the default pool
    pool_attrib_table.delete().\
        where(pool_attrib_table.c.pool_id == default_pool_id).\
        where(pool_attrib_table.c.key == 'name_server').\
        execute()
Example #8
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    keys = Enum(name='key', metadata=meta, *ZONE_ATTRIBUTE_KEYS)
    types = Enum(name='types', metadata=meta, *ZONE_TYPES)

    domains_attributes_table = Table('domain_attributes', meta, autoload=True)
    domains_table = Table('domains', meta, autoload=True)

    domains = select(columns=[domains_table.c.id, domains_table.c.type])\
        .where(domains_table.c.type == 'SECONDARY')\
        .execute().fetchall()

    for dom in domains:
        delete = domains_table.delete()\
            .where(domains_table.c.id == dom.id)
        delete.execute()

    domains_table.c.type.drop()
    domains_table.c.transferred_at.drop()

    domains_attributes_table.drop()
    keys.drop()
    types.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint(
            'name', 'deleted', name='unique_domain_name', table=domains_table)

        # Add missing unique index
        constraint.create()
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Load the pool_attributes_table table schema
    pool_attributes_table = Table('pool_attributes', meta, autoload=True)

    # Create the pool_ns_records DB table
    pool_ns_records_table.create()

    # Find the existing name server entries
    pool_ns_records = select(
        columns=[
            pool_attributes_table.c.id,
            pool_attributes_table.c.key,
            pool_attributes_table.c.value,
            pool_attributes_table.c.created_at,
            pool_attributes_table.c.updated_at,
            pool_attributes_table.c.version
        ]
    ).where(pool_attributes_table.c.key == 'name_server').execute().fetchall()

    # Create matching entries in the new table.
    for pool_ns_record in pool_ns_records:
        pool_ns_records_table.insert().execute(
            id=pool_ns_record.id,
            created_at=pool_ns_record.created_at,
            updated_at=pool_ns_record.updated_at,
            version=pool_ns_record.version,

            pool_id=default_pool_id,
            priority=1,
            hostname=pool_ns_record.value,
        )

    # Delete the old nameserver attr rows from the Database
    pool_attributes_table.delete()\
                         .where(pool_attributes_table.c.key == 'name_server')\
                         .execute()
Example #10
class TableHandler(object):

    """ Used by automatically generated objects such as datasets
    and dimensions to generate, write and clear the table under
    its management. """

    def _init_table(self, meta, namespace, name, id_type=Integer):
        """ Create the given table if it does not exist, otherwise
        reflect the current table schema from the database.
        """
        name = namespace + '__' + name
        self.table = Table(name, meta)
        if id_type is not None:
            col = Column('id', id_type, primary_key=True)
            self.table.append_column(col)

    def _generate_table(self):
        """ Create the given table if it does not exist. """
        # TODO: make this support some kind of migration?
        if not db.engine.has_table(self.table.name):
            self.table.create(db.engine)

    def _upsert(self, bind, data, unique_columns):
        """ Upsert a set of values into the table. This will
        query for the set of unique columns and either update an
        existing row or create a new one. In both cases, the ID
        of the changed row will be returned. """
        key = and_(*[self.table.c[c] == data.get(c)
                     for c in unique_columns])
        q = self.table.update(key, data)
        if bind.execute(q).rowcount == 0:
            q = self.table.insert(data)
            rs = bind.execute(q)
            return rs.inserted_primary_key[0]
        else:
            q = self.table.select(key)
            row = bind.execute(q).fetchone()
            return row['id']

    def _flush(self, bind):
        """ Delete all rows in the table. """
        q = self.table.delete()
        bind.execute(q)

    def _drop(self, bind):
        """ Drop the table and the local reference to it. """
        if db.engine.has_table(self.table.name):
            self.table.drop()
        del self.table
Example #11
class TableHandler(object):
    """ Used by automatically generated objects such as datasets
    and dimensions to generate, write and clear the table under
    its management. """
    def _init_table(self, meta, namespace, name, id_type=Integer):
        """ Create the given table if it does not exist, otherwise
        reflect the current table schema from the database.
        """
        name = namespace + '__' + name
        self.table = Table(name, meta)
        if id_type is not None:
            col = Column('id', id_type, primary_key=True)
            self.table.append_column(col)

    def _generate_table(self):
        """ Create the given table if it does not exist. """
        # TODO: make this support some kind of migration?
        if not db.engine.has_table(self.table.name):
            self.table.create(db.engine)

    def _upsert(self, bind, data, unique_columns):
        """ Upsert a set of values into the table. This will
        query for the set of unique columns and either update an
        existing row or create a new one. In both cases, the ID
        of the changed row will be returned. """
        key = and_(*[self.table.c[c] == data.get(c) for c in unique_columns])
        q = self.table.update(key, data)
        if bind.execute(q).rowcount == 0:
            q = self.table.insert(data)
            rs = bind.execute(q)
            return rs.inserted_primary_key[0]
        else:
            q = self.table.select(key)
            row = bind.execute(q).fetchone()
            return row['id']

    def _flush(self, bind):
        """ Delete all rows in the table. """
        q = self.table.delete()
        bind.execute(q)

    def _drop(self, bind):
        """ Drop the table and the local reference to it. """
        if db.engine.has_table(self.table.name):
            self.table.drop()
        del self.table
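
A short usage sketch for the upsert path, assuming a hypothetical
TableHandler subclass whose generated table has been extended with 'name'
and 'value' columns, and a db.metadata attribute alongside the db.engine
the class already uses (subclass, columns and data are illustrative, not
part of the original):

handler = DatasetHandler()  # hypothetical subclass of TableHandler
handler._init_table(db.metadata, 'dataset', 'entries')
handler._generate_table()

# Insert-or-update keyed on the unique 'name' column; either branch
# returns the id of the affected row.
row_id = handler._upsert(db.engine, {'name': 'gdp', 'value': 42},
                         unique_columns=['name'])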
Example #12
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def _need_explicit_null_ordering(self, key):
        support = SORT_SUPPORT[self.engine.dialect.name]
        default = support['default']
        no = None
        if key.is_reversed:
            no = nullsfirst if default == 'higher' else nullslast
        else:
            no = nullslast if default == 'higher' else nullsfirst
        if key.nulls_first:
            if no != nullsfirst:
                return nullsfirst
            return None
        else:
            if no != nullslast:
                return nullslast
            return None

    def can_sort(self, sortkeys):
        if SORT_SUPPORT.get(self.engine.dialect.name) is None:
            # We have no idea about defaults
            return []
        can_order_null = SORT_SUPPORT[self.engine.dialect.name]['support']
        if (any((self._need_explicit_null_ordering(x) is not None
                 for x in sortkeys)) and not can_order_null):
            return []
        return sortkeys

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        return [str(statement)]

    def _build_statement(self, quals, columns, sortkeys):
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        for sortkey in sortkeys:
            column = self.table.c[sortkey.attname]
            if sortkey.is_reversed:
                column = column.desc()
            if sortkey.collate:
                column = column.collate('"%s"' % sortkey.collate)
            null_ordering = self._need_explicit_null_ordering(sortkey)
            if null_ordering:
                column = null_ordering(column)
            statement = statement.order_by(column)
        return statement

    def execute(self, quals, columns, sortkeys=None):
        """
        The quals are turned into an and'ed where clause.
        """
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)

        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype

    @classmethod
    def import_schema(cls, schema, srv_options, options,
                      restriction_type, restricts):
        """
        Reflects the remote schema.
        """
        metadata = MetaData()
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine,
                         schema=schema,
                         only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(ColumnDefinition(
                    c.name,
                    type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
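
can_sort and _need_explicit_null_ordering consult a module-level
SORT_SUPPORT mapping that none of these excerpts define. A plausible shape,
inferred from the two keys the code reads; the per-dialect entries are
assumptions:

SORT_SUPPORT = {
    # 'default': where NULLs sort without an explicit clause
    # ('higher' = NULLs sort as the largest values)
    # 'support': whether the dialect accepts NULLS FIRST / NULLS LAST
    'postgresql': {'default': 'higher', 'support': True},
    'oracle': {'default': 'higher', 'support': True},
    'mysql': {'default': 'lower', 'support': False},
    'sqlite': {'default': 'lower', 'support': False},
}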
Example #13
def merge(nombre_tabla, id_destino, otros_ids, session):
    """
    Fusiona uno o más registros (otros_ids) en otro (id_destino), siendo todos ellos
    de la tabla 'nombre_tabla'.
    
    IN
      id_usuario   <int>: Identificador del usuario que se conecta
      nombre_tabla <str>: Nombre de la tabla a la que pertenecen los registros
      id_destino   <int>: Identificador del registro donde se guardará la información
      otros_ids    <list> [<int>, ...]: Lista de ids desde donde se obtendrá la información
    
    OUT
      un JSON de la forma:
      {
       'id_destino': 123,
       'id_origen': 123,
       'num_campos': 123
      }
    
    EXC
      ENoExisteRegistro: Cuando no existe alguno de los registros (origen o destino)
    """

    meta = MetaData(bind=session.bind, reflect=True)
    tabla = Table(nombre_tabla, meta, autoload=True)

    # check that the "destination" record exists
    alumno = session.execute(select([tabla],
                                    tabla.c.id == id_destino)).fetchone()
    if alumno is None:
        raise ENoExisteRegistro(id_destino)

    # check that the "other" records exist
    for id_otro in otros_ids:

        otro = session.execute(select([tabla],
                                      tabla.c.id == id_otro)).fetchone()
        if otro is None:
            raise ENoExisteRegistro(id_otro)

    # merge the records
    resultado = {}
    resultado['id_destino'] = id_destino
    resultado['num_campos'] = 0
    for id_otro in otros_ids:

        if id_otro == id_destino:
            continue

        resultado['id_origen'] = id_otro

        # fetch the data of both records
        alumno = session.execute(select([tabla],
                                        tabla.c.id == id_destino)).fetchone()
        otro = session.execute(select([tabla],
                                      tabla.c.id == id_otro)).fetchone()

        # foreign keys that reference this table
        for t in meta.sorted_tables:
            for fk in t.foreign_keys:
                if fk.references(tabla):
                    # repoint rows that reference "source" -> "destination"
                    qry_update = t.update(fk.parent == id_otro,
                                          values={fk.parent: id_destino})
                    session.execute(qry_update)

        session.commit()

        # update the table's data
        datos = {}
        for k in alumno.keys():
            if k != 'id' and k != 'busqueda':

                #print 'Processing %s: %s - %s' % (k, alumno[k] or '<NULL>', otro[k] or '<NULL>')

                #                if alumno[k] != None and otro[k] != None:
                #                    if lista_atributos and str(k) in lista_atributos:
                #                        merge('clientes', alumno[k], [otro[k]], conector=conector)

                if alumno[k] is None and otro[k] is not None:
                    #print 'changing...'
                    datos[k] = otro[k]
                    resultado['num_campos'] += 1

        if datos != {}:
            # actualizar "destino"
            qry_update = tabla.update(tabla.c.id == id_destino, values=datos)
            session.execute(qry_update)

        # borrar "otro"
        qry_delete = tabla.delete(tabla.c.id == id_otro)
        session.execute(qry_delete)
        session.commit()

    return resultado
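
A usage sketch for merge(), assuming a configured SQLAlchemy session and a
'clientes' table containing rows 1, 2 and 3 (the table name and ids are
illustrative):

# Merge records 2 and 3 into record 1, then report how many empty fields
# of the destination row were filled from the merged rows.
resultado = merge('clientes', 1, [2, 3], session)
print('%s fields copied into record %s' %
      (resultado['num_campos'], resultado['id_destino']))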
Example #14
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """
    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema, *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def _need_explicit_null_ordering(self, key):
        support = SORT_SUPPORT[self.engine.dialect.name]
        default = support['default']
        no = None
        if key.is_reversed:
            no = nullsfirst if default == 'higher' else nullslast
        else:
            no = nullslast if default == 'higher' else nullsfirst
        if key.nulls_first:
            if no != nullsfirst:
                return nullsfirst
            return None
        else:
            if no != nullslast:
                return nullslast
            return None

    def can_sort(self, sortkeys):
        if SORT_SUPPORT.get(self.engine.dialect.name) is None:
            # We have no idea about defaults
            return []
        can_order_null = SORT_SUPPORT[self.engine.dialect.name]['support']
        if (any((self._need_explicit_null_ordering(x) is not None
                 for x in sortkeys)) and not can_order_null):
            return []
        return sortkeys

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        return [str(statement)]

    def _build_statement(self, quals, columns, sortkeys):
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(
                    operator(self.table.c[qual.field_name], qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        for sortkey in sortkeys:
            column = self.table.c[sortkey.attname]
            if sortkey.is_reversed:
                column = column.desc()
            if sortkey.collate:
                column = column.collate('"%s"' % sortkey.collate)
            null_ordering = self._need_explicit_null_ordering(sortkey)
            if null_ordering:
                column = null_ordering(column)
            statement = statement.order_by(column)
        return statement

    def execute(self, quals, columns, sortkeys=None):
        """
        The quals are turned into an and'ed where clause.
        """
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection.execution_options(
            stream_results=True).execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)

        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(self.table.update().where(
            self.table.c[self._row_id_column] == rowid).values(newvalues))

    def delete(self, rowid):
        self.connection.execute(self.table.delete().where(
            self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone', 'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen), )
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen), )

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype

    @classmethod
    def import_schema(cls, schema, srv_options, options, restriction_type,
                      restricts):
        """
        Reflects the remote schema.
        """
        metadata = MetaData()
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine, schema=schema, only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(
                    ColumnDefinition(c.name,
                                     type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
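
As a rough illustration, here is what _get_column_type produces for a few
common PostgreSQL type strings (the exact classes come from sqlalchemy's
ischema_names lookup; fdw is a hypothetical wrapper instance):

coltype = fdw._get_column_type('numeric(10,2)')
# -> NUMERIC(precision=10, scale=2)
# 'character varying(5)'       -> VARCHAR(5)
# 'timestamp with time zone'   -> TIMESTAMP(timezone=True)
# 'integer[]'                  -> ARRAY(INTEGER)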
Example #15
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        if fdw_options.get('db_url'):
            url = make_url(fdw_options.get('db_url'))
        else:
            if 'drivername' not in fdw_options:
                log_to_postgres('Either a db_url, or drivername and other '
                                'connection infos are needed', ERROR)
            url = URL(fdw_options['drivername'])
        for param in ('username', 'password', 'host',
                      'database', 'port'):
            if param in fdw_options:
                setattr(url, param, fdw_options[param])
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def execute(self, quals, columns):
        """
        The quals are turned into an and'ed where clause.
        """
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c.values()
        statement = statement.with_only_columns(columns)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype
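
Every variant of this wrapper maps multicorn qual operators through a
module-level OPERATORS dict that the excerpts omit. A minimal sketch of the
idea using Python's operator module, which works because operator.eq(col,
value) on a SQLAlchemy column builds the corresponding SQL expression; the
exact operator set supported by the real module is an assumption:

import operator

OPERATORS = {
    '=': operator.eq,
    '<': operator.lt,
    '>': operator.gt,
    '<=': operator.le,
    '>=': operator.ge,
    '<>': operator.ne,
}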
Example #16
class AlchemySqlStore(Model):
    """
    datastore using SQLAlchemy meta-SQL Python package
   
    create table vesper_stmts (
      subject UNIQUE
      predicate UNIQUE
      object UNIQUE
      objecttype UNIQUE
      context UNIQUE
    )
    """

    def __init__(self, source=None, defaultStatements=None, autocommit=False, **kw):
        if source is None:
            # this seems like a reasonable default thing to do
            source = "sqlite://"
            log.debug("SQLite in-memory database being opened")

        # We take source to be a SQLAlchemy-style dbapi spec:
        # dialect+driver://username:password@host:port/database
        # connection is made JIT on first connect()
        log.debug("sqla engine being created with:", source)

        self.engine = create_engine(source)
        self.md = sqlalchemy.schema.MetaData()
        # utterly insufficient datatypes. just for first pass
        # technically the keep_existing bool is redundant as create_all() default is "check first"
        self.vesper_stmts = Table(
            "vesper_stmts",
            self.md,
            Column("subject", String(255)),  # primary_key = True),
            Column("predicate", String(255)),  # primary_key = True),
            Column("object", String(255)),  # primary_key = True),
            Column("objecttype", String(8)),
            Column("context", String(8)),
            UniqueConstraint("subject", "predicate", "object", "objecttype", "context"),
            keep_existing=True,
        )
        Index("idx_vs", self.vesper_stmts.c.subject, self.vesper_stmts.c.predicate, self.vesper_stmts.c.object)
        self.md.create_all(self.engine)

        # Set up our state machine and grab a connection from the sqlalchemy pool
        self.conn = self.engine.connect()
        self.trans = None
        self.autocommit = autocommit

    def _checkConnection(self):
        if self.conn is None:
            self.conn = self.engine.connect()
        if self.autocommit is False:
            if not self.conn.in_transaction():
                self.trans = self.conn.begin()
        self.conn.execution_options(autocommit=self.autocommit)

    def getStatements(
        self, subject=None, predicate=None, object=None, objecttype=None, context=None, asQuad=True, hints=None
    ):
        """ 
        Return all the statements in the model that match the given arguments.
        Any combination of subject and predicate can be None, and any None slot is
        treated as a wildcard that matches any value in the model.
        """
        fs = subject is not None
        fp = predicate is not None
        fo = object is not None
        fot = objecttype is not None
        fc = context is not None
        hints = hints or {}
        limit = hints.get("limit")
        offset = hints.get("offset")

        log.debug("s p o ot c quad lim offset: ", fs, fp, fo, fot, fc, asQuad, limit, offset)

        if fo:
            if isinstance(object, ResourceUri):
                object = object.uri
                fot = True
                objecttype = OBJECT_TYPE_RESOURCE
            elif not fot:
                objecttype = OBJECT_TYPE_LITERAL

        if not asQuad and not fc:
            query = select(
                [
                    self.vesper_stmts.c.subject,
                    self.vesper_stmts.c.predicate,
                    self.vesper_stmts.c.object,
                    self.vesper_stmts.c.objecttype,
                    func.min(self.vesper_stmts.c.context).label("context"),
                ]
            )
        else:  # asQuad is True
            query = self.vesper_stmts.select()
        if fs:
            query = query.where(self.vesper_stmts.c.subject == subject)
        if fp:
            query = query.where(self.vesper_stmts.c.predicate == predicate)
        if fo:
            query = query.where(self.vesper_stmts.c.object == object)
        if fot:
            query = query.where(self.vesper_stmts.c.objecttype == objecttype)
        if fc:
            query = query.where(self.vesper_stmts.c.context == context)
        if not asQuad and not fc:
            query = query.group_by(
                self.vesper_stmts.c.subject,
                self.vesper_stmts.c.predicate,
                self.vesper_stmts.c.object,
                self.vesper_stmts.c.objecttype,
            )
        if limit is not None:
            query = query.limit(limit)
        if offset is not None:
            query = query.offset(offset)

        stmts = []
        self._checkConnection()
        result = self.conn.execute(query)
        for r in result:
            stmts.append(Statement(r["subject"], r["predicate"], r["object"], r["objecttype"], r["context"]))

        log.debug("stmts returned: ", len(stmts), stmts)
        return stmts

    def addStatement(self, stmt):
        """add the specified statement to the model"""
        log.debug("addStatement called with ", stmt)

        self._checkConnection()
        result = self.conn.execute(
            self.vesper_stmts.insert(prefixes=["OR IGNORE"]),
            {"subject": stmt[0], "predicate": stmt[1], "object": stmt[2], "objecttype": stmt[3], "context": stmt[4]},
        )
        return result.rowcount

    def addStatements(self, stmts):
        """adds multiple statements to the model"""
        log.debug("addStatement called with ", stmts)

        self._checkConnection()
        result = self.conn.execute(
            self.vesper_stmts.insert(prefixes=["OR IGNORE"]),
            [
                {"subject": stmt[0], "predicate": stmt[1], "object": stmt[2], "objecttype": stmt[3], "context": stmt[4]}
                for stmt in stmts
            ],
        )
        return result.rowcount

    def removeStatement(self, stmt):
        """removes the statement from the model"""
        log.debug("removeStatement called with: ", stmt)

        rmv = self.vesper_stmts.delete().where(
            (self.vesper_stmts.c.subject == stmt[0])
            & (self.vesper_stmts.c.predicate == stmt[1])
            & (self.vesper_stmts.c.object == stmt[2])
            & (self.vesper_stmts.c.objecttype == stmt[3])
            & (self.vesper_stmts.c.context == stmt[4])
        )
        self._checkConnection()
        result = self.conn.execute(rmv)
        return result.rowcount

    def removeStatements(self, stmts):
        """removes multiple statements from the model"""
        log.debug("removeStatements called with: ", stmts)

        wc = []
        [
            wc.append(
                (self.vesper_stmts.c.subject == stmt[0])
                & (self.vesper_stmts.c.predicate == stmt[1])
                & (self.vesper_stmts.c.object == stmt[2])
                & (self.vesper_stmts.c.objecttype == stmt[3])
                & (self.vesper_stmts.c.context == stmt[4])
            )
            for stmt in stmts
        ]
        # no protection for singleton stmt here!
        rmv = self.vesper_stmts.delete().where(or_(*wc))
        self._checkConnection()
        result = self.conn.execute(rmv)
        return result.rowcount

    def commit(self, **kw):
        if self.conn is not None:
            if self.conn.in_transaction():
                self.trans.commit()

    def rollback(self):
        if self.conn is not None:
            if self.conn.in_transaction():
                self.trans.rollback()

    def close(self):
        log.debug("closing!")
        if self.conn is not None:
            self.conn.close()
            self.conn = None
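
A short usage sketch, relying on the in-memory SQLite default and on the
five-field (subject, predicate, object, objecttype, context) tuples the
class reads and writes; the URIs and the 'L' literal objecttype marker are
illustrative:

store = AlchemySqlStore()  # defaults to sqlite:// in memory

store.addStatement(('http://example.org/s', 'http://example.org/p',
                    'hello', 'L', 'ctx1'))
stmts = store.getStatements(subject='http://example.org/s')
store.commit()
store.close()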
Example #17
metadata_v3 = MetaData(bind=engine_v3)
s3 = sessionmaker(bind=engine_v3)()
User = reflect_table_to_declarative(metadata_v3, 'users')
Project = reflect_table_to_declarative(metadata_v3, 'projects')
ProjectInfo = reflect_table_to_declarative(metadata_v3, 'project_info')
License = reflect_table_to_declarative(metadata_v3, 'licenses')
AreaOfInterest = reflect_table_to_declarative(metadata_v3, 'areas_of_interest')
PriorityArea = reflect_table_to_declarative(metadata_v3, 'priority_areas')
project_priority_areas_v3 = Table('project_priority_areas', metadata_v3)
Task = reflect_table_to_declarative(metadata_v3, 'tasks')
TaskHistory = reflect_table_to_declarative(metadata_v3, 'task_history')
header('Connect to v3')
success('Connected to v3')

header('Cleaning up db')
project_priority_areas_v3.delete().execute()
for c in [
        Task, ProjectInfo, Project, License, AreaOfInterest, PriorityArea, User
]:
    s3.query(c).delete()
s3.commit()
success('Cleaned up')

#
# Users
#
count = users_v2.count().scalar()
header('Importing %s users' % count)
i = 0
for user_v2 in s2.query(users_v2):
    if user_v2.id != 24529:
Example #18
def remove_vo(vo, commit_changes=False, skip_history=False):
    """
    Deletes rows associated with `vo` as part of multi-VO migration.

    :param vo:             The 3 character string for the VO being removed from the DB.
    :param commit_changes: If True then changes are made against the database directly.
                           If False, then nothing is committed and the commands needed are dumped to be run later.
    :param skip_history:   If True then tables without FKC containing historical data will not be converted to save time.
    """
    success = True
    engine = session.get_engine()
    conn = engine.connect()
    trans = conn.begin()
    inspector = reflection.Inspector.from_engine(engine)
    metadata = MetaData(bind=conn, reflect=True)
    dialect = engine.dialect.name

    # Gather all the columns that need deleting and all relevant foreign key constraints
    all_fks = []
    tables_and_columns = []
    tables_and_columns_rse = []
    for table_name in inspector.get_table_names():
        if skip_history and ('_history' in table_name or '_hist_recent' in table_name):
            continue
        fks = []
        table = Table(table_name, metadata)
        for column in table.c:
            if 'scope' in column.name or column.name == 'account':
                tables_and_columns.append((table, column))
            if 'rse_id' in column.name:
                tables_and_columns_rse.append((table, column))
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
            if 'scope' in fk['referred_columns'] or 'account' in fk['referred_columns'] or ('rse' in fk['referred_table'] and 'id' in fk['referred_columns']):
                fks.append(ForeignKeyConstraint(fk['constrained_columns'], [fk['referred_table'] + '.' + r for r in fk['referred_columns']],
                                                name=fk['name'], table=table, **fk['options']))
        all_fks.extend(fks)

    try:
        bound_params = {'vo': vo,
                        'vo_suffix': '' if vo == 'def' else vo,
                        'split_character': '@',
                        'int_1': 1,
                        'int_2': 2}

        bound_params_text = {}
        for key in bound_params:
            if isinstance(bound_params[key], int):
                bound_params_text[key] = bound_params[key]
            else:
                bound_params_text[key] = "'%s'" % bound_params[key]

        # Drop all FKCs affecting InternalAccounts/Scopes or RSE IDs
        for fk in all_fks:
            print(str(DropConstraint(fk)) + ';')
            if commit_changes:
                conn.execute(DropConstraint(fk))

        # Delete rows
        for table, column in tables_and_columns:
            delete_command = table.delete().where(split_vo(dialect, column, return_vo=True) == bindparam('vo_suffix'))
            print(str(delete_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(delete_command, bound_params)

        rse_table = Table('rses', metadata)
        for table, column in tables_and_columns_rse:
            delete_command = table.delete().where(column == rse_table.c.id).where(rse_table.c.vo == bindparam('vo'))
            print(str(delete_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(delete_command, bound_params)

        delete_command = rse_table.delete().where(rse_table.c.vo == bindparam('vo'))
        print(str(delete_command) % bound_params_text + ';')
        if commit_changes:
            conn.execute(delete_command, bound_params)

        table = Table('vos', metadata)
        delete_command = table.delete().where(table.c.vo == bindparam('vo'))
        print(str(delete_command) % bound_params_text + ';')
        if commit_changes:
            conn.execute(delete_command, bound_params)

        # Re-add the FKCs we dropped
        for fkc in all_fks:
            print(str(AddConstraint(fkc)) + ';')
            if commit_changes:
                conn.execute(AddConstraint(fkc))
    except Exception:
        success = False
        print(format_exc())
        print('Exception occurred, changes not committed to DB.')

    if commit_changes and success:
        trans.commit()
    trans.close()
    return success
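
A usage sketch for remove_vo; the first call is a dry run that only prints
the generated SQL, the second applies it (the 'tst' VO name is
illustrative):

# Dry run: dump the DROP CONSTRAINT / DELETE / ADD CONSTRAINT statements.
remove_vo('tst', commit_changes=False)

# Apply for real, skipping the historical tables to save time.
if remove_vo('tst', commit_changes=True, skip_history=True):
    print('VO tst removed')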
Example #19
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'db_url' not in fdw_options:
            log_to_postgres('The db_url parameter is required', ERROR)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.engine = create_engine(fdw_options.get('db_url'))
        self.metadata = MetaData()
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        self.table = Table(tablename, self.metadata, schema=schema,
                           *[Column(col.column_name,
                                    ischema_names[col.type_name])
                             for col in fdw_columns.values()])
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def execute(self, quals, columns):
        """
        The quals are turned into an and'ed where clause.
        """
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c.values()
        statement = statement.with_only_columns(columns)
        log_to_postgres(str(statement), DEBUG)
        for item in self.connection.execute(statement):
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))