def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn("Domain '%s' needs to be synchronised" %
                         record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
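
This migration relies on a _build_hash helper that is not shown in these examples. A minimal sketch of what it might look like, assuming (as the comments in the later recordset migrations suggest) that the hash covers "<recordset id>:<data>:<priority>" and that the String(32) column above holds an MD5 hex digest:

import hashlib

def _build_hash(record):
    # Hypothetical reconstruction, not the project's actual helper: hash the
    # fields that identify a record so duplicates trip the unique constraint.
    key = '%s:%s:%s' % (record.recordset_id, record.data, record.priority)
    return hashlib.md5(key.encode('utf-8')).hexdigest()
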
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Add the hash column, start with allowing NULLs
    hash_column = Column('hash', String(32), nullable=True, default=None,
                         unique=True)
    hash_column.create(records_table, unique_name='unique_record')

    sync_domains = []

    # Fill out the hash values. We need to do this in a way that lets us track
    # which domains need to be re-synced, so having the DB do this directly
    # won't work.
    for record in records_table.select().execute():
        try:
            records_table.update()\
                         .where(records_table.c.id == record.id)\
                         .values(hash=_build_hash(record))\
                         .execute()
        except IntegrityError:
            if record.domain_id not in sync_domains:
                sync_domains.append(record.domain_id)
                LOG.warn(_LW("Domain '%s' needs to be synchronised") %
                         record.domain_id)

            records_table.delete()\
                         .where(records_table.c.id == record.id)\
                         .execute()

    # Finally, the column should not be nullable.
    records_table.c.hash.alter(nullable=False)
Example #3
def fix_opening_balance_errors():
    data = request.get_json(force=True)
    opening_balance_id = int(data['id'])
    new_artist_id = int(data['artist_id'])

    print(data)
    print(opening_balance_id)
    print(new_artist_id)

    opening_balance_table = Table('opening_balance',
                                  metadata,
                                  autoload=True,
                                  autoload_with=db.engine)

    update = (opening_balance_table.update().where(
        opening_balance_table.c.id == opening_balance_id).values(
            artist_id=new_artist_id))
    db.session.execute(update)

    db.session.commit()

    db.session.execute("""
    UPDATE opening_balance
    SET artist_name = artist.artist_name
    FROM artist
    WHERE opening_balance.artist_id = artist.id
    """)

    db.session.commit()

    return jsonify({'success': 'true'})
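
A hedged illustration of the JSON body this endpoint expects (values invented); the handler reads data['id'] as the opening_balance row to fix and data['artist_id'] as the replacement artist:

# request body, as parsed by request.get_json(force=True):
# {"id": 17, "artist_id": 4}
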
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    ip_addresses = Table('ip_addresses', meta, autoload=True)
    allocated = Column('allocated', Boolean(), default=False)
    ip_addresses.create_column(allocated)
    migrate_engine.execute(ip_addresses.update().values(allocated=True))
Example #5
class TableHandler(object):

    """ Used by automatically generated objects such as datasets
    and dimensions to generate, write and clear the table under
    its management. """

    def _init_table(self, meta, namespace, name, id_type=Integer):
        """ Create the given table if it does not exist, otherwise
        reflect the current table schema from the database.
        """
        name = namespace + '__' + name
        self.table = Table(name, meta)
        if id_type is not None:
            col = Column('id', id_type, primary_key=True)
            self.table.append_column(col)

    def _generate_table(self):
        """ Create the given table if it does not exist. """
        # TODO: make this support some kind of migration?
        if not db.engine.has_table(self.table.name):
            self.table.create(db.engine)

    def _upsert(self, bind, data, unique_columns):
        """ Upsert a set of values into the table. This will
        query for the set of unique columns and either update an
        existing row or create a new one. In both cases, the ID
        of the changed row will be returned. """
        key = and_(*[self.table.c[c] == data.get(c)
                     for c in unique_columns])
        q = self.table.update(key, data)
        if bind.execute(q).rowcount == 0:
            q = self.table.insert(data)
            rs = bind.execute(q)
            return rs.inserted_primary_key[0]
        else:
            q = self.table.select(key)
            row = bind.execute(q).fetchone()
            return row['id']

    def _flush(self, bind):
        """ Delete all rows in the table. """
        q = self.table.delete()
        bind.execute(q)

    def _drop(self, bind):
        """ Drop the table and the local reference to it. """
        if db.engine.has_table(self.table.name):
            self.table.drop()
        del self.table
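
The _upsert helper above first issues an UPDATE keyed on the unique columns and falls back to an INSERT when no row matched, returning the id in both cases. A minimal usage sketch, assuming a subclass wires the handler to a concrete table (the dataset__entry table, its columns, and the db.metadata/db.engine handles are illustrative):

from sqlalchemy import Column, Integer, Unicode

class EntryHandler(TableHandler):
    def __init__(self, meta):
        # the id column comes from _init_table; the rest are example columns
        self._init_table(meta, 'dataset', 'entry', id_type=Integer)
        self.table.append_column(Column('code', Unicode(64)))
        self.table.append_column(Column('label', Unicode(255)))
        self._generate_table()

# handler = EntryHandler(db.metadata)
# row_id = handler._upsert(db.engine, {'code': 'X1', 'label': 'Example'},
#                          unique_columns=['code'])
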
Example #6
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    tasks_table = Table('tasks', meta, autoload=True)
    task_info_table = Table('task_info', meta, autoload=True)

    for col_name in TASKS_MIGRATE_COLUMNS:
        column = Column(col_name, Text())
        column.create(tasks_table)

    task_info_records = task_info_table.select().execute().fetchall()

    for task_info in task_info_records:
        values = {
            'input': task_info.input,
            'result': task_info.result,
            'message': task_info.message
        }

        tasks_table.update(values=values).where(
            tasks_table.c.id == task_info.task_id).execute()

    drop_tables([task_info_table])
Example #7
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    tasks_table = Table('tasks', meta, autoload=True)
    task_info_table = Table('task_info', meta, autoload=True)

    for col_name in TASKS_MIGRATE_COLUMNS:
        column = Column(col_name, Text())
        column.create(tasks_table)

    task_info_records = task_info_table.select().execute().fetchall()

    for task_info in task_info_records:
        values = {
            'input': task_info.input,
            'result': task_info.result,
            'message': task_info.message
        }

        tasks_table.update(values=values).where(
            tasks_table.c.id == task_info.task_id).execute()

    drop_tables([task_info_table])
Example #8
class TableHandler(object):
    """ Used by automatically generated objects such as datasets
    and dimensions to generate, write and clear the table under
    its management. """
    def _init_table(self, meta, namespace, name, id_type=Integer):
        """ Create the given table if it does not exist, otherwise
        reflect the current table schema from the database.
        """
        name = namespace + '__' + name
        self.table = Table(name, meta)
        if id_type is not None:
            col = Column('id', id_type, primary_key=True)
            self.table.append_column(col)

    def _generate_table(self):
        """ Create the given table if it does not exist. """
        # TODO: make this support some kind of migration?
        if not db.engine.has_table(self.table.name):
            self.table.create(db.engine)

    def _upsert(self, bind, data, unique_columns):
        """ Upsert a set of values into the table. This will
        query for the set of unique columns and either update an
        existing row or create a new one. In both cases, the ID
        of the changed row will be returned. """
        key = and_(*[self.table.c[c] == data.get(c) for c in unique_columns])
        q = self.table.update(key, data)
        if bind.execute(q).rowcount == 0:
            q = self.table.insert(data)
            rs = bind.execute(q)
            return rs.inserted_primary_key[0]
        else:
            q = self.table.select(key)
            row = bind.execute(q).fetchone()
            return row['id']

    def _flush(self, bind):
        """ Delete all rows in the table. """
        q = self.table.delete()
        bind.execute(q)

    def _drop(self, bind):
        """ Drop the table and the local reference to it. """
        if db.engine.has_table(self.table.name):
            self.table.drop()
        del self.table
Example #9
def import_opening_balance():
    file = request.files['CSV']
    check_statement = (db.session.query(StatementGenerated).filter(
        StatementGenerated.statement_balance_table == 'opening_balance').all())

    if len(check_statement) == 0:
        """Create table."""
        statement_balance_table = ge.create_statement_balance_table('opening')
        db.session.commit()
        """Add to statement balance table"""
        statement_generated_entry = StatementGenerated(
            statement_balance_table='opening_balance')
        db.session.add(statement_generated_entry)
        db.session.commit()

    opening_balance_table = Table('opening_balance',
                                  metadata,
                                  autoload=True,
                                  autoload_with=db.engine)
    """Clear current values."""
    db.session.execute("""DELETE FROM opening_balance""")
    db.session.commit()
    """Add new values from CSV."""
    df = pd.read_csv(file)
    df.to_sql('opening_balance',
              con=db.engine,
              if_exists='append',
              index=False)

    update = (opening_balance_table.update().where(
        opening_balance_table.c.artist_name == Artist.artist_name).values(
            artist_id=Artist.id))
    db.session.execute(update)
    db.session.commit()
    # sel = (
    #     db.session.query(opening_balance_table, Artist.artist_name)
    #         .outerjoin(Artist, Artist.artist_name == opening_balance_table.c.artist_name).all()
    # )
    errors = (db.session.query(opening_balance_table).outerjoin(
        Artist,
        Artist.artist_name == opening_balance_table.c.artist_name).filter(
            Artist.artist_name == None).all())
    if len(errors) > 0:
        return jsonify({'errors': len(errors)})
    return jsonify({'success': 'true'})
Example #10
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()

        for record in records:
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
Example #11
def downgrade(migrate_engine):
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id, records_table.c.priority, records_table.c.data
    ]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()

        for record in records:
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
Example #12
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        query = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .where(records_table.c.priority != None)  # noqa
        records = query.execute().fetchall()

        for record in records:
            new_data = '%s %s' % (int(record[1]), record[2])
            # New style hashes are <rs_id>:<data> since prio is baked into data
            new_hash = _build_hash(rs[0], new_data)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(data=new_data, hash=new_hash)
            migrate_engine.execute(update)

    records_table.c.priority.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
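
The upgrade folds the priority into the data column, while the downgrade examples earlier split it back apart with partition(' '). A small round-trip illustration with invented values:

priority, data = 10, 'mail.example.com.'
new_data = '%s %s' % (int(priority), data)   # upgrade encoding: '10 mail.example.com.'
prio, _, rest = new_data.partition(' ')      # downgrade decoding
assert (int(prio), rest) == (priority, data)
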
Example #13
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    recordsets = _get_recordsets(rs_table)

    record_cols = [
        records_table.c.id, records_table.c.priority, records_table.c.data
    ]

    for rs in recordsets:
        query = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .where(records_table.c.priority != None)  # noqa
        records = query.execute().fetchall()

        for record in records:
            new_data = '%s %s' % (int(record[1]), record[2])
            # New style hashes are <rs_id>:<data> since prio is baked into data
            new_hash = _build_hash(rs[0], new_data)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(data=new_data, hash=new_hash)
            migrate_engine.execute(update)

    records_table.c.priority.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
Example #14
def merge(nombre_tabla, id_destino, otros_ids, session):
    """
    Fusiona uno o más registros (otros_ids) en otro (id_destino), siendo todos ellos
    de la tabla 'nombre_tabla'.
    
    IN
      id_usuario   <int>: Identificador del usuario que se conecta
      nombre_tabla <str>: Nombre de la tabla a la que pertenecen los registros
      id_destino   <int>: Identificador del registro donde se guardará la información
      otros_ids    <list> [<int>, ...]: Lista de ids desde donde se obtendrá la información
    
    OUT
      un JSON de la forma:
      {
       'id_destino': 123,
       'id_origen': 123,
       'num_campos': 123
      }
    
    EXC
      ENoExisteRegistro: Cuando no existe alguno de los registros (origen o destino)
    """

    meta = MetaData(bind=session.bind, reflect=True)
    tabla = Table(nombre_tabla, meta, autoload=True)

    # check that the "destination" record exists
    alumno = session.execute(select([tabla],
                                    tabla.c.id == id_destino)).fetchone()
    if alumno is None:
        raise ENoExisteRegistro(id_destino)

    # check that the "other" records exist
    for id_otro in otros_ids:

        otro = session.execute(select([tabla],
                                      tabla.c.id == id_otro)).fetchone()
        if otro is None:
            raise ENoExisteRegistro(id_otro)

    # merge
    resultado = {}
    resultado['id_destino'] = id_destino
    resultado['num_campos'] = 0
    for id_otro in otros_ids:

        if id_otro == id_destino:
            continue

        resultado['id_origen'] = id_otro

        # fetch the data of both records
        alumno = session.execute(select([tabla],
                                        tabla.c.id == id_destino)).fetchone()
        otro = session.execute(select([tabla],
                                      tabla.c.id == id_otro)).fetchone()

        # foreign keys that reference this table
        for t in meta.sorted_tables:
            for fk in t.foreign_keys:
                if fk.references(tabla):
                    # repoint records from "source" to "destination"
                    qry_update = t.update(fk.parent == id_otro,
                                          values={fk.parent: id_destino})
                    session.execute(qry_update)

        session.commit()

        # copy data across from the "other" record
        datos = {}
        for k in alumno.keys():
            if k != 'id' and k != 'busqueda':

                # print('Processing %s: %s - %s' % (k, alumno[k] or '<NULL>', otro[k] or '<NULL>'))

                #                if alumno[k] is not None and otro[k] is not None:
                #                    if lista_atributos and str(k) in lista_atributos:
                #                        merge('clientes', alumno[k], [otro[k]], conector=conector)

                if alumno[k] is None and otro[k] is not None:
                    # the destination field is empty, take the value from "other"
                    datos[k] = otro[k]
                    resultado['num_campos'] += 1

        if datos != {}:
            # actualizar "destino"
            qry_update = tabla.update(tabla.c.id == id_destino, values=datos)
            session.execute(qry_update)

        # borrar "otro"
        qry_delete = tabla.delete(tabla.c.id == id_otro)
        session.execute(qry_delete)
        session.commit()

    return resultado
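
A hedged usage sketch: 'clientes' is the table name mentioned in the commented-out recursion above, and the ids and result are invented. The call folds records 7 and 9 into record 3 and reports how many empty fields were filled in:

# resultado = merge('clientes', 3, [7, 9], session)
# resultado -> {'id_destino': 3, 'id_origen': 9, 'num_campos': 2}
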
Example #15
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'db_url' not in fdw_options:
            log_to_postgres('The db_url parameter is required', ERROR)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.engine = create_engine(fdw_options.get('db_url'))
        self.metadata = MetaData()
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        self.table = Table(tablename, self.metadata, schema=schema,
                           *[Column(col.column_name,
                                    ischema_names[col.type_name])
                             for col in fdw_columns.values()])
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def execute(self, quals, columns):
        """
        The quals are turned into an and'ed where clause.
        """
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c.values()
        statement = statement.with_only_columns(columns)
        log_to_postgres(str(statement), DEBUG)
        for item in self.connection.execute(statement):
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))
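
The docstring above lists the options the wrapper accepts. A hedged illustration of the option dictionary passed to __init__ (the connection string, schema and table name are made up; primary_key is only needed for the write methods):

fdw_options = {
    'db_url': 'postgresql://user:secret@remote-host/sales',
    'schema': 'public',
    'tablename': 'orders',
    'primary_key': 'id',
}
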
Example #16
class SQLTable(Component):

    _selects = 0
    _inserts = 0
    _updates = 0
    _finalized = False

    STORE_MODE_LOOKUP = "lookup"
    STORE_MODE_INSERT = "insert"
    STORE_MODE_UPSERT = "upsert"

    _pk = False

    columns = []

    create = True

    _unicode_errors = 0
    _lookup_changed_fields = None

    def __init__(self, name, connection, columns, label=None):

        super(SQLTable, self).__init__()

        self.sa_table = None
        self.sa_metadata = None

        self.name = name
        self.connection = connection

        self.label = label if label else name

        self.columns = columns or []
        for col in columns:
            col.sqltable = self

    def _get_sa_type(self, column):

        if (column.type == "Integer"):
            return Integer
        elif (column.type == "String"):
            #if (column.length is None): column.length = 128
            return Unicode(length=128)
        elif (column.type == "Float"):
            return Float
        elif (column.type == "Boolean"):
            return Boolean
        elif (column.type == "AutoIncrement"):
            return Integer
        elif (column.type == "Date"):
            return Date
        elif (column.type == "Time"):
            return Time
        elif (column.type == "DateTime"):
            return DateTime
        elif (column.type == "Binary"):
            return Binary
        else:
            raise Exception("Invalid data type (%s): %s" %
                            (column, column.type))

    def finalize(self, ctx):

        if (not SQLTable._finalized):
            SQLTable._finalized = True
            if (SQLTable._inserts + SQLTable._selects > 0):
                logger.info(
                    "SQLTable Totals  ins/upd/sel: %d/%d/%d " %
                    (SQLTable._inserts, SQLTable._updates, SQLTable._selects))

        if (self._inserts + self._selects > 0):
            logger.info(
                "SQLTable %-18s ins/upd/sel: %6d/%6d/%-6d " %
                (self.name, self._inserts, self._updates, self._selects))
        if (self._unicode_errors > 0):
            logger.warning(
                "SQLTable %s found %d warnings assigning non-unicode fields to unicode columns"
                % (self.name, self._unicode_errors))

        ctx.comp.finalize(self.connection)

        super(SQLTable, self).finalize(ctx)

    def initialize(self, ctx):

        super(SQLTable, self).initialize(ctx)

        if self._lookup_changed_fields is None:
            self._lookup_changed_fields = []

        ctx.comp.initialize(self.connection)

        logger.debug("Loading table %s on %s" % (self.name, self))

        self.sa_metadata = MetaData()
        self.sa_table = Table(self.name, self.sa_metadata)

        self._selects = 0
        self._inserts = 0
        self._updates = 0
        self._unicode_errors = 0

        # Drop?

        columns_ex = []
        for column in self.columns:

            logger.debug("Adding column to %s: %s" % (self, column))

            column.sqltable = self

            # Check for duplicate names
            if (column.name in columns_ex):
                raise ETLConfigurationException(
                    "Duplicate column name '%s' in %s" % (column.name, self))

            columns_ex.append(column.name)

            # Configure column
            if isinstance(column, SQLColumnFK):
                if column.fk_sqlcolumn.sqltable.sa_table is None:
                    logger.warning(
                        "Column %s foreign key %s table (%s) has not been defined in backend (ignoring).",
                        column, column.fk_sqlcolumn,
                        column.fk_sqlcolumn.sqltable)
                    continue

                self.sa_table.append_column(
                    Column(column.name,
                           self._get_sa_type(column),
                           ForeignKey(
                               column.fk_sqlcolumn.sqltable.sa_table.columns[
                                   column.fk_sqlcolumn.name]),
                           primary_key=column.pk,
                           nullable=column.nullable,
                           autoincrement=(True if column.type
                                          == "AutoIncrement" else False)))
            else:
                self.sa_table.append_column(
                    Column(column.name,
                           self._get_sa_type(column),
                           primary_key=column.pk,
                           nullable=column.nullable,
                           autoincrement=(True if column.type
                                          == "AutoIncrement" else False)))

        # Check schema:

        # Create if doesn't exist
        if (not self.connection.engine().has_table(self.name)):
            logger.info("Creating table %s" % self.name)
            self.sa_table.create(self.connection.connection())

        # TODO:? Extend?  (unsafe, allow read-only connections and make them default?)
        # TODO:? Delete columns (unsafe, allow read-only connections and make them default?)

    def pk(self, ctx):
        """
        Returns the primary key column definition, or None if none defined.
        """

        #if (self._pk == False):
        if True:
            pk_cols = []
            for col in self.columns:
                if col.pk:
                    pk_cols.append(col)

            if (len(pk_cols) > 1):
                raise Exception("Table %s has multiple primary keys: %s" %
                                (self.name, pk_cols))
            elif (len(pk_cols) == 1):
                self._pk = pk_cols[0]
            else:
                self._pk = None

        return self._pk

    def _attribsToClause(self, attribs):
        clauses = []
        for k, v in attribs.items():
            if isinstance(v, (list, tuple)):
                clauses.append(self.sa_table.c[k].in_(v))
            else:
                clauses.append(self.sa_table.c[k] == v)

        return and_(*clauses)

    def _rowtodict(self, row):

        d = {}
        for column in self.columns:
            #print column
            d[column.name] = getattr(row, column.name)

        return d

    def _find(self, ctx, attribs):

        self._selects = self._selects + 1
        SQLTable._selects = SQLTable._selects + 1

        query = self.sa_table.select(self._attribsToClause(attribs))
        rows = self.connection.connection().execute(query)

        for r in rows:
            # Ensure we return dicts, not RowProxys from SqlAlchemy
            yield self._rowtodict(r)

    def lookup(self, ctx, attribs, find_function=None):

        logger.debug("Lookup on '%s' attribs: %s" % (self, attribs))

        if (len(attribs.keys()) == 0):
            raise Exception(
                "Cannot lookup on table '%s' with no criteria (empty attribute set)"
                % self.name)

        find_function = find_function or self._find
        rows = find_function(ctx, attribs)
        rows = list(rows)
        if (len(rows) > 1):
            raise Exception(
                "Found more than one row when searching for just one in table %s: %s"
                % (self.name, attribs))
        elif (len(rows) == 1):
            row = rows[0]
        else:
            row = None

        logger.debug("Lookup result on %s: %s = %s" %
                     (self.name, attribs, row))
        return row

    def upsert(self, ctx, data, keys=[]):
        """
        Upsert checks if the row exists and has changed. It does a lookup
        followed by an update or insert as appropriate.
        """

        # TODO: Check for AutoIncrement in keys, shall not be used

        # If keys
        qfilter = {}
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception(
                        "Could not find attribute '%s' in data when storing row data: %s"
                        % (key, data))
        else:
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]

        # Do lookup
        if len(qfilter) > 0:

            row = self.lookup(ctx, qfilter)

            if (row):
                # Check row is identical
                for c in self.columns:
                    if c.type != "AutoIncrement":
                        v1 = row[c.name]
                        v2 = data[c.name]
                        if c.type == "Date":
                            v1 = row[c.name].strftime('%Y-%m-%d')
                            v2 = data[c.name].strftime('%Y-%m-%d')
                        if (isinstance(v1, str) or isinstance(v2, str)):
                            if (not isinstance(v1, str)): v1 = str(v1)
                            if (not isinstance(v2, str)): v2 = str(v2)
                        if (v1 != v2):
                            if (c.name not in self._lookup_changed_fields):
                                logger.warning(
                                    "%s updating an entity that exists with different attributes, overwriting (field=%s, existing_value=%s, tried_value=%s)"
                                    % (self, c.name, v1, v2))
                                #self._lookup_changed_fields.append(c["name"])

                # Update the row
                row = self.update(ctx, data, keys)
                return row

        row_with_id = self.insert(ctx, data)
        return row_with_id

    def _prepare_row(self, ctx, data):

        row = {}

        for column in self.columns:
            if column.type != "AutoIncrement":
                try:
                    row[column.name] = data[column.name]
                except KeyError as e:
                    raise Exception(
                        "Missing attribute for column %s in table '%s' while inserting row: %s"
                        % (e, self.name, data))

                # Checks
                if (column.type == "String") and (not isinstance(
                        row[column.name], str)):
                    self._unicode_errors = self._unicode_errors + 1
                    if (ctx.debug):
                        logger.warning(
                            "Unicode column %r received non-unicode string: %r "
                            % (column.name, row[column.name]))

        return row

    def insert(self, ctx, data):

        row = self._prepare_row(ctx, data)

        logger.debug("Inserting in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(self.sa_table.insert(row))

        pk = self.pk(ctx)
        if pk:
            row[pk.name] = res.inserted_primary_key[0]

        self._inserts = self._inserts + 1
        SQLTable._inserts = SQLTable._inserts + 1

        if pk is not None:
            return row
        else:
            return row  # None

    def update(self, ctx, data, keys=[]):

        row = self._prepare_row(ctx, data)

        # Automatically calculate lookup if necessary
        qfilter = {}
        pk = None  # stays None when explicit keys are supplied below
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception(
                        "Could not find attribute '%s' in data when storing row data: %s"
                        % (key, data))
        else:
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]

        logger.debug("Updating in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(
            self.sa_table.update(self._attribsToClause(qfilter), row))

        self._updates = self._updates + 1
        SQLTable._updates = SQLTable._updates + 1

        if pk is not None:
            return row
        else:
            return None
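
A hedged sketch of how the upsert path above is typically driven; the column definitions, connection object and ctx come from the surrounding ETL framework and are only assumed here:

# table = SQLTable('customers', connection, columns=[id_col, name_col, email_col])
# table.initialize(ctx)
# row = table.upsert(ctx, {'id': 42, 'name': 'Ada', 'email': 'ada@example.org'},
#                    keys=['id'])   # lookup by id, then update or insert
# table.finalize(ctx)
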
Example #17
trackResult = conn.execute(
    select([track]).where(and_(track.c.album_id == 1, track.c.genre_id == 1)))

# In[114]:

for row in trackResult:
    print(row)

# Update

# In[115]:

from sqlalchemy import update

# In[116]:

conn.execute(track.update().values(genre_id=2).where(track.c.id == 2))

# In[117]:

conn.execute(track.update().values(genre_id=1).where(track.c.id == 3))

# Where

# In[118]:

trackResult = conn.execute(
    select([track]).where(
        and_(track.c.album_id == 1,
             or_(track.c.genre_id == 1, track.c.genre_id == 2))))

# In[119]:
Example #18
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)



    def _need_explicit_null_ordering(self, key):
        support = SORT_SUPPORT[self.engine.dialect.name]
        default = support['default']
        no = None
        if key.is_reversed:
            no = nullsfirst if default == 'higher' else nullslast
        else:
            no = nullslast if default == 'higher' else nullsfirst
        if key.nulls_first:
            if no != nullsfirst:
                return nullsfirst
            return None
        else:
            if no != nullslast:
                return nullslast
            return None

    def can_sort(self, sortkeys):
        if SORT_SUPPORT.get(self.engine.dialect.name) is None:
            # We have no idea about defaults
            return []
        can_order_null = SORT_SUPPORT[self.engine.dialect.name]['support']
        if (any((self._need_explicit_null_ordering(x) is not None
                 for x in sortkeys)) and not can_order_null):
            return []
        return sortkeys

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        return [str(statement)]

    def _build_statement(self, quals, columns, sortkeys):
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        orders = []
        for sortkey in sortkeys:
            column = self.table.c[sortkey.attname]
            if sortkey.is_reversed:
                column = column.desc()
            if sortkey.collate:
                column = column.collate('"%s"' % sortkey.collate)
            null_ordering = self._need_explicit_null_ordering(sortkey)
            if null_ordering:
                column = null_ordering(column)
            statement = statement.order_by(column)
        return statement


    def execute(self, quals, columns, sortkeys=None):
        """
        The quals are turned into an and'ed where clause.
        """
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)

        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype

    @classmethod
    def import_schema(self, schema, srv_options, options,
                      restriction_type, restricts):
        """
        Reflects the remote schema.
        """
        metadata = MetaData()
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine,
                         schema=schema,
                         only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(ColumnDefinition(
                    c.name,
                    type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
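
A hedged illustration of what the _get_column_type parsing above yields for a few PostgreSQL type strings (results expressed as SQLAlchemy types):

# fdw._get_column_type('character varying(64)')    -> VARCHAR(64)
# fdw._get_column_type('numeric(10,2)')            -> NUMERIC(10, 2)
# fdw._get_column_type('timestamp with time zone') -> TIMESTAMP(timezone=True)
# fdw._get_column_type('integer[]')                -> ARRAY(INTEGER)
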
Example #19
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """

    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        if fdw_options.get('db_url'):
            url = make_url(fdw_options.get('db_url'))
        else:
            if 'drivername' not in fdw_options:
                log_to_postgres('Either a db_url, or drivername and other '
                                'connection infos are needed', ERROR)
            url = URL(fdw_options['drivername'])
        for param in ('username', 'password', 'host',
                      'database', 'port'):
            if param in fdw_options:
                setattr(url, param, fdw_options[param])
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema,
                           *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)

    def execute(self, quals, columns):
        """
        The quals are turned into an and'ed where clause.
        """
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(operator(self.table.c[qual.field_name],
                                        qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c.values()
        statement = statement.with_only_columns(columns)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection
              .execution_options(stream_results=True)
              .execute(statement))
        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(
            self.table.update()
            .where(self.table.c[self._row_id_column] == rowid)
            .values(newvalues))

    def delete(self, rowid):
        self.connection.execute(
            self.table.delete()
            .where(self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone',
                        'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen),)
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen),)

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype
Example #20
def rename_vo(old_vo, new_vo, insert_new_vo=False, description=None, email=None, commit_changes=False, skip_history=False):
    """
    Updates rows so that entries associated with `old_vo` are now associated with `new_vo` as part of multi-VO migration.

    :param old_vo:         The 3 character string for the current VO (for a single-VO instance this will be 'def').
    :param new_vo:         The 3 character string for the new VO.
    :param insert_new_vo:  If True then an entry for `new_vo` is created in the database.
    :param description:    Full description of the new VO, unused if `insert_new_vo` is False.
    :param email:          Admin email for the new VO, unused if `insert_new_vo` is False.
    :param commit_changes: If True then changes are made against the database directly.
                           If False, then nothing is committed and the commands needed are dumped to be run later.
    :param skip_history:   If True then tables without FKC containing historical data will not be converted to save time.
    """
    success = True
    engine = session.get_engine()
    conn = engine.connect()
    trans = conn.begin()
    inspector = reflection.Inspector.from_engine(engine)
    metadata = MetaData(bind=conn, reflect=True)
    dialect = engine.dialect.name

    # Gather all the columns that need updating and all relevant foreign key constraints
    all_fks = []
    tables_and_columns = []
    for table_name in inspector.get_table_names():
        if skip_history and ('_history' in table_name or '_hist_recent' in table_name):
            continue
        fks = []
        table = Table(table_name, metadata)
        for column in table.c:
            if 'scope' in column.name or column.name == 'account':
                tables_and_columns.append((table, column))
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
            if 'scope' in fk['referred_columns'] or 'account' in fk['referred_columns']:
                fks.append(ForeignKeyConstraint(fk['constrained_columns'], [fk['referred_table'] + '.' + r for r in fk['referred_columns']],
                                                name=fk['name'], table=table, **fk['options']))
        all_fks.extend(fks)

    try:
        bound_params = {'old_vo': old_vo,
                        'new_vo': new_vo,
                        'old_vo_suffix': '' if old_vo == 'def' else old_vo,
                        'new_vo_suffix': '' if new_vo == 'def' else '@%s' % new_vo,
                        'split_character': '@',
                        'int_1': 1,
                        'int_2': 2,
                        'new_description': description,
                        'new_email': email,
                        'datetime': datetime.utcnow()}

        bound_params_text = {}
        for key in bound_params:
            if isinstance(bound_params[key], int):
                bound_params_text[key] = bound_params[key]
            else:
                bound_params_text[key] = "'%s'" % bound_params[key]

        if insert_new_vo:
            table = Table('vos', metadata)
            insert_command = table.insert().values(vo=bindparam('new_vo'),
                                                   description=bindparam('new_description'),
                                                   email=bindparam('new_email'),
                                                   updated_at=bindparam('datetime'),
                                                   created_at=bindparam('datetime'))
            print(str(insert_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(insert_command, bound_params)

        # Drop all FKCs affecting InternalAccounts/Scopes
        for fk in all_fks:
            print(str(DropConstraint(fk)) + ';')
            if commit_changes:
                conn.execute(DropConstraint(fk))

        # Update columns
        for table, column in tables_and_columns:
            update_command = table.update().where(split_vo(dialect, column, return_vo=True) == bindparam('old_vo_suffix'))

            if new_vo == 'def':
                update_command = update_command.values({column.name: split_vo(dialect, column)})
            else:
                update_command = update_command.values({column.name: split_vo(dialect, column) + cast(bindparam('new_vo_suffix'), CHAR(4))})

            print(str(update_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(update_command, bound_params)

        table = Table('rses', metadata)
        update_command = table.update().where(table.c.vo == bindparam('old_vo')).values(vo=bindparam('new_vo'))
        print(str(update_command) % bound_params_text + ';')
        if commit_changes:
            conn.execute(update_command, bound_params)

        # Re-add the FKCs we dropped
        for fkc in all_fks:
            print(str(AddConstraint(fkc)) + ';')
            if commit_changes:
                conn.execute(AddConstraint(fkc))
    except Exception:
        success = False
        print(format_exc())
        print('Exception occurred, changes not committed to DB.')

    if commit_changes and success:
        trans.commit()
    trans.close()
    return success
Example #21
class SqlAlchemyFdw(ForeignDataWrapper):
    """An SqlAlchemy foreign data wrapper.

    The sqlalchemy foreign data wrapper performs simple selects on a remote
    database using the sqlalchemy framework.

    Accepted options:

    db_url      --  the sqlalchemy connection string.
    schema      --  (optional) schema name to qualify table name with
    tablename   --  the table name in the remote database.

    """
    def __init__(self, fdw_options, fdw_columns):
        super(SqlAlchemyFdw, self).__init__(fdw_options, fdw_columns)
        if 'tablename' not in fdw_options:
            log_to_postgres('The tablename parameter is required', ERROR)
        self.metadata = MetaData()
        url = _parse_url_from_options(fdw_options)
        self.engine = create_engine(url)
        schema = fdw_options['schema'] if 'schema' in fdw_options else None
        tablename = fdw_options['tablename']
        sqlacols = []
        for col in fdw_columns.values():
            col_type = self._get_column_type(col.type_name)
            sqlacols.append(Column(col.column_name, col_type))
        self.table = Table(tablename, self.metadata, schema=schema, *sqlacols)
        self.transaction = None
        self._connection = None
        self._row_id_column = fdw_options.get('primary_key', None)
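
        # The options above arrive from the foreign table declaration; a
        # minimal sketch, assuming the upstream multicorn wrapper path and
        # purely illustrative server/table names:
        #   CREATE SERVER alchemy_srv FOREIGN DATA WRAPPER multicorn
        #       OPTIONS (wrapper 'multicorn.sqlalchemyfdw.SqlAlchemyFdw');
        #   CREATE FOREIGN TABLE remote_items (id integer, name text)
        #       SERVER alchemy_srv
        #       OPTIONS (db_url 'postgresql://user@host/db',
        #                tablename 'items', primary_key 'id');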

    def _need_explicit_null_ordering(self, key):
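        """Return the explicit nullsfirst/nullslast modifier needed for ``key``.

        ``no`` holds the NULL placement the current dialect applies by default
        for the requested sort direction; if the sort key asks for the opposite
        placement, the matching modifier is returned, otherwise None.
        """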
        support = SORT_SUPPORT[self.engine.dialect.name]
        default = support['default']
        no = None
        if key.is_reversed:
            no = nullsfirst if default == 'higher' else nullslast
        else:
            no = nullslast if default == 'higher' else nullsfirst
        if key.nulls_first:
            if no != nullsfirst:
                return nullsfirst
            return None
        else:
            if no != nullslast:
                return nullslast
            return None

    def can_sort(self, sortkeys):
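        """Return the sortkeys that can be pushed down to the remote database.

        An empty list disables pushdown when the dialect's default NULL
        ordering is unknown, or when explicit NULLS FIRST/LAST would be needed
        but is not supported.
        """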
        if SORT_SUPPORT.get(self.engine.dialect.name) is None:
            # We have no idea about defaults
            return []
        can_order_null = SORT_SUPPORT[self.engine.dialect.name]['support']
        if (any((self._need_explicit_null_ordering(x) is not None
                 for x in sortkeys)) and not can_order_null):
            return []
        return sortkeys

    def explain(self, quals, columns, sortkeys=None, verbose=False):
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        return [str(statement)]

    def _build_statement(self, quals, columns, sortkeys):
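        """Build the remote SELECT statement.

        Pushed-down quals become an AND-ed WHERE clause, the projection is
        restricted to the requested columns, and one ORDER BY clause is added
        per accepted sortkey (with collation and explicit NULL ordering where
        required).
        """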
        statement = select([self.table])
        clauses = []
        for qual in quals:
            operator = OPERATORS.get(qual.operator, None)
            if operator:
                clauses.append(
                    operator(self.table.c[qual.field_name], qual.value))
            else:
                log_to_postgres('Qual not pushed to foreign db: %s' % qual,
                                WARNING)
        if clauses:
            statement = statement.where(and_(*clauses))
        if columns:
            columns = [self.table.c[col] for col in columns]
        else:
            columns = self.table.c
        statement = statement.with_only_columns(columns)
        orders = []
        for sortkey in sortkeys:
            column = self.table.c[sortkey.attname]
            if sortkey.is_reversed:
                column = column.desc()
            if sortkey.collate:
                column = column.collate('"%s"' % sortkey.collate)
            null_ordering = self._need_explicit_null_ordering(sortkey)
            if null_ordering:
                column = null_ordering(column)
            statement = statement.order_by(column)
        return statement

    def execute(self, quals, columns, sortkeys=None):
        """
        The quals are turned into an and'ed where clause.
        """
        sortkeys = sortkeys or []
        statement = self._build_statement(quals, columns, sortkeys)
        log_to_postgres(str(statement), DEBUG)
        rs = (self.connection.execution_options(
            stream_results=True).execute(statement))
        # Workaround pymssql "trash old results on new query"
        # behaviour (See issue #100)
        if self.engine.driver == 'pymssql' and self.transaction is not None:
            rs = list(rs)

        for item in rs:
            yield dict(item)

    @property
    def connection(self):
        if self._connection is None:
            self._connection = self.engine.connect()
        return self._connection

    def begin(self, serializable):
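        # The ``serializable`` flag is accepted for API compatibility but no
        # isolation level is set here; a plain transaction is opened instead.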
        self.transaction = self.connection.begin()

    def pre_commit(self):
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def commit(self):
        # Pre-commit hook does this on 9.3
        if self.transaction is not None:
            self.transaction.commit()
            self.transaction = None

    def rollback(self):
        if self.transaction is not None:
            self.transaction.rollback()
            self.transaction = None

    @property
    def rowid_column(self):
        if self._row_id_column is None:
            log_to_postgres(
                'You need to declare a primary key option in order '
                'to use the write features')
        return self._row_id_column

    def insert(self, values):
        self.connection.execute(self.table.insert(values=values))

    def update(self, rowid, newvalues):
        self.connection.execute(self.table.update().where(
            self.table.c[self._row_id_column] == rowid).values(newvalues))

    def delete(self, rowid):
        self.connection.execute(self.table.delete().where(
            self.table.c[self._row_id_column] == rowid))

    def _get_column_type(self, format_type):
        """Blatant ripoff from PG_Dialect.get_column_info"""
        # strip (*) from character varying(5), timestamp(5)
        # with time zone, geometry(POLYGON), etc.
        attype = re.sub(r'\(.*\)', '', format_type)

        # strip '[]' from integer[], etc.
        attype = re.sub(r'\[\]', '', attype)

        is_array = format_type.endswith('[]')
        charlen = re.search(r'\(([\d,]+)\)', format_type)
        if charlen:
            charlen = charlen.group(1)
        args = re.search(r'\((.*)\)', format_type)
        if args and args.group(1):
            args = tuple(re.split(r'\s*,\s*', args.group(1)))
        else:
            args = ()
        kwargs = {}

        if attype == 'numeric':
            if charlen:
                prec, scale = charlen.split(',')
                args = (int(prec), int(scale))
            else:
                args = ()
        elif attype == 'double precision':
            args = (53, )
        elif attype == 'integer':
            args = ()
        elif attype in ('timestamp with time zone', 'time with time zone'):
            kwargs['timezone'] = True
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype in ('timestamp without time zone',
                        'time without time zone', 'time'):
            kwargs['timezone'] = False
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif attype == 'bit varying':
            kwargs['varying'] = True
            if charlen:
                args = (int(charlen), )
            else:
                args = ()
        elif attype in ('interval', 'interval year to month',
                        'interval day to second'):
            if charlen:
                kwargs['precision'] = int(charlen)
            args = ()
        elif charlen:
            args = (int(charlen), )

        coltype = ischema_names.get(attype, None)
        if coltype:
            coltype = coltype(*args, **kwargs)
            if is_array:
                coltype = ARRAY(coltype)
        else:
            coltype = sqltypes.NULLTYPE
        return coltype

    @classmethod
    def import_schema(cls, schema, srv_options, options, restriction_type,
                      restricts):
        """
        Reflects the remote schema.
        """
        metadata = MetaData()
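        # This method backs PostgreSQL's IMPORT FOREIGN SCHEMA statement, e.g.
        # (illustrative names only):
        #   IMPORT FOREIGN SCHEMA remote_schema FROM SERVER alchemy_srv INTO public;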
        url = _parse_url_from_options(srv_options)
        engine = create_engine(url)
        dialect = PGDialect()
        if restriction_type == 'limit':
            only = restricts
        elif restriction_type == 'except':
            only = lambda t, _: t not in restricts
        else:
            only = None
        metadata.reflect(bind=engine, schema=schema, only=only)
        to_import = []
        for _, table in sorted(metadata.tables.items()):
            ftable = TableDefinition(table.name)
            ftable.options['schema'] = schema
            ftable.options['tablename'] = table.name
            for c in table.c:
                # Force collation to None to prevent incompatibilities
                setattr(c.type, "collation", None)
                # If the type is specialized, call the generic
                # superclass method
                if type(c.type) in CONVERSION_MAP:
                    class_name = CONVERSION_MAP[type(c.type)]
                    old_args = c.type.__dict__
                    c.type = class_name()
                    c.type.__dict__.update(old_args)
                if c.primary_key:
                    ftable.options['primary_key'] = c.name
                ftable.columns.append(
                    ColumnDefinition(c.name,
                                     type_name=c.type.compile(dialect)))
            to_import.append(ftable)
        return to_import
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    records_table = Table('records', meta, autoload=True)

    # We need to autoload the domains table for the FK to succeed.
    Table('domains', meta, autoload=True)

    # Prepare an empty dict to cache (domain_id, name, type) tuples to
    # RRSet id's
    cache = {}

    # Create the recordsets_table table
    recordsets_table.create()

    # NOTE(kiall): Since we need a unique UUID for each recordset, and need
    #              to maintain cross DB compatibility, we're stuck doing this
    #              in code rather than an
    #              INSERT INTO recordsets_table SELECT (..) FROM records;
    results = select(
        columns=[
            records_table.c.tenant_id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type,
            func.min(records_table.c.ttl).label('ttl'),
            func.min(records_table.c.created_at).label('created_at'),
            func.max(records_table.c.updated_at).label('updated_at')
        ],
        group_by=[
            records_table.c.tenant_id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type
        ]
    ).execute()

    for result in results:
        # Create the new RecordSet and remember its id
        pk = recordsets_table.insert().execute(
            tenant_id=result.tenant_id,
            domain_id=result.domain_id,
            name=result.name,
            type=result.type,
            ttl=result.ttl,
            created_at=result.created_at,
            updated_at=result.updated_at
        ).inserted_primary_key[0]

        # Cache the ID for later
        cache_key = "%s.%s.%s" % (result.domain_id, result.name, result.type)
        cache[cache_key] = pk

    # Add the recordset column to the records table
    record_recordset_id = Column('recordset_id', UUID,
                                 default=None,
                                 nullable=True)
    record_recordset_id.create(records_table, populate_default=True)

    # Fetch all the records
    # TODO(kiall): Batch this..
    results = select(
        columns=[
            records_table.c.id,
            records_table.c.domain_id,
            records_table.c.name,
            records_table.c.type,
            records_table.c.data,
            records_table.c.priority
        ]
    ).execute()

    # Update each result with the appropriate recordset_id, and refresh
    # the hash column to reflect the removal of several fields.
    for result in results:
        cache_key = "%s.%s.%s" % (result.domain_id, result.name,
                                  result.type)

        recordset_id = cache[cache_key]
        new_hash = _build_hash(recordset_id, result)

        records_table.update()\
            .where(records_table.c.id == result.id)\
            .values(recordset_id=cache[cache_key], hash=new_hash)\
            .execute()

    # Now that the records.recordset_id field is populated, lets ensure the
    # column is not nullable and is a FK to the records table.
    records_table.c.recordset_id.alter(nullable=False)
    ForeignKeyConstraint(columns=[records_table.c.recordset_id],
                         refcolumns=[recordsets_table.c.id],
                         ondelete='CASCADE',
                         name='fkey_records_recordset_id').create()

    # Finally, drop the now-defunct columns from the records table
    records_table.c.name.drop()
    records_table.c.type.drop()
    records_table.c.ttl.drop()
Example #23
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    records_table = Table('records', meta, autoload=True)

    # We need to autoload the domains table for the FK to succeed.
    Table('domains', meta, autoload=True)

    # Prepare an empty dict to cache (domain_id, name, type) tuples to
    # RRSet id's
    cache = {}

    # Create the recordsets_table table
    recordsets_table.create()

    # NOTE(kiall): Since we need a unique UUID for each recordset, and need
    #              to maintain cross DB compatibility, we're stuck doing this
    #              in code rather than an
    #              INSERT INTO recordsets_table SELECT (..) FROM records;
    results = select(columns=[
        records_table.c.tenant_id, records_table.c.domain_id,
        records_table.c.name, records_table.c.type,
        func.min(records_table.c.ttl).label('ttl'),
        func.min(records_table.c.created_at).label('created_at'),
        func.max(records_table.c.updated_at).label('updated_at')
    ],
                     group_by=[
                         records_table.c.tenant_id, records_table.c.domain_id,
                         records_table.c.name, records_table.c.type
                     ]).execute()

    for result in results:
        # Create the new RecordSet and remember its id
        pk = recordsets_table.insert().execute(
            tenant_id=result.tenant_id,
            domain_id=result.domain_id,
            name=result.name,
            type=result.type,
            ttl=result.ttl,
            created_at=result.created_at,
            updated_at=result.updated_at).inserted_primary_key[0]

        # Cache the ID for later
        cache_key = "%s.%s.%s" % (result.domain_id, result.name, result.type)
        cache[cache_key] = pk

    # Add the recordset column to the records table
    record_recordset_id = Column('recordset_id',
                                 UUID,
                                 default=None,
                                 nullable=True)
    record_recordset_id.create(records_table, populate_default=True)

    # Fetch all the records
    # TODO(kiall): Batch this..
    results = select(columns=[
        records_table.c.id, records_table.c.domain_id, records_table.c.name,
        records_table.c.type, records_table.c.data, records_table.c.priority
    ]).execute()

    # Update each result with the appropriate recordset_id, and refresh
    # the hash column to reflect the removal of several fields.
    for result in results:
        cache_key = "%s.%s.%s" % (result.domain_id, result.name, result.type)

        recordset_id = cache[cache_key]
        new_hash = _build_hash(recordset_id, result)

        records_table.update()\
            .where(records_table.c.id == result.id)\
            .values(recordset_id=cache[cache_key], hash=new_hash)\
            .execute()

    # Now that the records.recordset_id field is populated, lets ensure the
    # column is not nullable and is a FK to the records table.
    records_table.c.recordset_id.alter(nullable=False)
    ForeignKeyConstraint(columns=[records_table.c.recordset_id],
                         refcolumns=[recordsets_table.c.id],
                         ondelete='CASCADE',
                         name='fkey_records_recordset_id').create()

    # Finally, drop the now-defunct columns from the records table
    records_table.c.name.drop()
    records_table.c.type.drop()
    records_table.c.ttl.drop()
Example #24
class SQLTable(Component):

    _selects = 0
    _inserts = 0
    _updates = 0
    _finalized = False

    STORE_MODE_LOOKUP = "lookup"
    STORE_MODE_INSERT = "insert"
    STORE_MODE_UPSERT = "upsert"

    _pk = False

    columns = []

    create = True

    _unicode_errors = 0
    _lookup_changed_fields = None

    def __init__(self, name, connection, columns, label=None):

        super(SQLTable, self).__init__()

        self.sa_table = None
        self.sa_metadata = None

        self.name = name
        self.connection = connection

        self.label = label if label else name

        self.columns = columns or []
        for col in columns:
            col.sqltable = self

    def _get_sa_type(self, column):
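        """Map a column type name (e.g. 'Integer', 'Date') to an SQLAlchemy type."""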

        if (column.type == "Integer"):
            return Integer
        elif (column.type == "String"):
            #if (column.length is None): column.length = 128
            return Unicode(length=128)
        elif (column.type == "Float"):
            return Float
        elif (column.type == "Boolean"):
            return Boolean
        elif (column.type == "AutoIncrement"):
            return Integer
        elif (column.type == "Date"):
            return Date
        elif (column.type == "Time"):
            return Time
        elif (column.type == "DateTime"):
            return DateTime
        elif (column.type == "Binary"):
            return Binary
        else:
            raise Exception("Invalid data type (%s): %s" % (column, column.type))

    def finalize(self, ctx):

        if (not SQLTable._finalized):
            SQLTable._finalized = True
            if (SQLTable._inserts + SQLTable._selects > 0):
                logger.info("SQLTable Totals  ins/upd/sel: %d/%d/%d " %
                            (SQLTable._inserts, SQLTable._updates, SQLTable._selects))

        if (self._inserts + self._selects > 0):
            logger.info("SQLTable %-18s ins/upd/sel: %6d/%6d/%-6d " %
                            (self.name, self._inserts, self._updates, self._selects))
        if (self._unicode_errors > 0):
            logger.warning("SQLTable %s found %d warnings assigning non-unicode fields to unicode columns" %
                           (self.name, self._unicode_errors))

        ctx.comp.finalize(self.connection)

        super(SQLTable, self).finalize(ctx)

    def initialize(self, ctx):

        super(SQLTable, self).initialize(ctx)

        if self._lookup_changed_fields is None:
            self._lookup_changed_fields = []

        ctx.comp.initialize(self.connection)

        logger.debug("Loading table %s on %s" % (self.name, self))

        self.sa_metadata = MetaData()
        self.sa_table = Table(self.name, self.sa_metadata)

        self._selects = 0
        self._inserts = 0
        self._updates = 0
        self._unicode_errors = 0

        # Drop?

        columns_ex = []
        for column in self.columns:

            logger.debug("Adding column to %s: %s" % (self, column))

            column.sqltable = self

            # Check for duplicate names
            if (column.name in columns_ex):
                raise ETLConfigurationException("Duplicate column name '%s' in %s" % (column.name, self))

            columns_ex.append(column.name)

            # Configure column
            if isinstance(column, SQLColumnFK):
                if column.fk_sqlcolumn.sqltable.sa_table is None:
                    logger.warning("Column %s foreign key %s table (%s) has not been defined in backend (ignoring).", column, column.fk_sqlcolumn, column.fk_sqlcolumn.sqltable)
                    continue

                self.sa_table.append_column(Column(column.name,
                                                   self._get_sa_type(column),
                                                   ForeignKey(column.fk_sqlcolumn.sqltable.sa_table.columns[column.fk_sqlcolumn.name]),
                                                   primary_key=column.pk,
                                                   nullable=column.nullable,
                                                   autoincrement=(True if column.type == "AutoIncrement" else False)))
            else:
                self.sa_table.append_column(Column(column.name,
                                                   self._get_sa_type(column),
                                                   primary_key=column.pk,
                                                   nullable=column.nullable,
                                                   autoincrement=(True if column.type == "AutoIncrement" else False)))

        # Check schema:

        # Create if doesn't exist
        if (not self.connection.engine().has_table(self.name)):
            logger.info("Creating table %s" % self.name)
            self.sa_table.create(self.connection.connection())

        # TODO:? Extend?  (unsafe, allow read-only connections and make them default?)
        # TODO:? Delete columns (unsafe, allow read-only connections and make them default?)

    def pk(self, ctx):
        """
        Returns the primary key column definition, or None if none defined.
        """

        #if (self._pk == False):
        if True:
            pk_cols = []
            for col in self.columns:
                if col.pk:
                    pk_cols.append(col)

            if (len(pk_cols) > 1):
                raise Exception("Table %s has multiple primary keys: %s" % (self.name, pk_cols))
            elif (len(pk_cols) == 1):
                self._pk = pk_cols[0]
            else:
                self._pk = None

        return self._pk

    def _attribsToClause(self, attribs):
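        """Build an AND-ed clause from an attribute dict.

        List and tuple values become IN clauses; scalar values become equality
        comparisons against the corresponding column.
        """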
        clauses = []
        for k, v in attribs.items():
            if isinstance(v, (list, tuple)):
                clauses.append(self.sa_table.c[k].in_(v))
            else:
                clauses.append(self.sa_table.c[k] == v)

        return and_(*clauses)

    def _rowtodict(self, row):

        d = {}
        for column in self.columns:
            #print column
            d[column.name] = getattr(row, column.name)

        return d

    def _find(self, ctx, attribs):

        self._selects = self._selects + 1
        SQLTable._selects = SQLTable._selects + 1

        query = self.sa_table.select(self._attribsToClause(attribs))
        rows = self.connection.connection().execute(query)

        for r in rows:
            # Ensure we return dicts, not RowProxys from SqlAlchemy
            yield self._rowtodict(r)


    def lookup(self, ctx, attribs, find_function=None):

        logger.debug("Lookup on '%s' attribs: %s" % (self, attribs))

        if (len(attribs.keys()) == 0):
            raise Exception("Cannot lookup on table '%s' with no criteria (empty attribute set)" % self.name)

        find_function = find_function or self._find
        rows = find_function(ctx, attribs)
        rows = list(rows)
        if (len(rows) > 1):
            raise Exception("Found more than one row when searching for just one in table %s: %s" % (self.name, attribs))
        elif (len(rows) == 1):
            row = rows[0]
        else:
            row = None

        logger.debug("Lookup result on %s: %s = %s" % (self.name, attribs, row))
        return row

    def upsert(self, ctx, data, keys = []):
        """
        Upsert checks if the row exists and has changed. It does a lookup
        followed by an update or insert as appropriate.
        """

        # TODO: Check for AutoIncrement in keys, shall not be used

        # If keys
        qfilter = {}
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception("Could not find attribute '%s' in data when storing row data: %s" % (key, data))
        else:
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]

        # Do lookup
        if len(qfilter) > 0:

            row = self.lookup(ctx, qfilter)

            if (row):
                # Check row is identical
                for c in self.columns:
                    if c.type != "AutoIncrement":
                        v1 = row[c.name]
                        v2 = data[c.name]
                        if c.type == "Date":
                            v1 = row[c.name].strftime('%Y-%m-%d')
                            v2 = data[c.name].strftime('%Y-%m-%d')
                        if (isinstance(v1, str) or isinstance(v2, str)):
                            if (not isinstance(v1, str)): v1 = str(v1)
                            if (not isinstance(v2, str)): v2 = str(v2)
                        if (v1 != v2):
                            if (c.name not in self._lookup_changed_fields):
                                logger.warn("%s updating an entity that exists with different attributes, overwriting (field=%s, existing_value=%s, tried_value=%s)" % (self, c.name, v1, v2))
                                #self._lookup_changed_fields.append(c["name"])

                # Update the row
                row = self.update(ctx, data, keys)
                return row

        row_with_id = self.insert(ctx, data)
        return row_with_id

    def _prepare_row(self, ctx, data):
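        """Copy every non-AutoIncrement column from ``data`` into a row dict,
        counting (and, in debug mode, logging) non-unicode values bound for
        String columns."""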

        row = {}

        for column in self.columns:
            if column.type != "AutoIncrement":
                try:
                    row[column.name] = data[column.name]
                except KeyError as e:
                    raise Exception("Missing attribute for column %s in table '%s' while inserting row: %s" % (e, self.name, data))

                # Checks
                if (column.type == "String") and (not isinstance(row[column.name], str)):
                    self._unicode_errors = self._unicode_errors + 1
                    if (ctx.debug):
                        logger.warn("Unicode column %r received non-unicode string: %r " % (column.name, row[column.name]))

        return row

    def insert(self, ctx, data):

        row = self._prepare_row(ctx, data)

        logger.debug("Inserting in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(self.sa_table.insert(row))

        pk = self.pk(ctx)
        if pk:
            row[pk.name] = res.inserted_primary_key[0]

        self._inserts = self._inserts + 1
        SQLTable._inserts = SQLTable._inserts + 1

        if pk is not None:
            return row
        else:
            return row  # None

    def update(self, ctx, data, keys = []):

        row = self._prepare_row(ctx, data)

        # Automatically calculate lookup if necessary
        qfilter = {}
        pk = None  # ensure pk is defined even when explicit keys are given
        if (len(keys) > 0):
            for key in keys:
                try:
                    qfilter[key] = data[key]
                except KeyError as e:
                    raise Exception("Could not find attribute '%s' in data when storing row data: %s" % (key, data))
        else:
            pk = self.pk(ctx)
            qfilter[pk.name] = data[pk.name]

        logger.debug("Updating in table '%s' row: %s" % (self.name, row))
        res = self.connection.connection().execute(self.sa_table.update(self._attribsToClause(qfilter), row))

        self._updates = self._updates + 1
        SQLTable._updates = SQLTable._updates + 1

        if pk is not None:
            return row
        else:
            return None