def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    domain = Table(
        'domains', meta,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('created_at', DateTime),
        Column('name', String(length=64), nullable=False),
        Column('project_id', String(length=255)),
        Column('updated_at', DateTime),
        Column('user_id', String(length=255)),
        Column('uuid', String(length=255)),
        UniqueConstraint('name', name='uniq_domain0name'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    tables = [domain]

    for table in tables:
        try:
            table.create()
        except Exception as e:
            LOG.exception(e)
            meta.drop_all(tables=tables)
            raise
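# A hedged sketch (not part of any original migration) of the module-level
# setup the sqlalchemy-migrate snippets on this page assume but do not show:
# the SQLAlchemy names they reference and a logger bound to the name LOG.
# The original projects most likely use their own logging helpers (and some
# reference project-specific modules such as `models` or `types` that are not
# reproduced here); a stdlib logger is used only to keep the excerpt
# self-contained.
import logging

from sqlalchemy import (BigInteger, Boolean, Column, DateTime, ForeignKey,
                        Integer, MetaData, String, Table, Text,
                        UniqueConstraint)

LOG = logging.getLogger(__name__)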
Example #2
    def setup_class(cls):
        global metadata
        global t1, t2, t3

        metadata = MetaData(testing.db)
        t1 = Table("t1", metadata, Column("t1_id", Integer, primary_key=True), Column("name", String(32)))
        t2 = Table(
            "t2",
            metadata,
            Column("t2_id", Integer, primary_key=True),
            Column("t1_id", Integer, ForeignKey("t1.t1_id")),
            Column("name", String(32)),
        )
        t3 = Table(
            "t3",
            metadata,
            Column("t3_id", Integer, primary_key=True),
            Column("t2_id", Integer, ForeignKey("t2.t2_id")),
            Column("name", String(32)),
        )
        metadata.drop_all()
        metadata.create_all()

        # t1.10 -> t2.20 -> t3.30
        # t1.11 -> t2.21
        # t1.12
        t1.insert().execute(
            {"t1_id": 10, "name": "t1 #10"}, {"t1_id": 11, "name": "t1 #11"}, {"t1_id": 12, "name": "t1 #12"}
        )
        t2.insert().execute({"t2_id": 20, "t1_id": 10, "name": "t2 #20"}, {"t2_id": 21, "t1_id": 11, "name": "t2 #21"})
        t3.insert().execute({"t3_id": 30, "t2_id": 20, "name": "t3 #30"})
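
        # A hedged sketch (not part of the original fixture): given the rows
        # inserted above, a three-way join reproduces the
        # t1.10 -> t2.20 -> t3.30 chain. It uses the same legacy
        # bound-metadata / implicit-execution style as the rest of the
        # snippet (SQLAlchemy < 2.0).
        joined = t1.join(t2).join(t3).select()
        rows = joined.execute().fetchall()
        # expect a single row combining "t1 #10", "t2 #20" and "t3 #30"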
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    ceph_cluster = Table(
        "clusters",
        meta,
        Column("id", Integer, primary_key=True, nullable=False),
        # TODO: add a UUID column for the cluster, in case multiple clusters
        # share the same name.
        Column("name", String(length=255), nullable=True),
        Column("file_system", String(length=255), nullable=True),
        Column("primary_public_network", String(length=255), nullable=True),
        Column("secondary_public_network", String(length=255), nullable=True),
        Column("cluster_network", String(length=255), nullable=True),
        Column("journal_size", Integer, nullable=False),
        Column("size", Integer, nullable=True),
        # NOTE: this field is a JSON-formatted string.
        # For now, it contains the keyring.admin content from the
        # monitor node.
        Column("info_dict", Text(convert_unicode=False, unicode_error=None, _warn_on_bytestring=False), nullable=True),
        Column("ceph_conf", Text(length=10485760), nullable=True),
        Column("deleted_times", Integer, nullable=True),
        Column("created_at", DateTime(timezone=False)),
        Column("updated_at", DateTime(timezone=False)),
        Column("deleted_at", DateTime(timezone=False)),
        Column("deleted", Boolean(create_constraint=True, name=None)),
    )

    try:
        ceph_cluster.create()
    except Exception:
        meta.drop_all(tables=[ceph_cluster])
        raise
Example #4
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # create new table
    operation_log = Table('operation_log', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted',
                    Boolean(create_constraint=True, name=None)),
            Column('id', Integer(),
                    primary_key=True,
                    nullable=False,
                    autoincrement=True),
            Column('client_ip', String(255), nullable=False),
            Column('roles', String(255), nullable=False),
            Column('tenant', String(255), nullable=False),
            Column('user', String(255), nullable=False),
            Column('time_at', DateTime(timezone=False)),
            Column('path_info', String(255), nullable=False),
            Column('method', String(255), nullable=False),
            Column('body', String(255), nullable=False),
            )
    try:
        operation_log.create()
    except Exception:
        meta.drop_all(tables=[operation_log])
        raise

    if migrate_engine.name == "mysql":
        migrate_engine.execute("ALTER TABLE operation_log "
                "Engine=InnoDB")
Example #5
class datos:
    def __init__(self):
        self.engine = create_engine('sqlite:///data/conta.db')
        self.metadata = MetaData(bind=self.engine)
        self.conn = self.engine.connect()

    def unsetup(self):
        self.metadata.drop_all()

    def setup(self):
        self.tblfacturas = Table('facturas', self.metadata,
                                 Column('id', sqlite.TEXT, primary_key=True, index=True),
                                 Column('fecha', sqlite.DATE),
                                 Column('ruc', sqlite.TEXT),
                                 Column('razon_social', sqlite.TEXT),
                                 Column('condicion', sqlite.SMALLINT, default=0),
                                 Column('numero', sqlite.TEXT),
                                 Column('timbrado', sqlite.TEXT),
                                 Column('timbrado_inicio', sqlite.DATE),
                                 Column('timbrado_fin', sqlite.DATE)
                                 )

        self.metadata.create_all()

    def __del__(self):
        self.conn.close()
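
# A minimal usage sketch for the class above, not part of the original
# module. The invoice values are hypothetical; the sqlite path data/conta.db
# comes from the snippet itself, so the data/ directory must already exist.
db = datos()
db.setup()
db.conn.execute(
    db.tblfacturas.insert(),
    {"id": "F-0001", "ruc": "80012345-6", "razon_social": "ACME S.A.",
     "condicion": 0, "numero": "001-001-0000001"},
)
for factura in db.conn.execute(db.tblfacturas.select()):
    print(factura)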
Example #6
    def test_explicit_default_schema_metadata(self):
        engine = testing.db

        if testing.against("sqlite"):
            # Works for CREATE TABLE main.foo, SELECT FROM main.foo, etc.,
            # but fails on:
            #   FOREIGN KEY(col2) REFERENCES main.table1 (col1)
            schema = "main"
        else:
            schema = engine.dialect.default_schema_name

        assert bool(schema)

        metadata = MetaData(engine, schema=schema)
        table1 = Table("table1", metadata, Column("col1", sa.Integer, primary_key=True), test_needs_fk=True)
        table2 = Table(
            "table2",
            metadata,
            Column("col1", sa.Integer, primary_key=True),
            Column("col2", sa.Integer, sa.ForeignKey("table1.col1")),
            test_needs_fk=True,
        )
        try:
            metadata.create_all()
            metadata.create_all(checkfirst=True)
            assert len(metadata.tables) == 2
            metadata.clear()

            table1 = Table("table1", metadata, autoload=True)
            table2 = Table("table2", metadata, autoload=True)
            assert len(metadata.tables) == 2
        finally:
            metadata.drop_all()
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    rbds = Table(
        'rbds', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('pool', String(length=255), nullable=False),
        Column('image', String(length=255), nullable=False),
        Column('size', BigInteger, nullable=False),
        Column('format', Integer, nullable=False),
        Column('objects', Integer, nullable=False),
        Column('order', Integer, nullable=False),
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
    )

    try:
        rbds.create()
    except Exception:
        meta.drop_all(tables=[rbds])
        raise
Example #8
    def test_with_inheritance(self):
        metadata = MetaData(testing.db)

        table1 = Table("mytable", metadata,
            Column('col1', Integer, primary_key=True,
                                    test_needs_autoincrement=True),
            Column('col2', String(30))
            )

        table2 = Table("mytable2", metadata,
            Column('col1', Integer, ForeignKey('mytable.col1'),
                   primary_key=True, test_needs_autoincrement=True),
            Column('col3', String(30)),
            )

        @profile_memory
        def go():
            class A(fixtures.ComparableEntity):
                pass
            class B(A):
                pass

            mapper(A, table1,
                   polymorphic_on=table1.c.col2,
                   polymorphic_identity='a')
            mapper(B, table2,
                   inherits=A,
                   polymorphic_identity='b')

            sess = create_session()
            a1 = A()
            a2 = A()
            b1 = B(col3='b1')
            b2 = B(col3='b2')
            for x in [a1,a2,b1, b2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_(
                [
                    A(), A(), B(col3='b1'), B(col3='b2')
                ],
                alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

            # dont need to clear_mappers()
            del B
            del A

        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
        assert_no_mappers()
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    monitors = Table(
        'monitors', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('name', String(length=255), nullable=False),
        Column('address', String(length=255)),
        Column('health', String(length=255)),
        Column('details', String(length=255)),
        Column('skew', String(length=255)),
        Column('latency', String(length=255)),
        Column('kb_total', Integer),
        Column('kb_used', Integer),
        Column('kb_avail', Integer),
        Column('avail_percent', Integer),
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None))
    )

    try:
        monitors.create()
    except Exception:
        meta.drop_all(tables=[monitors])
        raise
Example #10
    def drop_all_tables(self):
        """Drop all tables, including their contents."""

        meta = MetaData(self.dbSessionMaker.engine)
        meta.reflect()
        meta.drop_all()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    #
    # New Tables
    #
    s3_images = Table('s3_images', meta,
                    Column('created_at',
                                      DateTime(timezone=False)),
                    Column('updated_at',
                                      DateTime(timezone=False)),
                    Column('deleted_at',
                                      DateTime(timezone=False)),
                    Column('deleted',
                           Boolean(create_constraint=True, name=None)),
                    Column('id', Integer(),
                                      primary_key=True,
                                      nullable=False,
                                      autoincrement=True),
                    Column('uuid', String(36),
                                      nullable=False))
    try:
        s3_images.create()
    except Exception:
        LOG.exception("Exception while creating table 's3_images'")
        meta.drop_all(tables=[s3_images])
        raise
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Create new table
    volume_usage_cache = Table('volume_usage_cache', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None)),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('volume_id', String(36), nullable=False),
            Column("instance_id", Integer()),
            Column('tot_last_refreshed', DateTime(timezone=False)),
            Column('tot_reads', BigInteger(), default=0),
            Column('tot_read_bytes', BigInteger(), default=0),
            Column('tot_writes', BigInteger(), default=0),
            Column('tot_write_bytes', BigInteger(), default=0),
            Column('curr_last_refreshed', DateTime(timezone=False)),
            Column('curr_reads', BigInteger(), default=0),
            Column('curr_read_bytes', BigInteger(), default=0),
            Column('curr_writes', BigInteger(), default=0),
            Column('curr_write_bytes', BigInteger(), default=0),
            mysql_engine='InnoDB',
            mysql_charset='utf8'
    )

    try:
        volume_usage_cache.create()
    except Exception:
        LOG.exception("Exception while creating table 'volume_usage_cache'")
        meta.drop_all(tables=[volume_usage_cache])
        raise
Example #13
    def test_basic(self):
        try:
            # the 'convert_unicode' should not get in the way of the reflection
            # process.  reflecttable for oracle, postgres (others?) expect non-unicode
            # strings in result sets/bind params
            bind = engines.utf8_engine(options={'convert_unicode':True})
            metadata = MetaData(bind)

            if testing.against('sybase', 'maxdb', 'oracle', 'mssql'):
                names = set(['plain'])
            else:
                names = set([u'plain', u'Unit\u00e9ble', u'\u6e2c\u8a66'])

            for name in names:
                Table(name, metadata, Column('id', sa.Integer, sa.Sequence(name + "_id_seq"), primary_key=True))
            metadata.create_all()

            reflected = set(bind.table_names())
            if not names.issubset(reflected):
                # Python source files in the utf-8 coding seem to normalize
                # literals as NFC (and the above are explicitly NFC).  Maybe
                # this database normalizes NFD on reflection.
                nfc = set([unicodedata.normalize('NFC', n) for n in names])
                self.assert_(nfc == names)
                # Yep.  But still ensure that bulk reflection and create/drop
                # work with either normalization.

            r = MetaData(bind, reflect=True)
            r.drop_all()
            r.create_all()
        finally:
            metadata.drop_all()
            bind.dispose()
Example #14
    def test_implicit_execution(self):
        metadata = MetaData()
        table = Table(
            "test_table",
            metadata,
            Column("foo", Integer),
            test_needs_acid=True,
        )
        conn = testing.db.connect()
        metadata.create_all(bind=conn)
        try:
            trans = conn.begin()
            metadata.bind = conn
            t = table.insert()
            assert t.bind is conn
            table.insert().execute(foo=5)
            table.insert().execute(foo=6)
            table.insert().execute(foo=7)
            trans.rollback()
            metadata.bind = None
            assert (
                conn.execute("select count(*) from test_table").scalar() == 0
            )
        finally:
            metadata.drop_all(bind=conn)
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    services = Table(
        'services', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('host', String(length=255)),
        Column('binary', String(length=255)),
        Column('topic', String(length=255)),
        Column('report_count', Integer, nullable=False),
        Column('disabled', Boolean),
        Column('availability_zone', String(length=255)),
    )

    # NOTE: compute_nodes is not defined anywhere in this excerpt; its table
    # definition appears to have been omitted when the snippet was extracted.
    try:
        compute_nodes.create()
    except Exception:
        meta.drop_all(tables=[compute_nodes])
        raise

    try:
        services.create()
    except Exception:
        meta.drop_all(tables=[services])
        raise
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    subscriber = Table(
        'subscriber', meta,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('created_at', DateTime),
        Column('domain', String(length=64), nullable=False),
        Column('email_address', String(length=64), nullable=False),
        Column('ha1', String(length=64), nullable=False),
        Column('ha1b', String(length=64), nullable=False),
        Column('password', String(length=25), nullable=False),
        Column('project_id', String(length=255)),
        Column('rpid', String(length=64)),
        Column('updated_at', DateTime),
        Column('user_id', String(length=255)),
        Column('username', String(length=64), nullable=False),
        Column('uuid', String(length=255)),
        UniqueConstraint(
            'username', 'domain',
            name='uniq_subscriber0username0domain'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    tables = [subscriber]

    for table in tables:
        try:
            table.create()
        except Exception as e:
            LOG.exception(e)
            meta.drop_all(tables=tables)
            raise
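
# A hedged sketch (not part of the original migration) of a matching
# downgrade(), following the meta.bind / table.drop() convention used in the
# embedded migration scripts later on this page:
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    subscriber = Table('subscriber', meta, autoload=True)
    subscriber.drop()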
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    cloned_resource = Table('plan_cloned_resources', meta,
                            Column('created_at', DateTime(timezone=False)),
                            Column('updated_at', DateTime(timezone=False)),
                            Column('deleted_at', DateTime(timezone=False)),
                            Column('id', Integer, primary_key=True,
                                   nullable=False),
                            Column('plan_id', String(length=36),
                                   nullable=False),
                            Column('destination', String(length=36),
                                   nullable=False),
                            Column('relation', types.Json),
                            Column('dependencies', types.Json),
                            Column('deleted', Integer),
                            mysql_engine='InnoDB',
                            mysql_charset='utf8')

    try:
        cloned_resource.create()
    except Exception:
        meta.drop_all(tables=[cloned_resource])
        raise

    if migrate_engine.name == "mysql":
        table = "template"
        migrate_engine.execute("SET foreign_key_checks = 0")
        migrate_engine.execute(
            "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table)
        migrate_engine.execute("SET foreign_key_checks = 1")
        migrate_engine.execute(
            "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
            migrate_engine.url.database)
        migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table)
Example #18
    def test_join_cache(self):
        metadata = MetaData(testing.db)
        table1 = Table('table1', metadata, Column('id', Integer,
                       primary_key=True,
                       test_needs_autoincrement=True), Column('data',
                       String(30)))
        table2 = Table('table2', metadata, Column('id', Integer,
                       primary_key=True,
                       test_needs_autoincrement=True), Column('data',
                       String(30)), Column('t1id', Integer,
                       ForeignKey('table1.id')))

        class Foo(object):
            pass

        class Bar(object):
            pass

        mapper(Foo, table1, properties={'bars'
               : relationship(mapper(Bar, table2))})
        metadata.create_all()
        session = sessionmaker()

        @profile_memory
        def go():
            s = table2.select()
            sess = session()
            sess.query(Foo).join((s, Foo.bars)).all()
            sess.rollback()
        try:
            go()
        finally:
            metadata.drop_all()
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    storage_pool_usages = Table(
        'storage_pool_usages', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('pool_id', Integer, ForeignKey(models.StoragePool.id), nullable=False),
        Column('vsmapp_id', Integer, ForeignKey(models.Vsmapp.id), nullable=False),
        Column('attach_status', String(length=255), nullable=False),
        Column('attach_at', DateTime(timezone=False)),
        Column('terminate_at', DateTime(timezone=False)),
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None), default=False),
    )

    try:
        storage_pool_usages.create()
    except Exception:
        meta.drop_all(tables=[storage_pool_usages])
        raise
Example #20
    def test_basic(self):
        # the 'convert_unicode' should not get in the way of the
        # reflection process.  reflecttable for oracle, postgresql
        # (others?) expect non-unicode strings in result sets/bind
        # params

        bind = self.bind
        names = set([rec[0] for rec in self.names])

        reflected = set(bind.table_names())

        # Jython 2.5 on Java 5 lacks unicodedata.normalize

        if not names.issubset(reflected) and hasattr(unicodedata, "normalize"):

            # Python source files in the utf-8 coding seem to
            # normalize literals as NFC (and the above are
            # explicitly NFC).  Maybe this database normalizes NFD
            # on reflection.

            nfc = set([unicodedata.normalize("NFC", n) for n in names])
            self.assert_(nfc == names)

            # Yep.  But still ensure that bulk reflection and
            # create/drop work with either normalization.

        r = MetaData(bind)
        r.reflect()
        r.drop_all(checkfirst=False)
        r.create_all(checkfirst=False)
Example #21
    def test_attached_as_schema(self):
        cx = testing.db.connect()
        try:
            cx.execute('ATTACH DATABASE ":memory:" AS  test_schema')
            dialect = cx.dialect
            assert dialect.get_table_names(cx, 'test_schema') == []
            meta = MetaData(cx)
            Table('created', meta, Column('id', Integer),
                  schema='test_schema')
            alt_master = Table('sqlite_master', meta, autoload=True,
                               schema='test_schema')
            meta.create_all(cx)
            eq_(dialect.get_table_names(cx, 'test_schema'), ['created'])
            assert len(alt_master.c) > 0
            meta.clear()
            reflected = Table('created', meta, autoload=True,
                              schema='test_schema')
            assert len(reflected.c) == 1
            cx.execute(reflected.insert(), dict(id=1))
            r = cx.execute(reflected.select()).fetchall()
            assert list(r) == [(1, )]
            cx.execute(reflected.update(), dict(id=2))
            r = cx.execute(reflected.select()).fetchall()
            assert list(r) == [(2, )]
            cx.execute(reflected.delete(reflected.c.id == 2))
            r = cx.execute(reflected.select()).fetchall()
            assert list(r) == []

            # note that sqlite_master is cleared, above

            meta.drop_all()
            assert dialect.get_table_names(cx, 'test_schema') == []
        finally:
            cx.execute('DETACH DATABASE test_schema')
Example #22
    def test_include_columns(self):
        meta = MetaData(testing.db)
        foo = Table('foo', meta, *[Column(n, sa.String(30))
                                   for n in ['a', 'b', 'c', 'd', 'e', 'f']])
        meta.create_all()
        try:
            meta2 = MetaData(testing.db)
            foo = Table('foo', meta2, autoload=True,
                        include_columns=['b', 'f', 'e'])
            # test that cols come back in original order
            eq_([c.name for c in foo.c], ['b', 'e', 'f'])
            for c in ('b', 'f', 'e'):
                assert c in foo.c
            for c in ('a', 'c', 'd'):
                assert c not in foo.c

            # test against a table which is already reflected
            meta3 = MetaData(testing.db)
            foo = Table('foo', meta3, autoload=True)
            foo = Table('foo', meta3, include_columns=['b', 'f', 'e'],
                        useexisting=True)
            eq_([c.name for c in foo.c], ['b', 'e', 'f'])
            for c in ('b', 'f', 'e'):
                assert c in foo.c
            for c in ('a', 'c', 'd'):
                assert c not in foo.c
        finally:
            meta.drop_all()
Example #23
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # create new table
    task_log = Table('task_log', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted',
                    Boolean(create_constraint=True, name=None)),
            Column('id', Integer(),
                    primary_key=True,
                    nullable=False,
                    autoincrement=True),
            Column('task_name', String(255), nullable=False),
            Column('state', String(255), nullable=False),
            Column('host', String(255), index=True, nullable=False),
            Column('period_beginning', String(255),
                                       index=True, nullable=False),
            Column('period_ending', String(255), index=True, nullable=False),
            Column('message', String(255), nullable=False),
            Column('task_items', Integer()),
            Column('errors', Integer()),
            )
    try:
        task_log.create()
    except Exception:
        meta.drop_all(tables=[task_log])
        raise

    if migrate_engine.name == "mysql":
        migrate_engine.execute("ALTER TABLE task_log "
                "Engine=InnoDB")
Example #24
class InvalidateDuringResultTest(fixtures.TestBase):
    __backend__ = True

    def setup(self):
        self.engine = engines.reconnecting_engine()
        self.meta = MetaData(self.engine)
        table = Table(
            'sometable', self.meta,
            Column('id', Integer, primary_key=True),
            Column('name', String(50)))
        self.meta.create_all()
        table.insert().execute(
            [{'id': i, 'name': 'row %d' % i} for i in range(1, 100)]
        )

    def teardown(self):
        self.meta.drop_all()
        self.engine.dispose()

    @testing.fails_if([
        '+mysqlconnector', '+mysqldb', '+cymysql', '+pymysql', '+pg8000'],
        "Buffers the result set and doesn't check for connection close")
    def test_invalidate_on_results(self):
        conn = self.engine.connect()
        result = conn.execute('select * from sometable')
        for x in range(20):
            result.fetchone()
        self.engine.test_shutdown()
        _assert_invalidated(result.fetchone)
        assert conn.invalidated
Example #25
    def test_unicode_warnings(self):
        metadata = MetaData(self.engine)
        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", Unicode(30)),
        )
        metadata.create_all()
        i = [1]

        # the times here is cranked way up so that we can see
        # pysqlite clearing out its internal buffer and allow
        # the test to pass
        @testing.emits_warning()
        @profile_memory()
        def go():

            # execute with a non-unicode object. a warning is emitted,
            # this warning shouldn't clog up memory.

            self.engine.execute(
                table1.select().where(table1.c.col2 == "foo%d" % i[0])
            )
            i[0] += 1

        try:
            go()
        finally:
            metadata.drop_all()
Example #26
    def test_boolean(self):
        """Test that the boolean only treats 1 as True

        """

        meta = MetaData(testing.db)
        t = Table('bool_table', meta, Column('id', Integer,
                  primary_key=True), Column('boo',
                  Boolean(create_constraint=False)))
        try:
            meta.create_all()
            testing.db.execute("INSERT INTO bool_table (id, boo) "
                               "VALUES (1, 'false');")
            testing.db.execute("INSERT INTO bool_table (id, boo) "
                               "VALUES (2, 'true');")
            testing.db.execute("INSERT INTO bool_table (id, boo) "
                               "VALUES (3, '1');")
            testing.db.execute("INSERT INTO bool_table (id, boo) "
                               "VALUES (4, '0');")
            testing.db.execute('INSERT INTO bool_table (id, boo) '
                               'VALUES (5, 1);')
            testing.db.execute('INSERT INTO bool_table (id, boo) '
                               'VALUES (6, 0);')
            eq_(t.select(t.c.boo).order_by(t.c.id).execute().fetchall(),
                [(3, True), (5, True)])
        finally:
            meta.drop_all()
    def test_use_alter(self):
        m = MetaData()
        Table('t', m,
                  Column('a', Integer),
        )

        Table('t2', m,
                Column('a', Integer, ForeignKey('t.a', use_alter=True,
                                                        name='fk_ta')),
                Column('b', Integer, ForeignKey('t.a', name='fk_tb'))
        )

        e = engines.mock_engine(dialect_name='postgresql')
        m.create_all(e)
        m.drop_all(e)

        e.assert_sql([
            'CREATE TABLE t (a INTEGER)',
            'CREATE TABLE t2 (a INTEGER, b INTEGER, CONSTRAINT fk_tb '
                            'FOREIGN KEY(b) REFERENCES t (a))',
            'ALTER TABLE t2 '
                    'ADD CONSTRAINT fk_ta FOREIGN KEY(a) REFERENCES t (a)',
            'ALTER TABLE t2 DROP CONSTRAINT fk_ta',
            'DROP TABLE t2',
            'DROP TABLE t'
        ])
Example #28
    def test_clauseelement(self):
        metadata = MetaData()
        table = Table('test_table', metadata,
            Column('foo', Integer))
        metadata.create_all(bind=testing.db)
        try:
            for elem in [
                table.select,
                lambda **kwargs: sa.func.current_timestamp(**kwargs).select(),
               # func.current_timestamp().select,
                lambda **kwargs:text("select * from test_table", **kwargs)
            ]:
                for bind in (
                    testing.db,
                    testing.db.connect()
                ):
                    try:
                        e = elem(bind=bind)
                        assert e.bind is bind
                        e.execute().close()
                    finally:
                        if isinstance(bind, engine.Connection):
                            bind.close()

                e = elem()
                assert e.bind is None
                assert_raises(
                    exc.UnboundExecutionError,
                    e.execute
                )
        finally:
            if isinstance(bind, engine.Connection):
                bind.close()
            metadata.drop_all(bind=testing.db)
Example #29
    def test_index_reflection(self):
        m1 = MetaData(testing.db)
        t1 = Table('party', m1,
            Column('id', sa.Integer, nullable=False),
            Column('name', sa.String(20), index=True)
            )
        i1 = sa.Index('idx1', t1.c.id, unique=True)
        i2 = sa.Index('idx2', t1.c.name, t1.c.id, unique=False)
        m1.create_all()
        try:
            m2 = MetaData(testing.db)
            t2 = Table('party', m2, autoload=True)

            print(len(t2.indexes), t2.indexes)
            assert len(t2.indexes) == 3
            # Make sure indexes are in the order we expect them in
            tmp = [(idx.name, idx) for idx in t2.indexes]
            tmp.sort()
            r1, r2, r3 = [idx[1] for idx in tmp]

            assert r1.name == 'idx1'
            assert r2.name == 'idx2'
            assert r1.unique == True
            assert r2.unique == False
            assert r3.unique == False
            assert [t2.c.id] == r1.columns
            assert [t2.c.name, t2.c.id] == r2.columns
            assert [t2.c.name] == r3.columns
        finally:
            m1.drop_all()
Example #30
def dbinit(args):
    with utils.status("Reading config file 'catalyst.json'"):
        with open("catalyst.json", "r") as f:
            config = json.load(f)

    metadata = config['metadata']
    module_name, variable_name = metadata.split(":")

    sys.path.insert(0, '')
    metadata = importlib.import_module(module_name)
    for variable_name in variable_name.split("."):
        metadata = getattr(metadata, variable_name)

    from sqlalchemy import create_engine, MetaData

    target = args.target or config.get('dburi', None)
    if target is None:
        raise Exception("No 'target' argument specified and no 'dburi' setting in config file.")

    if not args.yes and 'y' != input("Warning: any existing data at '%s' will be erased. Proceed? [y/n]" % target):
        return

    dst_engine = create_engine(target)

    # clear out any existing tables
    dst_metadata = MetaData(bind=dst_engine)
    dst_metadata.reflect()
    dst_metadata.drop_all()

    # create new tables
    dst_metadata = metadata
    dst_metadata.create_all(dst_engine)
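
# A hedged sketch of the catalyst.json layout dbinit() expects, inferred only
# from the keys read above ('metadata' as a "module:variable" path plus an
# optional 'dburi'); the module path and URL below are hypothetical:
#
#     {
#         "metadata": "myapp.models:metadata",
#         "dburi": "sqlite:///catalyst.db"
#     }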
Example #31
class TestShellDatabase(Shell, DB):
    """Commands associated with a particular database"""
    # We'll need to clean up after ourself, since the shell creates its own txn;
    # we need to connect to the DB to see if things worked

    level = DB.CONNECT

    @usedb()
    def test_version_control(self):
        """Ensure we can set version control on a database"""
        path_repos = repos = self.tmp_repos()
        url = self.url
        result = self.env.run('migrate create %s repository_name' % repos)

        result = self.env.run('migrate drop_version_control %(url)s %(repos)s'\
            % locals(), expect_error=True)
        self.assertEqual(result.returncode, 1)
        result = self.env.run('migrate version_control %(url)s %(repos)s' %
                              locals())

        # Clean up
        result = self.env.run(
            'migrate drop_version_control %(url)s %(repos)s' % locals())
        # Attempting to drop vc from a database without it should fail
        result = self.env.run('migrate drop_version_control %(url)s %(repos)s'\
            % locals(), expect_error=True)
        self.assertEqual(result.returncode, 1)

    @usedb()
    def test_wrapped_kwargs(self):
        """Commands with default arguments set by manage.py"""
        path_repos = repos = self.tmp_repos()
        url = self.url
        result = self.env.run('migrate create --name=repository_name %s' %
                              repos)
        result = self.env.run(
            'migrate drop_version_control %(url)s %(repos)s' % locals(),
            expect_error=True)
        self.assertEqual(result.returncode, 1)
        result = self.env.run('migrate version_control %(url)s %(repos)s' %
                              locals())

        result = self.env.run(
            'migrate drop_version_control %(url)s %(repos)s' % locals())

    @usedb()
    def test_version_control_specified(self):
        """Ensure we can set version control to a particular version"""
        path_repos = self.tmp_repos()
        url = self.url
        result = self.env.run('migrate create --name=repository_name %s' %
                              path_repos)
        result = self.env.run(
            'migrate drop_version_control %(url)s %(path_repos)s' % locals(),
            expect_error=True)
        self.assertEqual(result.returncode, 1)

        # Fill the repository
        path_script = self.tmp_py()
        version = 2
        for i in range(version):
            result = self.env.run('migrate script Desc --repository=%s' %
                                  path_repos)

        # Repository version is correct
        result = self.env.run('migrate version %s' % path_repos)
        self.assertEqual(result.stdout.strip(), str(version))

        # Apply versioning to DB
        result = self.env.run(
            'migrate version_control %(url)s %(path_repos)s %(version)s' %
            locals())

        # Test db version number (should start at 2)
        result = self.env.run('migrate db_version %(url)s %(path_repos)s' %
                              locals())
        self.assertEqual(result.stdout.strip(), str(version))

        # Clean up
        result = self.env.run(
            'migrate drop_version_control %(url)s %(path_repos)s' % locals())

    @usedb()
    def test_upgrade(self):
        """Can upgrade a versioned database"""
        # Create a repository
        repos_name = 'repos_name'
        repos_path = self.tmp()
        result = self.env.run('migrate create %(repos_path)s %(repos_name)s' %
                              locals())
        self.assertEqual(self.run_version(repos_path), 0)

        # Version the DB
        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        result = self.env.run('migrate version_control %s %s' %
                              (self.url, repos_path))

        # Upgrades with latest version == 0
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)
        result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)
        result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)
        result = self.env.run('migrate upgrade %s %s 1' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 1)
        result = self.env.run('migrate upgrade %s %s -1' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)

        # Add a script to the repository; upgrade the db
        result = self.env.run('migrate script Desc --repository=%s' %
                              (repos_path))
        self.assertEqual(self.run_version(repos_path), 1)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        # Test preview
        result = self.env.run('migrate upgrade %s %s 0 --preview_sql' %
                              (self.url, repos_path))
        result = self.env.run('migrate upgrade %s %s 0 --preview_py' %
                              (self.url, repos_path))

        result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 1)

        # Downgrade must have a valid version specified
        result = self.env.run('migrate downgrade %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)
        result = self.env.run('migrate downgrade %s %s -1' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)
        result = self.env.run('migrate downgrade %s %s 2' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)
        self.assertEqual(self.run_db_version(self.url, repos_path), 1)

        result = self.env.run('migrate downgrade %s %s 0' %
                              (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        result = self.env.run('migrate downgrade %s %s 1' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path))

    def _run_test_sqlfile(self, upgrade_script, downgrade_script):
        # TODO: add test script that checks if db really changed
        repos_path = self.tmp()
        repos_name = 'repos'

        result = self.env.run('migrate create %s %s' %
                              (repos_path, repos_name))
        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        result = self.env.run('migrate version_control %s %s' %
                              (self.url, repos_path))
        self.assertEqual(self.run_version(repos_path), 0)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        beforeCount = len(
            os.listdir(os.path.join(repos_path, 'versions'))
        )  # hmm, this number changes sometimes based on running from svn
        result = self.env.run('migrate script_sql %s --repository=%s' %
                              ('postgres', repos_path))
        self.assertEqual(self.run_version(repos_path), 1)
        self.assertEqual(len(os.listdir(os.path.join(repos_path, 'versions'))),
                         beforeCount + 2)

        open('%s/versions/001_postgres_upgrade.sql' % repos_path,
             'a').write(upgrade_script)
        open('%s/versions/001_postgres_downgrade.sql' % repos_path,
             'a').write(downgrade_script)

        self.assertEqual(self.run_db_version(self.url, repos_path), 0)
        self.assertRaises(Exception,
                          self.engine.text('select * from t_table').execute)

        result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 1)
        self.engine.text('select * from t_table').execute()

        result = self.env.run('migrate downgrade %s %s 0' %
                              (self.url, repos_path))
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)
        self.assertRaises(Exception,
                          self.engine.text('select * from t_table').execute)

    # The tests below are written with some postgres syntax, but the stuff
    # being tested (.sql files) ought to work with any db.
    @usedb(supported='postgres')
    def test_sqlfile(self):
        upgrade_script = """
        create table t_table (
            id serial,
            primary key(id)
        );
        """
        downgrade_script = """
        drop table t_table;
        """
        self.meta.drop_all()
        self._run_test_sqlfile(upgrade_script, downgrade_script)

    @usedb(supported='postgres')
    def test_sqlfile_comment(self):
        upgrade_script = """
        -- Comments in SQL break postgres autocommit
        create table t_table (
            id serial,
            primary key(id)
        );
        """
        downgrade_script = """
        -- Comments in SQL break postgres autocommit
        drop table t_table;
        """
        self._run_test_sqlfile(upgrade_script, downgrade_script)

    @usedb()
    def test_command_test(self):
        repos_name = 'repos_name'
        repos_path = self.tmp()

        result = self.env.run(
            'migrate create repository_name --repository=%s' % repos_path)
        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        result = self.env.run('migrate version_control %s %s' %
                              (self.url, repos_path))
        self.assertEqual(self.run_version(repos_path), 0)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        # Empty script should succeed
        result = self.env.run('migrate script Desc %s' % repos_path)
        result = self.env.run('migrate test %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_version(repos_path), 1)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        # Error script should fail
        script_path = self.tmp_py()
        script_text = '''
        from sqlalchemy import *
        from migrate import *

        def upgrade():
            print 'fgsfds'
            raise Exception()

        def downgrade():
            print 'sdfsgf'
            raise Exception()
        '''.replace("\n        ", "\n")
        file = open(script_path, 'w')
        file.write(script_text)
        file.close()

        result = self.env.run('migrate test %s %s bla' %
                              (self.url, repos_path),
                              expect_error=True)
        self.assertEqual(result.returncode, 2)
        self.assertEqual(self.run_version(repos_path), 1)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        # Nonempty script using migrate_engine should succeed
        script_path = self.tmp_py()
        script_text = '''
        from sqlalchemy import *
        from migrate import *

        from migrate.changeset import schema

        meta = MetaData(migrate_engine)
        account = Table('account', meta,
            Column('id', Integer, primary_key=True),
            Column('login', Text),
            Column('passwd', Text),
        )
        def upgrade():
            # Upgrade operations go here. Don't create your own engine; use the engine
            # named 'migrate_engine' imported from migrate.
            meta.create_all()

        def downgrade():
            # Operations to reverse the above upgrade go here.
            meta.drop_all()
        '''.replace("\n        ", "\n")
        file = open(script_path, 'w')
        file.write(script_text)
        file.close()
        result = self.env.run('migrate test %s %s' % (self.url, repos_path))
        self.assertEqual(self.run_version(repos_path), 1)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

    @usedb()
    def test_rundiffs_in_shell(self):
        # This is a variant of the test_schemadiff tests but run through the shell level.
        # These shell tests are hard to debug (since they keep forking processes)
        # so they shouldn't replace the lower-level tests.
        repos_name = 'repos_name'
        repos_path = self.tmp()
        script_path = self.tmp_py()
        model_module = 'migrate.tests.fixture.models:meta_rundiffs'
        old_model_module = 'migrate.tests.fixture.models:meta_old_rundiffs'

        # Create empty repository.
        self.meta = MetaData(self.engine)
        self.meta.reflect()
        # in case junk tables are lying around in the test database
        self.meta.drop_all()

        result = self.env.run('migrate create %s %s' %
                              (repos_path, repos_name))
        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        result = self.env.run('migrate version_control %s %s' %
                              (self.url, repos_path))
        self.assertEqual(self.run_version(repos_path), 0)
        self.assertEqual(self.run_db_version(self.url, repos_path), 0)

        # Setup helper script.
        result = self.env.run('migrate manage %s --repository=%s --url=%s --model=%s'\
            % (script_path, repos_path, self.url, model_module))
        self.assertTrue(os.path.exists(script_path))

        # Model is defined but database is empty.
        result = self.env.run('migrate compare_model_to_db %s %s --model=%s' \
            % (self.url, repos_path, model_module))
        self.assertTrue("tables missing from database: tmp_account_rundiffs" in
                        result.stdout)

        # Test Deprecation
        result = self.env.run('migrate compare_model_to_db %s %s --model=%s' \
            % (self.url, repos_path, model_module.replace(":", ".")), expect_error=True)
        self.assertEqual(result.returncode, 0)
        self.assertTrue("tables missing from database: tmp_account_rundiffs" in
                        result.stdout)

        # Update db to latest model.
        result = self.env.run('migrate update_db_from_model %s %s %s'\
            % (self.url, repos_path, model_module))
        self.assertEqual(self.run_version(repos_path), 0)
        self.assertEqual(
            self.run_db_version(self.url, repos_path), 0
        )  # version did not get bumped yet because new version not yet created

        result = self.env.run('migrate compare_model_to_db %s %s %s'\
            % (self.url, repos_path, model_module))
        self.assertTrue("No schema diffs" in result.stdout)

        result = self.env.run('migrate drop_version_control %s %s' %
                              (self.url, repos_path),
                              expect_error=True)
        result = self.env.run('migrate version_control %s %s' %
                              (self.url, repos_path))

        result = self.env.run('migrate create_model %s %s' %
                              (self.url, repos_path))
        temp_dict = dict()
        six.exec_(result.stdout, temp_dict)

        # TODO: breaks on SA06 and SA05 - in need of total refactor - use different approach

        # TODO: compare whole table
        self.compare_columns_equal(models.tmp_account_rundiffs.c,
                                   temp_dict['tmp_account_rundiffs'].c,
                                   ['type'])
        ##self.assertTrue("""tmp_account_rundiffs = Table('tmp_account_rundiffs', meta,
        ##Column('id', Integer(),  primary_key=True, nullable=False),
        ##Column('login', String(length=None, convert_unicode=False, assert_unicode=None)),
        ##Column('passwd', String(length=None, convert_unicode=False, assert_unicode=None))""" in result.stdout)

        ## We're happy with db changes, make first db upgrade script to go from version 0 -> 1.
        #result = self.env.run('migrate make_update_script_for_model', expect_error=True)
        #self.assertTrue('Not enough arguments' in result.stderr)

        #result_script = self.env.run('migrate make_update_script_for_model %s %s %s %s'\
        #% (self.url, repos_path, old_model_module, model_module))
        #self.assertEqualIgnoreWhitespace(result_script.stdout,
        #'''from sqlalchemy import *
        #from migrate import *

        #from migrate.changeset import schema

        #meta = MetaData()
        #tmp_account_rundiffs = Table('tmp_account_rundiffs', meta,
        #Column('id', Integer(),  primary_key=True, nullable=False),
        #Column('login', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)),
        #Column('passwd', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)),
        #)

        #def upgrade(migrate_engine):
        ## Upgrade operations go here. Don't create your own engine; bind migrate_engine
        ## to your metadata
        #meta.bind = migrate_engine
        #tmp_account_rundiffs.create()

        #def downgrade(migrate_engine):
        ## Operations to reverse the above upgrade go here.
        #meta.bind = migrate_engine
        #tmp_account_rundiffs.drop()''')

        ## Save the upgrade script.
        #result = self.env.run('migrate script Desc %s' % repos_path)
        #upgrade_script_path = '%s/versions/001_Desc.py' % repos_path
        #open(upgrade_script_path, 'w').write(result_script.stdout)

        #result = self.env.run('migrate compare_model_to_db %s %s %s'\
        #% (self.url, repos_path, model_module))
        #self.assertTrue("No schema diffs" in result.stdout)

        # in case junk tables are lying around in the test database
        self.meta.drop_all()
Example #32
def drop_all_tables():
    db_metadata = MetaData(bind=engine)
    db_metadata.reflect()
    db_metadata.drop_all()
class Storage(object):
    """SQL Tabular Storage.

    It's an implementation of `jsontableschema.Storage`.

    Args:
        engine (object): SQLAlchemy engine
        dbschema (str): database schema name
        prefix (str): prefix for all buckets

    """

    # Public

    def __init__(self, engine, dbschema=None, prefix=''):

        # Set attributes
        self.__connection = engine.connect()
        self.__dbschema = dbschema
        self.__prefix = prefix
        self.__descriptors = {}

        # Create metadata
        self.__metadata = MetaData(bind=self.__connection,
                                   schema=self.__dbschema,
                                   reflect=True)

    def __repr__(self):

        # Template and format
        template = 'Storage <{engine}/{dbschema}>'
        text = template.format(engine=self.__connection.engine,
                               dbschema=self.__dbschema)

        return text

    @property
    def buckets(self):

        # Collect
        buckets = []
        for table in self.__metadata.sorted_tables:
            bucket = mappers.tablename_to_bucket(self.__prefix, table.name)
            if bucket is not None:
                buckets.append(bucket)

        return buckets

    def create(self, bucket, descriptor, force=False):

        # Make lists
        buckets = bucket
        if isinstance(bucket, six.string_types):
            buckets = [bucket]
        descriptors = descriptor
        if isinstance(descriptor, dict):
            descriptors = [descriptor]

        # Check buckets for existence
        for bucket in reversed(self.buckets):
            if bucket in buckets:
                if not force:
                    message = 'Bucket "%s" already exists.' % bucket
                    raise RuntimeError(message)
                self.delete(bucket)

        # Define buckets
        for bucket, descriptor in zip(buckets, descriptors):

            # Add to schemas
            self.__descriptors[bucket] = descriptor

            # Create the table
            jsontableschema.validate(descriptor)
            tablename = mappers.bucket_to_tablename(self.__prefix, bucket)
            columns, constraints = mappers.descriptor_to_columns_and_constraints(
                self.__prefix, bucket, descriptor)
            Table(tablename, self.__metadata, *(columns + constraints))

        # Create tables, update metadata
        self.__metadata.create_all()

    def delete(self, bucket=None, ignore=False):

        # Make lists
        buckets = bucket
        if isinstance(bucket, six.string_types):
            buckets = [bucket]
        elif bucket is None:
            buckets = reversed(self.buckets)

        # Iterate over buckets
        tables = []
        for bucket in buckets:

            # Check existent
            if bucket not in self.buckets:
                if not ignore:
                    message = 'Bucket "%s" doesn\'t exist.' % bucket
                    raise RuntimeError(message)

            # Remove from buckets
            if bucket in self.__descriptors:
                del self.__descriptors[bucket]

            # Add table to tables
            table = self.__get_table(bucket)
            tables.append(table)

        # Drop tables, update metadata
        self.__metadata.drop_all(tables=tables)
        self.__metadata.clear()
        self.__metadata.reflect()

    def describe(self, bucket, descriptor=None):

        # Set descriptor
        if descriptor is not None:
            self.__descriptors[bucket] = descriptor

        # Get descriptor
        else:
            descriptor = self.__descriptors.get(bucket)
            if descriptor is None:
                table = self.__get_table(bucket)
                descriptor = mappers.columns_and_constraints_to_descriptor(
                    self.__prefix, table.name, table.columns,
                    table.constraints)

        return descriptor

    def iter(self, bucket):

        # Get result
        table = self.__get_table(bucket)
        # Streaming may not work with some backends:
        # http://docs.sqlalchemy.org/en/latest/core/connections.html
        select = table.select().execution_options(stream_results=True)
        result = select.execute()

        # Yield data
        for row in result:
            yield list(row)

    def read(self, bucket):

        # Get rows
        rows = list(self.iter(bucket))

        return rows

    def write(self, bucket, rows):

        # Prepare
        BUFFER_SIZE = 1000
        descriptor = self.describe(bucket)
        schema = jsontableschema.Schema(descriptor)
        table = self.__get_table(bucket)

        # Write
        with self.__connection.begin():
            keyed_rows = []
            for row in rows:
                keyed_row = {}
                for index, field in enumerate(schema.fields):
                    value = row[index]
                    try:
                        value = field.cast_value(value)
                    except InvalidObjectType:
                        value = json.loads(value)
                    keyed_row[field.name] = value
                keyed_rows.append(keyed_row)
                if len(keyed_rows) > BUFFER_SIZE:
                    # Insert data
                    table.insert().execute(keyed_rows)
                    # Clean memory
                    keyed_rows = []
            if len(keyed_rows) > 0:
                # Insert data
                table.insert().execute(keyed_rows)

    # Private

    def __get_table(self, bucket):
        """Return SQLAlchemy table for the given bucket.
        """

        # Prepare name
        tablename = mappers.bucket_to_tablename(self.__prefix, bucket)
        if self.__dbschema:
            tablename = '.'.join((self.__dbschema, tablename))

        return self.__metadata.tables[tablename]
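
# A minimal usage sketch for the Storage class above (added for illustration,
# not from the original source). It assumes an in-memory SQLite engine and a
# plain two-field JSON Table Schema descriptor; the bucket and field names
# below are illustrative only.
from sqlalchemy import create_engine


def demo_storage_usage():
    engine = create_engine('sqlite://')
    storage = Storage(engine, prefix='demo_')

    descriptor = {
        'fields': [
            {'name': 'id', 'type': 'integer'},
            {'name': 'name', 'type': 'string'},
        ],
    }

    # Create the bucket (backing table), write two rows, read them back,
    # then drop the bucket again.
    storage.create('articles', descriptor)
    storage.write('articles', [[1, 'one'], [2, 'two']])
    print(storage.read('articles'))  # -> [[1, 'one'], [2, 'two']]
    storage.delete('articles')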
class DialectSQLAlchUsageTest(fixtures.TestBase):
    """ This usage test is meant to serve as documentation and follows the
        tutorial here: http://docs.sqlalchemy.org/en/latest/core/tutorial.html
        but with the dialect being developed
    """

    # Note: this test uses pytest which captures stdout by default, pass -s to allow output to stdout

    def setUp(self):

        self.dialect = TeradataDialect()
        self.conn = testing.db.connect()
        self.engine = self.conn.engine

        # build a table with columns
        self.metadata = MetaData()
        self.users = Table('my_users', self.metadata,
                           Column('uid', Integer, primary_key=True),
                           Column('name', String(256)),
                           Column('fullname', String(256)),
                           )

        self.addresses = Table('addresses', self.metadata,
                               Column('id', Integer, primary_key=True),
                               Column('user_id', None, ForeignKey('my_users.uid'), nullable=False),
                               Column('email_address', String(256), nullable=False),
                               )

        self.metadata.create_all(self.engine)

    def tearDown(self):
        self.metadata.drop_all(self.engine)
        self.conn.close()

    def test_show_state(self):
        assert self.users in self.metadata.sorted_tables
        assert self.addresses in self.metadata.sorted_tables

    def test_inserts(self):
        self.ins = self.users.insert()

        # inserts by default require all columns to be provided
        assert(str(self.ins) == 'INSERT INTO my_users (uid, name, fullname) VALUES (:uid, :name, :fullname)')

        # use the VALUES clause to limit the values inserted
        self.ins = self.users.insert().values(name='mark', fullname='mark sandan')

        # actual values don't get stored in the string
        assert(str(self.ins) == 'INSERT INTO my_users (name, fullname) VALUES (:name, :fullname)')

        # data values are stored in the INSERT but only are used when executed, we can peek
        assert(str(self.ins.compile().params) == str({'fullname': 'mark sandan', 'name': 'mark'}))

        # None of the inserts above in this test get added to the database

    def test_executing(self):
        # re-create a new INSERT object
        self.ins = self.users.insert()

        # execute the insert statement
        res = self.conn.execute(self.ins, uid=1, name='jack', fullname='Jack Jones')
        assert(res.inserted_primary_key == [1])
        res = self.conn.execute(self.ins, uid=2, name='wendy', fullname='Wendy Williams')
        assert(res.inserted_primary_key == [2])

        # the res variable is a ResultProxy object, analogous to a DBAPI cursor

        # issue many inserts, the same is possible for update and delete
        self.conn.execute(self.addresses.insert(), [
             {'id': 1, 'user_id': 1, 'email_address': '*****@*****.**'},
             {'id': 2, 'user_id': 1, 'email_address': '*****@*****.**'},
             {'id': 3, 'user_id': 2, 'email_address': '*****@*****.**'},
             {'id': 4, 'user_id': 2, 'email_address': '*****@*****.**'}
         ])

        # test selects on the inserted values
        from sqlalchemy.sql import select

        s = select([self.users])
        res = self.conn.execute(s)
        u1 = res.fetchone()
        u2 = res.fetchone()

        # accessing rows
        assert(u1['name'] == u'jack')
        assert(u1['fullname'] == u'Jack Jones')

        assert(u2['name'] == u'wendy')
        assert(u2['fullname'] == u'Wendy Williams')

        assert(u1[1] == u1['name'])
        assert(u1[2] == u1['fullname'])

        assert(u2[1] == u2['name'])
        assert(u2[2] == u2['fullname'])

        # be sure to close the result set
        res.close()

        # use cols to access rows
        res = self.conn.execute(s)
        u3 = res.fetchone()
        u4 = res.fetchone()

        assert(u3[self.users.c.name] == u1['name'])
        assert(u3[self.users.c.fullname] == u1['fullname'])

        assert(u4[self.users.c.name] == u2['name'])
        assert(u4[self.users.c.fullname] == u2['fullname'])

        # reference individual columns in select clause
        s = select([self.users.c.name, self.users.c.fullname])
        res = self.conn.execute(s)
        u3 = res.fetchone()
        u4 = res.fetchone()

        assert(u3[self.users.c.name] == u1['name'])
        assert(u3[self.users.c.fullname] == u1['fullname'])

        assert(u4[self.users.c.name] == u2['name'])
        assert(u4[self.users.c.fullname] == u2['fullname'])

        # test joins
        # cartesian product
        usrs = [row for row in self.conn.execute(select([self.users]))]
        addrs = [row for row in self.conn.execute(select([self.addresses]))]
        prod = [row for row in self.conn.execute(select([self.users, self.addresses]))]
        assert(len(prod) == len(usrs) * len(addrs))

        # inner join on id
        s = select([self.users, self.addresses]).where(self.users.c.uid == self.addresses.c.user_id)
        inner = [row for row in self.conn.execute(s)]
        assert(len(inner) == 4)

        # operators between columns objects & other col objects/literals
        expr = self.users.c.uid == self.addresses.c.user_id
        assert('my_users.uid = addresses.user_id' == str(expr))
        # see how Teradata concats two strings
        assert(str((self.users.c.name + self.users.c.fullname).compile(bind=self.engine)) ==
               'my_users.name || my_users.fullname')

        # built-in conjunctions
        from sqlalchemy.sql import and_, or_

        s = select([(self.users.c.fullname +
                     ", " +
                     self.addresses.c.email_address).label('titles')]).where(
                         and_(
                             self.users.c.uid == self.addresses.c.user_id,
                             self.users.c.name.between('m', 'z'),
                             or_(
                                 self.addresses.c.email_address.like('*****@*****.**'),
                                 self.addresses.c.email_address.like('*****@*****.**')
                             )
                         )
                     )
        # print(s)
        res = self.conn.execute(s)
        for row in res:
            assert(str(row[0]) == u'Wendy Williams, [email protected]')

        # more joins
        # ON condition auto generated based on ForeignKey
        assert(str(self.users.join(self.addresses)) ==
               'my_users JOIN addresses ON my_users.uid = addresses.user_id')

        # specify the join ON condition
        self.users.join(self.addresses,
                        self.addresses.c.email_address.like(self.users.c.name + '%'))

        # select from clause to specify tables and the ON condition
        s = select([self.users.c.fullname]).select_from(
            self.users.join(self.addresses, self.addresses.c.email_address.like(self.users.c.name + '%')))
        res = self.conn.execute(s)
        assert(len(res.fetchall()) == 3)

        # left outer joins
        s = select([self.users.c.fullname]).select_from(self.users.outerjoin(self.addresses))
        # outer join works with teradata dialect (unlike oracle dialect < version9)

        assert(str(s) == str(s.compile(dialect=self.dialect)))

        # test bind params (positional)

        from sqlalchemy import text
        s = self.users.select(self.users.c.name.like(
                                bindparam('username', type_=String)+text("'%'")))
        res = self.conn.execute(s, username='******').fetchall()
        assert(len(res) == 1)

        # functions
        from sqlalchemy.sql import func, column

        # certain function names are known by sqlalchemy
        assert(str(func.current_timestamp()) == 'CURRENT_TIMESTAMP')

        # functions can be used in the select
        res = self.conn.execute(select(
            [func.max(self.addresses.c.email_address, type_=String).label(
                'max_email')])).scalar()
        assert(res, '*****@*****.**')

        # func result sets, define a function taking params x,y return q,z,r
        # useful for nested queries, subqueries - w/ dynamic params
        calculate = select([column('q'), column('z'), column('r')]).\
            select_from(
                         func.calculate(
                           bindparam('x'),
                           bindparam('y')
                         )
                       )
        calc = calculate.alias()
        s = select([self.users]).where(self.users.c.uid > calc.c.z)
        assert('SELECT my_users.uid, my_users.name, my_users.fullname\
               FROM my_users, (SELECT q, z, r\
                               FROM calculate(:x, :y)) AS anon_1\
               WHERE my_users.uid > anon_1.z', s)
        # instantiate the func
        calc1 = calculate.alias('c1').unique_params(x=17, y=45)
        calc2 = calculate.alias('c2').unique_params(x=5, y=12)

        s = select([self.users]).where(self.users.c.uid.between(calc1.c.z, calc2.c.z))
        parms = s.compile().params

        assert('x_2' in parms and 'x_1' in parms)
        assert('y_2' in parms and 'y_1' in parms)
        assert(parms['x_1'] == 17 and parms['y_1'] == 45)
        assert(parms['x_2'] == 5 and parms['y_2'] == 12)

        # order by asc
        stmt = select([self.users.c.name]).order_by(self.users.c.name)
        res = self.conn.execute(stmt).fetchall()

        assert('jack' == res[0][0])
        assert('wendy' == res[1][0])

        # order by desc
        stmt = select([self.users.c.name]).order_by(self.users.c.name.desc())
        res = self.conn.execute(stmt).fetchall()

        assert('wendy' == res[0][0])
        assert('jack' == res[1][0])

        # group by
        stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\
            select_from(self.users.join(self.addresses)).\
            group_by(self.users.c.name)

        res = self.conn.execute(stmt).fetchall()

        assert(res[1][0] == 'jack')
        assert(res[0][0] == 'wendy')
        assert(res[0][1] == res[1][1])

        # group by having
        stmt = select([self.users.c.name, func.count(self.addresses.c.id)]).\
            select_from(self.users.join(self.addresses)).\
            group_by(self.users.c.name).\
            having(func.length(self.users.c.name) > 4)

        res = self.conn.execute(stmt).fetchall()

        assert(res[0] == ('wendy', 2))

        # distinct
        stmt = select([self.users.c.name]).\
            where(self.addresses.c.email_address.contains(self.users.c.name)).distinct()

        res = self.conn.execute(stmt).fetchall()

        assert(len(res) == 2)
        assert(res[0][0] != res[1][0])

        # limit
        stmt = select([self.users.c.name, self.addresses.c.email_address]).\
            select_from(self.users.join(self.addresses)).\
            limit(1)

        res = self.conn.execute(stmt).fetchall()

        assert(len(res) == 1)

        # offset

        # test union and except
        from sqlalchemy.sql import except_, union

        u = union(
            self.addresses.select().where(self.addresses.c.email_address == '*****@*****.**'),
            self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),)# .order_by(self.addresses.c.email_address)
        # print(u)
        # #res = self.conn.execute(u)  # this fails: syntax error, ORDER BY expects a positive integer?

        u = except_(
              self.addresses.select().where(self.addresses.c.email_address.like('%@%.com')),
              self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')))
        res = self.conn.execute(u).fetchall()
        assert(len(res) == 1)

        u = except_(
               union(
                  self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**')),
                  self.addresses.select().where(self.addresses.c.email_address.like('*****@*****.**'))
               ).alias().select(), self.addresses.select(self.addresses.c.email_address.like('*****@*****.**'))
        )

        res = self.conn.execute(u).fetchall()
        assert(len(res) == 1)

        # scalar subqueries
        stmt = select([func.count(self.addresses.c.id)]).where(self.users.c.uid == self.addresses.c.user_id).as_scalar()

        # we can place stmt as any other column within another select
        res = self.conn.execute(select([self.users.c.name, stmt])).fetchall()

        # res is a list of tuples, one tuple per user's name
        assert(len(res) == 2)

        u1 = res[0]
        u2 = res[1]

        assert(len(u1) == len(u2))
        assert(u1[0] == u'jack')
        assert(u1[1] == u2[1])
        assert(u2[0] == u'wendy')

        # we can label the inner query
        stmt = select([func.count(self.addresses.c.id)]).\
            where(self.users.c.uid == self.addresses.c.user_id).\
            label("address_count")

        res = self.conn.execute(select([self.users.c.name, stmt])).fetchall()
        assert(len(res) == 2)

        u1 = res[0]
        u2 = res[1]

        assert(len(u1) == 2)
        assert(len(u2) == 2)

        # inserts, updates, deletes
        stmt = self.users.update().values(fullname="Fullname: " + self.users.c.name)
        res = self.conn.execute(stmt)

        assert('name_1' in res.last_updated_params())
        assert(res.last_updated_params()['name_1'] == 'Fullname: ')

        stmt = self.users.insert().values(name=bindparam('_name') + " .. name")
        res = self.conn.execute(stmt, [{'uid': 4, '_name': 'name1'}, {'uid': 5, '_name': 'name2'}, {'uid': 6, '_name': 'name3'}, ])

        # updates
        stmt = self.users.update().where(self.users.c.name == 'jack').values(name='ed')
        res = self.conn.execute(stmt)

        assert(res.rowcount == 1)
        assert(res.returns_rows is False)

        # update many with bound params
        stmt = self.users.update().where(self.users.c.name == bindparam('oldname')).\
            values(name=bindparam('newname'))
        res = self.conn.execute(stmt, [
                   {'oldname': 'jack', 'newname': 'ed'},
                   {'oldname': 'wendy', 'newname': 'mary'},
        ])

        assert(res.returns_rows is False)
        assert(res.rowcount == 1)

        res = self.conn.execute(select([self.users]).where(self.users.c.name == 'ed'))
        r = res.fetchone()
        assert(r['name'] == 'ed')

        # correlated updates
        stmt = select([self.addresses.c.email_address]).\
            where(self.addresses.c.user_id == self.users.c.uid).\
            limit(1)
        # this fails with a syntax error because of LIMIT - need TOP/SAMPLE instead
        # Note: TOP can't be in a subquery
        # res = self.conn.execute(self.users.update().values(fullname=stmt))

        # multiple table updates
        stmt = self.users.update().\
            values(name='ed wood').\
            where(self.users.c.uid == self.addresses.c.id).\
            where(self.addresses.c.email_address.startswith('ed%'))

        # this fails; Teradata uses UPDATE ... FROM ... SET ... WHERE, not UPDATE ... SET ... FROM ... WHERE
        # #res = self.conn.execute(stmt)

        stmt = self.users.update().\
            values({
               self.users.c.name: 'ed wood',
               self.addresses.c.email_address: '*****@*****.**'
            }).\
            where(self.users.c.uid == self.addresses.c.id).\
            where(self.addresses.c.email_address.startswith('ed%'))

        # fails but works on MySQL, should this work for us?
        # #res = self.conn.execute(stmt)

        # deletes
        self.conn.execute(self.addresses.delete())
        self.conn.execute(self.users.delete().where(self.users.c.name > 'm'))

        # matched row counts
        # updates + deletes have a number indicating # rows matched by WHERE clause
        res = self.conn.execute(self.users.delete())
        assert(res.rowcount == 1)
Example #35
    def test_with_inheritance(self):
        metadata = MetaData()

        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table2 = Table(
            "mytable2",
            metadata,
            Column(
                "col1",
                Integer,
                ForeignKey("mytable.col1"),
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col3", String(30)),
        )

        @profile_memory()
        def go():
            class A(fixtures.ComparableEntity):
                pass

            class B(A):
                pass

            clear_mappers()
            self.mapper_registry.map_imperatively(
                A,
                table1,
                polymorphic_on=table1.c.col2,
                polymorphic_identity="a",
            )
            self.mapper_registry.map_imperatively(B,
                                                  table2,
                                                  inherits=A,
                                                  polymorphic_identity="b")

            sess = Session(self.engine, autoflush=False)
            a1 = A()
            a2 = A()
            b1 = B(col3="b1")
            b2 = B(col3="b2")
            for x in [a1, a2, b1, b2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

            # don't need to clear_mappers()
            del B
            del A

        metadata.create_all(self.engine)
        try:
            go()
        finally:
            metadata.drop_all(self.engine)
        assert_no_mappers()
Example #36
    def test_mapper_reset(self):
        metadata = MetaData()

        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table2 = Table(
            "mytable2",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
            Column("col3", Integer, ForeignKey("mytable.col1")),
        )

        @profile_memory()
        def go():
            self.mapper_registry.map_imperatively(
                A,
                table1,
                properties={"bs": relationship(B, order_by=table2.c.col1)},
            )
            self.mapper_registry.map_imperatively(B, table2)

            sess = Session(self.engine, autoflush=False)
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1, a2, a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_(
                [
                    A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                    A(col2="a2", bs=[]),
                    A(col2="a3", bs=[B(col2="b3")]),
                ],
                alist,
            )

            for a in alist:
                sess.delete(a)
            sess.flush()
            sess.close()
            clear_mappers()

        metadata.create_all(self.engine)
        try:
            go()
        finally:
            metadata.drop_all(self.engine)
        assert_no_mappers()
Example #37
    def test_session(self):
        metadata = MetaData()

        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table2 = Table(
            "mytable2",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
            Column("col3", Integer, ForeignKey("mytable.col1")),
        )

        metadata.create_all(self.engine)

        m1 = self.mapper_registry.map_imperatively(
            A,
            table1,
            properties={
                "bs":
                relationship(B, cascade="all, delete", order_by=table2.c.col1)
            },
        )
        m2 = self.mapper_registry.map_imperatively(B, table2)

        @profile_memory()
        def go():
            with Session(self.engine) as sess:
                a1 = A(col2="a1")
                a2 = A(col2="a2")
                a3 = A(col2="a3")
                a1.bs.append(B(col2="b1"))
                a1.bs.append(B(col2="b2"))
                a3.bs.append(B(col2="b3"))
                for x in [a1, a2, a3]:
                    sess.add(x)
                sess.commit()

                alist = sess.query(A).order_by(A.col1).all()
                eq_(
                    [
                        A(col2="a1", bs=[B(col2="b1"),
                                         B(col2="b2")]),
                        A(col2="a2", bs=[]),
                        A(col2="a3", bs=[B(col2="b3")]),
                    ],
                    alist,
                )

                for a in alist:
                    sess.delete(a)
                sess.commit()

        go()

        metadata.drop_all(self.engine)
        del m1, m2
        assert_no_mappers()
Example #38
class PostgresqlDefaultCompareTest(TestBase):
    __only_on__ = "postgresql"
    __backend__ = True

    @classmethod
    def setup_class(cls):
        cls.bind = config.db
        staging_env()
        cls.migration_context = MigrationContext.configure(
            connection=cls.bind.connect(),
            opts={
                "compare_type": True,
                "compare_server_default": True
            },
        )

    def setUp(self):
        self.metadata = MetaData(self.bind)
        self.autogen_context = api.AutogenContext(self.migration_context)

    @classmethod
    def teardown_class(cls):
        clear_staging_env()

    def tearDown(self):
        self.metadata.drop_all()

    def _compare_default_roundtrip(self,
                                   type_,
                                   orig_default,
                                   alternate=None,
                                   diff_expected=None):
        diff_expected = (diff_expected if diff_expected is not None else
                         alternate is not None)
        if alternate is None:
            alternate = orig_default

        t1 = Table(
            "test",
            self.metadata,
            Column("somecol", type_, server_default=orig_default),
        )
        t2 = Table(
            "test",
            MetaData(),
            Column("somecol", type_, server_default=alternate),
        )

        t1.create(self.bind)

        insp = inspect(self.bind)
        cols = insp.get_columns(t1.name)
        insp_col = Column("somecol",
                          cols[0]["type"],
                          server_default=text(cols[0]["default"]))
        op = ops.AlterColumnOp("test", "somecol")
        _compare_server_default(
            self.autogen_context,
            op,
            None,
            "test",
            "somecol",
            insp_col,
            t2.c.somecol,
        )

        diffs = op.to_diff_tuple()
        eq_(bool(diffs), diff_expected)

    def _compare_default(self, t1, t2, col, rendered):
        t1.create(self.bind, checkfirst=True)
        insp = inspect(self.bind)
        cols = insp.get_columns(t1.name)
        ctx = self.autogen_context.migration_context

        return ctx.impl.compare_server_default(None, col, rendered,
                                               cols[0]["default"])

    def test_compare_string_blank_default(self):
        self._compare_default_roundtrip(String(8), "")

    def test_compare_string_nonblank_default(self):
        self._compare_default_roundtrip(String(8), "hi")

    def test_compare_interval_str(self):
        # this form shouldn't be used, but it is tested here
        # for compatibility
        self._compare_default_roundtrip(Interval, "14 days")

    @config.requirements.postgresql_uuid_ossp
    def test_compare_uuid_text(self):
        self._compare_default_roundtrip(UUID, text("uuid_generate_v4()"))

    def test_compare_interval_text(self):
        self._compare_default_roundtrip(Interval, text("'14 days'"))

    def test_compare_array_of_integer_text(self):
        self._compare_default_roundtrip(ARRAY(Integer),
                                        text("(ARRAY[]::integer[])"))

    def test_compare_current_timestamp_text(self):
        self._compare_default_roundtrip(
            DateTime(), text("TIMEZONE('utc', CURRENT_TIMESTAMP)"))

    def test_compare_current_timestamp_fn_w_binds(self):
        self._compare_default_roundtrip(
            DateTime(), func.timezone("utc", func.current_timestamp()))

    def test_compare_integer_str(self):
        self._compare_default_roundtrip(Integer(), "5")

    def test_compare_integer_text(self):
        self._compare_default_roundtrip(Integer(), text("5"))

    def test_compare_integer_text_diff(self):
        self._compare_default_roundtrip(Integer(), text("5"), "7")

    def test_compare_float_str(self):
        self._compare_default_roundtrip(Float(), "5.2")

    def test_compare_float_text(self):
        self._compare_default_roundtrip(Float(), text("5.2"))

    def test_compare_float_no_diff1(self):
        self._compare_default_roundtrip(Float(),
                                        text("5.2"),
                                        "5.2",
                                        diff_expected=False)

    def test_compare_float_no_diff2(self):
        self._compare_default_roundtrip(Float(),
                                        "5.2",
                                        text("5.2"),
                                        diff_expected=False)

    def test_compare_float_no_diff3(self):
        self._compare_default_roundtrip(Float(),
                                        text("5"),
                                        text("5.0"),
                                        diff_expected=False)

    def test_compare_float_no_diff4(self):
        self._compare_default_roundtrip(Float(),
                                        "5",
                                        "5.0",
                                        diff_expected=False)

    def test_compare_float_no_diff5(self):
        self._compare_default_roundtrip(Float(),
                                        text("5"),
                                        "5.0",
                                        diff_expected=False)

    def test_compare_float_no_diff6(self):
        self._compare_default_roundtrip(Float(),
                                        "5",
                                        text("5.0"),
                                        diff_expected=False)

    def test_compare_numeric_no_diff(self):
        self._compare_default_roundtrip(Numeric(),
                                        text("5"),
                                        "5.0",
                                        diff_expected=False)

    def test_compare_unicode_literal(self):
        self._compare_default_roundtrip(String(), u"im a default")

    # TODO: will need to actually eval() the repr() and
    # spend more effort figuring out exactly the kind of expression
    # to use
    def _TODO_test_compare_character_str_w_singlequote(self):
        self._compare_default_roundtrip(String(), "hel''lo")

    def test_compare_character_str(self):
        self._compare_default_roundtrip(String(), "hello")

    def test_compare_character_text(self):
        self._compare_default_roundtrip(String(), text("'hello'"))

    def test_compare_character_str_diff(self):
        self._compare_default_roundtrip(String(), "hello", "there")

    def test_compare_character_text_diff(self):
        self._compare_default_roundtrip(String(), text("'hello'"),
                                        text("'there'"))

    def test_primary_key_skip(self):
        """Test that SERIAL cols are just skipped"""
        t1 = Table("sometable", self.metadata,
                   Column("id", Integer, primary_key=True))
        t2 = Table("sometable", MetaData(),
                   Column("id", Integer, primary_key=True))
        assert not self._compare_default(t1, t2, t2.c.id, "")
Example #39
    q = (session.query(Animal).filter(
        Animal.facts.any(
            and_(AnimalFact.key == u'color', AnimalFact.value == u'reddish'))))
    print 'reddish animals', q.all()

    # Save some typing by wrapping that up in a function:
    with_characteristic = lambda key, value: and_(AnimalFact.key == key,
                                                  AnimalFact.value == value)

    q = (session.query(Animal).filter(
        Animal.facts.any(with_characteristic(u'color', u'brown'))))
    print 'brown animals', q.all()

    q = (session.query(Animal).filter(
        not_(
            Animal.facts.any(with_characteristic(u'poisonous-part',
                                                 u'elbows')))))
    print 'animals without poisonous-part == elbows', q.all()

    q = (session.query(Animal).filter(
        Animal.facts.any(AnimalFact.value == u'somewhat')))
    print 'any animal with any .value of "somewhat"', q.all()

    # Facts can be queried as well.
    q = (session.query(AnimalFact).filter(
        with_characteristic(u'cuteness', u'very')))
    print 'just the facts', q.all()

    metadata.drop_all()
    def test_autoincrement(self):
        meta = MetaData(testing.db)
        try:
            Table(
                "ai_1",
                meta,
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                Column("int_n", Integer, DefaultClause("0"), primary_key=True),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_2",
                meta,
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                Column("int_n", Integer, DefaultClause("0"), primary_key=True),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_3",
                meta,
                Column(
                    "int_n",
                    Integer,
                    DefaultClause("0"),
                    primary_key=True,
                    autoincrement=False,
                ),
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_4",
                meta,
                Column(
                    "int_n",
                    Integer,
                    DefaultClause("0"),
                    primary_key=True,
                    autoincrement=False,
                ),
                Column(
                    "int_n2",
                    Integer,
                    DefaultClause("0"),
                    primary_key=True,
                    autoincrement=False,
                ),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_5",
                meta,
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                Column(
                    "int_n",
                    Integer,
                    DefaultClause("0"),
                    primary_key=True,
                    autoincrement=False,
                ),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_6",
                meta,
                Column("o1", String(1), DefaultClause("x"), primary_key=True),
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_7",
                meta,
                Column("o1", String(1), DefaultClause("x"), primary_key=True),
                Column("o2", String(1), DefaultClause("x"), primary_key=True),
                Column("int_y", Integer, primary_key=True, autoincrement=True),
                mysql_engine="MyISAM",
            )
            Table(
                "ai_8",
                meta,
                Column("o1", String(1), DefaultClause("x"), primary_key=True),
                Column("o2", String(1), DefaultClause("x"), primary_key=True),
                mysql_engine="MyISAM",
            )
            meta.create_all()

            table_names = [
                "ai_1",
                "ai_2",
                "ai_3",
                "ai_4",
                "ai_5",
                "ai_6",
                "ai_7",
                "ai_8",
            ]
            mr = MetaData(testing.db)
            mr.reflect(only=table_names)

            for tbl in [mr.tables[name] for name in table_names]:
                for c in tbl.c:
                    if c.name.startswith("int_y"):
                        assert c.autoincrement
                    elif c.name.startswith("int_n"):
                        assert not c.autoincrement
                tbl.insert().execute()
                if "int_y" in tbl.c:
                    assert select([tbl.c.int_y]).scalar() == 1
                    assert list(tbl.select().execute().first()).count(1) == 1
                else:
                    assert 1 not in list(tbl.select().execute().first())
        finally:
            meta.drop_all()
Example #41
    def test_with_manytomany(self):
        metadata = MetaData(self.engine)

        table1 = Table(
            "mytable", metadata,
            Column('col1',
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True), Column('col2', String(30)))

        table2 = Table(
            "mytable2",
            metadata,
            Column('col1',
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)),
        )

        table3 = Table(
            't1tot2',
            metadata,
            Column('t1', Integer, ForeignKey('mytable.col1')),
            Column('t2', Integer, ForeignKey('mytable2.col1')),
        )

        @profile_memory()
        def go():
            class A(fixtures.ComparableEntity):
                pass

            class B(fixtures.ComparableEntity):
                pass

            mapper(A,
                   table1,
                   properties={
                       'bs':
                       relationship(B,
                                    secondary=table3,
                                    backref='as',
                                    order_by=table3.c.t1)
                   })
            mapper(B, table2)

            sess = create_session()
            a1 = A(col2='a1')
            a2 = A(col2='a2')
            b1 = B(col2='b1')
            b2 = B(col2='b2')
            a1.bs.append(b1)
            a2.bs.append(b2)
            for x in [a1, a2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([A(bs=[B(col2='b1')]), A(bs=[B(col2='b2')])], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

            # don't need to clear_mappers()
            del B
            del A

        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
        assert_no_mappers()
Example #42
    def test_with_inheritance(self):
        metadata = MetaData(self.engine)

        table1 = Table(
            "mytable", metadata,
            Column('col1',
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True), Column('col2', String(30)))

        table2 = Table(
            "mytable2",
            metadata,
            Column('col1',
                   Integer,
                   ForeignKey('mytable.col1'),
                   primary_key=True,
                   test_needs_autoincrement=True),
            Column('col3', String(30)),
        )

        @profile_memory()
        def go():
            class A(fixtures.ComparableEntity):
                pass

            class B(A):
                pass

            mapper(A,
                   table1,
                   polymorphic_on=table1.c.col2,
                   polymorphic_identity='a')
            mapper(B, table2, inherits=A, polymorphic_identity='b')

            sess = create_session()
            a1 = A()
            a2 = A()
            b1 = B(col3='b1')
            b2 = B(col3='b2')
            for x in [a1, a2, b1, b2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([A(), A(), B(col3='b1'), B(col3='b2')], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

            # don't need to clear_mappers()
            del B
            del A

        metadata.create_all()
        try:
            go()
        finally:
            metadata.drop_all()
        assert_no_mappers()
Example #43
    def test_session(self):
        metadata = MetaData(self.engine)

        table1 = Table(
            "mytable", metadata,
            Column('col1',
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True), Column('col2', String(30)))

        table2 = Table(
            "mytable2", metadata,
            Column('col1',
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True), Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))

        metadata.create_all()

        m1 = mapper(A,
                    table1,
                    properties={
                        "bs":
                        relationship(B,
                                     cascade="all, delete",
                                     order_by=table2.c.col1)
                    })
        m2 = mapper(B, table2)

        m3 = mapper(A, table1, non_primary=True)

        @profile_memory()
        def go():
            sess = create_session()
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1, a2, a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([
                A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                A(col2="a2", bs=[]),
                A(col2="a3", bs=[B(col2="b3")])
            ], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

        go()

        metadata.drop_all()
        del m1, m2, m3
        assert_no_mappers()
Example #44
def parse(file,
          output="csv",
          edges_name="edges",
          nodes_name="nodes",
          output_path=".",
          separator=",",
          spatial=False,
          no_headers=False):
    if not os.path.exists(file):
        raise IOError("File {0} not found".format(file))

    if output != "csv":
        metadata = MetaData()
        if (spatial):
            node_geom = Point(2)
            edge_geom = LineString(2)
        else:
            node_geom = String
            edge_geom = String

        nodes_table = Table(nodes_name, metadata,
                            Column('id', Integer, primary_key=True),
                            Column('original_id', Integer, index=True),
                            Column('elevation', Integer),
                            Column('lon', Float, index=True),
                            Column('lat', Float, index=True),
                            Column('the_geom', node_geom))

        edges_table = Table(edges_name, metadata,
                            Column('id', Integer, primary_key=True),
                            Column('source', Integer, index=True),
                            Column('target', Integer, index=True),
                            Column('length', Float),
                            Column('car', SmallInteger),
                            Column('car_rev', SmallInteger),
                            Column('bike', SmallInteger),
                            Column('bike_rev', SmallInteger),
                            Column('foot', SmallInteger),
                            Column('the_geom', edge_geom))

        GeometryDDL(nodes_table)
        GeometryDDL(edges_table)

        engine = create_engine(output)
        metadata.drop_all(engine)
        metadata.create_all(engine)
        mapper(Node, nodes_table)
        mapper(Edge, edges_table)
        Session = sessionmaker(bind=engine)
        session = Session()

    extension = os.path.splitext(file)[1]
    if extension == '.bz2':
        print "Recognized as bzip2 file"
        f = bz2.BZ2File(file, 'r')

    elif extension == '.gz':
        print "Recognized as gzip2 file"
        f = gzip.open(file, 'r')

    else:
        print "Supposing OSM/xml file"
        filesize = os.path.getsize(file)
        f = open(file, 'r')

    buffer_size = 4096
    p = Parser()
    eof = False

    print "Step 1: reading file {0}".format(file)
    read = 0
    while not eof:
        s = f.read(buffer_size)
        eof = len(s) != buffer_size
        p.read(s, len(s), eof)
        read += len(s)

    print "  Read {0} nodes and {1} ways\n".format(p.get_osm_nodes(),
                                                   p.get_osm_ways())

    print "Step 2: saving the nodes"
    nodes = p.get_nodes()
    if output == "csv":
        n = open(output_path + '/' + nodes_name + '.csv', 'w')
        if no_headers == False:
            n.write('"node_id"' + separator + '"longitude"' + separator +
                    '"latitude"\n')

    count = 0
    for node in nodes:
        if output == "csv":
            n.write('{1}{0}{2}{0}{3}\n'.format(separator, node.id, node.lon,
                                               node.lat))
        else:
            session.add(Node(node.id, node.lon, node.lat, spatial=spatial))
        count += 1
    if output == "csv":
        n.close()
    else:
        session.commit()

    print "  Wrote {0} nodes\n".format(count)

    print "Step 3: saving the edges"
    edges = p.get_edges()
    count = 0
    if output == "csv":
        print output_path + '/' + edges_name + '.csv'
        e = open(output_path + '/' + edges_name + '.csv', 'w')
        if no_headers == False:
            e.write('"edge_id"' + separator + '"source"' + separator +
                    '"target"' + separator + '"length"' + separator + '"car"' +
                    separator + '"car reverse"' + separator + '"bike"' +
                    separator + '"bike reverse"' + separator + '"foot"' +
                    separator + '"WKT"\n')
    for edge in edges:
        if output == "csv":
            e.write(
                '{1}{0}{2}{0}{3}{0}{4}{0}{5}{0}{6}{0}{7}{0}{8}{0}{9}{0}LINESTRING({10})\n'
                .format(separator, edge.edge_id, edge.source, edge.target,
                        edge.length, edge.car, edge.car_d, edge.bike,
                        edge.bike_d, edge.foot, edge.geom))
        else:
            session.add(
                Edge(edge.edge_id,
                     edge.source,
                     edge.target,
                     edge.length,
                     edge.car,
                     edge.car_d,
                     edge.bike,
                     edge.bike_d,
                     edge.foot,
                     edge.geom,
                     spatial=spatial))
        count += 1
    if output == "csv":
        e.close()
    else:
        session.commit()
    print "  Wrote {0} edges\n".format(count)
Example #45
class SQLiteDefaultCompareTest(TestBase):
    __only_on__ = "sqlite"
    __backend__ = True

    @classmethod
    def setup_class(cls):
        cls.bind = config.db
        staging_env()
        cls.migration_context = MigrationContext.configure(
            connection=cls.bind.connect(),
            opts={
                "compare_type": True,
                "compare_server_default": True
            },
        )

    def setUp(self):
        self.metadata = MetaData(self.bind)
        self.autogen_context = api.AutogenContext(self.migration_context)

    @classmethod
    def teardown_class(cls):
        clear_staging_env()

    def tearDown(self):
        self.metadata.drop_all()

    def _compare_default_roundtrip(self,
                                   type_,
                                   orig_default,
                                   alternate=None,
                                   diff_expected=None):
        diff_expected = (diff_expected if diff_expected is not None else
                         alternate is not None)
        if alternate is None:
            alternate = orig_default

        t1 = Table(
            "test",
            self.metadata,
            Column("somecol", type_, server_default=orig_default),
        )
        t2 = Table(
            "test",
            MetaData(),
            Column("somecol", type_, server_default=alternate),
        )

        t1.create(self.bind)

        insp = inspect(self.bind)
        cols = insp.get_columns(t1.name)
        insp_col = Column("somecol",
                          cols[0]["type"],
                          server_default=text(cols[0]["default"]))
        op = ops.AlterColumnOp("test", "somecol")
        _compare_server_default(
            self.autogen_context,
            op,
            None,
            "test",
            "somecol",
            insp_col,
            t2.c.somecol,
        )

        diffs = op.to_diff_tuple()
        eq_(bool(diffs), diff_expected)

    def _compare_default(self, t1, t2, col, rendered):
        t1.create(self.bind, checkfirst=True)
        insp = inspect(self.bind)
        cols = insp.get_columns(t1.name)
        ctx = self.autogen_context.migration_context

        return ctx.impl.compare_server_default(None, col, rendered,
                                               cols[0]["default"])

    @config.requirements.sqlalchemy_12
    def test_compare_current_timestamp_func(self):
        self._compare_default_roundtrip(DateTime(),
                                        func.datetime("now", "localtime"))

    @config.requirements.sqlalchemy_12
    def test_compare_current_timestamp_func_now(self):
        self._compare_default_roundtrip(DateTime(), func.now())

    def test_compare_current_timestamp_text(self):
        # SQLAlchemy doesn't render the parentheses for a
        # SQLite server default specified as text(), so users will be doing
        # this; the SQLite comparison needs to accommodate them.
        self._compare_default_roundtrip(DateTime(),
                                        text("(datetime('now', 'localtime'))"))

    def test_compare_integer_str(self):
        self._compare_default_roundtrip(Integer(), "5")

    def test_compare_integer_str_diff(self):
        self._compare_default_roundtrip(Integer(), "5", "7")

    def test_compare_integer_text(self):
        self._compare_default_roundtrip(Integer(), text("5"))

    def test_compare_integer_text_diff(self):
        self._compare_default_roundtrip(Integer(), text("5"), "7")

    def test_compare_float_str(self):
        self._compare_default_roundtrip(Float(), "5.2")

    def test_compare_float_str_diff(self):
        self._compare_default_roundtrip(Float(), "5.2", "5.3")

    def test_compare_float_text(self):
        self._compare_default_roundtrip(Float(), text("5.2"))

    def test_compare_float_text_diff(self):
        self._compare_default_roundtrip(Float(), text("5.2"), "5.3")

    def test_compare_string_literal(self):
        self._compare_default_roundtrip(String(), "im a default")

    def test_compare_string_literal_diff(self):
        self._compare_default_roundtrip(String(), "im a default", "me too")
Example #46
class BatchRoundTripTest(TestBase):
    __requires__ = ('sqlalchemy_08', )
    __only_on__ = "sqlite"

    def setUp(self):
        self.conn = config.db.connect()
        self.metadata = MetaData()
        t1 = Table('foo',
                   self.metadata,
                   Column('id', Integer, primary_key=True),
                   Column('data', String(50)),
                   Column('x', Integer),
                   mysql_engine='InnoDB')
        t1.create(self.conn)

        self.conn.execute(t1.insert(), [{
            "id": 1,
            "data": "d1",
            "x": 5
        }, {
            "id": 2,
            "data": "22",
            "x": 6
        }, {
            "id": 3,
            "data": "8.5",
            "x": 7
        }, {
            "id": 4,
            "data": "9.46",
            "x": 8
        }, {
            "id": 5,
            "data": "d5",
            "x": 9
        }])
        context = MigrationContext.configure(self.conn)
        self.op = Operations(context)

    @contextmanager
    def _sqlite_referential_integrity(self):
        self.conn.execute("PRAGMA foreign_keys=ON")
        try:
            yield
        finally:
            self.conn.execute("PRAGMA foreign_keys=OFF")

    def _no_pk_fixture(self):
        nopk = Table('nopk',
                     self.metadata,
                     Column('a', Integer),
                     Column('b', Integer),
                     Column('c', Integer),
                     mysql_engine='InnoDB')
        nopk.create(self.conn)
        self.conn.execute(nopk.insert(), [
            {
                "a": 1,
                "b": 2,
                "c": 3
            },
            {
                "a": 2,
                "b": 4,
                "c": 5
            },
        ])
        return nopk

    def _table_w_index_fixture(self):
        t = Table(
            't_w_ix',
            self.metadata,
            Column('id', Integer, primary_key=True),
            Column('thing', Integer),
            Column('data', String(20)),
        )
        Index('ix_thing', t.c.thing)
        t.create(self.conn)
        return t

    def _boolean_fixture(self):
        t = Table('hasbool', self.metadata,
                  Column('x', Boolean(create_constraint=True, name='ck1')),
                  Column('y', Integer))
        t.create(self.conn)

    def _int_to_boolean_fixture(self):
        t = Table('hasbool', self.metadata, Column('x', Integer))
        t.create(self.conn)

    def test_change_type_boolean_to_int(self):
        self._boolean_fixture()
        with self.op.batch_alter_table("hasbool") as batch_op:
            batch_op.alter_column('x',
                                  type_=Integer,
                                  existing_type=Boolean(create_constraint=True,
                                                        name='ck1'))
        insp = Inspector.from_engine(config.db)

        eq_([
            c['type']._type_affinity
            for c in insp.get_columns('hasbool') if c['name'] == 'x'
        ], [Integer])

    def test_drop_col_schematype(self):
        self._boolean_fixture()
        with self.op.batch_alter_table("hasbool") as batch_op:
            batch_op.drop_column('x')
        insp = Inspector.from_engine(config.db)

        assert 'x' not in (c['name'] for c in insp.get_columns('hasbool'))

    def test_change_type_int_to_boolean(self):
        self._int_to_boolean_fixture()
        with self.op.batch_alter_table("hasbool") as batch_op:
            batch_op.alter_column('x',
                                  type_=Boolean(create_constraint=True,
                                                name='ck1'))
        insp = Inspector.from_engine(config.db)

        if exclusions.against(config, "sqlite"):
            eq_([
                c['type']._type_affinity
                for c in insp.get_columns('hasbool') if c['name'] == 'x'
            ], [Boolean])
        elif exclusions.against(config, "mysql"):
            eq_([
                c['type']._type_affinity
                for c in insp.get_columns('hasbool') if c['name'] == 'x'
            ], [Integer])

    def tearDown(self):
        self.metadata.drop_all(self.conn)
        self.conn.close()

    def _assert_data(self, data, tablename='foo'):
        eq_([
            dict(row)
            for row in self.conn.execute("select * from %s" % tablename)
        ], data)

    def test_ix_existing(self):
        self._table_w_index_fixture()

        with self.op.batch_alter_table("t_w_ix") as batch_op:
            batch_op.alter_column('data', type_=String(30))
            batch_op.create_index("ix_data", ["data"])

        insp = Inspector.from_engine(config.db)
        eq_(
            set((ix['name'], tuple(ix['column_names']))
                for ix in insp.get_indexes('t_w_ix')),
            set([('ix_data', ('data', )), ('ix_thing', ('thing', ))]))

    def test_fk_points_to_me_auto(self):
        self._test_fk_points_to_me("auto")

    # in particular, this tests that the failures
    # on PG and MySQL result in recovery of the batch system,
    # e.g. that the _alembic_batch_temp table is dropped
    @config.requirements.no_referential_integrity
    def test_fk_points_to_me_recreate(self):
        self._test_fk_points_to_me("always")

    @exclusions.only_on("sqlite")
    @exclusions.fails("intentionally asserting that this "
                      "doesn't work w/ pragma foreign keys")
    def test_fk_points_to_me_sqlite_refinteg(self):
        with self._sqlite_referential_integrity():
            self._test_fk_points_to_me("auto")

    def _test_fk_points_to_me(self, recreate):
        bar = Table('bar',
                    self.metadata,
                    Column('id', Integer, primary_key=True),
                    Column('foo_id', Integer, ForeignKey('foo.id')),
                    mysql_engine='InnoDB')
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'foo_id': 3})

        with self.op.batch_alter_table("foo", recreate=recreate) as batch_op:
            batch_op.alter_column('data',
                                  new_column_name='newdata',
                                  existing_type=String(50))

        insp = Inspector.from_engine(self.conn)
        eq_([(key['referred_table'], key['referred_columns'],
              key['constrained_columns'])
             for key in insp.get_foreign_keys('bar')],
            [('foo', ['id'], ['foo_id'])])

    def test_selfref_fk_auto(self):
        self._test_selfref_fk("auto")

    @config.requirements.no_referential_integrity
    def test_selfref_fk_recreate(self):
        self._test_selfref_fk("always")

    @exclusions.only_on("sqlite")
    @exclusions.fails("intentionally asserting that this "
                      "doesn't work w/ pragma foreign keys")
    def test_selfref_fk_sqlite_refinteg(self):
        with self._sqlite_referential_integrity():
            self._test_selfref_fk("auto")

    def _test_selfref_fk(self, recreate):
        bar = Table('bar',
                    self.metadata,
                    Column('id', Integer, primary_key=True),
                    Column('bar_id', Integer, ForeignKey('bar.id')),
                    Column('data', String(50)),
                    mysql_engine='InnoDB')
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'data': 'x', 'bar_id': None})
        self.conn.execute(bar.insert(), {'id': 2, 'data': 'y', 'bar_id': 1})

        with self.op.batch_alter_table("bar", recreate=recreate) as batch_op:
            batch_op.alter_column('data',
                                  new_column_name='newdata',
                                  existing_type=String(50))

        insp = Inspector.from_engine(self.conn)
        eq_([(key['referred_table'], key['referred_columns'],
              key['constrained_columns'])
             for key in insp.get_foreign_keys('bar')],
            [('bar', ['id'], ['bar_id'])])

    def test_change_type(self):
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.alter_column('data', type_=Integer)

        self._assert_data([{
            "id": 1,
            "data": 0,
            "x": 5
        }, {
            "id": 2,
            "data": 22,
            "x": 6
        }, {
            "id": 3,
            "data": 8,
            "x": 7
        }, {
            "id": 4,
            "data": 9,
            "x": 8
        }, {
            "id": 5,
            "data": 0,
            "x": 9
        }])

    def test_drop_column(self):
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.drop_column('data')

        self._assert_data([{
            "id": 1,
            "x": 5
        }, {
            "id": 2,
            "x": 6
        }, {
            "id": 3,
            "x": 7
        }, {
            "id": 4,
            "x": 8
        }, {
            "id": 5,
            "x": 9
        }])

    def test_add_pk_constraint(self):
        self._no_pk_fixture()
        with self.op.batch_alter_table("nopk", recreate="always") as batch_op:
            batch_op.create_primary_key('newpk', ['a', 'b'])

        pk_const = Inspector.from_engine(self.conn).get_pk_constraint('nopk')
        with config.requirements.reflects_pk_names.fail_if():
            eq_(pk_const['name'], 'newpk')
        eq_(pk_const['constrained_columns'], ['a', 'b'])

    @config.requirements.check_constraints_w_enforcement
    def test_add_ck_constraint(self):
        with self.op.batch_alter_table("foo", recreate="always") as batch_op:
            batch_op.create_check_constraint("newck", text("x > 0"))

        # we don't support reflection of CHECK constraints
        # so test this by just running invalid data in
        foo = self.metadata.tables['foo']

        assert_raises_message(exc.IntegrityError, "newck", self.conn.execute,
                              foo.insert(), {
                                  "id": 6,
                                  "data": 5,
                                  "x": -2
                              })

    @config.requirements.sqlalchemy_094
    @config.requirements.unnamed_constraints
    def test_drop_foreign_key(self):
        bar = Table('bar',
                    self.metadata,
                    Column('id', Integer, primary_key=True),
                    Column('foo_id', Integer, ForeignKey('foo.id')),
                    mysql_engine='InnoDB')
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'foo_id': 3})

        naming_convention = {
            "fk":
            "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
        }
        with self.op.batch_alter_table(
                "bar", naming_convention=naming_convention) as batch_op:
            batch_op.drop_constraint("fk_bar_foo_id_foo", type_="foreignkey")
        eq_(Inspector.from_engine(self.conn).get_foreign_keys('bar'), [])

    def test_drop_column_fk_recreate(self):
        with self.op.batch_alter_table("foo", recreate='always') as batch_op:
            batch_op.drop_column('data')

        self._assert_data([{
            "id": 1,
            "x": 5
        }, {
            "id": 2,
            "x": 6
        }, {
            "id": 3,
            "x": 7
        }, {
            "id": 4,
            "x": 8
        }, {
            "id": 5,
            "x": 9
        }])

    def test_rename_column(self):
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.alter_column('x', new_column_name='y')

        self._assert_data([{
            "id": 1,
            "data": "d1",
            "y": 5
        }, {
            "id": 2,
            "data": "22",
            "y": 6
        }, {
            "id": 3,
            "data": "8.5",
            "y": 7
        }, {
            "id": 4,
            "data": "9.46",
            "y": 8
        }, {
            "id": 5,
            "data": "d5",
            "y": 9
        }])

    def test_rename_column_boolean(self):
        bar = Table('bar',
                    self.metadata,
                    Column('id', Integer, primary_key=True),
                    Column('flag', Boolean()),
                    mysql_engine='InnoDB')
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'flag': True})
        self.conn.execute(bar.insert(), {'id': 2, 'flag': False})

        with self.op.batch_alter_table("bar") as batch_op:
            batch_op.alter_column('flag',
                                  new_column_name='bflag',
                                  existing_type=Boolean)

        self._assert_data([
            {
                "id": 1,
                'bflag': True
            },
            {
                "id": 2,
                'bflag': False
            },
        ], 'bar')

    @config.requirements.non_native_boolean
    def test_rename_column_non_native_boolean_no_ck(self):
        bar = Table('bar',
                    self.metadata,
                    Column('id', Integer, primary_key=True),
                    Column('flag', Boolean(create_constraint=False)),
                    mysql_engine='InnoDB')
        bar.create(self.conn)
        self.conn.execute(bar.insert(), {'id': 1, 'flag': True})
        self.conn.execute(bar.insert(), {'id': 2, 'flag': False})
        self.conn.execute(bar.insert(), {'id': 3, 'flag': 5})

        with self.op.batch_alter_table(
                "bar",
                reflect_args=[
                    Column('flag', Boolean(create_constraint=False))
                ]) as batch_op:
            batch_op.alter_column('flag',
                                  new_column_name='bflag',
                                  existing_type=Boolean)

        self._assert_data([{
            "id": 1,
            'bflag': True
        }, {
            "id": 2,
            'bflag': False
        }, {
            'id': 3,
            'bflag': 5
        }], 'bar')

    def test_drop_column_pk(self):
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.drop_column('id')

        self._assert_data([{
            "data": "d1",
            "x": 5
        }, {
            "data": "22",
            "x": 6
        }, {
            "data": "8.5",
            "x": 7
        }, {
            "data": "9.46",
            "x": 8
        }, {
            "data": "d5",
            "x": 9
        }])

    def test_rename_column_pk(self):
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.alter_column('id', new_column_name='ident')

        self._assert_data([{
            "ident": 1,
            "data": "d1",
            "x": 5
        }, {
            "ident": 2,
            "data": "22",
            "x": 6
        }, {
            "ident": 3,
            "data": "8.5",
            "x": 7
        }, {
            "ident": 4,
            "data": "9.46",
            "x": 8
        }, {
            "ident": 5,
            "data": "d5",
            "x": 9
        }])

    def test_add_column_auto(self):
        # note this uses ALTER
        with self.op.batch_alter_table("foo") as batch_op:
            batch_op.add_column(
                Column('data2', String(50), server_default='hi'))

        self._assert_data([{
            "id": 1,
            "data": "d1",
            "x": 5,
            'data2': 'hi'
        }, {
            "id": 2,
            "data": "22",
            "x": 6,
            'data2': 'hi'
        }, {
            "id": 3,
            "data": "8.5",
            "x": 7,
            'data2': 'hi'
        }, {
            "id": 4,
            "data": "9.46",
            "x": 8,
            'data2': 'hi'
        }, {
            "id": 5,
            "data": "d5",
            "x": 9,
            'data2': 'hi'
        }])

    def test_add_column_recreate(self):
        with self.op.batch_alter_table("foo", recreate='always') as batch_op:
            batch_op.add_column(
                Column('data2', String(50), server_default='hi'))

        self._assert_data([{
            "id": 1,
            "data": "d1",
            "x": 5,
            'data2': 'hi'
        }, {
            "id": 2,
            "data": "22",
            "x": 6,
            'data2': 'hi'
        }, {
            "id": 3,
            "data": "8.5",
            "x": 7,
            'data2': 'hi'
        }, {
            "id": 4,
            "data": "9.46",
            "x": 8,
            'data2': 'hi'
        }, {
            "id": 5,
            "data": "d5",
            "x": 9,
            'data2': 'hi'
        }])

    def test_create_drop_index(self):
        insp = Inspector.from_engine(config.db)
        eq_(insp.get_indexes('foo'), [])

        with self.op.batch_alter_table("foo", recreate='always') as batch_op:
            batch_op.create_index('ix_data', ['data'], unique=True)

        self._assert_data([{
            "id": 1,
            "data": "d1",
            "x": 5
        }, {
            "id": 2,
            "data": "22",
            "x": 6
        }, {
            "id": 3,
            "data": "8.5",
            "x": 7
        }, {
            "id": 4,
            "data": "9.46",
            "x": 8
        }, {
            "id": 5,
            "data": "d5",
            "x": 9
        }])

        insp = Inspector.from_engine(config.db)
        eq_([
            dict(unique=ix['unique'],
                 name=ix['name'],
                 column_names=ix['column_names'])
            for ix in insp.get_indexes('foo')
        ], [{
            'unique': True,
            'name': 'ix_data',
            'column_names': ['data']
        }])

        with self.op.batch_alter_table("foo", recreate='always') as batch_op:
            batch_op.drop_index('ix_data')

        insp = Inspector.from_engine(config.db)
        eq_(insp.get_indexes('foo'), [])
class ReconnectRecipeTest(fixtures.TestBase):
    """Test for the reconnect recipe given at doc/build/faq/connections.rst.

    Make sure the above document is updated if changes are made here.

    """

    # this recipe works on PostgreSQL also but only if the connection
    # is cut off from the server side, otherwise the connection.cursor()
    # method rightly fails because we explicitly closed the connection.
    # Since we don't have a fixture that can do this, we currently rely on
    # the MySQL drivers that allow us to call cursor() even when the
    # connection has been closed.  In order to get a real "cut the server
    # off" kind of fixture we'd need to do something in provisioning that
    # seeks out the TCP connection at the OS level and kills it.
    __only_on__ = ("mysql+mysqldb", "mysql+pymysql")

    future = False

    def make_engine(self, engine):
        num_retries = 3
        retry_interval = 0.5

        def _run_with_retries(fn, context, cursor, statement, *arg, **kw):
            for retry in range(num_retries + 1):
                try:
                    fn(cursor, statement, context=context, *arg)
                except engine.dialect.dbapi.Error as raw_dbapi_err:
                    connection = context.root_connection
                    if engine.dialect.is_disconnect(raw_dbapi_err, connection,
                                                    cursor):
                        if retry > num_retries:
                            raise
                        engine.logger.error(
                            "disconnection error, retrying operation",
                            exc_info=True,
                        )
                        connection.invalidate()

                        if self.future:
                            connection.rollback()
                        else:
                            trans = connection.get_transaction()
                            if trans:
                                trans.rollback()

                        time.sleep(retry_interval)
                        context.cursor = (
                            cursor) = connection.connection.cursor()
                    else:
                        raise
                else:
                    return True

        e = engine.execution_options(isolation_level="AUTOCOMMIT")

        @event.listens_for(e, "do_execute_no_params")
        def do_execute_no_params(cursor, statement, context):
            return _run_with_retries(
                context.dialect.do_execute_no_params,
                context,
                cursor,
                statement,
            )

        @event.listens_for(e, "do_execute")
        def do_execute(cursor, statement, parameters, context):
            return _run_with_retries(
                context.dialect.do_execute,
                context,
                cursor,
                statement,
                parameters,
            )

        return e

    __backend__ = True

    def setup_test(self):
        self.engine = engines.reconnecting_engine(options=dict(
            future=self.future))
        self.meta = MetaData()
        self.table = Table(
            "sometable",
            self.meta,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
        )
        self.meta.create_all(self.engine)

    def teardown_test(self):
        self.meta.drop_all(self.engine)
        self.engine.dispose()

    def test_restart_on_execute_no_txn(self):
        engine = self.make_engine(self.engine)

        with engine.connect() as conn:
            eq_(conn.execute(select(1)).scalar(), 1)

            self.engine.test_shutdown()
            self.engine.test_restart()

            eq_(conn.execute(select(1)).scalar(), 1)

    def test_restart_on_execute_txn(self):
        engine = self.make_engine(self.engine)

        with engine.begin() as conn:
            eq_(conn.execute(select(1)).scalar(), 1)

            self.engine.test_shutdown()
            self.engine.test_restart()

            eq_(conn.execute(select(1)).scalar(), 1)

    def test_autocommits_txn(self):
        engine = self.make_engine(self.engine)

        with engine.begin() as conn:
            conn.execute(
                self.table.insert(),
                [
                    {
                        "id": 1,
                        "name": "some name 1"
                    },
                    {
                        "id": 2,
                        "name": "some name 2"
                    },
                    {
                        "id": 3,
                        "name": "some name 3"
                    },
                ],
            )

            self.engine.test_shutdown()
            self.engine.test_restart()

            eq_(
                conn.execute(select(self.table).order_by(
                    self.table.c.id)).fetchall(),
                [(1, "some name 1"), (2, "some name 2"), (3, "some name 3")],
            )

    def test_fail_on_executemany_txn(self):
        engine = self.make_engine(self.engine)

        with engine.begin() as conn:
            conn.execute(
                self.table.insert(),
                [
                    {
                        "id": 1,
                        "name": "some name 1"
                    },
                    {
                        "id": 2,
                        "name": "some name 2"
                    },
                    {
                        "id": 3,
                        "name": "some name 3"
                    },
                ],
            )

            self.engine.test_shutdown()
            self.engine.test_restart()

            assert_raises(
                exc.DBAPIError,
                conn.execute,
                self.table.insert(),
                [
                    {
                        "id": 4,
                        "name": "some name 4"
                    },
                    {
                        "id": 5,
                        "name": "some name 5"
                    },
                    {
                        "id": 6,
                        "name": "some name 6"
                    },
                ],
            )
            if self.future:
                conn.rollback()
            else:
                trans = conn.get_transaction()
                trans.rollback()
Example #48
0
class AlterColRoundTripFixture(object):

    # since these tests are about syntax, require a more recent SQLAlchemy;
    # some of the type / server default comparison logic might not work on
    # older SQLAlchemy versions, as seems to be the case for SQLAlchemy 1.1
    # on Oracle

    __requires__ = ("alter_column", "sqlalchemy_12")

    def setUp(self):
        self.conn = config.db.connect()
        self.ctx = MigrationContext.configure(self.conn)
        self.op = Operations(self.ctx)
        self.metadata = MetaData()

    def _compare_type(self, t1, t2):
        c1 = Column("q", t1)
        c2 = Column("q", t2)
        assert not self.ctx.impl.compare_type(
            c1, c2
        ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2)

    def _compare_server_default(self, t1, s1, t2, s2):
        c1 = Column("q", t1, server_default=s1)
        c2 = Column("q", t2, server_default=s2)
        assert not self.ctx.impl.compare_server_default(
            c1, c2, s2, s1
        ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2)

    def tearDown(self):
        self.metadata.drop_all(self.conn)
        self.conn.close()

    def _run_alter_col(self, from_, to_, compare=None):
        column = Column(
            from_.get("name", "colname"),
            from_.get("type", String(10)),
            nullable=from_.get("nullable", True),
            server_default=from_.get("server_default", None),
            # comment=from_.get("comment", None)
        )
        t = Table("x", self.metadata, column)

        t.create(self.conn)
        insp = inspect(self.conn)
        old_col = insp.get_columns("x")[0]

        # TODO: conditional comment support
        self.op.alter_column(
            "x",
            column.name,
            existing_type=column.type,
            existing_server_default=column.server_default
            if column.server_default is not None
            else False,
            existing_nullable=True if column.nullable else False,
            # existing_comment=column.comment,
            nullable=to_.get("nullable", None),
            # modify_comment=False,
            server_default=to_.get("server_default", False),
            new_column_name=to_.get("name", None),
            type_=to_.get("type", None),
        )

        insp = inspect(self.conn)
        new_col = insp.get_columns("x")[0]

        if compare is None:
            compare = to_

        eq_(
            new_col["name"],
            compare["name"] if "name" in compare else column.name,
        )
        self._compare_type(
            new_col["type"], compare.get("type", old_col["type"])
        )
        eq_(new_col["nullable"], compare.get("nullable", column.nullable))
        self._compare_server_default(
            new_col["type"],
            new_col.get("default", None),
            compare.get("type", old_col["type"]),
            compare["server_default"].text
            if "server_default" in compare
            else column.server_default.arg.text
            if column.server_default is not None
            else None,
        )
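
    # A hypothetical call illustrating the from_/to_ dict shape that
    # _run_alter_col above reads (keys: "name", "type", "nullable",
    # "server_default"); the column name and String sizes here are
    # illustrative assumptions, not values from the original suite:
    #
    #     self._run_alter_col(
    #         {"type": String(10), "nullable": True},
    #         {"name": "renamed", "type": String(50), "nullable": False},
    #     )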
Example #49
0
    def test_orm_many_engines(self):
        metadata = MetaData()

        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table2 = Table(
            "mytable2",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
            Column("col3", Integer, ForeignKey("mytable.col1")),
        )

        metadata.create_all(self.engine)

        m1 = self.mapper_registry.map_imperatively(
            A,
            table1,
            properties={
                "bs":
                relationship(B, cascade="all, delete", order_by=table2.c.col1)
            },
            _compiled_cache_size=50,
        )
        m2 = self.mapper_registry.map_imperatively(B,
                                                   table2,
                                                   _compiled_cache_size=50)

        @profile_memory()
        def go():
            engine = engines.testing_engine(
                options={
                    "logging_name": "FOO",
                    "pool_logging_name": "BAR",
                    "use_reaper": False,
                })
            with Session(engine) as sess:
                a1 = A(col2="a1")
                a2 = A(col2="a2")
                a3 = A(col2="a3")
                a1.bs.append(B(col2="b1"))
                a1.bs.append(B(col2="b2"))
                a3.bs.append(B(col2="b3"))
                for x in [a1, a2, a3]:
                    sess.add(x)
                sess.commit()

                alist = sess.query(A).order_by(A.col1).all()
                eq_(
                    [
                        A(col2="a1", bs=[B(col2="b1"),
                                         B(col2="b2")]),
                        A(col2="a2", bs=[]),
                        A(col2="a3", bs=[B(col2="b3")]),
                    ],
                    alist,
                )

                for a in alist:
                    sess.delete(a)
                sess.commit()

            engine.dispose()

        go()

        metadata.drop_all(self.engine)
        del m1, m2
        assert_no_mappers()
Example #50
0
class ApdbSchema(object):
    """Class for management of APDB schema.

    Attributes
    ----------
    objects : `sqlalchemy.Table`
        DiaObject table instance
    objects_nightly : `sqlalchemy.Table`
        DiaObjectNightly table instance, may be None
    objects_last : `sqlalchemy.Table`
        DiaObjectLast table instance, may be None
    sources : `sqlalchemy.Table`
        DiaSource table instance
    forcedSources : `sqlalchemy.Table`
        DiaForcedSource table instance
    visits : `sqlalchemy.Table`
        ApdbProtoVisits table instance

    Parameters
    ----------
    engine : `sqlalchemy.engine.Engine`
        SQLAlchemy engine instance
    dia_object_index : `str`
        Indexing mode for DiaObject table, see `ApdbConfig.dia_object_index`
        for details.
    dia_object_nightly : `bool`
        If `True` then create per-night DiaObject table as well.
    schema_file : `str`
        Name of the YAML schema file.
    extra_schema_file : `str`, optional
        Name of the YAML schema file with extra column definitions.
    column_map : `str`, optional
        Name of the YAML file with column mappings.
    afw_schemas : `dict`, optional
        Dictionary with table name for a key and `afw.table.Schema`
        for a value. Columns in schema will be added to standard APDB
        schema (only if standard schema does not have matching column).
    prefix : `str`, optional
        Prefix to add to all schema elements.
    """

    # map afw type names into cat type names
    _afw_type_map = {"I": "INT",
                     "L": "BIGINT",
                     "F": "FLOAT",
                     "D": "DOUBLE",
                     "Angle": "DOUBLE",
                     "String": "CHAR",
                     "Flag": "BOOL"}
    _afw_type_map_reverse = {"INT": "I",
                             "BIGINT": "L",
                             "FLOAT": "F",
                             "DOUBLE": "D",
                             "DATETIME": "L",
                             "CHAR": "String",
                             "BOOL": "Flag"}

    def __init__(self, engine, dia_object_index, dia_object_nightly,
                 schema_file, extra_schema_file=None, column_map=None,
                 afw_schemas=None, prefix=""):

        self._engine = engine
        self._dia_object_index = dia_object_index
        self._dia_object_nightly = dia_object_nightly
        self._prefix = prefix

        self._metadata = MetaData(self._engine)

        self.objects = None
        self.objects_nightly = None
        self.objects_last = None
        self.sources = None
        self.forcedSources = None
        self.visits = None

        if column_map:
            _LOG.debug("Reading column map file %s", column_map)
            with open(column_map) as yaml_stream:
                # maps cat column name to afw column name
                self._column_map = yaml.load(yaml_stream, Loader=yaml.SafeLoader)
                _LOG.debug("column map: %s", self._column_map)
        else:
            _LOG.debug("No column map file is given, initialize to empty")
            self._column_map = {}
        self._column_map_reverse = {}
        for table, cmap in self._column_map.items():
            # maps afw column name to cat column name
            self._column_map_reverse[table] = {v: k for k, v in cmap.items()}
        _LOG.debug("reverse column map: %s", self._column_map_reverse)

        # build complete table schema
        self._schemas = self._buildSchemas(schema_file, extra_schema_file,
                                           afw_schemas)

        # map cat column types to alchemy
        self._type_map = dict(DOUBLE=self._getDoubleType(),
                              FLOAT=sqlalchemy.types.Float,
                              DATETIME=sqlalchemy.types.TIMESTAMP,
                              BIGINT=sqlalchemy.types.BigInteger,
                              INTEGER=sqlalchemy.types.Integer,
                              INT=sqlalchemy.types.Integer,
                              TINYINT=sqlalchemy.types.Integer,
                              BLOB=sqlalchemy.types.LargeBinary,
                              CHAR=sqlalchemy.types.CHAR,
                              BOOL=sqlalchemy.types.Boolean)

        # generate schema for all tables, must be called last
        self._makeTables()

    def _makeTables(self, mysql_engine='InnoDB', oracle_tablespace=None, oracle_iot=False):
        """Generate schema for all tables.

        Parameters
        ----------
        mysql_engine : `str`, optional
            MySQL engine type to use for new tables.
        oracle_tablespace : `str`, optional
            Name of Oracle tablespace, only useful with the Oracle backend.
        oracle_iot : `bool`, optional
            Make Index-organized DiaObjectLast table.
        """

        info = dict(oracle_tablespace=oracle_tablespace)

        if self._dia_object_index == 'pix_id_iov':
            # Special PK with HTM column in first position
            constraints = self._tableIndices('DiaObjectIndexHtmFirst', info)
        else:
            constraints = self._tableIndices('DiaObject', info)
        table = Table(self._prefix+'DiaObject', self._metadata,
                      *(self._tableColumns('DiaObject') + constraints),
                      mysql_engine=mysql_engine,
                      info=info)
        self.objects = table

        if self._dia_object_nightly:
            # Same as DiaObject but no index
            table = Table(self._prefix+'DiaObjectNightly', self._metadata,
                          *self._tableColumns('DiaObject'),
                          mysql_engine=mysql_engine,
                          info=info)
            self.objects_nightly = table

        if self._dia_object_index == 'last_object_table':
            # Same as DiaObject but with special index
            info2 = info.copy()
            info2.update(oracle_iot=oracle_iot)
            table = Table(self._prefix+'DiaObjectLast', self._metadata,
                          *(self._tableColumns('DiaObjectLast') +
                            self._tableIndices('DiaObjectLast', info)),
                          mysql_engine=mysql_engine,
                          info=info2)
            self.objects_last = table

        # for all other tables use index definitions in schema
        for table_name in ('DiaSource', 'SSObject', 'DiaForcedSource', 'DiaObject_To_Object_Match'):
            table = Table(self._prefix+table_name, self._metadata,
                          *(self._tableColumns(table_name) +
                            self._tableIndices(table_name, info)),
                          mysql_engine=mysql_engine,
                          info=info)
            if table_name == 'DiaSource':
                self.sources = table
            elif table_name == 'DiaForcedSource':
                self.forcedSources = table

        # special table to track visits, only used by prototype
        table = Table(self._prefix+'ApdbProtoVisits', self._metadata,
                      Column('visitId', sqlalchemy.types.BigInteger, nullable=False),
                      Column('visitTime', sqlalchemy.types.TIMESTAMP, nullable=False),
                      PrimaryKeyConstraint('visitId', name=self._prefix+'PK_ApdbProtoVisits'),
                      Index(self._prefix+'IDX_ApdbProtoVisits_vTime', 'visitTime', info=info),
                      mysql_engine=mysql_engine,
                      info=info)
        self.visits = table

    def makeSchema(self, drop=False, mysql_engine='InnoDB', oracle_tablespace=None, oracle_iot=False):
        """Create or re-create all tables.

        Parameters
        ----------
        drop : `bool`, optional
            If `True` then drop tables before creating new ones.
        mysql_engine : `str`, optional
            MySQL engine type to use for new tables.
        oracle_tablespace : `str`, optional
            Name of Oracle tablespace, only useful with the Oracle backend.
        oracle_iot : `bool`, optional
            Make Index-organized DiaObjectLast table.
        """

        # re-make table schema for all needed tables with possibly different options
        _LOG.debug("clear metadata")
        self._metadata.clear()
        _LOG.debug("re-do schema mysql_engine=%r oracle_tablespace=%r",
                   mysql_engine, oracle_tablespace)
        self._makeTables(mysql_engine=mysql_engine, oracle_tablespace=oracle_tablespace,
                         oracle_iot=oracle_iot)

        # create all tables (optionally drop first)
        if drop:
            _LOG.info('dropping all tables')
            self._metadata.drop_all()
        _LOG.info('creating all tables')
        self._metadata.create_all()

    def getAfwSchema(self, table_name, columns=None):
        """Return afw schema for given table.

        Parameters
        ----------
        table_name : `str`
            One of known APDB table names.
        columns : `list` of `str`, optional
            Include only given table columns in schema, by default all columns
            are included.

        Returns
        -------
        schema : `lsst.afw.table.Schema`
        column_map : `dict`
            Mapping of the table/result column names into schema key.
        """

        table = self._schemas[table_name]
        col_map = self._column_map.get(table_name, {})

        # make a schema
        col2afw = {}
        schema = afwTable.SourceTable.makeMinimalSchema()
        for column in table.columns:
            if columns and column.name not in columns:
                continue
            afw_col = col_map.get(column.name, column.name)
            if afw_col in schema.getNames():
                # The column is already in the minimal schema; reuse its key.
                key = schema.find(afw_col).getKey()
            elif column.type in ("DOUBLE", "FLOAT") and column.unit == "deg":
                #
                # NOTE: degree to radian conversion is not supported (yet)
                #
                # angles in afw are radians and have special "Angle" type
                key = schema.addField(afw_col,
                                      type="Angle",
                                      doc=column.description or "",
                                      units="rad")
            elif column.type == "BLOB":
                # No BLOB support for now
                key = None
            else:
                units = column.unit or ""
                # some units in schema are not recognized by afw but we do not care
                if self._afw_type_map_reverse[column.type] == 'String':
                    key = schema.addField(afw_col,
                                          type=self._afw_type_map_reverse[column.type],
                                          doc=column.description or "",
                                          units=units,
                                          parse_strict="silent",
                                          size=10)
                elif units == "deg":
                    key = schema.addField(afw_col,
                                          type='Angle',
                                          doc=column.description or "",
                                          parse_strict="silent")
                else:
                    key = schema.addField(afw_col,
                                          type=self._afw_type_map_reverse[column.type],
                                          doc=column.description or "",
                                          units=units,
                                          parse_strict="silent")
            col2afw[column.name] = key

        return schema, col2afw

    def getAfwColumns(self, table_name):
        """Returns mapping of afw column names to Column definitions.

        Parameters
        ----------
        table_name : `str`
            One of known APDB table names.

        Returns
        -------
        column_map : `dict`
            Mapping of afw column names to `ColumnDef` instances.
        """
        table = self._schemas[table_name]
        col_map = self._column_map.get(table_name, {})

        cmap = {}
        for column in table.columns:
            afw_name = col_map.get(column.name, column.name)
            cmap[afw_name] = column
        return cmap

    def getColumnMap(self, table_name):
        """Returns mapping of column names to Column definitions.

        Parameters
        ----------
        table_name : `str`
            One of known APDB table names.

        Returns
        -------
        column_map : `dict`
            Mapping of column names to `ColumnDef` instances.
        """
        table = self._schemas[table_name]
        cmap = {column.name: column for column in table.columns}
        return cmap

    def _buildSchemas(self, schema_file, extra_schema_file=None, afw_schemas=None):
        """Create schema definitions for all tables.

        Reads YAML schemas and builds dictionary containing `TableDef`
        instances for each table.

        Parameters
        ----------
        schema_file : `str`
            Name of YAML file with standard cat schema.
        extra_schema_file : `str`, optional
            Name of YAML file with extra table information or `None`.
        afw_schemas : `dict`, optional
            Dictionary with table name for a key and `afw.table.Schema`
            for a value. Columns in schema will be added to standard APDB
            schema (only if standard schema does not have matching column).

        Returns
        -------
        schemas : `dict`
            Mapping of table names to `TableDef` instances.
        """

        _LOG.debug("Reading schema file %s", schema_file)
        with open(schema_file) as yaml_stream:
            tables = list(yaml.load_all(yaml_stream, Loader=yaml.SafeLoader))
            # index it by table name
        _LOG.debug("Read %d tables from schema", len(tables))

        if extra_schema_file:
            _LOG.debug("Reading extra schema file %s", extra_schema_file)
            with open(extra_schema_file) as yaml_stream:
                extras = list(yaml.load_all(yaml_stream, Loader=yaml.SafeLoader))
                # index it by table name
                schemas_extra = {table['table']: table for table in extras}
        else:
            schemas_extra = {}

        # merge extra schema into a regular schema, for now only columns are merged
        for table in tables:
            table_name = table['table']
            if table_name in schemas_extra:
                columns = table['columns']
                extra_columns = schemas_extra[table_name].get('columns', [])
                extra_columns = {col['name']: col for col in extra_columns}
                _LOG.debug("Extra columns for table %s: %s", table_name, extra_columns.keys())
                columns = []
                for col in table['columns']:
                    if col['name'] in extra_columns:
                        columns.append(extra_columns.pop(col['name']))
                    else:
                        columns.append(col)
                # add all remaining extra columns
                table['columns'] = columns + list(extra_columns.values())

                if 'indices' in schemas_extra[table_name]:
                    raise RuntimeError("Extra table definition contains indices, "
                                       "merging is not implemented")

                del schemas_extra[table_name]

        # Pure "extra" table definitions may contain indices
        tables += schemas_extra.values()

        # convert all dicts into named tuples
        schemas = {}
        for table in tables:

            columns = table.get('columns', [])

            table_name = table['table']
            afw_schema = afw_schemas and afw_schemas.get(table_name)
            if afw_schema:
                # use afw schema to create extra columns
                column_names = {col['name'] for col in columns}
                column_names_lower = {col.lower() for col in column_names}
                for _, field in afw_schema:
                    column = self._field2dict(field, table_name)
                    if column['name'] not in column_names:
                        # check that there is no column name that only differs in case
                        if column['name'].lower() in column_names_lower:
                            raise ValueError("afw.table column name case does not match schema column name")
                        columns.append(column)

            table_columns = []
            for col in columns:
                # For prototype set default to 0 even if columns don't specify it
                if "default" not in col:
                    default = None
                    if col['type'] not in ("BLOB", "DATETIME"):
                        default = 0
                else:
                    default = col["default"]

                column = ColumnDef(name=col['name'],
                                   type=col['type'],
                                   nullable=col.get("nullable"),
                                   default=default,
                                   description=col.get("description"),
                                   unit=col.get("unit"),
                                   ucd=col.get("ucd"))
                table_columns.append(column)

            table_indices = []
            for idx in table.get('indices', []):
                index = IndexDef(name=idx.get('name'),
                                 type=idx.get('type'),
                                 columns=idx.get('columns'))
                table_indices.append(index)

            schemas[table_name] = TableDef(name=table_name,
                                           description=table.get('description'),
                                           columns=table_columns,
                                           indices=table_indices)

        return schemas

    def _tableColumns(self, table_name):
        """Return set of columns in a table

        Parameters
        ----------
        table_name : `str`
            Name of the table.

        Returns
        -------
        column_defs : `list`
            List of `Column` objects.
        """

        # get the list of columns in primary key, they are treated somewhat
        # specially below
        table_schema = self._schemas[table_name]
        pkey_columns = set()
        for index in table_schema.indices:
            if index.type == 'PRIMARY':
                pkey_columns = set(index.columns)
                break

        # convert all column dicts into alchemy Columns
        column_defs = []
        for column in table_schema.columns:
            kwargs = dict(nullable=column.nullable)
            if column.default is not None:
                kwargs.update(server_default=str(column.default))
            if column.name in pkey_columns:
                kwargs.update(autoincrement=False)
            ctype = self._type_map[column.type]
            column_defs.append(Column(column.name, ctype, **kwargs))

        return column_defs

    def _field2dict(self, field, table_name):
        """Convert afw schema field definition into a dict format.

        Parameters
        ----------
        field : `lsst.afw.table.Field`
            Field in afw table schema.
        table_name : `str`
            Name of the table.

        Returns
        -------
        field_dict : `dict`
            Field attributes for SQL schema:

            - ``name`` : field name (`str`)
            - ``type`` : type name in SQL, e.g. "INT", "FLOAT" (`str`)
            - ``nullable`` : `True` if column can be ``NULL`` (`bool`)
        """
        column = field.getName()
        column = self._column_map_reverse[table_name].get(column, column)
        ctype = self._afw_type_map[field.getTypeString()]
        return dict(name=column, type=ctype, nullable=True)

    def _tableIndices(self, table_name, info):
        """Return set of constraints/indices in a table

        Parameters
        ----------
        table_name : `str`
            Name of the table.
        info : `dict`
            Additional options passed to SQLAlchemy index constructor.

        Returns
        -------
        index_defs : `list`
            List of SQLAlchemy index/constraint objects.
        """

        table_schema = self._schemas[table_name]

        # convert all index dicts into alchemy Columns
        index_defs = []
        for index in table_schema.indices:
            if index.type == "INDEX":
                index_defs.append(Index(self._prefix+index.name, *index.columns, info=info))
            else:
                kwargs = {}
                if index.name:
                    kwargs['name'] = self._prefix+index.name
                if index.type == "PRIMARY":
                    index_defs.append(PrimaryKeyConstraint(*index.columns, **kwargs))
                elif index.type == "UNIQUE":
                    index_defs.append(UniqueConstraint(*index.columns, **kwargs))

        return index_defs

    def _getDoubleType(self):
        """DOUBLE type is database-specific, select one based on dialect.

        Returns
        -------
        type_object : `object`
            Database-specific type definition.
        """
        if self._engine.name == 'mysql':
            from sqlalchemy.dialects.mysql import DOUBLE
            return DOUBLE(asdecimal=False)
        elif self._engine.name == 'postgresql':
            from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION
            return DOUBLE_PRECISION
        elif self._engine.name == 'oracle':
            from sqlalchemy.dialects.oracle import DOUBLE_PRECISION
            return DOUBLE_PRECISION
        elif self._engine.name == 'sqlite':
            # all floats in sqlite are 8-byte
            from sqlalchemy.dialects.sqlite import REAL
            return REAL
        else:
            raise TypeError('cannot determine DOUBLE type, unexpected dialect: ' + self._engine.name)
Example #51
0
    def test_alias_pathing(self):
        metadata = MetaData()

        a = Table(
            "a",
            metadata,
            Column("id",
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True),
            Column("bid", Integer, ForeignKey("b.id")),
            Column("type", String(30)),
        )

        asub = Table(
            "asub",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("data", String(30)),
        )

        b = Table(
            "b",
            metadata,
            Column("id",
                   Integer,
                   primary_key=True,
                   test_needs_autoincrement=True),
        )
        self.mapper_registry.map_imperatively(A,
                                              a,
                                              polymorphic_identity="a",
                                              polymorphic_on=a.c.type)
        self.mapper_registry.map_imperatively(ASub,
                                              asub,
                                              inherits=A,
                                              polymorphic_identity="asub")
        self.mapper_registry.map_imperatively(
            B, b, properties={"as_": relationship(A)})

        metadata.create_all(self.engine)
        sess = Session(self.engine)
        a1 = ASub(data="a1")
        a2 = ASub(data="a2")
        a3 = ASub(data="a3")
        b1 = B(as_=[a1, a2, a3])
        sess.add(b1)
        sess.commit()
        del sess

        # sqlite has a slow enough growth here
        # that we have to run it more times to see the
        # "dip" again
        @profile_memory(maxtimes=120)
        def go():
            sess = Session(self.engine)
            sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
            sess.close()
            del sess

        try:
            go()
        finally:
            metadata.drop_all(self.engine)
        clear_mappers()
Example #52
0
weather_locations = Table(
    "weather_locations",
    meta,
    Column('id', Integer, primary_key=True, default=id_generator),
    Column('continent', String(30), nullable=False),
    Column('city', String(50), nullable=False))

weather_reports = Table(
    "weather_reports",
    meta,
    Column('id', Integer, primary_key=True),
    Column('location_id', Integer, ForeignKey('weather_locations.id')),
    Column('temperature', Float),
    Column('report_time', DateTime, default=datetime.datetime.now),
)

# create tables
for db in (db1, db2, db3, db4):
    meta.drop_all(db)
    meta.create_all(db)

# establish initial "id" in db1
db1.execute(ids.insert(), nextid=1)

# step 5. define sharding functions.

# we'll use a straight mapping of a particular set of "country"
# attributes to shard id.
shard_lookup = {
    'North America': 'north_america',
    'Asia': 'asia',
    'Europe': 'europe',
    'South America': 'south_america'
}
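
# A sketch of how shard_lookup would typically feed a shard_chooser
# callable (WeatherLocation is a mapped class elided from this excerpt, and
# its "continent" attribute is assumed here for illustration):
#
#     def shard_chooser(mapper, instance, clause=None):
#         # route the instance to a shard based on its continent
#         return shard_lookup[instance.continent]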
Example #53
0
    def test_with_manytomany(self):
        metadata = MetaData()

        table1 = Table(
            "mytable",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table2 = Table(
            "mytable2",
            metadata,
            Column(
                "col1",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("col2", String(30)),
        )

        table3 = Table(
            "t1tot2",
            metadata,
            Column("t1", Integer, ForeignKey("mytable.col1")),
            Column("t2", Integer, ForeignKey("mytable2.col1")),
        )

        @profile_memory()
        def go():
            class A(fixtures.ComparableEntity):
                pass

            class B(fixtures.ComparableEntity):
                pass

            self.mapper_registry.map_imperatively(
                A,
                table1,
                properties={
                    "bs":
                    relationship(B,
                                 secondary=table3,
                                 backref="as",
                                 order_by=table3.c.t1)
                },
            )
            self.mapper_registry.map_imperatively(B, table2)

            sess = Session(self.engine, autoflush=False)
            a1 = A(col2="a1")
            a2 = A(col2="a2")
            b1 = B(col2="b1")
            b2 = B(col2="b2")
            a1.bs.append(b1)
            a2.bs.append(b2)
            for x in [a1, a2]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()

            # mappers necessarily find themselves in the compiled cache,
            # so to allow them to be GC'ed clear out the cache
            self.engine.clear_compiled_cache()
            del B
            del A

        metadata.create_all(self.engine)
        try:
            go()
        finally:
            metadata.drop_all(self.engine)
        assert_no_mappers()
Example #54
0
def drop_tables():
    meta = MetaData()
    meta.drop_all(bind=engine, tables=[task])
Example #55
0
    def test_update(self):
        """
        Tests sending functions and SQL expressions to the VALUES and SET
        clauses of INSERT/UPDATE instances, and that column-level defaults
        get overridden.
        """

        meta = MetaData(testing.db)
        t = Table('t1', meta,
                  Column('id', Integer, Sequence('t1idseq', optional=True),
                         primary_key=True),
                  Column('value', Integer)
                  )
        t2 = Table('t2', meta,
                   Column('id', Integer, Sequence('t2idseq', optional=True),
                          primary_key=True),
                   Column('value', Integer, default=7),
                   Column('stuff', String(20), onupdate="thisisstuff")
                   )
        meta.create_all()
        try:
            t.insert(values=dict(value=func.length("one"))).execute()
            assert t.select().execute().first()['value'] == 3
            t.update(values=dict(value=func.length("asfda"))).execute()
            assert t.select().execute().first()['value'] == 5

            r = t.insert(values=dict(value=func.length("sfsaafsda"))).execute()
            id = r.inserted_primary_key[0]
            assert t.select(t.c.id == id).execute().first()['value'] == 9
            t.update(values={t.c.value: func.length("asdf")}).execute()
            assert t.select().execute().first()['value'] == 4
            t2.insert().execute()
            t2.insert(values=dict(value=func.length("one"))).execute()
            t2.insert(values=dict(value=func.length("asfda") + -19)).\
                execute(stuff="hi")

            res = exec_sorted(select([t2.c.value, t2.c.stuff]))
            eq_(res, [(-14, 'hi'), (3, None), (7, None)])

            t2.update(values=dict(value=func.length("asdsafasd"))).\
                execute(stuff="some stuff")
            assert select([t2.c.value, t2.c.stuff]).execute().fetchall() == \
                [(9, "some stuff"), (9, "some stuff"),
                 (9, "some stuff")]

            t2.delete().execute()

            t2.insert(values=dict(value=func.length("one") + 8)).execute()
            assert t2.select().execute().first()['value'] == 11

            t2.update(values=dict(value=func.length("asfda"))).execute()
            eq_(
                select([t2.c.value, t2.c.stuff]).execute().first(),
                (5, "thisisstuff")
            )

            t2.update(values={t2.c.value: func.length("asfdaasdf"),
                              t2.c.stuff: "foo"}).execute()
            print("HI", select([t2.c.value, t2.c.stuff]).execute().first())
            eq_(select([t2.c.value, t2.c.stuff]).execute().first(),
                (9, "foo")
                )
        finally:
            meta.drop_all()
Example #56
0
def drop_tables(target_config=None):
    engine = get_engine(target_config)

    meta = MetaData()
    meta.drop_all(bind=engine, tables=[users, flags, orders, transactions])
Example #57
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    profile = Table('profile',
                    meta,
                    Column('id',
                           String(length=36),
                           primary_key=True,
                           nullable=False),
                    Column('name', String(length=255)),
                    Column('type', String(length=255)),
                    Column('context', types.Dict),
                    Column('spec', types.Dict),
                    Column('user', String(length=32), nullable=False),
                    Column('project', String(length=32), nullable=False),
                    Column('domain', String(length=32)),
                    Column('permission', String(length=32)),
                    Column('meta_data', types.Dict),
                    Column('created_at', DateTime),
                    Column('updated_at', DateTime),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')

    cluster = Table('cluster',
                    meta,
                    Column('id', String(36), primary_key=True, nullable=False),
                    Column('name', String(255), nullable=False),
                    Column('profile_id',
                           String(36),
                           ForeignKey('profile.id'),
                           nullable=False),
                    Column('user', String(32), nullable=False),
                    Column('project', String(32), nullable=False),
                    Column('domain', String(32)),
                    Column('parent', String(36)),
                    Column('init_at', DateTime),
                    Column('created_at', DateTime),
                    Column('updated_at', DateTime),
                    Column('min_size', Integer),
                    Column('max_size', Integer),
                    Column('desired_capacity', Integer),
                    Column('next_index', Integer),
                    Column('timeout', Integer),
                    Column('status', String(255)),
                    Column('status_reason', Text),
                    Column('meta_data', types.Dict),
                    Column('data', types.Dict),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')

    node = Table('node',
                 meta,
                 Column('id', String(36), primary_key=True, nullable=False),
                 Column('name', String(255)),
                 Column('physical_id', String(36)),
                 Column('cluster_id', String(36)),
                 Column('profile_id',
                        String(36),
                        ForeignKey('profile.id'),
                        nullable=False),
                 Column('user', String(32)),
                 Column('project', String(32)),
                 Column('domain', String(32)),
                 Column('index', Integer),
                 Column('role', String(64)),
                 Column('init_at', DateTime),
                 Column('created_at', DateTime),
                 Column('updated_at', DateTime),
                 Column('status', String(255)),
                 Column('status_reason', Text),
                 Column('meta_data', types.Dict),
                 Column('data', types.Dict),
                 mysql_engine='InnoDB',
                 mysql_charset='utf8')

    cluster_lock = Table('cluster_lock',
                         meta,
                         Column('cluster_id',
                                String(36),
                                primary_key=True,
                                nullable=False),
                         Column('action_ids', types.List),
                         Column('semaphore', Integer),
                         mysql_engine='InnoDB',
                         mysql_charset='utf8')

    node_lock = Table('node_lock',
                      meta,
                      Column('node_id',
                             String(36),
                             primary_key=True,
                             nullable=False),
                      Column('action_id', String(36)),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8')

    policy = Table('policy',
                   meta,
                   Column('id', String(36), primary_key=True, nullable=False),
                   Column('name', String(255)),
                   Column('type', String(255)),
                   Column('user', String(32), nullable=False),
                   Column('project', String(32), nullable=False),
                   Column('domain', String(32)),
                   Column('cooldown', Integer),
                   Column('level', Integer),
                   Column('created_at', DateTime),
                   Column('updated_at', DateTime),
                   Column('spec', types.Dict),
                   Column('data', types.Dict),
                   mysql_engine='InnoDB',
                   mysql_charset='utf8')

    cluster_policy = Table('cluster_policy',
                           meta,
                           Column('id',
                                  String(36),
                                  primary_key=True,
                                  nullable=False),
                           Column('cluster_id',
                                  String(36),
                                  ForeignKey('cluster.id'),
                                  nullable=False),
                           Column('policy_id',
                                  String(36),
                                  ForeignKey('policy.id'),
                                  nullable=False),
                           Column('cooldown', Integer),
                           Column('priority', Integer),
                           Column('level', Integer),
                           Column('enabled', Boolean),
                           Column('data', types.Dict),
                           Column('last_op', DateTime),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8')

    receiver = Table('receiver',
                     meta,
                     Column('id', String(36), primary_key=True,
                            nullable=False),
                     Column('name', String(255)),
                     Column('type', String(255)),
                     Column('user', String(32)),
                     Column('project', String(32)),
                     Column('domain', String(32)),
                     Column('created_at', DateTime),
                     Column('updated_at', DateTime),
                     Column('cluster_id', String(36)),
                     Column('actor', types.Dict),
                     Column('action', Text),
                     Column('params', types.Dict),
                     Column('channel', types.Dict),
                     mysql_engine='InnoDB',
                     mysql_charset='utf8')

    credential = Table('credential',
                       meta,
                       Column('user',
                              String(32),
                              primary_key=True,
                              nullable=False),
                       Column('project',
                              String(32),
                              primary_key=True,
                              nullable=False),
                       Column('cred', types.Dict, nullable=False),
                       Column('data', types.Dict),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8')

    action = Table(
        'action',
        meta,
        Column('id', String(36), primary_key=True, nullable=False),
        Column('name', String(63)),
        Column('context', types.Dict),
        Column('target', String(36)),
        Column('action', Text),
        Column('cause', String(255)),
        Column('owner', String(36)),
        Column('interval', Integer),
        # FIXME: Don't specify fixed precision.
        Column('start_time', Float(precision='24,8')),
        Column('end_time', Float(precision='24,8')),
        Column('timeout', Integer),
        Column('control', String(255)),
        Column('status', String(255)),
        Column('status_reason', Text),
        Column('inputs', types.Dict),
        Column('outputs', types.Dict),
        Column('depends_on', types.List),
        Column('depended_by', types.List),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('data', types.Dict),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    dependency = Table('dependency',
                       meta,
                       Column('id',
                              String(36),
                              nullable=False,
                              primary_key=True),
                       Column('depended',
                              String(36),
                              ForeignKey('action.id'),
                              nullable=False),
                       Column('dependent',
                              String(36),
                              ForeignKey('action.id'),
                              nullable=False),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8')

    event = Table('event',
                  meta,
                  Column('id', String(36), primary_key=True, nullable=False),
                  Column('timestamp', DateTime, nullable=False),
                  Column('obj_id', String(36)),
                  Column('obj_name', String(255)),
                  Column('obj_type', String(36)),
                  Column('cluster_id', String(36)),
                  Column('level', String(63)),
                  Column('user', String(32)),
                  Column('project', String(32)),
                  Column('action', String(36)),
                  Column('status', String(255)),
                  Column('status_reason', Text),
                  Column('meta_data', types.Dict),
                  mysql_engine='InnoDB',
                  mysql_charset='utf8')

    tables = (
        profile,
        cluster,
        node,
        cluster_lock,
        node_lock,
        policy,
        cluster_policy,
        credential,
        action,
        dependency,
        receiver,
        event,
    )

    for index, table in enumerate(tables):
        try:
            table.create()
        except Exception:
            # If an error occurs, drop all tables created so far to return
            # to the previously existing state.
            meta.drop_all(tables=tables[:index])
            raise
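
# A possible companion downgrade, sketched here as an illustration rather than
# the project's actual code: drop the tables in reverse dependency order so
# that tables holding foreign keys (dependency, cluster_policy, node, cluster)
# go away before the tables they reference. Assumes the sqlalchemy-migrate
# style bind used above, where reflection via autoload works off meta.bind.
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    for name in ('event', 'receiver', 'dependency', 'action', 'credential',
                 'cluster_policy', 'policy', 'node_lock', 'cluster_lock',
                 'node', 'cluster', 'profile'):
        Table(name, meta, autoload=True).drop()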
Example #58
0
def drop_tables(eng):
    meta = MetaData()
    meta.drop_all(bind=eng, tables=[chat_user, chat, message])
Example #59
0
    def test_orm_many_engines(self):
        metadata = MetaData(self.engine)

        table1 = Table(
            "mytable", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)))

        table2 = Table(
            "mytable2", metadata,
            Column('col1', Integer, primary_key=True,
                   test_needs_autoincrement=True),
            Column('col2', String(30)),
            Column('col3', Integer, ForeignKey("mytable.col1")))

        metadata.create_all()

        m1 = mapper(A,
                    table1,
                    properties={
                        "bs":
                        relationship(B,
                                     cascade="all, delete",
                                     order_by=table2.c.col1)
                    },
                    _compiled_cache_size=50)
        m2 = mapper(B, table2, _compiled_cache_size=50)

        m3 = mapper(A, table1, non_primary=True)

        @profile_memory()
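        # profile_memory (a helper from SQLAlchemy's own test suite) repeatedly
        # invokes go() and checks that memory use levels off, i.e. that
        # building and disposing a fresh engine on each run does not leak.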
        def go():
            engine = engines.testing_engine(
                options={
                    'logging_name': 'FOO',
                    'pool_logging_name': 'BAR',
                    'use_reaper': False
                })
            sess = create_session(bind=engine)

            a1 = A(col2="a1")
            a2 = A(col2="a2")
            a3 = A(col2="a3")
            a1.bs.append(B(col2="b1"))
            a1.bs.append(B(col2="b2"))
            a3.bs.append(B(col2="b3"))
            for x in [a1, a2, a3]:
                sess.add(x)
            sess.flush()
            sess.expunge_all()

            alist = sess.query(A).order_by(A.col1).all()
            eq_([
                A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
                A(col2="a2", bs=[]),
                A(col2="a3", bs=[B(col2="b3")])
            ], alist)

            for a in alist:
                sess.delete(a)
            sess.flush()
            sess.close()
            engine.dispose()

        go()

        metadata.drop_all()
        del m1, m2, m3
        assert_no_mappers()
Example #60
0
print(engine)

meta = MetaData()
TEST = Table(
    'TESTSELECT',
    meta,
    Column('id', BIGINT),
    # Column('id', BIGINT, primary_key=True),
    # Column('name', VARCHAR(20)),
    # Column('name', nz.NVARCHAR(20)),
    Column('name', nz.NVARCHAR(2)),
    # Column('name', nz.NCHAR(20)),
    Column('gender', CHAR),
    # Column('gender', nz.NCHAR),
)
meta.drop_all(engine)
meta.create_all(engine)

conn = engine.connect()
conn.execute(TEST.insert().values(id='3', name='j', gender='M'))
conn.execute(TEST.insert().values(id='3', name='j', gender='M'))
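# Both INSERTs reuse id='3'; this only succeeds because the primary_key
# variant of the id column is commented out above, so nothing enforces
# uniqueness on id.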

#meta.drop_all(engine)
col = Column('name')
col2 = Column('gender')
#s = select([TEST])#.where(col=='jack1')
#result = conn.execute(s)
#for row in result:
#   print (row)
#s = select([TEST]).where(col=='jack1').order_by(desc(col2)).limit(10)