def test_table_overrides_metadata_create(self, connection):
    """A Sequence attached to a Table column is created/dropped with the
    table, while free-standing sequences follow the MetaData."""
    metadata = self.metadata
    Sequence("s1", metadata=metadata)
    s2 = Sequence("s2", metadata=metadata)
    s3 = Sequence("s3")
    t = Table("t", metadata, Column("c", Integer, s3, primary_key=True))
    # attaching s3 to the column associates it with the table's metadata
    assert s3.metadata is metadata
    t.create(connection, checkfirst=True)
    s3.drop(connection)

    # 't' is created, and 's3' won't be
    # re-created since it's linked to 't'.
    # 's1' and 's2' are, however.
    metadata.create_all(connection)
    assert self._has_sequence(connection, "s1")
    assert self._has_sequence(connection, "s2")
    assert not self._has_sequence(connection, "s3")

    # dropping s2 directly removes only that sequence
    s2.drop(connection)
    assert self._has_sequence(connection, "s1")
    assert not self._has_sequence(connection, "s2")

    metadata.drop_all(connection)
    assert not self._has_sequence(connection, "s1")
    assert not self._has_sequence(connection, "s2")
def test_fixed_char(self, char_type):
    """Round-trip fixed-width CHAR/NCHAR data and verify reflection
    yields the same fixed-char type; values come back blank-padded."""
    m = self.metadata
    t = Table(
        "t1",
        m,
        Column("id", Integer, primary_key=True),
        Column("data", char_type(30), nullable=False),
    )

    if py2k and char_type is NCHAR:
        # Python 2 requires unicode literals for NCHAR data
        v1, v2, v3 = u"value 1", u"value 2", u"value 3"
    else:
        v1, v2, v3 = "value 1", "value 2", "value 3"

    with testing.db.begin() as conn:
        t.create(conn)
        conn.execute(
            t.insert(),
            dict(id=1, data=v1),
            dict(id=2, data=v2),
            dict(id=3, data=v3),
        )

        # comparison against the unpadded value still matches; the
        # returned value is padded out to the declared width
        eq_(
            conn.execute(t.select().where(t.c.data == v2)).fetchall(),
            [(2, "value 2 ")],
        )

        # reflected column reports the same fixed-char type
        m2 = MetaData()
        t2 = Table("t1", m2, autoload_with=conn)
        is_(type(t2.c.data.type), char_type)
        eq_(
            conn.execute(t2.select().where(t2.c.data == v2)).fetchall(),
            [(2, "value 2 ")],
        )
def test_numeric_infinity_float(self):
    """float('inf') / float('-inf') round-trip through BINARY_DOUBLE
    with asdecimal=False, via both a Core select and raw SQL."""
    m = self.metadata
    t1 = Table(
        "t1",
        m,
        Column("intcol", Integer),
        Column("numericcol", oracle.BINARY_DOUBLE(asdecimal=False)),
    )
    t1.create()
    t1.insert().execute(
        [
            dict(intcol=1, numericcol=float("inf")),
            dict(intcol=2, numericcol=float("-inf")),
        ]
    )

    # Core select path
    eq_(
        select([t1.c.numericcol])
        .order_by(t1.c.intcol)
        .execute()
        .fetchall(),
        [(float("inf"),), (float("-inf"),)],
    )

    # raw textual SQL path gives the same floats
    eq_(
        testing.db.execute(
            "select numericcol from t1 order by intcol"
        ).fetchall(),
        [(float("inf"),), (float("-inf"),)],
    )
def test_reflect_table_comment(self):
    """Table comments are reflected per-schema: a local 'parent' table
    and the same-named table in the test schema each return their own
    comment."""
    local_parent = Table(
        "parent",
        self.metadata,
        Column("q", Integer),
        comment="my local comment",
    )
    local_parent.create(testing.db)
    insp = inspect(testing.db)
    # same-named table in the alternate test schema keeps its own comment
    # (assumes that table was created with "my table comment" by the
    # test setup elsewhere — not visible here)
    eq_(
        insp.get_table_comment("parent", schema=testing.config.test_schema),
        {"text": "my table comment"},
    )
    # no schema -> default schema -> the locally created table
    eq_(
        insp.get_table_comment(
            "parent",
        ),
        {"text": "my local comment"},
    )
    # passing the default schema explicitly behaves the same
    eq_(
        insp.get_table_comment(
            "parent", schema=testing.db.dialect.default_schema_name
        ),
        {"text": "my local comment"},
    )
def test_numeric_nan_float(self):
    """NaN values round-trip through BINARY_DOUBLE(asdecimal=False);
    both 'nan' and '-nan' come back stringifying as 'nan'."""
    m = self.metadata
    t1 = Table(
        "t1",
        m,
        Column("intcol", Integer),
        Column("numericcol", oracle.BINARY_DOUBLE(asdecimal=False)),
    )
    t1.create()
    t1.insert().execute(
        [
            dict(intcol=1, numericcol=float("nan")),
            dict(intcol=2, numericcol=float("-nan")),
        ]
    )

    # compare via str() since NaN != NaN under float equality
    eq_(
        [
            tuple(str(col) for col in row)
            for row in select([t1.c.numericcol])
            .order_by(t1.c.intcol)
            .execute()
        ],
        [("nan",), ("nan",)],
    )

    eq_(
        [
            tuple(str(col) for col in row)
            for row in testing.db.execute(
                "select numericcol from t1 order by intcol"
            )
        ],
        [("nan",), ("nan",)],
    )
def _dont_test_numeric_nan_decimal(self):
    """Disabled (leading '_dont_' keeps it out of collection):
    NaN round-trip through BINARY_DOUBLE(asdecimal=True) as
    decimal.Decimal values.

    NOTE(review): reason for disabling is not visible here — presumably
    Decimal NaN comparison/driver support issues; confirm before
    re-enabling.
    """
    m = self.metadata
    t1 = Table(
        "t1",
        m,
        Column("intcol", Integer),
        Column("numericcol", oracle.BINARY_DOUBLE(asdecimal=True)),
    )
    t1.create()
    t1.insert().execute(
        [
            dict(intcol=1, numericcol=decimal.Decimal("NaN")),
            dict(intcol=2, numericcol=decimal.Decimal("-NaN")),
        ]
    )

    eq_(
        select([t1.c.numericcol])
        .order_by(t1.c.intcol)
        .execute()
        .fetchall(),
        [(decimal.Decimal("NaN"),), (decimal.Decimal("NaN"),)],
    )

    eq_(
        testing.db.execute(
            "select numericcol from t1 order by intcol"
        ).fetchall(),
        [(decimal.Decimal("NaN"),), (decimal.Decimal("NaN"),)],
    )
def test_create_drop_bound(self):
    """create_all/drop_all and per-table create/drop through a bound
    MetaData, for both MetaData flavors and both engine and
    connection binds."""
    for meta in (MetaData, ThreadLocalMetaData):
        for bind in (testing.db, testing.db.connect()):
            metadata = meta()
            table = Table("test_table", metadata, Column("foo", Integer))
            metadata.bind = bind
            # the table inherits the bind from its MetaData
            assert metadata.bind is table.bind is bind
            metadata.create_all()
            assert table.exists()
            metadata.drop_all()
            table.create()
            table.drop()
            assert not table.exists()

            # deliberately repeated with a fresh MetaData on the same
            # bind: verifies a second round against already-used bind
            metadata = meta()
            table = Table("test_table", metadata, Column("foo", Integer))
            metadata.bind = bind
            assert metadata.bind is table.bind is bind
            metadata.create_all()
            assert table.exists()
            metadata.drop_all()
            table.create()
            table.drop()
            assert not table.exists()
            if isinstance(bind, engine.Connection):
                bind.close()
def test_insert_from_select_fn_defaults(self, connection):
    """INSERT..FROM SELECT evaluates a Python-function column default
    once for the whole statement, not once per row."""
    data = self.tables.data
    counter = itertools.count(1)

    def foo(ctx):
        # context-style default; each call advances the counter
        return next(counter)

    table = Table(
        "sometable",
        self.metadata,
        Column("x", Integer),
        Column("foo", Integer, default=foo),
        Column("y", Integer),
    )
    table.create(connection)
    sel = select([data.c.x, data.c.y])
    ins = table.insert().from_select(["x", "y"], sel)
    connection.execute(ins)

    # counter is only called once!  both rows get foo == 1
    eq_(
        list(connection.execute(table.select().order_by(table.c.x))),
        [(2, 1, 5), (7, 1, 12)],
    )
def _run_test(self, *arg, **kw):
    """Shared driver: build a one-row table whose pk column uses
    self.MyInteger plus the given column args, insert, and check
    inserted_primary_key behavior with/without implicit RETURNING.

    ``implicit_returning`` may be passed in kw to control the Table
    option; remaining kw become Column() arguments.
    """
    metadata = self.metadata
    implicit_returning = kw.pop("implicit_returning", True)
    kw["primary_key"] = True
    if kw.get("autoincrement", True):
        kw["test_needs_autoincrement"] = True
    t = Table(
        "x",
        metadata,
        Column("y", self.MyInteger, *arg, **kw),
        Column("data", Integer),
        implicit_returning=implicit_returning,
    )

    with testing.db.connect() as conn:
        t.create(conn)
        r = conn.execute(t.insert().values(data=5))

        # we don't pre-fetch 'server_default'.
        if "server_default" in kw and (
            not testing.db.dialect.implicit_returning
            or not implicit_returning
        ):
            eq_(r.inserted_primary_key, [None])
        else:
            # "INT_1" is presumably MyInteger's result-processed form
            # of the generated pk value 1 — defined outside this view
            eq_(r.inserted_primary_key, ["INT_1"])

        eq_(conn.execute(t.select()).first(), ("INT_1", 5))
def setup(self):
    """Per-test fixture: define the module-global GoofyType decorator
    and the 'tables' table, creating it if needed."""
    meta = MetaData(testing.db)
    global table, GoofyType

    class GoofyType(TypeDecorator):
        # String wrapper that prepends "FOO" on the way in and
        # appends "BAR" on the way out, to make processing visible
        impl = String

        def process_bind_param(self, value, dialect):
            if value is None:
                return None
            return "FOO" + value

        def process_result_value(self, value, dialect):
            if value is None:
                return None
            return value + "BAR"

    table = Table(
        "tables",
        meta,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
        Column("persons", Integer),
        Column("full", Boolean),
        Column("goofy", GoofyType(50)),
    )
    table.create(checkfirst=True)
def test_func_embedded_valuesbase(self, connection):
    """test can use next_value() in values() of _ValuesBase"""
    metadata = self.metadata
    t1 = Table("t", metadata, Column("x", Integer))
    # create on the test's connection; previously this used
    # testing.db, bypassing the connection fixture's transaction
    t1.create(connection)
    s = Sequence("my_sequence")
    # embed the sequence's next_value() directly in insert().values()
    connection.execute(t1.insert().values(x=s.next_value()))
    self._assert_seq_result(connection.scalar(t1.select()))
def test_checkfirst_table(self, connection):
    """Table.create/drop propagate to an attached Sequence, and
    checkfirst=True is a no-op when the state already matches."""
    meta = MetaData()
    seq = Sequence("my_sequence")
    tbl = Table("t", meta, Column("c", Integer, seq, primary_key=True))

    tbl.create(connection, checkfirst=False)
    assert self._has_sequence(connection, "my_sequence")
    # already present; checkfirst=True must not raise
    tbl.create(connection, checkfirst=True)

    tbl.drop(connection, checkfirst=False)
    assert not self._has_sequence(connection, "my_sequence")
    # already gone; checkfirst=True must not raise
    tbl.drop(connection, checkfirst=True)
def setup_class(cls):
    """Class-level fixture: create the module-global 'query_users'
    table used by the tests in this class."""
    global users, metadata
    metadata = MetaData()
    users = Table(
        "query_users",
        metadata,
        Column("user_id", INT, primary_key=True),
        Column("user_name", VARCHAR(20)),
        # marks the table as needing transactional DDL support
        test_needs_acid=True,
    )
    users.create(testing.db)
def test_create_drop_explicit(self):
    """create/drop with an explicit bind passed either positionally or
    as the 'bind' keyword, against both an engine and a connection."""
    metadata = MetaData()
    table = Table("test_table", metadata, Column("foo", Integer))
    for bind in (testing.db, testing.db.connect()):
        # exercise both call styles: bind=... keyword and positional
        for args in [([], {"bind": bind}), ([bind], {})]:
            metadata.create_all(*args[0], **args[1])
            assert table.exists(*args[0], **args[1])
            metadata.drop_all(*args[0], **args[1])
            table.create(*args[0], **args[1])
            table.drop(*args[0], **args[1])
            assert not table.exists(*args[0], **args[1])
def setup(self):
    """Per-test fixture: create the module-global 'tables' table whose
    pk is driven by the module-global 'tid_seq' sequence."""
    meta = MetaData(testing.db)
    global table, seq
    seq = Sequence("tid_seq")
    table = Table(
        "tables",
        meta,
        Column("id", Integer, seq, primary_key=True),
        Column("data", String(50)),
    )
    table.create(checkfirst=True)
def test_int_not_float(self):
    """An Integer column comes back as a Python int (never float),
    both via RETURNING and via a plain SELECT."""
    tbl = Table("t1", self.metadata, Column("foo", Integer))
    tbl.create()

    result = tbl.insert().values(foo=5).returning(tbl.c.foo).execute()
    returned = result.scalar()
    assert returned == 5
    assert isinstance(returned, int)

    selected = tbl.select().scalar()
    assert selected == 5
    assert isinstance(selected, int)
def test_inserted_pk_no_returning_no_lastrowid(self):
    """test inserted_primary_key contains [None] when
    pk_col=next_value(), implicit returning is not used."""
    metadata = self.metadata
    t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
    # table created with the default engine; the insert below runs on
    # a separate engine configured without implicit RETURNING
    t1.create(testing.db)

    e = engines.testing_engine(options={"implicit_returning": False})
    s = Sequence("my_sequence")
    with e.connect() as conn:
        r = conn.execute(t1.insert().values(x=s.next_value()))
        # no RETURNING and no lastrowid available -> pk is unknown
        eq_(r.inserted_primary_key, [None])
def test_inserted_pk_implicit_returning(self):
    """test inserted_primary_key contains the result when
    pk_col=next_value(), when implicit returning is used."""
    metadata = self.metadata
    s = Sequence("my_sequence")
    t1 = Table("t", metadata, Column("x", Integer, primary_key=True))
    # table created with the default engine; the insert runs on a
    # separate engine that has implicit RETURNING enabled
    t1.create(testing.db)

    e = engines.testing_engine(options={"implicit_returning": True})
    with e.connect() as conn:
        r = conn.execute(t1.insert().values(x=s.next_value()))
        # RETURNING surfaces the sequence-generated pk value
        self._assert_seq_result(r.inserted_primary_key[0])
def test_func_embedded_whereclause(self, connection):
    """test can use next_value() in whereclause"""
    metadata = self.metadata
    t1 = Table("t", metadata, Column("x", Integer))
    # create on the test's connection; previously this used
    # testing.db, bypassing the connection fixture's transaction
    t1.create(connection)
    connection.execute(t1.insert(), [{"x": 1}, {"x": 300}, {"x": 301}])
    s = Sequence("my_sequence")
    # the sequence's first next_value() is 1, so only x > 1 survive
    eq_(
        list(
            connection.execute(t1.select().where(t1.c.x > s.next_value()))
        ),
        [(300,), (301,)],
    )
def test_numerics(self):
    """Each Oracle numeric flavor round-trips with the expected Python
    type, both through the declared table and a reflected copy."""
    m = self.metadata
    t1 = Table(
        "t1",
        m,
        Column("intcol", Integer),
        Column("numericcol", Numeric(precision=9, scale=2)),
        Column("floatcol1", Float()),
        Column("floatcol2", FLOAT()),
        Column("doubleprec", oracle.DOUBLE_PRECISION),
        Column("numbercol1", oracle.NUMBER(9)),
        Column("numbercol2", oracle.NUMBER(9, 3)),
        Column("numbercol3", oracle.NUMBER),
    )
    t1.create()
    t1.insert().execute(
        intcol=1,
        numericcol=5.2,
        floatcol1=6.5,
        floatcol2=8.5,
        doubleprec=9.5,
        numbercol1=12,
        numbercol2=14.85,
        numbercol3=15.76,
    )

    # same expectations against the declared and the reflected table
    m2 = MetaData(testing.db)
    t2 = Table("t1", m2, autoload=True)

    for row in (
        t1.select().execute().first(),
        t2.select().execute().first(),
    ):
        # expected (value, python type) per column, in declared order:
        # scaled Numeric/NUMBER come back as Decimal, float types as
        # float, integer-ish NUMBER as int
        for i, (val, type_) in enumerate(
            (
                (1, int),
                (decimal.Decimal("5.2"), decimal.Decimal),
                (6.5, float),
                (8.5, float),
                (9.5, float),
                (12, int),
                (decimal.Decimal("14.85"), decimal.Decimal),
                (15.76, float),
            )
        ):
            eq_(row[i], val)
            assert isinstance(row[i], type_), "%r is not %r" % (
                row[i],
                type_,
            )
def test_int_not_float_no_coerce_decimal(self):
    """Integer results stay int when the engine disables Decimal
    coercion (coerce_to_decimal=False)."""
    engine = testing_engine(options=dict(coerce_to_decimal=False))
    m = self.metadata
    t1 = Table("t1", m, Column("foo", Integer))
    t1.create()
    r = engine.execute(t1.insert().values(foo=5).returning(t1.c.foo))
    x = r.scalar()
    assert x == 5
    assert isinstance(x, int)

    # NOTE(review): this select executes on the metadata's default
    # bind, not the no-coerce engine above — presumably intentional
    # (int either way); confirm
    x = t1.select().scalar()
    assert x == 5
    assert isinstance(x, int)
def test_non_autoincrement(self, connection):
    """Inserting without a pk value into an autoincrement=False pk
    column warns about the missing default."""
    # sqlite INT primary keys can be non-unique! (only for ints)
    nonai = Table(
        "nonaitest",
        self.metadata,
        Column("id", Integer, autoincrement=False, primary_key=True),
        Column("data", String(20)),
    )
    nonai.create(connection)

    # just testing SQLite for now, it passes
    with expect_warnings(".*has no Python-side or server-side default.*"):
        # postgresql + mysql strict will fail on first row,
        # mysql in legacy mode fails on second row
        connection.execute(nonai.insert(), dict(data="row 1"))
        connection.execute(nonai.insert(), dict(data="row 2"))
def setup(self):
    """Per-test fixture: create the module-global 'tables' table whose
    pk column uses key='foo_id' (Python-side name differs from the
    database column name 'id')."""
    meta = MetaData(testing.db)
    global table
    table = Table(
        "tables",
        meta,
        Column(
            "id",
            Integer,
            primary_key=True,
            key="foo_id",
            test_needs_autoincrement=True,
        ),
        Column("data", String(20)),
    )
    table.create(checkfirst=True)
def test_char_length(self):
    """Declared lengths of VARCHAR/NVARCHAR/CHAR/NCHAR columns survive
    a round trip through reflection."""
    declared = Table(
        "t1",
        self.metadata,
        Column("c1", VARCHAR(50)),
        Column("c2", NVARCHAR(250)),
        Column("c3", CHAR(200)),
        Column("c4", NCHAR(180)),
    )
    declared.create()

    reflected = Table("t1", MetaData(testing.db), autoload=True)
    # each reflected column must report its declared length
    for name, length in (
        ("c1", 50),
        ("c2", 250),
        ("c3", 200),
        ("c4", 180),
    ):
        eq_(reflected.c[name].type.length, length)
def test_rowid(self):
    """Select the Oracle ROWID pseudo-column and use the fetched value
    in a WHERE clause via CAST to oracle.ROWID."""
    metadata = self.metadata
    t = Table("t1", metadata, Column("x", Integer))
    t.create()
    t.insert().execute(x=5)
    s1 = select([t])
    # fetch the rowid of the single row via a derived select
    s2 = select([column("rowid")]).select_from(s1)
    rowid = s2.scalar()

    # the ROWID type is not really needed here,
    # as cx_oracle just treats it as a string,
    # but we want to make sure the ROWID works...
    rowid_col = column("rowid", oracle.ROWID)
    s3 = select([t.c.x, rowid_col]).where(
        rowid_col == cast(rowid, oracle.ROWID)
    )
    eq_(s3.select().execute().fetchall(), [(5, rowid)])
def test_create_drop_constructor_bound(self):
    """MetaData bound at construction time (positional or 'bind'
    keyword) propagates to tables and drives create/drop."""
    for bind in (testing.db, testing.db.connect()):
        try:
            # both constructor styles: MetaData(bind) and
            # MetaData(bind=bind)
            for args in (([bind], {}), ([], {"bind": bind})):
                metadata = MetaData(*args[0], **args[1])
                table = Table(
                    "test_table", metadata, Column("foo", Integer)
                )
                assert metadata.bind is table.bind is bind
                metadata.create_all()
                assert table.exists()
                metadata.drop_all()
                table.create()
                table.drop()
                assert not table.exists()
        finally:
            # always release a raw connection bind
            if isinstance(bind, engine.Connection):
                bind.close()
def test_lastrowid_zero(self):
    """A driver lastrowid of 0 is taken at face value as the inserted
    primary key, not treated as falsy/missing."""
    from sqlalchemy_1_3.dialects import sqlite

    eng = engines.testing_engine()

    class ExcCtx(sqlite.base.SQLiteExecutionContext):
        # force the execution context to report lastrowid == 0
        def get_lastrowid(self):
            return 0

    eng.dialect.execution_ctx_cls = ExcCtx
    t = Table(
        "t",
        self.metadata,
        Column("x", Integer, primary_key=True),
        Column("y", Integer),
    )
    t.create(eng)
    r = eng.execute(t.insert().values(y=5))
    eq_(r.inserted_primary_key, [0])
def test_insert_from_select_override_defaults(self, connection):
    """INSERT..FROM SELECT fills the omitted 'foo' column from its
    scalar column default (12) on every row."""
    src = self.tables.data
    dest = Table(
        "sometable",
        self.metadata,
        Column("x", Integer),
        Column("foo", Integer, default=12),
        Column("y", Integer),
    )
    dest.create(connection)

    stmt = dest.insert().from_select(
        ["x", "y"], select([src.c.x, src.c.y])
    )
    connection.execute(stmt)

    rows = list(connection.execute(dest.select().order_by(dest.c.x)))
    eq_(rows, [(2, 12, 5), (7, 12, 12)])
def test_rollback_deadlock(self):
    """test that returning connections to the pool clears any object
    locks."""
    conn1 = testing.db.connect()
    conn2 = testing.db.connect()
    # NOTE(review): 'metadata' is the module-level global set up in
    # setup_class, not an instance attribute
    users = Table(
        "deadlock_users",
        metadata,
        Column("user_id", INT, primary_key=True),
        Column("user_name", VARCHAR(20)),
        test_needs_acid=True,
    )
    users.create(conn1)
    conn1.execute("select * from deadlock_users")
    conn1.close()

    # without auto-rollback in the connection pool's return() logic,
    # this deadlocks in PostgreSQL, because conn1 is returned to the
    # pool but still has a lock on "deadlock_users". comment out the
    # rollback in pool/ConnectionFairy._close() to see !
    users.drop(conn2)
    conn2.close()
def test_numerics_broken_inspection(self):
    """Numeric scenarios where Oracle type info is 'broken',
    returning us precision, scale of the form (0, 0) or (0, -127). We
    convert to Decimal and let int()/float() processors take over.
    """
    metadata = self.metadata

    # this test requires cx_oracle 5
    foo = Table(
        "foo",
        metadata,
        Column("idata", Integer),
        Column("ndata", Numeric(20, 2)),
        Column("ndata2", Numeric(20, 2)),
        Column("nidata", Numeric(5, 0)),
        Column("fdata", Float()),
    )
    foo.create()
    foo.insert().execute(
        {
            "idata": 5,
            "ndata": decimal.Decimal("45.6"),
            "ndata2": decimal.Decimal("45.0"),
            "nidata": decimal.Decimal("53"),
            "fdata": 45.68392,
        }
    )

    # plain textual select: cursor metadata is intact, so types come
    # back as declared
    stmt = "SELECT idata, ndata, ndata2, nidata, fdata FROM foo"
    row = testing.db.execute(stmt).fetchall()[0]
    eq_(
        [type(x) for x in row],
        [int, decimal.Decimal, decimal.Decimal, int, float],
    )
    eq_(
        row,
        (
            5,
            decimal.Decimal("45.6"),
            decimal.Decimal("45"),
            53,
            45.683920000000001,
        ),
    )

    # with a nested subquery,
    # both Numeric values that don't have decimal places, regardless
    # of their originating type, come back as ints with no useful
    # typing information beyond "numeric". So native handler
    # must convert to int.
    # this means our Decimal converters need to run no matter what.
    # totally sucks.
    stmt = """
    SELECT
        (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
        (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
            FROM DUAL) AS ndata,
        (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
            FROM DUAL) AS ndata2,
        (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
            FROM DUAL) AS nidata,
        (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata
    FROM dual
    """
    row = testing.db.execute(stmt).fetchall()[0]
    eq_(
        [type(x) for x in row],
        [int, decimal.Decimal, int, int, decimal.Decimal],
    )
    eq_(
        row,
        (5, decimal.Decimal("45.6"), 45, 53, decimal.Decimal("45.68392")),
    )

    # supplying explicit result types via text().columns() restores
    # the declared types despite the broken cursor metadata
    row = testing.db.execute(
        text(stmt).columns(
            idata=Integer(),
            ndata=Numeric(20, 2),
            ndata2=Numeric(20, 2),
            nidata=Numeric(5, 0),
            fdata=Float(),
        )
    ).fetchall()[0]
    eq_(
        [type(x) for x in row],
        [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float],
    )
    eq_(
        row,
        (
            5,
            decimal.Decimal("45.6"),
            decimal.Decimal("45"),
            decimal.Decimal("53"),
            45.683920000000001,
        ),
    )

    # same scenario wrapped in a labeled/ROWNUM subquery, as produced
    # by a typical SQLAlchemy-generated pagination query
    stmt = """
    SELECT
        anon_1.idata AS anon_1_idata,
        anon_1.ndata AS anon_1_ndata,
        anon_1.ndata2 AS anon_1_ndata2,
        anon_1.nidata AS anon_1_nidata,
        anon_1.fdata AS anon_1_fdata
    FROM (SELECT idata, ndata, ndata2, nidata, fdata FROM (
        SELECT
            (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
            (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
                FROM DUAL) AS ndata,
            (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
                FROM DUAL) AS ndata2,
            (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
                FROM DUAL) AS nidata,
            (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL)
                AS fdata
        FROM dual
    ) WHERE ROWNUM >= 0) anon_1
    """
    row = testing.db.execute(stmt).fetchall()[0]
    eq_(
        [type(x) for x in row],
        [int, decimal.Decimal, int, int, decimal.Decimal],
    )
    eq_(
        row,
        (5, decimal.Decimal("45.6"), 45, 53, decimal.Decimal("45.68392")),
    )

    # explicit types again fix the labeled form
    row = testing.db.execute(
        text(stmt).columns(
            anon_1_idata=Integer(),
            anon_1_ndata=Numeric(20, 2),
            anon_1_ndata2=Numeric(20, 2),
            anon_1_nidata=Numeric(5, 0),
            anon_1_fdata=Float(),
        )
    ).fetchall()[0]
    eq_(
        [type(x) for x in row],
        [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float],
    )
    eq_(
        row,
        (
            5,
            decimal.Decimal("45.6"),
            decimal.Decimal("45"),
            decimal.Decimal("53"),
            45.683920000000001,
        ),
    )

    # asdecimal flags flip the Python-side types per column
    row = testing.db.execute(
        text(stmt).columns(
            anon_1_idata=Integer(),
            anon_1_ndata=Numeric(20, 2, asdecimal=False),
            anon_1_ndata2=Numeric(20, 2, asdecimal=False),
            anon_1_nidata=Numeric(5, 0, asdecimal=False),
            anon_1_fdata=Float(asdecimal=True),
        )
    ).fetchall()[0]
    eq_(
        [type(x) for x in row], [int, float, float, float, decimal.Decimal]
    )
    eq_(row, (5, 45.6, 45, 53, decimal.Decimal("45.68392")))