def teardown(self):
    """Per-test cleanup: reset mappers, drop pooled connections, and
    delete the on-disk shard databases created during setup."""
    clear_mappers()
    # Invalidate one connection per shard engine so pooled state is discarded.
    for engine in self.dbs:
        engine.connect().invalidate()
    # Remove the four SQLite shard files (shard1..shard4).
    for shard_num in (1, 2, 3, 4):
        os.remove("shard%d_%s.db" % (shard_num, provision.FOLLOWER_IDENT))
def go():
    """Map A/B, persist a small object graph, verify it round-trips,
    then delete the rows and tear the mappings down."""
    mapper(
        A,
        table1,
        properties={"bs": relationship(B, order_by=table2.c.col1)},
    )
    mapper(B, table2)

    session = create_session()

    # Three parents; the first and last carry child B rows.
    parent1, parent2, parent3 = A(col2="a1"), A(col2="a2"), A(col2="a3")
    parent1.bs.append(B(col2="b1"))
    parent1.bs.append(B(col2="b2"))
    parent3.bs.append(B(col2="b3"))
    for obj in (parent1, parent2, parent3):
        session.add(obj)
    session.flush()
    session.expunge_all()

    # Reload from the database and compare against the expected graph.
    loaded = session.query(A).order_by(A.col1).all()
    eq_(
        [
            A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
            A(col2="a2", bs=[]),
            A(col2="a3", bs=[B(col2="b3")]),
        ],
        loaded,
    )

    # Clean up rows and global mapper state for the next run.
    for obj in loaded:
        session.delete(obj)
    session.flush()
    session.close()
    clear_mappers()
def test_path_registry(self):
    """Walk PathRegistry entries through freshly-created aliased entities
    under memory profiling; repeated lookups must not leak."""
    metadata = MetaData()
    parent_table = Table(
        "a",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("foo", Integer),
        Column("bar", Integer),
    )
    child_table = Table(
        "b",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("a_id", ForeignKey("a.id")),
    )
    parent_mapper = mapper(A, parent_table, properties={"bs": relationship(B)})
    mapper(B, child_table)

    @profile_memory()
    def go():
        # Each pass builds a new alias and traverses a path through it.
        aliased_insp = sa.inspect(aliased(A))
        parent_mapper._path_registry[parent_mapper.attrs.bs][aliased_insp][
            parent_mapper.attrs.bar
        ]

    go()
    clear_mappers()
def test_noninherited_warning(self):
    """Attributes mapped only on the base are not inherited by a
    ``concrete=True`` subclass; setting them raises AttributeError."""
    A = self.classes.A
    B = self.classes.B
    Dest = self.classes.Dest
    a_table = self.tables.a_table
    b_table = self.tables.b_table
    dest_table = self.tables.dest_table

    # Round 1: a relationship on the base is not inherited by concrete B.
    mapper(A, a_table, properties={"some_dest": relationship(Dest)})
    mapper(B, b_table, inherits=A, concrete=True)
    mapper(Dest, dest_table)
    b = B()
    dest = Dest()
    assert_raises(AttributeError, setattr, b, "some_dest", dest)

    clear_mappers()

    # Round 2: a plain column property is likewise not inherited.
    mapper(A, a_table, properties={"a_id": a_table.c.id})
    mapper(B, b_table, inherits=A, concrete=True)
    mapper(Dest, dest_table)
    b = B()
    assert_raises(AttributeError, setattr, b, "a_id", 3)

    clear_mappers()

    # Round 3: re-establish the same mapping once more; configuration
    # itself must still succeed.
    mapper(A, a_table, properties={"a_id": a_table.c.id})
    mapper(B, b_table, inherits=A, concrete=True)
    mapper(Dest, dest_table)
def test_alias_pathing(self):
    """subqueryload with ``of_type`` against a fresh alias each pass must
    not grow memory; profiled over many iterations."""
    metadata = MetaData(self.engine)
    a_table = Table(
        "a",
        metadata,
        Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
        Column("bid", Integer, ForeignKey("b.id")),
        Column("type", String(30)),
    )
    asub_table = Table(
        "asub",
        metadata,
        Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        Column("data", String(30)),
    )
    b_table = Table(
        "b",
        metadata,
        Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
    )
    mapper(A, a_table, polymorphic_identity="a", polymorphic_on=a_table.c.type)
    mapper(ASub, asub_table, inherits=A, polymorphic_identity="asub")
    mapper(B, b_table, properties={"as_": relationship(A)})
    metadata.create_all()

    # Seed one B holding three ASub children, then drop the session.
    setup_session = Session()
    children = [ASub(data="a1"), ASub(data="a2"), ASub(data="a3")]
    setup_session.add(B(as_=children))
    setup_session.commit()
    del setup_session

    # sqlite has a slow enough growth here
    # that we have to run it more times to see the
    # "dip" again
    @profile_memory(maxtimes=120)
    def go():
        sess = Session()
        sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
        sess.close()

    try:
        go()
    finally:
        metadata.drop_all()
    clear_mappers()
def test_no_instrumentation(self):
    """A pickled instance deserializes correctly after mappers were
    cleared and re-created."""
    users = self.tables.users
    mapper(User, users)
    pickled = pickle.dumps(User(name="ed"), pickle.HIGHEST_PROTOCOL)

    # Drop and rebuild the mapping before unpickling.
    clear_mappers()
    mapper(User, users)

    restored = pickle.loads(pickled)
    # this fails unless the InstanceState
    # compiles the mapper
    eq_(str(restored), "User(name='ed')")
def test_no_mappers(self):
    """Unpickling a mapped instance after clear_mappers() raises
    UnmappedInstanceError rather than producing a broken object."""
    users = self.tables.users
    mapper(User, users)
    pickled = pickle.dumps(User(name="ed"), pickle.HIGHEST_PROTOCOL)

    # With no mappers configured, deserialization must fail loudly.
    clear_mappers()
    assert_raises_message(
        orm_exc.UnmappedInstanceError,
        "Cannot deserialize object of type "
        "<class 'sqlalchemy_1_3.testing.pickleable.User'> - no mapper()",
        pickle.loads,
        pickled,
    )
def test_concurrent_create(self):
    """Race mapper configuration (make_b) against querying (query_a)
    repeatedly; an UnmappedClassError captured by the query thread is
    re-raised to fail the test."""
    for _ in range(50):
        Base = declarative_base()
        clear_mappers()
        self.make_a(Base)

        result = [False]
        workers = [
            threading.Thread(target=self.make_b, args=(Base,)),
            threading.Thread(target=self.query_a, args=(Base, result)),
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # The query thread stashes any UnmappedClassError it saw.
        if isinstance(result[0], orm_exc.UnmappedClassError):
            raise result[0]
def teardown(self):
    """Reset global ORM mapper state after each test."""
    clear_mappers()
def teardown(self):
    """Run the base-class teardown, then reset global mapper state."""
    super(DeclarativeReflectionBase, self).teardown()
    clear_mappers()
def define_tables(cls, metadata):
    """Build the self-referential polymorphic fixture.

    Creates four tables (table1 plus two joined subtables and a "data"
    child table), the mapped classes, and the mappings used by the
    tests.  First verifies that the legacy ``select_table``-against-a-join
    style of mapping raises at configure time, then establishes the
    working mapping.
    """
    # The class statements below bind these names at module scope so the
    # tests can reference them directly.
    global Table1, Table1B, Table2, Table3, Data

    table1 = Table(
        "table1",
        metadata,
        Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
        # self-referential FK used by the "nxt"/"prev" relationship
        Column("related_id", Integer, ForeignKey("table1.id"), nullable=True),
        Column("type", String(30)),
        Column("name", String(30)),
    )
    table2 = Table(
        "table2",
        metadata,
        Column("id", Integer, ForeignKey("table1.id"), primary_key=True),
    )
    table3 = Table(
        "table3",
        metadata,
        Column("id", Integer, ForeignKey("table1.id"), primary_key=True),
    )
    data = Table(
        "data",
        metadata,
        Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
        Column("node_id", Integer, ForeignKey("table1.id")),
        Column("data", String(30)),
    )

    # join = polymorphic_union(
    #     {
    #         'table3' : table1.join(table3),
    #         'table2' : table1.join(table2),
    #         'table1' : table1.select(table1.c.type.in_(['table1', 'table1b'])),
    #     }, None, 'pjoin')

    join = table1.outerjoin(table2).outerjoin(table3).alias("pjoin")
    # join = None

    class Table1(object):
        def __init__(self, name, data=None):
            self.name = name
            if data is not None:
                self.data = data

        def __repr__(self):
            return "%s(%s, %s, %s)" % (
                self.__class__.__name__,
                self.id,
                repr(str(self.name)),
                repr(self.data),
            )

    class Table1B(Table1):
        pass

    class Table2(Table1):
        pass

    class Table3(Table1):
        pass

    class Data(object):
        def __init__(self, data):
            self.data = data

        def __repr__(self):
            return "%s(%s, %s)" % (
                self.__class__.__name__,
                self.id,
                repr(str(self.data)),
            )

    try:
        # this is how the mapping used to work.  ensure that this raises an
        # error now
        table1_mapper = mapper(
            Table1,
            table1,
            select_table=join,
            polymorphic_on=table1.c.type,
            polymorphic_identity="table1",
            properties={
                "nxt": relationship(
                    Table1,
                    backref=backref(
                        "prev", foreignkey=join.c.id, uselist=False
                    ),
                    uselist=False,
                    primaryjoin=join.c.id == join.c.related_id,
                ),
                "data": relationship(mapper(Data, data)),
            },
        )
        configure_mappers()
        assert False
    except Exception:
        # expected: the legacy style must fail to configure
        assert True

    clear_mappers()

    # currently, the "eager" relationships degrade to lazy relationships
    # due to the polymorphic load.
    # the "nxt" relationship used to have a "lazy='joined'" on it, but the
    # EagerLoader raises the "self-referential"
    # exception now.  since eager loading would never work for that
    # relationship anyway, its better that the user
    # gets an exception instead of it silently not eager loading.
    # NOTE: using "nxt" instead of "next" to avoid 2to3 turning it into
    # __next__() for some reason.
    table1_mapper = mapper(
        Table1,
        table1,
        # select_table=join,
        polymorphic_on=table1.c.type,
        polymorphic_identity="table1",
        properties={
            "nxt": relationship(
                Table1,
                backref=backref(
                    "prev", remote_side=table1.c.id, uselist=False
                ),
                uselist=False,
                primaryjoin=table1.c.id == table1.c.related_id,
            ),
            "data": relationship(
                mapper(Data, data), lazy="joined", order_by=data.c.id
            ),
        },
    )
    mapper(Table1B, inherits=table1_mapper, polymorphic_identity="table1b")
    mapper(
        Table2,
        table2,
        inherits=table1_mapper,
        polymorphic_identity="table2",
    )
    mapper(
        Table3,
        table3,
        inherits=table1_mapper,
        polymorphic_identity="table3",
    )
    configure_mappers()
    # the mapped primary key must reduce to table1.c.id only
    assert table1_mapper.primary_key == (
        table1.c.id,
    ), table1_mapper.primary_key
def assert_no_mappers():
    """Clear all mappers, force a GC pass, and verify the mapper
    registry is empty."""
    clear_mappers()
    gc_collect()
    remaining = len(_mapper_registry)
    assert remaining == 0