def test_pre_ping_weakref_finalizer(self):
    """With pre_ping enabled, a checked-out connection reclaimed by the
    garbage collector after a backend restart is closed outright rather
    than receiving a reset-on-return rollback."""
    pool = self._pool_fixture(pre_ping=True)

    conn = pool.connect()
    stale_dbapi_conn = conn.connection
    conn.close()

    # normal checkout + checkin: ping cursor, then rollback reset
    eq_(stale_dbapi_conn.mock_calls, [call.cursor(), call.rollback()])

    self.dbapi.shutdown("execute", stop=True)
    self.dbapi.restart()

    conn = pool.connect()
    fresh_dbapi_conn = conn.connection

    del conn
    gc_collect()

    # new connection was reset on return appropriately
    eq_(fresh_dbapi_conn.mock_calls, [call.cursor(), call.rollback()])

    # old connection was just closed - did not get an
    # erroneous reset on return
    eq_(
        stale_dbapi_conn.mock_calls,
        [call.cursor(), call.rollback(), call.cursor(), call.close()],
    )
def test_conn_reusable(self):
    """A connection invalidated by a backend shutdown reconnects
    transparently on the next statement."""
    connection = db.connect()
    connection.execute(select([1]))
    assert len(dbapi.connections) == 1

    dbapi.shutdown()

    # statement against the dead backend raises and invalidates
    assert_raises(
        tsa.exc.DBAPIError,
        connection.execute, select([1])
    )
    assert not connection.closed
    assert connection.invalidated

    # the pool was recycled; every DBAPI connection goes away once
    # unreachable references are collected
    gc_collect()
    assert len(dbapi.connections) == 0

    # next use triggers the transparent reconnect
    connection.execute(select([1]))
    assert not connection.invalidated
    assert len(dbapi.connections) == 1
def test_invalidate_trans(self):
    """An invalidated connection inside an active transaction refuses to
    reconnect (or commit) until the transaction is rolled back.

    Fixes: the final ``except tsa.exc.InvalidRequestError, e:`` used
    Python-2-only syntax (a SyntaxError on Python 3); both hand-rolled
    try/except/assert-False checks are replaced with the
    ``assert_raises`` helpers this block already uses.
    """
    conn = db.connect()
    trans = conn.begin()
    dbapi.shutdown()

    # statement against the dead backend raises and invalidates
    assert_raises(
        tsa.exc.DBAPIError,
        conn.execute, select([1])
    )

    # assert was invalidated
    gc_collect()
    assert len(dbapi.connections) == 0
    assert not conn.closed
    assert conn.invalidated
    assert trans.is_active

    assert_raises_message(
        tsa.exc.StatementError,
        "Can't reconnect until invalid transaction is rolled back",
        conn.execute, select([1])
    )
    assert trans.is_active

    # committing the dangling transaction is refused with the same message
    assert_raises_message(
        tsa.exc.InvalidRequestError,
        "Can't reconnect until invalid transaction is rolled back",
        trans.commit
    )
def test_gced_delete_on_rollback(self):
    # A pending-delete object that is garbage collected mid-transaction
    # is skipped during rollback snapshot restore (1.1 behavior).
    # NOTE(review): another definition with this same name appears later
    # in this module; in Python the later one shadows this one -- confirm
    # which version is intended to run.
    User, users = self.classes.User, self.tables.users
    s = self.session()
    u1 = User(name='ed')
    s.add(u1)
    s.commit()
    s.delete(u1)
    u1_state = attributes.instance_state(u1)
    assert u1_state in s.identity_map.all_states()
    assert u1_state in s._deleted
    s.flush()
    assert u1_state not in s.identity_map.all_states()
    assert u1_state not in s._deleted
    del u1
    gc_collect()
    # the state's object reference is gone once the instance is collected
    assert u1_state.obj() is None

    s.rollback()
    # new in 1.1, not in identity map if the object was
    # gc'ed and we restore snapshot; we've changed update_impl
    # to just skip this object
    assert u1_state not in s.identity_map.all_states()

    # in any version, the state is replaced by the query
    # because the identity map would switch it
    u1 = s.query(User).filter_by(name='ed').one()
    assert u1_state not in s.identity_map.all_states()
    assert s.scalar(users.count()) == 1
    s.delete(u1)
    s.flush()
    assert s.scalar(users.count()) == 0
    s.commit()
def test_gced_delete_on_rollback(self):
    # A pending-delete object that is garbage collected mid-transaction:
    # here the dead state is still restored into the identity map on
    # rollback (contrast with the other definition of this test in this
    # module, which asserts the opposite -- presumably an older/newer
    # behavior; confirm which is current).
    User, users = self.classes.User, self.tables.users
    s = self.session()
    u1 = User(name='ed')
    s.add(u1)
    s.commit()
    s.delete(u1)
    u1_state = attributes.instance_state(u1)
    assert u1_state in s.identity_map.all_states()
    assert u1_state in s._deleted
    s.flush()
    assert u1_state not in s.identity_map.all_states()
    assert u1_state not in s._deleted
    del u1
    gc_collect()
    # the state's object reference is gone once the instance is collected
    assert u1_state.obj() is None

    s.rollback()
    # the dead state is put back by the rollback here
    assert u1_state in s.identity_map.all_states()

    # querying replaces the dead state in the identity map
    u1 = s.query(User).filter_by(name='ed').one()
    assert u1_state not in s.identity_map.all_states()
    assert s.scalar(users.count()) == 1
    s.delete(u1)
    s.flush()
    assert s.scalar(users.count()) == 0
    s.commit()
def test_listener_collection_removed_cleanup(self):
    """Registry bookkeeping for an event listener is purged once the
    listening target is garbage collected."""
    from sqlalchemy.event import registry

    Target = self._fixture()

    listener = Mock()
    target = Target()

    event.listen(target, "event_one", listener)

    # key must be captured while the target is still alive
    key = (id(target), "event_one", id(listener))

    assert key in registry._key_to_collection
    collection_ref = list(registry._key_to_collection[key])[0]
    assert collection_ref in registry._collection_to_key

    target.dispatch.event_one("t1")

    del target
    gc_collect()

    # both registry mappings are gone after collection
    assert key not in registry._key_to_collection
    assert collection_ref not in registry._collection_to_key
def _test_overflow(self, thread_count, max_overflow):
    """Hammer a QueuePool from many threads and verify the observed
    overflow never exceeds ``max_overflow``."""
    gc_collect()

    dbapi = MockDBAPI()

    def creator():
        # slow connect to encourage overflow contention
        time.sleep(.05)
        return dbapi.connect()

    qpool = pool.QueuePool(creator=creator, pool_size=3, timeout=2,
                           max_overflow=max_overflow)
    overflow_peaks = []

    def hammer():
        for _ in range(10):
            try:
                checked_out = qpool.connect()
                time.sleep(.005)
                overflow_peaks.append(qpool.overflow())
                checked_out.close()
                del checked_out
            except tsa.exc.TimeoutError:
                pass

    workers = []
    for _ in range(thread_count):
        worker = threading.Thread(target=hammer)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()

    self.assert_(max(overflow_peaks) <= max_overflow)

    # all pool-tracked references must be released
    lazy_gc()
    assert not pool._refs
def test_auto_detach_on_gc_session(self):
    """An object whose owning Session is garbage collected becomes
    detached and may be added to a different Session."""
    users, User = self.tables.users, self.classes.User

    mapper(User, users)

    owning_session = Session()
    u1 = User(name='u1')
    owning_session.add(u1)
    owning_session.commit()

    # while the owning session is alive, another session refuses u1
    other_session = Session()
    assert_raises_message(
        sa.exc.InvalidRequestError,
        r".*is already attached to session",
        other_session.add, u1
    )

    # garbage collect the owning session
    del owning_session
    gc_collect()

    # the other session accepts u1 now that its owner is gone
    other_session.add(u1)
    assert u1 in other_session
def test_weakref_with_cycles_o2o(self):
    # Clean (unmodified) objects in a one-to-one backref cycle are
    # dropped from the identity map when collected; dirty ones are
    # held until the flush.
    Address, addresses, users, User = (self.classes.Address,
                                       self.tables.addresses,
                                       self.tables.users,
                                       self.classes.User)
    s = sessionmaker()()
    mapper(User, users, properties={
        "address": relationship(Address, backref="user", uselist=False)
    })
    mapper(Address, addresses)
    s.add(User(name="ed", address=Address(email_address="ed1")))
    s.commit()

    user = s.query(User).options(joinedload(User.address)).one()
    # touch the backref to establish the reference cycle
    user.address.user
    eq_(user, User(name="ed", address=Address(email_address="ed1")))

    del user
    gc_collect()
    # clean objects were reclaimed despite the cycle
    assert len(s.identity_map) == 0

    user = s.query(User).options(joinedload(User.address)).one()
    user.address.email_address = 'ed2'
    user.address.user  # lazyload

    del user
    gc_collect()
    # both objects survive collection because one is dirty
    assert len(s.identity_map) == 2

    s.commit()
    user = s.query(User).options(joinedload(User.address)).one()
    eq_(user, User(name="ed", address=Address(email_address="ed2")))
def test_autoflush_expressions(self):
    """test that an expression which is dependent on object state is
    evaluated after the session autoflushes.   This is the lambda
    inside of strategies.py lazy_clause.
    """
    users, Address, addresses, User = (self.tables.users,
                                       self.classes.Address,
                                       self.tables.addresses,
                                       self.classes.User)

    mapper(User, users, properties={
        'addresses': relationship(Address, backref="user")})
    mapper(Address, addresses)

    sess = create_session(autoflush=True, autocommit=False)
    u = User(name='ed', addresses=[Address(email_address='foo')])
    sess.add(u)
    # autoflush happens when the filtered query executes
    eq_(sess.query(Address).filter(Address.user == u).one(),
        Address(email_address='foo'))

    # still works after "u" is garbage collected
    sess.commit()
    sess.close()
    u = sess.query(User).get(u.id)
    q = sess.query(Address).filter(Address.user == u)
    del u
    gc_collect()

    eq_(q.one(), Address(email_address='foo'))
def test_weakref_pickled(self):
    """A pickled/unpickled copy of a dirty instance is strongly held by
    the session until flushed, then released."""
    users, User = self.tables.users, pickleable.User
    session = create_session()
    mapper(User, users)

    session.add(User(name='ed'))
    session.flush()
    assert not session.dirty

    original = session.query(User).one()
    original.name = 'fred'
    session.expunge(original)

    # round-trip the dirty instance through pickle
    clone = pickle.loads(pickle.dumps(original))
    del original
    session.add(clone)
    del clone

    gc_collect()
    # the dirty clone is kept alive by the session itself
    assert len(session.identity_map) == 1
    assert len(session.dirty) == 1
    assert None not in session.dirty

    session.flush()
    gc_collect()
    # after flush the clean instance may be collected
    assert not session.dirty
    assert not session.identity_map
def test_expire_all(self):
    """expire_all() lets eagerly loaded children be garbage collected;
    re-querying restores the full object graph."""
    users, Address, addresses, User = (
        self.tables.users,
        self.classes.Address,
        self.tables.addresses,
        self.classes.User,
    )
    mapper(
        User,
        users,
        properties={
            "addresses": relationship(
                Address, backref="user", lazy="joined",
                order_by=addresses.c.id)
        },
    )
    mapper(Address, addresses)

    session = create_session()
    loaded = session.query(User).order_by(User.id).all()
    eq_(self.static.user_address_result, loaded)
    # 4 users + 5 addresses live in the session
    eq_(len(list(session)), 9)

    session.expire_all()
    gc_collect()
    # the joined-eager addresses were collected; only users remain
    eq_(len(list(session)), 4)

    loaded = session.query(User).order_by(User.id).all()
    # keep a strong reference so the reloaded graph stays alive
    strong_ref = loaded[1]
    eq_(self.static.user_address_result, loaded)
    eq_(len(list(session)), 9)
def profile(*args):
    # Run ``func`` repeatedly, sampling the total live-object count after
    # each run, and fail if the count keeps growing (a reference leak).
    # Python 2 code (uses the print statement).
    gc_collect()
    samples = [0 for x in range(0, times)]
    for x in range(0, times):
        func(*args)
        gc_collect()
        samples[x] = len(gc.get_objects())

    print "sample gc sizes:", samples

    assert len(_sessions) == 0

    # flatline: the last four samples all equal the fifth-from-last
    # NOTE(review): assumes times >= 5 -- confirm callers guarantee this
    for x in samples[-4:]:
        if x != samples[-5]:
            flatline = False
            break
    else:
        flatline = True

    # object count is bigger than when it started
    if not flatline and samples[-1] > samples[0]:
        for x in samples[1:-2]:
            # see if a spike bigger than the endpoint exists
            if x > samples[-1]:
                break
        else:
            assert False, repr(samples) + " " + repr(flatline)
def all():
    # Benchmark driver: time ORM load and update/flush phases, reporting
    # wall, user and system time per phase.  Python 2 code (print
    # statement, time.clock).
    setup()
    try:
        t, t2 = 0, 0

        def usage(label):
            # report elapsed times since the last usage.snap() call;
            # t/t2 are read from the enclosing scope
            now = resource.getrusage(resource.RUSAGE_SELF)
            print "%s: %0.3fs real, %0.3fs user, %0.3fs sys" % (
                label, t2 - t,
                now.ru_utime - usage.last.ru_utime,
                now.ru_stime - usage.last.ru_stime)
            usage.snap(now)
        # stash the baseline rusage on the function object itself
        usage.snap = lambda stats=None: setattr(
            usage, 'last',
            stats or resource.getrusage(resource.RUSAGE_SELF))

        session = create_session()

        gc_collect()
        usage.snap()
        t = time.clock()
        people = orm_select(session)
        t2 = time.clock()
        usage('load objects')

        gc_collect()
        usage.snap()
        t = time.clock()
        update_and_flush(session, people)
        t2 = time.clock()
        usage('update and flush')
    finally:
        metadata.drop_all()
def test_module_reg_cleanout_cls_to_base(self):
    """Collecting the only class in a module prunes that module's entry
    from the registry."""
    base = weakref.WeakValueDictionary()
    blat_cls = MockClass(base, "single.Blat")
    clsregistry.add_class("Blat", blat_cls)

    module_reg = base['_sa_module_registry']
    assert module_reg['single']

    del blat_cls
    gc_collect()

    # the module entry went away with its last class
    assert 'single' not in module_reg
def test_module_reg_cleanout_sub_to_base(self):
    """Collecting a class nested two modules deep prunes the whole
    intermediate module chain from the registry."""
    base = weakref.WeakValueDictionary()
    hoho_cls = MockClass(base, "bat.bar.Hoho")
    clsregistry.add_class("Hoho", hoho_cls)

    module_reg = base['_sa_module_registry']
    assert module_reg['bat']['bar']

    del hoho_cls
    gc_collect()

    # the top-level 'bat' entry is removed, not just 'bat.bar'
    assert 'bat' not in module_reg
def test_copy(self):
    """A copy.copy of a parent keeps an independent child collection
    after the original parent is garbage collected.

    Fixes a latent bug: the assertion's failure message referenced
    ``p`` after ``del p``, so a failing assert would raise NameError
    instead of showing the mismatched collection; it now shows
    ``p_copy.kids``.
    """
    mapper(Parent, self.parents,
           properties=dict(children=relationship(Child)))
    mapper(Child, self.children)

    p = Parent('p1')
    p.kids.extend(['c1', 'c2'])
    p_copy = copy.copy(p)
    del p
    gc_collect()

    # the copy's collection survives collection of the original
    assert set(p_copy.kids) == set(['c1', 'c2']), p_copy.kids
def test_subclass(self):
    """A dynamically created event-target subclass is fully collectible
    once all references to it are dropped."""
    class SubTarget(self.Target):
        pass

    instance = SubTarget()
    instance.dispatch.some_event(1, 2)

    # drop both the instance and the class itself
    del instance
    del SubTarget
    gc_collect()

    # the base class no longer reports the collected subclass
    eq_(self.Target.__subclasses__(), [])
def prune():
    """Drop the session's strong refs, collect, re-snapshot the refs,
    and return how many identity-map entries disappeared."""
    # no-op for sessions that never set up weak-ref tracking
    if 'refs' not in session.info:
        return 0

    size_before = len(session.identity_map)
    session.info['refs'].clear()
    gc_collect()
    session.info['refs'] = {
        state.obj() for state in session.identity_map.all_states()}
    return size_before - len(session.identity_map)
def test_cleanout_elements(self):
    """WeakSequence built from a list drops an entry once its referent
    is collected."""
    class Foo(object):
        pass

    first, middle, last = Foo(), Foo(), Foo()
    seq = WeakSequence([first, middle, last])
    eq_(len(seq), 3)
    eq_(len(seq._storage), 3)

    del middle
    gc_collect()

    # both the public length and internal storage shrank
    eq_(len(seq), 2)
    eq_(len(seq._storage), 2)
def test_resolve(self):
    """Fully-qualified name resolution distinguishes two classes that
    share a short name."""
    base = weakref.WeakValueDictionary()
    bar_foo = MockClass(base, "foo.bar.Foo")
    alt_foo = MockClass(base, "foo.alt.Foo")
    clsregistry.add_class("Foo", bar_foo)
    clsregistry.add_class("Foo", alt_foo)
    resolver = clsregistry._resolver(bar_foo, MockProp())

    gc_collect()

    # each dotted path resolves to its own class
    is_(resolver("foo.bar.Foo")(), bar_foo)
    is_(resolver("foo.alt.Foo")(), alt_foo)
def test_stale_state_positive_gc(self):
    """After the original instance is expunged and collected, removing
    the child from a freshly loaded parent still de-parents it."""
    User = self.classes.User
    sess, user, address = self._fixture()

    sess.expunge(user)
    del user
    gc_collect()

    # reload the parent and remove the child from its collection
    user = sess.query(User).first()
    user.addresses.remove(address)

    self._assert_not_hasparent(address)
def test_move_gc_session_persistent_dirty(self):
    """A dirty persistent object keeps its modified state through
    garbage collection of its session and into a new one."""
    sess, u1 = self._persistent_fixture()
    u1.name = 'edchanged'
    self._assert_cycle(u1)
    self._assert_modified(u1)

    # collect the owning session out from under the object
    del sess
    gc_collect()
    self._assert_cycle(u1)

    # the object joins a new session still dirty
    new_session = Session()
    new_session.add(u1)
    self._assert_cycle(u1)
    self._assert_modified(u1)
def test_reconnect(self):
    """test that an 'is_disconnect' condition will invalidate the
    connection, and additionally dispose the previous connection
    pool and recreate.

    Improvement: the hand-rolled try/except/assert-False disconnect
    check is replaced with ``assert_raises``, matching how the sibling
    reconnect tests in this module assert the same condition.
    """

    pid = id(db.pool)

    # make a connection
    conn = db.connect()

    # connection works
    conn.execute(select([1]))

    # create a second connection within the pool, which we'll ensure
    # also goes away
    conn2 = db.connect()
    conn2.close()

    # two connections opened total now
    assert len(dbapi.connections) == 2

    # set it to fail
    dbapi.shutdown()

    # statement on the dead connection raises and invalidates it
    assert_raises(
        tsa.exc.DBAPIError,
        conn.execute, select([1])
    )

    # assert was invalidated
    assert not conn.closed
    assert conn.invalidated

    # close shouldn't break
    conn.close()

    # the engine disposed the old pool and built a new one
    assert id(db.pool) != pid

    # ensure all connections closed (pool was recycled)
    gc_collect()
    assert len(dbapi.connections) == 0

    conn = db.connect()
    conn.execute(select([1]))
    conn.close()
    assert len(dbapi.connections) == 1
def test_cls_not_strong_ref(self):
    # _DeferredMapperConfig must not hold declarative classes strongly:
    # dropping the last reference to a class removes its config entry,
    # and prepare() consumes whatever configs remain.
    class User(decl.DeferredReflection, fixtures.ComparableEntity,
               Base):
        __tablename__ = 'users'

    class Address(decl.DeferredReflection, fixtures.ComparableEntity,
                  Base):
        __tablename__ = 'addresses'

    eq_(len(_DeferredMapperConfig._configs), 2)

    del Address
    gc_collect()
    # only the User config survives the collection of Address
    eq_(len(_DeferredMapperConfig._configs), 1)

    decl.DeferredReflection.prepare(testing.db)
    # prepare() mapped the remaining class and emptied the configs
    assert not _DeferredMapperConfig._configs
def test_weakref_kaboom(self):
    """A checked-out connection that is simply dereferenced (never
    closed) is reclaimed by the pool via its weakref cleanup."""
    qpool = self._queuepool_fixture(
        pool_size=3, max_overflow=-1, use_threadlocal=True)

    returned = qpool.connect()
    abandoned = qpool.connect()
    returned.close()

    # drop every reference without closing the second connection
    abandoned = None
    del returned
    del abandoned
    gc_collect()

    # the abandoned connection was reclaimed; nothing is checked out
    assert qpool.checkedout() == 0
    replacement = qpool.connect()
    assert replacement is not None
def _test_prune(self, fixture):
    """Exercise the prune() helper across add/flush/expire/delete
    cycles, checking identity-map size at each step.

    Improvement: the local previously named ``id`` shadowed the builtin
    ``id``; renamed to ``user_id``.
    """
    s, prune = fixture()

    users, User = self.tables.users, self.classes.User
    mapper(User, users)

    for o in [User(name='u%s' % x) for x in range(10)]:
        s.add(o)
    # o is still live after this loop...

    self.assert_(len(s.identity_map) == 0)
    eq_(prune(), 0)
    s.flush()
    gc_collect()
    eq_(prune(), 9)
    # o is still in local scope here, so still present
    self.assert_(len(s.identity_map) == 1)
    user_id = o.id
    del o
    eq_(prune(), 1)
    self.assert_(len(s.identity_map) == 0)

    u = s.query(User).get(user_id)
    eq_(prune(), 0)
    self.assert_(len(s.identity_map) == 1)
    u.name = 'squiznart'
    del u
    # dirty objects are not pruned
    eq_(prune(), 0)
    self.assert_(len(s.identity_map) == 1)
    s.flush()
    eq_(prune(), 1)
    self.assert_(len(s.identity_map) == 0)

    s.add(User(name='x'))
    eq_(prune(), 0)
    self.assert_(len(s.identity_map) == 0)
    s.flush()
    self.assert_(len(s.identity_map) == 1)
    eq_(prune(), 1)
    self.assert_(len(s.identity_map) == 0)

    u = s.query(User).get(user_id)
    s.delete(u)
    del u
    # pending-delete objects are not pruned
    eq_(prune(), 0)
    self.assert_(len(s.identity_map) == 1)
    s.flush()
    eq_(prune(), 0)
    self.assert_(len(s.identity_map) == 0)
def test_cleanout_appended(self):
    """Entries appended one at a time vanish from WeakSequence once
    their referents are collected."""
    class Foo(object):
        pass

    first, middle, last = Foo(), Foo(), Foo()
    seq = WeakSequence()
    seq.append(first)
    seq.append(middle)
    seq.append(last)
    eq_(len(seq), 3)
    eq_(len(seq._storage), 3)

    del middle
    gc_collect()

    # both the public length and internal storage shrank
    eq_(len(seq), 2)
    eq_(len(seq._storage), 2)
def test_dupe_classes_back_to_one(self):
    """When one of two same-named classes is collected, short-name
    resolution falls back to the survivor."""
    base = weakref.WeakValueDictionary()
    surviving = MockClass(base, "foo.bar.Foo")
    collected = MockClass(base, "foo.alt.Foo")
    clsregistry.add_class("Foo", surviving)
    clsregistry.add_class("Foo", collected)

    del collected
    gc_collect()

    # registry restores itself to just the one class
    resolver = clsregistry._resolver(surviving, MockProp())
    resolver = resolver("Foo")
    is_(resolver(), surviving)
def test_same_module_same_name(self):
    # Registering a second class with the identical module-qualified
    # name warns that it replaces the first in the lookup table.
    base = weakref.WeakValueDictionary()
    f1 = MockClass(base, "foo.bar.Foo")
    f2 = MockClass(base, "foo.bar.Foo")
    clsregistry.add_class("Foo", f1)
    # collect any stale weakrefs before triggering the duplicate
    gc_collect()

    assert_raises_message(
        exc.SAWarning,
        "This declarative base already contains a class with the "
        "same class name and module name as foo.bar.Foo, and "
        "will be replaced in the string-lookup table.",
        clsregistry.add_class,
        "Foo", f2
    )
def assert_no_mappers():
    # Tear down all mappers, collect, and verify none remain registered.
    clear_mappers()
    gc_collect()
    assert len(_mapper_registry) == 0
def all():
    # Benchmark driver: time six select strategies (raw sqlite,
    # instrumented sqlite, sqlalchemy.sql x2, ORM, ORM joined),
    # reporting wall/user/sys time per phase.  Python 2 code (print
    # statement, time.clock).
    setup()
    try:
        t, t2 = 0, 0

        def usage(label):
            # report elapsed times since the last usage.snap() call;
            # t/t2 are read from the enclosing scope
            now = resource.getrusage(resource.RUSAGE_SELF)
            print "%s: %0.3fs real, %0.3fs user, %0.3fs sys" % (
                label, t2 - t,
                now.ru_utime - usage.last.ru_utime,
                now.ru_stime - usage.last.ru_stime)
            usage.snap(now)
        # stash the baseline rusage on the function object itself
        usage.snap = lambda stats=None: setattr(
            usage, 'last',
            stats or resource.getrusage(resource.RUSAGE_SELF))

        gc_collect()
        usage.snap()
        t = time.clock()
        sqlite_select(RawPerson)
        t2 = time.clock()
        usage('sqlite select/native')

        gc_collect()
        usage.snap()
        t = time.clock()
        sqlite_select(Person)
        t2 = time.clock()
        usage('sqlite select/instrumented')

        gc_collect()
        usage.snap()
        t = time.clock()
        sql_select(RawPerson)
        t2 = time.clock()
        usage('sqlalchemy.sql select/native')

        gc_collect()
        usage.snap()
        t = time.clock()
        sql_select(Person)
        t2 = time.clock()
        usage('sqlalchemy.sql select/instrumented')

        gc_collect()
        usage.snap()
        t = time.clock()
        orm_select()
        t2 = time.clock()
        usage('sqlalchemy.orm fetch')

        gc_collect()
        usage.snap()
        t = time.clock()
        joined_orm_select()
        t2 = time.clock()
        usage('sqlalchemy.orm "joined" fetch')
    finally:
        metadata.drop_all()
def assert_no_mappers():
    # Tear down all mappers and run a collection pass.
    # NOTE(review): unlike the other assert_no_mappers() definition in
    # this file, this one performs no assertion afterwards -- confirm
    # whether the _mapper_registry check was dropped intentionally.
    clear_mappers()
    gc_collect()
def profile(queue, func_args):
    # Subprocess worker: run ``func`` in batches of 5, sampling the live
    # object count, and report over ``queue`` whether the count levels
    # off (success) or keeps growing (leak).  Tuning knobs ``maxtimes``,
    # ``func``, ``get_num_objects`` and ``assert_no_sessions`` come from
    # the enclosing scope.
    # give testing.db a brand new pool and don't
    # touch the existing pool, since closing a socket
    # in the subprocess can affect the parent
    testing.db.pool = testing.db.pool.recreate()

    gc_collect()
    samples = []
    max_ = 0
    max_grew_for = 0
    success = False
    until_maxtimes = 0
    try:
        while True:
            # give up after maxtimes // 5 batches
            if until_maxtimes >= maxtimes // 5:
                break
            for x in range(5):
                try:
                    func(*func_args)
                except Exception as err:
                    queue.put(
                        (
                            "result",
                            False,
                            "Test raised an exception: %r" % err,
                        )
                    )
                    raise
                gc_collect()
                samples.append(
                    get_num_objects()
                    if get_num_objects is not None
                    else len(get_objects_skipping_sqlite_issue())
                )

            if assert_no_sessions:
                assert len(_sessions) == 0, "sessions remain"

            # queue.put(('samples', samples))

            latest_max = max(samples[-5:])
            if latest_max > max_:
                # still growing: record the new high-water mark and retry
                queue.put(
                    (
                        "status",
                        "Max grew from %s to %s, max has "
                        "grown for %s samples"
                        % (max_, latest_max, max_grew_for),
                    )
                )
                max_ = latest_max
                max_grew_for += 1
                until_maxtimes += 1
                continue
            else:
                # flat batch: count down the grace attempts earned above
                queue.put(
                    (
                        "status",
                        "Max remained at %s, %s more attempts left"
                        % (max_, max_grew_for),
                    )
                )
                max_grew_for -= 1
                if max_grew_for == 0:
                    success = True
                    break
    except Exception as err:
        queue.put(("result", False, "got exception: %s" % err))
    else:
        if not success:
            queue.put(
                (
                    "result",
                    False,
                    "Ran for a total of %d times, memory kept "
                    "growing: %r" % (maxtimes, samples),
                )
            )
        else:
            queue.put(("result", True, "success"))
def generate():
    """Load the id==7 User rows, collect, and return them."""
    loaded = s.query(User).filter(User.id == 7).all()
    gc_collect()
    return loaded
def generate():
    """Load all Users with addresses joined-eagerly, collect, return."""
    loaded = s.query(User).options(joinedload(User.addresses)).all()
    gc_collect()
    return loaded