def childStore(self):
    """
    Create a store suitable for use in a child process, that is hooked up
    to the store that a parent test process is managing.
    """
    disableMemcacheForTest(TestCase())
    staticQuota = 3000
    attachmentRoot = (FilePath(self.sharedDBPath).child("attachments"))
    stubsvc = self.createService(lambda cf: Service())

    cp = ConnectionPool(
        stubsvc.produceConnection,
        maxConnections=1,
        dbtype=DatabaseType(DB_TYPE[0], DB_TYPE[1]),
    )
    # Attach the service to the running reactor.
    cp.startService()
    reactor.addSystemEventTrigger("before", "shutdown", cp.stopService)

    cds = CommonDataStore(
        cp.connection,
        {"push": StubNotifierFactory()},
        None,
        attachmentRoot, "",
        quota=staticQuota
    )
    return cds
def buildConnectionPool(testCase, schemaText="", dialect=SQLITE_DIALECT):
    """
    Build a L{ConnectionPool} for testing purposes, with the given
    C{testCase}.

    @param testCase: the test case to attach the resulting L{ConnectionPool}
        to.
    @type testCase: L{twisted.trial.unittest.TestCase}

    @param schemaText: The text of the schema with which to initialize the
        database.
    @type schemaText: L{str}

    @return: a L{ConnectionPool} service whose C{startService} method has
        already been invoked.
    @rtype: L{ConnectionPool}
    """
    sqlitename = testCase.mktemp()
    seqs = {}

    def connectionFactory(label=testCase.id()):
        conn = sqlite3.connect(sqlitename)

        def nextval(seq):
            result = seqs[seq] = seqs.get(seq, 0) + 1
            return result

        conn.create_function("nextval", 1, nextval)
        return conn

    con = connectionFactory()
    con.executescript(schemaText)
    con.commit()
    # Use the dialect passed in by the caller rather than hard-coding
    # SQLITE_DIALECT (which is only the default).
    pool = ConnectionPool(connectionFactory, paramstyle='numeric',
                          dialect=dialect)
    pool.startService()
    testCase.addCleanup(pool.stopService)
    return pool
def test_isRunning(self):
    """
    L{ConnectionPool.startService} should set its C{running} attribute to
    true.
    """
    pool = ConnectionPool(None)
    pool.reactor = ClockWithThreads()
    self.assertEquals(pool.running, False)
    pool.startService()
    self.assertEquals(pool.running, True)
def test_default(self):
    """
    If no value is given for the C{name} parameter to L{ConnectionPool}'s
    initializer then L{ConnectionPool.name} is C{None}.
    """
    pool = ConnectionPool(None)
    self.assertIs(None, pool.name)
def test_specified(self):
    """
    If a value is given for the C{name} parameter to L{ConnectionPool}'s
    initializer then it is used as the value for L{ConnectionPool.name}.
    """
    name = "some test pool"
    pool = ConnectionPool(None, name=name)
    self.assertEqual(name, pool.name)
def test_threadCount(self):
    """
    The reactor associated with a L{ConnectionPool} will have its maximum
    thread count adjusted when L{ConnectionPool.startService} is called, to
    accommodate L{ConnectionPool.maxConnections} additional threads.

    Stopping the service should restore it to its original value, so that a
    repeatedly re-started L{ConnectionPool} will not cause the thread
    ceiling to grow without bound.
    """
    defaultMax = 27
    connsMax = 45
    combinedMax = defaultMax + connsMax
    pool = ConnectionPool(None, maxConnections=connsMax)
    pool.reactor = ClockWithThreads()
    threadpool = pool.reactor.getThreadPool()
    pool.reactor.suggestThreadPoolSize(defaultMax)
    self.assertEquals(threadpool.max, defaultMax)
    pool.startService()
    self.assertEquals(threadpool.max, combinedMax)
    justChecking = []
    pool.stopService().addCallback(justChecking.append)
    # No SQL run, so no threads started, so this deferred should fire
    # immediately.  If not, we're in big trouble, so sanity check.
    self.assertEquals(justChecking, [None])
    self.assertEquals(threadpool.max, defaultMax)
@inlineCallbacks
def makeAndCleanStore(self, testCase, notifierFactory, directoryService,
                      attachmentRoot):
    """
    Create a L{CommonDataStore} specific to the given L{TestCase}.

    This also creates a L{ConnectionPool} that gets stopped when the test
    finishes, to make sure that any test which fails will terminate
    cleanly.

    @return: a L{Deferred} that fires with a L{CommonDataStore}
    """
    # Always clean-out old attachments
    if attachmentRoot.exists():
        attachmentRoot.remove()
    attachmentRoot.createDirectory()

    currentTestID = testCase.id()
    cp = ConnectionPool(self.sharedService.produceConnection,
                        maxConnections=5)
    quota = deriveQuota(testCase)
    store = CommonDataStore(
        cp.connection,
        {"push": notifierFactory} if notifierFactory is not None else {},
        directoryService,
        attachmentRoot,
        "https://example.com/calendars/__uids__/%(home)s/attachments/%(name)s",
        quota=quota
    )
    store.label = currentTestID
    cp.startService()

    def stopIt():
        # active transactions should have been shut down.
        wasBusy = len(cp._busy)
        busyText = repr(cp._busy)
        stop = cp.stopService()

        def checkWasBusy(ignored):
            if wasBusy:
                testCase.fail("Outstanding Transactions: " + busyText)
            return ignored

        if deriveValue(testCase, _SPECIAL_TXN_CLEAN, lambda tc: False):
            stop.addBoth(checkWasBusy)
        return stop

    testCase.addCleanup(stopIt)
    yield self.cleanStore(testCase, store)
    returnValue(store)
def childStore(self):
    """
    Create a store suitable for use in a child process, that is hooked up
    to the store that a parent test process is managing.
    """
    disableMemcacheForTest(TestCase())
    staticQuota = 3000
    attachmentRoot = FilePath(self.sharedDBPath).child("attachments")
    stubsvc = self.createService(lambda cf: Service())

    cp = ConnectionPool(stubsvc.produceConnection, maxConnections=1,
                        dialect=DB_TYPE[0], paramstyle=DB_TYPE[1])
    # Attach the service to the running reactor.
    cp.startService()
    reactor.addSystemEventTrigger("before", "shutdown", cp.stopService)

    cds = CommonDataStore(
        cp.connection,
        {"push": StubNotifierFactory()},
        None,
        attachmentRoot, "",
        quota=staticQuota
    )
    return cds
def setUp(self, test=None, connect=None):
    """
    Support inheritance by L{TestCase} classes.
    """
    if test is None:
        test = self
    if connect is None:
        self.factory = ConnectionFactory()
        connect = self.factory.connect
    self.connect = connect
    self.paused = False
    self.holders = []
    self.pool = ConnectionPool(connect, maxConnections=2,
                               dialect=self.dialect,
                               paramstyle=self.paramstyle)
    self.pool._createHolder = self.makeAHolder
    self.clock = self.pool.reactor = ClockWithThreads()
    self.pool.startService()
    test.addCleanup(self.flushHolders)
@classmethod
def childStore(cls):
    """
    Create a store suitable for use in a child process, that is hooked up
    to the store that a parent test process is managing.
    """
    disableMemcacheForTest(TestCase())
    staticQuota = 3000
    attachmentRoot = (CachingFilePath(cls.SHARED_DB_PATH)
                      .child("attachments"))
    stubsvc = cls.createService(lambda cf: Service())

    cp = ConnectionPool(stubsvc.produceConnection, maxConnections=1)
    # Attach the service to the running reactor.
    cp.startService()
    reactor.addSystemEventTrigger("before", "shutdown", cp.stopService)

    cds = CommonDataStore(
        cp.connection,
        StubNotifierFactory(),
        attachmentRoot, "",
        quota=staticQuota
    )
    return cds
def buildConnectionPool(testCase, schemaText="",
                        dbtype=DatabaseType(SQLITE_DIALECT, "numeric")):
    """
    Build a L{ConnectionPool} for testing purposes, with the given
    C{testCase}.

    @param testCase: the test case to attach the resulting L{ConnectionPool}
        to.
    @type testCase: L{twisted.trial.unittest.TestCase}

    @param schemaText: The text of the schema with which to initialize the
        database.
    @type schemaText: L{str}

    @return: a L{ConnectionPool} service whose C{startService} method has
        already been invoked.
    @rtype: L{ConnectionPool}
    """
    sqlitename = testCase.mktemp()
    seqs = {}

    def connectionFactory(label=testCase.id()):
        conn = sqlite3.connect(sqlitename, isolation_level=None)

        def nextval(seq):
            result = seqs[seq] = seqs.get(seq, 0) + 1
            return result

        conn.create_function("nextval", 1, nextval)
        return conn

    con = connectionFactory()
    con.executescript(schemaText)
    con.commit()
    pool = ConnectionPool(connectionFactory, dbtype=dbtype)
    pool.startService()
    testCase.addCleanup(pool.stopService)
    return pool
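# Hypothetical usage sketch (not taken from the original sources): a trial
# test can build a temporary SQLite-backed pool with buildConnectionPool()
# and open transactions via pool.connection().  The test-class name, schema
# text, and table name below are made-up examples; execSQL() and commit()
# are the standard IAsyncTransaction methods used elsewhere in these
# fixtures.
class ExamplePoolTests(TestCase):
    def test_poolRunsSchema(self):
        """
        A pool built by L{buildConnectionPool} is already started and can
        hand out transactions against the supplied schema.
        """
        pool = buildConnectionPool(
            self, "create table EXAMPLE (VALUE integer);"
        )
        txn = pool.connection()
        d = txn.execSQL("insert into EXAMPLE values (1)")
        d.addCallback(lambda ignored: txn.commit())
        return d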
def getDBPool(config):
    """
    Inspect configuration to determine what database connection pool to set
    up.

    @return: (L{ConnectionPool}, transactionFactory)
    """
    if config.DBType == 'oracle':
        dialect = ORACLE_DIALECT
        paramstyle = 'numeric'
    else:
        dialect = POSTGRES_DIALECT
        paramstyle = 'pyformat'
    pool = None
    if config.DBAMPFD:
        txnFactory = transactionFactoryFromFD(
            int(config.DBAMPFD), dialect, paramstyle
        )
    elif not config.UseDatabase:
        txnFactory = None
    elif not config.SharedConnectionPool:
        if config.DBType == '':
            # get a PostgresService to tell us what the local connection
            # info is, but *don't* start it (that would start one postgres
            # master per slave, resulting in all kinds of mayhem...)
            connectionFactory = pgServiceFromConfig(
                config, None).produceConnection
        elif config.DBType == 'postgres':
            connectionFactory = pgConnectorFromConfig(config)
        elif config.DBType == 'oracle':
            connectionFactory = oracleConnectorFromConfig(config)
        else:
            raise UsageError("unknown DB type: %r" % (config.DBType,))
        pool = ConnectionPool(connectionFactory, dialect=dialect,
                              paramstyle=paramstyle,
                              maxConnections=config.MaxDBConnectionsPerPool)
        txnFactory = pool.connection
    else:
        raise UsageError(
            "trying to use DB in slave, but no connection info from parent"
        )
    return (pool, txnFactory)
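# Hypothetical wiring sketch (the function and parameter names here are
# assumptions, not from the original sources): the (pool, txnFactory) pair
# returned by getDBPool() is typically parented to the application's service
# hierarchy, with txnFactory called wherever a new transaction is needed.
def makeDatabaseService(config, parentService):
    pool, txnFactory = getDBPool(config)
    if pool is not None:
        # ConnectionPool is a Twisted service; parenting it means it is
        # started and stopped along with the rest of the application.
        pool.setServiceParent(parentService)
    return txnFactory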
@inlineCallbacks
def makeAndCleanStore(self, testCase, notifierFactory, directoryService,
                      attachmentRoot, enableJobProcessing=True):
    """
    Create a L{CommonDataStore} specific to the given L{TestCase}.

    This also creates a L{ConnectionPool} that gets stopped when the test
    finishes, to make sure that any test which fails will terminate
    cleanly.

    @return: a L{Deferred} that fires with a L{CommonDataStore}
    """
    # Always clean-out old attachments
    if attachmentRoot.exists():
        attachmentRoot.remove()
    attachmentRoot.createDirectory()

    currentTestID = testCase.id()
    cp = ConnectionPool(self.sharedService.produceConnection,
                        maxConnections=4)
    quota = deriveQuota(testCase)
    store = CommonDataStore(
        cp.connection,
        {"push": notifierFactory} if notifierFactory is not None else {},
        directoryService,
        attachmentRoot,
        "https://example.com/calendars/__uids__/%(home)s/attachments/%(name)s",
        quota=quota
    )
    store.label = currentTestID
    cp.startService()

    @inlineCallbacks
    def stopIt():
        txn = store.newTransaction()
        jobs = yield JobItem.all(txn)
        yield txn.commit()
        if len(jobs):
            print("Jobs left in job queue {}: {}".format(
                testCase,
                ",".join([job.workType for job in jobs])
            ))

        if enableJobProcessing:
            yield pool.stopService()

        # active transactions should have been shut down.
        wasBusy = len(cp._busy)
        busyText = repr(cp._busy)
        result = yield cp.stopService()
        if deriveValue(testCase, _SPECIAL_TXN_CLEAN, lambda tc: False):
            if wasBusy:
                testCase.fail("Outstanding Transactions: " + busyText)
            returnValue(result)
        returnValue(result)

    testCase.addCleanup(stopIt)

    yield self.cleanStore(testCase, store)

    # Start the job queue after store is up and cleaned
    if enableJobProcessing:
        pool = PeerConnectionPool(reactor, store.newTransaction, None)
        store.queuer = store.queuer.transferProposalCallbacks(pool)
        pool.startService()

    returnValue(store)
class ConnectionPoolHelper(object):
    """
    Connection pool setting-up facilities for tests that need a
    L{ConnectionPool}.
    """

    dialect = POSTGRES_DIALECT
    paramstyle = DEFAULT_PARAM_STYLE

    def setUp(self, test=None, connect=None):
        """
        Support inheritance by L{TestCase} classes.
        """
        if test is None:
            test = self
        if connect is None:
            self.factory = ConnectionFactory()
            connect = self.factory.connect
        self.connect = connect
        self.paused = False
        self.holders = []
        self.pool = ConnectionPool(connect, maxConnections=2,
                                   dialect=self.dialect,
                                   paramstyle=self.paramstyle)
        self.pool._createHolder = self.makeAHolder
        self.clock = self.pool.reactor = ClockWithThreads()
        self.pool.startService()
        test.addCleanup(self.flushHolders)

    def flushHolders(self):
        """
        Flush all pending C{submit}s since C{pauseHolders} was called.  This
        makes sure the service is stopped and the fake ThreadHolders are all
        executing their queues so failed tests can exit cleanly.
        """
        self.paused = False
        for holder in self.holders:
            holder.flush()

    def pauseHolders(self):
        """
        Pause all L{FakeThreadHolder}s, causing C{submit} to return an
        unfired L{Deferred}.
        """
        self.paused = True

    def makeAHolder(self):
        """
        Make a ThreadHolder-alike.
        """
        fth = FakeThreadHolder(self)
        self.holders.append(fth)
        return fth

    def resultOf(self, it):
        return resultOf(it)

    def createTransaction(self):
        return self.pool.connection()

    def translateError(self, err):
        return err
class ConnectionPoolHelper(object):
    """
    Connection pool setting-up facilities for tests that need a
    L{ConnectionPool}.
    """

    dbtype = DatabaseType(POSTGRES_DIALECT, DEFAULT_PARAM_STYLE)

    def setUp(self, test=None, connect=None):
        """
        Support inheritance by L{TestCase} classes.
        """
        if test is None:
            test = self
        if connect is None:
            self.factory = ConnectionFactory()
            connect = self.factory.connect
        self.connect = connect
        self.paused = False
        self.holders = []
        self.pool = ConnectionPool(
            connect,
            maxConnections=2,
            dbtype=self.dbtype,
        )
        self.pool._createHolder = self.makeAHolder
        self.clock = self.pool.reactor = ClockWithThreads()
        self.pool.startService()
        test.addCleanup(self.flushHolders)

    def flushHolders(self):
        """
        Flush all pending C{submit}s since C{pauseHolders} was called.  This
        makes sure the service is stopped and the fake ThreadHolders are all
        executing their queues so failed tests can exit cleanly.
        """
        self.paused = False
        for holder in self.holders:
            holder.flush()

    def pauseHolders(self):
        """
        Pause all L{FakeThreadHolder}s, causing C{submit} to return an
        unfired L{Deferred}.
        """
        self.paused = True

    def makeAHolder(self):
        """
        Make a ThreadHolder-alike.
        """
        fth = FakeThreadHolder(self)
        self.holders.append(fth)
        return fth

    def resultOf(self, it):
        return resultOf(it)

    def createTransaction(self):
        return self.pool.connection()

    def translateError(self, err):
        return err
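# Hypothetical usage sketch (the test-class name is an assumption, not from
# the original sources): a trial TestCase mixes in ConnectionPoolHelper to
# get a started pool backed by fake thread holders, then drives transactions
# synchronously via the helper's resultOf().
class PooledTransactionTests(ConnectionPoolHelper, TestCase):
    def setUp(self):
        # Let the helper build the pool, clock, and cleanup hooks.
        ConnectionPoolHelper.setUp(self)

    def test_createTransaction(self):
        """
        C{createTransaction} hands out a transaction from the helper's pool,
        which can be committed without the pool having touched a real
        database.
        """
        txn = self.createTransaction()
        self.resultOf(txn.commit())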