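# The tests below depend on a few fixtures defined elsewhere in this module
# and in twext.enterprise.queue.  What follows is a minimal sketch of those
# fixtures, inferred from how the tests use them; the import paths, base
# classes, and the "deleteOnLoad" hook are assumptions, not verbatim source.

from twext.enterprise.dal.record import fromTable
from twext.enterprise.queue import WorkItem, inTransaction


def transactionally(transactionCreator):
    """
    Decorator: immediately run the decorated function inside a transaction
    created by C{transactionCreator}, rebinding its name to the resulting
    L{Deferred}.
    """
    def thunk(operation):
        return inTransaction(transactionCreator, operation)
    return thunk


def astimestamp(dt):
    """
    Convert a naive UTC C{datetime} to a POSIX timestamp, since C{datetime}
    itself offers no such conversion.
    """
    return (dt - datetime.datetime.utcfromtimestamp(0)).total_seconds()


class DummyWorkDone(WorkItem, fromTable(schema.DUMMY_WORK_DONE)):
    """
    A record of completed work; each row carries C{workID} and C{aPlusB}.
    """


class DummyWorkItem(WorkItem, fromTable(schema.DUMMY_WORK_ITEM)):
    """
    Sample work item that adds its C{a} and C{b} columns and records the sum
    in C{DUMMY_WORK_DONE}.  (The C{deleteOnLoad} behaviour exercised by
    test_noWorkDoneWhenConcurrentlyDeleted, deleting the row from a
    concurrent transaction as it is loaded, is elided here.)
    """
    def doWork(self):
        return DummyWorkDone.create(self.transaction,
                                    workID=self.workID,
                                    aPlusB=self.a + self.b)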
class PeerConnectionPoolIntegrationTests(TestCase):
    """
    L{PeerConnectionPool} is the service responsible for coordinating
    eventually-consistent task queuing within a cluster.
    """

    @inlineCallbacks
    def setUp(self):
        """
        L{PeerConnectionPool} requires access to a database and the reactor.
        """
        self.store = yield buildStore(self, None)

        @inlineCallbacks
        def doit(txn):
            for statement in splitSQLString(nodeSchema + schemaText):
                yield txn.execSQL(statement)

        yield inTransaction(
            lambda: self.store.newTransaction("bonus schema"), doit
        )

        def indirectedTransactionFactory(*a):
            """
            Allow tests to replace "self.store.newTransaction" to provide
            fixtures with extra methods on a test-by-test basis.
            """
            return self.store.newTransaction(*a)

        def deschema():
            @inlineCallbacks
            def deletestuff(txn):
                for stmt in dropSQL:
                    yield txn.execSQL(stmt)
                yield txn.execSQL("drop table node_info")
            return inTransaction(
                lambda *a: self.store.newTransaction(*a), deletestuff
            )

        self.addCleanup(deschema)

        from twisted.internet import reactor
        self.node1 = PeerConnectionPool(
            reactor, indirectedTransactionFactory, 0, schema)
        self.node2 = PeerConnectionPool(
            reactor, indirectedTransactionFactory, 0, schema)

        class FireMeService(Service, object):
            def __init__(self, d):
                super(FireMeService, self).__init__()
                self.d = d

            def startService(self):
                self.d.callback(None)

        d1 = Deferred()
        d2 = Deferred()
        FireMeService(d1).setServiceParent(self.node1)
        FireMeService(d2).setServiceParent(self.node2)
        ms = MultiService()
        self.node1.setServiceParent(ms)
        self.node2.setServiceParent(ms)
        ms.startService()

        @inlineCallbacks
        def _clean():
            yield ms.stopService()
            self.flushLoggedErrors(CancelledError)

        self.addCleanup(_clean)
        yield gatherResults([d1, d2])
        self.store.queuer = self.node1

    def test_currentNodeInfo(self):
        """
        There will be two C{NODE_INFO} rows in the database, retrievable as
        two L{NodeInfo} objects, once both nodes have started up.
        """
        @inlineCallbacks
        def check(txn):
            self.assertEquals(len((yield self.node1.activeNodes(txn))), 2)
            self.assertEquals(len((yield self.node2.activeNodes(txn))), 2)
        return inTransaction(self.store.newTransaction, check)

    @inlineCallbacks
    def test_enqueueHappyPath(self):
        """
        When a L{WorkItem} is scheduled for execution via
        L{PeerConnectionPool.enqueueWork}, its C{doWork} method will have
        been invoked by the time the L{Deferred} returned from the resulting
        L{WorkProposal}'s C{whenExecuted} method has fired.
        """
        # TODO: this exact test should run against LocalQueuer as well.
        def operation(txn):
            # TODO: how does "enqueue" get associated with the transaction?
            # This is not the case with a raw t.w.enterprise transaction.
            # Should probably do something with components.
            return txn.enqueue(DummyWorkItem, a=3, b=4, workID=4321,
                               notBefore=datetime.datetime.utcnow())
        result = yield inTransaction(self.store.newTransaction, operation)

        # Wait for it to be executed.  Hopefully this does not time out :-\.
        yield result.whenExecuted()

        def op2(txn):
            return Select([
                schema.DUMMY_WORK_DONE.WORK_ID,
                schema.DUMMY_WORK_DONE.A_PLUS_B,
            ], From=schema.DUMMY_WORK_DONE).on(txn)
        rows = yield inTransaction(self.store.newTransaction, op2)
        self.assertEquals(map(list, rows), [[4321, 7]])

    @inlineCallbacks
    def test_noWorkDoneWhenConcurrentlyDeleted(self):
        """
        When a L{WorkItem} is concurrently deleted by another transaction, it
        should I{not} perform its work.
        """
        # Expose a method called "concurrently" on every transaction, so
        # that the work item can open a second transaction running
        # concurrently with the one that loaded it.
        original = self.store.newTransaction

        def decorate(*a, **k):
            result = original(*a, **k)
            result.concurrently = self.store.newTransaction
            return result

        self.store.newTransaction = decorate

        def operation(txn):
            return txn.enqueue(DummyWorkItem, a=30, b=40, workID=5678,
                               deleteOnLoad=1,
                               notBefore=datetime.datetime.utcnow())
        proposal = yield inTransaction(self.store.newTransaction, operation)
        yield proposal.whenExecuted()

        # Sanity check on the concurrent deletion: the work item is gone...
        def op2(txn):
            return Select([schema.DUMMY_WORK_ITEM.WORK_ID],
                          From=schema.DUMMY_WORK_ITEM).on(txn)
        rows = yield inTransaction(self.store.newTransaction, op2)
        self.assertEquals(list(rows), [])

        # ... and no result row was written, i.e. doWork never ran.
        def op3(txn):
            return Select([
                schema.DUMMY_WORK_DONE.WORK_ID,
                schema.DUMMY_WORK_DONE.A_PLUS_B,
            ], From=schema.DUMMY_WORK_DONE).on(txn)
        rows = yield inTransaction(self.store.newTransaction, op3)
        self.assertEquals(list(rows), [])
class PeerConnectionPoolUnitTests(TestCase):
    """
    L{PeerConnectionPool} has many internal components.
    """

    def setUp(self):
        """
        Create a L{PeerConnectionPool} that is just initialized enough.
        """
        self.pcp = PeerConnectionPool(None, None, 4321, schema)

    def checkPerformer(self, cls):
        """
        Verify that the performer returned by
        L{PeerConnectionPool.choosePerformer} is an instance of C{cls} and
        provides L{_IWorkPerformer}.
        """
        performer = self.pcp.choosePerformer()
        self.failUnlessIsInstance(performer, cls)
        verifyObject(_IWorkPerformer, performer)

    def test_choosingPerformerWhenNoPeersAndNoWorkers(self):
        """
        If L{PeerConnectionPool.choosePerformer} is invoked when no workers
        have spawned and no peers have established connections (either
        incoming or outgoing), then it chooses an implementation of
        C{performWork} that simply executes the work locally.
        """
        self.checkPerformer(LocalPerformer)

    def test_choosingPerformerWithLocalCapacity(self):
        """
        If L{PeerConnectionPool.choosePerformer} is invoked when some workers
        have spawned, then it should choose the worker pool as the local
        performer.
        """
        # Give it some local capacity.
        wlf = self.pcp.workerListenerFactory()
        proto = wlf.buildProtocol(None)
        proto.makeConnection(StringTransport())

        # Sanity check.
        self.assertEqual(len(self.pcp.workerPool.workers), 1)
        self.assertEqual(self.pcp.workerPool.hasAvailableCapacity(), True)

        # Now it has some capacity.
        self.checkPerformer(WorkerConnectionPool)

    def test_choosingPerformerFromNetwork(self):
        """
        If L{PeerConnectionPool.choosePerformer} is invoked when no workers
        have spawned but some peers have connected, then it should choose a
        connection from the network to perform the work.
        """
        peer = PeerConnectionPool(None, None, 4322, schema)
        local = self.pcp.peerFactory().buildProtocol(None)
        remote = peer.peerFactory().buildProtocol(None)
        connection = Connection(local, remote)
        connection.start()
        self.checkPerformer(ConnectionFromPeerNode)

    def test_performingWorkOnNetwork(self):
        """
        The L{PerformWork} command will get relayed to the remote peer
        controller.
        """
        peer = PeerConnectionPool(None, None, 4322, schema)
        local = self.pcp.peerFactory().buildProtocol(None)
        remote = peer.peerFactory().buildProtocol(None)
        connection = Connection(local, remote)
        connection.start()
        d = Deferred()

        class DummyPerformer(object):
            def performWork(self, table, workID):
                self.table = table
                self.workID = workID
                return d

        # Doing real database I/O in this test would be tedious, so fake the
        # first method in the call stack which actually talks to the DB.
        dummy = DummyPerformer()

        def chooseDummy(onlyLocally=False):
            return dummy

        peer.choosePerformer = chooseDummy
        performed = local.performWork(schema.DUMMY_WORK_ITEM, 7384)
        performResult = []
        performed.addCallback(performResult.append)

        # Sanity check.
        self.assertEquals(performResult, [])
        connection.flush()
        self.assertEquals(dummy.table, schema.DUMMY_WORK_ITEM)
        self.assertEquals(dummy.workID, 7384)
        self.assertEquals(performResult, [])
        d.callback(128374)
        connection.flush()
        self.assertEquals(performResult, [None])

    def test_choosePerformerSorted(self):
        """
        L{PeerConnectionPool.choosePerformer} returns the peer with the
        lowest current load estimate.
        """
        peer = PeerConnectionPool(None, None, 4322, schema)

        class DummyPeer(object):
            def __init__(self, name, load):
                self.name = name
                self.load = load

            def currentLoadEstimate(self):
                return self.load

        apeer = DummyPeer("A", 1)
        bpeer = DummyPeer("B", 0)
        cpeer = DummyPeer("C", 2)
        peer.addPeerConnection(apeer)
        peer.addPeerConnection(bpeer)
        peer.addPeerConnection(cpeer)

        performer = peer.choosePerformer(onlyLocally=False)
        self.assertEqual(performer, bpeer)

        bpeer.load = 2
        performer = peer.choosePerformer(onlyLocally=False)
        self.assertEqual(performer, apeer)

    @inlineCallbacks
    def test_notBeforeWhenCheckingForLostWork(self):
        """
        L{PeerConnectionPool._periodicLostWorkCheck} should execute any
        outstanding work items, but only those that are expired.
        """
        dbpool = buildConnectionPool(self, schemaText + nodeSchema)
        # An arbitrary point in time.
        fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)
        # *why* does datetime still not have .astimestamp()
        sinceEpoch = astimestamp(fakeNow)
        clock = Clock()
        clock.advance(sinceEpoch)
        qpool = PeerConnectionPool(clock, dbpool.connection, 0, schema)

        # Create a couple of work items directly, not via the enqueue
        # method, so that they exist but nobody will try to immediately
        # execute them.
        @transactionally(dbpool.connection)
        @inlineCallbacks
        def setup(txn):
            # First, one that's due right now.
            yield DummyWorkItem.create(txn, a=1, b=2, notBefore=fakeNow)
            # Next, one that's far enough into the past that it should
            # already have run.
            yield DummyWorkItem.create(
                txn, a=3, b=4,
                notBefore=fakeNow - datetime.timedelta(
                    seconds=qpool.queueProcessTimeout + 20
                )
            )
            # Finally, one that's actually scheduled for the future.
            yield DummyWorkItem.create(
                txn, a=10, b=20,
                notBefore=fakeNow + datetime.timedelta(1000)
            )
        yield setup

        qpool.running = True
        yield qpool._periodicLostWorkCheck()

        @transactionally(dbpool.connection)
        def check(txn):
            return DummyWorkDone.all(txn)

        every = yield check
        self.assertEquals([x.aPlusB for x in every], [7])

    @inlineCallbacks
    def test_notBeforeWhenEnqueueing(self):
        """
        L{PeerConnectionPool.enqueueWork} enqueues some work immediately, but
        only executes it when enough time has elapsed to allow the
        C{notBefore} attribute of the given work item to have passed.
        """
        dbpool = buildConnectionPool(self, schemaText + nodeSchema)
        fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)
        sinceEpoch = astimestamp(fakeNow)
        clock = Clock()
        clock.advance(sinceEpoch)
        qpool = PeerConnectionPool(clock, dbpool.connection, 0, schema)

        realChoosePerformer = qpool.choosePerformer
        performerChosen = []

        def catchPerformerChoice():
            result = realChoosePerformer()
            performerChosen.append(True)
            return result

        qpool.choosePerformer = catchPerformerChoice

        @transactionally(dbpool.connection)
        def check(txn):
            return qpool.enqueueWork(
                txn, DummyWorkItem, a=3, b=9,
                notBefore=datetime.datetime(2012, 12, 12, 12, 12, 20)
            ).whenProposed()

        proposal = yield check

        # This is going to schedule the work to happen with some
        # asynchronous I/O in the middle; this is a problem because how do
        # we know when it's time to check to see if the work has started?
        # We need to intercept the thing that kicks off the work; we can
        # then wait for the work itself.
        self.assertEquals(performerChosen, [])

        # Advance to exactly the appointed second.
        clock.advance(20 - 12)
        self.assertEquals(performerChosen, [True])

        # FIXME: if this fails, it will hang, but that's better than no
        # notification that it is broken at all.
        result = yield proposal.whenExecuted()
        self.assertIdentical(result, proposal)

    @inlineCallbacks
    def test_notBeforeBefore(self):
        """
        L{PeerConnectionPool.enqueueWork} will execute its work immediately
        if the C{notBefore} attribute of the work item in question is in the
        past.
        """
        dbpool = buildConnectionPool(self, schemaText + nodeSchema)
        fakeNow = datetime.datetime(2012, 12, 12, 12, 12, 12)
        sinceEpoch = astimestamp(fakeNow)
        clock = Clock()
        clock.advance(sinceEpoch)
        qpool = PeerConnectionPool(clock, dbpool.connection, 0, schema)

        realChoosePerformer = qpool.choosePerformer
        performerChosen = []

        def catchPerformerChoice():
            result = realChoosePerformer()
            performerChosen.append(True)
            return result

        qpool.choosePerformer = catchPerformerChoice

        @transactionally(dbpool.connection)
        def check(txn):
            return qpool.enqueueWork(
                txn, DummyWorkItem, a=3, b=9,
                notBefore=datetime.datetime(2012, 12, 12, 12, 12, 0)
            ).whenProposed()

        proposal = yield check
        clock.advance(1000)  # Advance far beyond the given timestamp.
        self.assertEquals(performerChosen, [True])

        result = yield proposal.whenExecuted()
        self.assertIdentical(result, proposal)

    def test_workerConnectionPoolPerformWork(self):
        """
        L{WorkerConnectionPool.performWork} performs work by selecting a
        L{ConnectionFromWorker} and sending it a L{PerformWork} command.
        """
        clock = Clock()
        peerPool = PeerConnectionPool(clock, None, 4322, schema)
        factory = peerPool.workerListenerFactory()

        def peer():
            p = factory.buildProtocol(None)
            t = StringTransport()
            p.makeConnection(t)
            return p, t

        worker1, _ignore_trans1 = peer()
        worker2, _ignore_trans2 = peer()

        # Ask one worker to do something.
        worker1.performWork(schema.DUMMY_WORK_ITEM, 1)
        self.assertEquals(worker1.currentLoad, 1)
        self.assertEquals(worker2.currentLoad, 0)

        # Now ask the pool to do something; it should pick the idle worker.
        peerPool.workerPool.performWork(schema.DUMMY_WORK_ITEM, 2)
        self.assertEquals(worker1.currentLoad, 1)
        self.assertEquals(worker2.currentLoad, 1)

    def test_poolStartServiceChecksForWork(self):
        """
        L{PeerConnectionPool.startService} kicks off the idle work-check
        loop.
        """
        reactor = MemoryReactorWithClock()
        cph = SteppablePoolHelper(nodeSchema + schemaText)
        then = datetime.datetime(2012, 12, 12, 12, 12, 0)
        reactor.advance(astimestamp(then))
        cph.setUp(self)
        pcp = PeerConnectionPool(reactor, cph.pool.connection, 4321, schema)
        now = then + datetime.timedelta(seconds=pcp.queueProcessTimeout * 2)

        @transactionally(cph.pool.connection)
        def createOldWork(txn):
            one = DummyWorkItem.create(txn, workID=1, a=3, b=4,
                                       notBefore=then)
            two = DummyWorkItem.create(txn, workID=2, a=7, b=9,
                                       notBefore=now)
            return gatherResults([one, two])

        pcp.startService()
        cph.flushHolders()
        reactor.advance(pcp.queueProcessTimeout * 2)
        self.assertEquals(
            cph.rows("select * from DUMMY_WORK_DONE"), [(1, 7)]
        )
        cph.rows("delete from DUMMY_WORK_DONE")
        reactor.advance(pcp.queueProcessTimeout * 2)
        self.assertEquals(
            cph.rows("select * from DUMMY_WORK_DONE"), [(2, 16)]
        )