def test_startAndStop(self):
    """
    Test that a process pool's start and stop methods create the
    expected number of workers and keep the process pool's state
    consistent.
    """
    pp = pool.ProcessPool()
    self.assertEquals(pp.started, False)
    self.assertEquals(pp.finished, False)
    self.assertEquals(pp.processes, set())
    self.assertEquals(pp._finishCallbacks, {})

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        return pp.stop()

    def _closingUp(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, True)
        self.assertEquals(len(pp.processes), 0)
        self.assertEquals(pp._finishCallbacks, {})

    return pp.start().addCallback(_checks).addCallback(_closingUp)
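# The test methods in this listing assume roughly the following imports.
# This is a sketch inferred from the names used below, not part of the
# original excerpt:
#
#     from signal import SIGHUP
#
#     from twisted.internet import defer, error, reactor
#     from twisted.protocols import amp
#     from twisted.trial import unittest
#
#     from ampoule import child, commands, main, pool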
def test_adjustPoolSize(self):
    """
    Test that calls to pool.adjustPoolSize are correctly handled.
    """
    pp = pool.ProcessPool(min=10)
    self.assertEquals(pp.started, False)
    self.assertEquals(pp.finished, False)
    self.assertEquals(pp.processes, set())
    self.assertEquals(pp._finishCallbacks, {})

    def _resize1(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        return pp.adjustPoolSize(min=2, max=3)

    def _resize2(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(pp.max, 3)
        self.assertEquals(pp.min, 2)
        self.assertEquals(len(pp.processes), pp.max)
        self.assertEquals(len(pp._finishCallbacks), pp.max)

    def _resize3(_):
        self.assertRaises(AssertionError, pp.adjustPoolSize, min=-1, max=5)
        self.assertRaises(AssertionError, pp.adjustPoolSize, min=5, max=1)
        return pp.stop()

    return pp.start().addCallback(_resize1).addCallback(
        _resize2).addCallback(_resize3)
def test_disableProcessRecycling(self):
    """
    Test that setting recycleAfter to 0 actually disables process
    recycling.
    """
    MAX = 1
    MIN = 1
    RECYCLE_AFTER = 0
    pp = pool.ProcessPool(ampChild=PidChild, min=MIN, max=MAX,
                          recycleAfter=RECYCLE_AFTER)

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        return pp.doWork(Pid).addCallback(lambda response: response['pid'])

    def _checks2(pid):
        return pp.doWork(Pid).addCallback(
            lambda response: response['pid']).addCallback(
            self.assertEquals, pid).addCallback(lambda _: pid)

    def finish(reason):
        return pp.stop().addCallback(lambda _: reason)

    return pp.start().addCallback(_checks).addCallback(
        _checks2).addCallback(_checks2).addCallback(finish)
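# The PidChild helper and the Pid command used by the recycling tests are
# not shown in this listing.  A minimal sketch of what they might look
# like follows; the names and exact schema are assumptions, not the
# library's own definitions.  The child simply reports its operating
# system PID, which lets the tests detect whether a fresh process
# replaced the old one.
import os

from twisted.protocols import amp
from ampoule import child


class Pid(amp.Command):
    # No arguments; the response carries the child's process id.
    response = [(b'pid', amp.Integer())]


class PidChild(child.AMPChild):
    def pid(self):
        # Return the PID of the subprocess answering the call.
        return {'pid': os.getpid()}
    Pid.responder(pid)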
def test_startStopWorker(self):
    """
    Test that starting and stopping a worker keeps the state of the
    process pool consistent.
    """
    pp = pool.ProcessPool()
    self.assertEquals(pp.started, False)
    self.assertEquals(pp.finished, False)
    self.assertEquals(pp.processes, set())
    self.assertEquals(pp._finishCallbacks, {})

    def _checks():
        self.assertEquals(pp.started, False)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), 1)
        self.assertEquals(len(pp._finishCallbacks), 1)
        return pp.stopAWorker()

    def _closingUp(_):
        self.assertEquals(pp.started, False)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), 0)
        self.assertEquals(pp._finishCallbacks, {})

    pp.startAWorker()
    return _checks().addCallback(_closingUp).addCallback(
        lambda _: pp.stop())
def test_recyclingWithQueueOverload(self):
    """
    Test that we get the correct number of distinct results when we
    overload the pool with calls.
    """
    MAX = 5
    MIN = 1
    RECYCLE_AFTER = 10
    CALLS = 60
    pp = pool.ProcessPool(ampChild=PidChild, min=MIN, max=MAX,
                          recycleAfter=RECYCLE_AFTER)
    self.addCleanup(pp.stop)

    def _check(results):
        s = set()
        for succeed, response in results:
            s.add(response['pid'])
        # For the first C{MAX} calls, each is basically guaranteed to go
        # to a different child.  After that, though, there are no
        # guarantees.  All the rest might go to a single child, since the
        # child to perform a job is selected arbitrarily from the "ready"
        # set.  Fair distribution of jobs needs to be implemented; right
        # now it's "set ordering" distribution of jobs.
        self.assertTrue(len(s) > MAX)

    def _work(_):
        l = [pp.doWork(Pid) for x in range(CALLS)]
        d = defer.DeferredList(l)
        return d.addCallback(_check)

    d = pp.start()
    d.addCallback(_work)
    return d
@defer.inlineCallbacks
def _run():
    pp = pool.ProcessPool(MyChild, min=1, max=1)
    yield pp.start()
    result = yield pp.doWork(Pid)
    print("The Child process PID is:", result['pid'])
    yield pp.stop()
    reactor.stop()
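# How this example is driven is not shown.  Assuming MyChild is a
# PidChild-style child like the sketch earlier in this listing (an
# assumption, not part of the original example), it could be started like
# this when the module is run directly:
from twisted.internet import reactor

if __name__ == '__main__':
    reactor.callWhenRunning(_run)
    reactor.run()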
def test_recyclingProcessFails(self):
    """
    A process exiting with a non-zero exit code when recycled does not
    get multiple processes started to replace it.
    """
    MAX = 1
    MIN = 1
    RECYCLE_AFTER = 1
    pp = pool.ProcessPool(ampChild=ExitingChild, min=MIN, max=MAX,
                          recycleAfter=RECYCLE_AFTER)
    self.addCleanup(pp.stop)

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        child = list(pp.ready)[0]
        finished = pp._finishCallbacks[child]
        return pp.doWork(Exit).addBoth(lambda _: finished)

    def _checks2(_):
        self.assertEquals(len(pp.processes), pp.max)

    d = pp.start()
    d.addCallback(_checks)
    d.addCallback(_checks2)
    return d
def test_growingToMax(self):
    """
    Test that the pool grows over time until it reaches max processes.
    """
    MAX = 5
    pp = pool.ProcessPool(ampChild=WaitingChild, min=1, max=MAX)

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        D = b"DATA"
        d = [pp.doWork(First, data=D) for x in range(MAX)]
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.max)
        self.assertEquals(len(pp._finishCallbacks), pp.max)
        [child.callRemote(Second) for child in pp.processes]
        return defer.DeferredList(d)

    return pp.start().addCallback(_checks).addCallback(lambda _: pp.stop())
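# WaitingChild and the First/Second commands used by the growth tests are
# not defined in this listing.  The sketch below is an assumption about
# their shape: First parks the child on a Deferred (keeping the worker
# busy, so the pool must spawn more workers for further calls), and
# Second fires that Deferred so the pending First call can answer.
from twisted.internet import defer
from twisted.protocols import amp
from ampoule import child


class First(amp.Command):
    arguments = [(b'data', amp.String())]
    response = [(b'response', amp.String())]


class Second(amp.Command):
    pass  # no arguments, no interesting response


class WaitingChild(child.AMPChild):
    deferred = None

    def first(self, data):
        # Hold the answer until Second arrives, keeping this worker busy.
        self.deferred = defer.Deferred()
        return self.deferred.addCallback(lambda _: {'response': data})
    First.responder(first)

    def second(self):
        # Release the pending First call.
        self.deferred.callback(None)
        return {}
    Second.responder(second)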
def test_recycling(self):
    """
    Test that after a given number of calls subprocesses are recycled.
    """
    MAX = 1
    MIN = 1
    RECYCLE_AFTER = 1
    pp = pool.ProcessPool(ampChild=PidChild, min=MIN, max=MAX,
                          recycleAfter=RECYCLE_AFTER)
    self.addCleanup(pp.stop)

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        return pp.doWork(Pid).addCallback(lambda response: response['pid'])

    def _checks2(pid):
        return pp.doWork(Pid).addCallback(
            lambda response: response['pid']).addCallback(
            self.assertNotEquals, pid)

    d = pp.start()
    d.addCallback(_checks)
    d.addCallback(_checks2)
    return d
def test_supplyChildArgs(self):
    """Ensure that arguments for the child constructor are passed in."""
    pp = pool.ProcessPool(Writer, ampChildArgs=['body'], min=0)

    def _check(result):
        return pp.doWork(Write).addCallback(self.assertEquals,
                                            {'response': b'body'})

    return pp.start().addCallback(_check).addCallback(lambda _: pp.stop())
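# The Writer child and the Write command are not part of this listing.
# One plausible sketch, assuming ampChildArgs are handed to the child's
# constructor as extra positional strings (exactly how ampoule's
# bootstrap wires this up is not shown here):
from twisted.protocols import amp
from ampoule import child


class Write(amp.Command):
    response = [(b'response', amp.String())]


class Writer(child.AMPChild):
    def __init__(self, body):
        child.AMPChild.__init__(self)
        # 'body' would arrive as the single entry of ampChildArgs=['body'].
        self.body = body

    def write(self):
        body = self.body
        if isinstance(body, str):
            # AMP String/Bytes values must be bytes on Python 3.
            body = body.encode('ascii')
        # Echo the constructor-supplied argument back to the parent.
        return {'response': body}
    Write.responder(write)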
def processTimeoutTest(self, timeout):
    pp = pool.ProcessPool(WaitingChild, min=1, max=1)

    def _work(_):
        d = pp.callRemote(First, data=b"ciao", _timeout=timeout)
        self.assertFailure(d, error.ProcessTerminated)
        return d

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
def test_processDeadline(self):
    pp = pool.ProcessPool(WaitingChild, min=1, max=1)

    def _work(_):
        d = pp.callRemote(First, data=b"ciao", _deadline=reactor.seconds())
        self.assertFailure(d, error.ProcessTerminated)
        return d

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
def test_processBeforeDeadline(self):
    pp = pool.ProcessPool(PidChild, min=1, max=1)

    def _work(_):
        d = pp.callRemote(Pid, _deadline=reactor.seconds() + 10)
        d.addCallback(lambda result: self.assertNotEqual(result['pid'], 0))
        return d

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
def checkPool(_):
    pp = pool.ProcessPool(
        starter=main.ProcessStarter(childReactor=SECOND,
                                    packages=("twisted", "ampoule")),
        ampChild=ReactorChild, min=MIN, max=MAX)
    pp.start()
    return (pp.doWork(Reactor)
            .addCallback(self.assertEquals, {'classname': b"PollReactor"})
            .addCallback(lambda _: pp.stop()))
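# ReactorChild and the Reactor command are not shown here.  A hedged
# sketch of what they might look like: the child reports the class name
# of the reactor it is running under, which is how this check verifies
# that ProcessStarter's childReactor option took effect in the
# subprocess.
from twisted.protocols import amp
from ampoule import child


class Reactor(amp.Command):
    response = [(b'classname', amp.String())]


class ReactorChild(child.AMPChild):
    def whichReactor(self):
        # Imported lazily so the child reports whatever reactor its
        # bootstrap installed rather than forcing the default one.
        from twisted.internet import reactor
        return {'classname': reactor.__class__.__name__.encode('ascii')}
    Reactor.responder(whichReactor)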
def test_growingToMaxAndShrinking(self):
    """
    Test that the pool grows but after 'idle' time the number of
    processes goes back to the minimum.
    """
    MAX = 5
    MIN = 1
    IDLE = 1
    pp = pool.ProcessPool(ampChild=WaitingChild, min=MIN, max=MAX,
                          maxIdle=IDLE)

    def _checks(_):
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        D = b"DATA"
        d = [pp.doWork(First, data=D) for x in range(MAX)]
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.max)
        self.assertEquals(len(pp._finishCallbacks), pp.max)
        [child.callRemote(Second) for child in pp.processes]
        return defer.DeferredList(d).addCallback(_realChecks)

    def _realChecks(_):
        from twisted.internet import reactor
        d = defer.Deferred()

        def _cb():
            def __(_):
                try:
                    self.assertEquals(pp.started, True)
                    self.assertEquals(pp.finished, False)
                    self.assertEquals(len(pp.processes), pp.min)
                    self.assertEquals(len(pp._finishCallbacks), pp.min)
                    d.callback(None)
                except Exception as e:
                    d.errback(e)
            return pp._pruneProcesses().addCallback(__)

        # Stop the looping call so the automatic pruner doesn't interfere;
        # we schedule _cb after the idle period and prune manually, just
        # to be sure our checks run after the pruning has happened.
        pp.looping.stop()
        reactor.callLater(IDLE, _cb)
        return d

    return pp.start().addCallback(_checks).addCallback(lambda _: pp.stop())
@defer.inlineCallbacks
def _run():
    pp = pool.ProcessPool(child.AMPChild, recycleAfter=5000)
    pp.min = 1
    pp.max = 5
    yield pp.start()
    t = time.time()
    REPEATS = 40000
    l = [pp.doWork(Ping) for x in range(REPEATS)]
    yield defer.DeferredList(l)
    print(REPEATS / (time.time() - t))
    yield pp.stop()
    reactor.stop()
def test_processGlobalTimeout(self):
    """
    Test that a call that doesn't finish within the given global
    timeout is correctly handled.
    """
    pp = pool.ProcessPool(WaitingChild, min=1, max=1, timeout=1)

    def _work(_):
        d = pp.callRemote(First, data=b"ciao")
        self.assertFailure(d, error.ProcessTerminated)
        return d

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
def __init__(self, job_source, dbuser, logger=None, error_utility=None):
    env = {'PATH': os.environ['PATH']}
    if 'LPCONFIG' in os.environ:
        env['LPCONFIG'] = os.environ['LPCONFIG']
    env['PYTHONPATH'] = os.pathsep.join(sys.path)
    starter = main.ProcessStarter(env=env)
    super(TwistedJobRunner, self).__init__(logger, error_utility)
    self.job_source = job_source
    self.import_name = '%s.%s' % (
        removeSecurityProxy(job_source).__module__, job_source.__name__)
    self.pool = pool.ProcessPool(
        JobRunnerProcess, ampChildArgs=[self.import_name, str(dbuser)],
        starter=starter, min=0, timeout_signal=SIGHUP)
def test_childRestart(self):
    """
    Test that a failing child process is immediately restarted.
    """
    pp = pool.ProcessPool(ampChild=BadChild, min=1)
    STRING = b"DATA"

    def _checks(_):
        d = next(iter(pp._finishCallbacks.values()))
        pp.doWork(Die).addErrback(lambda _: None)
        return d.addBoth(_checksAgain)

    def _checksAgain(_):
        return pp.doWork(commands.Echo, data=STRING).addCallback(
            lambda result: self.assertEquals(result['response'], STRING))

    return pp.start().addCallback(_checks).addCallback(lambda _: pp.stop())
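# BadChild and the Die command are not defined in this listing.  A hedged
# guess at their shape: Die makes the child terminate abruptly, and the
# later Echo works because the pool restarts the worker.  This sketch
# assumes the stock AMPChild base class already answers ampoule's Echo
# command, as the test's use of commands.Echo suggests, so only Die needs
# a responder here.
import os

from twisted.protocols import amp
from ampoule import child


class Die(amp.Command):
    pass


class BadChild(child.AMPChild):
    def die(self):
        # Kill the subprocess without a clean shutdown; the pool is
        # expected to notice and start a replacement worker.
        os._exit(1)
    Die.responder(die)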
def test_commandsWithoutResponse(self):
    """
    Test that sending a command that doesn't require an answer doesn't
    cause any problems.
    """
    DATA = b"hello"
    pp = pool.ProcessPool(ampChild=NoResponseChild, min=1, max=1)

    def _check(_):
        return pp.doWork(GetResponse).addCallback(self.assertEquals,
                                                  {"response": DATA})

    def _work(_):
        return pp.doWork(NoResponse, arg=DATA)

    return pp.start().addCallback(_work).addCallback(_check).addCallback(
        lambda _: pp.stop())
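# NoResponseChild and its two commands are not part of this listing.  A
# plausible sketch: NoResponse is a fire-and-forget command (AMP's
# requiresAnswer = False), and GetResponse later retrieves the value the
# child stored, which is how the test proves the answerless call was
# actually delivered.
from twisted.protocols import amp
from ampoule import child


class NoResponse(amp.Command):
    arguments = [(b'arg', amp.String())]
    requiresAnswer = False


class GetResponse(amp.Command):
    response = [(b'response', amp.String())]


class NoResponseChild(child.AMPChild):
    stored = b""

    def noResponse(self, arg):
        # Remember the payload; nothing is sent back to the caller.
        self.stored = arg
        return {}
    NoResponse.responder(noResponse)

    def getResponse(self):
        return {'response': self.stored}
    GetResponse.responder(getResponse)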
def test_processTimeoutSignal(self):
    """
    Test that a call that doesn't finish within the given timeout is
    correctly handled.
    """
    pp = pool.ProcessPool(WaitingChild, min=1, max=1,
                          timeout_signal=SIGHUP)

    def _work(_):
        d = pp.callRemote(First, data=b"ciao", _timeout=1)
        d.addCallback(lambda d: self.fail())
        text = 'signal %d' % SIGHUP
        d.addErrback(lambda f: self.assertIn(text, str(f.value)))
        return d

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
def test_processRestartAfterTimeout(self):
    """
    Test that a call that times out doesn't cause all subsequent
    requests to fail.
    """
    pp = pool.ProcessPool(TimingOutChild, min=1, max=1, timeout=1)

    def _work(_):
        d1 = pp.callRemote(HangForever)
        d2 = pp.callRemote(Ping, data=b"hello")
        self.assertFailure(d1, error.ProcessTerminated)
        d2.addCallback(lambda result: self.assertEqual(
            result, {"response": b"hello"}))
        return defer.DeferredList([d1, d2])

    return pp.start().addCallback(_work).addCallback(lambda _: pp.stop())
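# TimingOutChild, HangForever and this Ping are not shown in the listing.
# A sketch under the assumption that HangForever simply never fires its
# Deferred (so the pool's timeout kills the worker) while Ping answers
# normally once a fresh worker is available.
from twisted.internet import defer
from twisted.protocols import amp
from ampoule import child


class HangForever(amp.Command):
    pass


class Ping(amp.Command):
    arguments = [(b'data', amp.String())]
    response = [(b'response', amp.String())]


class TimingOutChild(child.AMPChild):
    def hang(self):
        # Never fires: the parent's timeout is expected to terminate us.
        return defer.Deferred()
    HangForever.responder(hang)

    def ping(self, data):
        return {'response': data}
    Ping.responder(ping)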
def test_parentProtocolChange(self):
    """
    Test that the parent can use an AMP protocol too.
    """
    DATA = b"CIAO"
    APPEND = b"123"

    class Parent(amp.AMP):
        def pong(self, data):
            return {'response': DATA + APPEND}
        Pong.responder(pong)

    pp = pool.ProcessPool(ampChild=Child, ampParent=Parent)

    def _checks(_):
        return pp.doWork(Ping, data=DATA).addCallback(
            lambda response: self.assertEquals(response['response'],
                                               DATA + APPEND))

    return pp.start().addCallback(_checks).addCallback(lambda _: pp.stop())
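# The Child used above and the Ping/Pong command pair are not defined in
# this listing.  A hedged sketch of the idea: the child handles Ping by
# calling Pong back on its parent connection and returning whatever the
# custom Parent protocol answers, which is how the test observes the
# parent-side responder.
from twisted.protocols import amp
from ampoule import child


class Ping(amp.Command):
    arguments = [(b'data', amp.String())]
    response = [(b'response', amp.String())]


class Pong(amp.Command):
    arguments = [(b'data', amp.String())]
    response = [(b'response', amp.String())]


class Child(child.AMPChild):
    def ping(self, data):
        # Relay to the parent process and forward its answer unchanged.
        return self.callRemote(Pong, data=data)
    Ping.responder(ping)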
def test_checkStateInPool(self):
    """
    Test that busy and ready lists are correctly maintained.
    """
    pp = pool.ProcessPool(ampChild=WaitingChild)
    DATA = b"foobar"

    def _checks(_):
        d = pp.callRemote(First, data=DATA)
        self.assertEquals(pp.started, True)
        self.assertEquals(pp.finished, False)
        self.assertEquals(len(pp.processes), pp.min)
        self.assertEquals(len(pp._finishCallbacks), pp.min)
        self.assertEquals(len(pp.ready), pp.min - 1)
        self.assertEquals(len(pp.busy), 1)
        child = pp.busy.pop()
        pp.busy.add(child)
        child.callRemote(Second)
        return d

    return pp.start().addCallback(_checks).addCallback(lambda _: pp.stop())
def setUp(self):
    """
    Set up the proxy service and the client connection to the proxy
    service in order to run calls through them.

    Inspiration comes from twisted.test.test_amp.
    """
    self.pp = pool.ProcessPool()
    self.svc = service.AMPouleService(self.pp, child.AMPChild, 0, "")
    self.svc.startService()
    self.proxy_port = self.svc.server.getHost().port
    self.clientFactory = ClientFactory()
    self.clientFactory.protocol = ClientAMP
    d = self.clientFactory.onMade = defer.Deferred()
    self.clientConn = reactor.connectTCP("127.0.0.1", self.proxy_port,
                                         self.clientFactory)
    self.addCleanup(self.clientConn.disconnect)
    self.addCleanup(self.svc.stopService)

    def setClient(_):
        self.client = self.clientFactory.theProto

    return d.addCallback(setClient)
class JustEatSource(ChickenSource):
    NAME = "JustEat"
    MENUS = True
    NEEDS_POSTCODE = True
    POOL = pool.ProcessPool(FetchChickenPlace, min=8, max=15)
    MENU_POOL = pool.ProcessPool(FetchChickenMenu, min=4, max=4)

    @defer.inlineCallbacks
    def Setup(self):
        yield self.POOL.start()
        reactor.addSystemEventTrigger("before", "shutdown", self.POOL.stop)
        reactor.addSystemEventTrigger("before", "shutdown",
                                      self.MENU_POOL.stop)
        defer.returnValue(None)

    @cache.CacheResult("menu")
    @defer.inlineCallbacks
    def GetPlaceMenu(self, place_id):
        '''
        I take an ID and I fetch the menu from the website. Go me!
        '''
        result = yield self.MENU_POOL.doWork(FetchChickenMenuCommand,
                                             id=place_id)
        defer.returnValue(result["response"])

    @cache.CacheResult("places")
    @defer.inlineCallbacks
    def GetAvailablePlaces(self, location):
        log.msg("Starting JustEat")
        returner = {}
        log.msg("Opening just eat page")
        just_eat_page = yield getPage(BASE_URL.format(location.postcode),
                                      agent=IOS_USER_AGENT)
        parser = get_parser(just_eat_page)
        open_places_tag = parser.find(id="OpenRestaurants")
        if open_places_tag is None:
            defer.returnValue({})

        page_places = {}
        for place_root_tag in open_places_tag.findAll("li"):
            place = {"title": place_root_tag.find("h2").text.strip()}
            types_of_food = set([
                x.strip() for x in place_root_tag.find(
                    "p", attrs={"class": "cuisineTypeList"}
                ).text.lower().split(",")
            ])
            if not ALLOWED_FOOD_TYPES.intersection(types_of_food):
                print "Skipping place %s" % place["title"]
                continue
            place["identifier"] = place_root_tag.find("a")["href"]
            page_places[place_root_tag["data-restaurantid"]] = place

        if not page_places:
            defer.returnValue({})

        places_from_db, places_with_no_chicken = yield db.getPlacesFromDatabase(
            self.NAME, page_places.keys())
        returner.update(places_from_db)
        places_not_in_db = [
            i for i in set(page_places.keys()).difference(
                set([x for x in places_from_db.keys()]))
            if not i in places_with_no_chicken
        ]
        print len(page_places.keys())
        print "%s places not in db" % len(places_not_in_db)

        if places_not_in_db:
            futures = [
                self.POOL.doWork(FetchChickenPlaceCommand, id=id,
                                 info=json.dumps(page_places[id]))
                for id in places_not_in_db
            ]
            results = yield defer.DeferredList(futures)
            to_add = {}
            for success, result in results:
                if success:
                    if result["has_chicken"]:
                        place_dict = json.loads(result["place"])
                        place_dict["Location"] = GeoPoint(
                            *place_dict["Location"])
                        place = ChickenPlace(**place_dict)
                        returner.update({result["id"]: place})
                        to_add[result["id"]] = place
                    else:
                        to_add[result["id"]] = None
            if len(to_add):
                db.addPlacesToDatabase(self.NAME, to_add)

        defer.returnValue(returner)
def main():
    from InputOutput import interprocessChildProt
    from ampoule import pool
    from InputOutput import interprocessParentProt
    from ampoule import main as ampouleMain

    procStarter = ampouleMain.ProcessStarter(
        bootstrap=interprocessChildProt.childBootstrap)
    global pp
    pp = pool.ProcessPool(
        interprocessChildProt.GUIProcessProtocol,
        ampParent=interprocessParentProt.MainProcessProtocol,
        starter=procStarter, recycleAfter=0, min=1, max=1)
    pp.start()
    pp.ampParent.processPool = pp  # Self referential much?
    Demeter.committer.receiveInterprocessProtocolInstance(pp)

    def checkForUpdates():
        # One catch, any result available out of this will only be
        # visible after next boot of the app.
        d = getPage('http://192.30.33.227')

        def processReceivedVersion(reply):
            if int(reply[:3]) > globals.appVersion:
                globals.updateAvailable = True
                print(
                    'There is an update available, local version is %d '
                    'and gathered version is %s.'
                    % (globals.appVersion, reply))
            else:
                globals.updateAvailable = False
                print('There is no update available')
            globals.setUpdateAvailable(globals.updateAvailable)

        d.addCallback(processReceivedVersion)

    checkForUpdates()
    d = Demeter.checkUPNPStatus(2000)

    def maybeCommit():
        thirtySecondsAgo = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=10)  # FIXME make it 30 in live.
        if (Demeter.committer.lastCommit < thirtySecondsAgo
                and Demeter.committer.commitInProgress is False):
            print('Commit loop decided to commit.')
            Demeter.committer.commit()

    persephone = LoopingCall(maybeCommit)
    persephone.start(10)  # this should be 60 under normal circumstances.

    marduk = LoopingCall(eventLoop.marduk,
                         aetherProtocol.aetherProtocolFactoryInstance,
                         Demeter.committer)
    marduk.start(60)  # FIXME
    # marduk.start(60)  # 5 minutes normally, which is 300

    listenerEndpoint = SSL4ServerEndpoint(reactor, aetherListeningPort,
                                          globals.AetherContextFactory())
    listenerEndpoint.listen(aetherProtocol.aetherProtocolFactoryInstance)

    # def checksan():
    #     d = pp.callRemote(interprocessChildProt.checkSanity)
    #     d.addCallback(print)
    # def bootstuff():
    #     d = pp.callRemote(interprocessChildProt.bootGUI)
    #     d.addCallback(print)
    # reactor.callLater(2, bootstuff)
    # reactor.callLater(20, checksan)
    # reactor.callLater(5, aetherProtocol.connectWithIP, '151.236.11.192', 39994)  # 192 ends

    reactor.run()