def test_async_ondone(self):
    """The ondone callback must fire with the completed job."""
    boss = s_async.Boss()
    boss.runBossPool(3)

    results = {}
    done_evt = threading.Event()

    def ondone(job):
        results['job'] = job
        done_evt.set()

    def woot():
        return 10

    jid = s_async.jobid()
    boss.initJob(jid, task=s_async.newtask(woot), ondone=ondone)

    # Wait for the callback rather than polling the boss.
    self.true(done_evt.wait(timeout=1))

    job = results.get('job')
    self.eq(job[1].get('ret'), 10)

    boss.fini()
def test_async_timeout(self):
    """
    Jobs which exceed their timeout must error out with HitMaxTime,
    both via wait() (err recorded on the job) and via sync() (raised).
    """
    boss = s_async.Boss()

    def myjob():
        time.sleep(0.2)

    def mylongjob():
        time.sleep(2.0)

    jid = s_async.jobid()
    job = boss.initJob(jid, task=(myjob, (), {}), timeout=0.01)
    boss.wait(jid)
    self.eq(job[1]['err'], 'HitMaxTime')

    # Ensure the boss.sync() fails as well
    jid = s_async.jobid()
    job = boss.initJob(jid, task=(mylongjob, (), {}), timeout=0.1)

    # Try a sync() call which times out.
    # (The context manager result was bound to an unused `cm`; dropped.)
    with self.raises(HitMaxTime):
        boss.sync(job, timeout=0.01)

    boss.fini()
def __init__(self, relay, plex=None, sock=None):
    """
    Telepath proxy constructor.

    Args:
        relay: LinkRelay() used to (re)connect to the remote side.
        plex: optional shared socket Plex; when None a private Plex is
              created and fini'd along with this proxy.
        sock: optional pre-connected socket; when None we connect via
              the relay.
    """
    s_eventbus.EventBus.__init__(self)
    self.onfini(self._onProxyFini)

    # NOTE: the _tele_ prefixes are designed to prevent accidental
    # derefs with overlapping names from working correctly
    self._tele_sid = None

    self._tele_q = s_queue.Queue()
    self._tele_pushed = {}

    # allow the server to give us events to fire back on
    # reconnect so we can help re-init our server-side state
    self._tele_reminders = []

    if plex is None:
        plex = s_socket.Plex()
        # NOTE(review): fini registered here so we only tear down a plex
        # we created ourselves — confirm a caller-provided plex is meant
        # to outlive the proxy.
        self.onfini(plex.fini)

    self._tele_plex = plex
    self._tele_boss = s_async.Boss()

    self._tele_plex.on('link:sock:mesg', self._onLinkSockMesg)

    # Wire protocol-level message handlers before any socket exists,
    # so no early message can race past registration.
    self._raw_on('tele:yield:init', self._onTeleYieldInit)
    self._raw_on('tele:yield:item', self._onTeleYieldItem)
    self._raw_on('tele:yield:fini', self._onTeleYieldFini)

    self._raw_on('tele:reminder', self._onTeleReminder)

    self._raw_on('fifo:xmit', self._onFifoXmit)
    self._raw_on('job:done', self._tele_boss.dist)
    self._raw_on('sock:gzip', self._onSockGzip)
    self._raw_on('tele:call', self._onTeleCall)
    self._raw_on('tele:sock:init', self._onTeleSockInit)

    # Consumer thread draining our internal queue.
    self._tele_cthr = self.consume(self._tele_q)

    self._tele_ons = {}
    self._tele_sock = None
    self._tele_relay = relay  # LinkRelay()
    self._tele_link = relay.link
    self._tele_yields = {}
    self._tele_csides = {}
    self._tele_reflect = None

    # obj name is path minus leading "/"
    self._tele_name = relay.link[1].get('path')[1:]

    if sock is None:
        sock = self._tele_relay.connect()

    self._initTeleSock(sock=sock)
def test_async_sugar(self):
    """initJob() with no args allocates a jid; done() resolves it."""
    boss = s_async.Boss()

    job = boss.initJob()
    jid = job[0]

    boss.done(jid, 5)
    boss.wait(jid)

    self.eq(job[1].get('ret'), 5)

    boss.fini()
def test_async_custom_pool_basics(self):
    # Demonstrate Boss use with a custom thread pool.
    boss = s_async.Boss()
    my_pool = s_threads.Pool(3, maxsize=8)

    results = {}

    def jobmeth(x, y=20):
        return x + y

    def jobdork(x, y=20):
        raise Exception('hi')

    def jobdone(job):
        results[job[1].get('name')] = job

    jid1 = s_async.jobid()
    jid2 = s_async.jobid()

    job1 = boss.initJob(jid1, task=(jobmeth, (3, ), {}), name='job1', ondone=jobdone)
    job2 = boss.initJob(jid2, task=(jobdork, (3, ), {}), name='job2', ondone=jobdone)

    self.eq(job1[0], jid1)
    self.eq(job2[0], jid2)

    # Test __iter__ since we've got jobs in the boss that haven't been run.
    self.eq(len(list(boss)), 2)

    # Hand the jobs to our own pool instead of a boss-owned one.
    my_pool.call(boss._runJob, job1)
    my_pool.call(boss._runJob, job2)

    boss.wait(jid1, timeout=1)
    boss.wait(jid2, timeout=1)

    ret1 = results.get('job1')
    self.nn(ret1)
    self.eq(ret1[1]['ret'], 23)

    ret2 = results.get('job2')
    self.nn(ret2)
    self.eq(ret2[1]['err'], 'Exception')
    self.eq(ret2[1]['errmsg'], 'hi')

    boss.fini()
def test_async_wait_timeout(self):
    """wait() returns False when it times out, True once the job lands."""

    def longtime():
        time.sleep(0.1)

    boss = s_async.Boss()
    boss.runBossPool(1)

    jid = s_async.jobid()
    boss.initJob(jid, task=s_async.newtask(longtime))

    self.false(boss.wait(jid, timeout=0.01))
    self.true(boss.wait(jid, timeout=1))

    boss.fini()
def test_async_basics(self):
    """Core Boss workflow: init, run, ondone, and job bookkeeping."""
    boss = s_async.Boss()

    results = {}

    def jobmeth(x, y=20):
        return x + y

    def jobdork(x, y=20):
        raise Exception('hi')

    def jobdone(job):
        results[job[1].get('name')] = job

    jid1 = s_async.jobid()
    jid2 = s_async.jobid()

    job1 = boss.initJob(jid1, task=(jobmeth, (3, ), {}), name='job1', ondone=jobdone)
    job2 = boss.initJob(jid2, task=(jobdork, (3, ), {}), name='job2', ondone=jobdone)

    self.eq(job1[0], jid1)

    # Each _runJob() call should retire one job from the boss.
    self.eq(len(boss.jobs()), 2)
    boss._runJob(job1)
    self.eq(len(boss.jobs()), 1)
    boss._runJob(job2)
    self.eq(len(boss.jobs()), 0)

    ret1 = results.get('job1')
    self.nn(ret1)
    self.eq(ret1[1]['ret'], 23)

    ret2 = results.get('job2')
    self.nn(ret2)
    self.eq(ret2[1]['err'], 'Exception')

    boss.fini()
def test_async_wait_syntimeout(self):
    """A scoped 'syntimeout' acts as the default wait() timeout."""

    def longtime():
        time.sleep(0.1)

    boss = s_async.Boss()
    boss.runBossPool(1)

    jid = s_async.jobid()
    boss.initJob(jid, task=s_async.newtask(longtime))

    with s_scope.enter({'syntimeout': 0.01}):
        self.false(boss.wait(jid))

    self.true(boss.wait(jid, timeout=1))

    boss.fini()
def test_async_sync(self):
    """
    boss.sync() raises HitMaxTime when the job outlives the timeout,
    but a later sync() on the same job still returns its result.
    """
    boss = s_async.Boss()
    boss.runBossPool(1)

    def myjob():
        time.sleep(0.1)
        return True

    jid = s_async.jobid()
    job = boss.initJob(jid, task=(myjob, (), {}), timeout=0.2)

    # Try a sync() call which times out.
    # (The context manager result was bound to an unused `cm`; dropped.)
    with self.raises(HitMaxTime):
        boss.sync(job, timeout=0.01)

    self.false(job[1].get('status'))

    # Now sync() again and get the job ret
    ret = boss.sync(job)
    self.true(ret)

    boss.fini()
def __init__(self, **conf):
    """
    Build the web application: config bases, tornado loop/server,
    an async Boss with its worker pool, and the IO thread.
    """
    s_config.Config.__init__(self)
    s_daemon.DmonConf.__init__(self)

    app_opts = conf.get('app', {})
    srv_opts = conf.get('server', {})
    boss_opts = conf.get('boss', {})

    pool_min = boss_opts.get('minsize', 8)
    pool_max = boss_opts.get('maxsize', 128)

    tornado.web.Application.__init__(self, **app_opts)

    self.loop = tornado.ioloop.IOLoop()
    self.serv = tornado.httpserver.HTTPServer(self, **srv_opts)

    self.boss = s_async.Boss()
    self.boss.runBossPool(pool_min, maxsize=pool_max)

    self.iothr = self._runWappLoop()
    self.onfini(self._onWappFini)
def __init__(self, core=None, opts=None, *args, **kwargs):
    """
    Web API manager constructor.

    Args:
        core: optional Cortex; a ram:// cortex is opened (and fini'd
              with us) when not supplied.
        opts: optional dict of config options applied via setConfOpts().
        kwargs: may carry 'ioloop' (tornado IOLoop to reuse) and
                'content_type_skip' (iterable of content-types to skip
                automatic decoding for).

    Raises:
        s_common.BadConfValu: on out-of-range thread pool / client opts.
    """
    s_config.Config.__init__(self)

    # Runtime-settable options
    self.onConfOptSet(CACHE_ENABLED, self._onSetWebCache)
    self.onConfOptSet(CACHE_TIMEOUT, self._onSetWebCacheTimeout)

    # Things we need prior to loading in conf values
    self.web_boss = s_async.Boss()
    self.web_cache = s_cache.Cache()
    self.web_cache_enabled = False

    if opts:
        self.setConfOpts(opts)

    # Keys every registered API config must provide.
    self._web_required_keys = ('namespace', 'doc', 'apis')

    self._web_apis = {}
    self._web_namespaces = set([])
    self._web_docs = {}
    self._web_default_http_args = {}  # Global request headers per namespace

    # Check configable options before we spin up any more resources
    max_clients = self.getConfOpt(MAX_CLIENTS)
    pool_min = self.getConfOpt(MIN_WORKER_THREADS)
    pool_max = self.getConfOpt(MAX_WORKER_THREADS)
    if pool_min < 1:
        raise s_common.BadConfValu(name=MIN_WORKER_THREADS, valu=pool_min,
                                   mesg='web:worker:threads:min must be greater than 1')
    if pool_max < pool_min:
        raise s_common.BadConfValu(name=MAX_WORKER_THREADS, valu=pool_max,
                                   mesg='web:worker:threads:max must be greater than the web:worker:threads:min')
    if max_clients < 1:
        raise s_common.BadConfValu(name=MAX_CLIENTS, valu=max_clients,
                                   mesg='web:tornado:max_clients must be greater than 1')

    # Tornado Async
    loop = kwargs.get('ioloop')
    if loop is None:
        loop = t_ioloop.IOLoop()
    self.web_loop = loop
    self.web_client = t_http.AsyncHTTPClient(io_loop=self.web_loop, max_clients=max_clients)
    self.web_iothr = self._runIoLoop()

    # Synapse Async and thread pool
    self.web_pool = s_threads.Pool(pool_min, pool_max)

    # Synapse Core and ingest tracking
    if core is None:
        core = s_cortex.openurl('ram://')
        # NOTE(review): fini registered only for a core we opened
        # ourselves; a caller-provided core is left alive — confirm.
        self.onfini(core.fini)
    self.web_core = core
    self._web_api_ingests = collections.defaultdict(list)
    self._web_api_gest_opens = {}

    # Setup Fini handlers
    self.onfini(self._onHypoFini)

    # List of content-type headers to skip automatic decoding
    self._web_content_type_skip = set([])
    self.webContentTypeSkipAdd('application/octet-stream')
    for ct in kwargs.get('content_type_skip', []):
        self.webContentTypeSkipAdd(ct)