def test_async_timeout(self):
    '''A job whose timeout elapses is marked with a HitMaxTime error.'''
    boss = s_async.Boss()

    def quick():
        time.sleep(0.2)

    def slow():
        time.sleep(2.0)

    iden = s_async.jobid()
    job = boss.initJob(iden, task=(quick, (), {}), timeout=0.01)
    boss.wait(iden)
    self.eq(job[1]['err'], 'HitMaxTime')

    # Ensure the boss.sync() fails as well
    iden = s_async.jobid()
    job = boss.initJob(iden, task=(slow, (), {}), timeout=0.1)

    # Try a sync() call which times out.
    with self.raises(HitMaxTime):
        boss.sync(job, timeout=0.01)

    boss.fini()
def test_async_custom_pool_basics(self):
    '''Demonstrate Boss use with a custom thread pool.'''
    boss = s_async.Boss()
    pool = s_threads.Pool(3, maxsize=8)

    results = {}

    def addjob(x, y=20):
        return x + y

    def failjob(x, y=20):
        raise Exception('hi')

    def record(job):
        results[job[1].get('name')] = job

    iden1 = s_async.jobid()
    iden2 = s_async.jobid()

    job1 = boss.initJob(iden1, task=(addjob, (3, ), {}), name='job1', ondone=record)
    job2 = boss.initJob(iden2, task=(failjob, (3, ), {}), name='job2', ondone=record)

    self.eq(job1[0], iden1)
    self.eq(job2[0], iden2)

    # Test __iter__ since we've got jobs in the boss that haven't been run.
    self.eq(len(list(boss)), 2)

    pool.call(boss._runJob, job1)
    pool.call(boss._runJob, job2)
    boss.wait(iden1, timeout=1)
    boss.wait(iden2, timeout=1)

    done1 = results.get('job1')
    self.nn(done1)
    self.eq(done1[1]['ret'], 23)

    done2 = results.get('job2')
    self.nn(done2)
    self.eq(done2[1]['err'], 'Exception')
    self.eq(done2[1]['errmsg'], 'hi')

    boss.fini()
def test_async_ondone(self):
    '''The ondone handler fires with the completed job tufo.'''
    boss = s_async.Boss()
    boss.runBossPool(3)

    seen = {}
    fired = threading.Event()

    def ondone(job):
        seen['job'] = job
        fired.set()

    def woot():
        return 10

    iden = s_async.jobid()
    boss.initJob(iden, task=s_async.newtask(woot), ondone=ondone)

    self.true(fired.wait(timeout=1))

    job = seen.get('job')
    self.eq(job[1].get('ret'), 10)

    boss.fini()
def test_async_basics(self):
    '''Jobs run via _runJob deliver results (or errors) to their ondone handlers.'''
    boss = s_async.Boss()
    results = {}

    def addjob(x, y=20):
        return x + y

    def failjob(x, y=20):
        raise Exception('hi')

    def record(job):
        results[job[1].get('name')] = job

    iden1 = s_async.jobid()
    iden2 = s_async.jobid()

    job1 = boss.initJob(iden1, task=(addjob, (3, ), {}), name='job1', ondone=record)
    job2 = boss.initJob(iden2, task=(failjob, (3, ), {}), name='job2', ondone=record)

    self.eq(job1[0], iden1)
    self.eq(len(boss.jobs()), 2)

    boss._runJob(job1)
    self.eq(len(boss.jobs()), 1)

    boss._runJob(job2)
    self.eq(len(boss.jobs()), 0)

    done1 = results.get('job1')
    self.nn(done1)
    self.eq(done1[1]['ret'], 23)

    done2 = results.get('job2')
    self.nn(done2)
    self.eq(done2[1]['err'], 'Exception')

    boss.fini()
def test_async_wait_timeout(self):
    '''wait() returns False when the timeout expires and True once the job ends.'''
    def sleeper():
        time.sleep(0.1)

    boss = s_async.Boss()
    boss.runBossPool(1)

    iden = s_async.jobid()
    boss.initJob(iden, task=s_async.newtask(sleeper))

    self.false(boss.wait(iden, timeout=0.01))
    self.true(boss.wait(iden, timeout=1))

    boss.fini()
def test_async_wait_syntimeout(self):
    '''A syntimeout scope variable bounds wait() like an explicit timeout.'''
    def sleeper():
        time.sleep(0.1)

    boss = s_async.Boss()
    boss.runBossPool(1)

    iden = s_async.jobid()
    boss.initJob(iden, task=s_async.newtask(sleeper))

    with s_scope.enter({'syntimeout': 0.01}):
        self.false(boss.wait(iden))

    self.true(boss.wait(iden, timeout=1))

    boss.fini()
def test_async_sync(self):
    '''sync() raises HitMaxTime on timeout, then returns the result on retry.'''
    boss = s_async.Boss()
    boss.runBossPool(1)

    def sleeper():
        time.sleep(0.1)
        return True

    iden = s_async.jobid()
    job = boss.initJob(iden, task=(sleeper, (), {}), timeout=0.2)

    # Try a sync() call which times out.
    with self.raises(HitMaxTime):
        boss.sync(job, timeout=0.01)
    self.false(job[1].get('status'))

    # Now sync() again and get the job ret
    self.true(boss.sync(job))

    boss.fini()
def fireWebApi(self, name, *args, **kwargs): ''' Fire a request to a registered API. The API response is serviced by a thread in the Hypnos thread pool, which will fire either an event on the Hypnos service bus or a caller provided callback function. The default action is to fire an event on the service bus with the same name as the API itself. A flattened version of the response, error information and the Boss job id will be stamped into the kwargs passed along to the the callbacks. If the API name has a ingest associated with it, the response data will be pushed into a generator created according to the ingest open directive. The flattened response is a dictionary, accessed from kwargs using the 'resp' key. It contains the following information: * request: A dictionary containing the requested URL and headers. This is guaranteed to exist. It has the following values: - url: URL requested by the remote server. - headers: Headers passed to the remote server. * code: HTTP Response code. This will only be present on a successfull request or if a HTTPError is encountered. * data: This may be one of three values: - A SpooledTemporaryFile containing the raw bytes of the response. This will be present if there is a ingest associated with the named response. A corresponding generator will be created and placed in the "ingdata" field and consumed by the ingest. Post-consumption, seek(0) will be called on the file-like object. If there are multiple post-ingest consumers of the job, each one may want to call seek(0) on the file object before consuming it. - The decoded data as a string or a decoded json blob. We will attempt to parse the data based on the Content-Type header. This is a best effort decoding. - In the event that the best effort decoding fails, the response will be available as raw bytes. * effective_url: The effective url returned by the server. By default, Tornado will follow redirects, so this URL may differ from the request URL. 
It will only be present on a successful request or if a HTTPError is encountered. * headers: The response headers. It will only be present on a successful request or if a HTTPError is encountered. The flattened error is a dictionary, accessed from kwargs using the 'errinfo' key. It mimics the synapse excinfo output, but without investigating a stack trace for performance reasons. It contains the following information: * err: The Exception class raised during the request. * errmsg: The str() representation of the exception. * errfile: Empty string. * errline: Empty string. The Hypnos boss job id is a str which can be accessed from kwargs using the 'jid' key. Notes: The following items may be used via kwargs to set request parameters: * api_args: This should be a dictionary containing any required or optional arguments the API rquires. The following items may be passed via kwargs to change the job execution parameters: * callback: A function which will be called by the servicing thread. By default, this will be wrapped to fire boss.err() if excinfo is present in the callback's kwargs. * ondone: A function to be executed by the job:fini handler when the job has been completed. If the api we're firing has an ingest associated with it, the response data may not be available to be consumed by the ondone handler. * job_timeout: A timeout on how long the job can run from the perspective of the boss. This isn't related to the request or connect timeouts. * wrap_callback: By default, the callback function is wrapped to perform error checking (and fast job failure) in the event of an error encountered during the request, and additional processing of the HTTP response data to perform decoding and content-type processing. If this value is set to false, the decorator will not be applied to a provided callback function, and the error handling and additional data procesing will be the responsibility of any event handlers or the provided callback function. 
The fast failure behavior is handled by boss.err() on the job associated with the API call. A HTTP body can be provided to the request by passing its contents in by adding the “req_body” value to api_args argument. See the Nyx object documentation for more details. If caching is enabled, the caching will be performed as the first thing done by the worker thread handling the response data. This is done separately from the wrap_callback step mentioned above. Args: name (str): Name of the API to send a request for. *args: Additional args passed to the callback functions. **kwargs: Additional args passed to the callback functions or for changing the job execution. Returns: str: String containing a Job ID which can be used to look up a job against the Hypnos.web_boss object. Raises: NoSuchName: If the requested API name does not exist. ''' # First, make sure the name is good nyx = self.getNyxApi(name) # Fail fast on a bad name before creating a reference in the self.boss # for the job. if nyx is None: raise s_common.NoSuchName(name=name, mesg='Invalid API name') # Grab things out of kwargs callback = kwargs.pop('callback', None) ondone = kwargs.pop('ondone', None) job_timeout = kwargs.pop('job_timeout', None) wrap_callback = kwargs.pop('wrap_callback', True) api_args = kwargs.get('api_args', {}) if not callback: # Setup the default callback def default_callback(*cbargs, **cbkwargs): self.fire(name, **{'args': cbargs, 'kwargs': cbkwargs}) callback = default_callback # Wrap the callback so that it will fail fast in the case of a request error. if wrap_callback: callback = self._webFailRespWrapper(callback) # If the cache is enabled, wrap the callback so we cache the result before # The job is executed. 
if self.web_cache_enabled: callback = self._webCacheRespWrapper(callback) # Construct the job tufo jid = s_async.jobid() t = s_async.newtask(callback, *args, **kwargs) job = self.web_boss.initJob(jid, task=t, ondone=ondone, timeout=job_timeout) # Create our Async callback function - it enjoys the locals(). def response_nommer(resp): job_kwargs = job[1]['task'][2] # Stamp the job id and the web_api_name into the kwargs dictionary. job_kwargs['web_api_name'] = name job_kwargs['jid'] = job[0] if resp.error: _e = resp.error _execinfo = { 'err': _e.__class__.__name__, 'errmsg': str(_e), 'errfile': '', 'errline': '', } job_kwargs['excinfo'] = _execinfo resp_dict = self._webFlattenHttpResponse(resp) job_kwargs['resp'] = resp_dict self.web_pool.call(self.web_boss._runJob, job) # Construct the request object req = nyx.buildHttpRequest(api_args) self.web_loop.add_callback(self.web_client.fetch, req, response_nommer) return jid