def handle(self, request, response, mv, *args, **kwargs):
    """Run the interceptor pre-handle chain, invoke the handler, then run
    the post-handle chain; resolves (via gen.Return) to the handler result.

    An InterceptError raised by a pre-handle interceptor aborts the whole
    request (returns None); one raised by a post-handle interceptor only
    stops the remaining post-handle interceptors.
    """
    # Run the interceptors' pre-handle chain.
    for interceptor in self.interceptors:
        try:
            return_value = interceptor.pre_handle(
                request, response, mv)
            if gen.is_future(return_value):
                yield return_value
        except InterceptError:
            # Abort: resolve to None without invoking the handler.
            raise gen.Return()
    # Invoke the handler itself; it may return a plain value or a future.
    return_value = self.handler.invoke(*args, **kwargs)
    if gen.is_future(return_value):
        result = yield return_value
    else:
        result = return_value
    # Run the interceptors' post-handle chain.
    for interceptor in self.interceptors:
        try:
            return_value = interceptor.post_handle(
                request, response, mv)
            if gen.is_future(return_value):
                yield return_value
        except InterceptError:
            # Stop the remaining post-handle interceptors, keep the result.
            break
    raise gen.Return(result)
def post(self):
    """Process the POST body and reply with a JSON-encoded result.

    Maps ValueError to 400, HTTPError to its own status, and
    FilterException to a 500 with a JSON-RPC style error payload.
    """
    try:
        resp = yield self.process(self.request.body)
    except ValueError as e:
        self.clear()
        self.set_status(400)
        self.finish(str(e))
    except tornado.web.HTTPError as e:
        self.clear()
        self.set_status(e.status_code)
        self.finish(e.log_message)
        raise e
    except FilterException as e:
        self.clear()
        self.set_status(500)
        payload = {
            'name': 'JSONRPCError',
            'code': 100,
            'message': str(e),
            'error': str(e),
        }
        self.finish(dumps({'result': None, 'error': payload, 'id': None}))
        raise e
    else:
        # process() may itself hand back a future; resolve it first.
        if is_future(resp):
            resp = yield resp
        self.write(dumps(resp))
        self.set_header("Content-Type", "application/json")
def __access_token(*args, **kwargs):
    """Inject a valid access token into the wrapped call ``fn``.

    Refreshes the stored token when it has expired, and retries the call
    once when the server reports errcode 40014 (access-token timeout).
    Resolves (via gen.Return) to the wrapped call's result, possibly
    post-processed by the future's ``_wrap_result`` hook, or to the
    falsy update result when the token could not be refreshed.
    """
    accessToken, expireTime = self.core.atStorage.get_access_token()
    if expireTime < time.time():
        # Stored token already expired: refresh it before calling.
        updateResult = yield self.update_access_token()
        if not updateResult:
            raise gen.Return(updateResult)
        accessToken, expireTime = self.core.atStorage.get_access_token()
    kwargs['accessToken'] = accessToken
    future = fn(*args, **kwargs)
    if gen.is_future(future):
        r = yield future
    else:
        r = future
    # Detect a server-side token timeout (errcode 40014). Only Response
    # objects can carry it; checking isinstance first avoids relying on
    # AttributeError for control flow (and a bare `except:` that would
    # also swallow SystemExit/GeneratorExit).
    isTokenTimeout = False
    if isinstance(r, Response):
        try:
            isTokenTimeout = r.json().get('errcode') == 40014
        except Exception:
            isTokenTimeout = False
    if isTokenTimeout:
        # Refresh the token once and retry the call with the new token.
        updateResult = yield self.update_access_token()
        if not updateResult:
            raise gen.Return(updateResult)
        accessToken, expireTime = self.core.atStorage.get_access_token()
        kwargs['accessToken'] = accessToken
        future = fn(*args, **kwargs)
        # Guard the retry the same way as the first call: yielding a
        # plain value would raise BadYieldError under tornado.gen.
        if gen.is_future(future):
            r = yield future
        else:
            r = future
    wrap_fn = getattr(future, '_wrap_result', None)
    if wrap_fn is not None:
        try:
            r = wrap_fn(r)
        except Exception:  # narrowed from a bare except
            r = ReturnValue({'errcode': -10005,
                             'errmsg': traceback.format_exc()})
    raise gen.Return(r)
def post(self):
    """Handle a POST request: run it through ``process`` and emit JSON.

    Error mapping: ValueError -> 400, tornado HTTPError -> its status,
    FilterException -> 500 with a JSON-RPC error object.
    """
    try:
        outcome = yield self.process(self.request.body)
    except ValueError as e:
        self.clear()
        self.set_status(400)
        self.finish(str(e))
    except tornado.web.HTTPError as e:
        self.clear()
        self.set_status(e.status_code)
        self.finish(e.log_message)
        raise e
    except FilterException as e:
        self.clear()
        self.set_status(500)
        error = dict(
            name='JSONRPCError',
            code=100,
            message=str(e),
            error=str(e)
        )
        self.finish(dumps(dict(result=None, error=error, id=None)))
        raise e
    else:
        # Content type is declared before the body is written here.
        self.set_header("Content-Type", "application/json")
        if is_future(outcome):
            outcome = yield outcome
        self.write(dumps(outcome))
def maybe_async(f, *args, **kwargs):
    """Call ``f(*args, **kwargs)``, which may or may not be asynchronous,
    and always hand back a Future.

    The Future resolves to the function's return value (or to the value
    of the future it returned), and carries the exception info if the
    call raised synchronously.

    .. code-block:: python

        maybe_async(f, 1, 2).add_done_callback(on_done)
    """
    wrapper = gen.Future()
    try:
        outcome = f(*args, **kwargs)
    except Exception:
        # Synchronous failure: forward the exc info instead of raising.
        wrapper.set_exc_info(sys.exc_info())
        return wrapper
    if gen.is_future(outcome):
        _copy_future(wrapper, outcome)
    else:
        wrapper.set_result(outcome)
    return wrapper
def __access_token(*args, **kwargs):
    """Inject a valid access token into the wrapped call ``fn``.

    Uses ``__server.atStorage`` as the token store and ``tokenFn`` to
    refresh. Retries the call once when the server reports errcode
    40014 (access-token timeout). Resolves (via gen.Return) to the
    wrapped call's result, possibly post-processed by the future's
    ``_wrap_result`` hook, or to the falsy update result when the
    token could not be refreshed.
    """
    accessToken, expireTime = __server.atStorage.get_access_token()
    if expireTime < time.time():
        # Stored token already expired: refresh it before calling.
        updateResult = yield tokenFn()
        if not updateResult:
            raise gen.Return(updateResult)
        accessToken, expireTime = __server.atStorage.get_access_token()
    kwargs['accessToken'] = accessToken
    future = fn(*args, **kwargs)
    if gen.is_future(future):
        r = yield future
    else:
        r = future
    # Detect a server-side token timeout (errcode 40014). Only Response
    # objects can carry it; checking isinstance first avoids relying on
    # AttributeError for control flow (and a bare `except:` that would
    # also swallow SystemExit/GeneratorExit).
    isTokenTimeout = False
    if isinstance(r, Response):
        try:
            isTokenTimeout = r.json().get('errcode') == 40014
        except Exception:
            isTokenTimeout = False
    if isTokenTimeout:
        # Refresh the token once and retry the call with the new token.
        updateResult = yield tokenFn()
        if not updateResult:
            raise gen.Return(updateResult)
        accessToken, expireTime = __server.atStorage.get_access_token()
        kwargs['accessToken'] = accessToken
        future = fn(*args, **kwargs)
        # Guard the retry the same way as the first call: yielding a
        # plain value would raise BadYieldError under tornado.gen.
        if gen.is_future(future):
            r = yield future
        else:
            r = future
    wrap_fn = getattr(future, '_wrap_result', None)
    if wrap_fn is not None:
        try:
            r = wrap_fn(r)
        except Exception:  # narrowed from a bare except
            r = ReturnValue({'errcode': -10005,
                             'errmsg': traceback.format_exc()})
    raise gen.Return(r)
def wrapper(*a, **kw):
    """Drive the generator produced by ``f`` as a coroutine trampoline.

    Each value the generator sends is yielded up (awaited); the awaited
    result — or any exception raised while awaiting — is fed back into
    the generator via send()/throw(). A gen.Return escaping the
    generator is resolved (its value may itself be a future) and
    re-raised as this coroutine's result.
    """
    g = f(*a, **kw)
    thing = None
    try:
        if not(hasattr(g, 'send')):
            # f was not actually a generator function; nothing to drive.
            note('nogen?', f)
            return
        while True:
            # Pump the awaited result (or None initially) back in.
            thing = g.send(thing)
            try:
                thing = yield thing
            except Exception as e:
                # Awaiting failed: propagate the exception into the
                # generator so it can handle (or re-raise) it.
                note('ex down', type(e))
                thing = g.throw(e)
    except StopIteration:
        # Generator finished without an explicit return value.
        pass
    except gen.Return as e:
        # why this isn't default, no idea.
        value = e.value
        # The returned value may itself be (a chain of) futures.
        while gen.is_future(value):
            value = yield value
        raise gen.Return(value)
    except Exit:
        raise
    except:
        # Deliberate best-effort catch-all: log and swallow so one bad
        # handler does not take the loop down.
        note.alarm('error in ', f)
        traceback.print_exc()
    return
def invoke(self, *a, **kw):
    """Call the configured page handler, awaiting its result if it is a
    future, and resolve (via gen.Return) to the final value.

    Raises RuntimeError when no page handler has been configured.
    """
    if self.page_handler is None:
        raise RuntimeError("no page handler specified")
    outcome = self.page_handler(*a, **kw)
    if not gen.is_future(outcome):
        raise gen.Return(outcome)
    resolved = yield outcome
    raise gen.Return(resolved)
def _retire_workers(self):
    """Ask the scheduler to retire and close idle workers, then shrink
    the cluster accordingly."""
    with log_errors():
        retired = yield self.scheduler.retire_workers(remove=True,
                                                      close_workers=True)
        logger.info("Retiring workers %s", retired)
        pending = self.cluster.scale_down(retired)
        if gen.is_future(pending):
            yield pending
def process_hook(self, method):
    """Invoke the interceptor hook named ``method`` if it exists and is
    callable; await a future result. Exceptions are logged, not raised."""
    hook = getattr(self.interceptor, method, None)
    if not (hook and callable(hook)):
        return
    try:
        outcome = hook()
        if is_future(outcome):
            yield outcome
    except Exception as e:
        logger.exception(e)
def _retire_workers(self):
    """Retire idle workers (without scheduler-side removal), scale the
    cluster down, then explicitly drop each worker from the scheduler."""
    with log_errors():
        retired = yield self.scheduler.retire_workers(remove=False)
        logger.info("Retiring workers %s", retired)
        pending = self.cluster.scale_down(retired)
        if gen.is_future(pending):
            yield pending
        for address in retired:
            self.scheduler.remove_worker(address=address, safe=True)
def _w(*args, **kwargs):
    """Call the wrapped function; if it returns a future, block on it
    synchronously via the current IOLoop and return the resolved value."""
    outcome = wrapped_func.__func__(*args, **kwargs)
    if not gen.is_future(outcome):
        return outcome

    @gen.coroutine
    def _resolve():
        value = yield outcome
        raise gen.Return(value)

    return ioloop.IOLoop.current().run_sync(_resolve)
def prepare(self):
    """Dispatch the incoming tornado request through the framework's
    execution chain, following internal redirects up to the configured
    maximum, and render the resulting ModelAndView.

    External redirects (301/302) are written out immediately and finish
    the response; any exception is routed to the dispatcher's exception
    processor and still rendered.
    """
    self._prepare()
    request = Request.from_tornado_request(
        self.request, self.settings['application_context'])
    response = Response()
    mv = ModelAndView()
    try:
        self._dispatcher.remove_context_path(request)
        for _ in range(self._dispatcher.configurer.max_redirect_count):
            chain = self._dispatcher.get_execution_chain(request)
            return_value = self._dispatcher.handle_request(
                chain, request, response)
            if gen.is_future(return_value):
                result = yield return_value
            else:
                result = return_value
            if self._dispatcher.process_internal_redirect(
                    request, response):
                # Internal redirect: re-dispatch with the rewritten request.
                continue
            # Check whether this is an external redirect.
            if response.status_code in [
                    HTTPStatus.MovedPermanently,
                    HTTPStatus.MovedTemporarily
            ]:
                # Redirect responses carry no body; strip body-framing
                # headers, flush status/headers and finish immediately.
                response.remove_headers("Content-Length",
                                        "Transfer-Encoding")
                self.set_status(response.status_code, response.message)
                for one_header in response.get_headers():
                    self.set_header(*one_header)
                request.close()
                response.close()
                self.finish()
                return
            mv.merge(result)
            break
        else:
            raise MaxRedirectCountReached("max redirect count reached")
    except BaseException as exception:
        # Hand every failure (including the redirect-limit error) to the
        # dispatcher; rendering below still runs on the resulting mv.
        self._dispatcher.process_exception(exception, response, mv)
    body = self._dispatcher.rend(mv, response)
    self.set_status(response.status_code, response.message)
    for one_header in response.get_headers():
        self.set_header(*one_header)
    request.close()
    response.close()
    self.finish(body)
def _execute(self):
    # Python 2 syntax (`except Exception, e:`) — kept byte-identical.
    #
    # Run prepare() and then the method matching the command name, each of
    # which may return a future to await; both must ultimately return None.
    # Any failure is routed to handle_command_exception, and failures of
    # the handler itself are only logged.
    try:
        result = self.prepare()
        if is_future(result):
            result = yield result
        if result is not None:
            # prepare() is a hook, not a producer of values.
            raise TypeError("Expected None, got %r" % result)
        if self._finished:
            return
        # Look up the per-command handler method by lower-cased name.
        command_method = getattr(self, self.command.name.lower())
        result = command_method()
        if is_future(result):
            result = yield result
        if result is not None:
            raise TypeError("Expected None, got %r" % result)
        if self._auto_finish and not self._finished:
            self.finish()
    except Exception, e:
        try:
            self.handle_command_exception(e)
        except Exception:
            app_log.error("Exception in exception handler", exc_info=True)
def handle(self, request, response, handler_execution_chain):
    """Run the execution chain for a matched handler and post-process the
    result into a ModelAndView (resolved via gen.Return).

    Exceptions whose types are registered in the handler's
    ``exception_handlers`` map are caught and routed to the matching
    registered exception handler instead of propagating.
    """
    mv = ModelAndView()
    handler = handler_execution_chain.handler
    # Tuple of all exception types this handler knows how to handle.
    exceptions = tuple(handler.exception_handlers.keys())
    try:
        arg_spec = get_request_mapping(
            handler.page_handler)["arg_spec"]
        return_value = handler_execution_chain.handle(
            request, response, mv,
            *self.build_args(arg_spec, request, response, mv,
                             handler.matches))
        if gen.is_future(return_value):
            result = yield return_value
        else:
            result = return_value
    except exceptions as exc:
        # Find the first registered type the raised exception matches.
        for exception in exceptions:
            if isinstance(exc, exception):
                exception_handler, matches = \
                    handler.exception_handlers[exception]
                arg_spec = get_exception_handler(
                    exception_handler)["arg_spec"]
                return_value = exception_handler(
                    request, response, mv,
                    *self.build_args(arg_spec, request, response, mv,
                                     matches, exc))
                if gen.is_future(return_value):
                    result = yield return_value
                else:
                    result = return_value
                break
        else:
            # The except clause guarantees a match exists.
            raise RuntimeError("unreachable")
    mv = self.post_handle(response, handler, result, mv, False)
    raise gen.Return(mv)
def _retire_workers(self, workers=None):
    """Retire the given workers (or let the policy pick them) and scale
    the cluster down; resolves (via gen.Return) to the retired set."""
    if workers is None:
        workers = self.workers_to_close()
    with log_errors():
        retired = yield self.scheduler.retire_workers(workers=workers,
                                                      remove=True,
                                                      close_workers=True)
        if retired:
            logger.info("Retiring workers %s", retired)
            pending = self.cluster.scale_down(retired)
            if gen.is_future(pending):
                yield pending
        raise gen.Return(retired)
def fail_to(future):
    """Decorator factory: wrap a callback so any uncaught exception is
    written into ``future`` instead of being lost.

    Typical use is guarding ``add_done_callback`` callbacks:

    .. code-block:: python

        @fail_to(answer)
        def on_done(future):
            answer.set_result(bar())

    The wrapped callback returns None when an exception was captured.

    :param future: Future that receives the captured exception info.
    """
    assert is_future(future), 'you forgot to pass a future'

    def wrap(fn):
        @wraps(fn)
        def guarded(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except Exception:
                future.set_exc_info(sys.exc_info())
        return guarded

    return wrap
def _adapt(self):
    """One adaptive scaling cycle: scale up if recommended, then retire
    workers if recommended. Re-entrant calls are skipped."""
    if self._adapting:
        # Semaphore: a previous adapt cycle is still in flight.
        return
    self._adapting = True
    try:
        if self.should_scale_up():
            up_kwargs = self.get_scale_up_kwargs()
            pending = self.cluster.scale_up(**up_kwargs)
            if gen.is_future(pending):
                yield pending
        if self.should_scale_down():
            yield self._retire_workers()
    finally:
        self._adapting = False
def _retire_workers(self, workers=None):
    """Retire ``workers`` (or a policy-chosen set) from the scheduler and
    shrink the cluster; resolves (via gen.Return) to the retired set."""
    if workers is None:
        workers = self.workers_to_close(key=self.worker_key,
                                        minimum=self.minimum)
    if not workers:
        # Nothing eligible for retirement.
        raise gen.Return(workers)
    with log_errors():
        yield self.scheduler.retire_workers(
            workers=workers, remove=True, close_workers=True
        )
        logger.info("Retiring workers %s", workers)
        maybe_future = self.cluster.scale_down(workers)
        if gen.is_future(maybe_future):
            yield maybe_future
        raise gen.Return(workers)
def _adapt(self):
    """One adaptive cycle: possibly double the worker count, then always
    run the retirement check. Re-entrant calls are skipped."""
    if self._adapting:
        # Semaphore: a previous adapt cycle is still in flight.
        return
    self._adapting = True
    try:
        if self.should_scale_up():
            # Target twice the current core-bearing workers, at least one.
            target = max(1, len(self.scheduler.ncores) * 2)
            logger.info("Scaling up to %d workers", target)
            pending = self.cluster.scale_up(target)
            if gen.is_future(pending):
                yield pending
        yield self._retire_workers()
    finally:
        self._adapting = False
def _retire_workers(self, workers=None):
    """Retire the requested (or policy-selected) workers and scale the
    cluster down; resolves (via gen.Return) to the retired set."""
    if workers is None:
        workers = self.workers_to_close(key=self.worker_key,
                                        minimum=self.minimum)
    if not workers:
        # No candidates: report the empty selection back.
        raise gen.Return(workers)
    with log_errors():
        yield self.scheduler.retire_workers(workers=workers,
                                            remove=True,
                                            close_workers=True)
        logger.info("Retiring workers %s", workers)
        down_future = self.cluster.scale_down(workers)
        if gen.is_future(down_future):
            yield down_future
        raise gen.Return(workers)
def _adapt(self):
    """One adaptive cycle with hysteresis: a worker is only retired after
    it has been a close-candidate for ``wait_count`` consecutive cycles.
    Refuses to scale up and down in the same cycle."""
    if self._adapting:
        # Semaphore to avoid overlapping adapt calls.
        return
    self._adapting = True
    try:
        scale_up_wanted = self.should_scale_up()
        candidates = set(
            self.workers_to_close(key=self.worker_key,
                                  minimum=self.minimum))
        if scale_up_wanted and candidates:
            logger.info(
                "Attempting to scale up and scale down simultaneously.")
            return
        if scale_up_wanted:
            up_kwargs = self.get_scale_up_kwargs()
            pending = self.cluster.scale_up(**up_kwargs)
            self.log.append((time(), 'up', up_kwargs))
            if gen.is_future(pending):
                yield pending
        # Update per-worker close counters: drop workers that are no
        # longer candidates, collect those whose counter is ripe.
        fresh_counts = {}
        ripe = []
        for worker, count in self.close_counts.items():
            if worker not in candidates:
                continue
            if count >= self.wait_count:
                ripe.append(worker)
            else:
                fresh_counts[worker] = count
        for worker in candidates:
            fresh_counts[worker] = fresh_counts.get(worker, 0) + 1
        self.close_counts = fresh_counts
        if ripe:
            self.log.append((time(), 'down', candidates))
            candidates = yield self._retire_workers(workers=ripe)
    finally:
        self._adapting = False
def _adapt(self):
    """Apply the current scaling recommendation: either scale up with the
    recommended kwargs or retire the recommended workers."""
    if self._adapting:
        # Semaphore: a previous adapt cycle is still in flight.
        return
    self._adapting = True
    try:
        plan = self.recommendations()
        if not plan:
            return
        action = plan.pop('status')
        if action == 'up':
            pending = self.cluster.scale_up(**plan)
            self.log.append((time(), 'up', plan))
            if gen.is_future(pending):
                yield pending
        elif action == 'down':
            self.log.append((time(), 'down', plan['workers']))
            retired = yield self._retire_workers(workers=plan['workers'])
    finally:
        self._adapting = False
def _adapt(self):
    """Decide whether to scale up or down this cycle; if both are
    recommended at once, log the conflict and do neither."""
    if self._adapting:
        # Semaphore: a previous adapt cycle is still in flight.
        return
    self._adapting = True
    try:
        wants_up = self.should_scale_up()
        wants_down = self.should_scale_down()
        if wants_up and wants_down:
            logger.info("Attempting to scale up and scale down simultaneously.")
        else:
            if wants_up:
                up_kwargs = self.get_scale_up_kwargs()
                pending = self.cluster.scale_up(**up_kwargs)
                if gen.is_future(pending):
                    yield pending
            if wants_down:
                yield self._retire_workers()
    finally:
        self._adapting = False
def _adapt(self):
    """Carry out the recommendation engine's verdict for this cycle:
    "up" scales the cluster up, "down" retires the listed workers."""
    if self._adapting:
        # Semaphore: skip if a previous cycle has not finished.
        return
    self._adapting = True
    try:
        recs = self.recommendations()
        if not recs:
            return
        verdict = recs.pop("status")
        if verdict == "up":
            fut = self.cluster.scale_up(**recs)
            self.log.append((time(), "up", recs))
            if gen.is_future(fut):
                yield fut
        elif verdict == "down":
            self.log.append((time(), "down", recs["workers"]))
            closed = yield self._retire_workers(workers=recs["workers"])
    finally:
        self._adapting = False
def raw_handler(self, *args):
    """Call the wrapped handler and marshal its reply, choosing the
    async reply builder when the handler returned a future."""
    pending = handler(self, *args)
    builder = async_make_reply if gen.is_future(pending) else make_reply
    return builder(msgname, types, pending, major)
def dispatch(self, method, params, jid):
    """Dispatch one JSON-RPC call and return a JSON-RPC response dict.

    A method name prefixed with ``**`` is the client-side caching
    protocol: the first positional param is the client's cached response
    hash, and the reply carries only the hash on a cache hit.

    :param method: RPC method name (possibly ``**``-prefixed)
    :param params: list or dict of call arguments
    :param jid: JSON-RPC request id, echoed back in the response
    """
    cached_method = method[0:2] == "**"
    hash_value = None
    if cached_method:
        method = method[2:]
        hash_value = params.pop(0)
    # Try to call method with dispatcher
    if not self.dispatcher.hasMethod(method):
        text = "No such method '%s'" % method
        error_value = dict(
            name='JSONRPCError', code=100, message=text, error=text)
        self.log.warning(text)
        self.set_status(500)
        return dict(result=None, error=error_value, id=jid)
    try:
        self.log.debug("calling method %s(%s)" % (method, params))
        # Cookies may be absent; decode only when present.
        user = self.get_secure_cookie('REMOTE_USER').decode('ascii') if self.get_secure_cookie('REMOTE_USER') else None
        sid = self.get_secure_cookie('REMOTE_SESSION').decode('ascii') if self.get_secure_cookie('REMOTE_SESSION') else None
        self.log.debug("received call [%s] for %s (SID=%s): %s(%s)"
                       % (jid, user, sid, method, params))
        if user is None and method in no_login_commands:
            # allow execution without user
            user = self.dispatcher
        if isinstance(params, dict):
            result = self.dispatcher.dispatch(user, sid, method, **params)
        else:
            result = self.dispatcher.dispatch(user, sid, method, *params)
        if is_future(result):
            result = yield result
    except JSONRPCException as e:
        exc_value = sys.exc_info()[1]
        error_value = dict(
            name='JSONRPCError', code=100,
            message=str(exc_value), error=e.error)
        self.log.error(e.error)
        self.set_status(500)
        return dict(result=None, error=error_value, id=jid)
    except Exception as e:
        text = traceback.format_exc()
        exc_value = sys.exc_info()[1]
        status_code = 500
        #TODO: enroll information if it's an extended exception
        err = str(e)
        err_id = C.get_error_id(err)
        if err_id is not None:
            # get error
            err = C.getError(None, None, err_id, keep=True)
            # A registered error may carry its own HTTP status code.
            if err and 'status_code' in err and err['status_code'] is not None:
                status_code = err['status_code']
        error_value = dict(
            name='JSONRPCError', code=status_code,
            message=str(exc_value), error=err)
        self.set_status(status_code, reason=error_value['message'])
        self.log.error("returning call [%s]: %s / %s" % (jid, None,
                                                         f_print(err)))
        self.log.error(text)
        return dict(result=None, error=error_value, id=jid)
    self.log.debug("returning call [%s]: %s / %s" % (jid, result, None))
    if cached_method:
        # Compare the client's hash against a hash of this response.
        response_hash = hashlib.md5(repr(result).encode('utf-8')).hexdigest()
        if hash_value == response_hash:
            # cache hit
            result = dict(hash=response_hash)
        else:
            # cache miss
            result = dict(hash=response_hash, response=result)
    return dict(result=result, error=None, id=jid)
def _real_logic(*a, **kw):
    """AOP advice runner: wraps the joint point ``f(*a, **kw)`` with the
    registered before / around / after-returning / after-throwing /
    after (finally) advice chains. Any advice may short-circuit the
    whole call by raising ``Return``, whose payload becomes the result.
    (Python 2 code: note the three-argument ``raise`` at the end.)
    """
    jp = JointPoint(f, a, kw)
    # Run the "before" advices.
    for before_advice in before_advices:
        formal_arguments = inspect.getargspec(before_advice).args
        try:
            # Pass the joint point only if the advice declares it.
            if "joint_point" in formal_arguments:
                return_value = before_advice(joint_point=jp)
            else:
                return_value = before_advice()
            if gen.is_future(return_value):
                yield return_value
        except Return as e:
            raise gen.Return(e.get_return_value())
    # Run the "around" advices; a callable result is the advice's
    # post-proceed hook, collected for later.
    around_after_advices = []
    for around_advice in around_advices:
        formal_arguments = inspect.getargspec(around_advice).args
        try:
            if "joint_point" in formal_arguments:
                return_value = around_advice(joint_point=jp)
            else:
                return_value = around_advice()
            if gen.is_future(return_value):
                result = yield return_value
            else:
                result = return_value
            if callable(result):
                around_after_advices.append(result)
        except Return as e:
            raise gen.Return(e.get_return_value())
    # Proceed with the joint point itself, capturing any exception.
    returning = None
    exc_info = None
    try:
        returning = jp.proceed()
        if gen.is_future(returning):
            returning = yield returning
    except:
        exc_info = sys.exc_info()
    # Run the collected "around after" hooks.
    for around_after_advice in around_after_advices:
        try:
            return_value = around_after_advice(
                jp, returning, exc_info)
            if gen.is_future(return_value):
                yield return_value
        except Return as e:
            raise gen.Return(e.get_return_value())
    if exc_info is None:
        # Run the "after returning" advices (success path).
        for after_returning_advice in after_returning_advices:
            formal_arguments = inspect.getargspec(
                after_returning_advice).args
            kwargs = {}
            if "joint_point" in formal_arguments:
                kwargs["joint_point"] = jp
            if "returning" in formal_arguments:
                kwargs["returning"] = returning
            try:
                return_value = after_returning_advice(**kwargs)
                if gen.is_future(return_value):
                    yield return_value
            except Return as e:
                raise gen.Return(e.get_return_value())
    else:
        # Run the "after throwing" advices (failure path).
        for after_throwing_advice in after_throwing_advices:
            formal_arguments = inspect.getargspec(
                after_throwing_advice).args
            kwargs = {}
            if "joint_point" in formal_arguments:
                kwargs["joint_point"] = jp
            if "exc_info" in formal_arguments:
                kwargs["exc_info"] = exc_info
            try:
                return_value = after_throwing_advice(**kwargs)
                if gen.is_future(return_value):
                    yield return_value
            except Return as e:
                raise gen.Return(e.get_return_value())
    # Run the "after" (finally) advices, on both paths.
    for after_advice in after_advices:
        formal_arguments = inspect.getargspec(after_advice).args
        kwargs = {}
        if "joint_point" in formal_arguments:
            kwargs["joint_point"] = jp
        if "returning" in formal_arguments:
            kwargs["returning"] = returning
        if "exc_info" in formal_arguments:
            kwargs["exc_info"] = exc_info
        try:
            return_value = after_advice(**kwargs)
            if gen.is_future(return_value):
                yield return_value
        except Return as e:
            raise gen.Return(e.get_return_value())
    if exc_info is not None:
        # Re-raise the joint point's exception with its original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
    raise gen.Return(returning)
def data_received(self, chunk):
    """
    Receive chunk of multipart/form-data.

    Incremental state machine over the accumulated buffer with three
    phases: PHASE_BOUNDARY (expect the opening delimiter), PHASE_HEADERS
    (parse one part's headers), PHASE_BODY (stream the part's payload to
    the parser delegate). Returns early whenever more data is needed.

    :arg chunk: chunk of data
    """
    if not self._buffer:
        self._buffer = chunk
    else:
        self._buffer += chunk
    while True:
        if self.current_phase == PHASE_BOUNDARY:
            if len(self._buffer) > len(self._boundary_delimiter):
                if self._buffer.startswith(self._boundary_delimiter):
                    self.current_phase = PHASE_HEADERS
                    self._buffer = self._buffer[len(self.
                                                    _boundary_delimiter):]
                elif self._buffer.startswith(self._end_boundary):
                    # Terminal boundary right away: no more parts.
                    result = self.parser_delegate.finish_file()
                    if is_future(result):
                        yield result
                    return
                else:
                    gen_log.warning("Invalid multipart/form-data")
                    return
            else:
                # wait for next chunk
                return
        if self.current_phase == PHASE_HEADERS:
            if b"\r\n\r\n" in self._buffer:
                headers, remaining_part = self._buffer.split(
                    b"\r\n\r\n", 1)
                if headers:
                    headers = HTTPHeaders.parse(headers.decode("utf-8"))
                else:
                    gen_log.warning("multipart/form-data missing headers")
                    return
                disp_header = headers.get("Content-Disposition", "")
                disposition, disp_params = _parse_header(disp_header)
                if disposition != "form-data":
                    gen_log.warning("Invalid multipart/form-data")
                    return
                self._buffer = remaining_part
                self.current_phase = PHASE_BODY
                result = self.parser_delegate.start_file(
                    headers, disp_params)
                if is_future(result):
                    yield result
            else:
                # wait for all headers for current file
                return
        if self.current_phase == PHASE_BODY:
            if self._boundary_delimiter in self._buffer:
                # Full part in buffer; data[:-2] strips the trailing CRLF.
                data, remaining_data = self._buffer.split(
                    self._boundary_delimiter, 1)
                self._buffer = remaining_data
                result = self.parser_delegate.file_data_received(data[:-2])
                if is_future(result):
                    yield result
                self.current_phase = PHASE_HEADERS
                result = self.parser_delegate.finish_file()
                if is_future(result):
                    yield result
                continue
            elif self._end_boundary in self._buffer:
                # Last part: flush everything before the end boundary.
                result = self.parser_delegate.file_data_received(
                    self._buffer.split(self._end_boundary)[0])
                if is_future(result):
                    yield result
                result = self.parser_delegate.finish_file()
                if is_future(result):
                    yield result
                return
            else:
                # No boundary yet: stream what we have and wait for more.
                if self._buffer:
                    result = self.parser_delegate.file_data_received(
                        self._buffer)
                    if is_future(result):
                        yield result
                    self._buffer = b""
                return
def async_request(method='GET', url=None, params=None, headers=None,
                  data=None, json=None, on_response=None, on_error=None,
                  connect_timeout=DEFAULT_CONNECT_TIMEOUT,
                  request_timeout=DEFAULT_REQUEST_TIMEOUT,
                  follow_redirects=False, proxy_host=None, proxy_port=None,
                  proxy_username=None, proxy_password=None):
    """Perform an async HTTP request with tornado's AsyncHTTPClient.

    ``params`` are merged into the URL query string; ``json`` (preferred)
    or ``data`` becomes the body for non-GET requests. ``on_response`` /
    ``on_error`` callbacks run before resolution and may return futures.
    Resolves (via ``Return``) to the HTTPResponse — the error response
    for HTTPError when one is attached — or None on other failures.
    """
    try:
        if url is None:
            return
        method = method.upper()
        base_headers = {
            'User-Agent': random_agent(),
        }
        if params is not None:
            # Merge params with any query string already present in url.
            url_parsed = urlparse(url)
            query = urlencode(params, doseq=True)
            if url_parsed.query != '':
                query = '%s&%s' % (query, url_parsed.query)
            url = urlunparse(
                (url_parsed.scheme, url_parsed.netloc, url_parsed.path,
                 url_parsed.params, query, url_parsed.fragment))
        if method == 'GET':
            body = None
        else:
            # Body precedence: json > dict data > list data > raw data.
            if json is not None:
                body = json_util.dumps(json)
                base_headers['Content-Type'] = 'application/json;charset=utf-8'
            elif isinstance(data, dict):
                body = urlencode(data, doseq=True)
                base_headers[
                    'Content-Type'] = 'application/x-www-form-urlencoded'
            elif isinstance(data, list):
                body = force_text(data)
            else:
                body = data
        if isinstance(headers, dict):
            # Caller headers override the defaults.
            base_headers.update(headers)
        headers = base_headers
        response = yield AsyncHTTPClient().fetch(
            HTTPRequest(url=url, headers=headers, method=method, body=body,
                        validate_cert=False, decompress_response=True,
                        connect_timeout=connect_timeout,
                        request_timeout=request_timeout,
                        follow_redirects=follow_redirects,
                        proxy_host=proxy_host, proxy_port=proxy_port,
                        proxy_username=proxy_username,
                        proxy_password=proxy_password))
        if on_response is not None:
            ret = on_response(response)
            if is_future(ret):
                yield ret
        raise Return(response)
    except Return as e:
        # The Return raised above is caught here by the broad handlers'
        # ordering; re-raise it so the caller actually receives the
        # response rather than None.
        raise e
    except HTTPError as e:
        if hasattr(e, 'response') and e.response:
            # HTTP-level failure with a response body: hand it to the
            # callback and resolve to that response.
            if on_response is not None:
                ret = on_response(e.response)
                if is_future(ret):
                    yield ret
            raise Return(e.response)
        else:
            if on_error is not None:
                ret = on_error(e)
                if is_future(ret):
                    yield ret
            raise Return(None)
    except Exception as e:
        # Transport or callback failure: report and resolve to None.
        if on_error is not None:
            ret = on_error(e)
            if is_future(ret):
                yield ret
        raise Return(None)
def dispatch(self, method, params, jid):
    """Dispatch one JSON-RPC call and return a JSON-RPC response dict.

    Variant of the dispatcher above: generic errors always use code 100
    and the status is set (without a reason phrase) after logging. A
    ``**``-prefixed method name activates the client-side caching
    protocol (first positional param is the client's response hash).

    :param method: RPC method name (possibly ``**``-prefixed)
    :param params: list or dict of call arguments
    :param jid: JSON-RPC request id, echoed back in the response
    """
    cached_method = method[0:2] == "**"
    hash_value = None
    if cached_method:
        method = method[2:]
        hash_value = params.pop(0)
    # Try to call method with dispatcher
    if not self.dispatcher.hasMethod(method):
        text = "No such method '%s'" % method
        error_value = dict(
            name='JSONRPCError', code=100, message=text, error=text)
        self.log.warning(text)
        self.set_status(500)
        return dict(result=None, error=error_value, id=jid)
    try:
        self.log.debug("calling method %s(%s)" % (method, params))
        # Cookies may be absent; decode only when present.
        user = self.get_secure_cookie('REMOTE_USER').decode('ascii') if self.get_secure_cookie('REMOTE_USER') else None
        sid = self.get_secure_cookie('REMOTE_SESSION').decode('ascii') if self.get_secure_cookie('REMOTE_SESSION') else None
        self.log.debug("received call [%s] for %s (SID=%s): %s(%s)"
                       % (jid, user, sid, method, params))
        if user is None and method in no_login_commands:
            # allow execution without user
            user = self.dispatcher
        if isinstance(params, dict):
            result = self.dispatcher.dispatch(user, sid, method, **params)
        else:
            result = self.dispatcher.dispatch(user, sid, method, *params)
        if is_future(result):
            result = yield result
    except JSONRPCException as e:
        exc_value = sys.exc_info()[1]
        error_value = dict(
            name='JSONRPCError', code=100,
            message=str(exc_value), error=e.error)
        self.log.error(e.error)
        self.set_status(500)
        return dict(result=None, error=error_value, id=jid)
    except Exception as e:
        text = traceback.format_exc()
        exc_value = sys.exc_info()[1]
        status_code = 500
        #TODO: enroll information if it's an extended exception
        err = str(e)
        err_id = C.get_error_id(err)
        if err_id is not None:
            # get error
            err = C.getError(None, None, err_id, keep=True)
            # A registered error may carry its own HTTP status code.
            if err and 'status_code' in err and err['status_code'] is not None:
                status_code = err['status_code']
        error_value = dict(
            name='JSONRPCError', code=100,
            message=str(exc_value), error=err)
        self.log.error("returning call [%s]: %s / %s" % (jid, None,
                                                         f_print(err)))
        self.log.error(text)
        self.set_status(status_code)
        return dict(result=None, error=error_value, id=jid)
    self.log.debug("returning call [%s]: %s / %s" % (jid, result, None))
    if cached_method:
        # Compare the client's hash against a hash of this response.
        response_hash = hashlib.md5(repr(result).encode('utf-8')).hexdigest()
        if hash_value == response_hash:
            # cache hit
            result = dict(hash=response_hash)
        else:
            # cache miss
            result = dict(hash=response_hash, response=result)
    return dict(result=result, error=None, id=jid)