def process_miss(self, context, request_md5, timer, f, args, kwargs):
    """Handle a cache miss: compute the response and publish it.

    The entry is first marked IN_FLIGHT so that concurrent callers do
    not rush to recompute the same request.  The IN_FLIGHT marker gets
    a deliberately short TTL (``inflight_ttl``) so others may retry it
    if this process dies mid-computation.
    """
    self.cache.set(request_md5, IN_FLIGHT, ttl=self.inflight_ttl)

    # Pause the cache timer while the actual handler runs, so its cost
    # is not attributed to the cache layer.
    timer.stop()
    try:
        response = f(context, *args, **kwargs)
    finally:
        timer.start()

    payload = {
        "result": response,
        "created": int(time.time()),
        "max_age": self.ttl,
    }
    self.cache.set(request_md5, json.dumps(payload), ttl=self.ttl)
    return "done", response, "miss"
def wrapped_f(context, *args, **kwargs):
    """Cache-aware wrapper around ``f``.

    Looks the request up in the cache (keyed by the MD5 of its
    canonical JSON encoding) via ``self.state_loop`` and only invokes
    ``f`` directly when caching is disabled for this request
    (``context.request.kwargs["cache"] is False``).
    """
    if not context.request.kwargs.get("cache", True):
        self.annotate(context, status="off")
        return f(context, *args, **kwargs)

    with context.timer("cache") as timer:
        if context.name is not None:
            service_name = context.name
        else:
            # FIX: ``f.func_name`` is Python 2 only and raises
            # AttributeError on Python 3, where the attribute is
            # ``__name__``.
            service_name = f.__name__
        assert service_name

        # The cache key covers the service name plus all arguments;
        # kwargs are flattened to a list so they are JSON-serializable.
        request = (service_name, args, list(kwargs.items()))
        # FIX: request_md5 was unbound in the except handler when the
        # JSON encoding itself failed, turning the real error into a
        # NameError at the cache.delete() call below.
        request_md5 = None
        try:
            request_encoded = json.dumps(request, sort_keys=True).encode(
                "utf-8"
            )
            request_md5 = hashlib.md5(request_encoded).hexdigest()
            status, response = self.state_loop(
                context, request_md5, timer, f, args, kwargs
            )
        except Exception as exc:
            try:
                log = context.log
            except Exception:
                log = logutils.get_logger(__name__)
            # FIX: ``warn`` is a deprecated alias of ``warning``.
            log.warning(
                "cache_control: Error handling request: %s",
                exc,
                exc_info=True,
                stack_info=True,
            )
            # Only drop the entry if we got far enough to compute a key.
            if request_md5 is not None:
                self.cache.delete(request_md5)
            raise
        self.annotate(context, status, request_md5)
        return response
def http_body(self):
    """Serialize the stored positional arguments as a JSON document."""
    payload = self.args
    return json.dumps(payload)
def http_headers(self):
    """Build the extra HTTP headers for this request.

    Each keyword argument becomes an ``x-servicelib-<name>`` header
    whose value is the JSON encoding of the argument.
    """
    return {
        "x-servicelib-{}".format(name): json.dumps(value)
        for name, value in self.kwargs.items()
    }
def http_body(self):
    """Return the UTF-8 encoded JSON body, computing it at most once.

    The encoded bytes are memoized on ``self._encoded_body`` so
    repeated calls reuse the same buffer.
    """
    cached = self._encoded_body
    if cached is None:
        cached = json.dumps(self.value).encode("utf-8")
        self._encoded_body = cached
    return cached