def cached_neighbors(self, mo, key, iter_neighbors):
    """
    Cache iter_neighbors results according to profile settings.

    :param mo: Managed object whose neighbors are being resolved
    :param key: Cache key for this object's neighbor list
    :param iter_neighbors: Callable accepting *mo* and returning its
        neighbors (may return a list or a generator)
    :return: List (or other materialized sequence) of neighbors
    """
    ttl = mo.object_profile.neighbor_cache_ttl
    if not ttl:
        # Caching disabled by object profile -- always recompute
        metrics["neighbor_cache_misses"] += 1
        neighbors = iter_neighbors(mo)
        if isinstance(neighbors, types.GeneratorType):
            # Materialize the generator we already have.
            # (Bug fix: the original called iter_neighbors(mo) a second
            # time here, discarding the first generator and doing the
            # discovery work twice.)
            neighbors = list(neighbors)
        return neighbors
    # Cached version
    neighbors = cache.get(key, version=self.NEIGHBOR_CACHE_VERSION)
    self.logger.debug("Use neighbors cache")
    if neighbors is None:
        # Cache miss -- compute, then store for ttl seconds
        neighbors = iter_neighbors(mo)
        if isinstance(neighbors, types.GeneratorType):
            # Same fix as above: materialize the existing generator
            neighbors = list(neighbors)
        cache.set(key, neighbors, ttl=ttl, version=self.NEIGHBOR_CACHE_VERSION)
        if self.interface_aliases:
            # Persist only the alias entries relevant to these neighbors,
            # keyed by (managed object id, local interface name)
            alias_cache = {
                (mo.id, n[0]): self.interface_aliases[(mo.id, n[0])]
                for n in neighbors
                if (mo.id, n[0]) in self.interface_aliases
            }
            cache.set(
                "%s-aliases" % key,
                alias_cache,
                ttl=ttl,
                version=self.NEIGHBOR_CACHE_VERSION,
            )
        metrics["neighbor_cache_misses"] += 1
    else:
        # Cache hit -- also restore any cached interface aliases
        alias_cache = cache.get("%s-aliases" % key, version=self.NEIGHBOR_CACHE_VERSION)
        self.logger.debug("Alias cache is %s", alias_cache)
        if alias_cache:
            self.interface_aliases.update(alias_cache)
        metrics["neighbor_cache_hits"] += 1
    return neighbors
def __init__(self, ids, cache_key=None, ignore_profiles=None):
    """
    Build the output data for *ids*, optionally via a shared cache.

    :param ids: Object ids to load (presumably managed object ids --
        confirm against callers)
    :param cache_key: If given, try this cache key first and store
        freshly loaded results under it for 8 hours (28800 s)
    :param ignore_profiles: Passed through to ``self.load``
    """
    self.ids = ids
    self.ignore_profiles = ignore_profiles
    if not cache_key:
        # No caching requested -- load directly
        self.out = self.load(self.ids, self.ignore_profiles)
        return
    cached = cache.get(key=cache_key)
    if cached:
        self.out = cached
    else:
        # Cache empty/expired: rebuild and repopulate
        self.out = self.load(self.ids, self.ignore_profiles)
        cache.set(cache_key, self.out, ttl=28800)
def messages(request):
    """
    Fetch and consume one-shot user messages stashed in the cache.

    The session id is taken from the trailing component of the
    ``noc_user`` cookie; messages stored under ``msg-<session_id>``
    are returned once and then deleted.

    :param request: HTTP request carrying cookies
    :return: ``{"messages": [...]}`` when pending messages exist,
        otherwise ``{}``
    """
    cookie = request.COOKIES.get("noc_user")
    if cookie is None:
        return {}
    session_id = cookie.rsplit("|", 1)[-1]
    key = "msg-%s" % session_id
    msg = cache.get(key, version=CACHE_VERSION)
    if msg:
        # Decode first, delete second: a decode failure leaves the
        # cached entry intact (matches original ordering)
        payload = ujson.loads(msg)
        cache.delete(key, version=CACHE_VERSION)
        return {"messages": payload}
    return {}
def run_job(self, job, mo, checks):
    """
    Run a single discovery job for a managed object, synchronously.

    :param job: Job name; "segment" runs on the generic scheduler,
        anything else on the pool-specific discovery scheduler
    :param mo: Managed object the job operates on
    :param checks: Extra checks passed to the job via the "_checks" arg
    """
    # Segment jobs use the generic scheduler; everything else runs on
    # the object's pool-specific discovery scheduler
    if job == "segment":
        scheduler = Scheduler("scheduler", pool=None, service=ServiceStub())
    else:
        scheduler = Scheduler("discovery", pool=mo.pool.name, service=ServiceStub())
    jcls = self.jcls[job]
    # Try to dereference job
    job_args = scheduler.get_collection().find_one({
        Job.ATTR_CLASS: jcls,
        Job.ATTR_KEY: mo.id
    })
    if job_args:
        self.print("Job ID: %s" % job_args["_id"])
    else:
        # No scheduled instance found -- synthesize minimal job args
        job_args = {Job.ATTR_ID: "fakeid", Job.ATTR_KEY: mo.id}
    job_args["_checks"] = checks
    # Instantiate the job handler class for this job class name
    job = get_handler(jcls)(scheduler, job_args)
    if job.context_version:
        # Restore persisted job context, if any
        ctx_key = job.get_context_cache_key()
        self.print("Loading job context from %s" % ctx_key)
        ctx = cache.get(ctx_key, version=job.context_version)
        if not ctx:
            self.print("Job context is empty")
        # NOTE(review): load_context is called even when ctx is None/empty;
        # presumably load_context tolerates that -- confirm
        job.load_context(ctx)
    # Trace mode samples the span so it can be printed below
    sample = 1 if self.trace else 0
    with Span(sample=sample):
        job.dereference()
        job.handler()
    if sample:
        spans = get_spans()
        self.print("Spans:")
        self.print("\n".join(str(s) for s in spans))
    if scheduler.service.metrics:
        # Dump any ClickHouse metric rows collected by the stub service
        self.print("Collected CH data:")
        for t in scheduler.service.metrics:
            self.print("Table: %s" % t)
            self.print("\n".join(
                str(x) for x in scheduler.service.metrics[t]))
    if job.context_version and job.context:
        # Persist updated job context back to the cache
        self.print("Saving job context to %s" % ctx_key)
        scheduler.cache_set(key=ctx_key, value=job.context, version=job.context_version)
        scheduler.apply_cache_ops()
        # NOTE(review): sleep presumably lets async cache ops settle
        # before the command exits -- confirm
        time.sleep(3)
def __init__(self, ids, cache_key=None, ignore_profiles=None, filter_exists_link=False):
    """
    Build the output data for *ids*, optionally via a shared cache.

    :param ids: Object ids to load (presumably managed object ids --
        confirm against callers)
    :param cache_key: If given, try this cache key first and store
        freshly loaded results under it for 8 hours (28800 s)
    :param ignore_profiles: Passed through to ``self.load``
    :param filter_exists_link: Passed through to ``self.load``
    """
    self.ids = ids
    self.ignore_profiles = ignore_profiles
    self.filter_exists_link = filter_exists_link
    if not cache_key:
        # No caching requested -- load directly
        self.out = self.load(self.ids, self.ignore_profiles, self.filter_exists_link)
        return
    cached = cache.get(key=cache_key)
    if cached:
        self.out = cached
    else:
        # Cache empty/expired: rebuild and repopulate
        self.out = self.load(self.ids, self.ignore_profiles, self.filter_exists_link)
        cache.set(cache_key, self.out, ttl=28800)