def _store(self, obj, form, bcs, tsfc_parameters):
    """Insert an assembled object into the cache, unless its form has
    been invalidated too many times.

    :arg obj: the assembled object to cache.
    :arg form: the form it was assembled from (used to build the cache key).
    :arg bcs: the boundary conditions applied during assembly.
    :arg tsfc_parameters: form-compiler parameters; their string
        representation disambiguates entries for the same form.
    """
    key = self._cache_key(form)
    miss_count = self.invalid_count[key]
    max_misses = parameters["assembly_cache"]["max_misses"]

    if miss_count > max_misses:
        # Excluded from caching.  Emit the diagnostic only on the first
        # miss past the threshold so the log is not flooded.
        if miss_count == max_misses + 1:
            debug("form %s missed too many times, excluding from cache." % form)
    else:
        entry = _CacheEntry(obj, form, bcs)
        self.cache[key] = str(tsfc_parameters), entry
        self.evict()
def evict(self):
    """Run the cache eviction algorithm.

    This works out the permitted cache size and deletes objects until
    it is achieved.  Cache values are assumed to have a ``value``
    attribute and eviction occurs in increasing ``value`` order.
    Currently ``value`` is an index of the assembly operation, so older
    operations are evicted first.

    The cache will be evicted down to 90% of permitted size.

    The permitted size is either the explicit
    ``parameters["assembly_cache"]["max_bytes"]`` or it is the amount of
    memory per core scaled by ``parameters["assembly_cache"]["max_factor"]``
    (by default the scale factor is 0.6).

    In MPI parallel, the nbytes of each cache entry is set to the maximum
    over all processes, while the available memory is set to the
    minimum.  This produces a conservative caching policy which is
    guaranteed to result in the same evictions on each processor.
    """
    cache_params = parameters["assembly_cache"]
    if not cache_params["eviction"]:
        return

    # Permitted size: an explicit byte cap if set, otherwise available
    # memory scaled by max_factor.  Either source may be absent (falsy),
    # in which case it contributes no bound.
    max_cache_size = min(cache_params["max_bytes"] or float("inf"),
                         (memory or float("inf")) * cache_params["max_factor"])

    if max_cache_size == float("inf"):
        # No bound at all (no explicit cap and memory unknown, e.g.
        # psutil missing): warn once and skip eviction entirely.
        if not self.evictwarned:
            warning("No maximum assembly cache size. Install psutil >= 2.0.0 or risk leaking memory!")
            self.evictwarned = True
        return

    cache_size = self.nbytes
    if cache_size < max_cache_size:
        return

    debug("Cache eviction triggered. %s bytes in cache, %s bytes allowed"
          % (cache_size, max_cache_size))

    # Evict down to 90% full.
    bytes_to_evict = cache_size - 0.9 * max_cache_size

    def entry_nbytes(item):
        # item is a (key, (tsfc_parameters_str, cache_entry)) pair.
        return item[1][1].nbytes

    # Oldest entries (smallest .value) first.
    sorted_cache = sorted(self.cache.items(), key=lambda item: item[1][1].value)

    # Collect eviction candidates from the front of the sorted list until
    # the target is met.  Guard against exhausting the list: accounting
    # should never leave a positive residue after consuming every entry,
    # but an IndexError here would be worse than stopping early.
    # (Original code used `next` as the loop variable, shadowing the
    # builtin, and popped from the front of a list, which is O(n) each
    # time; iterating by index avoids both.)
    candidates = []
    for item in sorted_cache:
        if bytes_to_evict <= 0:
            break
        candidates.append(item)
        bytes_to_evict -= entry_nbytes(item)

    # We may have been overzealous: walk back from the newest candidate
    # and retain any entry whose removal is not needed to stay under the
    # target; evict the rest.
    for candidate in reversed(candidates):
        if bytes_to_evict + entry_nbytes(candidate) < 0:
            bytes_to_evict += entry_nbytes(candidate)
        else:
            del self.cache[candidate[0]]