def _clear(self, dbname, timestamp=None):
    """Drop the shared cache content kept for *dbname*.

    Records *timestamp* as the last known invalidation time and raises the
    per-database lower bound so that transactions started before this point
    keep working on a private copy instead of the freshly reset shared cache.
    """
    logger.debug("clearing cache '%s' of '%s'", self._name, dbname)
    self._timestamp[dbname] = timestamp
    # Swap in a brand-new LRU dict rather than emptying the old one in place
    self._database_cache[dbname] = self._database_cache.default_factory()
    previous_lower = self._transaction_lower.get(dbname, self._default_lower)
    self._transaction_lower[dbname] = max(
        Transaction.monotonic_time(), previous_lower)
class MemoryCache(BaseCache):
    """
    A key value LRU cache with size limit.

    One shared LRU dict is kept per database name.  Transactions that have
    pending resets, or that started before the last invalidation, are served
    from a private per-transaction copy so they never observe partially
    invalidated shared state.  Cross-process invalidation uses the backend's
    notification channel (LISTEN/NOTIFY) when available, otherwise the
    ``ir_cache`` table is polled at most every ``_clear_timeout`` seconds.
    """
    # Pending cache resets keyed by transaction (weak keys so a vanished
    # transaction cannot leak its reset set)
    _reset = WeakKeyDictionary()
    _clean_last = datetime.now()
    _default_lower = Transaction.monotonic_time()
    # Listener threads keyed by (pid, dbname); one lock per pid so forked
    # workers never share a lock instance with their parent process
    _listener = {}
    _listener_lock = defaultdict(threading.Lock)
    _table = 'ir_cache'
    _channel = _table

    def __init__(self, *args, **kwargs):
        super(MemoryCache, self).__init__(*args, **kwargs)
        self._database_cache = defaultdict(lambda: LRUDict(self.size_limit))
        self._transaction_cache = WeakKeyDictionary()
        self._transaction_lower = {}
        self._timestamp = {}

    def _get_cache(self):
        """Return the dict backing this cache for the current transaction.

        A private (lazily created) copy is returned when the transaction has
        pending resets or started before the last invalidation; otherwise the
        shared per-database cache is used.
        """
        transaction = Transaction()
        dbname = transaction.database.name
        lower = self._transaction_lower.get(dbname, self._default_lower)
        if (transaction in self._reset
                or transaction.started_at < lower):
            try:
                return self._transaction_cache[transaction]
            except KeyError:
                cache = self._database_cache.default_factory()
                self._transaction_cache[transaction] = cache
                return cache
        else:
            return self._database_cache[dbname]

    def get(self, key, default=None):
        """Return the value cached under key, or default if absent/expired."""
        key = self._key(key)
        cache = self._get_cache()
        try:
            (expire, result) = cache.pop(key)
            if expire and expire < dt.datetime.now():
                return default
            # Re-insert to mark the entry as most recently used
            cache[key] = (expire, result)
            return result
        # JCA: Properly crash on type error
        except KeyError:
            return default

    def set(self, key, value):
        """Cache value under key, stamping an expiry when duration is set."""
        key = self._key(key)
        cache = self._get_cache()
        if self.duration:
            expire = dt.datetime.now() + self.duration
        else:
            expire = None
        # JCA: Log cases where the cache size is exceeded
        if show_debug_logs:
            if len(cache) >= cache.size_limit:
                logger.debug('Cache limit exceeded for %s' % self._name)
        cache[key] = (expire, value)
        # JCA: Properly crash on type error
        return value

    def clear(self):
        """Schedule a reset of this cache for the current transaction."""
        transaction = Transaction()
        self._reset.setdefault(transaction, set()).add(self._name)
        self._transaction_cache.pop(transaction, None)

    def _clear(self, dbname, timestamp=None):
        """Drop the shared cache of dbname, recording the invalidation time."""
        logger.debug("clearing cache '%s' of '%s'", self._name, dbname)
        self._timestamp[dbname] = timestamp
        self._database_cache[dbname] = self._database_cache.default_factory()
        # Transactions older than this point must switch to a private copy
        self._transaction_lower[dbname] = max(
            Transaction.monotonic_time(),
            self._transaction_lower.get(dbname, self._default_lower))

    @classmethod
    def sync(cls, transaction):
        """Pull invalidations performed by other processes.

        With a notification channel, a listener thread is started once per
        (pid, database) and polling is skipped entirely; otherwise the
        ir_cache table is read, throttled to every _clear_timeout seconds.
        """
        database = transaction.database
        dbname = database.name
        if not _clear_timeout and database.has_channel():
            pid = os.getpid()
            with cls._listener_lock[pid]:
                if (pid, dbname) not in cls._listener:
                    cls._listener[pid, dbname] = listener = threading.Thread(
                        target=cls._listen, args=(dbname,), daemon=True)
                    listener.start()
            return
        if (datetime.now() - cls._clean_last).total_seconds() < _clear_timeout:
            return
        connection = database.get_connection(readonly=True, autocommit=True)
        try:
            with connection.cursor() as cursor:
                table = Table(cls._table)
                cursor.execute(*table.select(
                        _cast(table.timestamp), table.name))
                timestamps = {}
                for timestamp, name in cursor.fetchall():
                    timestamps[name] = timestamp
        finally:
            database.put_connection(connection)
        for name, timestamp in timestamps.items():
            try:
                inst = cls._instances[name]
            except KeyError:
                # Cache not instantiated in this process; nothing to clear
                continue
            inst_timestamp = inst._timestamp.get(dbname)
            if not inst_timestamp or timestamp > inst_timestamp:
                inst._clear(dbname, timestamp)
        cls._clean_last = datetime.now()

    def sync_since(self, value):
        """Return True when a sync happened after value."""
        return self._clean_last > value

    @classmethod
    def commit(cls, transaction):
        """Apply the resets scheduled on transaction and broadcast them."""
        table = Table(cls._table)
        reset = cls._reset.pop(transaction, None)
        if not reset:
            return
        database = transaction.database
        dbname = database.name
        if not _clear_timeout and transaction.database.has_channel():
            with transaction.connection.cursor() as cursor:
                # The count computed as
                # 8000 (max notify size) / 64 (max name data len)
                for sub_reset in grouped_slice(reset, 125):
                    cursor.execute(
                        'NOTIFY "%s", %%s' % cls._channel,
                        (json.dumps(list(sub_reset), separators=(',', ':')),))
        else:
            connection = database.get_connection(
                readonly=False, autocommit=True)
            try:
                with connection.cursor() as cursor:
                    for name in reset:
                        cursor.execute(*table.select(
                                table.name, table.id, table.timestamp,
                                where=table.name == name, limit=1))
                        if cursor.fetchone():
                            # It would be better to insert only
                            cursor.execute(*table.update(
                                    [table.timestamp], [CurrentTimestamp()],
                                    where=table.name == name))
                        else:
                            cursor.execute(*table.insert(
                                    [table.timestamp, table.name],
                                    [[CurrentTimestamp(), name]]))
                        # Read the timestamp back, cast the same way as the
                        # values polled by sync() so comparisons are coherent.
                        # (A former duplicate un-cast MAX query whose result
                        # was immediately discarded has been removed.)
                        cursor.execute(*table.select(
                                _cast(Max(table.timestamp)),
                                where=table.name == name))
                        timestamp, = cursor.fetchone()
                        inst = cls._instances[name]
                        inst._clear(dbname, timestamp)
                    connection.commit()
            finally:
                database.put_connection(connection)
            cls._clean_last = datetime.now()
        reset.clear()

    @classmethod
    def rollback(cls, transaction):
        """Discard the resets scheduled on transaction."""
        cls._reset.pop(transaction, None)

    @classmethod
    def drop(cls, dbname):
        """Forget all cached state of dbname and stop its listener thread."""
        pid = os.getpid()
        with cls._listener_lock[pid]:
            listener = cls._listener.pop((pid, dbname), None)
        # join() outside the lock: the listener re-acquires it on exit
        if listener:
            database = backend.Database(dbname)
            conn = database.get_connection()
            try:
                cursor = conn.cursor()
                # Payload-less notify just wakes the listener so it notices
                # it is no longer registered and exits
                cursor.execute('NOTIFY "%s"' % cls._channel)
                conn.commit()
            finally:
                database.put_connection(conn)
            listener.join()
        for inst in cls._instances.values():
            inst._timestamp.pop(dbname, None)
            inst._database_cache.pop(dbname, None)
            inst._transaction_lower.pop(dbname, None)

    @classmethod
    def _listen(cls, dbname):
        """Listener thread body: apply resets notified by other processes."""
        database = backend.Database(dbname)
        if not database.has_channel():
            raise NotImplementedError
        logger.info("listening on channel '%s' of '%s'", cls._channel, dbname)
        conn = database.get_connection()
        pid = os.getpid()
        current_thread = threading.current_thread()
        try:
            cursor = conn.cursor()
            cursor.execute('LISTEN "%s"' % cls._channel)
            conn.commit()
            # Stop as soon as this thread is no longer the registered
            # listener for (pid, dbname) — see drop()
            while cls._listener.get((pid, dbname)) == current_thread:
                readable, _, _ = select.select([conn], [], [])
                if not readable:
                    continue
                conn.poll()
                while conn.notifies:
                    notification = conn.notifies.pop()
                    if notification.payload:
                        reset = json.loads(notification.payload)
                        for name in reset:
                            # XUNG
                            # Name not in instances when
                            # control_vesion_upgrade table is locked
                            # because another process is currently upgrading
                            # We must ignore cache reset notifications
                            # (Not yet loaded anyway)
                            if name in cls._instances:
                                inst = cls._instances[name]
                                inst._clear(dbname)
                        cls._clean_last = datetime.now()
        except Exception:
            logger.error(
                "cache listener on '%s' crashed", dbname, exc_info=True)
            raise
        finally:
            database.put_connection(conn)
            with cls._listener_lock[pid]:
                if cls._listener.get((pid, dbname)) == current_thread:
                    del cls._listener[pid, dbname]

    @classmethod
    def purge_listeners(cls, dbname):
        '''
        Purges all listeners for a given database
        '''
        pid = os.getpid()
        with cls._listener_lock[pid]:
            if (pid, dbname) in cls._listener:
                del cls._listener[pid, dbname]
class MemoryCache(BaseCache):
    """
    A key value LRU cache with size limit.

    One shared LRU dict is kept per database name.  Transactions with
    pending resets, or started before the last invalidation, are served from
    a private per-transaction copy.  Cross-process invalidation uses the
    backend notification channel when available, otherwise the ``ir_cache``
    table is polled at most every ``_clear_timeout`` seconds.
    """
    # Pending cache resets keyed by transaction (weak keys so a vanished
    # transaction cannot leak its reset set)
    _reset = WeakKeyDictionary()
    _clean_last = datetime.now()
    _default_lower = Transaction.monotonic_time()
    # Listener threads keyed by dbname, guarded by a single process lock
    _listener = {}
    _listener_lock = threading.Lock()
    _table = 'ir_cache'
    _channel = _table

    def __init__(self, *args, **kwargs):
        super(MemoryCache, self).__init__(*args, **kwargs)
        self._database_cache = defaultdict(lambda: LRUDict(self.size_limit))
        self._transaction_cache = WeakKeyDictionary()
        self._transaction_lower = {}
        self._timestamp = {}

    def _get_cache(self):
        """Return the dict backing this cache for the current transaction.

        A private (lazily created) copy is returned when the transaction has
        pending resets or started before the last invalidation; otherwise the
        shared per-database cache is used.
        """
        transaction = Transaction()
        dbname = transaction.database.name
        lower = self._transaction_lower.get(dbname, self._default_lower)
        if (transaction in self._reset
                or transaction.started_at < lower):
            try:
                return self._transaction_cache[transaction]
            except KeyError:
                cache = self._database_cache.default_factory()
                self._transaction_cache[transaction] = cache
                return cache
        else:
            return self._database_cache[dbname]

    def get(self, key, default=None):
        """Return the value cached under key, or default if absent/expired.

        TypeError (unhashable key) is treated the same as a miss.
        """
        key = self._key(key)
        cache = self._get_cache()
        try:
            (expire, result) = cache.pop(key)
            if expire and expire < dt.datetime.now():
                return default
            # Re-insert to mark the entry as most recently used
            cache[key] = (expire, result)
            return result
        except (KeyError, TypeError):
            return default

    def set(self, key, value):
        """Cache value under key, stamping an expiry when duration is set.

        Unhashable keys are silently ignored (best-effort cache).
        """
        key = self._key(key)
        cache = self._get_cache()
        if self.duration:
            expire = dt.datetime.now() + self.duration
        else:
            expire = None
        try:
            cache[key] = (expire, value)
        except TypeError:
            pass
        return value

    def clear(self):
        """Schedule a reset of this cache for the current transaction."""
        transaction = Transaction()
        self._reset.setdefault(transaction, set()).add(self._name)
        self._transaction_cache.pop(transaction, None)

    def _clear(self, dbname, timestamp=None):
        """Drop the shared cache of dbname, recording the invalidation time."""
        logger.debug("clearing cache '%s' of '%s'", self._name, dbname)
        self._timestamp[dbname] = timestamp
        self._database_cache[dbname] = self._database_cache.default_factory()
        # Transactions older than this point must switch to a private copy
        self._transaction_lower[dbname] = max(
            Transaction.monotonic_time(),
            self._transaction_lower.get(dbname, self._default_lower))

    @classmethod
    def sync(cls, transaction):
        """Pull invalidations performed by other processes.

        With a notification channel, a listener thread is started once per
        database and polling is skipped; otherwise the ir_cache table is
        read, throttled to every _clear_timeout seconds.
        """
        database = transaction.database
        dbname = database.name
        if not _clear_timeout and database.has_channel():
            with cls._listener_lock:
                if dbname not in cls._listener:
                    cls._listener[dbname] = listener = threading.Thread(
                        target=cls._listen, args=(dbname,), daemon=True)
                    listener.start()
            return
        if (datetime.now() - cls._clean_last).total_seconds() < _clear_timeout:
            return
        connection = database.get_connection(readonly=True, autocommit=True)
        try:
            with connection.cursor() as cursor:
                table = Table(cls._table)
                cursor.execute(
                    *table.select(_cast(table.timestamp), table.name))
                timestamps = {}
                for timestamp, name in cursor.fetchall():
                    timestamps[name] = timestamp
        finally:
            database.put_connection(connection)
        for name, timestamp in timestamps.items():
            try:
                inst = cls._instances[name]
            except KeyError:
                # Cache not instantiated in this process; nothing to clear
                continue
            inst_timestamp = inst._timestamp.get(dbname)
            if not inst_timestamp or timestamp > inst_timestamp:
                inst._clear(dbname, timestamp)
        cls._clean_last = datetime.now()

    def sync_since(self, value):
        """Return True when a sync happened after value."""
        return self._clean_last > value

    @classmethod
    def commit(cls, transaction):
        """Apply the resets scheduled on transaction and broadcast them."""
        table = Table(cls._table)
        reset = cls._reset.setdefault(transaction, set())
        if not reset:
            return
        database = transaction.database
        dbname = database.name
        if not _clear_timeout and transaction.database.has_channel():
            with transaction.connection.cursor() as cursor:
                # Batch the names so each payload stays below the ~8000 byte
                # NOTIFY limit (roughly 64 bytes per cache name)
                for sub_reset in grouped_slice(reset, 125):
                    cursor.execute(
                        'NOTIFY "%s", %%s' % cls._channel,
                        (json.dumps(list(sub_reset), separators=(',', ':')),))
        else:
            connection = database.get_connection(
                readonly=False, autocommit=True)
            try:
                with connection.cursor() as cursor:
                    for name in reset:
                        cursor.execute(*table.select(
                                table.name, table.id, table.timestamp,
                                where=table.name == name, limit=1))
                        if cursor.fetchone():
                            # It would be better to insert only
                            cursor.execute(*table.update(
                                    [table.timestamp], [CurrentTimestamp()],
                                    where=table.name == name))
                        else:
                            cursor.execute(*table.insert(
                                    [table.timestamp, table.name],
                                    [[CurrentTimestamp(), name]]))
                        # Read the timestamp back, cast the same way as the
                        # values polled by sync() so comparisons are coherent.
                        # (A former duplicate un-cast MAX query whose result
                        # was immediately discarded has been removed.)
                        cursor.execute(*table.select(
                                _cast(Max(table.timestamp)),
                                where=table.name == name))
                        timestamp, = cursor.fetchone()
                        inst = cls._instances[name]
                        inst._clear(dbname, timestamp)
                    connection.commit()
            finally:
                database.put_connection(connection)
            cls._clean_last = datetime.now()
        reset.clear()

    @classmethod
    def rollback(cls, transaction):
        """Discard the resets scheduled on transaction."""
        try:
            cls._reset[transaction].clear()
        except KeyError:
            pass

    @classmethod
    def drop(cls, dbname):
        """Forget all cached state of dbname and stop its listener thread."""
        with cls._listener_lock:
            listener = cls._listener.pop(dbname, None)
        # join() outside the lock: the listener re-acquires it on exit
        if listener:
            database = backend.Database(dbname)
            conn = database.get_connection()
            try:
                cursor = conn.cursor()
                # Payload-less notify just wakes the listener so it notices
                # it is no longer registered and exits
                cursor.execute('NOTIFY "%s"' % cls._channel)
                conn.commit()
            finally:
                database.put_connection(conn)
            listener.join()
        for inst in cls._instances.values():
            inst._timestamp.pop(dbname, None)
            inst._database_cache.pop(dbname, None)
            inst._transaction_lower.pop(dbname, None)

    @classmethod
    def _listen(cls, dbname):
        """Listener thread body: apply resets notified by other processes."""
        database = backend.Database(dbname)
        if not database.has_channel():
            raise NotImplementedError
        logger.info("listening on channel '%s' of '%s'", cls._channel, dbname)
        conn = database.get_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('LISTEN "%s"' % cls._channel)
            conn.commit()
            # Stop as soon as this thread is no longer the registered
            # listener for dbname — see drop()
            while cls._listener.get(dbname) == threading.current_thread():
                readable, _, _ = select.select([conn], [], [])
                if not readable:
                    continue
                conn.poll()
                while conn.notifies:
                    notification = conn.notifies.pop()
                    if notification.payload:
                        reset = json.loads(notification.payload)
                        for name in reset:
                            # Ignore caches not yet instantiated in this
                            # process (e.g. during a concurrent upgrade);
                            # an unguarded lookup would kill the listener
                            if name in cls._instances:
                                inst = cls._instances[name]
                                inst._clear(dbname)
                        cls._clean_last = datetime.now()
        except Exception:
            logger.error(
                "cache listener on '%s' crashed", dbname, exc_info=True)
            raise
        finally:
            database.put_connection(conn)
            with cls._listener_lock:
                if cls._listener.get(dbname) == threading.current_thread():
                    del cls._listener[dbname]