Example #1
0
class MemoryNamespaceManager(NamespaceManager):
    """NamespaceManager that keeps each namespace's values in an
    in-process dictionary."""

    # Shared registry of per-namespace dicts, guarded by a lock so that
    # concurrent threads resolve a namespace to a single dictionary.
    namespaces = util.SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, **kwargs):
        """Set up the synchronizer and fetch (or create) the backing dict."""
        NamespaceManager.__init__(self, namespace, **kwargs)

        lock_id = "memorycontainer/namespacelock/%s" % self.namespace
        self.lock = Synchronizer(identifier=lock_id, use_files=False)

        self.dictionary = MemoryNamespaceManager.namespaces.get(
            self.namespace, dict)

    def do_acquire_read_lock(self):
        """Take the shared (read) side of the namespace lock."""
        self.lock.acquire_read_lock()

    def do_release_read_lock(self):
        """Release the shared (read) side of the namespace lock."""
        self.lock.release_read_lock()

    def do_acquire_write_lock(self, wait=True):
        """Take the exclusive (write) lock, returning the acquire result."""
        return self.lock.acquire_write_lock(wait)

    def do_release_write_lock(self):
        """Release the exclusive (write) lock."""
        self.lock.release_write_lock()

    # open/close are deliberate no-ops: there is no external resource to
    # track, so the base class's "open count" bookkeeping is skipped.
    def open(self, *args, **kwargs):
        pass

    def close(self, *args, **kwargs):
        pass

    def __getitem__(self, key):
        return self.dictionary[key]

    def __contains__(self, key):
        return key in self.dictionary

    def has_key(self, key):
        return key in self.dictionary

    def __setitem__(self, key, value):
        self.dictionary[key] = value

    def __delitem__(self, key):
        del self.dictionary[key]

    def do_remove(self):
        self.dictionary.clear()

    def keys(self):
        return self.dictionary.keys()
Example #2
0
 def __init__(self, namespace, **kwargs):
     """Record the namespace and reset open-tracking state."""
     self.namespace = namespace
     self.mutex = _threading.Lock()
     self.openers = 0
Example #3
0
 def __init__(self, namespace):
     """Initialize the base manager, then set up locking state."""
     NamespaceManager.__init__(self, namespace)
     self.access_lock = self.get_access_lock()
     self.mutex = _threading.Lock()
     self.openers = 0
Example #4
0
 def __init__(self, clsmap):
     """Remember the class map and create the guarding mutex."""
     self._mutex = _threading.Lock()
     self._clsmap = clsmap
Example #5
0
class DatabaseNamespaceManager(NamespaceManager):
    """Namespace manager that stores each cached value as one pickled row
    per (namespace, key) pair in a SQLAlchemy table."""

    # Lock-protected shared caches so identical configurations reuse a
    # single MetaData/engine and a single Table object.
    metadatas = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})

    def __init__(self,
                 namespace,
                 url,
                 sa_opts=None,
                 optimistic=False,
                 table_name='beaker_cache',
                 data_dir=None,
                 lock_dir=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            A SQLAlchemy database URL
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        NamespaceManager.__init__(self, namespace, **params)

        if sa_opts is None:
            sa_opts = {}

        # One of data_dir / lock_dir must provide the file-lock directory.
        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_db_lock"

        verify_directory(self.lock_dir)

        # Key the table cache on the full configuration so a different
        # url/options/table combination creates its own Table object.
        table_key = url + str(sa_opts) + table_name

        def make_cache():
            # Engine metadata is shared per (url, options) pair.
            meta_key = url + str(sa_opts)

            def make_meta():
                if url.startswith('mysql') and not sa_opts:
                    sa_opts['poolclass'] = pool.QueuePool
                engine = sa.create_engine(url, **sa_opts)
                return sa.BoundMetaData(engine)

            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Build the table object and create it in the db if missing.
            table = sa.Table(
                table_name, meta,
                sa.Column('id', sa.Integer, primary_key=True),
                sa.Column('namespace', sa.String(255), nullable=False),
                sa.Column('key', sa.String(255), nullable=False),
                sa.Column('value', sa.BLOB(), nullable=False),
                sa.UniqueConstraint('namespace', 'key'))
            table.create(checkfirst=True)
            return table

        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    # The database does its own locking; the namespace locks are no-ops.
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    # open/close do nothing so the connection stays open as long
    # as possible
    def open(self, *args, **params):
        pass

    def close(self, *args, **params):
        pass

    def _matching_rows(self, columns, key):
        """Select *columns* for this namespace and *key*; returns all rows."""
        cache = self.cache
        clause = sa.and_(cache.c.namespace == self.namespace,
                         cache.c.key == key)
        return sa.select(columns, clause).execute().fetchall()

    def __getitem__(self, key):
        rows = self._matching_rows([self.cache.c.value], key)
        if not rows:
            raise KeyError(key)
        return cPickle.loads(str(rows[0]['value']))

    def __contains__(self, key):
        return len(self._matching_rows([self.cache.c.id], key)) > 0

    def has_key(self, key):
        return len(self._matching_rows([self.cache.c.id], key)) > 0

    def __setitem__(self, key, value):
        cache = self.cache
        rows = self._matching_rows([cache.c.id], key)
        pickled = cPickle.dumps(value)
        if rows:
            row_id = rows[0]['id']
            cache.update(cache.c.id == row_id).execute(value=pickled)
        else:
            cache.insert().execute(namespace=self.namespace,
                                   key=key,
                                   value=pickled)

    def __delitem__(self, key):
        cache = self.cache
        cache.delete(
            sa.and_(cache.c.namespace == self.namespace,
                    cache.c.key == key)).execute()

    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()

    def keys(self):
        cache = self.cache
        rows = sa.select(
            [cache.c.key],
            cache.c.namespace == self.namespace).execute().fetchall()
        return [row['key'] for row in rows]
Example #6
0
class MemcachedNamespaceManager(NamespaceManager):
    """Namespace manager that stores values in memcached.

    Memcached cannot enumerate keys, so a per-namespace index dict is kept
    under ``<namespace>:keys``; every mutation also updates that index.
    """

    # Lock-protected shared cache of memcache clients, keyed by server url.
    clients = SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, url, data_dir=None, lock_dir=None, **params):
        """Create a manager for *namespace* using the ';'-separated server
        list in *url*.

        One of ``data_dir`` or ``lock_dir`` must supply the file-lock
        directory; raises MissingCacheParameter otherwise.
        """
        NamespaceManager.__init__(self, namespace, **params)

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_mcd_lock"

        verify_directory(self.lock_dir)

        # Reuse a single client per server list across all namespaces.
        self.mc = MemcachedNamespaceManager.clients.get(
            url, lambda: memcache.Client(url.split(';'), debug=0))

    # memcached does its own locking; the namespace locks are no-ops.
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    # override open/close to do nothing, keep memcache connection open as long
    # as possible
    def open(self, *args, **params):
        pass

    def close(self, *args, **params):
        pass

    def __getitem__(self, key):
        # Spaces are illegal in memcached keys; they are stored as the
        # UTF-8 middle-dot byte sequence instead.
        cache_key = key.replace(' ', '\302\267')
        keys = [self.namespace + '_' + cache_key, self.namespace + ':keys']
        key_dict = self.mc.get_multi(keys)
        # The key index, not the value slot, is authoritative for presence.
        if cache_key not in key_dict.get(self.namespace + ':keys', {}):
            raise KeyError(key)
        return key_dict[self.namespace + '_' + cache_key]

    def __contains__(self, key):
        return self.has_key(key)

    def has_key(self, key):
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys') or {}
        return key in keys

    def __setitem__(self, key, value):
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys')
        if keys is None:
            keys = {}
        keys[key] = True
        self.mc.set(self.namespace + ':keys', keys)
        self.mc.set(self.namespace + "_" + key, value)

    def __delitem__(self, key):
        """Delete *key*; raises KeyError when it is not in the key index.

        Previously a missing key index (``get`` returning None) crashed
        with a TypeError on ``del None[key]``; the mapping protocol calls
        for KeyError instead.
        """
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys')
        if keys is None:
            # No index yet: nothing was ever stored in this namespace.
            raise KeyError(key)
        del keys[key]  # raises KeyError for an unknown key
        self.mc.delete(self.namespace + "_" + key)
        self.mc.set(self.namespace + ':keys', keys)

    def do_remove(self):
        keys = self.mc.get(self.namespace + ':keys')
        if keys is not None:
            delete_keys = [self.namespace + '_' + x for x in keys]
            delete_keys.append(self.namespace + ':keys')
            self.mc.delete_multi(delete_keys)

    def keys(self):
        keys = self.mc.get(self.namespace + ':keys')
        if keys is None:
            return []
        else:
            # Translate stored middle-dots back into spaces.
            return [x.replace('\302\267', ' ') for x in keys.keys()]
Example #7
0
class DatabaseNamespaceManager(NamespaceManager):
    """Namespace manager that persists an entire namespace as a single
    pickled dict stored in one database row per namespace."""

    # Lock-protected shared caches so identical configurations reuse one
    # MetaData/engine and one Table object.
    metadatas = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        NamespaceManager.__init__(self, namespace, **params)

        if sa_opts is None:
            sa_opts = params

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_db_lock"

        verify_directory(self.lock_dir)

        # The url may arrive directly or as an 'sa.url' engine option; fail
        # with a clear configuration error rather than a KeyError when both
        # are absent (matches the data_dir/lock_dir check above).
        if url is None and 'sa.url' not in sa_opts:
            raise MissingCacheParameter("url is required")
        url = url or sa_opts['sa.url']
        table_key = url + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name

            def make_meta():
                if sa_version == '0.3':
                    if url.startswith('mysql') and not sa_opts:
                        sa_opts['poolclass'] = pool.QueuePool
                    engine = sa.create_engine(url, **sa_opts)
                    meta = sa.BoundMetaData(engine)
                else:
                    # SQLAlchemy pops the url, this ensures it sticks around
                    # later
                    sa_opts['sa.url'] = url
                    engine = sa.engine_from_config(sa_opts, 'sa.')
                    meta = sa.MetaData()
                    meta.bind = engine
                return meta

            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255),
                                       nullable=False),
                             sa.Column('accessed', types.DateTime,
                                       nullable=False),
                             sa.Column('created', types.DateTime,
                                       nullable=False),
                             sa.Column('data', types.BLOB(), nullable=False),
                             sa.UniqueConstraint('namespace'))
            cache.create(checkfirst=True)
            return cache

        self.hash = {}          # in-memory working copy of the namespace
        self._is_new = False    # True when no row exists for this namespace
        self.loaded = False     # True once the namespace row has been read
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    # The database does its own locking; override our locks as no-ops.
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    def do_open(self, flags):
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        cache = self.cache
        result = sa.select([cache.c.data],
                           cache.c.namespace == self.namespace
                           ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(result['data']))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                # Corrupt/unreadable pickle: start over with empty storage.
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        # Only write back when the namespace was opened for create/write.
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace,
                                       data=cPickle.dumps(self.hash),
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace == self.namespace).execute(
                    data=cPickle.dumps(self.hash), accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # row is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # ``in`` instead of the long-deprecated dict.has_key()
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
Example #8
0
class SQLAlchemyNamespaceManager(NamespaceManager):
    """Namespace manager that stores each namespace as one pickled dict in
    a caller-supplied SQLAlchemy table (one row per namespace)."""

    # Lock-protected shared caches so a given engine url and table are
    # registered only once.
    binds = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        NamespaceManager.__init__(self, namespace, **kwargs)

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter('data_dir or lock_dir is required')
        else:
            self.lock_dir = data_dir + '/container_db_lock'

        verify_directory(self.lock_dir)

        # Register (or reuse) the bind and table in the class-level caches.
        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
                                               lambda: table)
        self.hash = {}          # in-memory working copy of the namespace
        self._is_new = False    # True when no row exists for this namespace
        self.loaded = False     # True once the namespace row has been read

    # The database does its own locking; these are deliberate no-ops.
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    def do_open(self, flags):
        # Data already loaded: just record the open flags.
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(result['data']))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                # Corrupt/unreadable pickle: start over with empty storage.
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        # Only write back when the namespace was opened for create/write.
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            data = cPickle.dumps(self.hash)
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert, namespace=self.namespace, data=data,
                                  accessed=datetime.now(),
                                  created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(
                    self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=data, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        # The row is gone, so a later save must insert a fresh one.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # ``in`` instead of the long-deprecated dict.has_key()
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()