Example #1
class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self,
                 namespace,
                 url=None,
                 sa_opts=None,
                 optimistic=False,
                 table_name='beaker_cache',
                 data_dir=None,
                 lock_dir=None,
                 schema_name=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        ``schema_name``
            The schema name to use in the database for the cache.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if sa_opts is None:
            sa_opts = {}

        self.lock_dir = None

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name

            def make_meta():
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
                return meta

            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(
                table_name,
                meta,
                sa.Column('id', types.Integer, primary_key=True),
                sa.Column('namespace', types.String(255), nullable=False),
                sa.Column('accessed', types.DateTime, nullable=False),
                sa.Column('created', types.DateTime, nullable=False),
                sa.Column('data', types.PickleType, nullable=False),
                sa.UniqueConstraint('namespace'),
                schema=schema_name if schema_name else meta.schema)
            cache.create(checkfirst=True)
            return cache

        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    def get_access_lock(self):
        return null_synchronizer()

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s/%s" %
            (self.namespace, key),
            lock_dir=self.lock_dir)

    def do_open(self, flags, replace):
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        cache = self.cache
        result_proxy = sa.select(
            [cache.c.data], cache.c.namespace == self.namespace).execute()
        result = result_proxy.fetchone()
        result_proxy.close()

        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, pickle.PickleError):
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace,
                                       data=self.hash,
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace == self.namespace).execute(
                    data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
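
For context, this backend is normally reached through Beaker's configuration
layer rather than instantiated directly. A minimal usage sketch, assuming
Beaker is installed; the URL, lock directory, and cache name are placeholders:

from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options

# 'ext:database' selects the SQLAlchemy-backed DatabaseNamespaceManager.
config = {
    'cache.type': 'ext:database',
    'cache.url': 'sqlite:///beaker.db',   # any SQLAlchemy URL
    'cache.lock_dir': '/tmp/cache/lock',
}
cache_manager = CacheManager(**parse_cache_config_options(config))
tmpl_cache = cache_manager.get_cache('mytemplate', expire=3600)

# Fetch-or-create: createfunc runs only on a cache miss.
value = tmpl_cache.get(key='report', createfunc=lambda: 'expensive result')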
Example #2
class DatabaseNamespaceManager(NamespaceManager):
    metadatas = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})

    def __init__(self,
                 namespace,
                 url,
                 sa_opts=None,
                 optimistic=False,
                 table_name='beaker_cache',
                 data_dir=None,
                 lock_dir=None,
                 **params):
        """Creates a database namespace manager
        
        ``url``
            A SQLAlchemy database URL
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        NamespaceManager.__init__(self, namespace, **params)

        if sa_opts is None:
            sa_opts = {}

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_db_lock"

        verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        table_key = url + str(sa_opts) + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + str(sa_opts)

            def make_meta():
                if url.startswith('mysql') and not sa_opts:
                    sa_opts['poolclass'] = pool.QueuePool
                engine = sa.create_engine(url, **sa_opts)
                meta = sa.BoundMetaData(engine)
                return meta

            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(
                table_name, meta, sa.Column('id', sa.Integer,
                                            primary_key=True),
                sa.Column('namespace', sa.String(255), nullable=False),
                sa.Column('key', sa.String(255), nullable=False),
                sa.Column('value', sa.BLOB(), nullable=False),
                sa.UniqueConstraint('namespace', 'key'))
            cache.create(checkfirst=True)
            return cache

        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    # The database does its own locking.  override our own stuff
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    # override open/close to do nothing, keep the connection open as long
    # as possible
    def open(self, *args, **params):
        pass

    def close(self, *args, **params):
        pass

    def __getitem__(self, key):
        cache = self.cache
        result = sa.select([cache.c.value],
                           sa.and_(cache.c.namespace == self.namespace,
                                   cache.c.key == key)).execute()
        rows = result.fetchall()
        if len(rows) > 0:
            return cPickle.loads(str(rows[0]['value']))
        else:
            raise KeyError(key)

    def __contains__(self, key):
        cache = self.cache
        rows = sa.select([cache.c.id],
                         sa.and_(cache.c.namespace == self.namespace,
                                 cache.c.key == key)).execute().fetchall()
        return len(rows) > 0

    def has_key(self, key):
        cache = self.cache
        rows = sa.select([cache.c.id],
                         sa.and_(cache.c.namespace == self.namespace,
                                 cache.c.key == key)).execute().fetchall()
        return len(rows) > 0

    def __setitem__(self, key, value):
        cache = self.cache
        rows = sa.select([cache.c.id],
                         sa.and_(cache.c.namespace == self.namespace,
                                 cache.c.key == key)).execute().fetchall()
        value = cPickle.dumps(value)
        if len(rows) > 0:
            id = rows[0]['id']
            cache.update(cache.c.id == id).execute(value=value)
        else:
            cache.insert().execute(namespace=self.namespace,
                                   key=key,
                                   value=value)

    def __delitem__(self, key):
        cache = self.cache
        cache.delete(
            sa.and_(cache.c.namespace == self.namespace,
                    cache.c.key == key)).execute()

    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()

    def keys(self):
        cache = self.cache
        rows = sa.select(
            [cache.c.key],
            cache.c.namespace == self.namespace).execute().fetchall()
        return [x['key'] for x in rows]
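
Unlike Example #1, which pickles an entire namespace hash into one row, this
variant stores one row per (namespace, key) pair. A hypothetical direct-usage
sketch, assuming the class above and its imports are available; the URL and
data_dir are placeholders:

manager = DatabaseNamespaceManager('sessions', 'sqlite:///beaker.db',
                                   data_dir='/tmp/cache')
manager['user_42'] = {'name': 'Ada'}  # pickled into the `value` BLOB column
assert 'user_42' in manager
print(manager.keys())                 # ['user_42']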
Example #3
class MemcachedNamespaceManager(NamespaceManager):
    clients = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        global memcache
        if memcache is not None:
            return
        try:
            import pylibmc as memcache
        except ImportError:
            try:
                import cmemcache as memcache
                warnings.warn(
                    "cmemcache is known to have serious "
                    "concurrency issues; consider using 'memcache' or 'pylibmc'"
                )
            except ImportError:
                try:
                    import memcache
                except ImportError:
                    raise InvalidCacheBackendError(
                        "Memcached cache backend requires one of "
                        "the 'pylibmc', 'memcache', or 'cmemcache' libraries")

    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        NamespaceManager.__init__(self, namespace)

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.mc = MemcachedNamespaceManager.clients.get(
            url, memcache.Client, url.split(';'))

    def get_creation_lock(self, key):
        return file_synchronizer(identifier="memcachedcontainer/funclock/%s" %
                                 self.namespace,
                                 lock_dir=self.lock_dir)

    def _format_key(self, key):
        return self.namespace + '_' + key.replace(' ', '\302\267')

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError(
            "Memcache caching does not support iteration of all cache keys")
Example #4
class MemcachedNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over a memcache client library."""

    clients = SyncDict()

    def __new__(cls, *args, **kw):
        memcache_module = kw.pop('memcache_module', 'auto')

        memcache_client = _load_client(memcache_module)

        if _is_configured_for_pylibmc(memcache_module, memcache_client):
            return object.__new__(PyLibMCNamespaceManager)
        else:
            return object.__new__(MemcachedNamespaceManager)

    def __init__(self,
                 namespace,
                 url,
                 memcache_module='auto',
                 data_dir=None,
                 lock_dir=None,
                 **kw):
        NamespaceManager.__init__(self, namespace)

        _memcache_module = _client_libs[memcache_module]

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check for pylibmc namespace manager, in which case client will be
        # instantiated by subclass __init__, to handle behavior passing to the
        # pylibmc client
        if not _is_configured_for_pylibmc(memcache_module, _memcache_module):
            self.mc = MemcachedNamespaceManager.clients.get(
                (memcache_module, url), _memcache_module.Client,
                url.split(';'))

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="memcachedcontainer/funclock/%s/%s" %
            (self.namespace, key),
            lock_dir=self.lock_dir)

    def _format_key(self, key):
        if not isinstance(key, str):
            key = key.decode('ascii')
        formatted_key = (self.namespace + '_' + key).replace(' ', '\302\267')
        if len(formatted_key) > MAX_KEY_LENGTH:
            formatted_key = sha1(formatted_key).hexdigest()
        return formatted_key

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError("Memcache caching does not "
                                  "support iteration of all cache keys")
Example #5
class SqlaNamespaceManager(OpenResourceNamespaceManager):
    binds = SyncDict()
    tables = SyncDict()

    def __init__(self,
                 namespace,
                 bind,
                 table,
                 data_dir=None,
                 lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get(
            '%s:%s' % (bind.url, table.name), lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def get_access_lock(self):
        return null_synchronizer()

    def get_creation_lock(self, key):
        return file_synchronizer(identifier="databasecontainer/funclock/%s" %
                                 self.namespace,
                                 lock_dir=self.lock_dir)

    def do_open(self, flags):
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(result['data']))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            data = cPickle.dumps(self.hash)
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert,
                                  namespace=self.namespace,
                                  data=data,
                                  accessed=datetime.now(),
                                  created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(
                    self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=data, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
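
A usage sketch for this SQLAlchemy-bound variant; beaker.ext.sqla also ships a
make_cache_table helper that builds a table with the expected columns. The
engine URL and data_dir are placeholders:

import sqlalchemy as sa
from beaker.ext.sqla import SqlaNamespaceManager, make_cache_table

engine = sa.create_engine('sqlite:///beaker.db')
metadata = sa.MetaData()
cache_table = make_cache_table(metadata)  # id/namespace/accessed/created/data
metadata.create_all(engine)

manager = SqlaNamespaceManager('templates', bind=engine, table=cache_table,
                               data_dir='/tmp/cache')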
Example #6
class MemcachedNamespaceManager(NamespaceManager):
    clients = SyncDict()

    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        NamespaceManager.__init__(self, namespace)

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.mc = MemcachedNamespaceManager.clients.get(url,
                                                        memcache.Client,
                                                        url.split(';'),
                                                        debug=0)

    def get_creation_lock(self, key):
        return file_synchronizer(identifier="memcachedcontainer/funclock/%s" %
                                 self.namespace,
                                 lock_dir=self.lock_dir)

    def _format_key(self, key):
        return self.namespace + '_' + key.replace(' ', '\302\267')

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError(
            "Memcache caching does not support iteration of all cache keys")
Example #7
def test_dict():
    # normal dictionary test, where we will remove the value
    # periodically. the number of creates should be equal to
    # the number of removes plus one.
    print("\ntesting with normal dict")
    runtest(SyncDict())
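
For reference, the SyncDict exercised here is the same create-once helper the
other examples use as clients.get(key, createfunc, *args). A minimal
illustrative sketch of that pattern (not Beaker's exact implementation):

import threading

class SyncDictSketch:
    def __init__(self):
        self.dict = {}
        self.mutex = threading.Lock()

    def get(self, key, createfunc, *args, **kwargs):
        # Fast path: the object already exists.
        try:
            return self.dict[key]
        except KeyError:
            with self.mutex:
                # Re-check under the lock so only one thread creates it.
                if key not in self.dict:
                    self.dict[key] = createfunc(*args, **kwargs)
                return self.dict[key]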
Example #8
class MemcachedNamespaceManager(NamespaceManager):
    clients = SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, url, data_dir=None, lock_dir=None, **params):
        NamespaceManager.__init__(self, namespace, **params)

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_mcd_lock"

        verify_directory(self.lock_dir)

        self.mc = MemcachedNamespaceManager.clients.get(
            url, lambda: memcache.Client(url.split(';'), debug=0))

    # memcached does its own locking.  override our own stuff
    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    # override open/close to do nothing, keep memcache connection open as long
    # as possible
    def open(self, *args, **params):
        pass

    def close(self, *args, **params):
        pass

    def __getitem__(self, key):
        cache_key = key.replace(' ', '\302\267')
        keys = [self.namespace + '_' + cache_key, self.namespace + ':keys']
        key_dict = self.mc.get_multi(keys)
        if cache_key not in key_dict.get(self.namespace + ':keys', {}):
            raise KeyError(key)
        return key_dict[self.namespace + '_' + cache_key]

    def __contains__(self, key):
        return self.has_key(key)

    def has_key(self, key):
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys') or {}
        return key in keys

    def __setitem__(self, key, value):
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys')
        if keys is None:
            keys = {}
        keys[key] = True
        self.mc.set(self.namespace + ':keys', keys)
        self.mc.set(self.namespace + "_" + key, value)

    def __delitem__(self, key):
        key = key.replace(' ', '\302\267')
        keys = self.mc.get(self.namespace + ':keys')
        del keys[key]
        self.mc.delete(self.namespace + "_" + key)
        self.mc.set(self.namespace + ':keys', keys)

    def do_remove(self):
        keys = self.mc.get(self.namespace + ':keys')
        if keys is not None:
            delete_keys = [self.namespace + '_' + x for x in keys]
            delete_keys.append(self.namespace + ':keys')
            self.mc.delete_multi(delete_keys)

    def keys(self):
        keys = self.mc.get(self.namespace + ':keys')
        if keys is None:
            return []
        else:
            return [x.replace('\302\267', ' ') for x in keys.keys()]
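
Because memcached cannot enumerate keys, this variant maintains its own index:
one entry per value plus a dict of known keys under '<namespace>:keys'. An
illustrative sketch of that layout, assuming python-memcached and a local
daemon:

import memcache  # assumption: the python-memcached client is installed

mc = memcache.Client(['127.0.0.1:11211'])
mc.set('mynamespace:keys', {'foo': True})   # the key index
mc.set('mynamespace_foo', 'value-for-foo')  # the cached value itself
print(mc.get('mynamespace:keys'))           # {'foo': True}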
Example #9
class MongoNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over MongoDB.

    Provided ``url`` can be both a mongodb connection string or
    an already existing MongoClient instance.

    The data will be stored into ``beaker_cache`` collection of the
    *default database*, so make sure your connection string or
    MongoClient point to a default database.
    """

    MAX_KEY_LENGTH = 1024

    clients = SyncDict()

    def __init__(self, namespace, url, **kw):
        super(MongoNamespaceManager, self).__init__(namespace)
        self.lock_dir = None  # MongoDB uses mongo itself for locking.

        if pymongo is None:
            raise RuntimeError("pymongo3 is not available")

        if isinstance(url, string_type):
            self.client = MongoNamespaceManager.clients.get(
                url, pymongo.MongoClient, url)
        else:
            self.client = url
        self.db = self.client.get_default_database()

    def _format_key(self, key):
        if not isinstance(key, str):
            key = key.decode("ascii")
        if len(key) > (self.MAX_KEY_LENGTH - len(self.namespace) - 1):
            if not PY2:
                key = key.encode("utf-8")
            key = sha1(key).hexdigest()
        return "%s:%s" % (self.namespace, key)

    def get_creation_lock(self, key):
        return MongoSynchronizer(self._format_key(key), self.client)

    def __getitem__(self, key):
        self._clear_expired()
        entry = self.db.beaker_cache.find_one({"_id": self._format_key(key)})
        if entry is None:
            raise KeyError(key)
        return pickle.loads(entry["value"])

    def __contains__(self, key):
        self._clear_expired()
        entry = self.db.beaker_cache.find_one({"_id": self._format_key(key)})
        return entry is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        self._clear_expired()

        expiration = None
        if expiretime is not None:
            expiration = time.time() + expiretime

        value = pickle.dumps(value)
        self.db.beaker_cache.update_one(
            {"_id": self._format_key(key)},
            {"$set": {
                "value": bson.Binary(value),
                "expiration": expiration
            }},
            upsert=True,
        )

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self._clear_expired()
        self.db.backer_cache.delete_many({"_id": self._format_key(key)})

    def do_remove(self):
        self.db.beaker_cache.delete_many(
            {"_id": {
                "$regex": "^%s" % self.namespace
            }})

    def keys(self):
        return [
            e["_id"].split(":", 1)[-1] for e in self.db.beaker_cache.find(
                {"_id": {
                    "$regex": "^%s" % self.namespace
                }})
        ]

    def _clear_expired(self):
        now = time.time()
        self.db.beaker_cache.delete_many({
            "_id": {
                "$regex": "^%s" % self.namespace
            },
            "expiration": {
                "$ne": None,
                "$lte": now
            },
        })
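
A usage sketch for the manager above, assuming a local mongod and a connection
string that names a default database; all values are placeholders:

manager = MongoNamespaceManager('sessions',
                                'mongodb://localhost:27017/beaker')
manager.set_value('user_42', {'name': 'Ada'}, expiretime=300)
assert 'user_42' in manager
print(manager['user_42'])  # {'name': 'Ada'}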
Example #10
class MongoDBNamespaceManager(NamespaceManager):
    clients = SyncDict()
    _pickle = True
    _sparse = False

    # TODO - support write concern / safe
    def __init__(self, namespace, url=None, data_dir=None,
                 lock_dir=None, skip_pickle=False, 
                 sparse_collection=False, **params):
        NamespaceManager.__init__(self, namespace)

        if not url:
            raise MissingCacheParameter("MongoDB url is required")

        if skip_pickle:
            log.info("Disabling pickling for namespace: %s" % self.namespace)
            self._pickle = False

        if sparse_collection:
            log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
            self._sparse = True

        # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
        (host_list, database, username, password, collection, options) = _parse_uri(url)

        if database and host_list:
            data_key = "mongodb:%s" % (database)
        else:
            raise MissingCacheParameter("Invalid Cache URL.  Cannot parse.")

        # Key will be db + collection
        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mongodb_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        def _create_mongo_conn():
            # Join multiple hosts with commas so the URI stays valid.
            host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
            log.info("Host URI: %s" % host_uri)
            conn = Connection(host_uri, slave_okay=options.get('slaveok', False))

            db = conn[database]

            if username:
                log.info("Attempting to authenticate %s/%s " % (username, password))
                if not db.authenticate(username, password):
                    raise InvalidCacheBackendError(
                        'Cannot authenticate to MongoDB.')
            return db[collection]

        self.mongo = MongoDBNamespaceManager.clients.get(data_key,
                    _create_mongo_conn)

    def get_creation_lock(self, key):
        """@TODO - stop hitting filesystem for this...
        I think mongo can properly avoid dog piling for us.
        """
        return file_synchronizer(
            identifier="mongodb_container/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_remove(self):
        """Clears the entire filesystem (drops the collection)"""
        log.debug("[MongoDB] Remove namespace: %s" % self.namespace)
        q = {}
        if self._sparse:
            q = {'_id.namespace': self.namespace}
        else:
            q = {'_id': self.namespace}

        log.debug("[MongoDB] Remove Query: %s" % q)
        self.mongo.remove(q)

    def __getitem__(self, key):
        log.debug("[MongoDB %s] Get Key: %s" % (self.mongo,
                                                key))

        _id = {}
        fields = {}
        if self._sparse:
            _id = {
                'namespace': self.namespace,
                'key': key
            }
            fields['data'] = True
        else:
            _id = self.namespace
            fields['data.' + key] = True

        log.debug("[MongoDB] Get Query: id == %s Fields: %s", _id, fields)
        result = self.mongo.find_one({'_id': _id}, fields=fields)
        log.debug("[MongoDB] Get Result: %s", result)

        if result:
            # Mongo has been observed returning -1 from __len__, which is
            # invalid (it must be non-negative), so check the fields
            # explicitly rather than relying on cursor truthiness.
            log.debug("Result: %s", result)
            data = result.get('data', None)
            log.debug("Data: %s", data)
            if self._sparse:
                value = data
            else:
                value = data.get(key, None)

            if not value:
                return None

            if self._pickle or key == 'session':
                value = _depickle(value)
            else:
                if value['pickled']:
                    value = (value['stored'], value['expires'], _depickle(value['value']))
                else:
                    value = (value['stored'], value['expires'], value['value'])

            log.debug("[key: %s] Value: %s" % (key, value))

            return value
        else:
            return None


    def __contains__(self, key):
        log.debug("[MongoDB] Has '%s'? " % key)
        result = self.__getitem__(key)
        return result is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        log.debug("[MongoDB %s] Set Key: %s (Expiry: %s) ... " %
                 (self.mongo, key, expiretime))

        _id = {}
        doc = {}

        if self._pickle or key == 'session':
            try:
                value = pickle.dumps(value)
            except Exception:
                log.exception("Failed to pickle value.")
        else:
            value = {
                'stored': value[0],
                'expires': value[1],
                'value': value[2],
                'pickled': False
            }
            try:
                bson.encode(value)
            except Exception:
                log.warning("Value is not bson serializable, pickling inner value.")
                value['value'] = pickle.dumps(value['value'])
                value['pickled'] = True

        if self._sparse:
            _id = {
                'namespace': self.namespace,
                'key': key
            }

            doc['data'] = value
            doc['_id'] = _id
            if expiretime:
                # TODO - What is the datatype of this? it should be instantiated as a datetime instance
                doc['valid_until'] = expiretime
        else:
            _id = self.namespace
            doc['$set'] = {'data.' + key: value}
            if expiretime:
                # TODO - What is the datatype of this? it should be instantiated as a datetime instance
                doc['$set']['valid_until'] = expiretime

        log.debug("Upserting Doc '%s' to _id '%s'" % (doc, _id))
        self.mongo.update({"_id": _id}, doc, upsert=True, safe=True)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        """Delete JUST the key."""
        if self._sparse:
            # Match both namespace and key, otherwise the whole
            # namespace would be removed.
            self.mongo.remove({'_id.namespace': self.namespace,
                               '_id.key': key})
        else:
            self.mongo.update({'_id': self.namespace},
                              {'$unset': {'data.' + key: True}}, upsert=False)

    def keys(self):
        if self._sparse:
            return [row['_id']['key'] for row in self.mongo.find(
                {'_id.namespace': self.namespace}, {'_id': True})]
        else:
            return self.mongo.find_one({'_id': self.namespace},
                                       {'data': True}).get('data', {}).keys()
Example #11

class MongoDBGridFSNamespaceManager(NamespaceManager):

    clients = SyncDict()

    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        NamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mongodb_gridfs_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        if not url:
            raise MissingCacheParameter("MongoDB url is required")

        for k, v in parse_uri(url).items():
            setattr(self, "url_%s" % k, v)

        if not self.url_database or not self.url_nodelist:
            raise MissingCacheParameter("Invalid MongoDB url.")

        data_key = "mongodb_gridfs:%s:%s" % (self.url_database,
                                             self.url_collection)
        self.gridfs = MongoDBGridFSNamespaceManager.clients.get(
            data_key, self._create_mongo_connection)

    def _create_mongo_connection(self):
        host_uri = \
            'mongodb://%s' % (",".join(["%s:%s" % h for h in self.url_nodelist]))
        log.info("[MongoDBGridFS] Host URI: %s" % host_uri)

        params = {}
        params['slaveok'] = self.url_options.get("slaveok", False)
        if "replicaset" in self.url_options:
            params['replicaset'] = self.url_options["replicaset"] or ""

        conn = Connection(host_uri, **params)
        db = conn[self.url_database]

        if self.url_username:
            log.info("[MongoDBGridFS] Attempting to authenticate %s/%s " %
                     (self.url_username, self.url_password))
            if not db.authenticate(self.url_username, self.url_password):
                raise InvalidCacheBackendError(
                    'Cannot authenticate to MongoDB.')

        collection = db["%s.files" % self.url_collection]
        collection.ensure_index([("namespace", ASCENDING),
                                 ("filename", ASCENDING)],
                                unique=True)
        collection.ensure_index([("namespace", ASCENDING)])

        return (db, GridFS(db, self.url_collection))

    @property
    def collection(self):
        mongo = self.gridfs[0]
        return mongo["%s.files" % self.url_collection]

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="mongodb_gridfs_container/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_remove(self):
        log.debug("[MongoDBGridFS] Remove namespace: %s" % self.namespace)
        self.collection.remove({'namespace': self.namespace})

    def _get_file_for_key(self, key):
        query = {'namespace': self.namespace, 'filename': key}
        log.debug("[MongoDBGridFS] Get Query: %s", query)
        try:
            result = self.gridfs[1].get_last_version(**query)
        except NoFile:
            result = None
        log.debug("[MongoDBGridFS] Get Result: %s", result)
        return result

    def __getitem__(self, key):
        query = {'namespace': self.namespace, 'filename': key}
        log.debug("[MongoDBGridFS %s] Get Key: %s" % (self.gridfs, query))

        result = self._get_file_for_key(key)
        if not result:
            return None

        value = result.read()
        if not value:
            return None

        try:
            value = pickle.loads(value)
        except Exception:
            log.exception("[MongoDBGridFS] Failed to unpickle value.")
            return None

        log.debug("[MongoDBGridFS] key: %s; value: %s" % (key, value))
        return value
Example #12

class HandlerSocketMySQLNamespaceManager(NamespaceManager):

    clients = SyncDict()
    _pickle = True

    def __init__(self,
                 namespace,
                 read_servers=None,
                 write_servers=None,
                 database=None,
                 table=None,
                 index=None,
                 data_dir=None,
                 skip_pickle=False,
                 **kw):

        self.database = database
        self.table = table
        self.index = index

        NamespaceManager.__init__(self, namespace)

        data_key = "hs:%s" % (database)

        def _initiate_connections(read_servers, write_servers):
            read_servers = parse_servers_for_hs(read_servers)
            write_servers = parse_servers_for_hs(write_servers)
            hs = Manager(read_servers, write_servers)
            return hs

        self.hs = HandlerSocketMySQLNamespaceManager.clients.get(
            data_key, _initiate_connections, read_servers, write_servers)

    def _format_key(self, key):
        # Every key maps to the namespace itself: the whole namespace
        # shares a single row, so the last write wins.
        return self.namespace

    def get_creation_lock(self, key):
        return null_synchronizer()

    def do_remove(self):
        raise NotImplementedError('Method not supported')

    def __getitem__(self, key):
        key = self._format_key(key)
        result = None
        try:
            result = self.hs.find(self.database, self.table, '=',
                                  ['id', 'data'], [key], self.index, 1)
        except Exception as e:
            log.error("Failure {} trying to find key {}".format(
                e.message, key))
            raise

        data = result
        if result:
            # TODO: document result data structure
            data = pickle.loads(result[0][1][1])
        return data

    def __contains__(self, key):
        key = self._format_key(key)
        return self.__getitem__(key) is not None

    def has_key(self, key):
        key = self._format_key(key)
        return key in self

    def set_value(self, key, value, expiretime=None):
        value = pickle.dumps(value)
        key = self._format_key(key)
        created = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        ekey = self.has_key(key)
        try:
            if not ekey:
                self.hs.insert(self.database, self.table,
                               [('id', key), ('data', value),
                                ('created', created)])
            else:
                self.hs.update(self.database, self.table, '=', ['id', 'data'],
                               [key], [key, value])
        except Exception as e:
            log.error("Failure {} while setting value for key {}".format(
                e.message, key))
            raise

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        key = self._format_key(key)
        self.hs.delete(self.database, self.table, '=', ['id'], [key])

    def keys(self):
        raise NotImplementedError('Method not supported')
Example #13
class RedisNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over Redis.

    Provided ``url`` can be both a redis connection string or
    an already existing StrictRedis instance.

    The data will be stored into redis keys, with their name
    starting with ``beaker_cache:``. So make sure you provide
    a specific database number if you don't want to mix them
    with your own data.
    """

    MAX_KEY_LENGTH = 1024

    clients = SyncDict()

    def __init__(self, namespace, url, **kw):
        super(RedisNamespaceManager, self).__init__(namespace)
        self.lock_dir = None  # Redis uses redis itself for locking.

        if redis is None:
            raise RuntimeError("redis is not available")

        if isinstance(url, string_type):
            self.client = RedisNamespaceManager.clients.get(
                url, redis.StrictRedis.from_url, url)
        else:
            self.client = url

    def _format_key(self, key):
        if not isinstance(key, str):
            key = key.decode("ascii")
        if len(key) > (self.MAX_KEY_LENGTH - len(self.namespace) -
                       len("beaker_cache:") - 1):
            if not PY2:
                key = key.encode("utf-8")
            key = sha1(key).hexdigest()
        return "beaker_cache:%s:%s" % (self.namespace, key)

    def get_creation_lock(self, key):
        return RedisSynchronizer(self._format_key(key), self.client)

    def __getitem__(self, key):
        entry = self.client.get(self._format_key(key))
        if entry is None:
            raise KeyError(key)
        return pickle.loads(entry)

    def __contains__(self, key):
        return self.client.exists(self._format_key(key))

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        value = pickle.dumps(value)
        if expiretime is not None:
            self.client.setex(self._format_key(key), int(expiretime), value)
        else:
            self.client.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.client.delete(self._format_key(key))

    def do_remove(self):
        for k in self.keys():
            self.client.delete(k)

    def keys(self):
        return self.client.keys("beaker_cache:%s:*" % self.namespace)
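
A usage sketch for the Redis manager, assuming a local redis-server; the URL
and keys are placeholders:

manager = RedisNamespaceManager('sessions', 'redis://localhost:6379/0')
manager.set_value('user_42', {'name': 'Ada'}, expiretime=300)
assert 'user_42' in manager
print(manager['user_42'])  # {'name': 'Ada'}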
Example #14
class DatabaseNamespaceManager(NamespaceManager):
    metadatas = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})
    
    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 **params):
        """Creates a database namespace manager
        
        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        NamespaceManager.__init__(self, namespace, **params)
        
        if sa_opts is None:
            sa_opts = params
        
        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        else:
            self.lock_dir = data_dir + "/container_db_lock"
        
        verify_directory(self.lock_dir)
        
        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name
        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name
            def make_meta():
                if sa_version == '0.3':
                    if url.startswith('mysql') and not sa_opts:
                        sa_opts['poolclass'] = pool.QueuePool
                    engine = sa.create_engine(url, **sa_opts)
                    meta = sa.BoundMetaData(engine)
                else:
                    # SQLAlchemy pops the url, this ensures it sticks around
                    # later
                    sa_opts['sa.url'] = url
                    engine = sa.engine_from_config(sa_opts, 'sa.')
                    meta = sa.MetaData()
                    meta.bind = engine
                return meta
            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255), nullable=False),
                             sa.Column('accessed', types.DateTime, nullable=False),
                             sa.Column('created', types.DateTime, nullable=False),
                             sa.Column('data', types.BLOB(), nullable=False),
                             sa.UniqueConstraint('namespace')
            )
            cache.create(checkfirst=True)
            return cache
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
    
    # The database does its own locking.  override our own stuff
    def do_acquire_read_lock(self): pass
    def do_release_read_lock(self): pass
    def do_acquire_write_lock(self, wait = True): return True
    def do_release_write_lock(self): pass
    
    def do_open(self, flags):
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return
        
        cache = self.cache
        result = sa.select([cache.c.data], 
                           cache.c.namespace==self.namespace
                          ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(result['data']))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True
    
    def do_close(self):
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace, 
                                       data=cPickle.dumps(self.hash),
                                       accessed=datetime.now(), 
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace==self.namespace).execute(
                    data=cPickle.dumps(self.hash), accessed=datetime.now())
        self.flags = None
    
    def do_remove(self):
        cache = self.cache
        cache.delete(cache.c.namespace==self.namespace).execute()
        self.hash = {}
        
        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key): 
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash
        
    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
Example #15
class MemcachedNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over a memcache client library."""

    clients = SyncDict()

    def __new__(cls, *args, **kw):
        memcache_module = kw.pop('memcache_module', 'auto')

        memcache_client = _load_client(memcache_module)

        if memcache_module == 'pylibmc' or \
            memcache_client.__name__.startswith('pylibmc'):
            return object.__new__(PyLibMCNamespaceManager)
        else:
            return object.__new__(MemcachedNamespaceManager)

    def __init__(self,
                 namespace,
                 url,
                 memcache_module='auto',
                 data_dir=None,
                 lock_dir=None,
                 **kw):
        NamespaceManager.__init__(self, namespace)

        _memcache_module = _client_libs[memcache_module]

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.mc = MemcachedNamespaceManager.clients.get(
            (memcache_module, url), _memcache_module.Client, url.split(';'))

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="memcachedcontainer/funclock/%s/%s" %
            (self.namespace, key),
            lock_dir=self.lock_dir)

    def _format_key(self, key):
        return self.namespace + '_' + key.replace(' ', '\302\267')

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError("Memcache caching does not "
                                  "support iteration of all cache keys")
Example #16
class SqlaNamespaceManager(OpenResourceNamespaceManager):
    binds = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        global sa
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
        except ImportError:
            raise InvalidCacheBackendError("SQLAlchemy, which is required by "
                                           "this backend, is not installed")

    def __init__(self,
                 namespace,
                 bind,
                 table,
                 data_dir=None,
                 lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get(
            "%s:%s" % (bind.url, table.name), lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def get_access_lock(self):
        return null_synchronizer()

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir,
        )

    def do_open(self, flags, replace):
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result["data"]
            except (IOError, OSError, EOFError, pickle.PickleError):
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        if self.flags is not None and (self.flags == "c" or self.flags == "w"):
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(
                    insert,
                    namespace=self.namespace,
                    data=self.hash,
                    accessed=datetime.now(),
                    created=datetime.now(),
                )
                self._is_new = False
            else:
                update = self.table.update(
                    self.table.c.namespace == self.namespace)
                self.bind.execute(update,
                                  data=self.hash,
                                  accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
Example #17

class RedisNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over Redis."""
    MAX_KEY_LENGTH = 1024

    clients = SyncDict()

    def __init__(self, namespace, url, **kw):
        super(RedisNamespaceManager, self).__init__(namespace)
        self.lock_dir = None  # Redis uses redis itself for locking.

        if redis is None:
            raise RuntimeError('redis is not available')

        if isinstance(url, string_type):
            self.client = RedisNamespaceManager.clients.get(
                url, redis.StrictRedis.from_url, url)
        else:
            self.client = url

    def _format_key(self, key):
        if not isinstance(key, str):
            key = key.decode('ascii')
        if len(key) > (self.MAX_KEY_LENGTH - len(self.namespace) -
                       len('beaker_cache:') - 1):
            if not PY2:
                key = key.encode('utf-8')
            key = sha1(key).hexdigest()
        return 'beaker_cache:%s:%s' % (self.namespace, key)

    def get_creation_lock(self, key):
        return RedisSynchronizer(self._format_key(key), self.client)

    def __getitem__(self, key):
        entry = self.client.get(self._format_key(key))
        if entry is None:
            raise KeyError(key)
        return pickle.loads(entry)

    def __contains__(self, key):
        return self.client.exists(self._format_key(key))

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        value = pickle.dumps(value)
        if expiretime is not None:
            self.client.setex(self._format_key(key), int(expiretime), value)
        else:
            self.client.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.client.delete(self._format_key(key))

    def do_remove(self):
        for k in self.keys():
            self.client.delete(k)

    def keys(self):
        return self.client.keys('beaker_cache:%s:*' % self.namespace)
Example #18
class SQLAlchemyNamespaceManager(NamespaceManager):
    binds = SyncDict(_threading.Lock(), {})
    tables = SyncDict(_threading.Lock(), {})

    def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        NamespaceManager.__init__(self, namespace, **kwargs)

        if lock_dir is not None:
            self.lock_dir = lock_dir
        elif data_dir is None:
            raise MissingCacheParameter('data_dir or lock_dir is required')
        else:
            self.lock_dir = data_dir + '/container_db_lock'

        verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
                                               lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def do_acquire_read_lock(self):
        pass

    def do_release_read_lock(self):
        pass

    def do_acquire_write_lock(self, wait=True):
        return True

    def do_release_write_lock(self):
        pass

    def do_open(self, flags):
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(result['data']))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                log.debug("Couln't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            data = cPickle.dumps(self.hash)
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert, namespace=self.namespace, data=data,
                                  accessed=datetime.now(),
                                  created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=data, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()