def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Create a MongoDB-GridFS backed namespace manager.

    ``url`` is a MongoDB connection URI and is required.  ``lock_dir``
    (or a subdirectory derived from ``data_dir``) is optional and, when
    present, is created/verified for file-based locking.

    Raises ``MissingCacheParameter`` when ``url`` is missing or does not
    parse into a database and node list.
    """
    NamespaceManager.__init__(self, namespace)
    # Always define lock_dir: the original left the attribute unset when
    # neither lock_dir nor data_dir was supplied, so the check below
    # raised AttributeError instead of proceeding without a lock dir.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_gridfs_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    # Expose every parsed URI component as a url_* attribute.
    for k, v in parse_uri(url).iteritems():
        setattr(self, "url_%s" % k, v)
    if not self.url_database or not self.url_nodelist:
        raise MissingCacheParameter("Invalid MongoDB url.")
    # One shared GridFS handle per database/collection pair.
    data_key = "mongodb_gridfs:%s:%s" % (self.url_database,
                                         self.url_collection)
    self.gridfs = MongoDBGridFSNamespaceManager.clients.get(
        data_key, self._create_mongo_connection)
def __init__(self, namespace, dbmmodule=None, data_dir=None, dbm_dir=None,
             lock_dir=None, digest_filenames=True, **kwargs):
    """Create a DBM-file backed namespace manager.

    One of ``dbm_dir``/``data_dir`` selects where the dbm file lives and
    one of ``lock_dir``/``data_dir`` selects where lock files live;
    either missing pair raises ``MissingCacheParameter``.
    """
    self.digest_filenames = digest_filenames

    # Resolve the directory that holds the dbm data file.
    if dbm_dir:
        self.dbm_dir = dbm_dir
    elif data_dir:
        self.dbm_dir = data_dir + "/container_dbm"
    else:
        raise MissingCacheParameter("data_dir or dbm_dir is required")
    util.verify_directory(self.dbm_dir)

    # Resolve the directory that holds the lock files.
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_dbm_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    util.verify_directory(self.lock_dir)

    self.dbmmodule = dbmmodule or anydbm
    self.dbm = None
    OpenResourceNamespaceManager.__init__(self, namespace)

    self.file = util.encoded_path(root=self.dbm_dir,
                                  identifiers=[self.namespace],
                                  extension='.dbm',
                                  digest_filenames=self.digest_filenames)
    debug("data file %s", self.file)
    self._checkfile()
def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None,
             digest_filenames=True, **kwargs):
    """Create a file-per-namespace container manager.

    One of ``file_dir``/``data_dir`` selects where cache files live and
    one of ``lock_dir``/``data_dir`` selects where lock files live;
    either missing pair raises ``MissingCacheParameter``.
    """
    self.digest_filenames = digest_filenames

    # Directory holding the serialized cache files.
    if file_dir:
        self.file_dir = file_dir
    elif data_dir:
        self.file_dir = data_dir + "/container_file"
    else:
        raise MissingCacheParameter("data_dir or file_dir is required")
    util.verify_directory(self.file_dir)

    # Directory holding the lock files.
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_file_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    util.verify_directory(self.lock_dir)

    OpenResourceNamespaceManager.__init__(self, namespace)

    self.file = util.encoded_path(root=self.file_dir,
                                  identifiers=[self.namespace],
                                  extension='.cache',
                                  digest_filenames=self.digest_filenames)
    self.hash = {}
    debug("data file %s", self.file)
def __init__(self, namespace, url=None, data_dir=None, skip_pickle=False,
             sparse_collection=False, **params):
    """Create a MongoDB-backed namespace manager.

    ``url`` is a MongoDB connection URI (required).  ``skip_pickle``
    disables pickling of stored values; ``sparse_collection`` stores one
    document per key rather than one per namespace.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info(
            "Separating data to one row per key (sparse collection) for ns %s ." %
            self.namespace)
        self._sparse = True

    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)

    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    def _create_mongo_conn():
        # Hosts in a MongoDB URI must be comma separated; the previous
        # code concatenated the host:port pairs with no separator,
        # producing an invalid URI whenever more than one host was
        # configured (e.g. a replica set).
        host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(
        data_key, _create_mongo_conn)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Create a namespace manager connected to a ``host:port`` server.

    ``url`` is required and may carry ``?key=val&key=val`` connection
    options which are forwarded to ``open_connection``.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")

    # Pick a lock directory, or None when file locking is not in use.
    if lock_dir:
        chosen = lock_dir
    elif data_dir:
        chosen = data_dir + "/container_tcd_lock"
    else:
        chosen = None
    self.lock_dir = chosen
    if chosen:
        verify_directory(chosen)

    # Split optional query-string connection options off the url.
    pieces = url.split('?', 1)
    url = pieces[0]
    if len(pieces) > 1:
        options = dict(item.split('=', 1) for item in pieces[1].split('&'))
    else:
        options = {}

    host, port = url.split(':', 1)
    self.open_connection(host, int(port), **options)
def __init__(self, namespace, url, memcache_module='auto', data_dir=None,
             lock_dir=None, **kw):
    """Create a memcached namespace manager.

    ``memcache_module`` selects the client library; ``url`` is a
    semicolon-separated server list and is required.
    """
    NamespaceManager.__init__(self, namespace)

    _memcache_module = _client_libs[memcache_module]

    if not url:
        raise MissingCacheParameter("url is required")

    # Pick a lock directory, or None when file locking is not in use.
    if lock_dir:
        chosen = lock_dir
    elif data_dir:
        chosen = data_dir + "/container_mcd_lock"
    else:
        chosen = None
    self.lock_dir = chosen
    if chosen:
        verify_directory(chosen)

    # Check for pylibmc namespace manager, in which case client will be
    # instantiated by subclass __init__, to handle behavior passing to the
    # pylibmc client
    if not _is_configured_for_pylibmc(memcache_module, _memcache_module):
        self.mc = MemcachedNamespaceManager.clients.get(
            (memcache_module, url),
            _memcache_module.Client,
            url.split(';'))
def open_connection(self, session, cluster, cluster_getter, **params):
    """Bind this manager to a Cassandra session and prepare statements.

    Connection source priority: an explicit ``cluster``, then a
    ``cluster_getter`` callable, then a ready ``session``; supplying
    none of them raises ``MissingCacheParameter``.
    """
    if cluster:
        self.cluster = cluster
        self.cluster_getter = cluster_getter
        self.session = self.cluster.connect()
    elif cluster_getter:
        self.cluster = cluster_getter()
        self.cluster_getter = cluster_getter
        self.session = self.cluster.connect()
    elif session:
        self.cluster = cluster
        self.cluster_getter = cluster_getter
        self.session = session
    else:
        raise MissingCacheParameter(
            "session, cluster, or cluster_getter is required")

    # Ensure the keyspace and column family exist before preparing.
    keyspace_ddl = self._qry_create_keyspace.format(self.keyspace_name(),
                                                    self.replication_policy())
    self.session.execute(keyspace_ddl, timeout=60)
    self.session.set_keyspace(self.keyspace_name())
    self.session.execute(self._qry_create_cf)

    # Prepare the statements used on every cache operation; writes use
    # ConsistencyLevel.ANY for maximum availability.
    prepare = self.session.prepare
    self._prepared_insert = prepare(self._qry_insert)
    self._prepared_insert.consistency_level = ConsistencyLevel.ANY
    self._prepared_select = prepare(self._qry_select)
    self._prepared_check_key = prepare(self._qry_check_key)
    self._prepared_remove_key = prepare(self._qry_remove_key)
    self._prepared_remove_key.consistency_level = ConsistencyLevel.ANY
    self._prepared_keys = prepare(self._qry_keys)
def __init__(self, namespace, url=None, keyspace=None, column_family=None,
             **params):
    """Create a Cassandra CQL namespace manager.

    ``keyspace`` is required; both it and the table name (default
    ``beaker``) are validated to alphanumerics/underscore so they can be
    spliced into CQL safely.
    """
    if not keyspace:
        raise MissingCacheParameter("keyspace is required")
    if re.search(r"\W", keyspace):
        raise ValueError("keyspace can only have alphanumeric chars and underscore")
    self.__keyspace_cql_safe = keyspace

    cf_name = column_family or "beaker"
    if re.search(r"[^0-9a-zA-Z_]", cf_name):
        raise ValueError("table can only have alphanumeric chars and underscore")
    self.__table_cql_safe = cf_name

    expire = params.get("expire", None)
    self._expiretime = int(expire) if expire else None
    self._tries = int(params.pop("tries", 1))

    cluster = self.__connect_to_cluster(url, params)
    self.__session = cluster.connect(self.__keyspace_cql_safe)

    # Honour an optional consistency level given by name, e.g. "QUORUM".
    requested_cl = params.get("consistency_level")
    if isinstance(requested_cl, six.string_types):
        level = getattr(cassandra.ConsistencyLevel,
                        requested_cl.upper(), None)
        if level:
            self.__session.default_consistency_level = level

    self.__ensure_table()
    self.__prepare_statements()

    # This 10s default matches the driver's default.
    self.__session.default_timeout = int(params.get("query_timeout", 10))
def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
             **kwargs):
    """Create a namespace manager for use with a database table via
    SQLAlchemy.

    ``bind``
        SQLAlchemy ``Engine`` or ``Connection`` object

    ``table``
        SQLAlchemy ``Table`` object in which to store namespace data.
        This should usually be something created by ``make_cache_table``.
    """
    NamespaceManager.__init__(self, namespace, **kwargs)

    if lock_dir is not None:
        resolved_lock_dir = lock_dir
    elif data_dir is not None:
        resolved_lock_dir = data_dir + '/container_db_lock'
    else:
        raise MissingCacheParameter('data_dir or lock_dir is required')
    self.lock_dir = resolved_lock_dir
    verify_directory(resolved_lock_dir)

    # Share one bind per engine url and one Table per (url, name) pair.
    self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
    self.table = self.__class__.tables.get(
        '%s:%s' % (bind.url, table.name), lambda: table)

    self.hash = {}
    self._is_new = False
    self.loaded = False
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             expiretime=None, **params):
    """Create a namespace manager connected to a ``host:port`` server.

    ``url`` is required and may carry ``?key=val`` connection options;
    ``expiretime`` (seconds) and a ``serializer`` param ("pickle" by
    default) control value handling.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # Always define lock_dir (None when no locking directory applies) so
    # later code can test ``self.lock_dir`` directly instead of relying
    # on fragile hasattr() guards against an unset attribute.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Specify the serializer to use (pickle or json?)
    self.serializer = params.pop('serializer', 'pickle')
    self._expiretime = int(expiretime) if expiretime else None
    conn_params = {}
    parts = url.split('?', 1)
    url = parts[0]
    if len(parts) > 1:
        conn_params = dict(p.split('=', 1) for p in parts[1].split('&'))
    host, port = url.split(':', 1)
    self.open_connection(host, int(port), **conn_params)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Create a namespace manager connected to a server url.

    ``url`` is required; its netloc is split into host and port (the
    port may be absent) and optional ``?key=val`` options are forwarded
    to ``open_connection``.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # Always define lock_dir (None when no locking directory applies) so
    # later code can test ``self.lock_dir`` directly instead of relying
    # on fragile hasattr() guards against an unset attribute.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    conn_params = {}
    parts = url.split('?', 1)
    url = parts[0]
    if len(parts) > 1:
        conn_params = dict(p.split('=', 1) for p in parts[1].split('&'))
    netloc = urlparse(url).netloc
    parts = netloc.split(':')
    # Last colon-separated piece is the port; the rest re-joins as the
    # host (keeps IPv6-style multi-colon hosts intact).
    port = parts.pop()
    if port:
        port = int(port)
    host = ':'.join(parts)
    self.open_connection(host, port, **conn_params)
def __init__(self, namespace, dbmmodule=None, data_dir=None, dbm_dir=None,
             lock_dir=None, digest_filenames=True, **kwargs):
    """Create a DBM-file backed namespace manager.

    One of ``dbm_dir``/``data_dir`` selects where the dbm file lives and
    one of ``lock_dir``/``data_dir`` selects where lock files live;
    either missing pair raises ``MissingCacheParameter``.
    """
    NamespaceManager.__init__(self, namespace, **kwargs)

    # Resolve the dbm data directory.
    if dbm_dir is not None:
        resolved_dbm_dir = dbm_dir
    elif data_dir is not None:
        resolved_dbm_dir = data_dir + "/container_dbm"
    else:
        raise MissingCacheParameter("data_dir or dbm_dir is required")
    self.dbm_dir = resolved_dbm_dir

    # Resolve the lock directory.
    if lock_dir is not None:
        resolved_lock_dir = lock_dir
    elif data_dir is not None:
        resolved_lock_dir = data_dir + "/container_dbm_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    self.lock_dir = resolved_lock_dir

    # Fall back to the stdlib anydbm module when none is supplied.
    if dbmmodule is None:
        import anydbm
        self.dbmmodule = anydbm
    else:
        self.dbmmodule = dbmmodule

    util.verify_directory(self.dbm_dir)
    util.verify_directory(self.lock_dir)

    self.dbm = None
    self.lock = Synchronizer(identifier=self.namespace, use_files=True,
                             lock_dir=self.lock_dir,
                             digest_filenames=digest_filenames)
    self.file = util.encoded_path(root=self.dbm_dir,
                                  identifiers=[self.namespace],
                                  digest=digest_filenames,
                                  extension='.dbm')
    self.debug("data file %s" % self.file)
    self._checkfile()
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             skip_pickle=False, sparse_collection=False, **params):
    """Create a MongoDB-backed namespace manager.

    ``url`` is a MongoDB connection URI (required).  ``skip_pickle``
    disables pickling of stored values; ``sparse_collection`` stores one
    document per key.  ``lock_dir``/``data_dir`` optionally enable
    file-based locking.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True

    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)

    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    # Key will be db + collection
    # Always define lock_dir: the original left the attribute unset when
    # neither lock_dir nor data_dir was supplied, so the check below
    # raised AttributeError instead of proceeding without a lock dir.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        store = MongoStore.get_default()
        return MongoCollection(store.get_collection(collection))

    self.mongo = MongoDBNamespaceManager.clients.get(data_key,
                                                     _create_mongo_conn)
def __init__(self, namespace, url, data_dir=None, lock_dir=None, **params):
    """Create a memcached namespace manager.

    ``url`` is a semicolon-separated server list; one of ``lock_dir``
    or ``data_dir`` must be provided for file locking.
    """
    NamespaceManager.__init__(self, namespace, **params)

    if lock_dir is not None:
        chosen = lock_dir
    elif data_dir is not None:
        chosen = data_dir + "/container_mcd_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    self.lock_dir = chosen
    verify_directory(chosen)

    # One shared memcache client per url string.
    self.mc = MemcachedNamespaceManager.clients.get(
        url, lambda: memcache.Client(url.split(';'), debug=0))
def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None,
             digest_filenames=True, **kwargs):
    """Create a file-per-namespace container manager.

    One of ``file_dir``/``data_dir`` selects where cache files live and
    one of ``lock_dir``/``data_dir`` selects where lock files live;
    either missing pair raises ``MissingCacheParameter``.
    """
    NamespaceManager.__init__(self, namespace, **kwargs)

    # Resolve the cache-file directory.
    if file_dir is not None:
        resolved_file_dir = file_dir
    elif data_dir is not None:
        resolved_file_dir = data_dir + "/container_file"
    else:
        raise MissingCacheParameter("data_dir or file_dir is required")
    self.file_dir = resolved_file_dir

    # Resolve the lock directory.
    if lock_dir is not None:
        resolved_lock_dir = lock_dir
    elif data_dir is not None:
        resolved_lock_dir = data_dir + "/container_file_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    self.lock_dir = resolved_lock_dir

    util.verify_directory(self.file_dir)
    util.verify_directory(self.lock_dir)

    self.lock = Synchronizer(identifier=self.namespace, use_files=True,
                             lock_dir=self.lock_dir,
                             digest_filenames=digest_filenames)
    self.file = util.encoded_path(root=self.file_dir,
                                  identifiers=[self.namespace],
                                  digest=digest_filenames,
                                  extension='.cache')
    self.hash = {}
    self.debug("data file %s" % self.file)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             keyspace=None, column_family=None, **params):
    """Create a Cassandra namespace manager.

    ``keyspace`` is required; ``column_family`` defaults to ``beaker``.
    Remaining arguments are handled by ``NoSqlManager``.
    """
    if not keyspace:
        raise MissingCacheParameter("keyspace is required")
    self.keyspace = keyspace
    # Fall back to the default column family name when none is given.
    self.column_family = column_family if column_family else 'beaker'
    NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir,
                          lock_dir=lock_dir, **params)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             keyspace_prefix=None, cluster=None, session=None,
             cluster_getter=None, **params):
    """Create a Cassandra namespace manager.

    ``keyspace_prefix`` is required; the actual connection is
    established by ``open_connection`` from whichever of ``session``,
    ``cluster`` or ``cluster_getter`` is supplied.
    """
    super(CassandraManager, self).__init__(namespace)
    if not keyspace_prefix:
        raise MissingCacheParameter("keyspace_prefix is required")
    self.keyspace_prefix = keyspace_prefix
    # Connection state is populated by open_connection() below.
    self.session = self.cluster = self.cluster_getter = None
    self.open_connection(session, cluster, cluster_getter, **params)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Create a memcached namespace manager.

    ``url`` is a semicolon-separated server list and is required;
    ``lock_dir``/``data_dir`` optionally enable file-based locking.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # Always define lock_dir: the original left the attribute unset when
    # neither lock_dir nor data_dir was supplied, so the check below
    # raised AttributeError instead of proceeding without a lock dir.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # One shared memcache client per url string.
    self.mc = MemcachedNamespaceManager.clients.get(
        url, memcache.Client, url.split(';'))
def __init__(self, namespace, url, memcache_module='auto', data_dir=None,
             lock_dir=None, **kw):
    """Create an ultramemcached-backed namespace manager.

    ``url`` is a semicolon-separated server list and is required; the
    ``python-ultramemcached`` package must be importable.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # Always define lock_dir: the original left the attribute unset when
    # neither lock_dir nor data_dir was supplied, so the check below
    # raised AttributeError instead of proceeding without a lock dir.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if ultramemcache is None:
        raise ImportError('`python-ultramemcached` is required.')
    _memcache_module = ultramemcache
    self.mc = MemcachedNamespaceManager.clients.get(
        (memcache_module, url), _memcache_module.Client, url.split(';'))
def __init__(self, namespace, table_name, region=None, hash_key='id',
             **params):
    """Create a DynamoDB-backed namespace manager.

    ``table_name`` is required; ``region`` and any supported extra
    options are passed through to the boto region connection.
    """
    OpenResourceNamespaceManager.__init__(self, namespace)
    if table_name is None:
        raise MissingCacheParameter('DynamoDB table name required.')
    # Keep only the options this backend understands, then validate them.
    supported = dict((k, v) for k, v in params.iteritems()
                     if k in self._supported_options)
    options = verify_rules(supported, self._rules)
    self._hash_key = hash_key
    connection = ddb.connect_to_region(region, **options)
    self._table = ddb.table.Table(table_name, connection=connection)
    self._flags = None
    self._item = None
def __init__(self, namespace, url, memcache_module='auto', data_dir=None,
             lock_dir=None, **kw):
    """Create a memcached namespace manager.

    ``memcache_module`` selects the client library from ``_client_libs``;
    ``url`` is a semicolon-separated server list and is required.
    """
    NamespaceManager.__init__(self, namespace)
    _memcache_module = _client_libs[memcache_module]
    if not url:
        raise MissingCacheParameter("url is required")
    # Always define lock_dir: the original left the attribute unset when
    # neither lock_dir nor data_dir was supplied, so the check below
    # raised AttributeError instead of proceeding without a lock dir.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get(
        (memcache_module, url), _memcache_module.Client, url.split(';'))
def __init__(self, namespace, url, sa_opts=None, optimistic=False,
             table_name='beaker_cache', data_dir=None, lock_dir=None,
             **params):
    """Creates a database namespace manager

    ``url``
        A SQLAlchemy database URL
    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the
        engine with.
    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.
    ``table_name``
        The table name to use in the database for the cache.
    """
    NamespaceManager.__init__(self, namespace, **params)
    if sa_opts is None:
        sa_opts = {}

    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is not None:
        self.lock_dir = data_dir + "/container_db_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    verify_directory(self.lock_dir)

    # One cached Table per (url, options, table name) triple so repeated
    # managers share a single engine and table definition.
    table_key = url + str(sa_opts) + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + str(sa_opts)

        def make_meta():
            # MySQL with no explicit options gets a QueuePool by default.
            if url.startswith('mysql') and not sa_opts:
                sa_opts['poolclass'] = pool.QueuePool
            return sa.BoundMetaData(sa.create_engine(url, **sa_opts))

        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(
            table_name, meta,
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('namespace', sa.String(255), nullable=False),
            sa.Column('key', sa.String(255), nullable=False),
            sa.Column('value', sa.BLOB(), nullable=False),
            sa.UniqueConstraint('namespace', 'key'))
        cache.create(checkfirst=True)
        return cache

    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
             table_name='beaker_cache', data_dir=None, lock_dir=None,
             **params):
    """Creates a database namespace manager

    ``url``
        SQLAlchemy compliant db url
    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the
        engine with.
    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.
    ``table_name``
        The table name to use in the database for the cache.
    """
    NamespaceManager.__init__(self, namespace, **params)
    # When no sa_opts dict is given, the leftover keyword params double
    # as the engine options (NOTE(review): presumably 'sa.'-prefixed —
    # verify against callers).
    if sa_opts is None:
        sa_opts = params

    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is not None:
        self.lock_dir = data_dir + "/container_db_lock"
    else:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    verify_directory(self.lock_dir)

    # The url may also arrive inside sa_opts under 'sa.url'.
    url = url or sa_opts['sa.url']
    # One cached Table per (url, table name) pair.
    table_key = url + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + table_name

        def make_meta():
            if sa_version == '0.3':
                if url.startswith('mysql') and not sa_opts:
                    sa_opts['poolclass'] = pool.QueuePool
                engine = sa.create_engine(url, **sa_opts)
                meta = sa.BoundMetaData(engine)
            else:
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
            return meta

        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(
            table_name, meta,
            sa.Column('id', types.Integer, primary_key=True),
            sa.Column('namespace', types.String(255), nullable=False),
            sa.Column('accessed', types.DateTime, nullable=False),
            sa.Column('created', types.DateTime, nullable=False),
            sa.Column('data', types.BLOB(), nullable=False),
            sa.UniqueConstraint('namespace'))
        cache.create(checkfirst=True)
        return cache

    self.hash = {}
    self._is_new = False
    self.loaded = False
    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             skip_pickle=False, sparse_collection=False,
             use_file_lock=False, **params):
    """Create a MongoDB-backed namespace manager.

    ``url`` is a MongoDB connection URI (required).  ``skip_pickle``
    disables pickling of stored values; ``sparse_collection`` stores one
    document per key; ``use_file_lock`` enables file-based locking and
    then requires ``lock_dir`` or ``data_dir``.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info(
            "Separating data to one row per key (sparse collection) for ns %s ." %
            self.namespace)
        self._sparse = True
    if use_file_lock:
        log.info("Enabling file_locks for namespace: %s" % self.namespace)
        self._use_file_lock = True

    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)

    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    # Key will be db + collection
    # Always define lock_dir: previously the attribute stayed unset when
    # neither lock_dir nor data_dir was supplied and file locking was
    # off, leaving self.lock_dir as a latent AttributeError for any
    # later reader.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    elif self._use_file_lock:
        raise ImproperlyConfigured(
            "Neither data_dir nor lock_dir are specified, while use_file_lock is set to True"
        )
    if self._use_file_lock and self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        # Hosts in a MongoDB URI must be comma separated; the previous
        # code concatenated the host:port pairs with no separator,
        # producing an invalid URI whenever more than one host was
        # configured (e.g. a replica set).
        host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(
        data_key, _create_mongo_conn)