def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None, **kwargs):
    """Create a namespace manager for use with a database table via SQLAlchemy.

    ``bind``
        SQLAlchemy ``Engine`` or ``Connection`` object

    ``table``
        SQLAlchemy ``Table`` object in which to store namespace data.
        This should usually be something created by ``make_cache_table``.
    """
    NamespaceManager.__init__(self, namespace, **kwargs)
    # Resolve the lock directory: an explicit lock_dir wins, otherwise a
    # subdirectory of data_dir is used; one of the two is mandatory.
    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter('data_dir or lock_dir is required')
    else:
        self.lock_dir = data_dir + '/container_db_lock'
    verify_directory(self.lock_dir)
    # Cache the bind and table per URL on the class so that managers
    # pointing at the same database share one engine and one Table object.
    self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
    self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name), lambda: table)
    # In-memory copy of the namespace contents plus load-state flags.
    self.hash = {}
    self._is_new = False
    self.loaded = False
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """Memcached namespace manager; resolves the client library by name."""
    NamespaceManager.__init__(self, namespace)
    client_lib = _client_libs[memcache_module]
    if not url:
        raise MissingCacheParameter("url is required")
    # An explicit lock_dir wins; otherwise derive one from data_dir, if any.
    self.lock_dir = lock_dir or (data_dir + "/container_mcd_lock" if data_dir else None)
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Check for pylibmc namespace manager, in which case client will be
    # instantiated by subclass __init__, to handle behavior passing to the
    # pylibmc client
    if not _is_configured_for_pylibmc(memcache_module, client_lib):
        self.mc = MemcachedNamespaceManager.clients.get(
            (memcache_module, url), client_lib.Client, url.split(';'))
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Connect to a backend addressed by URL; ``?k=v&...`` pairs become
    extra connection keyword arguments.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    # lock_dir may legitimately stay unset; downstream code tests with
    # hasattr, so we deliberately do not assign a default here.
    if hasattr(self, 'lock_dir'):
        verify_directory(self.lock_dir)
    # Split an optional query-string into connection keyword arguments.
    conn_params = {}
    parts = url.split('?', 1)
    url = parts[0]
    if len(parts) > 1:
        conn_params = dict(p.split('=', 1) for p in parts[1].split('&'))
    netloc = urlparse(url).netloc
    # BUGFIX: the old code did netloc.split(':') and int()-ed the last
    # element, raising ValueError when the netloc carried no port at all.
    # Only treat the suffix as a port when a ':' separator is present.
    if ':' in netloc:
        host, _, port = netloc.rpartition(':')
        # An empty port (trailing colon) is passed through unchanged, as
        # before; a non-empty one is converted to int.
        port = int(port) if port else port
    else:
        host, port = netloc, None
    self.open_connection(host, port, **conn_params)
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """Memcached namespace manager; picks the client library named by
    ``memcache_module`` ('auto' selects whichever is available).
    """
    NamespaceManager.__init__(self, namespace)
    _memcache_module = _client_libs[memcache_module]
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the truthiness test below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Check for pylibmc namespace manager, in which case client will be
    # instantiated by subclass __init__, to handle behavior passing to the
    # pylibmc client
    if not _is_configured_for_pylibmc(memcache_module, _memcache_module):
        self.mc = MemcachedNamespaceManager.clients.get(
            (memcache_module, url), _memcache_module.Client, url.split(';'))
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Namespace manager addressed by a full URL
    (scheme://user:pass@host:port/path?opts)."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    else:
        self.lock_dir = None
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Remember the raw URL before decomposing it.
    self.url = url
    # parse the url properly using urlparse
    parsed = urlparse.urlparse(url)
    # Any "?k=v&..." suffix of the path becomes extra connection kwargs.
    path, _, query = parsed.path.partition('?')
    conn_params = dict(p.split('=', 1) for p in query.split('&')) if query else {}
    self.host = parsed.hostname
    self.port = parsed.port
    self.username = parsed.username
    self.password = parsed.password
    self.scheme = parsed.scheme
    self.open_connection(self.host, self.port, **conn_params)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Connect to a "host:port[?opts]" style cache backend."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # Resolve the lock directory; it may be absent, in which case nothing
    # is verified.
    self.lock_dir = lock_dir if lock_dir else (data_dir + "/container_tcd_lock" if data_dir else None)
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Optional "?k=v&..." suffix becomes connection keyword arguments.
    url, _, query = url.partition('?')
    conn_params = dict(p.split('=', 1) for p in query.split('&')) if query else {}
    host, port = url.split(':', 1)
    self.open_connection(host, int(port), **conn_params)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, expire=None, **params):
    """host:port backend with selectable serializer and optional expiry
    (seconds)."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    # lock_dir may remain unset; downstream code checks with hasattr().
    if hasattr(self, 'lock_dir'):
        verify_directory(self.lock_dir)
    # Specify the serializer to use (pickle or json?)
    self.serializer = params.pop('serializer', 'pickle')
    self._expiretime = int(expire) if expire else None
    # Optional "?k=v&..." suffix becomes connection keyword arguments.
    url, _, query = url.partition('?')
    conn_params = dict(p.split('=', 1) for p in query.split('&')) if query else {}
    host, port = url.split(':', 1)
    self.open_connection(host, int(port), **conn_params)
def __init__(self, namespace, read_servers=None, write_servers=None, database=None, table=None, index=None, data_dir=None, skip_pickle=False, **kw):
    """HandlerSocket-backed namespace manager; one pooled connection pair
    is shared per database."""
    self.database = database
    self.table = table
    self.index = index
    NamespaceManager.__init__(self, namespace)
    data_key = "hs:%s" % (database)

    def _initiate_connections(read_servers, write_servers):
        # Build the HandlerSocket manager from the parsed server specs.
        return Manager(parse_servers_for_hs(read_servers),
                       parse_servers_for_hs(write_servers))

    self.hs = HandlerSocketMySQLNamespaceManager.clients.get(
        data_key, _initiate_connections, read_servers, write_servers)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """MongoDB-GridFS namespace manager; one shared client per
    database/collection pair."""
    NamespaceManager.__init__(self, namespace)
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_gridfs_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    # Expose every parsed URI component as self.url_<component>.
    for k, v in parse_uri(url).iteritems():
        setattr(self, "url_%s" % k, v)
    if not self.url_database or not self.url_nodelist:
        raise MissingCacheParameter("Invalid MongoDB url.")
    data_key = "mongodb_gridfs:%s:%s" % (self.url_database, self.url_collection)
    self.gridfs = MongoDBGridFSNamespaceManager.clients.get(
        data_key, self._create_mongo_connection)
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """bmemcached-backed namespace manager with optional SASL
    username/password credentials taken from **kw."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the truthiness test below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if bmemcached is None:
        raise ImportError('`bmemcached` is required.')
    # Optional SASL credentials are passed positionally to bmemcached.Client.
    auth_args = []
    username = kw.get('username', None)
    password = kw.get('password', None)
    if username:
        auth_args.append(username)
    if password:
        auth_args.append(password)
    # BUGFIX: removed leftover debug logging ('WFT' marker) that also
    # leaked the password into the logs.
    self.mc = MemcachedNamespaceManager.clients.get(
        (memcache_module, url), bmemcached.Client, url.split(';'), *auth_args)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, expiretime=None, **params):
    """host:port backend with selectable serializer and a per-namespace
    expire time (seconds)."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    # lock_dir may remain unset; downstream code checks with hasattr().
    if hasattr(self, 'lock_dir'):
        verify_directory(self.lock_dir)
    # Specify the serializer to use (pickle or json?)
    self.serializer = params.pop('serializer', 'pickle')
    self._expiretime = int(expiretime) if expiretime else None
    # Optional "?k=v&..." suffix becomes connection keyword arguments.
    url, _, query = url.partition('?')
    conn_params = dict(p.split('=', 1) for p in query.split('&')) if query else {}
    host, port = url.split(':', 1)
    self.open_connection(host, int(port), **conn_params)
def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None, **kwargs):
    """Create a namespace manager for use with a database table via SQLAlchemy.

    ``bind``
        SQLAlchemy ``Engine`` or ``Connection`` object

    ``table``
        SQLAlchemy ``Table`` object in which to store namespace data.
        This should usually be something created by ``make_cache_table``.
    """
    # NOTE(review): **kwargs is accepted but not forwarded to
    # NamespaceManager.__init__ (a sibling variant forwards it) — confirm
    # whether dropping it here is intentional.
    NamespaceManager.__init__(self, namespace)
    # Resolve the lock directory: an explicit lock_dir wins, otherwise a
    # subdirectory of data_dir is used; one of the two is mandatory.
    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter('data_dir or lock_dir is required')
    else:
        self.lock_dir = data_dir + '/container_db_lock'
    verify_directory(self.lock_dir)
    # Cache the bind and table per URL on the class so that managers
    # pointing at the same database share one engine and one Table object.
    self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
    self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name), lambda: table)
    # In-memory copy of the namespace contents plus load-state flags.
    self.hash = {}
    self._is_new = False
    self.loaded = False
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, skip_pickle=False, sparse_collection=False, **params):
    """MongoDB namespace manager; one shared collection handle per database.

    ``skip_pickle`` stores raw values instead of pickled blobs;
    ``sparse_collection`` stores one document per key.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info(("Separating data to one row per key (sparse collection)"
                  " for ns %s .") % self.namespace)
        self._sparse = True
    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    url_components = parse_uri(url)
    host_list = url_components['nodelist']
    username = url_components['username']
    password = url_components['password']
    collection = url_components['collection']
    database = url_components['database']
    options = url_components['options']
    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")
    # Key will be db + collection
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    if hasattr(self, 'lock_dir'):
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        # BUGFIX: join multiple hosts with commas (required by mongodb://
        # URI syntax) and connect with the URI that is logged, instead of
        # logging host_uri while connecting with the raw `url` (the sibling
        # implementations in this file connect with host_uri).
        host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(
        data_key, _create_mongo_conn)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, skip_pickle=False, sparse_collection=False, use_file_lock=False, **params):
    """MongoDB namespace manager with optional file-based locking.

    ``use_file_lock`` enables directory-based locks, in which case one of
    ``data_dir``/``lock_dir`` must be supplied.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True
    if use_file_lock:
        log.info("Enabling file_locks for namespace: %s" % self.namespace)
        self._use_file_lock = True
    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)
    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")
    # Key will be db + collection
    # NOTE(review): the branches below read self._use_file_lock and
    # self.lock_dir even when they were not assigned above — this assumes
    # the class defines both as falsy class attributes; confirm.
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    elif self._use_file_lock:
        raise ImproperlyConfigured("Neither data_dir nor lock_dir are specified, while use_file_lock is set to True")
    if self._use_file_lock and self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        # Build a mongodb:// URI from the parsed host list and connect;
        # credentials are applied separately via db.authenticate below.
        host_uri = 'mongodb://'
        for x in host_list:
            host_uri += '%s:%s' % x
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(data_key, _create_mongo_conn)
def __init__(self, namespace, url, sa_opts=None, optimistic=False, table_name='beaker_cache', data_dir=None, lock_dir=None, **params):
    """Creates a database namespace manager

    ``url``
        A SQLAlchemy database URL

    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the engine
        with.

    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.

    ``table_name``
        The table name to use in the database for the cache.
    """
    NamespaceManager.__init__(self, namespace, **params)
    if sa_opts is None:
        sa_opts = {}
    # Lock directory: explicit lock_dir wins, otherwise derived from
    # data_dir; one of the two is mandatory.
    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    else:
        self.lock_dir = data_dir + "/container_db_lock"
    verify_directory(self.lock_dir)
    # Check to see if the table's been created before
    table_key = url + str(sa_opts) + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + str(sa_opts)

        def make_meta():
            # NOTE(review): sa.BoundMetaData is an SQLAlchemy 0.3-era API;
            # this code targets old SQLAlchemy — confirm before porting.
            if url.startswith('mysql') and not sa_opts:
                sa_opts['poolclass'] = pool.QueuePool
            engine = sa.create_engine(url, **sa_opts)
            meta = sa.BoundMetaData(engine)
            return meta

        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(table_name, meta,
                         sa.Column('id', sa.Integer, primary_key=True),
                         sa.Column('namespace', sa.String(255), nullable=False),
                         sa.Column('key', sa.String(255), nullable=False),
                         sa.Column('value', sa.BLOB(), nullable=False),
                         sa.UniqueConstraint('namespace', 'key'))
        cache.create(checkfirst=True)
        return cache

    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, skip_pickle=False, sparse_collection=False, **params):
    """MongoDB namespace manager keyed by database#collection.

    ``skip_pickle`` stores raw values instead of pickled blobs;
    ``sparse_collection`` stores one document per key.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        # BUGFIX: the flag was assigned to a local `_pickle`, which silently
        # had no effect; set it on the instance like `_sparse` below.
        self._pickle = False
    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True
    conn_params = parse_mongo_url(url)
    if conn_params['database'] and conn_params['host'] and \
       conn_params['collection']:
        data_key = "mongodb:%s#%s" % (conn_params['database'],
                                      conn_params['collection'])
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse"
                                    " host, database and/or "
                                    " collection name.")
    conn_params['slave_okay'] = params.get('slave_okay') == 'True'
    # Key will be db + collection
    # BUGFIX: default to None so the truthiness check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        conn = pymongo.connection.Connection(conn_params['host'],
                                             conn_params['port'],
                                             slave_okay=conn_params['slave_okay'])
        db = conn[conn_params['database']]
        if conn_params['username'] and conn_params['password']:
            log.info("Attempting to authenticate %s/%s " %
                     (conn_params['username'], conn_params['password']))
            if not db.authenticate(conn_params['username'],
                                   conn_params['password']):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[conn_params['collection']]

    self.mongo = MongoDBNamespaceManager.clients.get(data_key, _create_mongo_conn)
def __init__(self, namespace, url, data_dir=None, lock_dir=None, **params):
    """Memcached namespace manager (python-memcache); a lock directory is
    mandatory."""
    NamespaceManager.__init__(self, namespace, **params)
    # An explicit lock_dir wins; otherwise one is derived from data_dir.
    if lock_dir is not None:
        self.lock_dir = lock_dir
    else:
        if data_dir is None:
            raise MissingCacheParameter("data_dir or lock_dir is required")
        self.lock_dir = data_dir + "/container_mcd_lock"
    verify_directory(self.lock_dir)
    # One shared client per URL; hosts are ';'-separated.
    self.mc = MemcachedNamespaceManager.clients.get(
        url, lambda: memcache.Client(url.split(';'), debug=0))
def __init__(self, namespace, url=None, data_dir=None, skip_pickle=False, sparse_collection=False, **params):
    """MongoDB namespace manager (variant without file locking)."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info(
            "Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True
    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)
    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    def _create_mongo_conn():
        # BUGFIX: hosts in a mongodb:// URI must be comma-separated; the
        # old loop concatenated "host:port" pairs with no separator,
        # yielding an invalid URI whenever more than one host was given.
        host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(
        data_key, _create_mongo_conn)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Memcached namespace manager; one shared client per URL."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get(url,
                                                    memcache.Client, url.split(';'))
def __init__(self, namespace, url, data_dir=None, lock_dir=None, **params):
    """Memcached namespace manager; one client instance is shared per URL."""
    NamespaceManager.__init__(self, namespace, **params)
    # A lock directory is mandatory: either given directly or derived
    # from data_dir.
    if lock_dir is None and data_dir is None:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    self.lock_dir = lock_dir if lock_dir is not None else data_dir + "/container_mcd_lock"
    verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get(
        url, lambda: memcache.Client(url.split(';'), debug=0))
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Connect to a plain "host:port" cache backend."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_tcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    host, port = url.split(':')
    self.open_connection(host, int(port))
def __init__(self, namespace, url, memcache_module="auto", data_dir=None, lock_dir=None, **kw):
    """Memcached namespace manager using the client library named by
    ``memcache_module``."""
    NamespaceManager.__init__(self, namespace)
    _memcache_module = _client_libs[memcache_module]
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get((memcache_module, url),
                                                    _memcache_module.Client,
                                                    url.split(";"))
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """ultramemcached-backed namespace manager."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if ultramemcache is None:
        raise ImportError('`python-ultramemcached` is required.')
    _memcache_module = ultramemcache
    self.mc = MemcachedNamespaceManager.clients.get((memcache_module, url),
                                                    _memcache_module.Client,
                                                    url.split(';'))
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """App-Engine-style memcache namespace manager.

    ``url``, ``data_dir`` and ``lock_dir`` are accepted only for interface
    compatibility with the socket-based managers; this client needs no
    address and no lock directory.
    """
    NamespaceManager.__init__(self, namespace)
    # Removed the commented-out url/lock_dir handling inherited from the
    # socket-based memcached manager — it was dead code here.
    self.mc = memcache.Client()
def __init__(self, namespace, url, data_dir=None, lock_dir=None, **kwargs):
    """
    Initialize Redis connection.
    """
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter('URL setting for Redis is required.')
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = os.path.join(data_dir, 'container_redis_lock')
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.redis = StrictRedis.from_url(url)
def __init__(self, namespace, url=None, sa_opts=None, optimistic=False, table_name='beaker_cache', data_dir=None, lock_dir=None, **params):
    """SQLAlchemy table-backed namespace manager (engine_from_config style).

    ``url`` may be given directly or via ``sa_opts['sa.url']``.
    """
    # BUGFIX: removed a leftover Python-2 debug statement (`print namespace`).
    NamespaceManager.__init__(self, namespace)
    if sa_opts is None:
        sa_opts = params
    # BUGFIX: default to None so the truthiness check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_db_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    # Check to see if the table's been created before
    url = url or sa_opts['sa.url']
    table_key = url + table_name

    def make_table():
        # Check to see if we have a connection pool open already
        meta_key = url + table_name

        def make_meta():
            # SQLAlchemy pops the url, this ensures it sticks around
            # later
            sa_opts['sa.url'] = url
            engine = sa.engine_from_config(sa_opts, 'sa.')
            meta = sa.MetaData()
            meta.bind = engine
            return meta

        meta = DBTableNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        table = sa.Table(table_name, meta,
                         sa.Column('key', types.String(100), primary_key=True),
                         sa.Column('data', types.String(255), nullable=False),
                         extend_existing=True)
        table.create(checkfirst=True)
        return table

    # In-memory state flags used by the container machinery.
    self.hash = {}
    self._is_new = False
    self.loaded = False
    self.table = DBTableNamespaceManager.tables.get(table_key, make_table)
def __init__(self, namespace, read_servers=None, write_servers=None, database=None, table=None, index=None, data_dir=None, skip_pickle=False, **kw):
    """HandlerSocket namespace manager sharing one connection manager per
    database."""
    self.database = database
    self.table = table
    self.index = index
    NamespaceManager.__init__(self, namespace)

    def _initiate_connections(reads, writes):
        # Parse the server specs, then build the read/write manager pair.
        reads = parse_servers_for_hs(reads)
        writes = parse_servers_for_hs(writes)
        return Manager(reads, writes)

    cache_key = "hs:%s" % (database)
    self.hs = HandlerSocketMySQLNamespaceManager.clients.get(
        cache_key, _initiate_connections, read_servers, write_servers)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """Memcached namespace manager; one shared client per URL."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get(
        url, memcache.Client, url.split(';'))
def __init__(self, namespace, table_name='beaker_cache', **params):
    """Creates a datastore namespace manager"""
    NamespaceManager.__init__(self, namespace, **params)

    def make_cache():
        # Dynamically build a db.Model subclass for this table name.
        fields = dict(created=db.DateTimeProperty(),
                      accessed=db.DateTimeProperty(),
                      data=db.TextProperty())
        return type(table_name, (db.Model,), fields)

    self.table_name = table_name
    # NOTE: make_cache() is evaluated eagerly; setdefault only decides
    # whether the freshly built class is kept or an existing one reused.
    self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache())
    self.hash = {}
    self._is_new = False
    self.loaded = False
    self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
    # Google wants namespaces to start with letters, change the namespace
    # to start with a letter
    self.namespace = 'p%s' % self.namespace
def __init__(self, namespace, uri=None, data_dir=None, lock_dir=None, **params):
    """MongoDB namespace manager driven by a full pymongo URI."""
    NamespaceManager.__init__(self, namespace)
    if not uri:
        raise MissingCacheParameter("URI is required")
    self.db_connection_params = pymongo.uri_parser.parse_uri(uri)
    # Both a collection and a database must appear in the URI.
    for part in ("collection", "database"):
        if not self.db_connection_params[part]:
            raise MissingCacheParameter("invalid URI: missing %s" % part)
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    # lock_dir may remain unset; it is deliberately tested with hasattr.
    if hasattr(self, "lock_dir"):
        verify_directory(self.lock_dir)
    self.open_connection(uri)
def __init__(self, namespace, url=None, data_dir=None, skip_pickle=False, sparse_collection=False, **params):
    """MongoDB namespace manager (variant without file locking)."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True
    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)
    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    def _create_mongo_conn():
        # BUGFIX: hosts in a mongodb:// URI must be comma-separated; the
        # old loop concatenated "host:port" pairs with no separator,
        # yielding an invalid URI whenever more than one host was given.
        host_uri = 'mongodb://' + ','.join('%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri, slave_okay=options.get('slaveok', False))
        db = conn[database]
        if username:
            log.info("Attempting to authenticate %s/%s " % (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    self.mongo = MongoDBNamespaceManager.clients.get(data_key, _create_mongo_conn)
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """ultramemcached-backed namespace manager."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if ultramemcache is None:
        raise ImportError('`python-ultramemcached` is required.')
    _memcache_module = ultramemcache
    self.mc = MemcachedNamespaceManager.clients.get(
        (memcache_module, url), _memcache_module.Client, url.split(';'))
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
    """MongoDB-GridFS namespace manager; one shared client per
    database/collection pair."""
    NamespaceManager.__init__(self, namespace)
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_gridfs_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    # Expose every parsed URI component as self.url_<component>.
    for k, v in parse_uri(url).iteritems():
        setattr(self, "url_%s" % k, v)
    if not self.url_database or not self.url_nodelist:
        raise MissingCacheParameter("Invalid MongoDB url.")
    data_key = "mongodb_gridfs:%s:%s" % (self.url_database, self.url_collection)
    self.gridfs = MongoDBGridFSNamespaceManager.clients.get(
        data_key, self._create_mongo_connection)
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, skip_pickle=False, sparse_collection=False, **params):
    """MongoDB namespace manager backed by a shared MongoStore collection."""
    NamespaceManager.__init__(self, namespace)
    if not url:
        raise MissingCacheParameter("MongoDB url is required")
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False
    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True
    # Temporarily uses a local copy of the functions until pymongo upgrades to new parser code
    (host_list, database, username, password, collection, options) = _parse_uri(url)
    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")
    # Key will be db + collection
    # BUGFIX: default to None so the truthiness check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        # Connection details come from the process-wide default MongoStore;
        # the parsed url only supplies the collection name and cache key.
        store = MongoStore.get_default()
        return MongoCollection(store.get_collection(collection))

    self.mongo = MongoDBNamespaceManager.clients.get(data_key, _create_mongo_conn)
def __init__(self, namespace, url, memcache_module='auto', data_dir=None, lock_dir=None, **kw):
    """Memcached namespace manager using the client library named by
    ``memcache_module``."""
    NamespaceManager.__init__(self, namespace)
    _memcache_module = _client_libs[memcache_module]
    if not url:
        raise MissingCacheParameter("url is required")
    # BUGFIX: default to None so the check below cannot raise
    # AttributeError when neither lock_dir nor data_dir is supplied.
    self.lock_dir = None
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mcd_lock"
    if self.lock_dir:
        verify_directory(self.lock_dir)
    self.mc = MemcachedNamespaceManager.clients.get(
        (memcache_module, url), _memcache_module.Client, url.split(';'))
def __init__(self, namespace, url, sa_opts=None, optimistic=False, table_name='beaker_cache', data_dir=None, lock_dir=None, **params):
    """Creates a database namespace manager

    ``url``
        A SQLAlchemy database URL

    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the engine
        with.

    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.

    ``table_name``
        The table name to use in the database for the cache.
    """
    NamespaceManager.__init__(self, namespace, **params)
    if sa_opts is None:
        sa_opts = {}
    # Lock directory: explicit lock_dir wins, otherwise derived from
    # data_dir; one of the two is mandatory.
    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    else:
        self.lock_dir = data_dir + "/container_db_lock"
    verify_directory(self.lock_dir)
    # Check to see if the table's been created before
    table_key = url + str(sa_opts) + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + str(sa_opts)

        def make_meta():
            # NOTE(review): sa.BoundMetaData is an SQLAlchemy 0.3-era API;
            # this code targets old SQLAlchemy — confirm before porting.
            if url.startswith('mysql') and not sa_opts:
                sa_opts['poolclass'] = pool.QueuePool
            engine = sa.create_engine(url, **sa_opts)
            meta = sa.BoundMetaData(engine)
            return meta

        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(
            table_name, meta,
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('namespace', sa.String(255), nullable=False),
            sa.Column('key', sa.String(255), nullable=False),
            sa.Column('value', sa.BLOB(), nullable=False),
            sa.UniqueConstraint('namespace', 'key'))
        cache.create(checkfirst=True)
        return cache

    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
             table_name='beaker_cache', data_dir=None, lock_dir=None,
             **params):
    """Creates a database namespace manager

    ``url``
        SQLAlchemy compliant db url

    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the
        engine with.

    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.

    ``table_name``
        The table name to use in the database for the cache.

    :raises MissingCacheParameter: if no lock location or no database
        URL can be determined from the arguments.
    """
    NamespaceManager.__init__(self, namespace, **params)
    if sa_opts is None:
        sa_opts = params

    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    else:
        self.lock_dir = data_dir + "/container_db_lock"
    verify_directory(self.lock_dir)

    # Fix: report a missing URL through the library's own exception
    # instead of leaking a bare KeyError from sa_opts['sa.url'].
    url = url or sa_opts.get('sa.url')
    if not url:
        raise MissingCacheParameter("url is required")

    # Check to see if the table's been created before
    table_key = url + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + table_name

        def make_meta():
            if sa_version == '0.3':
                if url.startswith('mysql') and not sa_opts:
                    sa_opts['poolclass'] = pool.QueuePool
                engine = sa.create_engine(url, **sa_opts)
                meta = sa.BoundMetaData(engine)
            else:
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
            return meta
        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(table_name, meta,
                         sa.Column('id', types.Integer, primary_key=True),
                         sa.Column('namespace', types.String(255),
                                   nullable=False),
                         sa.Column('accessed', types.DateTime,
                                   nullable=False),
                         sa.Column('created', types.DateTime,
                                   nullable=False),
                         sa.Column('data', types.BLOB(), nullable=False),
                         sa.UniqueConstraint('namespace'))
        cache.create(checkfirst=True)
        return cache

    self.hash = {}          # in-memory copy of the namespace's data
    self._is_new = False    # True once we know the row doesn't exist yet
    self.loaded = False     # lazy: row is fetched on first access
    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, host='localhost', port=6379, db=0,
             password=None, **params):
    """Create a namespace manager that stores its data in Redis.

    Connection parameters are kept, not used here: the client itself
    is created lazily, one per thread.
    """
    NamespaceManager.__init__(self, namespace)
    self.redis_options = {
        'host': host,
        'port': int(port),
        'db': int(db),
        'password': password,
    }
    # thread-local slot for the per-thread Redis client
    self._redis = threading.local()
def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
             skip_pickle=False, sparse_collection=False,
             use_file_lock=False, **params):
    """Create a namespace manager backed by a MongoDB collection.

    ``url``
        A MongoDB connection URL naming at least one host and a
        database (and optionally a collection and credentials).

    ``skip_pickle``
        Disable pickling of stored values for this namespace.

    ``sparse_collection``
        Store one document per key instead of one per namespace.

    ``use_file_lock``
        Use file-based locks in ``lock_dir``/``data_dir``.

    :raises MissingCacheParameter: if ``url`` is absent or unparsable.
    :raises ImproperlyConfigured: if ``use_file_lock`` is set but no
        lock location is available.
    """
    NamespaceManager.__init__(self, namespace)

    if not url:
        raise MissingCacheParameter("MongoDB url is required")

    # Flags are only set when enabled; defaults presumably come from
    # class-level attributes — TODO confirm against the class body.
    if skip_pickle:
        log.info("Disabling pickling for namespace: %s" % self.namespace)
        self._pickle = False

    if sparse_collection:
        log.info("Separating data to one row per key (sparse collection) for ns %s ." % self.namespace)
        self._sparse = True

    if use_file_lock:
        log.info("Enabling file_locks for namespace: %s" % self.namespace)
        self._use_file_lock = True

    # Temporarily uses a local copy of the functions until pymongo
    # upgrades to new parser code
    (host_list, database, username, password,
     collection, options) = _parse_uri(url)

    if database and host_list:
        data_key = "mongodb:%s" % (database)
    else:
        raise MissingCacheParameter("Invalid Cache URL. Cannot parse.")

    # Fix: always define lock_dir so later attribute access cannot
    # raise AttributeError when no lock directory was configured.
    self.lock_dir = None

    # Key will be db + collection
    if lock_dir:
        self.lock_dir = lock_dir
    elif data_dir:
        self.lock_dir = data_dir + "/container_mongodb_lock"
    elif self._use_file_lock:
        raise ImproperlyConfigured(
            "Neither data_dir nor lock_dir are specified, while use_file_lock is set to True"
        )

    if self._use_file_lock and self.lock_dir:
        verify_directory(self.lock_dir)

    def _create_mongo_conn():
        # Fix: join hosts with commas — the original concatenated
        # '%s:%s' pairs with no separator, producing an invalid URI
        # whenever host_list has more than one entry (replica sets).
        host_uri = 'mongodb://' + ','.join(
            '%s:%s' % x for x in host_list)
        log.info("Host URI: %s" % host_uri)
        conn = Connection(host_uri,
                          slave_okay=options.get('slaveok', False))

        db = conn[database]

        if username:
            log.info("Attempting to authenticate %s/%s " %
                     (username, password))
            if not db.authenticate(username, password):
                raise InvalidCacheBackendError('Cannot authenticate to '
                                               ' MongoDB.')
        return db[collection]

    # One shared collection handle per database key, via the
    # class-level synchronized registry.
    self.mongo = MongoDBNamespaceManager.clients.get(
        data_key, _create_mongo_conn)
def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
             table_name='beaker_cache', data_dir=None, lock_dir=None,
             **params):
    """Creates a database namespace manager

    ``url``
        SQLAlchemy compliant db url

    ``sa_opts``
        A dictionary of SQLAlchemy keyword options to initialize the
        engine with.

    ``optimistic``
        Use optimistic session locking, note that this will result in an
        additional select when updating a cache value to compare version
        numbers.

    ``table_name``
        The table name to use in the database for the cache.

    :raises MissingCacheParameter: if no lock location or no database
        URL can be determined from the arguments.
    """
    NamespaceManager.__init__(self, namespace)
    if sa_opts is None:
        sa_opts = params

    if lock_dir is not None:
        self.lock_dir = lock_dir
    elif data_dir is None:
        raise MissingCacheParameter("data_dir or lock_dir is required")
    else:
        self.lock_dir = data_dir + "/container_db_lock"
    verify_directory(self.lock_dir)

    # Fix: report a missing URL through the library's own exception
    # instead of leaking a bare KeyError from sa_opts['sa.url'].
    url = url or sa_opts.get('sa.url')
    if not url:
        raise MissingCacheParameter("url is required")

    # Check to see if the table's been created before
    table_key = url + table_name

    def make_cache():
        # Check to see if we have a connection pool open already
        meta_key = url + table_name

        def make_meta():
            if sa_version == '0.3':
                if url.startswith('mysql') and not sa_opts:
                    sa_opts['poolclass'] = pool.QueuePool
                engine = sa.create_engine(url, **sa_opts)
                meta = sa.BoundMetaData(engine)
            else:
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
            return meta
        meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
        # Create the table object and cache it now
        cache = sa.Table(table_name, meta,
                         sa.Column('id', types.Integer, primary_key=True),
                         sa.Column('namespace', types.String(255),
                                   nullable=False),
                         sa.Column('accessed', types.DateTime,
                                   nullable=False),
                         sa.Column('created', types.DateTime,
                                   nullable=False),
                         sa.Column('data', types.BLOB(), nullable=False),
                         sa.UniqueConstraint('namespace'))
        cache.create(checkfirst=True)
        return cache

    self.hash = {}          # in-memory copy of the namespace's data
    self._is_new = False    # True once we know the row doesn't exist yet
    self.loaded = False     # lazy: row is fetched on first access
    self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
def __init__(self, namespace, **kw):
    """Create a namespace manager backed by Google App Engine's
    memcache service.

    Extra keyword arguments are accepted for signature compatibility
    with the other namespace managers but are otherwise unused here.
    """
    NamespaceManager.__init__(self, namespace)
    # The GAE memcache module is used directly as the client object;
    # presumably it exposes a memcache.Client-compatible interface —
    # TODO confirm against the google.appengine.api.memcache docs.
    self.mc = googlememcache