def test_does_create_connection_with_defaults(self, create_connection):
    """Cluster creation forwards per-host settings plus the shared defaults
    mapping to the connection factory."""
    config = {
        'backend': DummyConnection,
        'defaults': {'foo': 'baz'},
        'hosts': {
            0: {'resp': 'bar'},
        },
    }
    create_cluster(config)
    # Factory receives: backend, host id, host settings, defaults.
    create_connection.assert_called_once_with(
        DummyConnection, 0, {'resp': 'bar'}, {'foo': 'baz'})
def test_does_create_connection_with_defaults(self, create_connection):
    """The shared 'defaults' mapping is handed to every created connection."""
    create_cluster({
        'backend': DummyConnection,
        'defaults': {'foo': 'baz'},
        'hosts': {0: {'resp': 'bar'}},
    })
    create_connection.assert_called_once_with(
        DummyConnection, 0, {'resp': 'bar'}, {'foo': 'baz'},
    )
def get_redis(redis_namespace='default'):
    """Connects to a redis using nydus.

    We simulate a redis cluster by connecting to several redis servers
    in the background and using a consistent hashing ring to choose
    which server stores the data.

    Returns a redis object that can be used like a regular redis object;
    see http://redis.io/
    """
    # No hosts configured for this namespace: hand back a no-op client.
    if not CFG_REDIS_HOSTS or not CFG_REDIS_HOSTS[redis_namespace]:
        return DummyRedisClient()
    # Reuse an already-built cluster for this namespace when available.
    redis = _REDIS_CONN.get(redis_namespace, None)
    if redis:
        return redis
    # nydus expects hosts keyed by an integer index.
    hosts_dict = {}
    for server_num, server_info in enumerate(CFG_REDIS_HOSTS[redis_namespace]):
        hosts_dict[server_num] = server_info
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': hosts_dict
    })
    # Cache the cluster so subsequent calls for this namespace reuse it.
    _REDIS_CONN[redis_namespace] = redis
    return redis
def create_redis_conn():
    """Build the nydus redis cluster from SENTRY_REDIS_OPTIONS."""
    options = dict(engine='nydus.db.backends.redis.Redis')
    options.update(settings.SENTRY_REDIS_OPTIONS)
    return create_cluster(options)
def test_map_does_pipeline(self, RedisClient):
    """map() routes each key to its partition and executes via pipeline.

    'a' and 'd' land on different partitions, so both hosts see a
    pipeline with one set() and one execute().
    """
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': {
            0: {'db': 0},
            1: {'db': 1},
        }
    })

    with redis.map() as conn:
        conn.set('a', 0)
        conn.set('d', 1)

    # ensure this was actually called through the pipeline
    self.assertFalse(RedisClient().set.called)

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(RedisClient().pipeline.call_count, 2)
    RedisClient().pipeline.assert_called_with()

    self.assertEqual(RedisClient().pipeline().set.call_count, 2)
    RedisClient().pipeline().set.assert_any_call('a', 0)
    RedisClient().pipeline().set.assert_any_call('d', 1)

    self.assertEqual(RedisClient().pipeline().execute.call_count, 2)
    RedisClient().pipeline().execute.assert_called_with()
def test_normal_exceptions_dont_break_the_cluster(self):
    """A genuine redis error must propagate instead of marking the host down."""
    redis = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': {0: {'db': 0}, 1: {'db': 1}},
    })

    # Store a plain string key.
    redis.set('a', 0)

    # scard() on a string key is a server-side type error; the real
    # ResponseError should surface rather than the host being retired.
    with self.assertRaises(redis_.ResponseError):
        redis.scard('a')

    # The host is still alive, so this must not raise HostListExhausted.
    redis.get('a')
def __init__(self, settings, **kwargs):
    """Build a consistent-hashing redis cluster from a settings mapping.

    ``settings`` may supply 'hosts' (required, non-empty) and 'defaults';
    ``kwargs`` may supply a key 'prefix'.
    """
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")

    # nydus expects hosts keyed by an integer index.
    nydus_hosts = dict(enumerate(hosts))

    defaults = settings.get("defaults", {'host': 'localhost', 'port': 6379})

    self._backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })
    self._prefix = kwargs.get('prefix', "ssnake:")
def test_map_only_runs_on_required_nodes(self, RedisClient):
    """Keys that all route to one partition touch only that host.

    'a' and 'b' hash to the same partition, so exactly one pipeline is
    created and executed once.
    """
    redis = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': {
            0: {'db': 0},
            1: {'db': 1},
        }
    })

    with redis.map() as conn:
        conn.set('a', 0)
        conn.set('b', 1)

    # ensure this was actually called through the pipeline
    self.assertFalse(RedisClient().set.called)

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(RedisClient().pipeline.call_count, 1)
    RedisClient().pipeline.assert_called_with()

    self.assertEqual(RedisClient().pipeline().set.call_count, 2)
    RedisClient().pipeline().set.assert_any_call('a', 0)
    RedisClient().pipeline().set.assert_any_call('b', 1)

    self.assertEqual(RedisClient().pipeline().execute.call_count, 1)
    RedisClient().pipeline().execute.assert_called_with()
def test_map_only_runs_on_required_nodes(self, StrictRedisClient):
    """StrictRedis variant: same-partition keys touch only one host."""
    redis = create_cluster({
        'engine': 'nydus.db.backends.redis.StrictRedis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': {
            0: {'db': 0},
            1: {'db': 1},
        }
    })

    with redis.map() as conn:
        conn.set('a', 0)
        conn.set('b', 1)

    # ensure this was actually called through the pipeline
    self.assertFalse(StrictRedisClient().set.called)

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(StrictRedisClient().pipeline.call_count, 1)
    StrictRedisClient().pipeline.assert_called_with()

    self.assertEqual(StrictRedisClient().pipeline().set.call_count, 2)
    StrictRedisClient().pipeline().set.assert_any_call('a', 0)
    StrictRedisClient().pipeline().set.assert_any_call('b', 1)

    self.assertEqual(StrictRedisClient().pipeline().execute.call_count, 1)
    StrictRedisClient().pipeline().execute.assert_called_with()
def test_map_does_pipeline(self, StrictRedisClient):
    """StrictRedis variant: cross-partition keys pipeline on both hosts."""
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.StrictRedis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': {
            0: {'db': 0},
            1: {'db': 1},
        }
    })

    with redis.map() as conn:
        conn.set('a', 0)
        conn.set('d', 1)

    # ensure this was actually called through the pipeline
    self.assertFalse(StrictRedisClient().set.called)

    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(StrictRedisClient().pipeline.call_count, 2)
    StrictRedisClient().pipeline.assert_called_with()

    self.assertEqual(StrictRedisClient().pipeline().set.call_count, 2)
    StrictRedisClient().pipeline().set.assert_any_call('a', 0)
    StrictRedisClient().pipeline().set.assert_any_call('d', 1)

    self.assertEqual(StrictRedisClient().pipeline().execute.call_count, 2)
    StrictRedisClient().pipeline().execute.assert_called_with()
def get_redis(redis_namespace='default'):
    """Connects to a redis using nydus.

    We simulate a redis cluster by connecting to several redis servers
    in the background and using a consistent hashing ring to choose
    which server stores the data.

    Returns a redis object that can be used like a regular redis object;
    see http://redis.io/
    """
    # No hosts configured for this namespace: hand back a no-op client.
    if not CFG_REDIS_HOSTS or not CFG_REDIS_HOSTS[redis_namespace]:
        return DummyRedisClient()
    # Reuse an already-built cluster for this namespace when available.
    redis = _REDIS_CONN.get(redis_namespace, None)
    if redis:
        return redis
    # nydus expects hosts keyed by an integer index.
    hosts_dict = {}
    for server_num, server_info in enumerate(CFG_REDIS_HOSTS[redis_namespace]):
        hosts_dict[server_num] = server_info
    # NOTE(review): no 'router' is specified here, so nydus falls back to
    # its default routing despite the docstring's mention of consistent
    # hashing — confirm this is intended.
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'hosts': hosts_dict
    })
    _REDIS_CONN[redis_namespace] = redis
    return redis
def __init__(self, model, name):
    """Create a nine-partition redis timeline store for *model* under *name*."""
    # One logical redis database per partition (db 0-8).
    partition_hosts = {}
    for n in xrange(9):
        partition_hosts[n] = {'db': n}
    self.redis = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.redis.PartitionRouter',
        'hosts': partition_hosts,
    })
    self.model = model
    self.ns = 'timeline:%s' % name
def build_cluster(self, connection=FlakeyConnection, router=RetryableRouter):
    """Return a one-host cluster wired with the given connection and router."""
    config = {
        'backend': connection,
        'router': router,
        'hosts': {0: {'resp': 'bar'}},
    }
    return create_cluster(config)
def __init__(self, servers, **options):
    """Round-robin Solr cluster over the given server URLs."""
    hosts = [{'url': url} for url in servers]
    self.backend = create_cluster({
        'engine': 'sentry.search.solr.client.Solr',
        'router': 'nydus.db.routers.base.RoundRobinRouter',
        'hosts': hosts,
    })
def test_creates_cluster(self):
    """create_cluster yields one connection per configured host."""
    cluster = create_cluster({
        'backend': DummyConnection,
        'router': DummyRouter,
        'hosts': {0: {'resp': 'bar'}},
    })
    self.assertEqual(len(cluster), 1)
def __init__(self, keyspace, size=1000, hosts=None, router="nydus.db.routers.keyvalue.PartitionRouter", **options):
    """Partition-routed redis pool for *keyspace*; defaults to one local host."""
    if hosts is None:
        hosts = {0: {}}  # localhost / default
    self.conn = create_cluster({"engine": "nydus.db.backends.redis.Redis", "router": router, "hosts": hosts})
    # We could set this to the maximum value of random.random() (1.0) if we knew this pool class
    # could stay instantiated. Unfortunately we'll need an offset per project, which could grow
    # indefinitely and would require us to have an LRU.
    self.offset = None
def get_redis_connection():
    """Return (and memoize) the feedly nydus redis cluster."""
    from nydus.db import create_cluster

    config = settings.FEEDLY_NYDUS_CONFIG['CONNECTIONS']['redis']
    # Clusters are cached keyed by their stringified configuration.
    key = unicode(config)
    cluster = connection_cache.get(key)
    if not cluster:
        cluster = create_cluster(config)
        connection_cache[key] = cluster
    return cluster
def setUp(self):
    """Build the prefix-partitioned redis cluster used by the tests."""
    from nydus.db import create_cluster

    hosts = {
        'default': {'db': 0, 'host': 'localhost', 'port': 6379},
        'user:loves:': {'db': 1, 'host': 'localhost', 'port': 6379},
    }
    self.redis = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.redis.PrefixPartitionRouter',
        'hosts': hosts,
    })
def get_cluster(hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter'):
    """Create a redis cluster, defaulting to a single local instance."""
    if hosts is None:
        hosts = {0: {}}  # localhost / default
    return create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': router,
        'hosts': hosts,
    })
def test_creates_cluster(self):
    """create_cluster yields one connection per configured host."""
    c = create_cluster({
        'backend': DummyConnection,
        'router': DummyRouter,
        'hosts': {
            0: {'resp': 'bar'},
        }
    })
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(len(c), 1)
def get_redis_connection():
    """Return the redis cluster from NYDUS_CONFIG, creating it on first use."""
    from django.conf import settings
    from nydus.db import create_cluster

    config = settings.NYDUS_CONFIG["CONNECTIONS"]["redis"]
    # Cache key is the stringified configuration.
    key = unicode(config)
    cluster = connection_cache.get(key)
    if not cluster:
        cluster = create_cluster(config)
        connection_cache[key] = cluster
    return cluster
def __init__(self, hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter', **options):
    """Redis-backed buffer; falls back to a single local redis host."""
    super(RedisBuffer, self).__init__(**options)
    if hosts is None:
        hosts = {0: {}}  # localhost / default
    self.conn = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': router,
        'hosts': hosts,
    })
def __init__(self, **options):
    """Redis-backed store; options default to SENTRY_REDIS_OPTIONS."""
    if not options:
        # Inherit default options from REDIS_OPTIONS — copied, so the
        # setdefault() calls below don't mutate the shared settings dict
        # in place (the original aliased it and wrote defaults into it).
        options = dict(settings.SENTRY_REDIS_OPTIONS)
    options.setdefault('hosts', {0: {}})
    options.setdefault('router', 'nydus.db.routers.keyvalue.PartitionRouter')
    self.conn = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': options['router'],
        'hosts': options['hosts'],
    })
def cluster(self):
    """Five-partition StrictRedis cluster over local databases 5-9."""
    hosts = dict((n, {'db': n + 5}) for n in range(5))
    return create_cluster({
        'backend': 'nydus.db.backends.redis.StrictRedis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': hosts,
    })
def build_cluster(self, connection=FlakeyConnection, router=RetryableRouter):
    """Return a single-node cluster for exercising retry behaviour."""
    host_settings = {0: {'resp': 'bar'}}
    return create_cluster({
        'backend': connection,
        'router': router,
        'hosts': host_settings,
    })
def cluster(self):
    """Partitioned Redis cluster spanning local databases 5 through 9."""
    return create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': {n: {'db': n + 5} for n in range(5)},
    })
def __init__(self, **options):
    """Redis-backed quota; options default to SENTRY_REDIS_OPTIONS."""
    if not options:
        # Inherit default options from REDIS_OPTIONS — copied, so the
        # setdefault() calls below don't mutate the shared settings dict
        # in place (the original aliased it and wrote defaults into it).
        options = dict(settings.SENTRY_REDIS_OPTIONS)
    super(RedisQuota, self).__init__(**options)
    options.setdefault('hosts', {0: {}})
    options.setdefault('router', 'nydus.db.routers.keyvalue.PartitionRouter')
    self.conn = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': options['router'],
        'hosts': options['hosts'],
    })
def test_custom_identifier_specified(self):
    """Hosts that declare an 'identifier' keep it verbatim on the connection."""
    cluster_config = {
        'backend': 'nydus.db.backends.redis.Redis',
        'hosts': {
            0: {'db': 0, 'identifier': 'redis://127.0.0.1:6379/0'},
            1: {'db': 1, 'identifier': 'redis://127.0.0.1:6380/1'},
        },
    }
    redis = create_cluster(cluster_config)

    for idx in cluster_config['hosts'].keys():
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(redis.hosts[idx].identifier,
                         cluster_config['hosts'][idx]['identifier'])
def test_missing_default(self):
    """Keys matching no configured prefix must raise ValueError."""
    from functools import partial

    from nydus.db import create_cluster

    config = dict(
        engine='nydus.db.backends.redis.Redis',
        router='nydus.db.routers.redis.PrefixPartitionRouter',
        hosts={
            'base': {'db': 0, 'host': 'localhost', 'port': 6379},
            'user:loves:': {'db': 1, 'host': 'localhost', 'port': 6379},
        },
    )
    redis = create_cluster(config)
    # No 'default' host is configured, so an unprefixed key cannot route.
    self.assertRaises(ValueError, partial(redis.get, 'thiswillbreak'))
def get_cluster(self, router):
    """Create (and flush) a five-node Thoonk cluster for the given router."""
    cluster = create_cluster({
        'backend': 'nydus.db.backends.thoonk.Thoonk',
        'router': router,
        'hosts': dict((n, {'db': n + 5}) for n in range(5)),
    })
    # NOTE(review): 'flush_custer' looks like a typo for 'flush_cluster',
    # but the helper is defined elsewhere under this exact name.
    self.flush_custer(cluster)
    return cluster
def __init__(self, keyspace, size=1000, hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter', **options):
    """Partition-routed redis pool for *keyspace*; defaults to one local host."""
    if hosts is None:
        hosts = {
            0: {}  # localhost / default
        }
    self.conn = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': router,
        'hosts': hosts,
    })
    # We could set this to the maximum value of random.random() (1.0) if we knew this pool class
    # could stay instantiated. Unfortunately we'll need an offset per project, which could grow
    # indefinitely and would require us to have an LRU.
    self.offset = None
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
    """Wrap a nydus cluster behind the Django database-backend interface."""
    self.settings_dict = settings_dict
    self.alias = alias
    options = settings_dict['OPTIONS']
    # NOTE(review): setdefault() mutates the OPTIONS dict from the settings
    # in place; anything else sharing that dict will see these defaults.
    options.setdefault('backend', 'djnydus.db.DjangoDatabase')
    options.setdefault('router', 'djnydus.db.router.OrmRouter')
    try:
        self.cluster = create_cluster(options)
    except KeyError:
        exc_info = sys.exc_info()
        # Python 2 three-argument raise: re-raise as ImproperlyConfigured
        # while preserving the original traceback.
        raise ImproperlyConfigured('%s: %s' % (exc_info[0].__name__, exc_info[1])), None, exc_info[2]
    self.creation = DatabaseCreation(self, self.cluster)
    self.introspection = DatabaseIntrospection(self, self.cluster)
def test_pipelined_map(self):
    """Values set through map() are readable back through the same pipeline."""
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.PartitionRouter',
        'hosts': dict((n, {'db': n + 5}) for n in range(5)),
    })
    chars = ('a', 'b', 'c', 'd', 'e', 'f')
    with redis.map() as conn:
        for i, c in enumerate(chars):
            conn.set(c, i)
        res = [conn.get(c) for c in chars]
    # Results resolve once the map block exits.
    self.assertEqual(range(len(chars)), [int(r._wrapped) for r in res])
def test_pipeline_behavior(self, Client):
    """map() batches sets into set_multi and gets into get_multi."""
    cluster = create_cluster({
        'engine': 'nydus.db.backends.memcache.Memcache',
        'hosts': {0: {'binary': True}},
    })

    with cluster.map() as conn:
        for key, value in (('a', 1), ('b', 2), ('c', 3)):
            conn.set(key, value)
        for key in ('a', 'b', 'c'):
            conn.get(key)

    Client.return_value.set_multi.assert_any_call({'a': 1, 'b': 2, 'c': 3})
    Client.return_value.get_multi.assert_any_call(['a', 'b', 'c'])
def __init__(self, hosts=None, router=None, prefix='ts:', vnodes=64, **kwargs):
    """Time-series store on redis; defaults come from SENTRY_REDIS_OPTIONS."""
    # Inherit any unspecified connection options from the global settings.
    redis_options = settings.SENTRY_REDIS_OPTIONS
    if hosts is None:
        hosts = redis_options.get('hosts', {0: {}})
    if router is None:
        router = redis_options.get(
            'router', 'nydus.db.routers.keyvalue.PartitionRouter')
    self.conn = create_cluster(dict(
        engine='nydus.db.backends.redis.Redis',
        router=router,
        hosts=hosts,
    ))
    self.prefix = prefix
    self.vnodes = vnodes
    super(RedisTSDB, self).__init__(**kwargs)
def test_custom_identifier_specified(self):
    """A host-supplied 'identifier' is adopted verbatim by the connection."""
    hosts = {
        0: {'db': 0, 'identifier': 'redis://127.0.0.1:6379/0'},
        1: {'db': 1, 'identifier': 'redis://127.0.0.1:6380/1'},
    }
    redis = create_cluster({
        'backend': 'nydus.db.backends.redis.Redis',
        'hosts': hosts,
    })
    for idx, host in hosts.items():
        self.assertEqual(redis.hosts[idx].identifier, host['identifier'])
def test_pipeline_integration(self):
    """End-to-end: three mapped sets then three mapped gets round-trip."""
    cluster = create_cluster({
        'engine': 'nydus.db.backends.memcache.Memcache',
        'hosts': {
            0: {'binary': True},
        }
    })

    with cluster.map() as conn:
        conn.set('a', 1)
        conn.set('b', 2)
        conn.set('c', 3)
        conn.get('a')
        conn.get('b')
        conn.get('c')

    results = conn.get_results()
    # assertEquals is a deprecated alias; the duplicate of this test
    # already uses assertEqual.
    self.assertEqual(len(results), 6, results)
    # Sets yield no value; gets return the stored values in order.
    self.assertEqual(results[0:3], [None, None, None])
    self.assertEqual(results[3:6], [1, 2, 3])
def test_pipeline_integration(self):
    """End-to-end: mapped sets land and mapped gets return stored values."""
    cluster = create_cluster({
        'engine': 'nydus.db.backends.memcache.Memcache',
        'hosts': {0: {'binary': True}},
    })

    pairs = [('a', 1), ('b', 2), ('c', 3)]
    with cluster.map() as conn:
        for key, value in pairs:
            conn.set(key, value)
        for key, _ in pairs:
            conn.get(key)

    results = conn.get_results()
    # Three sets (no return value) followed by the three stored values.
    self.assertEqual(len(results), 6, results)
    self.assertEqual(results[0:3], [None, None, None])
    self.assertEqual(results[3:6], [1, 2, 3])
def __init__(self, settings, **kwargs):
    """Analytics backend on a consistent-hashing redis cluster."""
    host_list = settings.get("hosts", [])
    if not host_list:
        raise Exception("No redis hosts specified")

    # nydus wants an index-keyed mapping of host settings.
    nydus_hosts = dict(enumerate(host_list))

    defaults = settings.get("defaults", {"host": "localhost", "port": 6379})

    self._analytics_backend = create_cluster({
        "engine": "nydus.db.backends.redis.Redis",
        "router": "nydus.db.routers.keyvalue.ConsistentHashingRouter",
        "hosts": nydus_hosts,
        "defaults": defaults,
    })
def test_pipeline_get_multi(self, Client):
    """Mapped gets are coalesced into one get_multi call per server."""
    cluster = create_cluster({
        'engine': 'nydus.db.backends.memcache.Memcache',
        'router': 'nydus.db.routers.RoundRobinRouter',
        'hosts': {
            0: {'binary': True},
            1: {'binary': True},
        }
    })

    keys = ['a', 'b', 'c', 'd', 'e', 'f']
    with cluster.map() as conn:
        for key in keys:
            conn.get(key)

    self.assertEqual(len(conn.get_results()), len(keys))
    # Removed a stray debug `print conn.get_results()` left in the test.
    # Individual get() must never be issued when pipelining.
    self.assertEqual(Client().get.call_count, 0)
    # Note: This is two because it should execute the command once for each
    # of the two servers.
    self.assertEqual(Client().get_multi.call_count, 2)
def test_normal_exceptions_dont_break_the_cluster(self):
    """Server-side type errors propagate without exhausting the host list."""
    redis = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': {
            0: {'db': 0},
            1: {'db': 1},
        }
    })

    # Write an ordinary string value.
    redis.set('a', 0)

    # scard() against a non-set key should surface redis' own
    # ResponseError instead of being treated as a dead host.
    with self.assertRaises(redis_.ResponseError):
        redis.scard('a')

    # And the cluster keeps working afterwards (no HostListExhausted).
    redis.get('a')
def _init(self, server, params):
    """Set up the underlying redis client for this Django cache backend.

    ``server`` is either ``"host:port"`` or a unix socket path; ``params``
    are the remaining Django CACHES options.

    Raises:
        ImproperlyConfigured: if the port portion is not an integer.
    """
    super(CacheClass, self).__init__(params)
    self._server = server
    self._params = params

    unix_socket_path = None
    # A colon means "host:port"; anything else is taken as a socket path.
    if ':' in self.server:
        host, port = self.server.rsplit(':', 1)
        try:
            port = int(port)
        except (ValueError, TypeError):
            raise ImproperlyConfigured("port value must be an integer")
    else:
        host, port = None, None
        unix_socket_path = self.server

    # Connection settings shared by both client flavours below.
    kwargs = {
        'db': self.db,
        'password': self.password,
        'host': host,
        'port': port,
        'unix_socket_path': unix_socket_path,
    }

    if not NYDUS_CACHE_BACKEND:
        # Plain redis-py client backed by an explicit connection pool.
        connection_pool = pool.get_connection_pool(
            parser_class=self.parser_class,
            **kwargs
        )
        self._client = redis.Redis(
            connection_pool=connection_pool,
            **kwargs
        )
    else:
        # Route through a single-host nydus cluster instead.
        self._client = create_cluster({
            'backend': 'nydus.db.backends.redis.Redis',
            'router': 'nydus.db.routers.keyvalue.PartitionRouter',
            'hosts': {
                0: kwargs,
            },
        })
def __init__(self, settings, **kwargs):
    """Redis analytics backend; remaining setup is delegated to the base class."""
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")

    # Re-key the host list by integer index, as nydus expects.
    nydus_hosts = {i: host for i, host in enumerate(hosts)}

    defaults = settings.get("defaults", {
        'host': 'localhost',
        'port': 6379,
    })

    self._analytics_backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })

    super(Redis, self).__init__(settings, **kwargs)
def test_map_does_pipeline(self, Client):
    """Seven round-robined sets fan out across the four hosts."""
    cluster = create_cluster({
        'engine': 'nydus.db.backends.memcache.Memcache',
        'router': 'nydus.db.routers.RoundRobinRouter',
        'hosts': dict((n, {'binary': True}) for n in range(4)),
    })

    with cluster.map() as conn:
        for index, key in enumerate('abcdefg'):
            conn.set(key, index + 1)

    # Assertion order matters: each Client() call here bumps
    # Client.call_count, which the second assertion depends on.
    self.assertEqual(Client().set.call_count, 7)
    self.assertEqual(Client.call_count, 5)
    self.assertEqual(len(conn.get_results()), 7)
def get_cluster(self, router):
    """Build a Thoonk cluster over local dbs 5-9 and flush it before use."""
    hosts = {}
    for index in range(5):
        hosts[index] = {'db': index + 5}
    cluster = create_cluster({
        'backend': 'nydus.db.backends.thoonk.Thoonk',
        'router': router,
        'hosts': hosts,
    })
    self.flush_custer(cluster)
    return cluster
from django.core.exceptions import ImproperlyConfigured

from sequere.utils import get_client

from sequere.backends.redis.managers import Manager

from . import settings

from .wrappers import RedisWrapper, NydusWrapper

# Choose the redis client for the timeline backend: a nydus cluster when
# TIMELINE_NYDUS_CONNECTION is configured, otherwise a plain redis client.
nydus_connection = settings.TIMELINE_NYDUS_CONNECTION

if nydus_connection:
    try:
        from nydus.db import create_cluster
    except ImportError:
        raise ImproperlyConfigured(
            "The nydus backend requires nydus to be installed.")
    else:
        client = NydusWrapper(create_cluster(nydus_connection))
else:
    client = RedisWrapper(get_client(settings.TIMELINE_CONNECTION,
                                     connection_class=settings.TIMELINE_CONNECTION_CLASS))

# Module-level manager instance shared by the timeline backend.
storage = Manager(client, prefix=settings.TIMELINE_PREFIX)
def init_database(app):
    """Create the app's nydus cluster from its DB config (default: empty)."""
    from nydus.db import create_cluster

    app.config.setdefault('DB', {})
    return create_cluster(app.config['DB'])
def reload(self):
    """Rebuild every configured cluster from the settings callback."""
    from nydus.db import create_cluster

    for alias, conn_settings in iteritems(self.conf_callback()):
        self[alias] = create_cluster(conn_settings)

    self._is_ready = True
from nydus.db import create_cluster import time partition_cluster = create_cluster({ 'engine': 'nydus.db.backends.redis.Redis', 'router': 'nydus.db.routers.keyvalue.PartitionRouter', 'hosts': { 0: { 'db': 0 }, 1: { 'db': 1 }, 2: { 'db': 2 }, 3: { 'db': 3 }, }, }) ketama_cluster = create_cluster({ 'engine': 'nydus.db.backends.redis.Redis', 'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter', 'hosts': { 0: { 'db': 0 }, 1: { 'db': 1
def cluster(self):
    """Single-node Pycassa cluster against the local 'test' keyspace."""
    config = {
        'backend': 'nydus.db.backends.pycassa.Pycassa',
        'hosts': ['localhost'],
        'keyspace': 'test',
    }
    return create_cluster(config)