def __init__(self, servers):
    VERSION = tuple(map(int, redis.__version__.split('.')))
    self.nodes = []
    self.connections = {}
    if VERSION < (2, 4, 0):
        self.pool = redis.ConnectionPool()
    else:
        self.pool = None
    if isinstance(servers, list):
        for server in servers:
            conn = redis.Redis(
                host=server['host'], port=server['port'], db=server['db'],
                connection_pool=self.pool,
                password=server.get('password'),
                socket_timeout=server.get('socket_timeout'))
            name = server['name']
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            self.connections[name] = conn
            self.nodes.append(name)
    elif isinstance(servers, dict):
        for server_name, server in servers.items():
            conn = redis.Redis(
                host=server['host'], port=server['port'], db=server['db'],
                connection_pool=self.pool,
                password=server.get('password'),
                socket_timeout=server.get('socket_timeout'))
            name = server_name
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            self.connections[name] = conn
            self.nodes.append(name)
    else:
        raise ValueError("server's config must be list or dict")
    self.ring = HashRing(self.nodes)
def test_server_ring():
    memcache_servers = [
        '192.168.0.246:11212',
        '192.168.0.247:11212',
        '192.168.0.249:11212'
    ]
    ring = HashRing(memcache_servers)
    actual = ring.get_node('my_key')
    expected = '192.168.0.247:11212'
    assert expected == actual
def configure(self, options, config):
    self.node_count = options.distributed_nodes
    self.node_id = options.distributed_node_number
    self.hash_by_class = options.distributed_hash_by_class

    if not self._options_are_valid():
        self.enabled = False
        return

    if options.distributed_disabled:
        self.enabled = False
        return

    if self.node_count > 1:
        # If the user gives us a non-1 count of distributed nodes, then
        # let's distribute their tests
        self.enabled = True
        self.hash_ring = HashRing(range(1, self.node_count + 1))
class RedisShardAPI(object):

    def __init__(self, servers):
        self.pool = redis.ConnectionPool()
        nodes = []
        self.connections = {}
        for server in servers:
            conn = redis.Redis(host=server['host'], port=server['port'],
                               db=server['db'], connection_pool=self.pool)
            name = server['name']
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            self.connections[name] = conn
            nodes.append(name)
        self.ring = HashRing(nodes)

    def get_server(self, key):
        name = self.ring.get_node(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        g = _findhash.match(key)
        if g != None and len(g.groups()) > 0:
            key = g.groups()[0]
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in [
            "get", "set", "getset", "setnx", "setex",
            "incr", "decr", "exists", "delete", "get_type",
            "rename", "expire", "ttl", "push", "llen",
            "lrange", "ltrim", "lindex", "pop", "lset", "lrem",
            "sadd", "srem", "sismember", "smembers",
            "zadd", "zrem", "zincr", "zrange", "zrevrange",
            "zrangebyscore", "zremrangebyrank", "zremrangebyscore",
            "zcard", "zscore",
            "hget", "hset", "hdel", "hincrby", "hlen",
            "hkeys", "hvals", "hgetall", "hexists", "hmget", "hmset",
            "publish",
        ]:
            return functools.partial(self.__wrap, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)
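# A hedged usage sketch for the RedisShardAPI class defined above. The server
# names, hosts, ports and keys are invented for illustration; only methods that
# appear in the __getattr__ whitelist above are called.
servers = [
    {'name': 'node1', 'host': '127.0.0.1', 'port': 6379, 'db': 0},
    {'name': 'node2', 'host': '127.0.0.1', 'port': 6380, 'db': 0},
]
client = RedisShardAPI(servers)

# Each call is intercepted by __getattr__ and routed, via the hash ring,
# to the connection that owns the first (key) argument.
client.set('user:1:name', 'alice')   # hashed onto one node
client.set('user:2:name', 'bob')     # possibly a different node
print(client.get('user:1:name'))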
def __init__(self, servers):
    self.pool = redis.ConnectionPool()
    nodes = []
    self.connections = {}
    for server in servers:
        conn = redis.Redis(host=server['host'], port=server['port'],
                           db=server['db'], connection_pool=self.pool)
        name = server['name']
        if name in self.connections:
            raise ValueError("server's name config must be unique")
        self.connections[name] = conn
        nodes.append(name)
    self.ring = HashRing(nodes)
def __init__(self, settings=None):
    self.nodes = []
    self.connections = {}
    settings = format_config(settings)
    for server_config in settings:
        name = server_config.pop('name')
        conn = redis.Redis(**server_config)
        if name in self.connections:
            raise ValueError("server's name config must be unique")
        server_config['name'] = name
        self.connections[name] = conn
        self.nodes.append(name)
    self.ring = HashRing(self.nodes)
def __init__(self, settings=None):
    self.nodes = []
    self.connections = {}
    settings = format_config(settings)
    for server in settings:
        name = server.get('name')
        conn = redis.Redis(host=server.get('host'),
                           port=server.get('port'),
                           db=server.get('db'),
                           password=server.get('password'),
                           socket_timeout=server.get('socket_timeout'),
                           unix_socket_path=server.get('unix_socket_path'),
                           )
        server['name'] = name
        if name in self.connections:
            raise ValueError("server's name config must be unique")
        self.connections[name] = conn
        self.nodes.append(name)
    self.ring = HashRing(self.nodes)
class DistributedNose(Plugin):
    """
    Distribute a test run, shared-nothing style, by specifying the total
    number of runners and a unique ID for this runner.
    """

    name = 'distributed'

    def __init__(self):
        Plugin.__init__(self)

        self.node_count = None
        self.node_id = None
        self.hash_ring = None

    def options(self, parser, env):
        parser.add_option(
            "--nodes",
            action="store",
            dest="distributed_nodes",
            default=env.get('NOSE_NODES', 1),
            help="Across how many nodes are tests being distributed?",
        )
        parser.add_option(
            "--node-number",
            action="store",
            dest="distributed_node_number",
            default=env.get('NOSE_NODE_NUMBER', 1),
            help=("Of the total nodes running distributed tests, "
                  "which number is this node? (1-indexed)"),
        )
        parser.add_option(
            "--distributed-disabled",
            action="store_true",
            dest="distributed_disabled",
            default=False,
            help=(("Set this flag to disable distribution, "
                   "despite having more than 1 node configured. "
                   "This is useful if you use environment configs "
                   "and want to temporarily disable test distribution.")),
        )
        parser.add_option(
            "--hash-by-class",
            action="store_true",
            dest="distributed_hash_by_class",
            # any non-empty value enables
            default=bool(env.get('NOSE_HASH_BY_CLASS', False)),
            help=((
                "By default, tests are distributed individually. "
                "This results in the most even distribution and the"
                " best speed if all tests have the same runtime. "
                "However, it duplicates class setup/teardown work; "
                "set this flag to keep tests in the same class on the same node. "  # noqa
            )),
        )

    def configure(self, options, config):
        self.node_count = options.distributed_nodes
        self.node_id = options.distributed_node_number
        self.hash_by_class = options.distributed_hash_by_class

        if not self._options_are_valid():
            self.enabled = False
            return

        if options.distributed_disabled:
            self.enabled = False
            return

        if self.node_count > 1:
            # If the user gives us a non-1 count of distributed nodes, then
            # let's distribute their tests
            self.enabled = True
            self.hash_ring = HashRing(range(1, self.node_count + 1))

    def _options_are_valid(self):
        try:
            self.node_count = int(self.node_count)
        except ValueError:
            logger.critical("--nodes must be an integer")
            return False

        try:
            self.node_id = int(self.node_id)
        except ValueError:
            logger.critical("--node-number must be an integer")
            return False

        if self.node_id > self.node_count:
            logger.critical(("--node-number can't be larger "
                             "than the number of nodes"))
            return False

        if self.node_id < 1:
            logger.critical("--node-number must be greater than zero")
            return False

        return True

    def validateName(self, testObject):
        try:
            _, module, call = test_address(testObject)
        except TypeError:
            module = 'unknown'
            call = str(testObject)
        node = self.hash_ring.get_node('%s.%s' % (module, call))
        if node != self.node_id:
            return False

        return None

    def wantClass(self, cls):
        if not self.hash_by_class:
            # Defer to wantMethod.
            return None
        node = self.hash_ring.get_node(str(cls))
        if node != self.node_id:
            return False
        return None

    def wantMethod(self, method):
        if self.hash_by_class:
            # Don't override class selection decisions.
            return None
        return self.validateName(method)

    def wantFunction(self, function):
        # Always operate directly on bare functions.
        return self.validateName(function)
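# A minimal sketch of the selection rule used by the plugin above: every test
# address hashes onto a ring of node ids 1..N, and a node only "wants" the
# tests that hash to its own id. The test names are invented examples, and the
# HashRing class is assumed to be the same one used elsewhere in these snippets.
from hashring import HashRing

node_count = 3
hash_ring = HashRing(range(1, node_count + 1))

tests = ['pkg.mod.test_login', 'pkg.mod.test_logout', 'pkg.other.test_search']
for node_id in range(1, node_count + 1):
    wanted = [t for t in tests if hash_ring.get_node(t) == node_id]
    print('node %d runs %r' % (node_id, wanted))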
from hashring import HashRing

memcache_servers = [
    '192.168.0.246:11212',
    '192.168.0.247:11212',
    '192.168.0.249:11212'
]

ring = HashRing(memcache_servers)

server = ring.get_node("my_key")
print(server)

node = ring.get_node_pos("mk")
print(node)
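# A hedged follow-on sketch using only HashRing.get_node (as above) to show how
# a batch of keys spreads across the same server list; the key names are
# arbitrary examples.
from collections import Counter
from hashring import HashRing

servers = ['192.168.0.246:11212', '192.168.0.247:11212', '192.168.0.249:11212']
ring = HashRing(servers)

keys = ['user:%d' % i for i in range(1000)]
distribution = Counter(ring.get_node(k) for k in keys)
print(distribution)  # expected to be a roughly even split across the three servers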
def set_ring (self, shard_id, exe_dict):
    """initialize the HashRing"""
    self._shard_id = shard_id
    self._exe_dict = exe_dict
    self._hash_ring = HashRing(exe_dict.keys())
def test_basic_ring():
    hr = HashRing(range(3))
    actual = hr.get_node('howdy')
    expected = 1
    assert expected == actual
class Population (object):

    def __init__ (self, indiv_instance, ff_name, prefix="/tmp/exelixi"):
        self.indiv_class = indiv_instance.__class__
        self.feature_factory = instantiate_class(ff_name)
        self.prefix = prefix

        self._shard_id = None
        self._exe_dict = None
        self._hash_ring = None

        self.n_pop = self.feature_factory.n_pop
        self._total_indiv = 0
        self._term_limit = self.feature_factory.term_limit
        self._hist_granularity = self.feature_factory.hist_granularity
        self._selection_rate = self.feature_factory.selection_rate
        self._mutation_rate = self.feature_factory.mutation_rate

        self._shard = {}
        self._bf = BloomFilter(num_bytes=125, num_probes=14, iterable=[])

    def set_ring (self, shard_id, exe_dict):
        """initialize the HashRing"""
        self._shard_id = shard_id
        self._exe_dict = exe_dict
        self._hash_ring = HashRing(exe_dict.keys())

    ######################################################################
    ## Individual lifecycle within the local subset of the Population

    def populate (self, current_gen):
        """initialize the population"""
        for _ in xrange(self.n_pop):
            # constructor pattern
            indiv = self.indiv_class()
            indiv.populate(current_gen, self.feature_factory.generate_features())

            # add the generated Individual to the Population
            # failure semantics: must filter nulls from initial population
            self.reify(indiv)

    def reify (self, indiv):
        """test/add a newly generated Individual into the Population (birth)"""
        neighbor_shard_id = None
        exe_uri = None

        if self._hash_ring:
            neighbor_shard_id = self._hash_ring.get_node(indiv.key)

            if neighbor_shard_id != self._shard_id:
                exe_uri = self._exe_dict[neighbor_shard_id]

        # distribute this operation over the hash ring, through a remote queue
        if exe_uri:
            msg = {"key": indiv.key, "gen": indiv.gen,
                   "feature_set": loads(indiv.get_json_feature_set())}
            lines = post_exe_rest(self.prefix, neighbor_shard_id, exe_uri, "pop/reify", msg)
            return False
        else:
            return self._reify_locally(indiv)

    def receive_reify (self, key, gen, feature_set):
        """test/add a received reify request"""
        indiv = self.indiv_class()
        indiv.populate(gen, feature_set)
        self._reify_locally(indiv)

    def _reify_locally (self, indiv):
        """test/add a newly generated Individual into the Population locally (birth)"""
        if not indiv.key in self._bf:
            self._bf.update([indiv.key])
            self._total_indiv += 1

            # potentially the most expensive operation, deferred until remote reification
            indiv.get_fitness(self.feature_factory, force=True)
            self._shard[indiv.key] = indiv

            return True
        else:
            return False

    def evict (self, indiv):
        """remove an Individual from the Population (death)"""
        if indiv.key in self._shard:
            # Individual only needs to be removed locally
            del self._shard[indiv.key]

            # NB: serialize to disk (write behinds)
            url = self._get_storage_path(indiv)

    def get_part_hist (self):
        """tally counts for the partial histogram of the fitness distribution"""
        d = (Counter([round(indiv.get_fitness(self.feature_factory, force=False), self._hist_granularity)
                      for indiv in self._shard.values()])).items()
        d.sort(reverse=True)
        return d

    def get_fitness_cutoff (self, hist):
        """determine fitness cutoff (bin lower bounds) for the parent selection filter"""
        h = hist.items()
        h.sort(reverse=True)
        logging.debug("fit: %s", h)

        n_indiv = sum([count for bin, count in h])
        part_sum = 0
        break_next = False

        for bin, count in h:
            if break_next:
                break

            part_sum += count
            percentile = part_sum / float(n_indiv)
            break_next = percentile >= self._selection_rate

        logging.debug("fit: percentile %f part_sum %d n_indiv %d bin %f",
                      percentile, part_sum, n_indiv, bin)
        return bin

    def _get_storage_path (self, indiv):
        """create a path for durable storage of an Individual"""
        return self.prefix + "/" + indiv.key

    def _boost_diversity (self, current_gen, indiv):
        """randomly select other individuals and mutate them, to promote genetic diversity"""
        if self._mutation_rate > random():
            indiv.mutate(self, current_gen, self.feature_factory)
        elif len(self._shard.values()) >= 3:
            # ensure that there are at least three parents
            self.evict(indiv)

    def _select_parents (self, current_gen, fitness_cutoff):
        """select the parents for the next generation"""
        partition = map(lambda x: (round(x.get_fitness(), self._hist_granularity) >= fitness_cutoff, x),
                        self._shard.values())
        good_fit = map(lambda x: x[1], filter(lambda x: x[0], partition))
        poor_fit = map(lambda x: x[1], filter(lambda x: not x[0], partition))

        # randomly select other individuals to promote genetic diversity, while removing the remnant
        for indiv in poor_fit:
            self._boost_diversity(current_gen, indiv)

        return self._shard.values()

    def next_generation (self, current_gen, fitness_cutoff):
        """select/mutate/crossover parents to produce a new generation"""
        parents = self._select_parents(current_gen, fitness_cutoff)

        for _ in xrange(self.n_pop - len(parents)):
            f, m = sample(parents, 2)
            success = f.breed(self, current_gen, m, self.feature_factory)

        # backfill to avoid the dreaded Population collapse
        for _ in xrange(self.n_pop - len(self._shard.values())):
            # constructor pattern
            indiv = self.indiv_class()
            indiv.populate(current_gen, self.feature_factory.generate_features())
            self.reify(indiv)

        logging.info("gen: %d shard %s size %d total %d",
                     current_gen, self._shard_id, len(self._shard.values()), self._total_indiv)

    def test_termination (self, current_gen, hist):
        """evaluate the terminating condition for this generation and report progress"""
        return self.feature_factory.test_termination(current_gen, self._term_limit, hist)

    def enum (self, fitness_cutoff):
        """enum all Individuals that exceed the given fitness cutoff"""
        return [["%0.4f" % indiv.get_fitness(), str(indiv.gen), indiv.get_json_feature_set()]
                for indiv in filter(lambda x: x.get_fitness() >= fitness_cutoff, self._shard.values())]

    def report_summary (self):
        """report a summary of the evolution"""
        for indiv in sorted(self._shard.values(), key=lambda x: x.get_fitness(), reverse=True):
            print self._get_storage_path(indiv)
            print "\t".join(["%0.4f" % indiv.get_fitness(), "%d" % indiv.gen, indiv.get_json_feature_set()])
# coding=utf-8
from hashring import HashRing

memcache_servers = ['192.168.100.50', '192.168.100.51', '192.168.100.52']

ring = HashRing(memcache_servers)
ring.add_node('192.168.100.53')
ring.remove_node('192.168.100.53')

server = ring.get_proper_node('my_key')
print server
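# A hedged follow-on sketch reusing only the methods shown above (add_node,
# remove_node, get_proper_node) to illustrate the point of consistent hashing:
# removing a node should only remap the keys that lived on it. Key names are
# invented for illustration.
from hashring import HashRing

keys = ['key-%d' % i for i in range(100)]

ring = HashRing(['192.168.100.50', '192.168.100.51', '192.168.100.52'])
ring.add_node('192.168.100.53')
before = dict((k, ring.get_proper_node(k)) for k in keys)

ring.remove_node('192.168.100.53')
after = dict((k, ring.get_proper_node(k)) for k in keys)

moved = [k for k in keys if before[k] != after[k]]
print(len(moved))  # only the keys that mapped to .53 are expected to move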
class RedisShardAPI(object):

    def __init__(self, servers):
        VERSION = tuple(map(int, redis.__version__.split('.')))
        self.nodes = []
        self.connections = {}
        if VERSION < (2, 4, 0):
            self.pool = redis.ConnectionPool()
        else:
            self.pool = None
        if isinstance(servers, list):
            for server in servers:
                conn = redis.Redis(
                    host=server['host'], port=server['port'], db=server['db'],
                    connection_pool=self.pool,
                    password=server.get('password'),
                    socket_timeout=server.get('socket_timeout'))
                name = server['name']
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        elif isinstance(servers, dict):
            for server_name, server in servers.items():
                conn = redis.Redis(
                    host=server['host'], port=server['port'], db=server['db'],
                    connection_pool=self.pool,
                    password=server.get('password'),
                    socket_timeout=server.get('socket_timeout'))
                name = server_name
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        else:
            raise ValueError("server's config must be list or dict")
        self.ring = HashRing(self.nodes)

    def get_server_name(self, key):
        g = _findhash.match(key)
        if g is not None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        name = self.get_server_name(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_tag(self, method, *args, **kwargs):
        key = args[0]
        if isinstance(key, basestring) and '{' in key:
            server = self.get_server(key)
        elif isinstance(key, list) and '{' in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        method = method.lstrip("tag_")
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __hop_in(self, method, *args, **kwargs):
        '''use the field as the key for the hash ring lookup'''
        if not isinstance(args[1], str):
            key = str(args[1])
        else:
            key = args[1]
        try:
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the second argument" % method)
        server = self.get_server(key)
        if method == "hget_in":
            method = "hget"
        elif method == "hset_in":
            method = "hset"
        elif method == "hdel_in":
            method = "hdel"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __qop_in(self, method, *args, **kwargs):
        '''use the node that the fixed key maps to on the hash ring as the queue server'''
        key = "queue"
        server = self.get_server(key)
        if method == "rpush_in":
            method = "rpush"
        elif method == "blpop_in":
            method = "blpop"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in [
            "get", "set", "getset", "setnx", "setex",
            "incr", "decr", "exists", "delete", "get_type", "type",
            "rename", "expire", "ttl", "push", "persist",
            "llen", "lrange", "ltrim", "lpush", "lpop", "lindex",
            "pop", "lset", "lrem",
            "sadd", "srem", "scard", "sismember", "smembers",
            "zadd", "zrem", "zincrby", "zincr", "zrank",
            "zrange", "zrevrange", "zrangebyscore", "zremrangebyrank",
            "zrevrangebyscore", "zremrangebyscore", "zcard", "zscore", "zcount",
            "hget", "hset", "hdel", "hincrby", "hlen",
            "hkeys", "hvals", "hgetall", "hexists", "hmget", "hmset",
            "publish", "rpush", "rpop"
        ]:
            return functools.partial(self.__wrap, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        elif method in ["hget_in", "hset_in", "hdel_in"]:
            return functools.partial(self.__hop_in, method)
        elif method in ["blpop_in", "rpush_in"]:
            return functools.partial(self.__qop_in, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)

    #########################################
    ### some methods implemented as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def flushdb(self):
        for node in self.nodes:
            server = self.connections[node]
            server.flushdb()

    def hgetall_in(self, key):
        result = {}
        for node in self.nodes:
            server = self.connections[node]
            result.update(server.hgetall(key))
        return result

    def hmget_in(self, key, fields):
        result = {}
        node_field = {}
        for field in fields:
            node = self.get_server_name(field)
            node_field.setdefault(node, [])
            node_field[node].append(field)
        for node, field_list in node_field.items():
            server = self.connections[node]
            value = server.hmget(key, field_list)
            for i in range(len(field_list)):
                result[field_list[i]] = value[i]
        return result

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pipeline(self):
        return Pipeline(self)
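# A hedged sketch of the *_in helpers defined above: hget_in/hset_in shard a
# hash by its *field* (the second argument) rather than by the hash key, while
# hgetall_in/hmget_in gather the pieces back from every node. The connection
# settings and key/field names are invented for illustration.
client = RedisShardAPI([
    {'name': 'node1', 'host': '127.0.0.1', 'port': 6379, 'db': 0},
    {'name': 'node2', 'host': '127.0.0.1', 'port': 6380, 'db': 0},
])

client.hset_in('user:42', 'email', 'someone@example.com')  # routed by field 'email'
client.hset_in('user:42', 'city', 'Beijing')               # possibly a different node

print(client.hget_in('user:42', 'email'))
print(client.hgetall_in('user:42'))                 # merges the partial hashes from all nodes
print(client.hmget_in('user:42', ['email', 'city']))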
class RedisShardAPI(object):

    def __init__(self, servers):
        VERSION = tuple(map(int, redis.__version__.split(".")))
        self.nodes = []
        self.connections = {}
        if VERSION < (2, 4, 0):
            self.pool = redis.ConnectionPool()
        else:
            self.pool = None
        if isinstance(servers, list):
            for server in servers:
                conn = redis.Redis(
                    host=server["host"],
                    port=server["port"],
                    db=server["db"],
                    connection_pool=self.pool,
                    password=server.get("password"),
                    socket_timeout=server.get("socket_timeout"),
                )
                name = server["name"]
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        elif isinstance(servers, dict):
            for server_name, server in servers.items():
                conn = redis.Redis(
                    host=server["host"],
                    port=server["port"],
                    db=server["db"],
                    connection_pool=self.pool,
                    password=server.get("password"),
                    socket_timeout=server.get("socket_timeout"),
                )
                name = server_name
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        else:
            raise ValueError("server's config must be list or dict")
        self.ring = HashRing(self.nodes)

    def get_server_name(self, key):
        g = _findhash.match(key)
        if g != None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        name = self.get_server_name(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_tag(self, method, *args, **kwargs):
        key = args[0]
        if isinstance(key, basestring) and "{" in key:
            server = self.get_server(key)
        elif isinstance(key, list) and "{" in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        method = method.lstrip("tag_")
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __hop_in(self, method, *args, **kwargs):
        """use the field as the key for the hash ring lookup"""
        if not isinstance(args[1], str):
            key = str(args[1])
        else:
            key = args[1]
        try:
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the second argument" % method)
        server = self.get_server(key)
        if method == "hget_in":
            method = "hget"
        elif method == "hset_in":
            method = "hset"
        elif method == "hdel_in":
            method = "hdel"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __qop_in(self, method, *args, **kwargs):
        """use the node that the fixed key maps to on the hash ring as the queue server"""
        key = "queue"
        server = self.get_server(key)
        if method == "rpush_in":
            method = "rpush"
        elif method == "blpop_in":
            method = "blpop"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in [
            "get", "set", "getset", "setnx", "setex",
            "incr", "decr", "exists", "delete", "get_type", "type",
            "rename", "expire", "ttl", "push", "persist",
            "llen", "lrange", "ltrim", "lpush", "lpop", "lindex",
            "pop", "lset", "lrem",
            "sadd", "srem", "scard", "sismember", "smembers",
            "zadd", "zrem", "zincr", "zrank",
            "zrange", "zrevrange", "zrangebyscore", "zremrangebyrank",
            "zrevrangebyscore", "zremrangebyscore", "zcard", "zscore", "zcount",
            "hget", "hset", "hdel", "hincrby", "hlen",
            "hkeys", "hvals", "hgetall", "hexists", "hmget", "hmset",
            "publish", "rpush", "rpop",
        ]:
            return functools.partial(self.__wrap, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        elif method in ["hget_in", "hset_in", "hdel_in"]:
            return functools.partial(self.__hop_in, method)
        elif method in ["blpop_in", "rpush_in"]:
            return functools.partial(self.__qop_in, method)
        else:
            raise NotImplementedError("method '%s' cannot be pipelined" % method)

    #########################################
    ### some methods implemented as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def flushdb(self):
        for node in self.nodes:
            server = self.connections[node]
            server.flushdb()

    def hgetall_in(self, key):
        result = {}
        for node in self.nodes:
            server = self.connections[node]
            result.update(server.hgetall(key))
        return result

    def hmget_in(self, key, fields):
        result = {}
        node_field = {}
        for field in fields:
            node = self.get_server_name(field)
            node_field.setdefault(node, [])
            node_field[node].append(field)
        for node, field_list in node_field.items():
            server = self.connections[node]
            value = server.hmget(key, field_list)
            for i in range(len(field_list)):
                result[field_list[i]] = value[i]
        return result

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pipeline(self):
        return Pipeline(self)
class RedisShardAPI(object):

    def __init__(self, settings=None):
        self.nodes = []
        self.connections = {}
        settings = format_config(settings)
        for server_config in settings:
            name = server_config.pop('name')
            conn = redis.Redis(**server_config)
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            server_config['name'] = name
            self.connections[name] = conn
            self.nodes.append(name)
        self.ring = HashRing(self.nodes)

    def get_server_name(self, key):
        g = _findhash.match(key)
        if g is not None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        name = self.get_server_name(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_tag(self, method, *args, **kwargs):
        key = args[0]
        if isinstance(key, basestring) and '{' in key:
            server = self.get_server(key)
        elif isinstance(key, list) and '{' in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        method = method.lstrip("tag_")
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in SHARD_METHODS:
            return functools.partial(self.__wrap, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)

    #########################################
    ### some methods implemented as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def flushdb(self):
        for node in self.nodes:
            server = self.connections[node]
            server.flushdb()

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pipeline(self):
        return Pipeline(self)
class RedisShardAPI(object):

    def __init__(self, servers):
        VERSION = tuple(map(int, redis.__version__.split('.')))
        self.nodes = []
        self.connections = {}
        if VERSION < (2, 4, 0):
            self.pool = redis.ConnectionPool()
        else:
            self.pool = None
        if isinstance(servers, list):
            for server in servers:
                conn = redis.Redis(host=server['host'], port=server['port'],
                                   db=server['db'], connection_pool=self.pool,
                                   password=server.get('password'),
                                   socket_timeout=server.get('socket_timeout'))
                name = server['name']
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        elif isinstance(servers, dict):
            for server_name, server in servers.items():
                conn = redis.Redis(host=server['host'], port=server['port'],
                                   db=server['db'], connection_pool=self.pool,
                                   password=server.get('password'),
                                   socket_timeout=server.get('socket_timeout'))
                name = server_name
                if name in self.connections:
                    raise ValueError("server's name config must be unique")
                self.connections[name] = conn
                self.nodes.append(name)
        else:
            raise ValueError("server's config must be list or dict")
        self.ring = HashRing(self.nodes)

    def get_server_name(self, key):
        g = _findhash.match(key)
        if g != None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        name = self.get_server_name(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_tag(self, method, *args, **kwargs):
        key = args[0]
        if isinstance(key, basestring) and '{' in key:
            server = self.get_server(key)
        elif isinstance(key, list) and '{' in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        method = method.lstrip("tag_")
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __hop_in(self, method, *args, **kwargs):
        '''use the field as the key for the hash ring lookup'''
        if not isinstance(args[1], str):
            key = str(args[1])
        else:
            key = args[1]
        try:
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the second argument" % method)
        server = self.get_server(key)
        if method == "hget_in":
            method = "hget"
        elif method == "hset_in":
            method = "hset"
        elif method == "hdel_in":
            method = "hdel"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __qop_in(self, method, *args, **kwargs):
        '''use the node that the fixed key maps to on the hash ring as the queue server'''
        key = "queue"
        server = self.get_server(key)
        if method == "rpush_in":
            method = "rpush"
        elif method == "blpop_in":
            method = "blpop"
        else:
            print "you can't be here"
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in [
            "get", "set", "getset", "setnx", "setex",
            "incr", "decr", "exists", "delete", "get_type", "type",
            "rename", "expire", "ttl", "push", "persist",
            "llen", "lrange", "ltrim", "lpush", "lpop", "lindex",
            "pop", "lset", "lrem",
            "sadd", "srem", "scard", "sismember", "smembers",
            "zadd", "zrem", "zincr", "zrank",
            "zrange", "zrevrange", "zrangebyscore", "zremrangebyrank",
            "zrevrangebyscore", "zremrangebyscore", "zcard", "zscore", "zcount",
            "hget", "hset", "hdel", "hincrby", "hlen",
            "hkeys", "hvals", "hgetall", "hexists", "hmget", "hmset",
            "publish", "rpush", "rpop"
        ]:
            return functools.partial(self.__wrap, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        elif method in ["hget_in", "hset_in", "hdel_in"]:
            return functools.partial(self.__hop_in, method)
        elif method in ["blpop_in", "rpush_in"]:
            return functools.partial(self.__qop_in, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)

    #########################################
    ### some methods implemented as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def flushdb(self):
        for node in self.nodes:
            server = self.connections[node]
            server.flushdb()

    def hgetall_in(self, key):
        result = {}
        for node in self.nodes:
            server = self.connections[node]
            result.update(server.hgetall(key))
        return result

    def hmget_in(self, key, fields):
        result = {}
        node_field = {}
        for field in fields:
            node = self.get_server_name(field)
            node_field.setdefault(node, [])
            node_field[node].append(field)
        for node, field_list in node_field.items():
            server = self.connections[node]
            value = server.hmget(key, field_list)
            for i in range(len(field_list)):
                result[field_list[i]] = value[i]
        return result
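# A hedged sketch of the tag_* dispatch defined above: keys that share a
# "{tag}" substring are hashed by that tag (via _findhash), so related keys
# land on the same shard. The configuration values and key names are invented.
# Note that method.lstrip("tag_") strips *characters*, not the prefix, so this
# pattern is only safe for wrapped names that do not begin with 't', 'a', 'g'
# or '_' once the prefix is gone (e.g. "tag_set" works, "tag_get" would not).
client = RedisShardAPI([
    {'name': 'shard-a', 'host': '127.0.0.1', 'port': 6379, 'db': 0},
    {'name': 'shard-b', 'host': '127.0.0.1', 'port': 6380, 'db': 0},
])

client.tag_set('{user:42}:profile', 'some profile blob')
client.tag_sadd('{user:42}:friends', 'user:7')
# Both keys contain "{user:42}", so get_server_name hashes the same tag and
# both calls are served by the same node.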
class RedisShardAPI(object):

    SHARD_METHODS = set([
        "get", "set", "getset", "setnx", "setex",
        "incr", "decr", "exists", "delete", "get_type", "type",
        "rename", "expire", "ttl", "push", "persist",
        "llen", "lrange", "ltrim", "lpush", "lpop", "lindex",
        "pop", "lset", "lrem",
        "sadd", "srem", "scard", "sismember", "smembers",
        "zadd", "zrem", "zincrby", "zincr", "zrank",
        "zrange", "zrevrange", "zrangebyscore", "zremrangebyrank",
        "zrevrangebyscore", "zremrangebyscore", "zcard", "zscore", "zcount",
        "hget", "hset", "hdel", "hincrby", "hlen",
        "hkeys", "hvals", "hgetall", "hexists", "hmget", "hmset",
        "publish", "rpush", "rpop"
    ])

    def __init__(self, settings=None):
        self.nodes = []
        self.connections = {}
        settings = format_config(settings)
        for server in settings:
            name = server.get('name')
            conn = redis.Redis(host=server.get('host'),
                               port=server.get('port'),
                               db=server.get('db'),
                               password=server.get('password'),
                               socket_timeout=server.get('socket_timeout'),
                               unix_socket_path=server.get('unix_socket_path'),
                               )
            server['name'] = name
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            self.connections[name] = conn
            self.nodes.append(name)
        self.ring = HashRing(self.nodes)

    def get_server_name(self, key):
        g = _findhash.match(key)
        if g is not None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        name = self.get_server_name(key)
        return self.connections[name]

    def __wrap(self, method, *args, **kwargs):
        try:
            key = args[0]
            assert isinstance(key, basestring)
        except:
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_tag(self, method, *args, **kwargs):
        key = args[0]
        if isinstance(key, basestring) and '{' in key:
            server = self.get_server(key)
        elif isinstance(key, list) and '{' in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        method = method.lstrip("tag_")
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        if method in self.SHARD_METHODS:
            return functools.partial(self.__wrap, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)

    #########################################
    ### some methods implemented as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be a single string; multiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def flushdb(self):
        for node in self.nodes:
            server = self.connections[node]
            server.flushdb()

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pipeline(self):
        return Pipeline(self)
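# A hedged sketch of the lock() helper above. Per its docstring the returned
# Lock mimics threading.Lock, so acquire()/release() are assumed here; the
# settings layout (assumed to be accepted by format_config), the key name and
# the timeout are all invented for illustration.
settings = [
    {'name': 'shard-a', 'host': '127.0.0.1', 'port': 6379, 'db': 0},
    {'name': 'shard-b', 'host': '127.0.0.1', 'port': 6380, 'db': 0},
]
client = RedisShardAPI(settings)

job_lock = client.lock('locks:nightly-job', timeout=30)
if job_lock.acquire():
    try:
        pass  # critical section: only one client at a time should get here
    finally:
        job_lock.release()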
def set_ring(self, shard_id, shard_dict):
    """initialize the HashRing"""
    self._shard_id = shard_id
    self._shard_dict = shard_dict
    self._hash_ring = HashRing(shard_dict.keys())
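# A minimal sketch of how a ring built as in set_ring above can be used: a key
# hashes to a shard id, which maps back to that shard's executor. The shard ids,
# URIs and key are invented for illustration.
from hashring import HashRing

shard_dict = {'shard-0': 'http://10.0.0.1:9311', 'shard-1': 'http://10.0.0.2:9311'}
ring = HashRing(shard_dict.keys())

key = 'indiv-key-123'
owner = ring.get_node(key)
print(owner, shard_dict[owner])  # the shard that should handle this key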