def process(udp_clients):
    """Exercise the sharded cache: PUT every user, then GET and DELETE
    each cached entry, routing every request through the hash ring.

    Args:
        udp_clients: client objects (one per cache node) used to build
            the consistent-hash ring; each must expose ``send(bytes)``.
    """
    ring = ConsistentHash(udp_clients)
    codes = set()

    def _dispatch(payload, key):
        # Route the request to whichever node owns ``key`` on the ring.
        return ring.get_node(key).send(payload)

    # PUT all users.
    for user in USERS:
        payload, key = serialize_PUT(user)
        reply = _dispatch(payload, key)
        print(reply)
        codes.add(str(reply.decode()))
    print(
        f"Number of Users={len(USERS)}\nNumber of Users Cached={len(codes)}"
    )

    # GET all users.
    for code in codes:
        print(code)
        payload, key = serialize_GET(code)
        print(_dispatch(payload, key))

    # DELETE
    for code in codes:
        print(code)
        payload, key = serialize_DELETE(code)
        print(_dispatch(payload, key))
class PartitionClient(object):
    """Client class for the partition library.

    Distributes ``max_partition`` partitions across a cluster of nodes
    with consistent hashing, and uses per-partition ZooKeeper locks so
    that each partition is owned by exactly one node at a time.

    Example usage:
    ---------------------
        from libpartition.libpartition import PartitionClient

        def own_change_cb(l):
            print("ownership change:" + str(l))

        c = PartitionClient("test", "s1", ["s1", "s2", "s3"], 32,
                            own_change_cb, "zookeeper_s1")
        # do some real work now
        if c.own_partition(1):
            ...  # do something with partition #1
        c.update_cluster_list(["s1", "s2"])
    ----------------------
    You should not call any partition library routine from within the
    callback function.

    Args:
        app_name(str): Name of the app for which partition cluster is used
        self_name(str): Name of the local cluster node (can be ip address)
        cluster_list(list): List of all the nodes in the cluster including
            local node
        max_partition(int): Partition space always go from 0..max_partition-1
        partition_update_cb: Callback function invoked when partition
            ownership list is updated
        zk_server(str): <zookeeper server>:<zookeeper server port>
    """

    def __init__(self, app_name, self_name, cluster_list, max_partition,
                 partition_update_cb, zk_server, logger=None):
        # Initialize local variables
        self._zk_server = zk_server
        self._cluster_list = set(cluster_list)
        self._max_partition = max_partition
        self._update_cb = partition_update_cb
        self._curr_part_ownership_list = []
        self._target_part_ownership_list = []
        self._con_hash = ConsistentHash(cluster_list)
        self._name = self_name

        # Sanity check: the local node must be part of the cluster.
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        # Initialize logging; fall back to the root logging module.
        if logger is None:
            logging.basicConfig()
            self._logger = logging
        else:
            self._logger = logger

        self._conn_state = None
        self._sandesh_connection_info_update(status='INIT', message='')

        # Connect to zookeeper, retrying until the connection succeeds.
        self._zk = KazooClient(zk_server)
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master
            # election
            except Exception as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
        # Update connection info
        self._sandesh_connection_info_update(status='UP', message='')
        # Done connecting to ZooKeeper

        # Create a lock array to contain locks for each partition.
        self._part_locks = []
        for part in range(0, self._max_partition):
            lockpath = "/lockpath/" + app_name + "/" + str(part)
            self._part_locks.append(self._zk.Lock(lockpath, self._name))

        # Partition number -> greenlet currently acquiring its lock.
        self._part_lock_task_dict = {}

        self._logger.error("initial servers:" + str(self._cluster_list))

        # Compute the partitions this node should own per the hash ring.
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                self._target_part_ownership_list.append(part)

        # Start acquiring ownership of the target partitions.
        self._acquire_partition_ownership()
    #end __init__

    def _sandesh_connection_info_update(self, status, message):
        """Report ZooKeeper connection state to sandesh and log transitions."""
        from pysandesh.connection_info import ConnectionState
        from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
            ConnectionType
        from pysandesh.gen_py.sandesh.ttypes import SandeshLevel

        new_conn_state = getattr(ConnectionStatus, status)
        ConnectionState.update(conn_type=ConnectionType.ZOOKEEPER,
                               name='Zookeeper', status=new_conn_state,
                               message=message,
                               server_addrs=self._zk_server.split(','))

        # Log only UP <-> DOWN transitions, not steady state.
        if (self._conn_state and self._conn_state != ConnectionStatus.DOWN
                and new_conn_state == ConnectionStatus.DOWN):
            msg = 'Connection to Zookeeper down: %s' % (message)
            self._logger.error(msg)
        if (self._conn_state and self._conn_state != new_conn_state
                and new_conn_state == ConnectionStatus.UP):
            msg = 'Connection to Zookeeper ESTABLISHED'
            self._logger.error(msg)

        self._conn_state = new_conn_state
    # end _sandesh_connection_info_update

    # following routine is the greenlet task function to acquire the lock
    # for a partition
    def _acquire_lock(self, part):
        """Greenlet task: poll until the lock for ``part`` is acquired.

        Returns True once the lock is held (ownership callback fired),
        False if the acquisition was cancelled.
        """
        # lock for the partition
        lock = self._part_locks[part]
        # go in an infinite loop waiting to acquire the lock
        try:
            while True:
                # Non-blocking acquire so a cancel() can interrupt us.
                if lock.acquire(blocking=False) == True:
                    self._logger.error("Acquired lock for:" + str(part))
                    self._curr_part_ownership_list.append(part)
                    self._update_cb(self._curr_part_ownership_list)
                    return True
                else:
                    gevent.sleep(1)
        except CancelledError:
            self._logger.error("Lock acquire cancelled for:" + str(part))
            return False
        except Exception as ex:
            # TODO: If we have a non-KazooException, the lock object
            # may get stuck in the "cancelled" state
            self._logger.error("Lock acquire unexpected error!: " + str(ex))
            # This exception should get propagated to main thread
            raise SystemExit
    #end _acquire_lock

    # get rid of finished spawned tasks from datastructures
    def _cleanup_greenlets(self):
        """Drop finished lock-acquisition greenlets from the task dict."""
        # Iterate over a snapshot of the keys: deleting entries while
        # iterating the live keys() view raises RuntimeError on Python 3.
        for part in list(self._part_lock_task_dict.keys()):
            if self._part_lock_task_dict[part].ready():
                del self._part_lock_task_dict[part]
    #end _cleanup_greenlets

    # following routine launches tasks to acquire partition locks
    def _acquire_partition_ownership(self):
        """Reconcile current partition ownership with the target list.

        Spawns greenlets to acquire locks for newly-targeted partitions
        and cancels/releases locks for partitions no longer targeted.
        """
        # cleanup any finished greenlets
        self._cleanup_greenlets()

        # this variable will help us decide if we need to call callback
        updated_curr_ownership = False
        # list of partitions for which locks have to be released
        release_lock_list = []

        self._logger.error("known servers: %s" %
                           self._con_hash.get_all_nodes())

        for part in range(0, self._max_partition):
            if part in self._target_part_ownership_list:
                if part in self._curr_part_ownership_list:
                    # do nothing, I already have ownership of this partition
                    self._logger.error("No need to acquire ownership of:" +
                                       str(part))
                else:
                    # I need to acquire lock for this partition before I own
                    if part in self._part_lock_task_dict:
                        try:
                            self._part_lock_task_dict[part].get(block=False)
                        except:
                            # do nothing there is already a greenlet running
                            # to acquire the lock (bare except: gevent.Timeout
                            # may not be an Exception subclass)
                            self._logger.error("Already a greenlet running to"
                                               " acquire:" + str(part))
                            continue

                        # Greenlet died without getting ownership. Cleanup
                        self._logger.error("Cleanup stale greenlet running to"
                                           " acquire:" + str(part))
                        del self._part_lock_task_dict[part]

                    self._logger.error("Starting greenlet running to"
                                       " acquire:" + str(part))
                    # launch the greenlet to acquire the lock
                    self._part_lock_task_dict[part] = Greenlet.spawn(
                        self._acquire_lock, part)
            else:
                # give up ownership of the partition

                # cancel any lock acquisition which is ongoing
                if part in self._part_lock_task_dict:
                    try:
                        self._part_lock_task_dict[part].get(block=False)
                    except:
                        self._logger.error(
                            "canceling lock acquisition going on for:" +
                            str(part))
                        # Cancelling the lock should result in killing
                        # the gevent
                        self._part_locks[part].cancel()
                        self._part_lock_task_dict[part].get(block=True)
                    del self._part_lock_task_dict[part]

                if part in self._curr_part_ownership_list:
                    release_lock_list.append(part)
                    self._curr_part_ownership_list.remove(part)
                    updated_curr_ownership = True
                    self._logger.error("giving up ownership of:" + str(part))

        if updated_curr_ownership is True:
            # current partition membership was updated call the callback
            self._update_cb(self._curr_part_ownership_list)

        # release locks which were acquired for dropped partitions
        for part in release_lock_list:
            self._logger.error("release the lock which was acquired:" +
                               str(part))
            try:
                self._part_locks[part].release()
                self._logger.error("fully gave up ownership of:" + str(part))
            except:
                # best-effort release; lock may already be lost
                pass
    #end _acquire_partition_ownership

    def update_cluster_list(self, cluster_list):
        """ Updates the cluster node list
        Args:
            cluster_list(list): New list of names of the nodes in
                the cluster
        Returns:
            None
        """
        # some sanity check
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        new_cluster_list = set(cluster_list)
        new_servers = list(new_cluster_list.difference(self._cluster_list))
        deleted_servers = list(
            set(self._cluster_list).difference(new_cluster_list))
        self._cluster_list = set(cluster_list)

        self._logger.error("deleted servers:" + str(deleted_servers))
        self._logger.error("new servers:" + str(new_servers))

        # update the hash structure
        if new_servers:
            self._con_hash.add_nodes(new_servers)
        if deleted_servers:
            self._con_hash.del_nodes(deleted_servers)

        # update target partition ownership list
        self._target_part_ownership_list = []
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                if part not in self._target_part_ownership_list:
                    self._target_part_ownership_list.append(part)

        # update current ownership list
        self._acquire_partition_ownership()
    #end update_cluster_list

    def own_partition(self, part_no):
        """ Returns ownership information of a partition
        Args:
            part_no(int) : Partition no
        Returns:
            True if partition is owned by the local node
            False if partition is not owned by the local node
        """
        return part_no in self._curr_part_ownership_list
    #end own_partition

    def close(self):
        """ Closes any connections and frees up any data structures
        Returns:
            None
        """
        # clean up greenlets (kill() does not mutate the dict, so
        # iterating the live view here is safe)
        for task in self._part_lock_task_dict.values():
            try:
                task.kill()
            except:
                pass

        # close zookeeper
        try:
            self._zk.stop()
        except:
            pass
        try:
            self._zk.close()
        except:
            pass
    #end close
class PartitionClient(object):
    """Client class for the partition library.

    Distributes ``max_partition`` partitions across a cluster of nodes
    with consistent hashing, and uses per-partition ZooKeeper locks so
    that each partition is owned by exactly one node at a time.

    Example usage:
    ---------------------
        from libpartition.libpartition import PartitionClient

        def own_change_cb(l):
            print("ownership change:" + str(l))

        c = PartitionClient("test", "s1", ["s1", "s2", "s3"], 32,
                            own_change_cb, "zookeeper_s1")
        # do some real work now
        if c.own_partition(1):
            ...  # do something with partition #1
        c.update_cluster_list(["s1", "s2"])
    ----------------------
    You should not call any partition library routine from within the
    callback function.

    Args:
        app_name(str): Name of the app for which partition cluster is used
        self_name(str): Name of the local cluster node (can be ip address)
        cluster_list(list): List of all the nodes in the cluster including
            local node
        max_partition(int): Partition space always go from 0..max_partition-1
        partition_update_cb: Callback function invoked when partition
            ownership list is updated
        zk_server(str): <zookeeper server>:<zookeeper server port>
    """

    def __init__(self, app_name, self_name, cluster_list, max_partition,
                 partition_update_cb, zk_server, logger=None):
        # Initialize local variables
        self._zk_server = zk_server
        self._cluster_list = set(cluster_list)
        self._max_partition = max_partition
        self._update_cb = partition_update_cb
        self._curr_part_ownership_list = []
        self._target_part_ownership_list = []
        self._con_hash = ConsistentHash(cluster_list)
        self._name = self_name

        # Sanity check: the local node must be part of the cluster.
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        # Initialize logging; fall back to the root logging module.
        if logger is None:
            logging.basicConfig()
            self._logger = logging
        else:
            self._logger = logger

        self._conn_state = None
        self._sandesh_connection_info_update(status='INIT', message='')

        # Connect to zookeeper, retrying until the connection succeeds.
        self._zk = KazooClient(zk_server)
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master
            # election
            except Exception as e:
                # Update connection info
                self._sandesh_connection_info_update(status='DOWN',
                                                     message=str(e))
                gevent.sleep(1)
        # Update connection info
        self._sandesh_connection_info_update(status='UP', message='')
        # Done connecting to ZooKeeper

        # Create a lock array to contain locks for each partition.
        self._part_locks = []
        for part in range(0, self._max_partition):
            lockpath = "/lockpath/" + app_name + "/" + str(part)
            self._part_locks.append(self._zk.Lock(lockpath, self._name))

        # Partition number -> greenlet currently acquiring its lock.
        self._part_lock_task_dict = {}

        self._logger.error("initial servers:" + str(self._cluster_list))

        # Compute the partitions this node should own per the hash ring.
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                self._target_part_ownership_list.append(part)

        # Start acquiring ownership of the target partitions.
        self._acquire_partition_ownership()
    #end __init__

    def _sandesh_connection_info_update(self, status, message):
        """Report ZooKeeper connection state to sandesh and log transitions."""
        from pysandesh.connection_info import ConnectionState
        from pysandesh.gen_py.process_info.ttypes import ConnectionStatus, \
            ConnectionType
        from pysandesh.gen_py.sandesh.ttypes import SandeshLevel

        new_conn_state = getattr(ConnectionStatus, status)
        ConnectionState.update(conn_type=ConnectionType.ZOOKEEPER,
                               name='Zookeeper', status=new_conn_state,
                               message=message,
                               server_addrs=self._zk_server.split(','))

        # Log only UP <-> DOWN transitions, not steady state.
        if (self._conn_state and self._conn_state != ConnectionStatus.DOWN
                and new_conn_state == ConnectionStatus.DOWN):
            msg = 'Connection to Zookeeper down: %s' % (message)
            self._logger.error(msg)
        if (self._conn_state and self._conn_state != new_conn_state
                and new_conn_state == ConnectionStatus.UP):
            msg = 'Connection to Zookeeper ESTABLISHED'
            self._logger.error(msg)

        self._conn_state = new_conn_state
    # end _sandesh_connection_info_update

    # following routine is the greenlet task function to acquire the lock
    # for a partition
    def _acquire_lock(self, part):
        """Greenlet task: poll until the lock for ``part`` is acquired.

        Returns True once the lock is held (ownership callback fired),
        False if the acquisition was cancelled.
        """
        # lock for the partition
        lock = self._part_locks[part]
        # go in an infinite loop waiting to acquire the lock
        try:
            while True:
                # Non-blocking acquire so a cancel() can interrupt us.
                if lock.acquire(blocking=False) == True:
                    self._logger.error("Acquired lock for:" + str(part))
                    self._curr_part_ownership_list.append(part)
                    self._update_cb(self._curr_part_ownership_list)
                    return True
                else:
                    gevent.sleep(1)
        except CancelledError:
            self._logger.error("Lock acquire cancelled for:" + str(part))
            return False
        except Exception as ex:
            # TODO: If we have a non-KazooException, the lock object
            # may get stuck in the "cancelled" state
            self._logger.error("Lock acquire unexpected error!: " + str(ex))
            # This exception should get propagated to main thread
            raise SystemExit
    #end _acquire_lock

    # get rid of finished spawned tasks from datastructures
    def _cleanup_greenlets(self):
        """Drop finished lock-acquisition greenlets from the task dict."""
        # Iterate over a snapshot of the keys: deleting entries while
        # iterating the live keys() view raises RuntimeError on Python 3.
        for part in list(self._part_lock_task_dict.keys()):
            if self._part_lock_task_dict[part].ready():
                del self._part_lock_task_dict[part]
    #end _cleanup_greenlets

    # following routine launches tasks to acquire partition locks
    def _acquire_partition_ownership(self):
        """Reconcile current partition ownership with the target list.

        Spawns greenlets to acquire locks for newly-targeted partitions
        and cancels/releases locks for partitions no longer targeted.
        """
        # cleanup any finished greenlets
        self._cleanup_greenlets()

        # this variable will help us decide if we need to call callback
        updated_curr_ownership = False
        # list of partitions for which locks have to be released
        release_lock_list = []

        self._logger.info("known servers: %s" %
                          self._con_hash.get_all_nodes())

        for part in range(0, self._max_partition):
            if part in self._target_part_ownership_list:
                if part in self._curr_part_ownership_list:
                    # do nothing, I already have ownership of this partition
                    self._logger.info("No need to acquire ownership of:" +
                                      str(part))
                else:
                    # I need to acquire lock for this partition before I own
                    if part in self._part_lock_task_dict:
                        try:
                            self._part_lock_task_dict[part].get(block=False)
                        except:
                            # do nothing there is already a greenlet running
                            # to acquire the lock (bare except: gevent.Timeout
                            # may not be an Exception subclass)
                            self._logger.error("Already a greenlet running to"
                                               " acquire:" + str(part))
                            continue

                        # Greenlet died without getting ownership. Cleanup
                        self._logger.error("Cleanup stale greenlet running to"
                                           " acquire:" + str(part))
                        del self._part_lock_task_dict[part]

                    self._logger.error("Starting greenlet running to"
                                       " acquire:" + str(part))
                    # launch the greenlet to acquire the lock
                    self._part_lock_task_dict[part] = Greenlet.spawn(
                        self._acquire_lock, part)
            else:
                # give up ownership of the partition

                # cancel any lock acquisition which is ongoing
                if part in self._part_lock_task_dict:
                    try:
                        self._part_lock_task_dict[part].get(block=False)
                    except:
                        self._logger.error(
                            "canceling lock acquisition going on for:" +
                            str(part))
                        # Cancelling the lock should result in killing
                        # the gevent
                        self._part_locks[part].cancel()
                        self._part_lock_task_dict[part].get(block=True)
                    del self._part_lock_task_dict[part]

                if part in self._curr_part_ownership_list:
                    release_lock_list.append(part)
                    self._curr_part_ownership_list.remove(part)
                    updated_curr_ownership = True
                    self._logger.error("giving up ownership of:" + str(part))

        if updated_curr_ownership is True:
            # current partition membership was updated call the callback
            self._update_cb(self._curr_part_ownership_list)

        # release locks which were acquired for dropped partitions
        for part in release_lock_list:
            self._logger.error("release the lock which was acquired:" +
                               str(part))
            try:
                self._part_locks[part].release()
                self._logger.error("fully gave up ownership of:" + str(part))
            except:
                # best-effort release; lock may already be lost
                pass
    #end _acquire_partition_ownership

    def update_cluster_list(self, cluster_list):
        """ Updates the cluster node list
        Args:
            cluster_list(list): New list of names of the nodes in
                the cluster
        Returns:
            None
        """
        # some sanity check
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        new_cluster_list = set(cluster_list)
        new_servers = list(new_cluster_list.difference(self._cluster_list))
        deleted_servers = list(
            set(self._cluster_list).difference(new_cluster_list))
        self._cluster_list = set(cluster_list)

        # update the hash structure
        if new_servers:
            self._logger.error("new servers:" + str(new_servers))
            self._con_hash.add_nodes(new_servers)
        if deleted_servers:
            self._logger.error("deleted servers:" + str(deleted_servers))
            self._con_hash.del_nodes(deleted_servers)

        # update target partition ownership list
        self._target_part_ownership_list = []
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                if part not in self._target_part_ownership_list:
                    self._target_part_ownership_list.append(part)

        # update current ownership list
        self._acquire_partition_ownership()
    #end update_cluster_list

    def own_partition(self, part_no):
        """ Returns ownership information of a partition
        Args:
            part_no(int) : Partition no
        Returns:
            True if partition is owned by the local node
            False if partition is not owned by the local node
        """
        return part_no in self._curr_part_ownership_list
    #end own_partition

    def close(self):
        """ Closes any connections and frees up any data structures
        Returns:
            None
        """
        # clean up greenlets (kill() does not mutate the dict, so
        # iterating the live view here is safe)
        for task in self._part_lock_task_dict.values():
            try:
                task.kill()
            except:
                pass

        # close zookeeper
        try:
            self._zk.stop()
        except:
            pass
        try:
            self._zk.close()
        except:
            pass
    #end close
class PartitionClient(object):
    """Client class for the partition library.

    Distributes ``max_partition`` partitions across a cluster of nodes
    with consistent hashing, and uses per-partition ZooKeeper locks so
    that each partition is owned by exactly one node at a time.

    Example usage:
    ---------------------
        from libpartition import PartitionClient

        def own_change_cb(l):
            print("ownership change:" + str(l))

        c = PartitionClient("test", "s1", ["s1", "s2", "s3"], 32,
                            own_change_cb, "zookeeper_s1")
        # do some real work now
        if c.own_partition(1):
            ...  # do something with partition #1
        c.update_cluster_list(["s1", "s2"])
    ----------------------
    You should not call any partition library routine from within the
    callback function.

    Args:
        app_name(str): Name of the app for which partition cluster is used
        self_name(str): Name of the local cluster node (can be ip address)
        cluster_list(list): List of all the nodes in the cluster including
            local node
        max_partition(int): Partition space always go from 0..max_partition-1
        partition_update_cb: Callback function invoked when partition
            ownership list is updated
        zk_server(str): <zookeeper server>:<zookeeper server port>
    """

    def __init__(self, app_name, self_name, cluster_list, max_partition,
                 partition_update_cb, zk_server):
        # Initialize local variables
        self._zk_server = zk_server
        self._cluster_list = set(cluster_list)
        self._max_partition = max_partition
        self._update_cb = partition_update_cb
        self._curr_part_ownership_list = []
        self._target_part_ownership_list = []
        self._con_hash = ConsistentHash(cluster_list)
        self._name = self_name

        # Sanity check: the local node must be part of the cluster.
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        # connect to zookeeper
        self._zk = KazooClient(zk_server)
        self._zk.start()

        # Create a lock array to contain locks for each partition.
        self._part_locks = []
        for part in range(0, self._max_partition):
            lockpath = "/lockpath/" + app_name + "/" + str(part)
            self._part_locks.append(self._zk.Lock(lockpath, self._name))

        # Partition number -> greenlet currently acquiring its lock.
        self._part_lock_task_dict = {}

        # Compute the partitions this node should own per the hash ring.
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                self._target_part_ownership_list.append(part)

        # Start acquiring ownership of the target partitions.
        self._acquire_partition_ownership()
    #end __init__

    # following routine is the greenlet task function to acquire the lock
    # for a partition
    def _acquire_lock(self, part):
        """Greenlet task: poll until the lock for ``part`` is acquired.

        Returns True once the lock is held and the ownership callback
        has been invoked.
        """
        # lock for the partition
        lock = self._part_locks[part]

        # If a previous acquisition on this lock is still being
        # cancelled, wait for the cancel to complete first.
        while lock.cancelled == True:
            logging.info("lock acquisition is getting cancelled, lets wait")
            gevent.sleep(1)

        # go in an infinite loop waiting to acquire the lock
        while True:
            # Non-blocking acquire so the greenlet stays killable.
            ret = lock.acquire(blocking=False)
            if ret == True:
                logging.info("Acquired lock for:" + str(part))
                self._curr_part_ownership_list.append(part)
                self._update_cb(self._curr_part_ownership_list)
                return ret
            else:
                gevent.sleep(1)
    #end _acquire_lock

    # get rid of finished spawned tasks from datastructures
    def _cleanup_greenlets(self):
        """Drop finished lock-acquisition greenlets from the task dict."""
        # Iterate over a snapshot of the keys: deleting entries while
        # iterating the live keys() view raises RuntimeError on Python 3.
        for part in list(self._part_lock_task_dict.keys()):
            if self._part_lock_task_dict[part].ready():
                del self._part_lock_task_dict[part]
    #end _cleanup_greenlets

    # following routine launches tasks to acquire partition locks
    def _acquire_partition_ownership(self):
        """Reconcile current partition ownership with the target list.

        Spawns greenlets to acquire locks for newly-targeted partitions
        and cancels/releases locks for partitions no longer targeted.
        """
        # cleanup any finished greenlets
        self._cleanup_greenlets()

        # this variable will help us decide if we need to call callback
        updated_curr_ownership = False

        for part in range(0, self._max_partition):
            if part in self._target_part_ownership_list:
                if part in self._curr_part_ownership_list:
                    # do nothing, I already have ownership of this partition
                    logging.info("No need to acquire ownership of:" +
                                 str(part))
                else:
                    # I need to acquire lock for this partition before I own
                    if part in self._part_lock_task_dict:
                        # do nothing there is already a greenlet running to
                        # acquire the lock
                        logging.info("Already a greenlet running to"
                                     " acquire:" + str(part))
                    else:
                        # launch the greenlet to acquire the lock
                        self._part_lock_task_dict[part] = Greenlet.spawn(
                            self._acquire_lock, part)
            else:
                # give up ownership of the partition

                # cancel any lock acquisition which is ongoing
                if part in self._part_lock_task_dict:
                    # kill the greenlet trying to get the lock for this
                    # partition
                    self._part_lock_task_dict[part].kill()
                    del self._part_lock_task_dict[part]
                    logging.info("canceling lock acquisition going on for:" +
                                 str(part))
                    try:
                        self._part_locks[part].cancel()
                    except:
                        # best-effort cancel
                        pass

                if part in self._curr_part_ownership_list:
                    # release if lock was already acquired
                    logging.info("release the lock which was acquired:" +
                                 str(part))
                    try:
                        self._part_locks[part].release()
                    except:
                        # best-effort release
                        pass
                    self._curr_part_ownership_list.remove(part)
                    updated_curr_ownership = True
                    logging.info("gave up ownership of:" + str(part))

        if updated_curr_ownership is True:
            # current partition membership was updated call the callback
            self._update_cb(self._curr_part_ownership_list)
    #end _acquire_partition_ownership

    def update_cluster_list(self, cluster_list):
        """ Updates the cluster node list
        Args:
            cluster_list(list): New list of names of the nodes in
                the cluster
        Returns:
            None
        """
        # some sanity check
        if self._name not in cluster_list:
            raise ValueError('cluster list is missing local server name')

        new_cluster_list = set(cluster_list)
        new_servers = list(new_cluster_list.difference(self._cluster_list))
        deleted_servers = list(
            set(self._cluster_list).difference(new_cluster_list))
        # Store as a set, consistent with __init__ (the original stored
        # the raw list here, making the attribute's type inconsistent).
        self._cluster_list = set(cluster_list)

        logging.info("deleted servers:" + str(deleted_servers))
        logging.info("new servers:" + str(new_servers))

        # update the hash structure
        if new_servers:
            self._con_hash.add_nodes(new_servers)
        if deleted_servers:
            self._con_hash.del_nodes(deleted_servers)

        # update target partition ownership list
        self._target_part_ownership_list = []
        for part in range(0, self._max_partition):
            if self._con_hash.get_node(str(part)) == self._name:
                if part not in self._target_part_ownership_list:
                    self._target_part_ownership_list.append(part)

        # update current ownership list
        self._acquire_partition_ownership()
    #end update_cluster_list

    def own_partition(self, part_no):
        """ Returns ownership information of a partition
        Args:
            part_no(int) : Partition no
        Returns:
            True if partition is owned by the local node
            False if partition is not owned by the local node
        """
        return part_no in self._curr_part_ownership_list
    #end own_partition

    def close(self):
        """ Closes any connections and frees up any data structures
        Returns:
            None
        """
        # clean up greenlets (kill() does not mutate the dict, so
        # iterating the live view here is safe)
        for task in self._part_lock_task_dict.values():
            try:
                task.kill()
            except:
                pass

        # close zookeeper
        try:
            self._zk.stop()
        except:
            pass
        try:
            self._zk.close()
        except:
            pass
    #end close
class ShardedRedisConnection(object):
    """Maintain a list of several Redis backends in a sharded manner.

    This class establishes several pools based off of the IP addresses
    resolved from the ``hostname`` part of the ``REDIS_URI`` environment
    variable. Any reads, writes, or deletes will be delegated to the
    proper Redis backend by determining which shard the query should be
    directed to.

    Additionally, the ``info`` method is available to gather health
    information across all of the servers in the backend. This data can
    be used to determine the health of the service.

    .. note::

        The hostname in the ``REDIS_URI`` will be DNS resolved and a
        connection will be opened for each address returned in the
        answer section. You can specify a Round-Robin DNS record and we
        will open a connection to all hosts returned.
    """

    def __init__(self):
        # Resolve configuration once, then open one pool per shard host.
        self.config = self._get_redis_config()
        self._connections = {}
        self._consistent_hash = ConsistentHash(self.config.hosts)
        self._establish_connections(self.config)

    def _get_redis_config(self):
        """Construct a Redis config object from the URI env-var.

        Raises:
            RuntimeError: if the URI scheme is not ``redis`` or DNS
                returns no addresses for the hostname.
        """
        LOGGER.debug('Creating connection info for "%s"',
                     os.environ['REDIS_URI'])
        broken = urlsplit(os.environ['REDIS_URI'])
        if broken.scheme != 'redis':
            raise RuntimeError('Non "redis://" URI provided in REDIS_URI!')

        _, _, ip_addresses = socket.gethostbyname_ex(broken.hostname)
        if not ip_addresses:
            raise RuntimeError('Unable to find Redis in DNS!')

        ttl = DEFAULT_TTL
        if broken.query:
            # parse_qs returns a list of values given a key
            ttl = parse_qs(broken.query).get('ttl', [ttl])[0]

        return _redis_config(
            hosts=ip_addresses,
            port=broken.port or DEFAULT_PORT,
            db=broken.path[1:],  # Remove the leading /
            ttl=int(ttl),
        )

    def _establish_connections(self, config):
        """Create Redis connection pool objects for each server shard."""
        LOGGER.debug('Establishing Redis connection pools')
        for host in config.hosts:
            LOGGER.debug('Opening Redis connection to host "%s"', host)
            self._connections[host] = redis.StrictRedis(
                host=host,
                port=config.port,
                db=config.db,
            )

    def _get_shard_connection(self, key):
        """Get a connection for a Redis shard given a ``key``."""
        LOGGER.debug('Getting Redis host shard given key "%s"', key)
        host = self._consistent_hash.get_node(key)
        LOGGER.debug('Got Redis host shard "%s" given key "%s"', host, key)
        return self._connections[host]

    def set(self, key, value, ttl=None):
        """Set ``key`` to ``value`` in a Redis shard.

        Falls back to the TTL parsed from ``REDIS_URI`` when ``ttl`` is
        not supplied.
        """
        LOGGER.debug('Setting Redis key "%s"', key)
        ttl = ttl or self.config.ttl
        connection = self._get_shard_connection(key)
        connection.set(key, value, ex=ttl)

    def get(self, key):
        """Get a ``key`` in a Redis shard."""
        LOGGER.debug('Getting Redis value given key "%s"', key)
        connection = self._get_shard_connection(key)
        return connection.get(key)

    def delete(self, key):
        """Delete a ``key`` in a Redis shard."""
        LOGGER.debug('Deleting Redis key "%s"', key)
        connection = self._get_shard_connection(key)
        return connection.delete(key)

    def sadd(self, key, *values):
        """Add the specified ``values`` to the set stored at ``key``."""
        # Fixed log message: it previously said "Getting ... set members"
        # (copy-pasted from smembers) although this is a write.
        LOGGER.debug('Adding members to Redis key "%s" set', key)
        connection = self._get_shard_connection(key)
        return connection.sadd(key, *values)

    def smembers(self, key):
        """Return all members of the set stored at ``key``."""
        LOGGER.debug('Getting Redis key "%s" set members', key)
        connection = self._get_shard_connection(key)
        return connection.smembers(key)

    def sismember(self, key, member):
        """Returns if ``member`` is a member of the set stored at ``key``."""
        # Fixed log message typo: "Checkin" -> "Checking".
        LOGGER.debug('Checking for membership at Redis key "%s"', key)
        connection = self._get_shard_connection(key)
        return connection.sismember(key, member)

    def info(self):
        """Return a list of the health of each connected redis server.

        :rtype: list
        :returns: A list of the server info from all of the server shards.
        """
        LOGGER.info('Getting Redis server stats')
        stats = []
        for host, connection in self._connections.items():
            LOGGER.debug('Getting Redis health for host "%s"', host)
            stats.append(connection.info())
        return stats
class ConsistentMemcachedClient(Client):
    """
    Consistent Memcached Client attempts to create a scalable Memcached
    cluster that uses Consistent Hashing (using the ketama algorithm).

    In any distributed caching setup, adding or deleting servers disrupts
    the entire hashing and results in significant redistribution of keys.
    A consistent hashing function will significantly decrease the chances
    of a key being hashed to a different slot.

    A good explanation for the algorithm is found here:
    http://michaelnielsen.org/blog/consistent-hashing/
    """

    # Seconds to wait between connection retries before marking a server
    # as dead.
    _RETRY_GAP = 0.1

    def __init__(self, *args, **kwargs):
        """
        A memcache subclass. It currently allows you to add or delete a new
        host at run time. It also checks if a memcache server is down and
        automatically readjusts if a memcached server is not reachable.
        """
        super(ConsistentMemcachedClient, self).__init__(*args, **kwargs)
        self.hash_manager = ConsistentHash(self.servers)

    def _reconfigure_hashing(self):
        """
        If a server can be reached add it to the list of available servers.
        If a server cannot be reached, delete it from the list of
        available servers.
        """
        for server in self.servers:
            if self._is_server_alive(server, sleep=False):
                self._add_alive_server(server)
        # Iterate over a snapshot: _remove_dead_server mutates the very
        # list being walked (and replaces hash_manager), which would skip
        # elements if we iterated the live list.
        for server in list(self.hash_manager.nodes):
            if not self._is_server_alive(server, sleep=False):
                self._remove_dead_server(server)

    def _add_alive_server(self, server):
        """
        Add a server to the hash manager
        """
        if server not in self.hash_manager.nodes:
            self.hash_manager.add_nodes([server])

    def _is_server_alive(self, server, sleep=True):
        """
        Check is server is alive Client._SERVER_RETRIES times
        """
        for _ in range(Client._SERVER_RETRIES):
            # Create the socket outside the try so the finally clause can
            # never hit an unbound name.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                # connect_ex returns 0 on success.
                if not sock.connect_ex(server.address):
                    return True
                if sleep:
                    # Use the retry gap declared on this class (the
                    # original read Client._RETRY_GAP, which this subclass
                    # does not define on the base).
                    time.sleep(self._RETRY_GAP)
            finally:
                sock.close()
        return False

    def _remove_dead_server(self, server):
        """
        Reconfigure hashing by removing the server that is not responding
        """
        try:
            self.hash_manager.nodes.remove(server)
            self.hash_manager = ConsistentHash(self.hash_manager.nodes)
        except ValueError:
            raise ValueError('no data store is functioning, cannot process request')

    def _get_server(self, key):
        """
        Returns the most likely server to hold the key
        """
        self._reconfigure_hashing()
        server = self.hash_manager.get_node(key)
        if not self.buckets:
            return None, None
        for _ in range(Client._SERVER_RETRIES):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                if (server and server.connect()
                        and not sock.connect_ex(server.address)):
                    return server, key
                time.sleep(self._RETRY_GAP)
            finally:
                sock.close()
        return None, None