def main_hashclient_test():
    # A second node, e.g. ('192.168.204.128', 11212), can be added to the list.
    client = HashClient([('192.168.204.128', 11211)])
    client.set('heshanshan', 'some value')
    result = client.get('heshanshan')
    logging.debug(result)
class MemCacheHelper(object):
    def __init__(self):
        self.client = HashClient([(MEMCACHED_ENDPOINT, MEMCACHED_PORT)])

    def set(self, key, value):
        self.client.set(key, value)

    def get(self, key):
        return self.client.get(key)
class MetricCache:
    def __init__(self, server_list):
        # server_list = list of (host, port)
        self.client = HashClient(server_list)

    def increment(self, *, metric_name, value):
        # Note: this get-then-set initialisation is not atomic; two writers
        # can race here. See the sketch below for a race-free variant.
        if not self.client.get(metric_name):
            self.client.set(metric_name, 0)
        return self.client.incr(metric_name, value)
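# A minimal race-free sketch of the same increment (assuming the same
# HashClient): `add` only succeeds when the key is absent, so concurrent
# writers cannot clobber an existing counter, and `incr` is atomic on the
# memcached server side.
def increment_atomic(client, metric_name, value):
    client.add(metric_name, 0, noreply=False)  # no-op if the key already exists
    return client.incr(metric_name, value)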
def sendtocache(link):
    elasticache_config_endpoint = "giftbot.h0k94j4.cfg.use1.cache.amazonaws.com:11211"
    nodes = elasticache_auto_discovery.discover(elasticache_config_endpoint)
    # Materialise into a list so it survives printing and reuse
    # (the original used a one-shot map object and Python 2 print statements).
    nodes = [(node[1], int(node[2])) for node in nodes]
    memcache_client = HashClient(nodes)
    print(nodes)
    memcache_client.set('ImageLink', link[5])
    print(link[5])
    memcache_client.set('ImageName', link[4])
    print(link[4])
class MemcacheMap(Map):
    def open(self):
        # HashClient expects a list of (host, port) tuples.
        self._db = HashClient([('127.0.0.1', 11211)])
        return self

    def _put(self, key: Key, value: bytes) -> Key:
        self._db.set(key_bytes(key), value)
        return key

    def _get(self, uuid: UUID, time: int) -> Optional[bytes]:
        # The original referenced an undefined `key` here; this assumes a
        # Key can be rebuilt from the (uuid, time) pair.
        return self._db.get(key_bytes(Key(uuid, time)))

    def close(self):
        del self._db
        return self
class MemcacheHashCli(object):
    def __init__(self, hosts):
        self.client = HashClient(hosts)

    def set(self, key, value, expire):
        try:
            return self.client.set(key, value, expire)
        except Exception:
            return False

    def get(self, key):
        try:
            return self.client.get(key, default=None)
        except Exception:
            return None

    def mset(self, values, expire):
        try:
            return self.client.set_many(values, expire)
        except Exception:
            return False

    def mget(self, keys):
        try:
            return self.client.get_many(keys)
        except Exception:
            return None
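# A short usage sketch for the wrapper above; the host list and TTL are
# illustrative assumptions, not values from the original.
cli = MemcacheHashCli([('127.0.0.1', 11211), ('127.0.0.1', 11212)])
cli.set('greeting', 'hello', 300)
cli.mset({'a': '1', 'b': '2'}, 300)
print(cli.get('greeting'))   # b'hello' (or None on error)
print(cli.mget(['a', 'b']))  # {'a': b'1', 'b': b'2'} for the keys found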
class MemCache(Cache):
    def __init__(self, memcache_conf_url, key_expiry_secs):
        try:
            self._conf_url = memcache_conf_url
            self._expire_secs = key_expiry_secs
            print("config url", self._conf_url)
            nodes = elasticache_auto_discovery.discover(self._conf_url)
            print("nodes 1:", nodes)
            # Materialise into a list so the print shows the contents rather
            # than a one-shot map object.
            nodes = [(node[1], int(node[2])) for node in nodes]
            print("nodes 2:", nodes)
            self._client = HashClient(
                nodes,
                serde=serde.PickleSerde(pickle_version=3),
                default_noreply=False,
                ignore_exc=False,
                connect_timeout=3,
                timeout=5,
                retry_attempts=3,
                retry_timeout=1)
            print("memcache client", self._client)
        except Exception as e:
            print("MemCache exception", e)
            print(traceback.format_exc())
        finally:
            print("~MemCache constructor")

    def reinit(self):
        return MemCache(self._conf_url, self._expire_secs)

    def set(self, cache_key, value):
        try:
            self._client.set(cache_key, value, expire=self._expire_secs)
            # print("wrote to memcache {} = {}".format(cache_key, value))
        except Exception as e:
            # The original format string dropped the exception argument.
            print("ERROR writing '{}' to memcache: {}".format(cache_key, e))
            raise

    def get(self, cache_key):
        return self._client.get(cache_key)

    def multiget(self, cache_keys):
        return self._client.get_multi(cache_keys)

    def delete(self, cache_key):
        self._client.delete(cache_key)
def GetQuery(self):
    servers = settings.JSON_SETTINGS['MemcachedServers']
    cServers = []
    for server in servers:
        host, port = server.split(':')
        cServers.append((host, int(port)))
    cl = HashClient(cServers)
    q = cl.get('queue')
    if q is None:
        self.QueryStat()
        queue = [event['Queue'] for event in self.events]
        cl.set('queue', ','.join(queue),
               settings.JSON_SETTINGS['MemcachedExpire'])
        return queue
    return q.decode("utf-8").split(',')
from pymemcache.client.hash import HashClient


class Memcache(object):
    """
    This class is an interface to a memcache storage backend.
    It implements the methods needed to store and fetch for a URL shortener.
    """

    def __init__(self, host, key_expiration, username=None, password=None):
        """
        Instantiate a memcache storage backend object
        @params:
            host: the server to connect to
            key_expiration: key expiration in seconds
            username: not used
            password: not used
        @returns:
        """
        # HashClient expects a list of (host, port) tuples, and the timeout
        # arguments are durations in seconds (the original passed booleans;
        # 1.0 s here is an illustrative choice).
        self.client = HashClient(servers=[(host, 11211)],
                                 connect_timeout=1.0,
                                 timeout=1.0,
                                 use_pooling=True,
                                 max_pool_size=100)
        self.key_expiration = key_expiration

    def set_value(self, key, value):
        """
        Set a new record in the database for a short code
        @params:
            key: the short code to insert
            value: the corresponding long url
        @returns: True or False depending on success
        """
        return self.client.set(key, value.encode('utf-8'), self.key_expiration)

    def get_value(self, key):
        """
        Get a long url from its shortened form
        @params:
            key: the short form to look up
        @returns: a long url if found, None otherwise
        """
        res = self.client.get(key)
        if res is not None:
            return res.decode('utf-8')
        return None

    def get_stat(self, key):
        pass

    def get_all(self):
        pass
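# Hypothetical round trip through the backend above ('localhost' and the
# 3600 s expiry are assumptions, not values from the original):
backend = Memcache('localhost', key_expiration=3600)
backend.set_value('abc123', 'https://example.com/some/long/path')
print(backend.get_value('abc123'))  # 'https://example.com/some/long/path'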
def push_game_info(group_id, game_info):
    """
    Push the game info for a group.

    :param group_id: ID for the group
    :type group_id: String
    :param game_info: JSON information regarding the game status
        (serialized into String format)
    :type game_info: String
    """
    nodes = elasticache_auto_discovery.discover(
        'hackgt5mem.gy46cz.cfg.use1.cache.amazonaws.com:11211')
    nodes = [(node[1], int(node[2])) for node in nodes]
    memcache_client = HashClient(nodes)
    memcache_client.set(str(group_id), str(game_info))
    return {
        "group_id": group_id,
        "game_info": game_info,
        "status": "success"
    }
class Memcached(Cache):
    """Implements a cache backed by memcached"""

    def __init__(self, servers="127.0.0.1:11211", **kwargs):
        """Constructor"""
        Cache.__init__(self, **kwargs)
        if isinstance(servers, string_types):
            servers = [s.strip() for s in servers.split(",")]
        self.cache = HashClient(servers, use_pooling=True)
        self.timeout = int(kwargs.get("timeout", 0))

    def getKey(self, tile):
        """Get the key for this tile"""
        return "/".join(map(str, [tile.layer.name, tile.x, tile.y, tile.z]))

    def get(self, tile):
        """Get the cache data"""
        key = self.getKey(tile)
        tile.data = self.cache.get(key)
        return tile.data

    def set(self, tile, data):
        """Set the cache data"""
        if self.readonly:
            return data
        key = self.getKey(tile)
        self.cache.set(key, data, self.timeout)
        return data

    def delete(self, tile):
        """Delete a tile from the cache"""
        key = self.getKey(tile)
        self.cache.delete(key)

    def attemptLock(self, tile):
        """Attempt to lock the cache for a tile"""
        # memcached treats expiry values above 30 days as an absolute unix
        # timestamp, which is what this lock relies on.
        return self.cache.add(self.getLockName(tile), "0",
                              time.time() + self.timeout)

    def unlock(self, tile):
        """Attempt to unlock the cache for a tile"""
        self.cache.delete(self.getLockName(tile))
def test_no_servers_left_with_commands_return_default_value(self):
    from pymemcache.client.hash import HashClient
    client = HashClient(
        [], use_pooling=True,
        ignore_exc=True,
        timeout=1, connect_timeout=1
    )

    result = client.get('foo')
    assert result is None

    result = client.set('foo', 'bar')
    assert result is False
from pymemcache.client.hash import HashClient
import time
import random

client = HashClient([
    ('127.0.0.1', 11211),
    ('127.0.0.1', 11212),
    ('127.0.0.1', 11213)
])

# Renamed from `string` to avoid shadowing the stdlib module name.
charset = 'zxcvbnmasdfghjklqwertyuiop1234567890'
counter = 0
max_items = 100000
nodes = 3


def randomizer():
    # Build a random 512-character payload from the character set above.
    return ''.join(random.choice(charset) for _ in range(512))


start_time = int(time.time())
while counter != max_items:
    counter += 1
    value = randomizer()
    client.set(str(counter), value, expire=360)
end_time = int(time.time())

print("Took {} seconds to write {} using {} nodes".format(
    end_time - start_time, max_items, nodes))
"""Produce `n`-character chunks from `s`.""" for start in range(0, len(s), n): yield s[start:start + n] valmap = {} for subval in splitchunks(val, SPLIT_VALUE_SIZE): subhash = hashlib.new('md4') subhash.update(subval) buf += subhash.digest() + struct.pack('!I', len(subval)) subkey = "%s-%d" % (subhash.hexdigest(), len(subval)) print("# %s: chunk %d" % (subkey, len(subval))) #mc.set(subkey, subval) valmap[subkey] = subval chunks = chunks + 1 mc.set_multi(valmap) mc.set(key, buf) else: mc.set(key, val) files = files + 1 blobs = blobs + 1 elif ext == '.stderr' or ext == '.d' or ext == '.dia': # was added above files = files + 1 elif ext == '.manifest': manifest = manifest + 1 key = "".join(list(os.path.split(dirname)) + [base]) val = open(os.path.join(dirpath, filename), 'rb').read() or None if val: print("%s: manifest %d" % (key, len(val))) mc.set(key, val, 0, 0) files = files + 1
from pymemcache.client.hash import HashClient

client = HashClient([
    ('memcached1', 11211),
    ('memcached2', 11212)
])
client.set('boom', 'foo')
class AutodiscoveryClient():
    def __init__(self, cluster_id):
        self.elasticache = boto3.client('elasticache', region_name='us-east-1')
        self.cluster_id = cluster_id
        self.servers = self.new_server_list()
        self.internal_scale = False
        self.hash_client = HashClient(self.servers,
                                      serializer=self.json_serializer,
                                      deserializer=self.json_deserializer,
                                      use_pooling=True)
        thread = TimerThread(self.check_cluster)
        thread.start()

    def new_server_list(self):
        new_servers = []
        response = self.elasticache.describe_cache_clusters(
            CacheClusterId=self.cluster_id,
            ShowCacheNodeInfo=True)
        self.endpoint = response['CacheClusters'][0]['ConfigurationEndpoint']
        nodes = response['CacheClusters'][0]['CacheNodes']
        for node in nodes:
            if 'Endpoint' in node:  # server may be coming up, no endpoint yet
                endpoint = (node['Endpoint']['Address'],
                            node['Endpoint']['Port'])
                new_servers.append(endpoint)
        return new_servers

    def check_cluster(self):
        print('Checking cluster')
        new_servers = self.new_server_list()
        try:
            new_server_set = set(new_servers)
            cur_server_set = set(self.servers)
            servers_changed = False
            if (new_server_set - cur_server_set) or (cur_server_set - new_server_set):
                print('Found a node difference')
                # self.dump_keys('all_keys.txt')
                servers_changed = True
            if new_server_set - cur_server_set:
                print('Server added')
                self.internal_scale = False
                self.hash_client = HashClient(new_servers,
                                              serializer=self.json_serializer,
                                              deserializer=self.json_deserializer,
                                              use_pooling=True)
                self.remap_keys(self.servers)
            # server removed not with our code
            if cur_server_set - new_server_set:
                print('Server removed')
                if self.internal_scale:
                    self.internal_scale = False
                else:
                    print('Removing server')
                    self.hash_client = HashClient(new_servers,
                                                  serializer=self.json_serializer,
                                                  deserializer=self.json_deserializer,
                                                  use_pooling=True)
            if servers_changed:
                self.servers = new_servers
                # self.dump_keys('all_keys_2.txt')
                self.internal_scale = False
        except Exception as e:
            print(str(e))

    def json_serializer(self, key, value):
        if type(value) == str:
            return value, 1
        return json.dumps(value), 2

    def json_deserializer(self, key, value, flags):
        if flags == 1:
            return value.decode('utf-8')
        if flags == 2:
            return json.loads(value.decode('utf-8'))
        raise Exception("Unknown serialization format")

    def remap_keys(self, server_list):
        # self.dump_keys(server_list)
        for endpoint in server_list:
            startTime = int(time.time())
            ip, port = endpoint
            print('Processing ' + ip)
            memcachedStats = MemcachedStats(ip, port)
            key_list = memcachedStats.keys()
            client = HashClient([endpoint],
                                serializer=self.json_serializer,
                                deserializer=self.json_deserializer)
            count = 0
            for key, expiry in key_list:
                if not self.hash_client.get(key):
                    val = client.get(key)
                    if val:
                        self.hash_client.set(key, val, expire=expiry,
                                             noreply=True)
                        count = count + 1
            endTime = int(time.time())
            print('Found {} keys. Remapped {} in {} seconds'.format(
                len(key_list), count, endTime - startTime))

    def dump_keys(self, server_list):
        count = 1
        for endpoint in server_list:
            ip, port = endpoint
            print('Dumping ' + ip)
            command = 'memdump --servers={}:{} > {}.txt'.format(ip, port, count)
            count = count + 1
            os.system(command)

    def add_node(self):
        if self.internal_scale:
            print('Cannot scale more than one at a time')
            return  # the original fell through here; bail out instead
        try:
            response = self.elasticache.describe_cache_clusters(
                # The original referenced an undefined global `cluster_id`.
                CacheClusterId=self.cluster_id,
                ShowCacheNodeInfo=True)
            count = response['CacheClusters'][0]['NumCacheNodes']
            self.internal_scale = True
            self.elasticache.modify_cache_cluster(
                CacheClusterId=self.cluster_id,
                NumCacheNodes=count + 1,
                ApplyImmediately=True)
            print('Added node {}'.format(count + 1))
        except Exception as e:
            print(str(e))

    def remove_node(self):
        if self.internal_scale:
            print('Cannot scale more than one at a time')
            return  # the original fell through here; bail out instead
        try:
            self.internal_scale = True
            response = self.elasticache.describe_cache_clusters(
                CacheClusterId=self.cluster_id,
                ShowCacheNodeInfo=True)
            count = response['CacheClusters'][0]['NumCacheNodes']
            nodes = response['CacheClusters'][0]['CacheNodes']
            node = nodes[count - 1]
            id_to_remove = node['CacheNodeId']
            endpoint = (node['Endpoint']['Address'], node['Endpoint']['Port'])
            print('Removing node: ' + str(endpoint) +
                  ' with id ' + str(id_to_remove))
            new_servers = self.servers.copy()
            new_servers.remove(endpoint)
            print('Remaining servers: ' + str(new_servers))
            self.hash_client = HashClient(new_servers,
                                          serializer=self.json_serializer,
                                          deserializer=self.json_deserializer,
                                          use_pooling=True)
            self.remap_keys(self.servers)
            # remove the node
            response = self.elasticache.modify_cache_cluster(
                CacheClusterId=self.cluster_id,
                NumCacheNodes=count - 1,
                CacheNodeIdsToRemove=[id_to_remove],
                ApplyImmediately=True)
            print(response)
        except Exception as e:
            print(str(e))
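# Hypothetical usage of the autodiscovery wrapper above (the cluster id is
# an assumption, and AWS credentials must be configured): the background
# timer keeps the HashClient's server list in sync with the ElastiCache
# cluster while reads and writes go through hash_client as usual.
autodiscovery = AutodiscoveryClient('my-memcached-cluster')
autodiscovery.hash_client.set('greeting', {'lang': 'en', 'text': 'hello'})
print(autodiscovery.hash_client.get('greeting'))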
from pymemcache.client.hash import HashClient

client = HashClient([('127.0.0.1', 11211), ('127.0.0.1', 11212)])
client.set('some_key', 'some value')
result = client.get('some_key')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @namespace pymemcache-client

from pymemcache.client.hash import HashClient
from pprintpp import pprint
import socket

host = socket.gethostbyname("localhost")
client = HashClient([
    (host, 11211),
    (host, 11212)
])
pprint(type(client))

# cache some value under some key and expire it after 10 seconds
try:
    client.set('some_key', 'some_value', expire=10)
except ConnectionRefusedError as ex:
    print("ConnectionRefusedError: {0}".format(ex))
except Exception as ex:
    print("Exception: {0}".format(ex))

# retrieve value for the same key
result = client.get('some_key')
pprint(result)
    # (end of run(), whose definition precedes this excerpt)
    return avg_rps, delta


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("addresses", type=str, help="where's my memcached?")
    parser.add_argument("--target", default=1000000, type=int,
                        help="how many requests to send")
    args = parser.parse_args()
    addresses = args.addresses.split(',')
    print(addresses)

    # start client and load memcached w/ 200 keys
    _, _, ips = socket.gethostbyname_ex(
        'mycache-memcached.default.svc.cluster.local')
    servers = [(ip, 11211) for ip in ips]
    client = HashClient(servers, use_pooling=True)
    # mc = pylibmc.Client(addresses, binary=True,
    #                     behaviors={"tcp_nodelay": True, "ketama": True})
    # pool = pylibmc.ClientPool(mc, 10)
    for n in range(200):
        client.set('bench_key_%d' % n, 'bench_value_%d' % n)

    # run and print results
    result = run(client, target=args.target)
    print("exp. time: %f seconds\navg. rps: %f" % (result[1], result[0]))
class MemcachedDriver(coordination._RunWatchersMixin,
                      coordination.CoordinationDriver):
    """A `memcached`_ based driver.

    This driver uses `memcached`_ concepts to provide the coordination
    driver semantics and required API(s). It **is** fully functional and
    implements all of the coordination driver API(s). It stores data into
    memcache using expiries and `msgpack`_ encoded values.

    General recommendations/usage considerations:

    - Memcache (without different backend technology) is a **cache** enough
      said.

    .. _memcached: http://memcached.org/
    .. _msgpack: http://msgpack.org/
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    #: Key prefix attached to groups (used in name-spacing keys)
    GROUP_PREFIX = b'_TOOZ_GROUP_'

    #: Key prefix attached to leaders of groups (used in name-spacing keys)
    GROUP_LEADER_PREFIX = b'_TOOZ_GROUP_LEADER_'

    #: Key prefix attached to members of groups (used in name-spacing keys)
    MEMBER_PREFIX = b'_TOOZ_MEMBER_'

    #: Key where all groups 'known' are stored.
    GROUP_LIST_KEY = b'_TOOZ_GROUP_LIST'

    #: Default socket/lock/member/leader timeout used when none is provided.
    DEFAULT_TIMEOUT = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"It's alive!"

    def __init__(self, member_id, parsed_url, options):
        super(MemcachedDriver, self).__init__()
        options = utils.collapse(options)
        self._options = options
        self._member_id = member_id
        self._joined_groups = set()
        self._executor = utils.ProxyExecutor.build("Memcached", options)
        # self.host = (parsed_url.hostname or "localhost",
        #              parsed_url.port or 11211)
        self.host = []
        for one_url in parsed_url:
            tmp = (one_url.hostname or "localhost", one_url.port or 11211)
            self.host.append(tmp)
        default_timeout = options.get('timeout', self.DEFAULT_TIMEOUT)
        self.timeout = int(default_timeout)
        self.membership_timeout = int(
            options.get('membership_timeout', default_timeout))
        self.lock_timeout = int(options.get('lock_timeout', default_timeout))
        self.leader_timeout = int(
            options.get('leader_timeout', default_timeout))
        max_pool_size = options.get('max_pool_size', None)
        if max_pool_size is not None:
            self.max_pool_size = int(max_pool_size)
        else:
            self.max_pool_size = None
        self._acquired_locks = []

    @staticmethod
    def _msgpack_serializer(key, value):
        if isinstance(value, six.binary_type):
            return value, 1
        return utils.dumps(value), 2

    @staticmethod
    def _msgpack_deserializer(key, value, flags):
        if flags == 1:
            return value
        if flags == 2:
            return utils.loads(value)
        raise coordination.SerializationError("Unknown serialization"
                                              " format '%s'" % flags)

    @_translate_failures
    def _start(self):
        # self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(self.host,
                                 serializer=self._msgpack_serializer,
                                 deserializer=self._msgpack_deserializer,
                                 timeout=self.timeout,
                                 connect_timeout=self.timeout,
                                 max_pool_size=self.max_pool_size)
        # Run heartbeat here because pymemcache uses a lazy connection
        # method and only connects once you do an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()

    @_translate_failures
    def _stop(self):
        for lock in list(self._acquired_locks):
            lock.release()
        self.client.delete(self._encode_member_id(self._member_id))
        for g in list(self._joined_groups):
            try:
                self.leave_group(g).get()
            except (coordination.MemberNotJoined,
                    coordination.GroupNotCreated):
                # Guess we got booted out/never existed in the first place...
                pass
            except coordination.ToozError:
                LOG.warning("Unable to leave group '%s'", g, exc_info=True)
        self._executor.stop()
        # self.client.close()

    def _encode_group_id(self, group_id):
        return self.GROUP_PREFIX + group_id

    def _encode_member_id(self, member_id):
        return self.MEMBER_PREFIX + member_id

    def _encode_group_leader(self, group_id):
        return self.GROUP_LEADER_PREFIX + group_id

    @_retry.retry()
    def _add_group_to_group_list(self, group_id):
        """Add group to the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        if cas:
            group_list = set(group_list)
            group_list.add(group_id)
            if not self.client.cas(self.GROUP_LIST_KEY,
                                   list(group_list), cas):
                # Someone updated the group list before us, try again!
                raise _retry.Retry
        else:
            if not self.client.add(self.GROUP_LIST_KEY,
                                   [group_id], noreply=False):
                # Someone updated the group list before us, try again!
                raise _retry.Retry

    @_retry.retry()
    def _remove_from_group_list(self, group_id):
        """Remove group from the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        group_list = set(group_list)
        group_list.remove(group_id)
        if not self.client.cas(self.GROUP_LIST_KEY,
                               list(group_list), cas):
            # Someone updated the group list before us, try again!
            raise _retry.Retry

    def create_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_translate_failures
        def _create_group():
            if not self.client.add(encoded_group, {}, noreply=False):
                raise coordination.GroupAlreadyExist(group_id)
            self._add_group_to_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_create_group))

    def get_groups(self):

        @_translate_failures
        def _get_groups():
            return self.client.get(self.GROUP_LIST_KEY) or []

        return MemcachedFutureResult(self._executor.submit(_get_groups))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _join_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id in group_members:
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            group_members[self._member_id] = {
                b"capabilities": capabilities,
            }
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.add(group_id)

        return MemcachedFutureResult(self._executor.submit(_join_group))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _leave_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            del group_members[self._member_id]
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.discard(group_id)

        return MemcachedFutureResult(self._executor.submit(_leave_group))

    def _destroy_group(self, group_id):
        self.client.delete(self._encode_group_id(group_id))

    def delete_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _delete_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if group_members != {}:
                raise coordination.GroupNotEmpty(group_id)
            # Delete is not atomic, so we first set the group to None
            # using CAS, and then we delete it, to avoid race conditions.
            if not self.client.cas(encoded_group, None, cas):
                raise _retry.Retry
            self.client.delete(encoded_group)
            self._remove_from_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_delete_group))

    @_retry.retry()
    @_translate_failures
    def _get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        group_members, cas = self.client.gets(encoded_group)
        if group_members is None:
            raise coordination.GroupNotCreated(group_id)
        actual_group_members = {}
        for m, v in six.iteritems(group_members):
            # Never kick self from the group, we know we're alive
            if (m == self._member_id or
                    self.client.get(self._encode_member_id(m))):
                actual_group_members[m] = v
        if group_members != actual_group_members:
            # There are some dead members, update the group
            if not self.client.cas(encoded_group,
                                   actual_group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
        return actual_group_members

    def get_members(self, group_id):

        def _get_members():
            return self._get_members(group_id).keys()

        return MemcachedFutureResult(self._executor.submit(_get_members))

    def get_member_capabilities(self, group_id, member_id):

        def _get_member_capabilities():
            group_members = self._get_members(group_id)
            if member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, member_id)
            return group_members[member_id][b'capabilities']

        return MemcachedFutureResult(
            self._executor.submit(_get_member_capabilities))

    def update_capabilities(self, group_id, capabilities):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _update_capabilities():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            group_members[self._member_id][b'capabilities'] = capabilities
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, try again
                raise _retry.Retry

        return MemcachedFutureResult(
            self._executor.submit(_update_capabilities))

    def get_leader(self, group_id):

        def _get_leader():
            return self._get_leader_lock(group_id).get_owner()

        return MemcachedFutureResult(self._executor.submit(_get_leader))

    @_translate_failures
    def heartbeat(self):
        self.client.set(self._encode_member_id(self._member_id),
                        self.STILL_ALIVE,
                        expire=self.membership_timeout)
        # Reset the acquired locks
        for lock in self._acquired_locks:
            lock.heartbeat()
        return min(self.membership_timeout,
                   self.leader_timeout,
                   self.lock_timeout)

    @_translate_failures
    def _init_watch_group(self, group_id):
        members = self.client.get(self._encode_group_id(group_id))
        if members is None:
            raise coordination.GroupNotCreated(group_id)
        # Initialize with the current group member list
        if group_id not in self._group_members:
            self._group_members[group_id] = set(members.keys())

    def watch_join_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_join_group(
            group_id, callback)

    def unwatch_join_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_join_group(
            group_id, callback)

    def watch_leave_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_leave_group(
            group_id, callback)

    def unwatch_leave_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_leave_group(
            group_id, callback)

    def watch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).watch_elected_as_leader(
            group_id, callback)

    def unwatch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_elected_as_leader(
            group_id, callback)

    def get_lock(self, name):
        return MemcachedLock(self, name, self.lock_timeout)

    def _get_leader_lock(self, group_id):
        return MemcachedLock(self, self._encode_group_leader(group_id),
                             self.leader_timeout)

    @_translate_failures
    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            # Try to grab the lock, if that fails, that means someone has it
            # already.
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(group_id,
                                                     self._member_id))

    def run_watchers(self, timeout=None):
        result = super(MemcachedDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
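# A brief sketch of how a driver like this is normally reached through the
# public tooz API rather than instantiated directly (the URL and member id
# are illustrative):
from tooz import coordination

coordinator = coordination.get_coordinator(
    'memcached://127.0.0.1:11211', b'member-1')
coordinator.start()
lock = coordinator.get_lock(b'my-lock')
with lock:
    pass  # critical section, guarded cluster-wide via memcached
coordinator.stop()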
class CouchbaseMemcacheMirror(object):
    def __init__(self, couchbase_uri, memcached_hosts, primary=PRIMARY_COUCHBASE):
        """
        :param couchbase_uri: Connection string for Couchbase
        :param memcached_hosts: List of Memcached nodes
        :param primary: Determines which datastore is authoritative.
            This affects how get operations are performed and which
            datastore is used for CAS operations.
            PRIMARY_COUCHBASE: Couchbase is authoritative
            PRIMARY_MEMCACHED: Memcached is authoritative
            By default, Couchbase is the primary store
        :return:
        """
        self.cb = CbBucket(couchbase_uri)
        self.mc = McClient(memcached_hosts)
        self._primary = primary

    @property
    def primary(self):
        return self._primary

    def _cb_get(self, key):
        try:
            return self.cb.get(key).value
        except NotFoundError:
            return None

    def get(self, key, try_alternate=True):
        """
        Gets a document
        :param key: The key to retrieve
        :param try_alternate: Whether to try the secondary data source if the
            item is not found in the primary.
        :return: The value as a Python object
        """
        if self._primary == PRIMARY_COUCHBASE:
            order = [self._cb_get, self.mc.get]
        else:
            order = [self.mc.get, self._cb_get]
        for meth in order:
            ret = meth(key)
            if ret or not try_alternate:
                return ret
        return None

    def _cb_mget(self, keys):
        """
        Internal method to execute a Couchbase multi-get
        :param keys: The keys to retrieve
        :return: A tuple of {found_key:found_value, ...}, [missing_key1,...]
        """
        try:
            ok_rvs = self.cb.get_multi(keys)
            bad_rvs = {}
        except NotFoundError as e:
            ok_rvs, bad_rvs = e.split_results()
        ok_dict = {k: (v.value, v.cas) for k, v in ok_rvs}
        return ok_dict, bad_rvs.keys()

    def get_multi(self, keys, try_alternate=True):
        """
        Gets multiple items from the server
        :param keys: The keys to fetch as an iterable
        :param try_alternate: Whether to fetch missing items from alternate
            store
        :return: A dictionary of key:value. Only contains keys which exist
            and have values
        """
        if self._primary == PRIMARY_COUCHBASE:
            # The original called self._cb_get here; _cb_mget is the
            # multi-key variant whose return shape is unpacked below.
            ok, err = self._cb_mget(keys)
            if err and try_alternate:
                ok.update(self.mc.get_many(err))
            return ok
        else:
            ok = self.mc.get_many(keys)
            if len(ok) < len(keys) and try_alternate:
                keys_err = set(keys) - set(ok)
                ok.update(self._cb_mget(list(keys_err))[0])
            return ok

    def gets(self, key):
        """
        Get an item with its CAS. The item will always be fetched from the
        primary data store.
        :param key: the key to get
        :return: the value of the key, or None if no such value
        """
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rv = self.cb.get(key)
                return key, rv.cas
            except NotFoundError:
                return None, None
        else:
            return self.mc.gets(key)

    def gets_multi(self, keys):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rvs = self.cb.get_multi(keys)
            except NotFoundError as e:
                rvs, _ = e.split_results()
            return {k: (v.value, v.cas) for k, v in rvs}
        else:
            # TODO: I'm not sure if this is implemented in HashClient :(
            return self.mc.gets_many(keys)

    def delete(self, key):
        st = Status()
        try:
            self.cb.remove(key)
        except NotFoundError as e:
            st.cb_error = e
        st.mc_status = self.mc.delete(key)
        return st

    def delete_multi(self, keys):
        st = Status()
        try:
            self.cb.remove_multi(keys)
        except NotFoundError as e:
            st.cb_error = e
        st.mc_status = self.mc.delete_many(keys)
        return st  # the original omitted this return

    def _do_incrdecr(self, key, value, is_incr):
        cb_value = value if is_incr else -value
        mc_meth = self.mc.incr if is_incr else self.mc.decr
        st = Status()
        try:
            self.cb.counter(key, delta=cb_value)
        except NotFoundError as e:
            st.cb_error = e
        st.mc_status = mc_meth(key, value)
        return st  # the original omitted this return

    def incr(self, key, value):
        return self._do_incrdecr(key, value, True)

    def decr(self, key, value):
        return self._do_incrdecr(key, value, False)

    def touch(self, key, expire=0):
        st = Status()
        try:
            self.cb.touch(key, ttl=expire)
        except NotFoundError as e:
            st.cb_error = e  # the original assigned `st` to itself here
        st.mc_status = self.mc.touch(key)
        return st

    def set(self, key, value, expire=0):
        """
        Write first to Couchbase, and then to Memcached
        :param key: Key to use
        :param value: Value to use
        :param expire: If set, the item will expire in the given amount of
            time
        :return: Status object if successful (will always be success).
            On failure an exception is raised
        """
        self.cb.upsert(key, value, ttl=expire)
        self.mc.set(key, value, expire=expire)
        return Status()

    def set_multi(self, values, expire=0):
        """
        Set multiple items.
        :param values: A dictionary of key, value indicating values to store
        :param expire: If present, expiration time for all the items
        :return:
        """
        self.cb.upsert_multi(values, ttl=expire)
        self.mc.set_many(values, expire=expire)
        return Status()

    def replace(self, key, value, expire=0):
        """
        Replace existing items
        :param key: key to replace
        :param value: new value
        :param expire: expiration for item
        :return: Status object. Will be OK
        """
        status = Status()
        try:
            self.cb.replace(key, value, ttl=expire)
        except NotFoundError as e:
            status.cb_error = e
        status.mc_status = self.mc.replace(key, value, expire=expire)
        return status

    def add(self, key, value, expire=0):
        status = Status()
        try:
            self.cb.insert(key, value, ttl=expire)
        except KeyExistsError as e:
            status.cb_error = e
        status.mc_status = self.mc.add(key, value, expire=expire)
        return status

    def _append_prepend(self, key, value, is_append):
        cb_meth = self.cb.append if is_append else self.cb.prepend
        mc_meth = self.mc.append if is_append else self.mc.prepend
        st = Status()
        try:
            cb_meth(key, value, format=FMT_UTF8)
        except (NotStoredError, NotFoundError) as e:
            st.cb_error = e
        st.mc_status = mc_meth(key, value)
        return st  # the original omitted this return

    def append(self, key, value):
        return self._append_prepend(key, value, True)

    def prepend(self, key, value):
        return self._append_prepend(key, value, False)

    def cas(self, key, value, cas, expire=0):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                self.cb.replace(key, value, cas=cas, ttl=expire)
                # pymemcache's set() takes `expire`, not `ttl` as the
                # original passed.
                self.mc.set(key, value, expire=expire)
                return True
            except KeyExistsError:
                return False
            except NotFoundError:
                return None
        else:
            return self.mc.cas(key, value, cas)
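# Hypothetical wiring of the mirror above (the Couchbase URI and memcached
# host list are illustrative, not values from the original):
mirror = CouchbaseMemcacheMirror(
    'couchbase://localhost/default',
    [('127.0.0.1', 11211)],
    primary=PRIMARY_COUCHBASE)
mirror.set('user:42', {'name': 'Ada'})
print(mirror.get('user:42'))  # served from Couchbase, falls back to memcached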
class MemcachedCache(CacheBase):
    """
    Memcached-backed cache implementation.

    Compatible with AWS ElastiCache when using their memcached interface.
    """

    def __init__(
        self,
        servers: Optional[List[Tuple[str, int]]] = None,
        connect_timeout=None,
        read_timeout=None,
        serde=None,
        testing=False,
        ignore_exc=False,
    ):
        client_kwargs = dict(
            connect_timeout=connect_timeout,
            timeout=read_timeout,
            serde=serde or JsonSerializerDeserializer(),
            ignore_exc=ignore_exc,
        )
        if testing:
            self.client = MockMemcacheClient(
                server=None,
                **client_kwargs,
            )
        else:
            self.client = HashClient(
                servers=servers,
                **client_kwargs,
            )

    def get(self, key: str):
        """
        Return the value for a key, or None if not found
        """
        return self.client.get(key)

    def add(self, key: str, value, ttl=None):
        """
        Set the value for a key, but only if that key hasn't already been set
        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0
        # NB: If input is malformed, this will not raise errors.
        # Set `noreply` to False for further debugging.
        return self.client.add(key, value, expire=ttl)

    def set(self, key: str, value, ttl=None):
        """
        Set the value for a key, overwriting any existing value
        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0
        # NB: If input is malformed, this will not raise errors.
        # Set `noreply` to False for further debugging.
        return self.client.set(key, value, expire=ttl)

    def set_many(self, values, ttl=None):
        """
        Set many key-value pairs at once, overwriting any existing values
        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0
        return self.client.set_many(values, expire=ttl)
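# A quick sketch of exercising the cache above in test mode, which routes
# through pymemcache's MockMemcacheClient so no server is needed (the key
# names and TTL are illustrative):
cache = MemcachedCache(testing=True)
cache.set('answer', 42, ttl=60)
print(cache.get('answer'))  # -> 42, round-tripped through the JSON serde
cache.set_many({'a': 1, 'b': 2})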
PORT = 24000
# memc = base.Client(('127.0.0.1', 11211))
memc = HashClient([('localhost', 11211), ('localhost', 11212)])
conn = _mysql.connect(host="localhost", user="******", passwd="tenzin",
                      db="cs632")

popularfilms = memc.get('top5films')
if not popularfilms:
    # cursor = conn.cursor()
    qu = 'SELECT * FROM filmorder'
    conn.query(qu)
    rows = conn.store_result()
    rows = rows.fetch_row(how=1, maxrows=0)
    # for x in ro
    # print(rows)
    memc.set('top5films', rows, 60)
    print("Updated memcached with MySQL data")
else:
    print("Loaded data from memcached")
    print(popularfilms)

# The variable name below was censored in the source; `bunt_val` is a
# stand-in matching the key being fetched.
bunt_val = memc.get('bunt')
if not bunt_val:
    asdf = {'heck': 'asdf', 'sdaf': 'reasf'}
    memc.set('bunt', asdf, 60)
else:
    print(bunt_val.decode('UTF-8'))