from threading import Lock
from types import MethodType

# Cluster, MemcacheRing, RepeatTimer and elasticache_logger are assumed to be
# defined elsewhere in this package.


class MemcacheClient():
    """ Do autodiscovery for an ElastiCache memcache cluster. """

    def __init__(self, endpoint, autodiscovery_timeout=10,
                 autodiscovery_interval=60, *args, **kwargs):
        """
        Create a new Client object and launch a timer for it.
        @param endpoint: String, something like:
            test.lwgyhw.cfg.usw2.cache.amazonaws.com:11211
        @param autodiscovery_timeout: Number, seconds for the socket
            connection timeout when doing autodiscovery
        @param autodiscovery_interval: Number, seconds between cluster
            status checks
        @param client_debug: String, a file name; if set, debug messages
            are written to that file
        All other parameters are passed through to python-memcached.
        """
        self.endpoint = endpoint
        self.autodiscovery_timeout = autodiscovery_timeout
        elasticache_logger.debug('endpoint: %s' % endpoint)
        self.cluster = Cluster(endpoint, autodiscovery_timeout)
        self.ring = MemcacheRing(self.cluster.servers, *args, **kwargs)
        self.need_update = False
        self.lock = Lock()
        self.timer = RepeatTimer('autodiscovery', autodiscovery_interval,
                                 self._update)
        self.timer.start()

    def __getattr__(self, key):
        # Proxy unknown attributes to the underlying MemcacheRing, refreshing
        # its server list first if autodiscovery flagged a change.
        if not hasattr(self.ring, key):
            msg = "'%s' object has no attribute '%s'" % (type(self).__name__, key)
            raise AttributeError(msg)
        ori_func = getattr(self.ring, key)

        def tmp_func(self, *args, **kwargs):
            self.lock.acquire(True)
            if self.need_update:
                self.ring.set_servers(self.cluster.servers)
                self.need_update = False
            self.lock.release()
            return ori_func(*args, **kwargs)

        tmp_func.__name__ = key
        return MethodType(tmp_func, self)

    def _update(self):
        # Re-read the configuration endpoint; if the cluster version changed,
        # flag the ring for a server-list refresh on the next proxied call.
        try:
            cluster = Cluster(self.endpoint, self.autodiscovery_timeout)
        except Exception as e:
            elasticache_logger.debug(e)
            return
        if cluster.version != self.cluster.version:
            self.lock.acquire(True)
            self.cluster = cluster
            self.need_update = True
            self.lock.release()
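# A minimal usage sketch for the client above (the endpoint is the example
# configuration endpoint from the docstring; extra keyword arguments are
# handed to python-memcached):
mc = MemcacheClient('test.lwgyhw.cfg.usw2.cache.amazonaws.com:11211',
                    autodiscovery_interval=30)
mc.set('hello', 'world')   # proxied to MemcacheRing via __getattr__
print(mc.get('hello'))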
from hash_ring import MemcacheRing

# MemcacheRing offers the python-memcached Client interface but places keys
# on servers with consistent hashing.
mc = MemcacheRing(['127.0.0.1:11212'])
mc.set('hello', 'world')
print(mc.get('hello'))
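# A small sketch (assumes a second memcached node on 127.0.0.1:11213): because
# placement is a consistent-hash ring, growing the pool with set_servers()
# remaps only the keys that land on the new node.
mc.set_servers(['127.0.0.1:11212', '127.0.0.1:11213'])
print(mc.get('hello'))  # most existing keys still resolve to their old node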
def setUp(self):
    # hosts is assumed to be a module-level list of memcached addresses,
    # e.g. ['127.0.0.1:11211']. pylibmc with ketama enabled serves as the
    # reference consistent-hashing client.
    self.pmc = pylibmc.Client(hosts)
    self.pmc.behaviors = {'ketama': True}
    self.mcr = MemcacheRing(hosts)
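# A hypothetical test method for the fixture above (not from the original
# source): it assumes MemcacheRing's ring is ketama-compatible with pylibmc's,
# so a plain string value written through one client is readable through the
# other.
def test_cross_client_read(self):
    self.mcr.set('ring_key', 'ring_value')
    self.assertEqual(self.pmc.get('ring_key'), 'ring_value')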
import logging

from django.conf import settings
from django.utils.encoding import smart_unicode

# MemcCacheClass and MemClient are assumed to be the stock Django memcached
# cache backend and the python-memcached Client; gm_smart_str, _parsekey,
# _ispiedkey and _calcsize are project helpers defined elsewhere in this
# module.

log = logging.getLogger(__name__)


class CacheClass(MemcCacheClass):

    def __init__(self, server, params):
        MemcCacheClass.__init__(self, server, params)
        self._cache = MemClient(
            server.split(';'),
            pickleProtocol=getattr(settings, 'CACHE_PICKLE_PROTOCOL', 0))

    def add(self, key, value, timeout=0):
        """ Debug logging added. """
        ### value to utf-8
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        ### debug
        srv, k = _parsekey(key)
        if _ispiedkey(k):
            # compute the size
            size = _calcsize(value)
            log.debug('ADD %s %s %s', k, srv, size)
        ### add
        return self._cache.add(
            gm_smart_str(key), value, timeout or self.default_timeout,
            getattr(settings, 'CACHE_MIN_COMPRESSION_LEN', 0))

    def get(self, key, default=None):
        """
        Logging added for selected (spied) keys:
        info: such-and-such key is missing
        debug: every cache lookup
        """
        ### get value
        val = self._cache.get(gm_smart_str(key))
        ### debug
        srv, k = _parsekey(key)
        if _ispiedkey(k):
            # compute the size
            size = _calcsize(val)
            # log
            if val is None:
                log.info("%s %s %s %s", 'MISS', k, srv, size)
            else:
                log.debug("%s %s %s %s", 'HIT', k, srv, size)
        # return value
        if val is None:
            return default
        elif isinstance(val, basestring):
            return smart_unicode(val)
        else:
            return val

    def set(self, key, value, timeout=0, zip=True):
        """
        Added a min_compression_len feature so that large values can be
        compressed.
        """
        ### value to utf-8
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        srv, k = _parsekey(key)
        if _ispiedkey(k):
            # compute the size
            size = _calcsize(value)
            # log
            log.debug("%s %s %s %s", 'SET', k, srv, size)
        ### set
        return self._cache.set(
            gm_smart_str(key), value, timeout or self.default_timeout,
            getattr(settings, 'CACHE_MIN_COMPRESSION_LEN', 0) if zip else 0)

    def delete(self, key):
        """ Normal delete + DEBUG + returning the value. """
        ### debug
        srv, k = _parsekey(key)
        if _ispiedkey(k):
            log.debug('DELETE %s %s', k, srv)
        ### delete
        return self._cache.delete(gm_smart_str(key))

    def get_many(self, keys):
        """ get_many with key-tracing support. """
        spiedkeys = [k for k in keys if _ispiedkey(k)]
        if spiedkeys:
            log.debug('GETMULTI %s', ','.join(spiedkeys))
        return self._cache.get_multi(map(gm_smart_str, keys))
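# A hypothetical settings.py fragment (the module path is a placeholder):
# with the old-style Django CACHE_BACKEND URI, the backend receives the
# semicolon-separated host list as `server`, and the two settings read by
# the class above tune pickling and compression.
CACHE_BACKEND = 'myproject.cache_backend://127.0.0.1:11211;127.0.0.1:11212/'
CACHE_PICKLE_PROTOCOL = 2          # pickle protocol handed to python-memcached
CACHE_MIN_COMPRESSION_LEN = 1024   # compress values larger than ~1 KB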