Example #1
import logging

from pymemcache.client.hash import HashClient


def main_hashclient_test():
    # A second node could be listed here, e.g. ('192.168.204.128', 11212)
    client = HashClient([('192.168.204.128', 11211)])
    client.set('heshanshan', 'some value')
    result = client.get('heshanshan')

    logging.debug(result)
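
Note on the snippet above: with pymemcache's default default_noreply=True, set() returns without waiting for a server acknowledgement. A minimal sketch of requesting one, reusing the example's client (the address is an assumption about the environment):

ok = client.set('heshanshan', 'some value', noreply=False)
if not ok:
    logging.warning('memcached set failed')
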
Example #2
    def test_unavailable_servers_zero_retry_raise_exception(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [('example.com', 11211)], use_pooling=True,
            ignore_exc=False,
            retry_attempts=0, timeout=1, connect_timeout=1
        )

        with pytest.raises(socket.error):
            client.get('foo')
Example #3
    def test_no_servers_left_with_commands_return_default_value(self):
        from pymemcache.client.hash import HashClient

        client = HashClient([],
                            use_pooling=True,
                            ignore_exc=True,
                            timeout=1,
                            connect_timeout=1)

        result = client.get("foo")
        assert result is None
        result = client.get("foo", default="default")
        assert result == "default"
        result = client.set("foo", "bar")
        assert result is False
Example #4
class MemcacheHashCli(object):
    def __init__(self, hosts):
        self.client = HashClient(hosts)

    def set(self, key, value, expire):
        try:
            return self.client.set(key, value, expire)
        except Exception:
            return False

    def get(self, key):
        try:
            return self.client.get(key, default=None)
        except Exception:
            return None

    def mset(self, values, expire):
        try:
            return self.client.set_many(values, expire)
        except Exception:
            return False

    def mget(self, keys):
        try:
            return self.client.get_many(keys)
        except Exception:
            return None
Example #5
    def test_dead_server_comes_back(self, client_patch):
        client = HashClient([], dead_timeout=0, retry_attempts=0)
        client.add_server("127.0.0.1", 11211)

        test_client = client_patch.return_value
        test_client.server = ("127.0.0.1", 11211)

        test_client.get.side_effect = socket.timeout()
        with pytest.raises(socket.timeout):
            client.get(b"key", noreply=False)
        # Client gets removed because of socket timeout
        assert ("127.0.0.1", 11211) in client._dead_clients

        test_client.get.side_effect = lambda *_: "Some value"
        # Client should be retried and brought back
        assert client.get(b"key") == "Some value"
        assert ("127.0.0.1", 11211) not in client._dead_clients
Example #6
    def test_failed_is_retried(self, client_patch):
        client = HashClient([], retry_attempts=1, retry_timeout=0)
        client.add_server("127.0.0.1", 11211)

        assert client_patch.call_count == 1

        test_client = client_patch.return_value
        test_client.server = ("127.0.0.1", 11211)

        test_client.get.side_effect = socket.timeout()
        with pytest.raises(socket.timeout):
            client.get(b"key", noreply=False)

        test_client.get.side_effect = lambda *_: "Some value"
        assert client.get(b"key") == "Some value"

        assert client_patch.call_count == 1
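
Read together, the two tests above exercise HashClient's failover knobs. A hedged configuration sketch (the values are illustrative, not recommendations):

from pymemcache.client.hash import HashClient

client = HashClient(
    [('127.0.0.1', 11211)],
    retry_attempts=2,   # failed attempts before a node is marked dead
    retry_timeout=1,    # seconds to wait between retry attempts
    dead_timeout=60,    # seconds before a dead node is tried again
)
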
Example #7
class MemCacheHelper(object):
    def __init__(self):
        self.client = HashClient([(MEMCACHED_ENDPOINT, MEMCACHED_PORT)])

    def set(self, key, value):
        self.client.set(key, value)

    def get(self, key):
        return self.client.get(key)
Example #8
class Memcache(object):
    """
    This class is an interface to a memcache storage backend
    it implements needed methods to store and fetch for an url
    shortener.
    """

    from pymemcache.client.hash import HashClient

    def __init__(self, host, key_expiration, username=None, password=None):
        """
        Instantiate a memcache storage backend object
        @params:
            host: the server to connect to
            key_expiration: key expiration in seconds
            username: not used
            password: not used
        @returns:
        """
        # HashClient expects a list of (host, port) pairs, and timeouts in
        # seconds (the original passed a 'host port' string and boolean
        # timeouts, which is not a valid configuration)
        self.client = HashClient(servers=[(host, 11211)],
                                 connect_timeout=1,
                                 timeout=1,
                                 use_pooling=True,
                                 max_pool_size=100)
        self.key_expiration = key_expiration

    def set_value(self, key, value):
        """
        Set a new record in the database for a short code
        @params:
            key: the short code to insert
            value: the corresponding long url
        @returns:
            True or False if succeeded or not
        """
        return self.client.set(key, value.encode('utf-8'), self.key_expiration)

    def get_value(self, key):
        """
        Get a long url from its shortened form
        @params:
            key: the short form to lookup
        @returns:
            a long url if found, None otherwise
        """
        res = self.client.get(key)
        if res is not None:
            return res.decode('utf-8')
        return None

    def get_stat(self, key):
        pass

    def get_all(self):
        pass
Example #10
class MetricCache:

    def __init__(self, server_list):
        # server_list = list of (host,port)
        self.client = HashClient(server_list)

    def increment(self, *, metric_name, value):
        if not self.client.get(metric_name):
            self.client.set(metric_name, 0)
        return self.client.incr(metric_name, value)
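
The get/set pair in increment() above is racy: two concurrent writers can both see a miss and reset the counter. A hedged sketch of an atomic variant using add(), which only succeeds for the first writer (the server address is an assumption):

from pymemcache.client.hash import HashClient

client = HashClient([('127.0.0.1', 11211)])

def increment(metric_name, value):
    # add() is a no-op when the key already exists, so initialisation
    # cannot clobber a concurrent writer's count
    client.add(metric_name, 0, noreply=False)
    return client.incr(metric_name, value)
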
Example #11
    def test_no_servers_left_with_commands(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.get('foo')
        assert result is False
Example #12
def get_game_info(group_id):
    """
    Get the game info for a group.

    :param group_id: ID for the group
    :type group_id: String
    :return: String representation of group information
    """
    nodes = elasticache_auto_discovery.discover('hackgt5mem.gy46cz.cfg.use1.cache.amazonaws.com:11211')
    nodes = map(lambda x: (x[1], int(x[2])), nodes)
    memcache_client = HashClient(nodes)

    return memcache_client.get(str(group_id)).decode("utf-8")
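
One caveat for the lookup above: get() returns None on a cache miss, so the decode() call raises AttributeError for unknown groups. A guarded variant of the final return (a drop-in sketch):

value = memcache_client.get(str(group_id))
return value.decode('utf-8') if value is not None else None
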
Example #13
    def test_no_servers_left_return_positional_default(self):
        from pymemcache.client.hash import HashClient

        client = HashClient([],
                            use_pooling=True,
                            ignore_exc=True,
                            timeout=1,
                            connect_timeout=1)

        # Ensure compatibility with clients that pass the default as a
        # positional argument
        result = client.get("foo", "default")
        assert result == "default"
Example #14
class MemcacheMap(Map):
    def open(self):
        # servers must be given as a list of (host, port) pairs
        self._db = HashClient([('127.0.0.1', 11211)])
        return self

    def _put(self, key: Key, value: bytes) -> Key:
        self._db.set(key_bytes(key), value)
        return key

    def _get(self, uuid: UUID, time: int) -> Optional[bytes]:
        # the original referenced an undefined `key`; assuming Key can be
        # built from (uuid, time) to mirror _put
        return self._db.get(key_bytes(Key(uuid, time)))

    def close(self):
        del self._db
        return self
Example #15
class MemCache(Cache):
    def __init__(self, memcache_conf_url, key_expiry_secs):
        try:
            self._conf_url = memcache_conf_url
            self._expire_secs = key_expiry_secs
            print("config url", self._conf_url)
            nodes = elasticache_auto_discovery.discover(self._conf_url)
            print("nodes 1:", nodes)
            nodes = list(map(lambda x: (x[1], int(x[2])), nodes))
            print("nodes 2:", nodes)
            self._client = HashClient(
                nodes,
                serde=serde.PickleSerde(pickle_version=3),
                default_noreply=False,
                ignore_exc=False,
                connect_timeout=3,
                timeout=5,
                retry_attempts=3,
                retry_timeout=1)
            print("memcache client", self._client)
        except Exception as e:
            print("MemCache exception", e)
            print(traceback.format_exc())
        finally:
            print("~MemCache constructor")

    def reinit(self):
        return MemCache(self._conf_url, self._expire_secs)

    def set(self, cache_key, value):
        try:
            self._client.set(cache_key, value, expire=self._expire_secs)
            #print("wrote to memcache {} = {}".format(cache_key, value))
        except Exception as e:
            print("ERROR writing '{}' to memcache: ".format(cache_key, e))
            raise e

    def get(self, cache_key):
        return self._client.get(cache_key)

    def multiget(self, cache_keys):
        # pymemcache's HashClient exposes get_many (there is no get_multi)
        return self._client.get_many(cache_keys)

    def delete(self, cache_key):
        self._client.delete(cache_key)
Example #16
    def GetQuery(self):
        servers = settings.JSON_SETTINGS['MemcachedServers']
        cServers = []
        for server in servers:
            tmp = server.split(':')
            cServers += [(tmp[0], int(tmp[1]))]
        cl = HashClient(cServers)
        q = cl.get('queue')
        if q is None:
            self.QueryStat()
            queue = []
            for event in self.events:
                queue += [event['Queue']]
            cl.set('queue', ','.join(queue),
                   settings.JSON_SETTINGS['MemcachedExpire'])
            return queue
        else:
            return q.decode("utf-8").split(',')
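
The 'host:port' parsing above recurs in several of these examples. A small helper (sketch) doing the same conversion:

def parse_servers(specs):
    """Turn ['h1:11211', 'h2:11212'] into [('h1', 11211), ('h2', 11212)]."""
    return [(host, int(port))
            for host, port in (s.split(':') for s in specs)]
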
Example #17
class Memcached(Cache):
    """Implements a cache"""
    def __init__(self, servers="127.0.0.1:11211", **kwargs):
        """Constructor"""
        Cache.__init__(self, **kwargs)
        if isinstance(servers, string_types):
            servers = [s.strip() for s in servers.split(",")]
        self.cache = HashClient(servers, use_pooling=True)
        self.timeout = int(kwargs.get("timeout", 0))

    def getKey(self, tile):
        """Get the key for this tile"""
        return "/".join(map(str, [tile.layer.name, tile.x, tile.y, tile.z]))

    def get(self, tile):
        """Get the cache data"""
        key = self.getKey(tile)
        tile.data = self.cache.get(key)
        return tile.data

    def set(self, tile, data):
        """Set the cache data"""
        if self.readonly:
            return data
        key = self.getKey(tile)
        self.cache.set(key, data, self.timeout)
        return data

    def delete(self, tile):
        """Delete a tile from the cache"""
        key = self.getKey(tile)
        self.cache.delete(key)

    def attemptLock(self, tile):
        """Attempt to lock the cache for a tile"""
        # memcached expects an integer expiry; values this large are treated
        # as an absolute unix timestamp
        return self.cache.add(self.getLockName(tile), "0",
                              int(time.time() + self.timeout))

    def unlock(self, tile):
        """Attempt to unlock the cache for a tile"""
        self.cache.delete(self.getLockName(tile))
Example #18
    def remap_keys(self, server_list):
        # self.dump_keys(server_list)
        for endpoint in server_list:
            startTime = int(time.time())
            ip, port = endpoint
            print('Processing ' + ip)
            memcachedStats = MemcachedStats(ip, port)
            key_list = memcachedStats.keys()
            client = HashClient([endpoint],
                                serializer=self.json_serializer,
                                deserializer=self.json_deserializer)
            count = 0
            for key, expiry in key_list:
                if not self.hash_client.get(key):
                    val = client.get(key)
                    if val:
                        self.hash_client.set(key,
                                             val,
                                             expire=expiry,
                                             noreply=True)
                        count = count + 1
            endTime = int(time.time())
            print('Found {} keys. Remapped {} in {} seconds'.format(
                len(key_list), count, endTime - startTime))
Example #19
class AutodiscoveryClient():
    def __init__(self, cluster_id):
        self.elasticache = boto3.client('elasticache', region_name='us-east-1')
        self.cluster_id = cluster_id
        self.servers = self.new_server_list()
        self.internal_scale = False
        self.hash_client = HashClient(self.servers,
                                      serializer=self.json_serializer,
                                      deserializer=self.json_deserializer,
                                      use_pooling=True)
        thread = TimerThread(self.check_cluster)
        thread.start()

    def new_server_list(self):
        new_servers = []
        response = self.elasticache.describe_cache_clusters(
            CacheClusterId=self.cluster_id, ShowCacheNodeInfo=True)
        self.endpoint = response['CacheClusters'][0]['ConfigurationEndpoint']
        nodes = response['CacheClusters'][0]['CacheNodes']
        for node in nodes:
            if 'Endpoint' in node:  # server may be coming up, no endpoint yet
                endpoint = (node['Endpoint']['Address'],
                            node['Endpoint']['Port'])
                new_servers.append(endpoint)
        return new_servers

    def check_cluster(self):
        print('Checking cluster')
        new_servers = self.new_server_list()

        try:
            new_server_set = set(new_servers)
            cur_server_set = set(self.servers)
            servers_changed = False
            if (new_server_set - cur_server_set) or (cur_server_set -
                                                     new_server_set):
                print('Found a node difference')
                # self.dump_keys('all_keys.txt')
                servers_changed = True

            if (new_server_set - cur_server_set):
                print('Server added')
                self.internal_scale = False
                self.hash_client = HashClient(
                    new_servers,
                    serializer=self.json_serializer,
                    deserializer=self.json_deserializer,
                    use_pooling=True)
                self.remap_keys(self.servers)

            # server removed not with our code
            if (cur_server_set - new_server_set):
                print('Server removed')
                if self.internal_scale:
                    self.internal_scale = False
                else:
                    print('Removing server')
                    self.hash_client = HashClient(
                        new_servers,
                        serializer=self.json_serializer,
                        deserializer=self.json_deserializer,
                        use_pooling=True)

            if servers_changed:
                self.servers = new_servers
                #               self.dump_keys('all_keys_2.txt')
                self.internal_scale = False

        except Exception as e:
            print(str(e))

    def json_serializer(self, key, value):
        if type(value) == str:
            return value, 1
        return json.dumps(value), 2

    def json_deserializer(self, key, value, flags):
        if flags == 1:
            return value.decode('utf-8')
        if flags == 2:
            return json.loads(value.decode('utf-8'))
        raise Exception("Unknown serialization format")

    def remap_keys(self, server_list):
        #       self.dump_keys(server_list)
        for endpoint in server_list:
            startTime = int(time.time())
            ip, port = endpoint
            print('Processing ' + ip)
            memcachedStats = MemcachedStats(ip, port)
            key_list = memcachedStats.keys()
            client = HashClient([endpoint],
                                serializer=self.json_serializer,
                                deserializer=self.json_deserializer)
            count = 0
            for key, expiry in key_list:
                if not self.hash_client.get(key):
                    val = client.get(key)
                    if val:
                        self.hash_client.set(key,
                                             val,
                                             expire=expiry,
                                             noreply=True)
                        count = count + 1
            endTime = int(time.time())
            print('Found {} keys. Remapped {} in {} seconds'.format(
                len(key_list), count, endTime - startTime))

    def dump_keys(self, server_list):
        count = 1
        for endpoint in server_list:
            ip, port = endpoint
            print('Dumping ' + ip)
            command = 'memdump --servers={}:{} > {}.txt'.format(
                ip, port, count)
            count = count + 1
            os.system(command)

    def add_node(self):
        if self.internal_scale:
            print('Cannot scale more than one at a time')
            return
        try:
            response = self.elasticache.describe_cache_clusters(
                CacheClusterId=self.cluster_id, ShowCacheNodeInfo=True)
            count = response['CacheClusters'][0]['NumCacheNodes']
            self.internal_scale = True
            self.elasticache.modify_cache_cluster(
                CacheClusterId=self.cluster_id,
                NumCacheNodes=count + 1,
                ApplyImmediately=True)
            print('Added node {}'.format(count + 1))
        except Exception as e:
            print(str(e))

    def remove_node(self):
        if self.internal_scale:
            print('Cannot scale more than one at a time')
            return
        try:
            self.internal_scale = True
            response = self.elasticache.describe_cache_clusters(
                CacheClusterId=self.cluster_id, ShowCacheNodeInfo=True)
            count = response['CacheClusters'][0]['NumCacheNodes']
            nodes = response['CacheClusters'][0]['CacheNodes']
            node = nodes[count - 1]
            id_to_remove = node['CacheNodeId']
            endpoint = (node['Endpoint']['Address'], node['Endpoint']['Port'])
            print('Removing node: ' + str(endpoint) + ' with id ' +
                  str(id_to_remove))

            new_servers = self.servers.copy()
            new_servers.remove(endpoint)
            print('Remaining servers: ' + str(new_servers))
            self.hash_client = HashClient(new_servers,
                                          serializer=self.json_serializer,
                                          deserializer=self.json_deserializer,
                                          use_pooling=True)

            self.remap_keys(self.servers)

            # remove the node
            response = self.elasticache.modify_cache_cluster(
                CacheClusterId=self.cluster_id,
                NumCacheNodes=count - 1,
                CacheNodeIdsToRemove=[id_to_remove],
                ApplyImmediately=True)
            print(response)

        except Exception as e:
            print(str(e))
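
The flags-based serializer pair used throughout Example #19 follows pymemcache's hook signatures: serializer(key, value) -> (payload, flags) and deserializer(key, payload, flags) -> value. A self-contained sketch (the local server address is an assumption):

import json

from pymemcache.client.hash import HashClient

def json_serializer(key, value):
    if isinstance(value, str):
        return value, 1
    return json.dumps(value), 2

def json_deserializer(key, value, flags):
    if flags == 1:
        return value.decode('utf-8')
    if flags == 2:
        return json.loads(value.decode('utf-8'))
    raise Exception('Unknown serialization format')

client = HashClient([('127.0.0.1', 11211)],
                    serializer=json_serializer,
                    deserializer=json_deserializer)
client.set('conf', {'limit': 10})
print(client.get('conf'))  # {'limit': 10}
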
Example #20
class MemcachedCache(CacheBase):
    """
    Memcached-backed cache implementation.

    Compatible with AWS ElastiCache when using their memcached interface.

    """
    def __init__(
        self,
        servers: Optional[List[Tuple[str, int]]] = None,
        connect_timeout=None,
        read_timeout=None,
        serde=None,
        testing=False,
        ignore_exc=False,
    ):
        client_kwargs = dict(
            connect_timeout=connect_timeout,
            timeout=read_timeout,
            serde=serde or JsonSerializerDeserializer(),
            ignore_exc=ignore_exc,
        )

        if testing:
            self.client = MockMemcacheClient(
                server=None,
                **client_kwargs,
            )
        else:
            self.client = HashClient(
                servers=servers,
                **client_kwargs,
            )

    def get(self, key: str):
        """
        Return the value for a key, or None if not found

        """
        return self.client.get(key)

    def add(self, key: str, value, ttl=None):
        """
        Set the value for a key, but only if that key hasn't been set.

        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0
        # NB: If input is malformed, this will not raise errors.
        # set `noreply` to False for further debugging
        return self.client.add(key, value, expire=ttl)

    def set(self, key: str, value, ttl=None):
        """
        Set the value for a key, overwriting any existing value

        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0
        # NB: If input is malformed, this will not raise errors.
        # set `noreply` to False for further debugging
        return self.client.set(key, value, expire=ttl)

    def set_many(self, values, ttl=None):
        """
        Set many key-value pairs at once, overwriting existing values

        """
        if ttl is None:
            # pymemcache interprets 0 as no expiration
            ttl = 0

        return self.client.set_many(values, expire=ttl)
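
The testing=True branch above swaps in pymemcache's in-memory MockMemcacheClient (shipped in pymemcache.test.utils), which speaks the same API without a running server. A usage sketch, assuming the surrounding module's imports are available:

cache = MemcachedCache(testing=True)
cache.set('answer', 42, ttl=60)
assert cache.get('answer') == 42
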
Example #21
from pymemcache.client.hash import HashClient

client = HashClient([('127.0.0.1', 11211), ('127.0.0.1', 11212)])
client.set('some_key', 'some value')
result = client.get('some_key')
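
Keys in the snippet above are routed to one of the two servers by a client-side hash (pymemcache's default hasher is a rendezvous hash). A pooled, failure-tolerant variant of the same setup (sketch):

from pymemcache.client.hash import HashClient

client = HashClient(
    [('127.0.0.1', 11211), ('127.0.0.1', 11212)],
    use_pooling=True,   # reuse connections per backend
    ignore_exc=True,    # degrade to cache misses if a node is down
)
client.set('some_key', 'some value')
print(client.get('some_key'))
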
Example #22
import sys
import _mysql
from pymemcache.client import base
from pymemcache.client.hash import HashClient
import socket

PORT = 24000
#memc = base.Client(('127.0.0.1',11211));
memc = HashClient([('localhost', 11211), ('localhost', 11212)])

conn = _mysql.connect(host="localhost",
                      user="******",
                      passwd="tenzin",
                      db="cs632")

popularfilms = memc.get('top5films')
if not popularfilms:
    #cursor = conn.cursor()
    qu = 'SELECT * FROM filmorder'
    conn.query(qu)
    rows = conn.store_result()
    rows = rows.fetch_row(how=1, maxrows=0)
    #for x in ro
    #print(rows)
    memc.set('top5films', rows, 60)
    print("Updated memcached with MySQL data")
else:
    print("Loaded data from memcached")
    print(popularfilms)

bunt = memc.get('bunt')
Example #23
#!/usr/bin/env python

from pymemcache.client.hash import HashClient
client = HashClient([('127.0.0.1', 9999)])
client.set('some_key', 'some value')
result = client.get('some_key')
print(result)
client.delete('some_key')
print(client.get('some_key'))
Example #24
class MemcachedDriver(coordination._RunWatchersMixin,
                      coordination.CoordinationDriver):
    """A `memcached`_ based driver.

    This driver uses `memcached`_ concepts to provide the coordination driver
    semantics and required API(s). It **is** fully functional and implements
    all of the coordination driver API(s). It stores data into memcache
    using expiries and `msgpack`_ encoded values.

    General recommendations/usage considerations:

    - Memcache (without a different backend technology) is a **cache**,
      enough said.

    .. _memcached: http://memcached.org/
    .. _msgpack: http://msgpack.org/
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    #: Key prefix attached to groups (used in name-spacing keys)
    GROUP_PREFIX = b'_TOOZ_GROUP_'

    #: Key prefix attached to leaders of groups (used in name-spacing keys)
    GROUP_LEADER_PREFIX = b'_TOOZ_GROUP_LEADER_'

    #: Key prefix attached to members of groups (used in name-spacing keys)
    MEMBER_PREFIX = b'_TOOZ_MEMBER_'

    #: Key where all groups 'known' are stored.
    GROUP_LIST_KEY = b'_TOOZ_GROUP_LIST'

    #: Default socket/lock/member/leader timeout used when none is provided.
    DEFAULT_TIMEOUT = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"It's alive!"

    def __init__(self, member_id, parsed_url, options):
        super(MemcachedDriver, self).__init__()
        options = utils.collapse(options)
        self._options = options
        self._member_id = member_id
        self._joined_groups = set()
        self._executor = utils.ProxyExecutor.build("Memcached", options)
        # self.host = (parsed_url.hostname or "localhost",
        #              parsed_url.port or 11211)
        self.host = []
        for one_url in parsed_url:
            tmp = (one_url.hostname or "localhost", one_url.port or 11211)
            self.host.append(tmp)
        default_timeout = options.get('timeout', self.DEFAULT_TIMEOUT)
        self.timeout = int(default_timeout)
        self.membership_timeout = int(
            options.get('membership_timeout', default_timeout))
        self.lock_timeout = int(options.get('lock_timeout', default_timeout))
        self.leader_timeout = int(
            options.get('leader_timeout', default_timeout))
        max_pool_size = options.get('max_pool_size', None)
        if max_pool_size is not None:
            self.max_pool_size = int(max_pool_size)
        else:
            self.max_pool_size = None
        self._acquired_locks = []

    @staticmethod
    def _msgpack_serializer(key, value):
        if isinstance(value, six.binary_type):
            return value, 1
        return utils.dumps(value), 2

    @staticmethod
    def _msgpack_deserializer(key, value, flags):
        if flags == 1:
            return value
        if flags == 2:
            return utils.loads(value)
        raise coordination.SerializationError("Unknown serialization"
                                              " format '%s'" % flags)

    @_translate_failures
    def _start(self):
        #self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(self.host,
                                 serializer=self._msgpack_serializer,
                                 deserializer=self._msgpack_deserializer,
                                 timeout=self.timeout,
                                 connect_timeout=self.timeout,
                                 max_pool_size=self.max_pool_size)

        # Run heartbeat here because pymemcache uses lazy connections
        # and only connects once you perform an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()

    @_translate_failures
    def _stop(self):
        for lock in list(self._acquired_locks):
            lock.release()
        self.client.delete(self._encode_member_id(self._member_id))
        for g in list(self._joined_groups):
            try:
                self.leave_group(g).get()
            except (coordination.MemberNotJoined,
                    coordination.GroupNotCreated):
                # Guess we got booted out/never existed in the first place...
                pass
            except coordination.ToozError:
                LOG.warning("Unable to leave group '%s'", g, exc_info=True)
        self._executor.stop()
        # self.client.close()

    def _encode_group_id(self, group_id):
        return self.GROUP_PREFIX + group_id

    def _encode_member_id(self, member_id):
        return self.MEMBER_PREFIX + member_id

    def _encode_group_leader(self, group_id):
        return self.GROUP_LEADER_PREFIX + group_id

    @_retry.retry()
    def _add_group_to_group_list(self, group_id):
        """Add group to the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        if cas:
            group_list = set(group_list)
            group_list.add(group_id)
            if not self.client.cas(self.GROUP_LIST_KEY, list(group_list), cas):
                # Someone updated the group list before us, try again!
                raise _retry.Retry
        else:
            if not self.client.add(self.GROUP_LIST_KEY, [group_id],
                                   noreply=False):
                # Someone updated the group list before us, try again!
                raise _retry.Retry

    @_retry.retry()
    def _remove_from_group_list(self, group_id):
        """Remove group from the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        group_list = set(group_list)
        group_list.remove(group_id)
        if not self.client.cas(self.GROUP_LIST_KEY, list(group_list), cas):
            # Someone updated the group list before us, try again!
            raise _retry.Retry

    def create_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_translate_failures
        def _create_group():
            if not self.client.add(encoded_group, {}, noreply=False):
                raise coordination.GroupAlreadyExist(group_id)
            self._add_group_to_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_create_group))

    def get_groups(self):
        @_translate_failures
        def _get_groups():
            return self.client.get(self.GROUP_LIST_KEY) or []

        return MemcachedFutureResult(self._executor.submit(_get_groups))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _join_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id in group_members:
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            group_members[self._member_id] = {
                b"capabilities": capabilities,
            }
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.add(group_id)

        return MemcachedFutureResult(self._executor.submit(_join_group))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _leave_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            del group_members[self._member_id]
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.discard(group_id)

        return MemcachedFutureResult(self._executor.submit(_leave_group))

    def _destroy_group(self, group_id):
        self.client.delete(self._encode_group_id(group_id))

    def delete_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _delete_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if group_members != {}:
                raise coordination.GroupNotEmpty(group_id)
            # Delete is not atomic, so we first set the group to None
            # using CAS, and then we delete it, to avoid race conditions.
            if not self.client.cas(encoded_group, None, cas):
                raise _retry.Retry
            self.client.delete(encoded_group)
            self._remove_from_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_delete_group))

    @_retry.retry()
    @_translate_failures
    def _get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        group_members, cas = self.client.gets(encoded_group)
        if group_members is None:
            raise coordination.GroupNotCreated(group_id)
        actual_group_members = {}
        for m, v in six.iteritems(group_members):
            # Never kick self from the group, we know we're alive
            if (m == self._member_id
                    or self.client.get(self._encode_member_id(m))):
                actual_group_members[m] = v
        if group_members != actual_group_members:
            # There are some dead members, update the group
            if not self.client.cas(encoded_group, actual_group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
        return actual_group_members

    def get_members(self, group_id):
        def _get_members():
            return self._get_members(group_id).keys()

        return MemcachedFutureResult(self._executor.submit(_get_members))

    def get_member_capabilities(self, group_id, member_id):
        def _get_member_capabilities():
            group_members = self._get_members(group_id)
            if member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, member_id)
            return group_members[member_id][b'capabilities']

        return MemcachedFutureResult(
            self._executor.submit(_get_member_capabilities))

    def update_capabilities(self, group_id, capabilities):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _update_capabilities():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            group_members[self._member_id][b'capabilities'] = capabilities
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, try again
                raise _retry.Retry

        return MemcachedFutureResult(
            self._executor.submit(_update_capabilities))

    def get_leader(self, group_id):
        def _get_leader():
            return self._get_leader_lock(group_id).get_owner()

        return MemcachedFutureResult(self._executor.submit(_get_leader))

    @_translate_failures
    def heartbeat(self):
        self.client.set(self._encode_member_id(self._member_id),
                        self.STILL_ALIVE,
                        expire=self.membership_timeout)
        # Reset the acquired locks
        for lock in self._acquired_locks:
            lock.heartbeat()
        return min(self.membership_timeout, self.leader_timeout,
                   self.lock_timeout)

    @_translate_failures
    def _init_watch_group(self, group_id):
        members = self.client.get(self._encode_group_id(group_id))
        if members is None:
            raise coordination.GroupNotCreated(group_id)
        # Initialize with the current group member list
        if group_id not in self._group_members:
            self._group_members[group_id] = set(members.keys())

    def watch_join_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver,
                     self).watch_join_group(group_id, callback)

    def unwatch_join_group(self, group_id, callback):
        return super(MemcachedDriver,
                     self).unwatch_join_group(group_id, callback)

    def watch_leave_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver,
                     self).watch_leave_group(group_id, callback)

    def unwatch_leave_group(self, group_id, callback):
        return super(MemcachedDriver,
                     self).unwatch_leave_group(group_id, callback)

    def watch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver,
                     self).watch_elected_as_leader(group_id, callback)

    def unwatch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver,
                     self).unwatch_elected_as_leader(group_id, callback)

    def get_lock(self, name):
        return MemcachedLock(self, name, self.lock_timeout)

    def _get_leader_lock(self, group_id):
        return MemcachedLock(self, self._encode_group_leader(group_id),
                             self.leader_timeout)

    @_translate_failures
    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            # Try to grab the lock, if that fails, that means someone has it
            # already.
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(group_id,
                                                     self._member_id))

    def run_watchers(self, timeout=None):
        result = super(MemcachedDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
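
The driver above leans on memcached's optimistic concurrency: gets() returns a value plus a CAS token, and cas() only writes if the value is unchanged, retrying otherwise. A standalone sketch of that loop (the address and key are illustrative; PickleSerde handles the non-string values):

from pymemcache import serde
from pymemcache.client.hash import HashClient

client = HashClient([('127.0.0.1', 11211)], serde=serde.PickleSerde())

def append_member(key, member):
    while True:
        members, cas = client.gets(key)
        if members is None:
            # key absent: add() is atomic; False means another writer won
            if client.add(key, [member], noreply=False):
                return
            continue
        if client.cas(key, members + [member], cas):
            return
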
Example #25
class MemcachedDriver(coordination._RunWatchersMixin,
                      coordination.CoordinationDriver):
    """A `memcached`_ based driver.

    This driver uses `memcached`_ concepts to provide the coordination driver
    semantics and required API(s). It **is** fully functional and implements
    all of the coordination driver API(s). It stores data into memcache
    using expiries and `msgpack`_ encoded values.

    General recommendations/usage considerations:

    - Memcache (without a different backend technology) is a **cache**,
      enough said.

    .. _memcached: http://memcached.org/
    .. _msgpack: http://msgpack.org/
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    #: Key prefix attached to groups (used in name-spacing keys)
    GROUP_PREFIX = b'_TOOZ_GROUP_'

    #: Key prefix attached to leaders of groups (used in name-spacing keys)
    GROUP_LEADER_PREFIX = b'_TOOZ_GROUP_LEADER_'

    #: Key prefix attached to members of groups (used in name-spacing keys)
    MEMBER_PREFIX = b'_TOOZ_MEMBER_'

    #: Key where all groups 'known' are stored.
    GROUP_LIST_KEY = b'_TOOZ_GROUP_LIST'

    #: Default socket/lock/member/leader timeout used when none is provided.
    DEFAULT_TIMEOUT = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"It's alive!"

    def __init__(self, member_id, parsed_url, options):
        super(MemcachedDriver, self).__init__()
        options = utils.collapse(options)
        self._options = options
        self._member_id = member_id
        self._joined_groups = set()
        self._executor = utils.ProxyExecutor.build("Memcached", options)
        # self.host = (parsed_url.hostname or "localhost",
        #              parsed_url.port or 11211)
        self.host = []
        for one_url in parsed_url:
            tmp = (one_url.hostname or "localhost",
                   one_url.port or 11211)
            self.host.append(tmp)
        default_timeout = options.get('timeout', self.DEFAULT_TIMEOUT)
        self.timeout = int(default_timeout)
        self.membership_timeout = int(options.get(
            'membership_timeout', default_timeout))
        self.lock_timeout = int(options.get(
            'lock_timeout', default_timeout))
        self.leader_timeout = int(options.get(
            'leader_timeout', default_timeout))
        max_pool_size = options.get('max_pool_size', None)
        if max_pool_size is not None:
            self.max_pool_size = int(max_pool_size)
        else:
            self.max_pool_size = None
        self._acquired_locks = []

    @staticmethod
    def _msgpack_serializer(key, value):
        if isinstance(value, six.binary_type):
            return value, 1
        return utils.dumps(value), 2

    @staticmethod
    def _msgpack_deserializer(key, value, flags):
        if flags == 1:
            return value
        if flags == 2:
            return utils.loads(value)
        raise coordination.SerializationError("Unknown serialization"
                                              " format '%s'" % flags)

    @_translate_failures
    def _start(self):
        #self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(
            self.host,
            serializer=self._msgpack_serializer,
            deserializer=self._msgpack_deserializer,
            timeout=self.timeout,
            connect_timeout=self.timeout,
            max_pool_size=self.max_pool_size)

        # Run heartbeat here because pymemcache uses lazy connections
        # and only connects once you perform an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()

    @_translate_failures
    def _stop(self):
        for lock in list(self._acquired_locks):
            lock.release()
        self.client.delete(self._encode_member_id(self._member_id))
        for g in list(self._joined_groups):
            try:
                self.leave_group(g).get()
            except (coordination.MemberNotJoined,
                    coordination.GroupNotCreated):
                # Guess we got booted out/never existed in the first place...
                pass
            except coordination.ToozError:
                LOG.warning("Unable to leave group '%s'", g, exc_info=True)
        self._executor.stop()
        # self.client.close()

    def _encode_group_id(self, group_id):
        return self.GROUP_PREFIX + group_id

    def _encode_member_id(self, member_id):
        return self.MEMBER_PREFIX + member_id

    def _encode_group_leader(self, group_id):
        return self.GROUP_LEADER_PREFIX + group_id

    @_retry.retry()
    def _add_group_to_group_list(self, group_id):
        """Add group to the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        if cas:
            group_list = set(group_list)
            group_list.add(group_id)
            if not self.client.cas(self.GROUP_LIST_KEY,
                                   list(group_list), cas):
                # Someone updated the group list before us, try again!
                raise _retry.Retry
        else:
            if not self.client.add(self.GROUP_LIST_KEY,
                                   [group_id], noreply=False):
                # Someone updated the group list before us, try again!
                raise _retry.Retry

    @_retry.retry()
    def _remove_from_group_list(self, group_id):
        """Remove group from the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        group_list = set(group_list)
        group_list.remove(group_id)
        if not self.client.cas(self.GROUP_LIST_KEY,
                               list(group_list), cas):
            # Someone updated the group list before us, try again!
            raise _retry.Retry

    def create_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_translate_failures
        def _create_group():
            if not self.client.add(encoded_group, {}, noreply=False):
                raise coordination.GroupAlreadyExist(group_id)
            self._add_group_to_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_create_group))

    def get_groups(self):

        @_translate_failures
        def _get_groups():
            return self.client.get(self.GROUP_LIST_KEY) or []

        return MemcachedFutureResult(self._executor.submit(_get_groups))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _join_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id in group_members:
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            group_members[self._member_id] = {
                b"capabilities": capabilities,
            }
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.add(group_id)

        return MemcachedFutureResult(self._executor.submit(_join_group))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _leave_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            del group_members[self._member_id]
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.discard(group_id)

        return MemcachedFutureResult(self._executor.submit(_leave_group))

    def _destroy_group(self, group_id):
        self.client.delete(self._encode_group_id(group_id))

    def delete_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _delete_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if group_members != {}:
                raise coordination.GroupNotEmpty(group_id)
            # Delete is not atomic, so we first set the group to None
            # using CAS, and then we delete it, to avoid race conditions.
            if not self.client.cas(encoded_group, None, cas):
                raise _retry.Retry
            self.client.delete(encoded_group)
            self._remove_from_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_delete_group))

    @_retry.retry()
    @_translate_failures
    def _get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        group_members, cas = self.client.gets(encoded_group)
        if group_members is None:
            raise coordination.GroupNotCreated(group_id)
        actual_group_members = {}
        for m, v in six.iteritems(group_members):
            # Never kick self from the group, we know we're alive
            if (m == self._member_id or
               self.client.get(self._encode_member_id(m))):
                actual_group_members[m] = v
        if group_members != actual_group_members:
            # There are some dead members, update the group
            if not self.client.cas(encoded_group, actual_group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
        return actual_group_members

    def get_members(self, group_id):

        def _get_members():
            return self._get_members(group_id).keys()

        return MemcachedFutureResult(self._executor.submit(_get_members))

    def get_member_capabilities(self, group_id, member_id):

        def _get_member_capabilities():
            group_members = self._get_members(group_id)
            if member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, member_id)
            return group_members[member_id][b'capabilities']

        return MemcachedFutureResult(
            self._executor.submit(_get_member_capabilities))

    def update_capabilities(self, group_id, capabilities):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _update_capabilities():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            group_members[self._member_id][b'capabilities'] = capabilities
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, try again
                raise _retry.Retry

        return MemcachedFutureResult(
            self._executor.submit(_update_capabilities))

    def get_leader(self, group_id):

        def _get_leader():
            return self._get_leader_lock(group_id).get_owner()

        return MemcachedFutureResult(self._executor.submit(_get_leader))

    @_translate_failures
    def heartbeat(self):
        self.client.set(self._encode_member_id(self._member_id),
                        self.STILL_ALIVE,
                        expire=self.membership_timeout)
        # Reset the acquired locks
        for lock in self._acquired_locks:
            lock.heartbeat()
        return min(self.membership_timeout,
                   self.leader_timeout,
                   self.lock_timeout)

    @_translate_failures
    def _init_watch_group(self, group_id):
        members = self.client.get(self._encode_group_id(group_id))
        if members is None:
            raise coordination.GroupNotCreated(group_id)
        # Initialize with the current group member list
        if group_id not in self._group_members:
            self._group_members[group_id] = set(members.keys())

    def watch_join_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_join_group(
            group_id, callback)

    def unwatch_join_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_join_group(
            group_id, callback)

    def watch_leave_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_leave_group(
            group_id, callback)

    def unwatch_leave_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_leave_group(
            group_id, callback)

    def watch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).watch_elected_as_leader(
            group_id, callback)

    def unwatch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_elected_as_leader(
            group_id, callback)

    def get_lock(self, name):
        return MemcachedLock(self, name, self.lock_timeout)

    def _get_leader_lock(self, group_id):
        return MemcachedLock(self, self._encode_group_leader(group_id),
                             self.leader_timeout)

    @_translate_failures
    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            # Try to grab the lock, if that fails, that means someone has it
            # already.
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(
                    group_id,
                    self._member_id))

    def run_watchers(self, timeout=None):
        result = super(MemcachedDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
Example #26
servers = []
if ',' in server:
    for s in server.split(','):
        if ':' in s:
            host, port = s.split(':')
            servers.append((host, int(port)))
        else:
            servers.append((s, 11211))
    mc = HashClient(servers)
else:
    if ':' in server:
        host, port = server.split(':')
        server = (host, int(port))
    else:
        server = (server, 11211)
    mc = Client(server)

key = sys.argv[1]
val = mc.get(key)
if val[0:4] == MEMCCACHE_BIG:
    # Big-value header (layout inferred from the reads below): magic,
    # sub-key count, digest length (MD4 = 16 bytes), a zero word, then
    # per-chunk records of (md4 digest, chunk size)
    numkeys = struct.unpack('!I', val[4:8])[0]
    assert struct.unpack('!I', val[8:12])[0] == 16
    assert struct.unpack('!I', val[12:16])[0] == 0
    size = struct.unpack('!I', val[16:20])[0]
    val = val[20:]
    buf = ""
    while val:
        md4 = val[0:16]
        size = struct.unpack('!I', val[16:20])[0]
        val = val[20:]
        subkey = "%s-%d" % (binascii.hexlify(md4), size)
        subval = mc.get(subkey)
        if not subval:
            print("%s not found" % subkey)