Example #1
    def __init__(
        self,
        servers: Optional[List[Tuple[str, int]]] = None,
        connect_timeout=None,
        read_timeout=None,
        serde=None,
        testing=False,
        ignore_exc=False,
    ):
        client_kwargs = dict(
            connect_timeout=connect_timeout,
            timeout=read_timeout,
            serde=serde or JsonSerializerDeserializer(),
            ignore_exc=ignore_exc,
        )

        if testing:
            self.client = MockMemcacheClient(
                server=None,
                **client_kwargs,
            )
        else:
            self.client = HashClient(
                servers=servers,
                **client_kwargs,
            )
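The JsonSerializerDeserializer referenced above is not shown. A minimal sketch of what such a serde could look like, assuming pymemcache's custom-serde interface (serialize(key, value) returning a (bytes, flags) pair and deserialize(key, value, flags) reversing it); the flag value is an arbitrary choice:

import json

class JsonSerializerDeserializer:
    FLAG_JSON = 1  # arbitrary flag marking JSON-encoded values

    def serialize(self, key, value):
        if isinstance(value, bytes):
            return value, 0  # pass raw bytes through unchanged
        return json.dumps(value).encode('utf-8'), self.FLAG_JSON

    def deserialize(self, key, value, flags):
        if flags == self.FLAG_JSON:
            return json.loads(value.decode('utf-8'))
        return value  # flags == 0: raw bytes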
Example #2
 def __init__(self, servers="127.0.0.1:11211", **kwargs):
     """Constructor"""
     Cache.__init__(self, **kwargs)
     if isinstance(servers, string_types):
         servers = [s.strip() for s in servers.split(",")]
     self.cache = HashClient(servers, use_pooling=True)
     self.timeout = int(kwargs.get("timeout", 0))
Example #3
File: memcacheUtil.py Project: HebeYan/yly
class MemcacheHashCli(object):
    def __init__(self, hosts):
        self.client = HashClient(hosts)

    def set(self, key, value, expire):
        try:
            return self.client.set(key, value, expire)
        except Exception:
            return False

    def get(self, key):
        try:
            return self.client.get(key, default=None)
        except Exception:
            return None

    def mset(self, values, expire):
        try:
            return self.client.set_many(values, expire)
        except Exception:
            return False

    def mget(self, keys):
        try:
            return self.client.get_many(keys)
        except Exception:
            return None
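A hypothetical usage of MemcacheHashCli (hosts, keys, and values are made up):

cli = MemcacheHashCli([('127.0.0.1', 11211), ('127.0.0.1', 11212)])
cli.set('user:1', b'alice', 60)        # expires after 60 seconds
print(cli.get('user:1'))               # b'alice', or None on error
cli.mset({'a': b'1', 'b': b'2'}, 120)
print(cli.mget(['a', 'b']))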
Example #4
    def remove_node(self):
        if self.internal_scale:
            print('Cannot scale more than one at a time')
            return
        try:
            self.internal_scale = True
            response = self.elasticache.describe_cache_clusters(
                CacheClusterId=self.cluster_id, ShowCacheNodeInfo=True)
            count = response['CacheClusters'][0]['NumCacheNodes']
            nodes = response['CacheClusters'][0]['CacheNodes']
            node = nodes[count - 1]
            id_to_remove = node['CacheNodeId']
            endpoint = (node['Endpoint']['Address'], node['Endpoint']['Port'])
            print('Removing node: ' + str(endpoint) + ' with id ' +
                  str(id_to_remove))

            new_servers = self.servers.copy()
            new_servers.remove(endpoint)
            print('Remaining servers: ' + str(new_servers))
            self.hash_client = HashClient(new_servers,
                                          serializer=self.json_serializer,
                                          deserializer=self.json_deserializer,
                                          use_pooling=True)

            self.remap_keys(self.servers)

            # remove the node
            response = self.elasticache.modify_cache_cluster(
                CacheClusterId=self.cluster_id,
                NumCacheNodes=count - 1,
                CacheNodeIdsToRemove=[id_to_remove],
                ApplyImmediately=True)
            print(response)

        except Exception as e:
            print(str(e))
Example #5
    def test_custom_client(self):
        class MyClient(Client):
            pass

        client = HashClient([])
        client.client_class = MyClient
        client.add_server(("host", 11211))
        assert isinstance(client.clients["host:11211"], MyClient)
Example #6
    def test_setup_client_without_pooling(self):
        with mock.patch('pymemcache.client.hash.Client') as internal_client:
            client = HashClient([], timeout=999, key_prefix='foo_bar_baz')
            client.add_server('127.0.0.1', '11211')

        assert internal_client.call_args[0][0] == ('127.0.0.1', '11211')
        kwargs = internal_client.call_args[1]
        assert kwargs['timeout'] == 999
        assert kwargs['key_prefix'] == 'foo_bar_baz'
Example #7
class MemCacheHelper(object):
    def __init__(self):
        self.client = HashClient([(MEMCACHED_ENDPOINT, MEMCACHED_PORT)])

    def set(self, key, value):
        self.client.set(key, value)

    def get(self, key):
        return self.client.get(key)
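MEMCACHED_ENDPOINT and MEMCACHED_PORT are module-level settings not shown here. A hypothetical usage, assuming a local server:

MEMCACHED_ENDPOINT = '127.0.0.1'  # assumed values for illustration
MEMCACHED_PORT = 11211

helper = MemCacheHelper()
helper.set('greeting', 'hello')
print(helper.get('greeting'))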
Example #8
    def make_unix_client(self, sockets, *mock_socket_values, **kwargs):
        client = HashClient([], **kwargs)

        for socket_, vals in zip(sockets, mock_socket_values):
            c = self.make_client_pool(socket_, vals, **kwargs)
            client.clients[socket_] = c
            client.hasher.add_node(socket_)

        return client
Example #9
class Memcache(object):
    """
    This class is an interface to a memcache storage backend
    it implements needed methods to store and fetch for an url
    shortener.
    """

    from pymemcache.client.hash import HashClient

    def __init__(self, host, key_expiration, username=None, password=None):
        """
        Instantiate a memcache storage backend object
        @params:
            host: the server to connect to
            key_expiration: key expiration in seconds
            username: not used
            password: not used
        @returns:
        """
        # HashClient expects a list of servers; timeouts take seconds,
        # not booleans (the values here are illustrative)
        self.client = HashClient(servers=[(host, 11211)],
                                 connect_timeout=5,
                                 timeout=5,
                                 use_pooling=True,
                                 max_pool_size=100)
        self.key_expiration = key_expiration

    def set_value(self, key, value):
        """
        Set a new record in the database for a short code
        @params:
            key: the short code to insert
            value: the long url corresponding
        @returns:
            True or False if succeeded or not
        """
        return self.client.set(key, value.encode('utf-8'), self.key_expiration)

    def get_value(self, key):
        """
        Get a long url from the shorten form
        @params:
            key: the short form to lookup
        @returns:
            a long url if found, None otherwise
        """
        res = self.client.get(key)
        if res is not None:
            return res.decode('utf-8')
        return None

    def get_stat(self, key):
        pass

    def get_all(self):
        pass
Example #10
    def test_unavailable_servers_zero_retry_raise_exception(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [('example.com', 11211)], use_pooling=True,
            ignore_exc=False,
            retry_attempts=0, timeout=1, connect_timeout=1
        )

        with pytest.raises(socket.error):
            client.get('foo')
Example #11
class MetricCache:

    def __init__(self, server_list):
        # server_list = list of (host,port)
        self.client = HashClient(server_list)

    def increment(self, *, metric_name, value):
        if not self.client.get(metric_name):
            self.client.set(metric_name, 0)
        return self.client.incr(metric_name, value)
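The get-then-set sequence above is racy: two workers can both see a miss and reset the counter. A sketch of an alternative that leans on memcached's add semantics (add only succeeds when the key does not exist yet); this is not the original author's code:

def increment_atomic(client, metric_name, value):
    # add is a no-op (returns False) when the key already exists
    client.add(metric_name, 0, noreply=False)
    return client.incr(metric_name, value)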
Example #12
    def test_no_servers_left_with_get_many(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.get_many(['foo', 'bar'])
        assert result == {'foo': False, 'bar': False}
Example #13
    def test_no_servers_left_with_commands(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        result = client.get('foo')
        assert result is False
Example #14
    def test_no_servers_left(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=True,
            timeout=1, connect_timeout=1
        )

        hashed_client = client._get_client('foo')
        assert hashed_client is None
Example #16
    def test_setup_client_without_pooling(self):
        client_class = "pymemcache.client.hash.HashClient.client_class"
        with mock.patch(client_class) as internal_client:
            client = HashClient([], timeout=999, key_prefix="foo_bar_baz")
            client.add_server(("127.0.0.1", "11211"))

        assert internal_client.call_args[0][0] == ("127.0.0.1", "11211")
        kwargs = internal_client.call_args[1]
        assert kwargs["timeout"] == 999
        assert kwargs["key_prefix"] == "foo_bar_baz"
Example #17
 def __init__(self, cluster_id):
     self.elasticache = boto3.client('elasticache', region_name='us-east-1')
     self.cluster_id = cluster_id
     self.servers = self.new_server_list()
     self.internal_scale = False
     self.hash_client = HashClient(self.servers,
                                   serializer=self.json_serializer,
                                   deserializer=self.json_deserializer,
                                   use_pooling=True)
     thread = TimerThread(self.check_cluster)
     thread.start()
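TimerThread is not defined in this snippet; a plausible sketch (hypothetical, assuming it simply invokes the callback at a fixed interval):

import threading
import time

class TimerThread(threading.Thread):
    def __init__(self, callback, interval=60):
        super(TimerThread, self).__init__()
        self.daemon = True  # don't block interpreter shutdown
        self.callback = callback
        self.interval = interval

    def run(self):
        while True:
            time.sleep(self.interval)
            self.callback()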
Example #18
    def test_no_servers_left_with_set_many(self):
        from pymemcache.client.hash import HashClient

        client = HashClient([],
                            use_pooling=True,
                            ignore_exc=True,
                            timeout=1,
                            connect_timeout=1)

        result = client.set_many({"foo": "bar"})
        assert result == ["foo"]
Example #19
    def test_custom_client_with_pooling(self):
        class MyClient(Client):
            pass

        client = HashClient([], use_pooling=True)
        client.client_class = MyClient
        client.add_server(("host", 11211))
        assert isinstance(client.clients["host:11211"], PooledClient)

        pool = client.clients["host:11211"].client_pool
        with pool.get_and_release(destroy_on_fail=True) as c:
            assert isinstance(c, MyClient)
Example #20
    def test_no_servers_left_raise_exception(self):
        from pymemcache.client.hash import HashClient
        client = HashClient(
            [], use_pooling=True,
            ignore_exc=False,
            timeout=1, connect_timeout=1
        )

        with pytest.raises(MemcacheError) as e:
            client._get_client('foo')

        assert str(e.value) == 'All servers seem to be down right now'
Example #21
    def make_client(self, *mock_socket_values, **kwargs):
        current_port = 11012
        client = HashClient([], **kwargs)
        ip = '127.0.0.1'

        for vals in mock_socket_values:
            s = '%s:%s' % (ip, current_port)
            c = self.make_client_pool((ip, current_port), vals, **kwargs)
            client.clients[s] = c
            client.hasher.add_node(s)
            current_port += 1

        return client
Example #22
def get_game_info(group_id):
    """
    Get the game info for a group.

    :param group_id: ID for the group
    :type group_id: String
    :return: String representation of group information
    """
    nodes = elasticache_auto_discovery.discover('hackgt5mem.gy46cz.cfg.use1.cache.amazonaws.com:11211')
    nodes = [(x[1], int(x[2])) for x in nodes]
    memcache_client = HashClient(nodes)

    raw = memcache_client.get(str(group_id))
    return raw.decode("utf-8") if raw is not None else None
Example #23
    def test_no_servers_left_return_positional_default(self):
        from pymemcache.client.hash import HashClient

        client = HashClient([],
                            use_pooling=True,
                            ignore_exc=True,
                            timeout=1,
                            connect_timeout=1)

        # Ensure compatibility with clients that pass the default as a
        # positional argument
        result = client.get("foo", "default")
        assert result == "default"
Example #24
    def test_no_servers_left_with_commands_return_default_value(self):
        from pymemcache.client.hash import HashClient

        client = HashClient([],
                            use_pooling=True,
                            ignore_exc=True,
                            timeout=1,
                            connect_timeout=1)

        result = client.get("foo")
        assert result is None
        result = client.get("foo", default="default")
        assert result == "default"
        result = client.set("foo", "bar")
        assert result is False
Example #25
    def _start(self):
        #self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(self.host,
                                 serializer=self._msgpack_serializer,
                                 deserializer=self._msgpack_deserializer,
                                 timeout=self.timeout,
                                 connect_timeout=self.timeout,
                                 max_pool_size=self.max_pool_size)

        # Run heartbeat here because pymemcache uses a lazy connection
        # method and only connects once you do an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()
Example #26
class MemcacheMap(Map):
    def open(self):
        # HashClient expects a list of (host, port) servers
        self._db = HashClient([('127.0.0.1', 11211)])
        return self

    def _put(self, key: Key, value: bytes) -> Key:
        self._db.set(key_bytes(key), value)
        return key

    def _get(self, uuid: UUID, time: int) -> Optional[bytes]:
        # the original referenced an undefined name `key` here; assuming
        # a Key can be built from (uuid, time)
        return self._db.get(key_bytes(Key(uuid, time)))

    def close(self):
        del self._db
        return self
Example #27
    def _get_memcache_client(endpoint, is_local):
        client = None
        if is_local:
            address = endpoint.split(':')
            node = (address[0], int(address[1]))
            client = Client(node,
                            serializer=ECClient._json_serializer,
                            deserializer=ECClient._json_deserializer,
                            timeout=10000,
                            connect_timeout=10000)
        else:
            save_log_info('Trying to get nodes')
            nodes = elasticache_auto_discovery.discover(endpoint, 10)
            nodes = list(map(lambda x: (x[1], int(x[2])), nodes))
            # save_log_info(nodes)
            # address = endpoint.split(':')
            # nodes = [(address[0], int(address[1]))]
            save_log_info('Nodes found: {}'.format(nodes))
            client = HashClient(nodes,
                                serializer=ECClient._json_serializer,
                                deserializer=ECClient._json_deserializer,
                                timeout=10000,
                                connect_timeout=10000)

        save_log_info('Return ECClient: {}'.format(client))

        return client
Example #28
def getClient(config):
    if isinstance(config, list):
        from pymemcache.client.hash import HashClient
        return HashClient(config)
    else:
        from pymemcache.client.base import Client
        return Client(config[0])
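Hypothetical usage of getClient, showing both config shapes:

cluster = getClient([('10.0.0.1', 11211), ('10.0.0.2', 11211)])  # HashClient
single = getClient((('127.0.0.1', 11211),))                      # base Client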
Example #29
 def __init__(self, host, key_expiration, username=None, password=None):
     """
     Instantiate a memcache storage backend object
     @params:
         host: the server to connect to
         key_expiration: key expiration in seconds
         username: not used
         password: not used
     @returns:
     """
     # HashClient expects a list of servers; timeouts take seconds,
     # not booleans (the values here are illustrative)
     self.client = HashClient(servers=[(host, 11211)],
                              connect_timeout=5,
                              timeout=5,
                              use_pooling=True,
                              max_pool_size=100)
     self.key_expiration = key_expiration
Example #30
    def make_client(self, *mock_socket_values, **kwargs):
        current_port = 11012
        client = HashClient([], **kwargs)
        ip = '127.0.0.1'

        for vals in mock_socket_values:
            s = '%s:%s' % (ip, current_port)
            c = self.make_client_pool(
                (ip, current_port),
                vals
            )
            client.clients[s] = c
            client.hasher.add_node(s)
            current_port += 1

        return client
Example #31
def hash_client_test():
    '''Consistent-hash client: consistent hashing keeps a single node failure
    from invalidating the whole cluster's cache, but because each set has to
    compute a hash for the key, there is some performance cost.'''
    hash_client = HashClient([('192.168.24.138', 11211),
                              ('192.168.24.139', 11211),
                              ('192.168.24.140', 11211)])
    hash_key_prefix = 'hash_test_key'
    hash_value_prefix = 'hash_test_value'
    # After running kv_check once, stop any one of the three memcached
    # servers above, comment out the client.set calls in kv_check, and run
    # kv_check again: roughly 1/3 of the keys turn out to be missing.
    kv_check(hash_client, hash_key_prefix, hash_value_prefix)
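kv_check is not shown in the source; a plausible sketch (hypothetical) that writes a batch of keys and counts how many can still be read back:

def kv_check(client, key_prefix, value_prefix, count=1000):
    for i in range(count):
        client.set('%s_%d' % (key_prefix, i), '%s_%d' % (value_prefix, i))
    missing = sum(1 for i in range(count)
                  if client.get('%s_%d' % (key_prefix, i)) is None)
    print('missing %d of %d keys' % (missing, count))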
Example #32
    def make_client(self, mock_socket_values, **kwargs):
        from pymemcache.client.hash import HashClient

        tracer = DummyTracer()
        Pin.override(pymemcache, tracer=tracer)
        self.client = HashClient([(TEST_HOST, TEST_PORT)], **kwargs)
        for _c in self.client.clients.values():
            _c.sock = MockSocket(list(mock_socket_values))
        return self.client
Example #33
File: utils.py Project: reubano/mezmorize
def get_pymemcache_client(servers, timeout=None, **kwargs):
    from pymemcache.client.hash import HashClient

    from pymemcache.serde import (
        python_memcache_serializer, python_memcache_deserializer)

    kwargs.setdefault('serializer', python_memcache_serializer)
    kwargs.setdefault('deserializer', python_memcache_deserializer)

    if timeout:
        kwargs['timeout'] = timeout

    split = [s.split(':') for s in servers]
    _servers = [(host, int(port)) for host, port in split]
    client = HashClient(_servers, **kwargs)

    try:
        client.TooBig = ConnectionResetError
    except NameError:
        import socket
        client.TooBig = socket.error

    return client
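A hypothetical usage of get_pymemcache_client (addresses are made up):

client = get_pymemcache_client(['127.0.0.1:11211', '127.0.0.1:11212'],
                               timeout=2)
client.set('user', {'id': 1})  # pickled via python_memcache_serializer
print(client.get('user'))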
Example #34
 def __init__(self, couchbase_uri, memcached_hosts, primary=PRIMARY_COUCHBASE):
     """
     :param couchbase_uri: Connection string for Couchbase
     :param memcached_hosts: List of Memcached nodes
     :param primary: Determines which datastore is authoritative.
         This affects how get operations are performed and which datastore
         is used for CAS operations.
             PRIMARY_COUCHBASE: Couchbase is authoritative
             PRIMARY_MEMCACHED: Memcached is authoritative
         By default, Couchbase is the primary store
     :return:
     """
     self.cb = CbBucket(couchbase_uri)
     self.mc = McClient(memcached_hosts)
     self._primary = primary
Example #35
File: memcached.py Project: rocknio/tooz
    def _start(self):
        #self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(
            self.host,
            serializer=self._msgpack_serializer,
            deserializer=self._msgpack_deserializer,
            timeout=self.timeout,
            connect_timeout=self.timeout,
            max_pool_size=self.max_pool_size)

        # Run heartbeat here because pymemcache uses a lazy connection
        # method and only connects once you do an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()
Example #36
  def setupMemcacheClient(self):
    """ Sets up the memcache client. """
    if os.path.exists(self.APPSCALE_MEMCACHE_FILE):
      with open(self.APPSCALE_MEMCACHE_FILE, "r") as memcache_file:
        all_ips = memcache_file.read().split("\n")
    else:
      all_ips = ['localhost']

    memcaches = [(ip, self.MEMCACHE_PORT) for ip in all_ips if ip]
    memcaches.sort()
    self._memcache = HashClient(
      memcaches, serializer=serializer, deserializer=deserializer,
      connect_timeout=5, timeout=1, use_pooling=True)

    # The GAE API expects return values for all mutate operations.
    for client in six.itervalues(self._memcache.clients):
      client.default_noreply = False
Example #37
class CouchbaseMemcacheMirror(object):
    def __init__(self, couchbase_uri, memcached_hosts, primary=PRIMARY_COUCHBASE):
        """
        :param couchbase_uri: Connection string for Couchbase
        :param memcached_hosts: List of Memcached nodes
        :param primary: Determines which datastore is authoritative.
            This affects how get operations are performed and which datastore
            is used for CAS operations.
                PRIMARY_COUCHBASE: Couchbase is authoritative
                PRIMARY_MEMCACHED: Memcached is authoritative
            By default, Couchbase is the primary store
        :return:
        """
        self.cb = CbBucket(couchbase_uri)
        self.mc = McClient(memcached_hosts)
        self._primary = primary

    @property
    def primary(self):
        return self._primary

    def _cb_get(self, key):
        try:
            return self.cb.get(key).value
        except NotFoundError:
            return None

    def get(self, key, try_alternate=True):
        """
        Gets a document
        :param key: The key to retrieve
        :param try_alternate: Whether to try the secondary data source if the
            item is not found in the primary.
        :return: The value as a Python object
        """
        if self._primary == PRIMARY_COUCHBASE:
            order = [self._cb_get, self.mc.get]
        else:
            order = [self.mc.get, self._cb_get]

        for meth in order:
            ret = meth(key)
            if ret or not try_alternate:
                return ret

        return None

    def _cb_mget(self, keys):
        """
        Internal method to execute a Couchbase multi-get
        :param keys: The keys to retrieve
        :return: A tuple of {found_key:found_value, ...}, [missing_key1,...]
        """
        try:
            ok_rvs = self.cb.get_multi(keys)
            bad_rvs = {}
        except NotFoundError as e:
            ok_rvs, bad_rvs = e.split_results()

        ok_dict = {k: (v.value, v.cas) for k, v in ok_rvs}
        return ok_dict, bad_rvs.keys()

    def get_multi(self, keys, try_alternate=True):
        """
        Gets multiple items from the server
        :param keys: The keys to fetch as an iterable
        :param try_alternate: Whether to fetch missing items from alternate store
        :return: A dictionary of key:value. Only contains keys which exist and have values
        """
        if self._primary == PRIMARY_COUCHBASE:
            ok, err = self._cb_mget(keys)
            if err and try_alternate:
                ok.update(self.mc.get_many(err))
            return ok
        else:
            ok = self.mc.get_many(keys)
            if len(ok) < len(keys) and try_alternate:
                keys_err = set(keys) - set(ok)
                ok.update(self._cb_mget(list(keys_err))[0])
            return ok

    def gets(self, key):
        """
        Get an item with its CAS. The item will always be fetched from the primary
        data store.

        :param key: the key to get
        :return: the value of the key, or None if no such value
        """
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rv = self.cb.get(key)
                return rv.value, rv.cas
            except NotFoundError:
                return None, None
        else:
            return self.mc.gets(key)

    def gets_multi(self, keys):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rvs = self.cb.get_multi(keys)
            except NotFoundError as e:
                rvs, _ = e.split_results()

            return {k: (v.value, v.cas) for k, v in rvs}
        else:
            # TODO: I'm not sure if this is implemented in HashClient :(
            return self.mc.gets_many(keys)

    def delete(self, key):
        st = Status()
        try:
            self.cb.remove(key)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.delete(key)
        return st

    def delete_multi(self, keys):
        st = Status()
        try:
            self.cb.remove_multi(keys)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.delete_many(keys)
        return st

    def _do_incrdecr(self, key, value, is_incr):
        cb_value = value if is_incr else -value
        mc_meth = self.mc.incr if is_incr else self.mc.decr
        st = Status()
        try:
            self.cb.counter(key, delta=cb_value)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = mc_meth(key, value)
        return st

    def incr(self, key, value):
        return self._do_incrdecr(key, value, True)

    def decr(self, key, value):
        return self._do_incrdecr(key, value, False)

    def touch(self, key, expire=0):
        st = Status()
        try:
            self.cb.touch(key, ttl=expire)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.touch(key, expire)
        return st

    def set(self, key, value, expire=0):
        """
        Write first to Couchbase, and then to Memcached
        :param key: Key to use
        :param value: Value to use
        :param expire: If set, the item will expire in the given amount of time
        :return: Status object if successful (will always be success).
                 on failure an exception is raised
        """
        self.cb.upsert(key, value, ttl=expire)
        self.mc.set(key, value, expire=expire)
        return Status()

    def set_multi(self, values, expire=0):
        """
        Set multiple items.
        :param values: A dictionary of key, value indicating values to store
        :param expire: If present, expiration time for all the items
        :return:
        """
        self.cb.upsert_multi(values, ttl=expire)
        self.mc.set_many(values, expire=expire)
        return Status()

    def replace(self, key, value, expire=0):
        """
        Replace existing items
        :param key: key to replace
        :param value: new value
        :param expire: expiration for item
        :return: Status object. Will be OK
        """
        status = Status()
        try:
            self.cb.replace(key, value, ttl=expire)
        except NotFoundError as e:
            status.cb_error = e

        status.mc_status = self.mc.replace(key, value, expire=expire)
        return status

    def add(self, key, value, expire=0):
        status = Status()
        try:
            self.cb.insert(key, value, ttl=expire)
        except KeyExistsError as e:
            status.cb_error = e

        status.mc_status = self.mc.add(key, value, expire=expire)
        return status

    def _append_prepend(self, key, value, is_append):
        cb_meth = self.cb.append if is_append else self.cb.prepend
        mc_meth = self.mc.append if is_append else self.mc.prepend
        st = Status()

        try:
            cb_meth(key, value, format=FMT_UTF8)
        except (NotStoredError, NotFoundError) as e:
            st.cb_error = e

        st.mc_status = mc_meth(key, value)
        return st

    def append(self, key, value):
        return self._append_prepend(key, value, True)

    def prepend(self, key, value):
        return self._append_prepend(key, value, False)

    def cas(self, key, value, cas, expire=0):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                self.cb.replace(key, value, cas=cas, ttl=expire)
                self.mc.set(key, value, expire=expire)
                return True
            except KeyExistsError:
                return False
            except NotFoundError:
                return None
        else:
            return self.mc.cas(key, value, cas)
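A hypothetical usage sketch of the mirror (connection strings are made up):

mirror = CouchbaseMemcacheMirror('couchbase://localhost/default',
                                 [('127.0.0.1', 11211)])
mirror.set('greeting', 'hello', expire=60)
print(mirror.get('greeting'))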
Example #38
File: memcached.py Project: rocknio/tooz
class MemcachedDriver(coordination._RunWatchersMixin,
                      coordination.CoordinationDriver):
    """A `memcached`_ based driver.

    This driver uses `memcached`_ concepts to provide the coordination driver
    semantics and required API(s). It **is** fully functional and implements
    all of the coordination driver API(s). It stores data into memcache
    using expiries and `msgpack`_ encoded values.

    General recommendations/usage considerations:

    - Memcache (without a different backend technology) is a **cache**,
      enough said.

    .. _memcached: http://memcached.org/
    .. _msgpack: http://msgpack.org/
    """

    CHARACTERISTICS = (
        coordination.Characteristics.DISTRIBUTED_ACROSS_THREADS,
        coordination.Characteristics.DISTRIBUTED_ACROSS_PROCESSES,
        coordination.Characteristics.DISTRIBUTED_ACROSS_HOSTS,
        coordination.Characteristics.CAUSAL,
    )
    """
    Tuple of :py:class:`~tooz.coordination.Characteristics` introspectable
    enum member(s) that can be used to interrogate how this driver works.
    """

    #: Key prefix attached to groups (used in name-spacing keys)
    GROUP_PREFIX = b'_TOOZ_GROUP_'

    #: Key prefix attached to leaders of groups (used in name-spacing keys)
    GROUP_LEADER_PREFIX = b'_TOOZ_GROUP_LEADER_'

    #: Key prefix attached to members of groups (used in name-spacing keys)
    MEMBER_PREFIX = b'_TOOZ_MEMBER_'

    #: Key where all groups 'known' are stored.
    GROUP_LIST_KEY = b'_TOOZ_GROUP_LIST'

    #: Default socket/lock/member/leader timeout used when none is provided.
    DEFAULT_TIMEOUT = 30

    #: String used to keep a key/member alive (until it next expires).
    STILL_ALIVE = b"It's alive!"

    def __init__(self, member_id, parsed_url, options):
        super(MemcachedDriver, self).__init__()
        options = utils.collapse(options)
        self._options = options
        self._member_id = member_id
        self._joined_groups = set()
        self._executor = utils.ProxyExecutor.build("Memcached", options)
        # self.host = (parsed_url.hostname or "localhost",
        #              parsed_url.port or 11211)
        self.host = []
        for one_url in parsed_url:
            tmp = (one_url.hostname or "localhost",
                   one_url.port or 11211)
            self.host.append(tmp)
        default_timeout = options.get('timeout', self.DEFAULT_TIMEOUT)
        self.timeout = int(default_timeout)
        self.membership_timeout = int(options.get(
            'membership_timeout', default_timeout))
        self.lock_timeout = int(options.get(
            'lock_timeout', default_timeout))
        self.leader_timeout = int(options.get(
            'leader_timeout', default_timeout))
        max_pool_size = options.get('max_pool_size', None)
        if max_pool_size is not None:
            self.max_pool_size = int(max_pool_size)
        else:
            self.max_pool_size = None
        self._acquired_locks = []

    @staticmethod
    def _msgpack_serializer(key, value):
        if isinstance(value, six.binary_type):
            return value, 1
        return utils.dumps(value), 2

    @staticmethod
    def _msgpack_deserializer(key, value, flags):
        if flags == 1:
            return value
        if flags == 2:
            return utils.loads(value)
        raise coordination.SerializationError("Unknown serialization"
                                              " format '%s'" % flags)

    @_translate_failures
    def _start(self):
        #self.client = pymemcache_client.PooledClient(
        from pymemcache.client.hash import HashClient
        self.client = HashClient(
            self.host,
            serializer=self._msgpack_serializer,
            deserializer=self._msgpack_deserializer,
            timeout=self.timeout,
            connect_timeout=self.timeout,
            max_pool_size=self.max_pool_size)

        # Run heartbeat here because pymemcache uses a lazy connection
        # method and only connects once you do an operation.
        self.heartbeat()
        self._group_members = collections.defaultdict(set)
        self._executor.start()

    @_translate_failures
    def _stop(self):
        for lock in list(self._acquired_locks):
            lock.release()
        self.client.delete(self._encode_member_id(self._member_id))
        for g in list(self._joined_groups):
            try:
                self.leave_group(g).get()
            except (coordination.MemberNotJoined,
                    coordination.GroupNotCreated):
                # Guess we got booted out/never existed in the first place...
                pass
            except coordination.ToozError:
                LOG.warning("Unable to leave group '%s'", g, exc_info=True)
        self._executor.stop()
        # self.client.close()

    def _encode_group_id(self, group_id):
        return self.GROUP_PREFIX + group_id

    def _encode_member_id(self, member_id):
        return self.MEMBER_PREFIX + member_id

    def _encode_group_leader(self, group_id):
        return self.GROUP_LEADER_PREFIX + group_id

    @_retry.retry()
    def _add_group_to_group_list(self, group_id):
        """Add group to the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        if cas:
            group_list = set(group_list)
            group_list.add(group_id)
            if not self.client.cas(self.GROUP_LIST_KEY,
                                   list(group_list), cas):
                # Someone updated the group list before us, try again!
                raise _retry.Retry
        else:
            if not self.client.add(self.GROUP_LIST_KEY,
                                   [group_id], noreply=False):
                # Someone updated the group list before us, try again!
                raise _retry.Retry

    @_retry.retry()
    def _remove_from_group_list(self, group_id):
        """Remove group from the group list.

        :param group_id: The group id
        """
        group_list, cas = self.client.gets(self.GROUP_LIST_KEY)
        group_list = set(group_list)
        group_list.remove(group_id)
        if not self.client.cas(self.GROUP_LIST_KEY,
                               list(group_list), cas):
            # Someone updated the group list before us, try again!
            raise _retry.Retry

    def create_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_translate_failures
        def _create_group():
            if not self.client.add(encoded_group, {}, noreply=False):
                raise coordination.GroupAlreadyExist(group_id)
            self._add_group_to_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_create_group))

    def get_groups(self):

        @_translate_failures
        def _get_groups():
            return self.client.get(self.GROUP_LIST_KEY) or []

        return MemcachedFutureResult(self._executor.submit(_get_groups))

    def join_group(self, group_id, capabilities=b""):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _join_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id in group_members:
                raise coordination.MemberAlreadyExist(group_id,
                                                      self._member_id)
            group_members[self._member_id] = {
                b"capabilities": capabilities,
            }
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.add(group_id)

        return MemcachedFutureResult(self._executor.submit(_join_group))

    def leave_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _leave_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            del group_members[self._member_id]
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
            self._joined_groups.discard(group_id)

        return MemcachedFutureResult(self._executor.submit(_leave_group))

    def _destroy_group(self, group_id):
        self.client.delete(self._encode_group_id(group_id))

    def delete_group(self, group_id):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _delete_group():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if group_members != {}:
                raise coordination.GroupNotEmpty(group_id)
            # Delete is not atomic, so we first set the group to
            # using CAS, and then we delete it, to avoid race conditions.
            if not self.client.cas(encoded_group, None, cas):
                raise _retry.Retry
            self.client.delete(encoded_group)
            self._remove_from_group_list(group_id)

        return MemcachedFutureResult(self._executor.submit(_delete_group))

    @_retry.retry()
    @_translate_failures
    def _get_members(self, group_id):
        encoded_group = self._encode_group_id(group_id)
        group_members, cas = self.client.gets(encoded_group)
        if group_members is None:
            raise coordination.GroupNotCreated(group_id)
        actual_group_members = {}
        for m, v in six.iteritems(group_members):
            # Never kick self from the group, we know we're alive
            if (m == self._member_id or
               self.client.get(self._encode_member_id(m))):
                actual_group_members[m] = v
        if group_members != actual_group_members:
            # There are some dead members, update the group
            if not self.client.cas(encoded_group, actual_group_members, cas):
                # It changed, let's try again
                raise _retry.Retry
        return actual_group_members

    def get_members(self, group_id):

        def _get_members():
            return self._get_members(group_id).keys()

        return MemcachedFutureResult(self._executor.submit(_get_members))

    def get_member_capabilities(self, group_id, member_id):

        def _get_member_capabilities():
            group_members = self._get_members(group_id)
            if member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, member_id)
            return group_members[member_id][b'capabilities']

        return MemcachedFutureResult(
            self._executor.submit(_get_member_capabilities))

    def update_capabilities(self, group_id, capabilities):
        encoded_group = self._encode_group_id(group_id)

        @_retry.retry()
        @_translate_failures
        def _update_capabilities():
            group_members, cas = self.client.gets(encoded_group)
            if group_members is None:
                raise coordination.GroupNotCreated(group_id)
            if self._member_id not in group_members:
                raise coordination.MemberNotJoined(group_id, self._member_id)
            group_members[self._member_id][b'capabilities'] = capabilities
            if not self.client.cas(encoded_group, group_members, cas):
                # It changed, try again
                raise _retry.Retry

        return MemcachedFutureResult(
            self._executor.submit(_update_capabilities))

    def get_leader(self, group_id):

        def _get_leader():
            return self._get_leader_lock(group_id).get_owner()

        return MemcachedFutureResult(self._executor.submit(_get_leader))

    @_translate_failures
    def heartbeat(self):
        self.client.set(self._encode_member_id(self._member_id),
                        self.STILL_ALIVE,
                        expire=self.membership_timeout)
        # Reset the acquired locks
        for lock in self._acquired_locks:
            lock.heartbeat()
        return min(self.membership_timeout,
                   self.leader_timeout,
                   self.lock_timeout)

    @_translate_failures
    def _init_watch_group(self, group_id):
        members = self.client.get(self._encode_group_id(group_id))
        if members is None:
            raise coordination.GroupNotCreated(group_id)
        # Initialize with the current group member list
        if group_id not in self._group_members:
            self._group_members[group_id] = set(members.keys())

    def watch_join_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_join_group(
            group_id, callback)

    def unwatch_join_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_join_group(
            group_id, callback)

    def watch_leave_group(self, group_id, callback):
        self._init_watch_group(group_id)
        return super(MemcachedDriver, self).watch_leave_group(
            group_id, callback)

    def unwatch_leave_group(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_leave_group(
            group_id, callback)

    def watch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).watch_elected_as_leader(
            group_id, callback)

    def unwatch_elected_as_leader(self, group_id, callback):
        return super(MemcachedDriver, self).unwatch_elected_as_leader(
            group_id, callback)

    def get_lock(self, name):
        return MemcachedLock(self, name, self.lock_timeout)

    def _get_leader_lock(self, group_id):
        return MemcachedLock(self, self._encode_group_leader(group_id),
                             self.leader_timeout)

    @_translate_failures
    def run_elect_coordinator(self):
        for group_id, hooks in six.iteritems(self._hooks_elected_leader):
            # Try to grab the lock, if that fails, that means someone has it
            # already.
            leader_lock = self._get_leader_lock(group_id)
            if leader_lock.acquire(blocking=False):
                # We got the lock
                hooks.run(coordination.LeaderElected(
                    group_id,
                    self._member_id))

    def run_watchers(self, timeout=None):
        result = super(MemcachedDriver, self).run_watchers(timeout=timeout)
        self.run_elect_coordinator()
        return result
Example #39
class MemcacheService(apiproxy_stub.APIProxyStub):
  """Python only memcache service.

  This service keeps all data in any external servers running memcached.
  """
  # The memcached default port.
  MEMCACHE_PORT = 11211

  # An AppScale file which has a list of IPs running memcached.
  APPSCALE_MEMCACHE_FILE = "/etc/appscale/memcache_ips"

  def __init__(self, project_id, service_name='memcache'):
    """Initializer.

    Args:
      service_name: Service name expected for all calls.
    """
    super(MemcacheService, self).__init__(service_name)
    self._memcache = None
    self.setupMemcacheClient()
    self._methods = {MemcacheSetRequest.SET: self._memcache.set,
                     MemcacheSetRequest.ADD: self._memcache.add,
                     MemcacheSetRequest.REPLACE: self._memcache.replace,
                     MemcacheSetRequest.CAS: self._memcache.cas}
    self._project_id = project_id

  def setupMemcacheClient(self):
    """ Sets up the memcache client. """
    if os.path.exists(self.APPSCALE_MEMCACHE_FILE):
      with open(self.APPSCALE_MEMCACHE_FILE, "r") as memcache_file:
        all_ips = memcache_file.read().split("\n")
    else:
      all_ips = ['localhost']

    memcaches = [(ip, self.MEMCACHE_PORT) for ip in all_ips if ip]
    memcaches.sort()
    self._memcache = HashClient(
      memcaches, serializer=serializer, deserializer=deserializer,
      connect_timeout=5, timeout=1, use_pooling=True)

    # The GAE API expects return values for all mutate operations.
    for client in six.itervalues(self._memcache.clients):
      client.default_noreply = False

  def _Dynamic_Get(self, request, response):
    """Implementation of gets for memcache.
     
    Args:
      request: A MemcacheGetRequest protocol buffer.
      response: A MemcacheGetResponse protocol buffer.
    """
    # Remove duplicate keys.
    original_keys = {
      encode_key(self._project_id, request.name_space(), key): key
      for key in request.key_list()}

    try:
      backend_response = self._memcache.get_many(
        original_keys.keys(), gets=request.for_cas())
    except MemcacheClientError as error:
      raise apiproxy_errors.ApplicationError(INVALID_VALUE, str(error))
    except TRANSIENT_ERRORS as error:
      raise apiproxy_errors.ApplicationError(
        UNSPECIFIED_ERROR, 'Transient memcache error: {}'.format(error))

    for encoded_key, value_tuple in six.iteritems(backend_response):
      item = response.add_item()
      item.set_key(original_keys[encoded_key])
      if request.for_cas():
        item.set_cas_id(int(value_tuple[1]))
        value_tuple = value_tuple[0]

      item.set_value(value_tuple[0])
      item.set_flags(value_tuple[1])

  def _Dynamic_Set(self, request, response):
    """Implementation of sets for memcache. 

    Args:
      request: A MemcacheSetRequest.
      response: A MemcacheSetResponse.
    """
    namespace = request.name_space()
    if any(item.set_policy() not in self._methods
           for item in request.item_list()):
      raise apiproxy_errors.ApplicationError(
        INVALID_VALUE, 'Unsupported set_policy')

    if not all(item.has_cas_id() for item in request.item_list()
               if item.set_policy() == MemcacheSetRequest.CAS):
      raise apiproxy_errors.ApplicationError(
        INVALID_VALUE, 'All CAS items must have a cas_id')

    for item in request.item_list():
      try:
        encoded_key = encode_key(self._project_id, namespace, item.key())
      except apiproxy_errors.ApplicationError:
        response.add_set_status(MemcacheSetResponse.ERROR)
        continue

      args = {'key': encoded_key,
              'value': (item.value(), item.flags()),
              'expire': int(item.expiration_time())}
      is_cas = item.set_policy() == MemcacheSetRequest.CAS
      if is_cas:
        args['cas'] = six.binary_type(item.cas_id())

      try:
        backend_response = self._methods[item.set_policy()](**args)
      except (TRANSIENT_ERRORS + (MemcacheClientError,)):
        response.add_set_status(MemcacheSetResponse.ERROR)
        continue

      if backend_response:
        response.add_set_status(MemcacheSetResponse.STORED)
        continue

      if is_cas and backend_response is False:
        response.add_set_status(MemcacheSetResponse.EXISTS)
        continue

      response.add_set_status(MemcacheSetResponse.NOT_STORED)

  def _Dynamic_Delete(self, request, response):
    """Implementation of delete in memcache.

    Args:
      request: A MemcacheDeleteRequest protocol buffer.
      response: A MemcacheDeleteResponse protocol buffer.
    """
    for item in request.item_list():
      encoded_key = encode_key(self._project_id, request.name_space(),
                               item.key())
      try:
        key_existed = self._memcache.delete(encoded_key)
      except MemcacheClientError as error:
        raise apiproxy_errors.ApplicationError(INVALID_VALUE, str(error))
      except TRANSIENT_ERRORS as error:
        raise apiproxy_errors.ApplicationError(
          UNSPECIFIED_ERROR, 'Transient memcache error: {}'.format(error))

      response.add_delete_status(MemcacheDeleteResponse.DELETED if key_existed
                                 else MemcacheDeleteResponse.NOT_FOUND)

  def _Increment(self, namespace, request):
    """Internal function for incrementing from a MemcacheIncrementRequest.

    Args:
      namespace: A string containing the namespace for the request,
        if any. Pass an empty string if there is no namespace.
      request: A MemcacheIncrementRequest instance.

    Returns:
      An integer indicating the new value.
    Raises:
      ApplicationError if unable to perform the mutation.
    """
    encoded_key = encode_key(self._project_id, namespace, request.key())
    method = self._memcache.incr
    if request.direction() == MemcacheIncrementRequest.DECREMENT:
      method = self._memcache.decr

    try:
      response = method(encoded_key, request.delta())
    except MemcacheClientError as error:
      raise apiproxy_errors.ApplicationError(INVALID_VALUE, str(error))
    except TRANSIENT_ERRORS as error:
      raise apiproxy_errors.ApplicationError(
        UNSPECIFIED_ERROR, 'Transient memcache error: {}'.format(error))

    if response is None and not request.has_initial_value():
      raise apiproxy_errors.ApplicationError(
        UNSPECIFIED_ERROR, 'Key does not exist')

    if response is not None:
      return response

    # If the key was not present and an initial value was provided, perform
    # the mutation client-side and set the key if it still doesn't exist.
    flags = 0
    if request.has_initial_flags():
      flags = request.initial_flags()

    if request.direction() == MemcacheIncrementRequest.INCREMENT:
      updated_val = request.initial_value() + request.delta()
    else:
      updated_val = request.initial_value() - request.delta()

    updated_val = max(updated_val, 0) % (MAX_INCR + 1)
    try:
      response = self._memcache.add(
        encoded_key, (six.binary_type(updated_val), flags))
    except (TRANSIENT_ERRORS + (MemcacheClientError,)):
      raise apiproxy_errors.ApplicationError(
        UNSPECIFIED_ERROR, 'Unable to set initial value')

    if response is False:
      raise apiproxy_errors.ApplicationError(
        UNSPECIFIED_ERROR, 'Unable to set initial value')

    return updated_val

  def _Dynamic_Increment(self, request, response):
    """Implementation of increment for memcache.

    Args:
      request: A MemcacheIncrementRequest protocol buffer.
      response: A MemcacheIncrementResponse protocol buffer.
    """
    new_value = self._Increment(request.name_space(), request)
    response.set_new_value(new_value)

  def _Dynamic_BatchIncrement(self, request, response):
    """Implementation of batch increment for memcache.

    Args:
      request: A MemcacheBatchIncrementRequest protocol buffer.
      response: A MemcacheBatchIncrementResponse protocol buffer.
    """
    for request_item in request.item_list():
      item = response.add_item()
      try:
        new_value = self._Increment(request.name_space(), request_item)
      except apiproxy_errors.ApplicationError as error:
        if error.application_error == INVALID_VALUE:
          item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)
        else:
          item.set_increment_status(MemcacheIncrementResponse.ERROR)

        continue

      item.set_increment_status(MemcacheIncrementResponse.OK)
      item.set_new_value(new_value)

  def _Dynamic_FlushAll(self, request, response):
    """Implementation of MemcacheService::FlushAll().

    Args:
      request: A MemcacheFlushRequest.
      response: A MemcacheFlushResponse.
    """
    # TODO: Prevent a project from clearing another project's namespace.
    self._memcache.flush_all()

  def _Dynamic_Stats(self, request, response):
    """Implementation of MemcacheService::Stats().
    
    Args:
      request: A MemcacheStatsRequest.
      response: A MemcacheStatsResponse.
    """
    # TODO: Gather stats for a project rather than the deployment.
    hits = 0
    misses = 0
    byte_hits = 0
    items = 0
    byte_count = 0
    oldest_item_age = 0
    for server in six.itervalues(self._memcache.clients):
      server_stats = server.stats()
      hits += server_stats.get('get_hits', 0)
      misses += server_stats.get('get_misses', 0)
      byte_hits += server_stats.get('bytes_read', 0)
      items += server_stats.get('curr_items', 0)
      byte_count += server_stats.get('bytes', 0)

      # Using the "age" field may not be correct here. The GAE docs claim this
      # should specify "how long in seconds since the oldest item in the cache
      # was accessed" rather than when it was created.
      item_stats = server.stats('items')
      oldest_server_item = max(age for key, age in six.iteritems(item_stats)
                               if key.endswith(':age'))
      oldest_item_age = max(oldest_item_age, oldest_server_item)

    stats = response.mutable_stats()
    stats.set_hits(hits)
    stats.set_misses(misses)
    stats.set_byte_hits(byte_hits)
    stats.set_items(items)
    stats.set_bytes(byte_count)
    stats.set_oldest_item_age(oldest_item_age)
Example #40
 def _get_client(self, key):
     client = HashClient._get_client(self, key)
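     # force synchronous replies so every mutate returns a real result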
     client.default_noreply = False
     return client