Example #1
    def get_multi(self, keys, key_prefix=''):
        self._statlog('mget_multi')
        el = logging.getLogger('evmemc.get_multi')
        el.debug('entered')
        
        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)

        for server, keys in server_keys.items():
            thread.start_new_thread(self.get_multi_worker, (server, keys, prefixed_to_orig_key))
    
        el.debug('workers spawned')
        # wait for workers to die
        wcount = len(server_keys)
        retval = {}
        el.info('[%s] initial wcount=%d', coev.getpos(), wcount)
        while wcount > 0:
            el.info('[%s] wcount=%d', coev.getpos(), wcount)
            wcount -= 1
            try:
                retval.update(coev.switch2scheduler())
            except Exception, e:
                el.error('worker failed: %s', e)
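
For orientation, a minimal usage sketch of get_multi with a key prefix; the client instance name mc is assumed here, mirroring the doctest style used in set_multi below. Keys sent to the servers carry the prefix, while the returned dict is keyed by the caller's original names, since each worker un-prefixes the keys it receives back.

    # Illustrative only: mc is a hypothetical instance of this client class.
    mc.set_multi({'key1': 'val1', 'key2': 'val2'}, key_prefix='subspace_')
    # The request is fanned out with one worker per server; results are merged
    # and returned under the original (un-prefixed) key names.
    values = mc.get_multi(['key1', 'key2'], key_prefix='subspace_')
    assert values == {'key1': 'val1', 'key2': 'val2'}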
Example #2
    def set_multi(self, mapping, ttl=0, key_prefix='', min_compress_len=0):
        '''
        Sets multiple keys in memcache using just one query per server.

        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
        >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
        True


        This method is recommended over regular L{set} as it lowers the number of
        total packets flying around your network, reducing total latency, since
        your app doesn't have to wait for each round-trip of L{set} before sending
        the next one.

        @param mapping: A dict of key/value pairs to set.
        @param ttl: Tells memcached the time at which this value should expire, either
        as a delta number of seconds, or an absolute unix time-since-the-epoch
        value. See the memcached protocol docs section "Storage Commands"
        for more info on <exptime>. We default to 0 == cache forever.
        @param key_prefix:  Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
            >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
            >>> len(notset_keys) == 0
            True
            >>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
            True

            Causes keys 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
            In this case, the return value would be the list of original keys that were not set, without the prefix applied.

        @param min_compress_len: The threshold length to kick in auto-compression
        of the value using the zlib.compress() routine. If the value being cached is
        a string, then the length of the string is measured, else if the value is an
        object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
        indicating don't ever try to compress.
        @return: List of keys which failed to be stored [ memcache out of memory, etc. ].
        @rtype: list

        '''
        
        self._statlog('mset_multi')
        el = logging.getLogger('evmemc.set_multi')
        el.debug('entered')
        
        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)

        for server, keys in server_keys.items():
            thread.start_new_thread(self.set_multi_worker, (server, keys, prefixed_to_orig_key, mapping, ttl, min_compress_len))
    
        el.debug('workers spawned')
        # wait for workers to die
        wcount = len(server_keys)
        retval = []
        el.info('[%s] initial wcount=%d', coev.getpos(), wcount)
        while wcount > 0:
            el.info('[%s] wcount=%d', coev.getpos(), wcount)
            wcount -= 1
            try:
                rv = coev.switch2scheduler()
                el.debug("worker retval %r", retval)
                retval += rv 
            except Exception, e:
                el.error('worker failed: %s', e)
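
Both multi methods rely on _map_and_prefix_keys to bucket keys per server. Its implementation is not shown in these examples, but the call sites imply two return values roughly like the sketch below; the server objects and key distribution are invented for illustration.

    # server_keys: maps each server object to the list of prefixed keys it owns.
    # prefixed_to_orig_key: lets a worker translate a returned key back to the caller's key.
    server_keys = {
        server_a: ['subspace_key1', 'subspace_key3'],
        server_b: ['subspace_key2'],
    }
    prefixed_to_orig_key = {
        'subspace_key1': 'key1',
        'subspace_key2': 'key2',
        'subspace_key3': 'key3',
    }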
Example #3
    def get_multi_worker(self, server, keys, prefixed_to_orig_key):
        el = logging.getLogger("evmemc.get_multi_worker")
        retvals = {}
        connection = server.connect()
        connection.send_cmd("get %s" % " ".join(keys))
        line = connection.readline()
        while line and line != 'END':
            rkey, flags, rlen = self._expectvalue(connection, line)
            try:
                if rkey is not None:
                    val = self._recv_value(connection, flags, rlen)
                    retvals[prefixed_to_orig_key[rkey]] = val  # un-prefix returned key
            except KeyError:
                raise KeyError("'%s' using conn %d in [%s]" % (rkey, id(connection.sfile.conn), coev.getpos()))
            line = connection.readline()
        return retvals
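
For reference, get_multi_worker speaks the memcached text protocol: a single "get" command names every key owned by that server, and the reply is a series of VALUE blocks terminated by END. _expectvalue parses each VALUE header line and _recv_value reads the data block that follows it. A schematic exchange, with keys and payloads invented:

    # client -> server
    get subspace_key1 subspace_key3\r\n
    # server -> client (one VALUE block per key found, then the terminator)
    VALUE subspace_key1 0 4\r\n
    val1\r\n
    VALUE subspace_key3 0 4\r\n
    val3\r\n
    END\r\n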
Example #4
        el.debug('workers spawned')
        # wait for workers to die
        wcount = len(server_keys)
        retval = []
        el.info('[%s] initial wcount=%d', coev.getpos(), wcount)
        while wcount > 0:
            el.info('[%s] wcount=%d', coev.getpos(), wcount)
            wcount -= 1
            try:
                rv = coev.switch2scheduler()
                el.debug("worker retval %r", retval)
                retval += rv 
            except Exception, e:
                el.error('worker failed: %s', e)
        el.debug('returning %d keys', len(retval))
        el.info('[%s] workers collected; returning', coev.getpos())
        return retval

    def old_set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):

        self._statlog('set_multi')
        el = logging.getLogger('evmemc.set_multi')
        el.debug('entered')


        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
        connection_keys = {}

        # send out all requests on each server before reading anything
        dead_servers = []