Example No. 1
  def _set_multi_with_policy(self, policy, mapping, time=0, key_prefix='',
                             namespace=None):
    """Set multiple keys with a specified policy.

    Helper function for set_multi(), add_multi(), and replace_multi(). This
    reduces the network latency of doing many requests in serial.

    Args:
      policy:  One of MemcacheSetRequest.SET, ADD, or REPLACE.
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either a relative number of seconds
        from the current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.  On network/RPC/server errors,
      a list of all input keys is returned; in this case the keys
      may or may not have been updated.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0.0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    user_key = {}
    server_keys = []
    for key, value in mapping.iteritems():
      server_key = _key_string(key, key_prefix, user_key)
      stored_value, flags = _validate_encode_value(value, self._do_pickle)
      server_keys.append(server_key)

      item = request.add_item()
      item.set_key(server_key)
      item.set_value(stored_value)
      item.set_flags(flags)
      item.set_set_policy(policy)
      item.set_expiration_time(int(math.ceil(time)))
    namespace_manager._add_name_space(request, namespace)

    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.Error:
      return user_key.values()

    assert response.set_status_size() == len(server_keys)

    unset_list = []
    for server_key, set_status in zip(server_keys, response.set_status_list()):
      if set_status != MemcacheSetResponse.STORED:
        unset_list.append(user_key[server_key])

    return unset_list
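
A minimal usage sketch of the public wrapper around this helper. It assumes the standard App Engine runtime, where google.appengine.api.memcache is importable and set_multi() delegates to _set_multi_with_policy() with the SET policy; the keys, values, and prefix below are illustrative only.

import logging

from google.appengine.api import memcache

# set_multi() returns the list of keys that were NOT stored; empty means total success.
failed_keys = memcache.set_multi({'a': 1, 'b': 2}, time=3600, key_prefix='demo:')
if failed_keys:
    # On an RPC failure every input key may come back, so treat this as "retry or degrade".
    logging.warning('memcache.set_multi failed for: %s', failed_keys)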
Example No. 2
  def get(self, key, namespace=None):
    """Looks up a single key in memcache.

    If you have multiple items to load, though, it's much more efficient
    to use get_multi() instead, which loads them in one bulk operation,
    reducing the networking latency that'd otherwise be required to do
    many serialized get() operations.

    Args:
      key: The key in memcache to look up.  See docs on Client
        for details of format.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      The value of the key, if found in memcache, else None.
    """
    request = MemcacheGetRequest()
    request.add_key(_key_string(key))
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheGetResponse()
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.Error:
      return None

    if not response.item_size():
      return None

    return _decode_value(response.item(0).value(),
                         response.item(0).flags(),
                         self._do_unpickle)
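
A hedged sketch of the same lookup through the module-level wrapper, assuming the App Engine runtime; the key, namespace, and fallback value are illustrative.

from google.appengine.api import memcache

greeting = memcache.get('greeting', namespace='demo')
if greeting is None:
    # None can mean either a cache miss or an RPC error; recompute and re-cache.
    greeting = 'hello'
    memcache.set('greeting', greeting, time=60, namespace='demo')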
Example No. 3
    def get(self, key, namespace=None):
        """Looks up a single key in memcache.

    If you have multiple items to load, though, it's much more efficient
    to use get_multi() instead, which loads them in one bulk operation,
    reducing the networking latency that'd otherwise be required to do
    many serialized get() operations.

    Args:
      key: The key in memcache to look up.  See docs on Client
        for details of format.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      The value of the key, if found in memcache, else None.
    """
        request = MemcacheGetRequest()
        request.add_key(_key_string(key))
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheGetResponse()
        try:
            self._make_sync_call('memcache', 'Get', request, response)
        except apiproxy_errors.Error:
            return None

        if not response.item_size():
            return None

        return _decode_value(
            response.item(0).value(),
            response.item(0).flags(), self._do_unpickle)
Example No. 4
  def _incrdecr(self, key, is_negative, delta, namespace=None,
                initial_value=None):
    """Increment or decrement a key by a provided delta.

    Args:
      key: Key to increment or decrement.
      is_negative: Boolean indicating whether this is a decrement.
      delta: Non-negative integer amount (int or long) to increment
        or decrement by.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist.  The default value, None, will not create a cache
        entry if it doesn't already exist.

    Returns:
      New long integer value, or None on cache miss or network/RPC/server
      error.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
    if not isinstance(delta, (int, long)):
      raise TypeError('Delta must be an integer or long, received %r' % delta)
    if delta < 0:
      raise ValueError('Delta must not be negative.')

    request = MemcacheIncrementRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheIncrementResponse()
    request.set_key(_key_string(key))
    request.set_delta(delta)
    if is_negative:
      request.set_direction(MemcacheIncrementRequest.DECREMENT)
    else:
      request.set_direction(MemcacheIncrementRequest.INCREMENT)
    if initial_value is not None:
      request.set_initial_value(long(initial_value))

    try:
      self._make_sync_call('memcache', 'Increment', request, response)
    except apiproxy_errors.Error:
      return None

    if response.has_new_value():
      return response.new_value()
    return None
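
_incrdecr() backs the public incr() and decr() helpers. A minimal sketch assuming the App Engine runtime; the counter key is illustrative.

from google.appengine.api import memcache

# initial_value=0 creates the counter on first use instead of returning None.
hits = memcache.incr('page_hits', delta=1, initial_value=0)
if hits is None:
    # None signals a cache miss (when no initial_value is given) or an RPC failure.
    hits = 0
memcache.decr('page_hits')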
Example No. 5
    def delete(self, key, seconds=0, namespace=None):
        """Deletes a key from memcache.

    Args:
      key: Key to delete.  See docs on Client for details.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      DELETE_NETWORK_FAILURE (0) on network failure,
      DELETE_ITEM_MISSING (1) if the server tried to delete the item but
      didn't have it, or
      DELETE_SUCCESSFUL (2) if the item was actually deleted.
      This can be used as a boolean value, where a network failure is the
      only bad condition.
    """
        if not isinstance(seconds, (int, long, float)):
            raise TypeError('Delete timeout must be a number.')
        if seconds < 0:
            raise ValueError('Delete timeout must be non-negative.')

        request = MemcacheDeleteRequest()
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheDeleteResponse()

        delete_item = request.add_item()
        delete_item.set_key(_key_string(key))
        delete_item.set_delete_time(int(math.ceil(seconds)))
        try:
            self._make_sync_call('memcache', 'Delete', request, response)
        except apiproxy_errors.Error:
            return DELETE_NETWORK_FAILURE
        assert response.delete_status_size() == 1, 'Unexpected status size.'

        if response.delete_status(0) == MemcacheDeleteResponse.DELETED:
            return DELETE_SUCCESSFUL
        elif response.delete_status(0) == MemcacheDeleteResponse.NOT_FOUND:
            return DELETE_ITEM_MISSING
        assert False, 'Unexpected deletion status code.'
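
delete() distinguishes a missing item from a network failure through its return code, so callers can treat only the RPC failure as an error. A sketch assuming the App Engine runtime; the key and lock interval are illustrative.

import logging

from google.appengine.api import memcache

# seconds=5 locks the key against add() for five seconds after deletion.
status = memcache.delete('session:42', seconds=5)
if status == memcache.DELETE_NETWORK_FAILURE:
    # Only a network failure is a real error; a missing item still counts as success.
    logging.error('memcache delete RPC failed')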
Example No. 6
    def grab_tail(self, item_count, namespace):
        """Grab items from the tail of namespace's LRU cache.

    Grab means atomically get and delete.

    This method can be used to create queue systems with very high throughput
    and low latency, but low reliability.

    Args:
      item_count: Number of items to retrieve off the tail of the LRU cache.
      namespace: a string specifying namespace to use in the request. Can't
        be empty.

    Returns:
      A list of values that were present in memcache.

    Raises:
      ValueError: if namespace is empty.
    """
        if not isinstance(item_count, int):
            raise TypeError('Item count must be an integer.')
        if item_count < 0:
            raise ValueError('Item count must not be negative.')
        if item_count >= 0x80000000:
            raise ValueError('Item count must be less than 2147483648.')
        if not namespace:
            raise ValueError('Namespace must not be empty.')
        if not isinstance(namespace, str):
            raise TypeError('Namespace must be a string.')

        request = MemcacheGrabTailRequest()
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheGrabTailResponse()
        request.set_item_count(item_count)
        try:
            self._make_sync_call('memcache', 'GrabTail', request, response)
        except apiproxy_errors.Error:
            return []

        return_value = []
        for returned_item in response.item_list():
            value = _decode_value(returned_item.value(), returned_item.flags(),
                                  self._do_unpickle)
            return_value.append(value)
        return return_value
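
grab_tail() is an experimental Client method (an atomic get-and-delete from the namespace's LRU tail) rather than a module-level function, so the sketch below assumes a Client instance that exposes it; the namespace and the process() handler are hypothetical.

from google.appengine.api import memcache

client = memcache.Client()
# Atomically pop up to 10 values from the tail of the namespace's LRU chain;
# an RPC error simply yields an empty list.
jobs = client.grab_tail(10, namespace='work-queue')
for job in jobs:
    process(job)  # hypothetical handler; the items are already gone from the cache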
Example No. 7
  def grab_tail(self, item_count, namespace):
    """Grab items from the tail of namespace's LRU cache.

    Grab means atomically get and delete.

    This method can be used to create queue systems with very high throughput
    and low latency, but low reliability.

    Args:
      item_count: Number of items to retrieve off the tail of the LRU cache.
      namespace: a string specifying namespace to use in the request. Can't
        be empty.

    Returns:
      A list of values that were present in memcache.

    Raises:
      ValueError: if namespace is empty.
    """
    if not isinstance(item_count, int):
      raise TypeError('Item count must be an integer.')
    if item_count < 0:
      raise ValueError('Item count must not be negative.')
    if item_count >= 0x80000000:
      raise ValueError('Item count must be less than 2147483648.')
    if not namespace:
      raise ValueError('Namespace must not be empty.')
    if not isinstance(namespace, str):
      raise TypeError('Namespace must be a string.')

    request = MemcacheGrabTailRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheGrabTailResponse()
    request.set_item_count(item_count)
    try:
      self._make_sync_call('memcache', 'GrabTail', request, response)
    except apiproxy_errors.Error:
      return []

    return_value = []
    for returned_item in response.item_list():
      value = _decode_value(returned_item.value(), returned_item.flags(),
                            self._do_unpickle)
      return_value.append(value)
    return return_value
Example No. 8
  def delete(self, key, seconds=0, namespace=None):
    """Deletes a key from memcache.

    Args:
      key: Key to delete.  See docs on Client for details.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      DELETE_NETWORK_FAILURE (0) on network failure,
      DELETE_ITEM_MISSING (1) if the server tried to delete the item but
      didn't have it, or
      DELETE_SUCCESSFUL (2) if the item was actually deleted.
      This can be used as a boolean value, where a network failure is the
      only bad condition.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must be non-negative.')

    request = MemcacheDeleteRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheDeleteResponse()

    delete_item = request.add_item()
    delete_item.set_key(_key_string(key))
    delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.Error:
      return DELETE_NETWORK_FAILURE
    assert response.delete_status_size() == 1, 'Unexpected status size.'

    if response.delete_status(0) == MemcacheDeleteResponse.DELETED:
      return DELETE_SUCCESSFUL
    elif response.delete_status(0) == MemcacheDeleteResponse.NOT_FOUND:
      return DELETE_ITEM_MISSING
    assert False, 'Unexpected deletion status code.'
Example No. 9
  def _set_with_policy(self, policy, key, value, time=0, namespace=None):
    """Sets a single key with a specified policy.

    Helper function for set(), add(), and replace().

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, or .REPLACE.
      key: Key to add, set, or replace.  See docs on Client for details.
      value: Value to set.
      time: Expiration time, defaulting to 0 (never expiring).
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if stored, False on RPC error or policy error, e.g. a replace
      that failed because the item did not already exist, or an add
      that failed because the item already existed.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    item = request.add_item()
    item.set_key(_key_string(key))
    stored_value, flags = _validate_encode_value(value, self._do_pickle)
    item.set_value(stored_value)
    item.set_flags(flags)
    item.set_set_policy(policy)
    item.set_expiration_time(int(math.ceil(time)))
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.Error:
      return False
    if response.set_status_size() != 1:
      return False
    return response.set_status(0) == MemcacheSetResponse.STORED
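
The SET, ADD, and REPLACE policies surface as the public set(), add(), and replace() wrappers, which all funnel into this helper. A minimal sketch assuming the App Engine runtime; the keys and values are illustrative.

from google.appengine.api import memcache

memcache.set('color', 'blue', time=3600)   # SET: stores unconditionally
memcache.add('color', 'red')               # ADD: returns False because the key already exists
memcache.replace('theme', 'dark')          # REPLACE: returns False if 'theme' was never stored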
Example No. 10
    def _set_with_policy(self, policy, key, value, time=0, namespace=None):
        """Sets a single key with a specified policy.

    Helper function for set(), add(), and replace().

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, or .REPLACE.
      key: Key to add, set, or replace.  See docs on Client for details.
      value: Value to set.
      time: Expiration time, defaulting to 0 (never expiring).
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if stored, False on RPC error or policy error, e.g. a replace
      that failed because the item did not already exist, or an add
      that failed because the item already existed.
    """
        if not isinstance(time, (int, long, float)):
            raise TypeError('Expiration must be a number.')
        if time < 0:
            raise ValueError('Expiration must not be negative.')

        request = MemcacheSetRequest()
        item = request.add_item()
        item.set_key(_key_string(key))
        stored_value, flags = _validate_encode_value(value, self._do_pickle)
        item.set_value(stored_value)
        item.set_flags(flags)
        item.set_set_policy(policy)
        item.set_expiration_time(int(math.ceil(time)))
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheSetResponse()
        try:
            self._make_sync_call('memcache', 'Set', request, response)
        except apiproxy_errors.Error:
            return False
        if response.set_status_size() != 1:
            return False
        return response.set_status(0) == MemcacheSetResponse.STORED
Example No. 11
    def get_multi(self, keys, key_prefix='', namespace=None):
        """Looks up multiple keys from memcache in one operation.

    This is the recommended way to do bulk loads.

    Args:
      keys: List of keys to look up.  Keys may be strings or
        tuples of (hash_value, string).  Google App Engine
        does the sharding and hashing automatically, though, so the hash
        value is ignored.  To memcache, keys are just series of bytes,
        and not in any particular encoding.
      key_prefix: Prefix to prepend to all keys when talking to the server;
        not included in the returned dictionary.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A dictionary of the keys and values that were present in memcache.
      Even if the key_prefix was specified, that key_prefix won't be on
      the keys in the returned dictionary.
    """
        request = MemcacheGetRequest()
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheGetResponse()
        user_key = {}
        for key in keys:
            request.add_key(_key_string(key, key_prefix, user_key))
        try:
            self._make_sync_call('memcache', 'Get', request, response)
        except apiproxy_errors.Error:
            return {}

        return_value = {}
        for returned_item in response.item_list():
            value = _decode_value(returned_item.value(), returned_item.flags(),
                                  self._do_unpickle)
            return_value[user_key[returned_item.key()]] = value
        return return_value
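
A sketch of get_multi() with a key_prefix, assuming the App Engine runtime; the key names are illustrative. The prefix is applied on the wire but stripped from the keys of the returned dictionary.

from google.appengine.api import memcache

wanted = ['alice', 'bob']
found = memcache.get_multi(wanted, key_prefix='user:')
# Only keys present in memcache appear in `found`, and without the 'user:' prefix.
missing = [name for name in wanted if name not in found]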
Example No. 12
  def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
    """Delete multiple keys at once.

    Args:
      keys: List of keys to delete.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      key_prefix: Prefix to put on all keys when sending specified
        keys to memcache.  See docs for get_multi() and set_multi().
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if all operations completed successfully.  False if one
      or more failed to complete.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must not be negative.')

    request = MemcacheDeleteRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheDeleteResponse()

    for key in keys:
      delete_item = request.add_item()
      delete_item.set_key(_key_string(key, key_prefix=key_prefix))
      delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.Error:
      return False
    return True
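
Unlike delete(), delete_multi() collapses the per-key statuses into a single boolean. A sketch assuming the App Engine runtime; the keys and prefix are illustrative.

import logging

from google.appengine.api import memcache

ok = memcache.delete_multi(['alice', 'bob'], key_prefix='user:')
if not ok:
    # False only signals an RPC failure; keys that were already absent do not fail the call.
    logging.warning('delete_multi RPC failed; cache entries may remain')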
Example No. 13
  def get_multi(self, keys, key_prefix='', namespace=None):
    """Looks up multiple keys from memcache in one operation.

    This is the recommended way to do bulk loads.

    Args:
      keys: List of keys to look up.  Keys may be strings or
        tuples of (hash_value, string).  Google App Engine
        does the sharding and hashing automatically, though, so the hash
        value is ignored.  To memcache, keys are just series of bytes,
        and not in any particular encoding.
      key_prefix: Prefix to prepend to all keys when talking to the server;
        not included in the returned dictionary.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A dictionary of the keys and values that were present in memcache.
      Even if the key_prefix was specified, that key_prefix won't be on
      the keys in the returned dictionary.
    """
    request = MemcacheGetRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheGetResponse()
    user_key = {}
    for key in keys:
      request.add_key(_key_string(key, key_prefix, user_key))
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.Error:
      return {}

    return_value = {}
    for returned_item in response.item_list():
      value = _decode_value(returned_item.value(), returned_item.flags(),
                            self._do_unpickle)
      return_value[user_key[returned_item.key()]] = value
    return return_value
Example No. 14
    def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
        """Delete multiple keys at once.

    Args:
      keys: List of keys to delete.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      key_prefix: Prefix to put on all keys when sending specified
        keys to memcache.  See docs for get_multi() and set_multi().
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if all operations completed successfully.  False if one
      or more failed to complete.
    """
        if not isinstance(seconds, (int, long, float)):
            raise TypeError('Delete timeout must be a number.')
        if seconds < 0:
            raise ValueError('Delete timeout must not be negative.')

        request = MemcacheDeleteRequest()
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheDeleteResponse()

        for key in keys:
            delete_item = request.add_item()
            delete_item.set_key(_key_string(key, key_prefix=key_prefix))
            delete_item.set_delete_time(int(math.ceil(seconds)))
        try:
            self._make_sync_call('memcache', 'Delete', request, response)
        except apiproxy_errors.Error:
            return False
        return True
Example No. 15
  def offset_multi(self, mapping, key_prefix='',
                   namespace=None, initial_value=None):
    """Offsets multiple keys by a delta, incrementing and decrementing in batch.

    Args:
      mapping: Dictionary mapping keys to deltas (positive or negative integers)
        to apply to each corresponding key.
      key_prefix: Prefix to prepend to all keys.
      initial_value: Initial value to put in the cache, if it doesn't
        already exist. The default value, None, will not create a cache
        entry if it doesn't already exist.
      namespace: A string specifying an optional namespace to use in
        the request.

    Returns:
      Dictionary mapping input keys to new integer values. The new value will
      be None if an error occurs, the key does not already exist, or the value
      was not an integer type. The values wrap around at the unsigned 64-bit
      integer maximum, and underflow is floored at zero.
    """
    if initial_value is not None:
      if not isinstance(initial_value, (int, long)):
        raise TypeError('initial_value must be an integer')
      if initial_value < 0:
        raise ValueError('initial_value must be >= 0')

    request = MemcacheBatchIncrementRequest()
    response = MemcacheBatchIncrementResponse()
    namespace_manager._add_name_space(request, namespace)

    for key, delta in mapping.iteritems():
      if not isinstance(delta, (int, long)):
        raise TypeError('Delta must be an integer or long, received %r' % delta)
      if delta >= 0:
        direction = MemcacheIncrementRequest.INCREMENT
      else:
        delta = -delta
        direction = MemcacheIncrementRequest.DECREMENT

      server_key = _key_string(key, key_prefix)

      item = request.add_item()
      item.set_key(server_key)
      item.set_delta(delta)
      item.set_direction(direction)
      if initial_value is not None:
        item.set_initial_value(initial_value)

    try:
      self._make_sync_call('memcache', 'BatchIncrement', request, response)
    except apiproxy_errors.Error:
      return dict((k, None) for k in mapping.iterkeys())

    assert response.item_size() == len(mapping)

    result_dict = {}
    for key, resp_item in zip(mapping.iterkeys(), response.item_list()):
      if (resp_item.increment_status() == MemcacheIncrementResponse.OK and
          resp_item.has_new_value()):
        result_dict[key] = resp_item.new_value()
      else:
        result_dict[key] = None

    return result_dict
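
offset_multi() batches increments and decrements into one RPC, with the sign of each delta choosing the direction. A sketch assuming the App Engine runtime; the keys and deltas are illustrative.

from google.appengine.api import memcache

results = memcache.offset_multi({'hits': 1, 'credits': -5}, initial_value=0)
# results maps each input key to its new value, or to None when that key was
# missing (with no initial_value), held a non-integer, or the RPC failed.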
Example No. 16
    def offset_multi(self,
                     mapping,
                     key_prefix='',
                     namespace=None,
                     initial_value=None):
        """Offsets multiple keys by a delta, incrementing and decrementing in batch.

    Args:
      mapping: Dictionary mapping keys to deltas (positive or negative integers)
        to apply to each corresponding key.
      key_prefix: Prefix to prepend to all keys.
      initial_value: Initial value to put in the cache, if it doesn't
        already exist. The default value, None, will not create a cache
        entry if it doesn't already exist.
      namespace: A string specifying an optional namespace to use in
        the request.

    Returns:
      Dictionary mapping input keys to new integer values. The new value will
      be None if an error occurs, the key does not already exist, or the value
      was not an integer type. The values wrap around at the unsigned 64-bit
      integer maximum, and underflow is floored at zero.
    """
        if initial_value is not None:
            if not isinstance(initial_value, (int, long)):
                raise TypeError('initial_value must be an integer')
            if initial_value < 0:
                raise ValueError('initial_value must be >= 0')

        request = MemcacheBatchIncrementRequest()
        response = MemcacheBatchIncrementResponse()
        namespace_manager._add_name_space(request, namespace)

        for key, delta in mapping.iteritems():
            if not isinstance(delta, (int, long)):
                raise TypeError(
                    'Delta must be an integer or long, received %r' % delta)
            if delta >= 0:
                direction = MemcacheIncrementRequest.INCREMENT
            else:
                delta = -delta
                direction = MemcacheIncrementRequest.DECREMENT

            server_key = _key_string(key, key_prefix)

            item = request.add_item()
            item.set_key(server_key)
            item.set_delta(delta)
            item.set_direction(direction)
            if initial_value is not None:
                item.set_initial_value(initial_value)

        try:
            self._make_sync_call('memcache', 'BatchIncrement', request,
                                 response)
        except apiproxy_errors.Error:
            return dict((k, None) for k in mapping.iterkeys())

        assert response.item_size() == len(mapping)

        result_dict = {}
        for key, resp_item in zip(mapping.iterkeys(), response.item_list()):
            if (resp_item.increment_status() == MemcacheIncrementResponse.OK
                    and resp_item.has_new_value()):
                result_dict[key] = resp_item.new_value()
            else:
                result_dict[key] = None

        return result_dict
Example No. 17
    def _incrdecr(self,
                  key,
                  is_negative,
                  delta,
                  namespace=None,
                  initial_value=None):
        """Increment or decrement a key by a provided delta.

    Args:
      key: Key to increment or decrement. If an iterable collection, each
        one of the keys will be offset.
      is_negative: Boolean indicating whether this is a decrement.
      delta: Non-negative integer amount (int or long) to increment
        or decrement by.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist.  The default value, None, will not create a cache
        entry if it doesn't already exist.

    Returns:
      New long integer value, or None on cache miss or network/RPC/server
      error.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
        if not isinstance(delta, (int, long)):
            raise TypeError('Delta must be an integer or long, received %r' %
                            delta)
        if delta < 0:
            raise ValueError('Delta must not be negative.')

        if not isinstance(key, basestring):
            try:
                iter(key)
                if is_negative:
                    delta = -delta
                return self.offset_multi(dict((k, delta) for k in key),
                                         namespace=namespace,
                                         initial_value=initial_value)
            except TypeError:
                pass

        request = MemcacheIncrementRequest()
        namespace_manager._add_name_space(request, namespace)
        response = MemcacheIncrementResponse()
        request.set_key(_key_string(key))
        request.set_delta(delta)
        if is_negative:
            request.set_direction(MemcacheIncrementRequest.DECREMENT)
        else:
            request.set_direction(MemcacheIncrementRequest.INCREMENT)
        if initial_value is not None:
            request.set_initial_value(long(initial_value))

        try:
            self._make_sync_call('memcache', 'Increment', request, response)
        except apiproxy_errors.Error:
            return None

        if response.has_new_value():
            return response.new_value()
        return None
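
This newer variant of _incrdecr() forwards an iterable of keys to offset_multi(), and the public incr() and decr() inherit that behavior in SDK builds that include it. A sketch assuming such a build on the App Engine runtime; the keys are illustrative.

from google.appengine.api import memcache

single = memcache.incr('hits', initial_value=0)    # one key: returns a number, or None
batch = memcache.incr(['hits', 'visits'], delta=2,
                      initial_value=0)             # iterable: returns a dict via offset_multi()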
Example No. 18
    def _set_multi_with_policy(self,
                               policy,
                               mapping,
                               time=0,
                               key_prefix='',
                               namespace=None):
        """Set multiple keys with a specified policy.

    Helper function for set_multi(), add_multi(), and replace_multi(). This
    reduces the network latency of doing many requests in serial.

    Args:
      policy:  One of MemcacheSetRequest.SET, ADD, or REPLACE.
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either a relative number of seconds
        from the current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.  On network/RPC/server errors,
      a list of all input keys is returned; in this case the keys
      may or may not have been updated.
    """
        if not isinstance(time, (int, long, float)):
            raise TypeError('Expiration must be a number.')
        if time < 0.0:
            raise ValueError('Expiration must not be negative.')

        request = MemcacheSetRequest()
        user_key = {}
        server_keys = []
        for key, value in mapping.iteritems():
            server_key = _key_string(key, key_prefix, user_key)
            stored_value, flags = _validate_encode_value(
                value, self._do_pickle)
            server_keys.append(server_key)

            item = request.add_item()
            item.set_key(server_key)
            item.set_value(stored_value)
            item.set_flags(flags)
            item.set_set_policy(policy)
            item.set_expiration_time(int(math.ceil(time)))
        namespace_manager._add_name_space(request, namespace)

        response = MemcacheSetResponse()
        try:
            self._make_sync_call('memcache', 'Set', request, response)
        except apiproxy_errors.Error:
            return user_key.values()

        assert response.set_status_size() == len(server_keys)

        unset_list = []
        for server_key, set_status in zip(server_keys,
                                          response.set_status_list()):
            if set_status != MemcacheSetResponse.STORED:
                unset_list.append(user_key[server_key])

        return unset_list