Code Example #1
 def _sync_save(self, deployment):
     """
     Save and flush the new configuration to disk synchronously.
     """
     config = Configuration(version=_CONFIG_VERSION, deployment=deployment)
     data = wire_encode(config)
     # Keep a lowercase base16 (hex) digest of the exact bytes that are
     # about to be written to disk.
     self._hash = b16encode(mmh3_hash_bytes(data)).lower()
     self._config_path.setContent(data)
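
The helper mmh3_hash_bytes is not shown in this excerpt. A minimal sketch of
what it plausibly wraps, assuming the ``mmh3`` package's 128-bit digest (the
wrapper below is an assumption, not the verbatim source):

import mmh3

def mmh3_hash_bytes(data):
    # mmh3.hash_bytes returns the raw 16-byte (128-bit) MurmurHash3
    # digest of ``data``. Assumption: the excerpted codebase wraps it
    # roughly like this.
    return mmh3.hash_bytes(data)

With a 16-byte digest, b16encode(...).lower() in _sync_save above yields a
32-character lowercase hex string, which is what ends up in self._hash.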
Code Example #2
def generation_hash(input_object):
    """
    This computes the mmh3 hash of an input object, producing a hash that is
    consistent for deeply persistent objects across Python nodes and
    implementations.

    :returns: An mmh3 hash of input_object.
    """
    # Ensure this is a quick function for basic types:
    # Note that ``type(x) in frozenset([str, int])`` is faster than
    # ``isinstance(x, (str, int))``.
    input_type = type(input_object)
    if (
            input_object is None or
            input_type in _BASIC_JSON_TYPES
    ):
        if input_type == unicode:
            input_type = bytes
            input_object = input_object.encode('utf8')

        if input_type == bytes:
            # Add a token to identify this as a string. This ensures that
            # strings like str('5') are hashed to different values than
            # non-strings with an identical JSON representation, like int(5).
            object_to_process = b''.join([_STR_TOKEN, bytes(input_object)])
        else:
            # For non-string objects, just hash the JSON encoding.
            object_to_process = dumps(input_object)
        return mmh3_hash_bytes(object_to_process)

    is_pyrsistent = _is_pyrsistent(input_object)
    if is_pyrsistent:
        cached = _generation_hash_cache.get(input_object, _UNCACHED_SENTINEL)
        if cached is not _UNCACHED_SENTINEL:
            return cached

    object_to_process = input_object

    if isinstance(object_to_process, PClass):
        object_to_process = object_to_process._to_dict()

    if isinstance(object_to_process, Mapping):
        # Union in a mapping token so that empty maps and empty sets have
        # different hashes.
        object_to_process = frozenset(object_to_process.iteritems()).union(
            [_MAPPING_TOKEN]
        )

    if isinstance(object_to_process, Set):
        sub_hashes = (generation_hash(x) for x in object_to_process)
        result = bytes(
            reduce(_xor_bytes, sub_hashes, bytearray(_NULLSET_TOKEN))
        )
    elif isinstance(object_to_process, Iterable):
        result = mmh3_hash_bytes(b''.join(
            generation_hash(x) for x in object_to_process
        ))
    else:
        result = mmh3_hash_bytes(wire_encode(object_to_process))

    if is_pyrsistent:
        _generation_hash_cache[input_object] = result

    return result
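
The tokens and the XOR reduction give the hash the properties the comments
above describe. A hypothetical usage sketch, assuming the excerpted module is
importable (the assertions are illustrative, not from the source):

# str('5') and int(5) have identical JSON representations, but
# _STR_TOKEN keeps their hashes apart:
assert generation_hash('5') != generation_hash(5)

# _MAPPING_TOKEN keeps the empty map distinct from the empty set:
assert generation_hash({}) != generation_hash(frozenset())

# Set hashes are combined with XOR, a commutative operation, so the
# result cannot depend on iteration order (assuming _xor_bytes XORs
# into the accumulator and returns it, as its use with reduce implies):
a = reduce(_xor_bytes, [b'\x01\x02', b'\x03\x04'], bytearray(2))
b = reduce(_xor_bytes, [b'\x03\x04', b'\x01\x02'], bytearray(2))
assert bytes(a) == bytes(b)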
Code Example #3
    obj_type = type(obj)
    if obj_type == dict:
        # Recursively serialize both the keys and the values of mappings.
        result = dict((_cached_dfs_serialize(key),
                       _cached_dfs_serialize(value))
                      for key, value in obj.iteritems())
    elif obj_type == list or obj_type == tuple:
        # Normalize sequences to plain lists of serialized items.
        result = list(_cached_dfs_serialize(x) for x in obj)

    if is_pyrsistent:
        # Memoize the result so repeated serialization of the same
        # pyrsistent object is cheap.
        _cached_dfs_serialize_cache[input_object] = result

    return result
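
This excerpt begins partway through the function. Its missing head presumably
mirrors the cache-lookup pattern of generation_hash above; a sketch under that
assumption (not the verbatim source):

def _cached_dfs_serialize(input_object):
    # Assumed reconstruction: consult the cache first, exactly as
    # generation_hash does, before walking the object.
    is_pyrsistent = _is_pyrsistent(input_object)
    if is_pyrsistent:
        cached = _cached_dfs_serialize_cache.get(
            input_object, _UNCACHED_SENTINEL)
        if cached is not _UNCACHED_SENTINEL:
            return cached
    obj = input_object
    result = obj  # basic types would pass through unchanged
    # ...followed by the dict/list/tuple branches shown above.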

# Tokens used by ``generation_hash`` above to keep different kinds of
# values from hashing to the same digest.
_NULLSET_TOKEN = mmh3_hash_bytes(b'NULLSET')
_MAPPING_TOKEN = mmh3_hash_bytes(b'MAPPING')
_STR_TOKEN = mmh3_hash_bytes(b'STRING')

_generation_hash_cache = WeakKeyDictionary()
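
The cache is a WeakKeyDictionary, so an entry disappears as soon as the hashed
object itself is garbage-collected; the cache can never keep a configuration
alive by itself. A quick illustration (hypothetical class name; immediate
eviction relies on CPython's reference counting):

from weakref import WeakKeyDictionary

class _Example(object):  # hypothetical stand-in for a pyrsistent object
    pass

cache = WeakKeyDictionary()
key = _Example()
cache[key] = b'digest'
assert key in cache
del key  # dropping the last strong reference evicts the entry
assert len(cache) == 0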


def _xor_bytes(aggregating_bytearray, updating_bytes):
    """
    Aggregate bytes into a bytearray using XOR.

    This function's signature is deliberately shaped so that it can be
    passed directly to ``reduce``: the accumulator comes first and the
    next item second.

    :param bytearray aggregating_bytearray: The accumulator; holds the
        byte-by-byte XOR of both input arguments.