Example #1
def _convergence_hasher_tag(k, n, segsize, convergence):
    """
    Create the convergence hashing tag.

    :param int k: Required shares (in [1..256]).
    :param int n: Total shares (in [1..256]).
    :param int segsize: Maximum segment size.
    :param bytes convergence: The convergence secret.

    :return bytes: The bytestring to use as a tag in the convergence hash.
    """
    assert isinstance(convergence, bytes)
    if k > n:
        raise ValueError("k > n not allowed; k = {}, n = {}".format(k, n))
    if k < 1 or n < 1:
        # It doesn't make sense to have zero shares.  Zero shares carry no
        # information and cannot encode any part of the application data.
        raise ValueError("k, n < 1 not allowed; k = {}, n = {}".format(k, n))
    if k > 256 or n > 256:
        # ZFEC supports encoding application data into a maximum of 256
        # shares.  If we ignore the limitations of ZFEC, it may be fine to use
        # a configuration with more shares than that and it may be fine to
        # construct a convergence tag from such a configuration.  Since ZFEC
        # is the only supported encoder, though, this is moot for now.
        raise ValueError("k, n > 256 not allowed; k = {}, n = {}".format(k, n))
    param_tag = netstring(b"%d,%d,%d" % (k, n, segsize))
    tag = CONVERGENT_ENCRYPTION_TAG + netstring(convergence) + param_tag
    return tag
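
For orientation, here is a minimal sketch of what the resulting tag looks like. The netstring() helper and the CONVERGENT_ENCRYPTION_TAG value below are illustrative stand-ins for the library's own definitions, not the actual constants.

def netstring(s):
    assert isinstance(s, bytes)
    return b"%d:%s," % (len(s), s)

CONVERGENT_ENCRYPTION_TAG = netstring(b"convergent_encryption_tag_v1")  # hypothetical value

tag = _convergence_hasher_tag(k=3, n=10, segsize=128 * 1024, convergence=b"\x00" * 16)
# The tag concatenates the base tag, the netstring-framed convergence
# secret, and the netstring-framed "k,n,segsize" parameters:
assert tag == CONVERGENT_ENCRYPTION_TAG + netstring(b"\x00" * 16) + netstring(b"3,10,131072")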
Example #2
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxiliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
    precondition((writekey is None) or isinstance(writekey, str), writekey)

    has_aux = isinstance(children, AuxValueDict)
    entries = []
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        entry = None
        (child, metadata) = children[name]
        child.raise_error()
        if deep_immutable and not child.is_allowed_in_immutable_directory():
            raise MustBeDeepImmutableError(
                "child %s is not allowed in an immutable directory" % quote_output(name, encoding="utf-8"), name
            )
        if has_aux:
            entry = children.get_aux(name)
        if not entry:
            assert IFilesystemNode.providedBy(child), (name, child)
            assert isinstance(metadata, dict)
            rw_uri = child.get_write_uri()
            if rw_uri is None:
                rw_uri = ""
            assert isinstance(rw_uri, str), rw_uri

            # should be prevented by MustBeDeepImmutableError check above
            assert not (rw_uri and deep_immutable)

            ro_uri = child.get_readonly_uri()
            if ro_uri is None:
                ro_uri = ""
            assert isinstance(ro_uri, str), ro_uri
            if writekey is not None:
                writecap = netstring(_encrypt_rw_uri(writekey, rw_uri))
            else:
                writecap = ZERO_LEN_NETSTR
            entry = "".join(
                [
                    netstring(name.encode("utf-8")),
                    netstring(strip_prefix_for_ro(ro_uri, deep_immutable)),
                    writecap,
                    netstring(simplejson.dumps(metadata)),
                ]
            )
        entries.append(netstring(entry))
    return "".join(entries)
Example #3
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxiliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
    precondition((writekey is None) or isinstance(writekey, str), writekey)

    has_aux = isinstance(children, AuxValueDict)
    entries = []
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        entry = None
        (child, metadata) = children[name]
        child.raise_error()
        if deep_immutable and not child.is_allowed_in_immutable_directory():
            raise MustBeDeepImmutableError(
                "child %s is not allowed in an immutable directory" %
                quote_output(name, encoding='utf-8'), name)
        if has_aux:
            entry = children.get_aux(name)
        if not entry:
            assert IFilesystemNode.providedBy(child), (name, child)
            assert isinstance(metadata, dict)
            rw_uri = child.get_write_uri()
            if rw_uri is None:
                rw_uri = ""
            assert isinstance(rw_uri, str), rw_uri

            # should be prevented by MustBeDeepImmutableError check above
            assert not (rw_uri and deep_immutable)

            ro_uri = child.get_readonly_uri()
            if ro_uri is None:
                ro_uri = ""
            assert isinstance(ro_uri, str), ro_uri
            if writekey is not None:
                writecap = netstring(_encrypt_rw_uri(writekey, rw_uri))
            else:
                writecap = ZERO_LEN_NETSTR
            entry = "".join([
                netstring(name.encode("utf-8")),
                netstring(strip_prefix_for_ro(ro_uri, deep_immutable)),
                writecap,
                netstring(json.dumps(metadata))
            ])
        entries.append(netstring(entry))
    return "".join(entries)
Example #4
 def test_split(self):
     a = netstring("hello") + netstring("world")
     self.failUnlessEqual(split_netstring(a, 2), (["hello", "world"], len(a)))
     self.failUnlessEqual(split_netstring(a, 2, required_trailer=""), (["hello", "world"], len(a)))
     self.failUnlessRaises(ValueError, split_netstring, a, 3)
     self.failUnlessRaises(ValueError, split_netstring, a+" extra", 2, required_trailer="")
     self.failUnlessEqual(split_netstring(a+" extra", 2), (["hello", "world"], len(a)))
     self.failUnlessEqual(split_netstring(a+"++", 2, required_trailer="++"),
                          (["hello", "world"], len(a)+2))
     self.failUnlessRaises(ValueError,
                           split_netstring, a+"+", 2, required_trailer="not")
Example #5
    def check_directory(self, contents):
        """I will tell you if a new directory needs to be created for a given
        set of directory contents, or if I know of an existing (immutable)
        directory that can be used instead.

        'contents' should be a dictionary that maps from child name (a single
        unicode string) to immutable childcap (filecap or dircap).

        I return a DirectoryResult object, synchronously. If r.was_created()
        returns False, you should create the directory (with
        t=mkdir-immutable). When you are finished, call r.did_create(dircap)
        so I can update my database.

        If was_created() returns a dircap, you might be able to avoid the
        mkdir. Call r.should_check(), and if it says False, you can skip the
        mkdir and use the dircap returned by was_created().

        If should_check() returns True, you should perform a check operation
        on the dircap returned by was_created(). If the check indicates the
        directory is healthy, please call
        r.did_check_healthy(checker_results) so I can update the database,
        using the de-JSONized response from the webapi t=check call for
        'checker_results'. If the check indicates the directory is not
        healthy, please repair or re-create the directory and call
        r.did_create(dircap) when you're done.
        """

        now = time.time()
        entries = []
        for name in contents:
            entries.append([name.encode("utf-8"), contents[name]])
        entries.sort()
        data = "".join([
            netstring(name_utf8) + netstring(cap)
            for (name_utf8, cap) in entries
        ])
        dirhash = backupdb_dirhash(data)
        dirhash_s = base32.b2a(dirhash)
        c = self.cursor
        c.execute(
            "SELECT dircap, last_checked"
            " FROM directories WHERE dirhash=?", (dirhash_s, ))
        row = c.fetchone()
        if not row:
            return DirectoryResult(self, dirhash_s, None, False)
        (dircap, last_checked) = row
        age = now - last_checked

        probability = ((age - self.NO_CHECK_BEFORE) /
                       (self.ALWAYS_CHECK_AFTER - self.NO_CHECK_BEFORE))
        probability = min(max(probability, 0.0), 1.0)
        should_check = bool(random.random() < probability)

        return DirectoryResult(self, dirhash_s, to_str(dircap), should_check)
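
The re-check decision above is a linear ramp: directories younger than NO_CHECK_BEFORE are never re-checked, those older than ALWAYS_CHECK_AFTER always are, and in between the probability rises linearly with age. A minimal sketch, with illustrative constants rather than the database's actual settings:

NO_CHECK_BEFORE = 30 * 24 * 3600      # hypothetical: one month, in seconds
ALWAYS_CHECK_AFTER = 60 * 24 * 3600   # hypothetical: two months, in seconds

def check_probability(age_seconds):
    p = (age_seconds - NO_CHECK_BEFORE) / (ALWAYS_CHECK_AFTER - NO_CHECK_BEFORE)
    return min(max(p, 0.0), 1.0)      # clamp to [0.0, 1.0]

assert check_probability(0) == 0.0                    # too young: never check
assert check_probability(ALWAYS_CHECK_AFTER) == 1.0   # old enough: always check
assert 0.0 < check_probability(45 * 24 * 3600) < 1.0  # in between: linear ramp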
Example #6
 def test_split(self):
     a = netstring(b"hello") + netstring(b"world")
     for s in split_netstring(a, 2)[0]:
         self.assertIsInstance(s, bytes)
     self.failUnlessEqual(split_netstring(a, 2), ([b"hello", b"world"], len(a)))
     self.failUnlessEqual(split_netstring(a, 2, required_trailer=b""), ([b"hello", b"world"], len(a)))
     self.failUnlessRaises(ValueError, split_netstring, a, 3)
     self.failUnlessRaises(ValueError, split_netstring, a+b" extra", 2, required_trailer=b"")
     self.failUnlessEqual(split_netstring(a+b" extra", 2), ([b"hello", b"world"], len(a)))
     self.failUnlessEqual(split_netstring(a+b"++", 2, required_trailer=b"++"),
                          ([b"hello", b"world"], len(a)+2))
     self.failUnlessRaises(ValueError,
                           split_netstring, a+b"+", 2, required_trailer=b"not")
Example #7
 def test_nested(self):
     a = netstring(b"hello") + netstring(b"world") + b"extra stuff"
     b = netstring(b"a") + netstring(b"is") + netstring(a) + netstring(b".")
     (top, pos) = split_netstring(b, 4)
     self.failUnlessEqual(len(top), 4)
     self.failUnlessEqual(top[0], b"a")
     self.failUnlessEqual(top[1], b"is")
     self.failUnlessEqual(top[2], a)
     self.failUnlessEqual(top[3], b".")
     self.failUnlessRaises(ValueError, split_netstring, a, 2, required_trailer=b"")
     bottom = split_netstring(a, 2)
     self.failUnlessEqual(bottom, ([b"hello", b"world"], len(netstring(b"hello")+netstring(b"world"))))
Example #8
 def test_nested(self):
     a = netstring("hello") + netstring("world") + "extra stuff"
     b = netstring("a") + netstring("is") + netstring(a) + netstring(".")
     (top, pos) = split_netstring(b, 4)
     self.failUnlessEqual(len(top), 4)
     self.failUnlessEqual(top[0], "a")
     self.failUnlessEqual(top[1], "is")
     self.failUnlessEqual(top[2], a)
     self.failUnlessEqual(top[3], ".")
     self.failUnlessRaises(ValueError, split_netstring, a, 2, required_trailer="")
     bottom = split_netstring(a, 2)
     self.failUnlessEqual(bottom, (["hello", "world"], len(netstring("hello")+netstring("world"))))
Example #9
    return salt + crypttext + mac
    # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
    # produce it for the sake of older readers.

def pack_children(childrenx, writekey, deep_immutable=False):
    # initial_children must have metadata (i.e. {} instead of None)
    children = {}
    for (namex, (node, metadata)) in childrenx.iteritems():
        precondition(isinstance(metadata, dict),
                     "directory creation requires metadata to be a dict, not None", metadata)
        children[normalize(namex)] = (node, metadata)

    return _pack_normalized_children(children, writekey=writekey, deep_immutable=deep_immutable)


ZERO_LEN_NETSTR = netstring(b'')
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxiliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
Example #10
    return salt + crypttext + mac
    # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
    # produce it for the sake of older readers.

def pack_children(childrenx, writekey, deep_immutable=False):
    # initial_children must have metadata (i.e. {} instead of None)
    children = {}
    for (namex, (node, metadata)) in childrenx.iteritems():
        precondition(isinstance(metadata, dict),
                     "directory creation requires metadata to be a dict, not None", metadata)
        children[normalize(namex)] = (node, metadata)

    return _pack_normalized_children(children, writekey=writekey, deep_immutable=deep_immutable)


ZERO_LEN_NETSTR = netstring('')
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxiliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
Example #11
def tagged_pair_hash(tag, val1, val2, truncate_to=None):
    s = _SHA256d_Hasher(truncate_to)
    s.update(netstring(tag))
    s.update(netstring(val1))
    s.update(netstring(val2))
    return s.digest()
Example #12
def tagged_hasher(tag, truncate_to=None):
    hasher = _SHA256d_Hasher(truncate_to)
    hasher.update(netstring(tag))
    return hasher
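
For reference, here is a hedged sketch of what these tagged hashers compute, assuming _SHA256d_Hasher is double SHA-256 (SHA-256d) optionally truncated, as in Tahoe-LAFS. It uses only hashlib plus the netstring framing shown elsewhere on this page; tagged_hash is a name invented for the sketch:

import hashlib

def netstring(s):
    return b"%d:%s," % (len(s), s)

def tagged_hash(tag, val, truncate_to=None):
    # Frame tag and value as netstrings, then apply SHA-256 twice.
    inner = hashlib.sha256(netstring(tag) + netstring(val)).digest()
    digest = hashlib.sha256(inner).digest()
    return digest[:truncate_to] if truncate_to else digest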
Example #13
def convergence_hasher(k, n, segsize, convergence):
    assert isinstance(convergence, str)
    param_tag = netstring("%d,%d,%d" % (k, n, segsize))
    tag = CONVERGENT_ENCRYPTION_TAG + netstring(convergence) + param_tag
    return tagged_hasher(tag, KEYLEN)
Example #14
 def test_extra(self):
     a = netstring(b"hello")
     self.failUnlessEqual(split_netstring(a, 1), ([b"hello"], len(a)))
     b = netstring(b"hello") + b"extra stuff"
     self.failUnlessEqual(split_netstring(b, 1),
                          ([b"hello"], len(a)))
Example #15
 def test_extra(self):
     a = netstring("hello")
     self.failUnlessEqual(split_netstring(a, 1), (["hello"], len(a)))
     b = netstring("hello") + "extra stuff"
     self.failUnlessEqual(split_netstring(b, 1),
                          (["hello"], len(a)))
Example #16
 def test_encode(self):
     """netstring() correctly encodes the given bytes."""
     result = netstring(b"abc")
     self.assertEqual(result, b"3:abc,")
     self.assertIsInstance(result, bytes)
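
Pulling the tests on this page together, here is a hedged from-scratch sketch of the two helpers they exercise. It matches the observed behaviour (including returning the position plus the trailer length when required_trailer is given), but it is a reconstruction from the tests, not the Tahoe-LAFS source:

def netstring(s):
    assert isinstance(s, bytes)
    return b"%d:%s," % (len(s), s)

def split_netstring(data, numstrings, required_trailer=None):
    """Pull numstrings netstrings off the front of data; return
    (list_of_payloads, position_consumed)."""
    assert isinstance(data, bytes)
    elements = []
    position = 0
    for _ in range(numstrings):
        colon = data.index(b":", position)   # raises ValueError if missing
        length = int(data[position:colon])
        payload = data[colon + 1 : colon + 1 + length]
        if len(payload) != length:
            raise ValueError("netstring payload is truncated")
        if data[colon + 1 + length : colon + 2 + length] != b",":
            raise ValueError("netstring is missing its trailing comma")
        elements.append(payload)
        position = colon + 2 + length
    if required_trailer is not None:
        if data[position:] != required_trailer:
            raise ValueError("leftover data does not match required_trailer")
        position += len(required_trailer)
    return (elements, position)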