Example #1
def get_share_file(filename):
    with open(filename, "rb") as f:
        prefix = f.read(32)
    if MutableShareFile.is_valid_header(prefix):
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
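A minimal usage sketch (the share path, the import, and the print calls are illustrative assumptions, not part of the example): get_share_file() reads the first 32 bytes of a file under a storage server's shares/ tree and returns the matching container class.

from allmydata.storage.mutable import MutableShareFile

share = get_share_file("/tmp/storage/shares/ab/absiexample/0")  # made-up path
if isinstance(share, MutableShareFile):
    print("mutable slot")
else:
    print("immutable share")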
Example #2
 def remote_slot_readv(self, storage_index, shares, readv):
     start = time.time()
     self.count("readv")
     si_s = si_b2a(storage_index)
     lp = log.msg("storage: slot_readv %s %s" % (si_s, shares),
                  facility="tahoe.storage", level=log.OPERATIONAL)
     si_dir = storage_index_to_dir(storage_index)
     # shares exist if there is a file for them
     bucketdir = os.path.join(self.sharedir, si_dir)
     if not os.path.isdir(bucketdir):
         self.add_latency("readv", time.time() - start)
         return {}
     datavs = {}
     for sharenum_s in os.listdir(bucketdir):
         try:
             sharenum = int(sharenum_s)
         except ValueError:
             continue
         if sharenum in shares or not shares:
             filename = os.path.join(bucketdir, sharenum_s)
             msf = MutableShareFile(filename, self)
             datavs[sharenum] = msf.readv(readv)
     log.msg("returning shares %s" % (datavs.keys(),),
             facility="tahoe.storage", level=log.NOISY, parent=lp)
     self.add_latency("readv", time.time() - start)
     return datavs
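The method above fixes the call shape for slot reads: shares is a list of wanted share numbers, where an empty list selects every share in the bucket (the "sharenum in shares or not shares" test), and readv is a list of (offset, length) pairs applied to each selected share. A hedged sketch of a call; the server object and storage index are assumptions:

storage_index = b"\x00" * 16          # hypothetical 16-byte storage index
datavs = server.remote_slot_readv(
    storage_index,
    shares=[],                        # empty list means "all share numbers"
    readv=[(0, 32), (32, 100)],       # two ranges read from each share
)
# datavs maps share number -> [data for each (offset, length) pair]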
Example #3
 def remote_slot_readv(self, storage_index, shares, readv):
     start = time.time()
     self.count("readv")
     si_s = si_b2a(storage_index)
     lp = log.msg("storage: slot_readv %s %s" % (si_s, shares),
                  facility="tahoe.storage",
                  level=log.OPERATIONAL)
     si_dir = storage_index_to_dir(storage_index)
     # shares exist if there is a file for them
     bucketdir = os.path.join(self.sharedir, si_dir)
     if not os.path.isdir(bucketdir):
         self.add_latency("readv", time.time() - start)
         return {}
     datavs = {}
     for sharenum_s in os.listdir(bucketdir):
         try:
             sharenum = int(sharenum_s)
         except ValueError:
             continue
         if sharenum in shares or not shares:
             filename = os.path.join(bucketdir, sharenum_s)
             msf = MutableShareFile(filename, self)
             datavs[sharenum] = msf.readv(readv)
     log.msg("returning shares %s" % (datavs.keys(), ),
             facility="tahoe.storage",
             level=log.NOISY,
             parent=lp)
     self.add_latency("readv", time.time() - start)
     return datavs
Example #4
    def _collect_mutable_shares_for_storage_index(self, bucketdir,
                                                  write_enabler, si_s):
        """
        Gather up existing mutable shares for the given storage index.

        :param bytes bucketdir: The filesystem path containing shares for the
            given storage index.

        :param bytes write_enabler: The write enabler secret for the shares.

        :param bytes si_s: The storage index in encoded (base32) form.

        :raise BadWriteEnablerError: If the write enabler is not correct for
            any of the collected shares.

        :return dict[int, MutableShareFile]: The collected shares in a mapping
            from integer share numbers to ``MutableShareFile`` instances.
        """
        shares = {}
        if os.path.isdir(bucketdir):
            # shares exist if there is a file for them
            for sharenum_s in os.listdir(bucketdir):
                try:
                    sharenum = int(sharenum_s)
                except ValueError:
                    continue
                filename = os.path.join(bucketdir, sharenum_s)
                msf = MutableShareFile(filename, self)
                msf.check_write_enabler(write_enabler, si_s)
                shares[sharenum] = msf
        return shares
Example #5
def corrupt_share(options):
    import random
    from future.utils import bchr  # turns an int back into a single byte
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"

    # first, what kind of share is it?

    def flip_bit(start, end):
        # pick a random byte in [start, end) and a random bit within it
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print("[%d..%d):  %d.b%d" % (start, end, offset, bit), file=out)
        with open(fn, "rb+") as f:
            f.seek(offset)
            d = f.read(1)
            d = bchr(ord(d) ^ (1 << bit))  # flip the chosen bit
            f.seek(offset)
            f.write(d)

    with open(fn, "rb") as f:
        prefix = f.read(32)

        if MutableShareFile.is_valid_header(prefix):
            # mutable
            m = MutableShareFile(fn)
            with open(fn, "rb") as f:
                f.seek(m.DATA_OFFSET)
                # Read enough data to get a mutable header to unpack.
                data = f.read(2000)
            # make sure this slot contains an SDMF share
            assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"

            (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
             ig_datalen, offsets) = unpack_header(data)

            assert version == 0, "we only handle v0 SDMF files"
            start = m.DATA_OFFSET + offsets["share_data"]
            end = m.DATA_OFFSET + offsets["enc_privkey"]
            flip_bit(start, end)
        else:
            # otherwise assume it's immutable
            f = ShareFile(fn)
            bp = ReadBucketProxy(None, None, '')
            offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
            start = f._data_offset + offsets["data"]
            end = f._data_offset + offsets["plaintext_hash_tree"]
            flip_bit(start, end)
Example #6
    def get_slot_leases(self, storage_index):
        """
        This method is not for client use.

        :note: Only for mutable shares.

        :return: An iterable of the leases attached to this slot.
        """
        for _, share_filename in self._get_bucket_shares(storage_index):
            share = MutableShareFile(share_filename)
            # leases attach to the slot as a whole, so the first share's
            # list suffices
            return share.get_leases()
        return []
Example #7
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile))
        else:
            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
Example #8
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min(
            [lease.expiration_time for (i, lease) in m._enumerate_leases(f)])
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
             share_hash_chain, block_hash_tree, share_data,
             enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile))
        else:
            print >> out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
Example #9
 def _iter_share_files(self, storage_index):
     for shnum, filename in self.get_shares(storage_index):
         with open(filename, 'rb') as f:
             header = f.read(32)
         if MutableShareFile.is_valid_header(header):
             sf = MutableShareFile(filename, self)
             # note: if the share has been migrated, the renew_lease()
             # call will throw an exception, with information to help the
             # client update the lease.
         elif ShareFile.is_valid_header(header):
             sf = ShareFile(filename)
         else:
             continue  # non-sharefile
         yield sf
Example #10
def get_share_file(filename):
    with open(filename, "rb") as f:
        prefix = f.read(32)
    if prefix == MutableShareFile.MAGIC:
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
Example #11
def get_share_file(filename):
    f = open(filename, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
Example #12
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    with open(abs_sharefile, "rb") as f:
        prefix = f.read(32)
        if MutableShareFile.is_valid_header(prefix):
            _describe_mutable_share(abs_sharefile, f, now, si_s, out)
        elif ShareFile.is_valid_header(prefix):
            _describe_immutable_share(abs_sharefile, now, si_s, out)
        else:
            print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile),
                  file=out)
Example #13
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == b"\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == b"\x01":
        share_type = "MDMF"
    f.close()

    print(file=out)
    print("Mutable slot found:", file=out)
    print(" share_type: %s" % share_type, file=out)
    print(" write_enabler: %s" % unicode(base32.b2a(WE), "utf-8"), file=out)
    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
    print(" num_extra_leases: %d" % num_extra_leases, file=out)
    print(" container_size: %d" % container_size, file=out)
    print(" data_length: %d" % data_length, file=out)
    if leases:
        for (leasenum, lease) in leases:
            print(file=out)
            print(" Lease #%d:" % leasenum, file=out)
            print("  ownerid: %d" % lease.owner_num, file=out)
            when = format_expiration_time(lease.expiration_time)
            print("  expires in %s" % when, file=out)
            print("  renew_secret: %s" %
                  unicode(base32.b2a(lease.renew_secret), "utf-8"),
                  file=out)
            print("  cancel_secret: %s" %
                  unicode(base32.b2a(lease.cancel_secret), "utf-8"),
                  file=out)
            print("  secrets are for nodeid: %s" %
                  idlib.nodeid_b2a(lease.nodeid),
                  file=out)
    else:
        print("No leases.", file=out)
    print(file=out)

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0
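dump_mutable_share() touches only two parts of its options argument, the 'filename' key and the stdout attribute, so it can be driven without the full CLI machinery. A sketch with a hypothetical stand-in object (FakeOptions and the path are assumptions, not the real CLI classes):

import sys

class FakeOptions(dict):
    stdout = sys.stdout

opts = FakeOptions()
opts['filename'] = "/tmp/storage/shares/ab/absiexample/0"  # made-up path
# dump_mutable_share(opts) then prints the slot header, leases, and share data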
Example #14
 def _iter_share_files(self, storage_index):
     for shnum, filename in self._get_bucket_shares(storage_index):
         with open(filename, 'rb') as f:
             header = f.read(32)
         if header[:32] == MutableShareFile.MAGIC:
             sf = MutableShareFile(filename, self)
             # note: if the share has been migrated, the renew_lease()
             # call will throw an exception, with information to help the
             # client update the lease.
         elif header[:4] == struct.pack(">L", 1):
             sf = ShareFile(filename)
         else:
             continue  # non-sharefile
         yield sf
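Examples #9, #12, and #15 call is_valid_header() where this older variant compares the raw prefix inline; the two spellings test the same bytes. A sketch of what such helpers plausibly look like, inferred from the inline checks here rather than taken from the real allmydata implementation:

import struct

def mutable_header_is_valid(header):
    # mutable slots begin with the 32-byte MutableShareFile.MAGIC string
    return header[:32] == MutableShareFile.MAGIC

def immutable_header_is_valid(header):
    # immutable share files begin with a big-endian uint32 version field of 1
    return header[:4] == struct.pack(">L", 1)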
Example #15
def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util.encodingutil import quote_output

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print("share filename: %s" % quote_output(options['filename']), file=out)

    with open(options['filename'], "rb") as f:
        if MutableShareFile.is_valid_header(f.read(32)):
            return dump_mutable_share(options)
        # otherwise assume it's immutable
        return dump_immutable_share(options)
Example #16
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == "\x00":
        # this slot contains an SMDF share
        share_type = "SDMF"
    elif version == "\x01":
        share_type = "MDMF"
    f.close()

    print(file=out)
    print("Mutable slot found:", file=out)
    print(" share_type: %s" % share_type, file=out)
    print(" write_enabler: %s" % str(base32.b2a(WE), "utf-8"), file=out)
    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
    print(" num_extra_leases: %d" % num_extra_leases, file=out)
    print(" container_size: %d" % container_size, file=out)
    print(" data_length: %d" % data_length, file=out)
    if leases:
        for (leasenum, lease) in leases:
            print(file=out)
            print(" Lease #%d:" % leasenum, file=out)
            print("  ownerid: %d" % lease.owner_num, file=out)
            when = format_expiration_time(lease.expiration_time)
            print("  expires in %s" % when, file=out)
            print("  renew_secret: %s" %
                  str(base32.b2a(lease.renew_secret), "utf-8"), file=out)
            print("  cancel_secret: %s" %
                  str(base32.b2a(lease.cancel_secret), "utf-8"), file=out)
            print("  secrets are for nodeid: %s" %
                  idlib.nodeid_b2a(lease.nodeid), file=out)
    else:
        print("No leases.", file=out)
    print(file=out)

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0
Example #17
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"
        elif version == "\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0
            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where,length) in readvs:
                        f.seek(m.DATA_OFFSET+where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)
            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        else:
            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   quote_output(abs_sharefile)), file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)

    f.close()
Example #18
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min(
            [lease.expiration_time for (i, lease) in m._enumerate_leases(f)])
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == b"\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"
        elif version == b"\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
             share_hash_chain, block_hash_tree, share_data,
             enc_privkey) = pieces

            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, unicode(base32.b2a(root_hash), "utf-8"),
                   expiration, quote_output(abs_sharefile)), file=out)
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0

            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where, length) in readvs:
                        f.seek(m.DATA_OFFSET + where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)

            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, unicode(base32.b2a(root_hash), "utf-8"),
                   expiration, quote_output(abs_sharefile)), file=out)
        else:
            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

    elif struct.unpack(">L", prefix[:4]) == (1, ):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")

            def __repr__(self):
                return "<ImmediateReadBucketProxy>"

            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min(
            [lease.expiration_time for lease in sf.get_leases()])
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" %
              (si_s, k, N, filesize, unicode(
                  ueb_hash, "utf-8"), expiration, quote_output(abs_sharefile)),
              file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile),
              file=out)

    f.close()
Example #19
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"
        elif version == "\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile))
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0
            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where,length) in readvs:
                        f.seek(m.DATA_OFFSET+where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)
            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print >>out, "MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile))
        else:
            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
Example #20
    def remote_slot_testv_and_readv_and_writev(self, storage_index,
                                               secrets,
                                               test_and_write_vectors,
                                               read_vector):
        start = time.time()
        self.count("writev")
        si_s = si_b2a(storage_index)
        log.msg("storage: slot_writev %s" % si_s)
        si_dir = storage_index_to_dir(storage_index)
        (write_enabler, renew_secret, cancel_secret) = secrets
        # shares exist if there is a file for them
        bucketdir = os.path.join(self.sharedir, si_dir)
        shares = {}
        if os.path.isdir(bucketdir):
            for sharenum_s in os.listdir(bucketdir):
                try:
                    sharenum = int(sharenum_s)
                except ValueError:
                    continue
                filename = os.path.join(bucketdir, sharenum_s)
                msf = MutableShareFile(filename, self)
                msf.check_write_enabler(write_enabler, si_s)
                shares[sharenum] = msf
        # write_enabler is good for all existing shares.

        # Now evaluate test vectors.
        testv_is_good = True
        for sharenum in test_and_write_vectors:
            (testv, datav, new_length) = test_and_write_vectors[sharenum]
            if sharenum in shares:
                if not shares[sharenum].check_testv(testv):
                    self.log("testv failed: [%d]: %r" % (sharenum, testv))
                    testv_is_good = False
                    break
            else:
                # compare the vectors against an empty share, in which all
                # reads return empty strings.
                if not EmptyShare().check_testv(testv):
                    self.log("testv failed (empty): [%d] %r" % (sharenum,
                                                                testv))
                    testv_is_good = False
                    break

        # now gather the read vectors, before we do any writes
        read_data = {}
        for sharenum, share in shares.items():
            read_data[sharenum] = share.readv(read_vector)

        ownerid = 1 # TODO
        expire_time = time.time() + 31*24*60*60   # one month
        lease_info = LeaseInfo(ownerid,
                               renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        if testv_is_good:
            # now apply the write vectors
            for sharenum in test_and_write_vectors:
                (testv, datav, new_length) = test_and_write_vectors[sharenum]
                if new_length == 0:
                    if sharenum in shares:
                        shares[sharenum].unlink()
                else:
                    if sharenum not in shares:
                        # allocate a new share
                        allocated_size = 2000 # arbitrary, really
                        share = self._allocate_slot_share(bucketdir, secrets,
                                                          sharenum,
                                                          allocated_size,
                                                          owner_num=0)
                        shares[sharenum] = share
                    shares[sharenum].writev(datav, new_length)
                    # and update the lease
                    shares[sharenum].add_or_renew_lease(lease_info)

            # delete the bucket directory if it is now empty
            if not os.listdir(bucketdir):
                os.rmdir(bucketdir)

        # all done
        self.add_latency("writev", time.time() - start)
        return (testv_is_good, read_data)
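The vectors unpacked above have a fixed shape: test_and_write_vectors maps each share number to a (testv, datav, new_length) triple, where a test vector entry is an (offset, length, operator, specimen) comparison (the operator is typically b"eq") and a write vector entry is an (offset, data) pair. A hedged sketch of one call; the server object, storage index, and secrets are assumptions:

test_and_write_vectors = {
    0: (
        [(0, 8, b"eq", b"\x00" * 8)],  # test: first 8 bytes must be all zero
        [(0, b"new data")],            # write: applied only if every test passes
        None,                          # new_length: None leaves the size alone
    ),
}
read_vector = [(0, 32)]                # read back 32 bytes from every share
# (ok, read_data) = server.remote_slot_testv_and_readv_and_writev(
#     storage_index, (write_enabler, renew_secret, cancel_secret),
#     test_and_write_vectors, read_vector)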
Example #21
def _describe_mutable_share(abs_sharefile, f, now, si_s, out):
    # mutable share
    m = MutableShareFile(abs_sharefile)
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    data_length = m._read_data_length(f)
    expiration_time = min(
        [lease.get_expiration_time() for (i, lease) in m._enumerate_leases(f)])
    expiration = max(0, expiration_time - now)

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == b"\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == b"\x01":
        share_type = "MDMF"

    if share_type == "SDMF":
        f.seek(m.DATA_OFFSET)

        # Read at least the mutable header length, if possible.  If there's
        # less data than that in the share, don't try to read more (we won't
        # be able to unpack the header in this case but we surely don't want
        # to try to unpack bytes *following* the data section as if they were
        # header data).  Rather than 2000 we could use HEADER_LENGTH from
        # allmydata/mutable/layout.py, probably.
        data = f.read(min(data_length, 2000))

        try:
            pieces = unpack_share(data)
        except NeedMoreDataError as e:
            # retry once with the larger size
            size = e.needed_bytes
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, size))
            pieces = unpack_share(data)
        (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
         share_hash_chain, block_hash_tree, share_data, enc_privkey) = pieces

        print("SDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
               seqnum, str(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    elif share_type == "MDMF":
        fake_shnum = 0

        # TODO: factor this out with dump_MDMF_share()
        class ShareDumper(MDMFSlotReadProxy):
            def _read(self, readvs, force_remote=False, queue=False):
                data = []
                for (where, length) in readvs:
                    f.seek(m.DATA_OFFSET + where)
                    data.append(f.read(length))
                return defer.succeed({fake_shnum: data})

        p = ShareDumper(None, "fake-si", fake_shnum)

        def extract(func):
            stash = []
            # these methods return Deferreds, but we happen to know that
            # they run synchronously when not actually talking to a
            # remote server
            d = func()
            d.addCallback(stash.append)
            return stash[0]

        verinfo = extract(p.get_verinfo)
        (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
         offsets) = verinfo
        print("MDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
               seqnum, str(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    else:
        print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
Example #22
    def remote_slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                               test_and_write_vectors,
                                               read_vector):
        start = time.time()
        self.count("writev")
        si_s = si_b2a(storage_index)
        log.msg("storage: slot_writev %s" % si_s)
        si_dir = storage_index_to_dir(storage_index)
        (write_enabler, renew_secret, cancel_secret) = secrets
        # shares exist if there is a file for them
        bucketdir = os.path.join(self.sharedir, si_dir)
        shares = {}
        if os.path.isdir(bucketdir):
            for sharenum_s in os.listdir(bucketdir):
                try:
                    sharenum = int(sharenum_s)
                except ValueError:
                    continue
                filename = os.path.join(bucketdir, sharenum_s)
                msf = MutableShareFile(filename, self)
                msf.check_write_enabler(write_enabler, si_s)
                shares[sharenum] = msf
        # write_enabler is good for all existing shares.

        # Now evaluate test vectors.
        testv_is_good = True
        for sharenum in test_and_write_vectors:
            (testv, datav, new_length) = test_and_write_vectors[sharenum]
            if sharenum in shares:
                if not shares[sharenum].check_testv(testv):
                    self.log("testv failed: [%d]: %r" % (sharenum, testv))
                    testv_is_good = False
                    break
            else:
                # compare the vectors against an empty share, in which all
                # reads return empty strings.
                if not EmptyShare().check_testv(testv):
                    self.log("testv failed (empty): [%d] %r" %
                             (sharenum, testv))
                    testv_is_good = False
                    break

        # now gather the read vectors, before we do any writes
        read_data = {}
        for sharenum, share in shares.items():
            read_data[sharenum] = share.readv(read_vector)

        ownerid = 1  # TODO
        expire_time = time.time() + 31 * 24 * 60 * 60  # one month
        lease_info = LeaseInfo(ownerid, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        if testv_is_good:
            # now apply the write vectors
            for sharenum in test_and_write_vectors:
                (testv, datav, new_length) = test_and_write_vectors[sharenum]
                if new_length == 0:
                    if sharenum in shares:
                        shares[sharenum].unlink()
                else:
                    if sharenum not in shares:
                        # allocate a new share
                        allocated_size = 2000  # arbitrary, really
                        share = self._allocate_slot_share(bucketdir,
                                                          secrets,
                                                          sharenum,
                                                          allocated_size,
                                                          owner_num=0)
                        shares[sharenum] = share
                    shares[sharenum].writev(datav, new_length)
                    # and update the lease
                    shares[sharenum].add_or_renew_lease(lease_info)

            # delete the bucket directory if it is now empty
            if not os.listdir(bucketdir):
                os.rmdir(bucketdir)

        # all done
        self.add_latency("writev", time.time() - start)
        return (testv_is_good, read_data)