Example #1
def _describe_immutable_share(abs_sharefile, now, si_s, out):
    class ImmediateReadBucketProxy(ReadBucketProxy):
        def __init__(self, sf):
            self.sf = sf
            ReadBucketProxy.__init__(self, None, None, "")

        def __repr__(self):
            return "<ImmediateReadBucketProxy>"

        def _read(self, offset, size):
            return defer.succeed(sf.read_share_data(offset, size))

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    sf = ShareFile(abs_sharefile)
    bp = ImmediateReadBucketProxy(sf)

    expiration_time = min(lease.get_expiration_time()
                          for lease in sf.get_leases())
    expiration = max(0, expiration_time - now)

    UEB_data = call(bp.get_uri_extension)
    unpacked = uri.unpack_extension_readable(UEB_data)

    k = unpacked["needed_shares"]
    N = unpacked["total_shares"]
    filesize = unpacked["size"]
    ueb_hash = unpacked["UEB_hash"]

    print("CHK %s %d/%d %d %s %d %s" %
          (si_s, k, N, filesize, str(ueb_hash, "utf-8"),
           expiration, quote_output(abs_sharefile)),
          file=out)
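
The call() helper used above is not part of this excerpt; a minimal sketch of what it needs to do, assuming (as ImmediateReadBucketProxy guarantees) that the Deferreds involved have already fired, so callbacks run synchronously:

def call(c, *args, **kwargs):
    # Works only because ImmediateReadBucketProxy._read() returns Deferreds
    # that have already fired, so callbacks run before this function returns.
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]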
Example #2
def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"

    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print("[%d..%d):  %d.b%d" % (start, end, offset, bit), file=out)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        # note: "bit" above is only reported; the XOR always flips the low-order
        # bit. bchr() (py2/3 compat helper, imported elsewhere) turns the int
        # back into a single byte.
        d = bchr(ord(d) ^ 0x01)
        f.seek(offset)
        f.write(d)
        f.close()

    with open(fn, "rb") as f:
        prefix = f.read(32)

        if MutableShareFile.is_valid_header(prefix):
            # mutable
            m = MutableShareFile(fn)
            with open(fn, "rb") as f:
                f.seek(m.DATA_OFFSET)
                # Read enough data to get a mutable header to unpack.
                data = f.read(2000)
            # make sure this slot contains an SDMF share
            assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"

            (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
             ig_datalen, offsets) = unpack_header(data)

            assert version == 0, "we only handle v0 SDMF files"
            start = m.DATA_OFFSET + offsets["share_data"]
            end = m.DATA_OFFSET + offsets["enc_privkey"]
            flip_bit(start, end)
        else:
            # otherwise assume it's immutable
            f = ShareFile(fn)
            bp = ReadBucketProxy(None, None, '')
            offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
            start = f._data_offset + offsets["data"]
            end = f._data_offset + offsets["plaintext_hash_tree"]
            flip_bit(start, end)
Example #3
def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d):  %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        d = chr(ord(d) ^ 0x01)
        f.seek(offset)
        f.write(d)
        f.close()

    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        # mutable
        m = MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = ShareFile(fn)
        bp = ReadBucketProxy(None, '', '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
Example #4
    def _iter_share_files(self, storage_index):
        for shnum, filename in self.get_shares(storage_index):
            with open(filename, 'rb') as f:
                header = f.read(32)
            if MutableShareFile.is_valid_header(header):
                sf = MutableShareFile(filename, self)
                # note: if the share has been migrated, the renew_lease()
                # call will throw an exception, with information to help the
                # client update the lease.
            elif ShareFile.is_valid_header(header):
                sf = ShareFile(filename)
            else:
                continue  # non-sharefile
            yield sf
Example #5
    def get_leases(self, storage_index):
        """Provide an iterator that yields all of the leases attached to this
        bucket. Each lease is returned as a LeaseInfo instance.

        This method is not for client use.
        """

        # since all shares get the same lease data, we just grab the leases
        # from the first share
        try:
            shnum, filename = self._get_bucket_shares(storage_index).next()
            sf = ShareFile(filename)
            return sf.get_leases()
        except StopIteration:
            return iter([])
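
This excerpt is Python 2 (note the .next() call on the generator); under Python 3 the same lookup would use the next() builtin, for example:

    def get_leases(self, storage_index):
        # Python 3 variant of the lookup above (same behavior).
        try:
            shnum, filename = next(self._get_bucket_shares(storage_index))
            sf = ShareFile(filename)
            return sf.get_leases()
        except StopIteration:
            return iter([])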
Example #6
def get_share_file(filename):
    with open(filename, "rb") as f:
        prefix = f.read(32)
    if prefix == MutableShareFile.MAGIC:
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
Example #7
def get_share_file(filename):
    with open(filename, "rb") as f:
        prefix = f.read(32)
    if MutableShareFile.is_valid_header(prefix):
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
Example #8
def get_share_file(filename):
    f = open(filename, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        return MutableShareFile(filename)
    # otherwise assume it's immutable
    return ShareFile(filename)
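
A minimal usage sketch for the get_share_file() variants above (the path is hypothetical): the function sniffs the 32-byte header and returns either a MutableShareFile or an immutable ShareFile.

sf = get_share_file("/storage/shares/aa/aaaaaaaaaaaaaaaa/0")  # hypothetical path
if isinstance(sf, MutableShareFile):
    print("mutable share")
else:
    print("immutable share")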
Example #9
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    with open(abs_sharefile, "rb") as f:
        prefix = f.read(32)
        if MutableShareFile.is_valid_header(prefix):
            _describe_mutable_share(abs_sharefile, f, now, si_s, out)
        elif ShareFile.is_valid_header(prefix):
            _describe_immutable_share(abs_sharefile, now, si_s, out)
        else:
            print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile),
                  file=out)
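
The is_valid_header() checks used here correspond to the explicit prefix comparisons in the older excerpts elsewhere in this listing; a rough sketch of equivalent checks, assuming the same on-disk layout (mutable share files start with MutableShareFile.MAGIC, immutable share files with a big-endian version field of 1):

import struct

def looks_mutable(prefix):
    # hypothetical helper: mutable share files begin with the MAGIC string
    return prefix.startswith(MutableShareFile.MAGIC)

def looks_immutable(prefix):
    # hypothetical helper: immutable share files begin with a 4-byte
    # big-endian version number, and version 1 is what these tools expect
    return prefix[:4] == struct.pack(">L", 1)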
Example #10
def dump_immutable_share(options):
    from allmydata.storage.immutable import ShareFile

    out = options.stdout
    f = ShareFile(options['filename'])
    if not options["leases-only"]:
        dump_immutable_chk_share(f, out, options)
    dump_immutable_lease_info(f, out)
    print(file=out)
    return 0
Example #11
    def _iter_share_files(self, storage_index):
        for shnum, filename in self._get_bucket_shares(storage_index):
            with open(filename, 'rb') as f:
                header = f.read(32)
            if header[:32] == MutableShareFile.MAGIC:
                sf = MutableShareFile(filename, self)
                # note: if the share has been migrated, the renew_lease()
                # call will throw an exception, with information to help the
                # client update the lease.
            elif header[:4] == struct.pack(">L", 1):
                sf = ShareFile(filename)
            else:
                continue  # non-sharefile
            yield sf
Example #12
    elif struct.unpack(">L", prefix[:4]) == (1, ):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")

            def __repr__(self):
                return "<ImmediateReadBucketProxy>"

            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min(
            [lease.expiration_time for lease in sf.get_leases()])
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >> out, "CHK %s %d/%d %d %s %d %s" % (
            si_s, k, N, filesize, ueb_hash, expiration,
            quote_output(abs_sharefile))
Example #13
            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
Example #14
    def remote_allocate_buckets(self, storage_index,
                                renew_secret, cancel_secret,
                                sharenums, allocated_size,
                                canary, owner_num=0):
        # owner_num is not for clients to set, but rather it should be
        # curried into the PersonalStorageServer instance that is dedicated
        # to a particular owner.
        start = time.time()
        self.count("allocate")
        alreadygot = set()
        bucketwriters = {} # k: shnum, v: BucketWriter
        si_dir = storage_index_to_dir(storage_index)
        si_s = si_b2a(storage_index)

        log.msg("storage: allocate_buckets %s" % si_s)

        # in this implementation, the lease information (including secrets)
        # goes into the share files themselves. It could also be put into a
        # separate database. Note that the lease should not be added until
        # the BucketWriter has been closed.
        expire_time = time.time() + 31*24*60*60
        lease_info = LeaseInfo(owner_num,
                               renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        max_space_per_bucket = allocated_size

        remaining_space = self.get_available_space()
        limited = remaining_space is not None
        if limited:
            # this is a bit conservative, since some of this allocated_size()
            # has already been written to disk, where it will show up in
            # get_available_space.
            remaining_space -= self.allocated_size()
        # self.readonly_storage causes remaining_space <= 0

        # fill alreadygot with all shares that we have, not just the ones
        # they asked about: this will save them a lot of work. Add or update
        # leases for all of them: if they want us to hold shares for this
        # file, they'll want us to hold leases for this file.
        for (shnum, fn) in self._get_bucket_shares(storage_index):
            alreadygot.add(shnum)
            sf = ShareFile(fn)
            sf.add_or_renew_lease(lease_info)

        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
            if os.path.exists(finalhome):
                # great! we already have it. easy.
                pass
            elif os.path.exists(incominghome):
                # Note that we don't create BucketWriters for shnums that
                # have a partial share (in incoming/), so if a second upload
                # occurs while the first is still in progress, the second
                # uploader will use different storage servers.
                pass
            elif (not limited) or (remaining_space >= max_space_per_bucket):
                # ok! we need to create the new share file.
                bw = BucketWriter(self, incominghome, finalhome,
                                  max_space_per_bucket, lease_info, canary)
                if self.no_storage:
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._active_writers[bw] = 1
                if limited:
                    remaining_space -= max_space_per_bucket
            else:
                # bummer! not enough space to accept this bucket
                pass

        if bucketwriters:
            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

        self.add_latency("allocate", time.time() - start)
        return alreadygot, bucketwriters
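
The hard-coded 31*24*60*60 above is a 31-day lease renewal window; the later allocate_buckets example expresses the same idea through a named constant, roughly like this (value inferred from the arithmetic above, not from the constant's actual definition):

# 31 days, in seconds: the default lease renewal window used above.
DEFAULT_RENEWAL_TIME = 31 * 24 * 60 * 60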
Example #15
            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, "", "", "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
Example #16
    def allocate_buckets(self,
                         storage_index,
                         renew_secret,
                         cancel_secret,
                         sharenums,
                         allocated_size,
                         owner_num=0,
                         renew_leases=True):
        """
        Generic bucket allocation API.

        :param bool renew_leases: If and only if this is ``True`` then renew a
            secret-matching lease on (or, if none match, add a new lease to)
            existing shares in this bucket.  Any *new* shares are given a new
            lease regardless.
        """
        # owner_num is not for clients to set, but rather it should be
        # curried into the PersonalStorageServer instance that is dedicated
        # to a particular owner.
        start = self._clock.seconds()
        self.count("allocate")
        alreadygot = {}
        bucketwriters = {}  # k: shnum, v: BucketWriter
        si_dir = storage_index_to_dir(storage_index)
        si_s = si_b2a(storage_index)

        log.msg("storage: allocate_buckets %r" % si_s)

        # in this implementation, the lease information (including secrets)
        # goes into the share files themselves. It could also be put into a
        # separate database. Note that the lease should not be added until
        # the BucketWriter has been closed.
        expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME
        lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        max_space_per_bucket = allocated_size

        remaining_space = self.get_available_space()
        limited = remaining_space is not None
        if limited:
            # this is a bit conservative, since some of this allocated_size()
            # has already been written to disk, where it will show up in
            # get_available_space.
            remaining_space -= self.allocated_size()
        # self.readonly_storage causes remaining_space <= 0

        # fill alreadygot with all shares that we have, not just the ones
        # they asked about: this will save them a lot of work. Add or update
        # leases for all of them: if they want us to hold shares for this
        # file, they'll want us to hold leases for this file.
        for (shnum, fn) in self.get_shares(storage_index):
            alreadygot[shnum] = ShareFile(fn)
        if renew_leases:
            self._add_or_renew_leases(alreadygot.values(), lease_info)

        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
            if os.path.exists(finalhome):
                # great! we already have it. easy.
                pass
            elif os.path.exists(incominghome):
                # For Foolscap we don't create BucketWriters for shnums that
                # have a partial share (in incoming/), so if a second upload
                # occurs while the first is still in progress, the second
                # uploader will use different storage servers.
                pass
            elif (not limited) or (remaining_space >= max_space_per_bucket):
                # ok! we need to create the new share file.
                bw = BucketWriter(self,
                                  incominghome,
                                  finalhome,
                                  max_space_per_bucket,
                                  lease_info,
                                  clock=self._clock)
                if self.no_storage:
                    # Really this should be done by having a separate class for
                    # this situation; see
                    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3862
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._bucket_writers[incominghome] = bw
                if limited:
                    remaining_space -= max_space_per_bucket
            else:
                # bummer! not enough space to accept this bucket
                pass

        if bucketwriters:
            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

        self.add_latency("allocate", self._clock.seconds() - start)
        return set(alreadygot), bucketwriters
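
A minimal calling sketch for this API (all values hypothetical, and "server" is assumed to be a StorageServer instance): with renew_leases=False, shares the server already holds are reported in alreadygot but their leases are left untouched, while any new BucketWriters still carry the fresh lease.

alreadygot, writers = server.allocate_buckets(
    storage_index=b"\x00" * 16,
    renew_secret=b"\x01" * 32,
    cancel_secret=b"\x02" * 32,
    sharenums={0, 1},
    allocated_size=1024,
    renew_leases=False,
)
for shnum, bw in writers.items():
    ...  # write each share through its BucketWriter, then close it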
Example #17
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min(
            [lease.expiration_time for (i, lease) in m._enumerate_leases(f)])
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == b"\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"
        elif version == b"\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature,
             share_hash_chain, block_hash_tree, share_data,
             enc_privkey) = pieces

            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, str(base32.b2a(root_hash), "utf-8"),
                   expiration, quote_output(abs_sharefile)), file=out)
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0

            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where, length) in readvs:
                        f.seek(m.DATA_OFFSET + where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)

            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, str(base32.b2a(root_hash), "utf-8"),
                   expiration, quote_output(abs_sharefile)), file=out)
        else:
            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

    elif struct.unpack(">L", prefix[:4]) == (1, ):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")

            def __repr__(self):
                return "<ImmediateReadBucketProxy>"

            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min(
            [lease.expiration_time for lease in sf.get_leases()])
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" %
              (si_s, k, N, filesize, str(ueb_hash, "utf-8"),
               expiration, quote_output(abs_sharefile)),
              file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile),
              file=out)

    f.close()
Example #18
    def remote_allocate_buckets(self,
                                storage_index,
                                renew_secret,
                                cancel_secret,
                                sharenums,
                                allocated_size,
                                canary,
                                owner_num=0):
        # owner_num is not for clients to set, but rather it should be
        # curried into the PersonalStorageServer instance that is dedicated
        # to a particular owner.
        start = time.time()
        self.count("allocate")
        alreadygot = set()
        bucketwriters = {}  # k: shnum, v: BucketWriter
        si_dir = storage_index_to_dir(storage_index)
        si_s = si_b2a(storage_index)

        log.msg("storage: allocate_buckets %s" % si_s)

        # in this implementation, the lease information (including secrets)
        # goes into the share files themselves. It could also be put into a
        # separate database. Note that the lease should not be added until
        # the BucketWriter has been closed.
        expire_time = time.time() + 31 * 24 * 60 * 60
        lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        max_space_per_bucket = allocated_size

        remaining_space = self.get_available_space()
        limited = remaining_space is not None
        if limited:
            # this is a bit conservative, since some of this allocated_size()
            # has already been written to disk, where it will show up in
            # get_available_space.
            remaining_space -= self.allocated_size()
        # self.readonly_storage causes remaining_space <= 0

        # fill alreadygot with all shares that we have, not just the ones
        # they asked about: this will save them a lot of work. Add or update
        # leases for all of them: if they want us to hold shares for this
        # file, they'll want us to hold leases for this file.
        for (shnum, fn) in self._get_bucket_shares(storage_index):
            alreadygot.add(shnum)
            sf = ShareFile(fn)
            sf.add_or_renew_lease(lease_info)

        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
            if os.path.exists(finalhome):
                # great! we already have it. easy.
                pass
            elif os.path.exists(incominghome):
                # Note that we don't create BucketWriters for shnums that
                # have a partial share (in incoming/), so if a second upload
                # occurs while the first is still in progress, the second
                # uploader will use different storage servers.
                pass
            elif (not limited) or (remaining_space >= max_space_per_bucket):
                # ok! we need to create the new share file.
                bw = BucketWriter(self, incominghome, finalhome,
                                  max_space_per_bucket, lease_info, canary)
                if self.no_storage:
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._active_writers[bw] = 1
                if limited:
                    remaining_space -= max_space_per_bucket
            else:
                # bummer! not enough space to accept this bucket
                pass

        if bucketwriters:
            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

        self.add_latency("allocate", time.time() - start)
        return alreadygot, bucketwriters
Example #19
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output
    import struct

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        version = f.read(1)
        if version == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"
        elif version == "\x01":
            share_type = "MDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError as e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print("SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        elif share_type == "MDMF":
            from allmydata.mutable.layout import MDMFSlotReadProxy
            fake_shnum = 0
            # TODO: factor this out with dump_MDMF_share()
            class ShareDumper(MDMFSlotReadProxy):
                def _read(self, readvs, force_remote=False, queue=False):
                    data = []
                    for (where,length) in readvs:
                        f.seek(m.DATA_OFFSET+where)
                        data.append(f.read(length))
                    return defer.succeed({fake_shnum: data})

            p = ShareDumper(None, "fake-si", fake_shnum)
            def extract(func):
                stash = []
                # these methods return Deferreds, but we happen to know that
                # they run synchronously when not actually talking to a
                # remote server
                d = func()
                d.addCallback(stash.append)
                return stash[0]

            verinfo = extract(p.get_verinfo)
            (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
             offsets) = verinfo
            print("MDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile)), file=out)
        else:
            print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, None, None, "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   quote_output(abs_sharefile)), file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)

    f.close()
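
A hypothetical driver sketch for describe_share(): walk a storage server's shares directory and describe every file found. The directory layout and the way si_s and shnum_s are derived are assumptions, not something these excerpts define.

import os, sys, time

def describe_all_shares(sharedir, out=sys.stdout):
    now = time.time()
    for (dirpath, dirnames, filenames) in os.walk(sharedir):
        for fn in filenames:
            abs_sharefile = os.path.join(dirpath, fn)
            si_s = os.path.basename(dirpath)  # storage-index directory name (assumption)
            describe_share(abs_sharefile, si_s, fn, now, out)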