Example #1
0
 def get_leases(self):
     """Yield a LeaseInfo instance for every lease in this share file."""
     with open(self.home, 'rb') as f:
         # 12-byte header: version, unused word, lease count
         header = f.read(0xc)
         (version, unused, num_leases) = struct.unpack(">LLL", header)
         f.seek(self._lease_offset)
         for _ in range(num_leases):
             record = f.read(self.LEASE_SIZE)
             if not record:
                 continue
             yield LeaseInfo().from_immutable_data(record)
Example #2
0
 def _make_lease_info(self, renew_secret, cancel_secret):
     """
     :return LeaseInfo: Information for a new lease for a share.
     """
     one_month = 31 * 24 * 60 * 60
     expiration = time.time() + one_month
     owner = 1  # TODO
     return LeaseInfo(owner, renew_secret, cancel_secret,
                      expiration, self.my_nodeid)
Example #3
0
 def _make_lease_info(self, renew_secret, cancel_secret):
     """
     :return LeaseInfo: Information for a new lease for a share.
     """
     owner = 1  # TODO
     expiration = DEFAULT_RENEWAL_TIME + self._clock.seconds()
     return LeaseInfo(owner, renew_secret, cancel_secret,
                      expiration, self.my_nodeid)
Example #4
0
 def remote_add_lease(self, storage_index, renew_secret, cancel_secret,
                      owner_num=1):
     """Add (or renew) a one-month lease on every share of this bucket."""
     start = time.time()
     self.count("add-lease")
     one_month = 31 * 24 * 60 * 60
     lease = LeaseInfo(owner_num,
                       renew_secret, cancel_secret,
                       time.time() + one_month, self.my_nodeid)
     for share_file in self._iter_share_files(storage_index):
         share_file.add_or_renew_lease(lease)
     self.add_latency("add-lease", time.time() - start)
     return None
Example #5
0
 def add_lease(self,
               storage_index,
               renew_secret,
               cancel_secret,
               owner_num=1):
     """Add (or renew) a lease on every share of this bucket."""
     start = self._clock.seconds()
     self.count("add-lease")
     lease = LeaseInfo(owner_num, renew_secret, cancel_secret,
                       self._clock.seconds() + DEFAULT_RENEWAL_TIME,
                       self.my_nodeid)
     shares = self._iter_share_files(storage_index)
     self._add_or_renew_leases(shares, lease)
     self.add_latency("add-lease", self._clock.seconds() - start)
     return None
Example #6
0
 def _read_lease_record(self, f, lease_number):
     """Return the LeaseInfo stored at *lease_number*, or None if the
     slot is blank (owner_num == 0)."""
     extra_lease_offset = self._read_extra_lease_offset(f)
     num_extra_leases = self._read_num_extra_leases(f)
     if lease_number < 4:
         # the first four lease slots live in the fixed header region
         offset = self.HEADER_SIZE + lease_number * self.LEASE_SIZE
     elif (lease_number - 4) < num_extra_leases:
         # later slots live past extra_lease_offset; the +4 skips a
         # 4-byte field there (presumably the extra-lease count)
         offset = (extra_lease_offset + 4 +
                   (lease_number - 4) * self.LEASE_SIZE)
     else:
         raise IndexError("No such lease number %d" % lease_number)
     f.seek(offset)
     assert f.tell() == offset
     record = f.read(self.LEASE_SIZE)
     info = LeaseInfo().from_mutable_data(record)
     if info.owner_num == 0:
         # owner 0 marks an empty / cancelled slot
         return None
     return info
Example #7
0
    def cancel_lease(self, cancel_secret):
        """Remove any leases with the given cancel_secret. If the last lease
        is cancelled, the file will be removed. Return the number of bytes
        that were freed (by truncating the list of leases, and possibly by
        deleting the file). Raise IndexError if there was no lease with the
        given cancel_secret."""

        accepting_nodeids = set()
        modified = 0   # lease slots overwritten with the blank record
        remaining = 0  # leases whose cancel_secret did not match
        # A lease with owner_num 0 marks an empty slot (see
        # _read_lease_record); matching leases are blanked in place and the
        # list is compacted afterwards by _pack_leases.
        blank_lease = LeaseInfo(owner_num=0,
                                renew_secret="\x00" * 32,
                                cancel_secret="\x00" * 32,
                                expiration_time=0,
                                nodeid="\x00" * 20)
        with open(self.home, 'rb+') as f:
            for (leasenum, lease) in self._enumerate_leases(f):
                accepting_nodeids.add(lease.nodeid)
                # constant-time comparison so secret bytes don't leak via
                # timing differences
                if timing_safe_compare(lease.cancel_secret, cancel_secret):
                    self._write_lease_record(f, leasenum, blank_lease)
                    modified += 1
                else:
                    remaining += 1
            if modified:
                # compact the lease list, dropping the blanked slots
                freed_space = self._pack_leases(f)
                # close before stat/unlink; the 'with' close is a no-op then
                f.close()
                if not remaining:
                    # no leases survive: count the whole file as freed and
                    # delete it
                    freed_space += os.stat(self.home)[stat.ST_SIZE]
                    self.unlink()
                return freed_space

        # nothing matched: report which nodeids *do* hold leases here
        msg = ("Unable to cancel non-existent lease. I have leases "
               "accepted by nodeids: ")
        msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid))
                         for anid in accepting_nodeids])
        msg += " ."
        raise IndexError(msg)
Example #8
0
    def remote_slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                               test_and_write_vectors,
                                               read_vector):
        """
        Test, read, and conditionally write mutable share slots.

        :param storage_index: which bucket of shares to operate on
        :param secrets: (write_enabler, renew_secret, cancel_secret)
        :param test_and_write_vectors: dict mapping sharenum to
            (testv, datav, new_length); the writes are applied only if
            every test vector passes.
        :param read_vector: read vector applied to every existing share,
            evaluated before any writes
        :return: (testv_is_good, read_data)
        """
        start = time.time()
        self.count("writev")
        si_s = si_b2a(storage_index)
        log.msg("storage: slot_writev %s" % si_s)
        si_dir = storage_index_to_dir(storage_index)
        (write_enabler, renew_secret, cancel_secret) = secrets
        # shares exist if there is a file for them
        bucketdir = os.path.join(self.sharedir, si_dir)
        shares = {}
        if os.path.isdir(bucketdir):
            for sharenum_s in os.listdir(bucketdir):
                try:
                    sharenum = int(sharenum_s)
                except ValueError:
                    # ignore stray non-share files in the bucket directory
                    continue
                filename = os.path.join(bucketdir, sharenum_s)
                msf = MutableShareFile(filename, self)
                msf.check_write_enabler(write_enabler, si_s)
                shares[sharenum] = msf
        # write_enabler is good for all existing shares.

        # Now evaluate test vectors.
        testv_is_good = True
        for sharenum in test_and_write_vectors:
            (testv, datav, new_length) = test_and_write_vectors[sharenum]
            if sharenum in shares:
                if not shares[sharenum].check_testv(testv):
                    self.log("testv failed: [%d]: %r" % (sharenum, testv))
                    testv_is_good = False
                    break
            else:
                # compare the vectors against an empty share, in which all
                # reads return empty strings.
                if not EmptyShare().check_testv(testv):
                    self.log("testv failed (empty): [%d] %r" %
                             (sharenum, testv))
                    testv_is_good = False
                    break

        # now gather the read vectors, before we do any writes
        read_data = {}
        for sharenum, share in shares.items():
            read_data[sharenum] = share.readv(read_vector)

        ownerid = 1  # TODO
        expire_time = time.time() + 31 * 24 * 60 * 60  # one month
        lease_info = LeaseInfo(ownerid, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        if testv_is_good:
            # now apply the write vectors
            for sharenum in test_and_write_vectors:
                (testv, datav, new_length) = test_and_write_vectors[sharenum]
                if new_length == 0:
                    # new_length 0 means "delete this share"
                    if sharenum in shares:
                        shares[sharenum].unlink()
                else:
                    if sharenum not in shares:
                        # allocate a new share
                        allocated_size = 2000  # arbitrary, really
                        share = self._allocate_slot_share(bucketdir,
                                                          secrets,
                                                          sharenum,
                                                          allocated_size,
                                                          owner_num=0)
                        shares[sharenum] = share
                    shares[sharenum].writev(datav, new_length)
                    # and update the lease
                    shares[sharenum].add_or_renew_lease(lease_info)

            # Remove the bucket directory if the deletions above emptied it.
            # BUGFIX: the old guard `if new_length == 0:` read the loop
            # variable *after* the loop, so it raised NameError when
            # test_and_write_vectors was empty, reflected only the last
            # share's new_length, and os.listdir() blew up when the
            # directory had never been created.
            if os.path.isdir(bucketdir) and not os.listdir(bucketdir):
                os.rmdir(bucketdir)

        # all done
        self.add_latency("writev", time.time() - start)
        return (testv_is_good, read_data)
Example #9
0
    def remote_allocate_buckets(self,
                                storage_index,
                                renew_secret,
                                cancel_secret,
                                sharenums,
                                allocated_size,
                                canary,
                                owner_num=0):
        """
        Allocate BucketWriters for the requested share numbers.

        :return: (alreadygot, bucketwriters) -- the set of share numbers
            this server already holds, and a dict mapping each newly
            allocated share number to its BucketWriter.  Shares we already
            have get their lease added or renewed immediately; shares with
            a partial upload in incoming/ are silently skipped; shares that
            don't fit in the remaining space get no writer.
        """
        # owner_num is not for clients to set, but rather it should be
        # curried into the PersonalStorageServer instance that is dedicated
        # to a particular owner.
        start = time.time()
        self.count("allocate")
        alreadygot = set()
        bucketwriters = {}  # k: shnum, v: BucketWriter
        si_dir = storage_index_to_dir(storage_index)
        si_s = si_b2a(storage_index)

        log.msg("storage: allocate_buckets %s" % si_s)

        # in this implementation, the lease information (including secrets)
        # goes into the share files themselves. It could also be put into a
        # separate database. Note that the lease should not be added until
        # the BucketWriter has been closed.
        expire_time = time.time() + 31 * 24 * 60 * 60
        lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        max_space_per_bucket = allocated_size

        remaining_space = self.get_available_space()
        limited = remaining_space is not None
        if limited:
            # this is a bit conservative, since some of this allocated_size()
            # has already been written to disk, where it will show up in
            # get_available_space.
            remaining_space -= self.allocated_size()
        # self.readonly_storage causes remaining_space <= 0

        # fill alreadygot with all shares that we have, not just the ones
        # they asked about: this will save them a lot of work. Add or update
        # leases for all of them: if they want us to hold shares for this
        # file, they'll want us to hold leases for this file.
        for (shnum, fn) in self._get_bucket_shares(storage_index):
            alreadygot.add(shnum)
            sf = ShareFile(fn)
            sf.add_or_renew_lease(lease_info)

        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
            if os.path.exists(finalhome):
                # great! we already have it. easy.
                pass
            elif os.path.exists(incominghome):
                # Note that we don't create BucketWriters for shnums that
                # have a partial share (in incoming/), so if a second upload
                # occurs while the first is still in progress, the second
                # uploader will use different storage servers.
                pass
            elif (not limited) or (remaining_space >= max_space_per_bucket):
                # ok! we need to create the new share file.
                bw = BucketWriter(self, incominghome, finalhome,
                                  max_space_per_bucket, lease_info, canary)
                if self.no_storage:
                    # debug/testing mode: accept data but discard it
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._active_writers[bw] = 1
                if limited:
                    # reserve the full bucket size up front
                    remaining_space -= max_space_per_bucket
            else:
                # bummer! not enough space to accept this bucket
                pass

        if bucketwriters:
            # make sure the final share directory exists before writers close
            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

        self.add_latency("allocate", time.time() - start)
        return alreadygot, bucketwriters
Example #10
0
    def allocate_buckets(self,
                         storage_index,
                         renew_secret,
                         cancel_secret,
                         sharenums,
                         allocated_size,
                         owner_num=0,
                         renew_leases=True):
        """
        Generic bucket allocation API.

        :param bool renew_leases: If and only if this is ``True`` then renew a
            secret-matching lease on (or, if none match, add a new lease to)
            existing shares in this bucket.  Any *new* shares are given a new
            lease regardless.

        :return: (alreadygot, bucketwriters) -- the set of share numbers
            this server already holds, and a dict mapping each newly
            allocated share number to its BucketWriter.  Share numbers with
            a partial upload in incoming/, or that don't fit in the
            remaining space, get no writer.
        """
        # owner_num is not for clients to set, but rather it should be
        # curried into the PersonalStorageServer instance that is dedicated
        # to a particular owner.
        start = self._clock.seconds()
        self.count("allocate")
        alreadygot = {}
        bucketwriters = {}  # k: shnum, v: BucketWriter
        si_dir = storage_index_to_dir(storage_index)
        si_s = si_b2a(storage_index)

        log.msg("storage: allocate_buckets %r" % si_s)

        # in this implementation, the lease information (including secrets)
        # goes into the share files themselves. It could also be put into a
        # separate database. Note that the lease should not be added until
        # the BucketWriter has been closed.
        expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME
        lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret,
                               expire_time, self.my_nodeid)

        max_space_per_bucket = allocated_size

        remaining_space = self.get_available_space()
        limited = remaining_space is not None
        if limited:
            # this is a bit conservative, since some of this allocated_size()
            # has already been written to disk, where it will show up in
            # get_available_space.
            remaining_space -= self.allocated_size()
        # self.readonly_storage causes remaining_space <= 0

        # fill alreadygot with all shares that we have, not just the ones
        # they asked about: this will save them a lot of work. Add or update
        # leases for all of them: if they want us to hold shares for this
        # file, they'll want us to hold leases for this file.
        for (shnum, fn) in self.get_shares(storage_index):
            alreadygot[shnum] = ShareFile(fn)
        if renew_leases:
            self._add_or_renew_leases(alreadygot.values(), lease_info)

        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
            if os.path.exists(finalhome):
                # great! we already have it. easy.
                pass
            elif os.path.exists(incominghome):
                # For Foolscap we don't create BucketWriters for shnums that
                # have a partial share (in incoming/), so if a second upload
                # occurs while the first is still in progress, the second
                # uploader will use different storage servers.
                pass
            elif (not limited) or (remaining_space >= max_space_per_bucket):
                # ok! we need to create the new share file.
                bw = BucketWriter(self,
                                  incominghome,
                                  finalhome,
                                  max_space_per_bucket,
                                  lease_info,
                                  clock=self._clock)
                if self.no_storage:
                    # Really this should be done by having a separate class for
                    # this situation; see
                    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3862
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._bucket_writers[incominghome] = bw
                if limited:
                    # reserve the full bucket size up front
                    remaining_space -= max_space_per_bucket
            else:
                # bummer! not enough space to accept this bucket
                pass

        if bucketwriters:
            # make sure the final share directory exists before writers close
            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

        self.add_latency("allocate", self._clock.seconds() - start)
        return set(alreadygot), bucketwriters