def write_share_data(self, offset, data):
    """
    Write ``data`` into this share's container file at logical ``offset``.

    The logical offset is relative to the start of the data region; the
    on-disk position is shifted by ``self._data_offset``. Raises
    DataTooLargeError when the write would extend past ``self._max_size``
    (if a maximum is configured).
    """
    size = len(data)
    precondition(offset >= 0, offset)
    if self._max_size is not None and offset + size > self._max_size:
        raise DataTooLargeError(self._max_size, offset, size)
    seek_to = self._data_offset + offset
    with open(self.home, 'rb+') as f:
        f.seek(seek_to)
        # sanity check: make sure the seek really landed where we expect
        assert f.tell() == seek_to
        f.write(data)
def _change_container_size(self, f, new_container_size): if new_container_size > self.MAX_SIZE: raise DataTooLargeError() old_extra_lease_offset = self._read_extra_lease_offset(f) new_extra_lease_offset = self.DATA_OFFSET + new_container_size if new_extra_lease_offset < old_extra_lease_offset: # TODO: allow containers to shrink. For now they remain large. return num_extra_leases = self._read_num_extra_leases(f) f.seek(old_extra_lease_offset) extra_lease_data = f.read(4 + num_extra_leases * self.LEASE_SIZE) f.seek(new_extra_lease_offset) f.write(extra_lease_data) # an interrupt here will corrupt the leases, iff the move caused the # extra leases to overlap. self._write_extra_lease_offset(f, new_extra_lease_offset)
def _change_container_size(self, f, new_container_size): if new_container_size > self.MAX_SIZE: raise DataTooLargeError() old_extra_lease_offset = self._read_extra_lease_offset(f) new_extra_lease_offset = self.DATA_OFFSET + new_container_size if new_extra_lease_offset < old_extra_lease_offset: # TODO: allow containers to shrink. For now they remain large. return num_extra_leases = self._read_num_extra_leases(f) f.seek(old_extra_lease_offset) leases_size = 4 + num_extra_leases * self.LEASE_SIZE extra_lease_data = f.read(leases_size) # Zero out the old lease info (in order to minimize the chance that # it could accidentally be exposed to a reader later, re #1528). f.seek(old_extra_lease_offset) f.write('\x00' * leases_size) f.flush() # An interrupt here will corrupt the leases. f.seek(new_extra_lease_offset) f.write(extra_lease_data) self._write_extra_lease_offset(f, new_extra_lease_offset)