def delete_some_dummy_chunks(self, how_many, dummy_chunk_uuids=None):
    """
    Remove up to C{how_many} dummy chunks, both from the filesystem
    and from the database.

    This is a generator: it yields the UUID of every dummy chunk it
    attempts to remove, one by one, so the caller can track progress.
    A UUID is yielded even when unlinking its file failed; afterwards,
    all attempted UUIDs are purged from the DB in a single call.

    @param how_many: How many dummy chunks to delete.
    @type how_many: numbers.Integral

    @param dummy_chunk_uuids: (Optional) set of the dummy chunk UUIDs
        (if we know it already). If None, calculated automatically.
    @type dummy_chunk_uuids: (set, NoneType)
    """
    # For clarity only
    super(ChunkStorageFS, self).delete_some_dummy_chunks(how_many,
                                                         dummy_chunk_uuids)

    with self.__chunk_op_lock:
        # Decide where the candidate UUIDs come from: the caller-provided
        # set, or a fresh scan of the filesystem.
        if dummy_chunk_uuids is None:
            candidate_uuids = self.__get_dummy_chunk_uuids_on_fs()
        else:
            candidate_uuids = dummy_chunk_uuids

        # Materialize the (possibly shorter-than-requested) victim list
        # up front: we need its length for logging, iterate it, and pass
        # it to the DB cleanup at the end.
        victims = list(take(how_many, candidate_uuids))

        # Log the actual count, not how_many: fewer dummy chunks than
        # requested may exist.
        logger.debug('Removing %i dummy chunks', len(victims))

        for victim_uuid in victims:
            try:
                victim_path = self.__get_chunk_file_path(victim_uuid,
                                                         is_dummy=True)
                os.unlink(victim_path)
            except Exception as e:
                # Best-effort: log and keep going; the UUID is still
                # yielded and still purged from the DB below.
                logger.exception('Problem during removing chunk %s: %r',
                                 victim_uuid, e)

            yield victim_uuid  # even if failed
            logger.debug('Deleted dummy chunk %r', victim_uuid)

        # Purge every attempted UUID from the DB in one batch.
        HostQueries.HostChunks.delete_dummy_chunks(victims)
def update_dummy_chunks_size(self, old_limit_mib=None, new_limit_mib=None):
    """
    Whenever the situation with the chunks could have been changed,
    update the dummy chunks: create new ones if there is a lack of them,
    or remove unneeded ones if there is an excess of them.

    @param old_limit_mib: The previous total amount of chunks to keep
        in storage, assumed 0 if None.
    @type old_limit_mib: numbers.Integral, NoneType

    @param new_limit_mib: The new total amount of chunks to keep
        in storage, taken from the settings if None.
    @type new_limit_mib: numbers.Integral, NoneType
    """
    # For clarity only
    super(ChunkStorageFS, self).update_dummy_chunks_size(old_limit_mib,
                                                         new_limit_mib)

    # TODO! TODO! TODO! we don't bother with dummy chunks for now
    # NOTE(review): the early return below deliberately disables this
    # feature; EVERYTHING after it is dead code kept for when the TODO
    # is resolved.  Also, `logger.warn` is a deprecated alias of
    # `logger.warning` — worth renaming when this code is revived.
    logger.warn('update_dummy_chunks_size() disabled')
    return

    # --- dead code below (see early return above) -------------------

    if new_limit_mib is None:
        # Callers either pass both limits or neither; when neither,
        # the new limit comes from the persisted host settings.
        assert old_limit_mib is None
        new_limit_mib = \
            HostQueries.HostSettings.get(Queries.Settings
                                                .MAX_STORAGE_SIZE_MIB)

    # This two variables will be used to specify the progress of the task.
    # They are rebound in this scope and read by the timercb() closure.
    num, of = 0, 0
    _operation = '<generic operation>'

    @exceptions_logged(logger)
    def timercb():
        """
        Callback function called on timer firing.

        Reports reallocation progress (num out of of) via the status
        logger; stays silent while no work has been counted yet.
        """
        if (num, of) != (0, 0):
            logger_status_chunks_op.info(
                'The chunk reallocation takes too long, completed %i/%i',
                num, of,
                extra={'_type': 'chunks_allocation.progress',
                       'num': num,
                       'of': of})

    timer_service = TimerService(1.0, timercb)

    # If the task takes more than 3 seconds,
    # start notifying about the progress
    _callLater = reactor.callLater  # pylint:disable=E1101,C0103
    # Won't worry about deferToThread here, cause it is very fast.
    long_task_timer = _callLater(3.0, timer_service.startService)

    logger.debug('Resizing dummy chunk set from %s to %s',
                 old_limit_mib, new_limit_mib)

    with self.__chunk_op_lock:
        try:
            # Check for dummy chunks before the check for present files,
            # as the check for dummy chunks also may remove some
            # of the files.

            # What dummy chunks are available?
            # list, so it can be used twice.
            # TODO: do we need to use it twice?
            dummy_chunk_uuids = list(self.__get_dummy_chunk_uuids_on_fs())
            how_many_dummy_chunks = len(dummy_chunk_uuids)

            # What chunk files are present on the FS,...
            present_chunk_filenames_iter = \
                self.__get_chunk_filenames_on_fs(self.__chunk_dir)
            # ... and what are the chunk UUIDs?
            # present_chunk_uuids = \
            #     self.__class__.convert_chunk_filenames_to_uuids(
            #         present_chunk_filenames)

            # How many bytes/MiB do we need to have preallocated?
            # NOTE(review): `long()` is Python 2 only — this module
            # predates Python 3 and would need `int()` there.
            reserved_mib = long(new_limit_mib)
            reserved_size = reserved_mib * 0x100000

            # How many bytes/MiB is preallocated already?
            present_chunk_size = \
                sum(os.stat(f).st_size
                        for f in present_chunk_filenames_iter)
            del present_chunk_filenames_iter  # help GC
            # Round the on-disk total up to a whole number of MiB.
            present_chunks_in_mib = \
                round_up_to_multiply(present_chunk_size,
                                     0x100000) // 0x100000

            if reserved_mib > present_chunks_in_mib:
                # Add new chunks
                how_many_new_chunks = reserved_mib - present_chunks_in_mib
                of = how_many_new_chunks
                _operation = 'allocation'

                # Count each created chunk so timercb() can report it.
                for u in self.__create_some_dummy_chunks(
                             how_many_new_chunks):
                    num += 1

            elif reserved_mib < present_chunks_in_mib:
                # Try to remove some dummy chunks...
                how_many_chunks_try_to_remove = \
                    present_chunks_in_mib - reserved_mib

                # But we cannot remove more than len(dummy_chunk_uuids)!
                if how_many_dummy_chunks < how_many_chunks_try_to_remove:
                    logger.debug('Trying to remove %i chunks, '
                                     'but only %i dummy chunks available!',
                                 how_many_chunks_try_to_remove,
                                 how_many_dummy_chunks)

                how_many_chunks_to_remove = \
                    min(how_many_chunks_try_to_remove,
                        how_many_dummy_chunks)

                of = how_many_chunks_to_remove
                _operation = 'removing'

                chunk_uuids_to_delete = take(how_many_chunks_to_remove,
                                             dummy_chunk_uuids)
                # delete_some_dummy_chunks() is a generator; consuming
                # it drives the deletion and counts progress.
                for u in self.delete_some_dummy_chunks(
                             how_many_chunks_to_remove,
                             chunk_uuids_to_delete):
                    num += 1

        except Exception as e:
            logger_status_chunks_op_error.error(
                'The chunk %s failed: %r', _operation, e,
                extra={'_type': 'chunks_allocation.error',
                       '_exc': e,
                       '_tb': traceback.format_exc()})

        finally:
            # We've done with the chunks allocation.
            # Now stop the timer, and manually report that 100%
            # of the work is done.
            if (not long_task_timer.called and
                not long_task_timer.cancelled):
                long_task_timer.cancel()

            if timer_service.running:
                timer_service.stopService()

            timercb()