Example #1
File: chunks.py  Project: shvar/redfs
    @classmethod
    def _timeout_for_bytesize(cls, bytes):
        r"""Calculate the expected timeout depending on the message length.

        >>> f = ChunksMessage._timeout_for_bytesize
        >>> f(0)  # 0 bytes - 1 minute
        datetime.timedelta(0, 60)
        >>> f(1024)  # 1 KiB - ~1 minute
        datetime.timedelta(0, 60, 62500)
        >>> f(1024 * 1024)  # 1 MiB ~ 2 minutes 4 seconds
        datetime.timedelta(0, 124)
        >>> f(1024 * 1024 * 63)  # 63 MiB ~ 1 hour 8 minutes 12 seconds
        datetime.timedelta(0, 4092)

        @param bytes: the length (in bytes).
        @type bytes: numbers.Integral

        @rtype: timedelta
        """
        total_size_kb = round_up_to_multiply(bytes, 1024) // 1024
        # On a 128 kbit/s link, assume the upload speed
        # is roughly 16 KiB/s.
        return timedelta(minutes=1) \
               + timedelta(seconds=1) * total_size_kb // 16
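A minimal standalone sketch of the same formula, with a hypothetical round_up_to_multiply stand-in for the project's helper, so the doctest values can be reproduced in isolation:

from datetime import timedelta

def round_up_to_multiply(value, multiple):
    # Hypothetical stand-in: round value up to the nearest multiple.
    return (value + multiple - 1) // multiple * multiple

def timeout_for_bytesize(num_bytes):
    # 1 minute base, plus 1 second per 16 KiB of payload
    # (assuming ~16 KiB/s upload on a 128 kbit/s link).
    total_size_kb = round_up_to_multiply(num_bytes, 1024) // 1024
    return timedelta(minutes=1) + timedelta(seconds=1) * total_size_kb // 16

print(timeout_for_bytesize(1024 * 1024))  # 0:02:04, matching the doctest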
Example #2
File: chunks.py  Project: shvar/redfs
    def _incoming_chunks_body_received(self, data):
        """
        @todo: What if the "since" field is too old?
        """
        assert self.is_incoming(), repr(self)
        sender = self.message.src

        chunks = self.message.chunks
        _chunk_lengths_iter = (ch.phys_size() for ch in chunks)

        logger.verbose('Received %i chunk(s) from %r: %r',
                       len(chunks) if chunks else 0, sender, chunks)

        # Check if the chunks were expected...
        exp_chunk_tuples = [
            HostQueries.HostChunks.get_expected_chunk(_chunk.uuid, sender.uuid)
            for _chunk in chunks
        ]

        byte_size_to_wipe = 0
        for _chunk_len, exp_chunk_tuple in izip(_chunk_lengths_iter,
                                                exp_chunk_tuples):
            doing_replication_not_restore = exp_chunk_tuple is None or \
                                            not exp_chunk_tuple.restore_flag

            # But if the chunk arrived for replication rather than restore,
            # we need to free some space by removing some dummy chunks.
            if doing_replication_not_restore:
                byte_size_to_wipe += _chunk_len

        # Now actually clean up some dummy chunks
        if byte_size_to_wipe != 0:
            how_many_chunks_to_wipe = \
                round_up_to_multiply(byte_size_to_wipe, 0x100000) // 0x100000
            logger.debug('Before writing chunks body: wiping %i chunks',
                         how_many_chunks_to_wipe)
            self.manager.app.chunk_storage.delete_some_dummy_chunks(
                how_many_chunks_to_wipe)

        # Consider the chunk-writing phase successful,
        # unless even a single chunk fails to write.
        overall_success = True

        # And finally, loop over the chunks and write their bodies.
        for _chunk, exp_chunk_tuple in izip(chunks, exp_chunk_tuples):
            _chunk_uuid = _chunk.uuid

            _expected_hash = _chunk.hash
            _actual_hash = _chunk.hash_from_body
            if _expected_hash != _actual_hash:
                logger.error(
                    'Chunk %s has hash %s rather than %s; '
                    'writing this for now, but someday '
                    'might need to fix it to prevent '
                    'the cloud/deduplication clogging '
                    'with the unreadable files.', _chunk_uuid,
                    binascii.hexlify(_actual_hash),
                    binascii.hexlify(_expected_hash))

            logger.debug('Writing chunk %r', _chunk)
            try:
                self.manager.app.chunk_storage.write_chunk(_chunk,
                                                           is_dummy=False)
            except Exception:
                logger.exception(
                    'Could not store the incoming chunk %r; '
                    'will be considered as failed', _chunk)
                overall_success = False
            else:  # written successfully
                if exp_chunk_tuple is not None:
                    # This is an expected chunk, either replication
                    # (restore_flag = False)
                    # or restore-related (restore_flag = True) one.
                    logger.debug(
                        'Received expected chunk %r from %r, '
                        'meeting expectation (restore_flag=%s)', _chunk_uuid,
                        sender.uuid, exp_chunk_tuple.restore_flag)
                    HostQueries.HostChunks.meet_expectations(
                        _chunk_uuid, sender.uuid, exp_chunk_tuple.restore_flag)
                else:
                    # This is an unexpected chunk;
                    # assume it is replication-related.
                    logger.debug(
                        'Received unexpected chunk %r from %r, '
                        'assume replication', _chunk_uuid, sender)

        # If something failed before this moment, the default value of
        # C{incoming_success} (False) will mark the transaction as failed.
        with self.open_state(for_update=True) as state:
            state.incoming_success = overall_success
            logger.debug('Overall success of %r is %r', self, overall_success)

        # At the very end, regenerate the dummy chunks, if needed.
        logger.debug('After writing chunk body: recalculating dummy chunks')
        self.manager.app.chunk_storage.update_dummy_chunks_size()
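The wipe-size step above rounds the replication payload up to whole 1-MiB chunks before freeing space. A minimal sketch of just that computation (the 0x100000 constant and rounding behavior are taken from the listing; the function name is hypothetical):

def chunks_to_wipe(byte_size):
    # One dummy chunk (1 MiB = 0x100000 bytes) is freed per started MiB
    # of incoming replication data.
    MIB = 0x100000
    return (byte_size + MIB - 1) // MIB  # same as round_up_to_multiply(...) // MIB

print(chunks_to_wipe(1))             # 1: even a single byte costs a whole chunk
print(chunks_to_wipe(3 * 0x100000))  # 3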
Example #3
    def update_dummy_chunks_size(self,
                                 old_limit_mib=None, new_limit_mib=None):
        """
        Whenever the chunk situation may have changed, update the dummy
        chunks: create new ones if there are too few,
        or remove unneeded ones if there are too many.

        @param old_limit_mib: the previous total size (in MiB) of chunks
                              to keep in storage; assumed 0 if None.
        @type old_limit_mib: numbers.Integral, NoneType
        @param new_limit_mib: the new total size (in MiB) of chunks
                              to keep in storage; taken from the settings
                              if None.
        @type new_limit_mib: numbers.Integral, NoneType
        """
        # For clarity only
        super(ChunkStorageFS, self).update_dummy_chunks_size(old_limit_mib,
                                                             new_limit_mib)

        # TODO: dummy chunks are not handled for now; the early return
        # below makes the rest of this method dead code.
        logger.warn('update_dummy_chunks_size() disabled')
        return

        if new_limit_mib is None:
            assert old_limit_mib is None
            new_limit_mib = \
                HostQueries.HostSettings.get(Queries.Settings
                                                    .MAX_STORAGE_SIZE_MIB)

        # These two variables will be used to report the progress of the task.
        num, of = 0, 0
        _operation = '<generic operation>'


        @exceptions_logged(logger)
        def timercb():
            """
            Callback function called on timer firing.
            """
            if (num, of) != (0, 0):
                logger_status_chunks_op.info(
                    'The chunk reallocation takes too long, completed %i/%i',
                    num, of,
                    extra={'_type': 'chunks_allocation.progress',
                           'num': num,
                           'of': of})


        timer_service = TimerService(1.0, timercb)

        # If the task takes more than 3 seconds,
        # start notifying about the progress
        _callLater = reactor.callLater  # pylint:disable=E1101,C0103
        # Won't worry about deferToThread here, since it is very fast.
        long_task_timer = _callLater(3.0, timer_service.startService)

        logger.debug('Resizing dummy chunk set from %s to %s',
                     old_limit_mib, new_limit_mib)

        with self.__chunk_op_lock:
            try:
                # Check for dummy chunks before the check for present files,
                # as the check for dummy chunks also may remove some
                # of the files.

                # What dummy chunks are available?
                # list, so it can be used twice.
                # TODO: do we need to use it twice?
                dummy_chunk_uuids = list(self.__get_dummy_chunk_uuids_on_fs())
                how_many_dummy_chunks = len(dummy_chunk_uuids)

                # What chunk files are present on the FS,...
                present_chunk_filenames_iter = \
                    self.__get_chunk_filenames_on_fs(self.__chunk_dir)
                # ... and what are the chunk UUIDs?
                # present_chunk_uuids = \
                #     self.__class__.convert_chunk_filenames_to_uuids(
                #         present_chunk_filenames)

                # How many bytes/MiB do we need to have preallocated?
                reserved_mib = long(new_limit_mib)
                reserved_size = reserved_mib * 0x100000

                # How many bytes/MiB is preallocated already?
                present_chunk_size = \
                    sum(os.stat(f).st_size
                            for f in present_chunk_filenames_iter)
                del present_chunk_filenames_iter  # help GC
                present_chunks_in_mib = \
                    round_up_to_multiply(present_chunk_size,
                                         0x100000) // 0x100000

                if reserved_mib > present_chunks_in_mib:
                    # Add new chunks
                    how_many_new_chunks = reserved_mib - present_chunks_in_mib
                    of = how_many_new_chunks
                    _operation = 'allocation'

                    for u in self.__create_some_dummy_chunks(
                                 how_many_new_chunks):
                        num += 1

                elif reserved_mib < present_chunks_in_mib:
                    # Try to remove some dummy chunks...
                    how_many_chunks_try_to_remove = \
                        present_chunks_in_mib - reserved_mib

                    # But we cannot remove more than len(dummy_chunk_uuids)!
                    if how_many_dummy_chunks < how_many_chunks_try_to_remove:
                        logger.debug('Trying to remove %i chunks, '
                                         'but only %i dummy chunks available!',
                                     how_many_chunks_try_to_remove,
                                     how_many_dummy_chunks)

                    how_many_chunks_to_remove = \
                        min(how_many_chunks_try_to_remove,
                            how_many_dummy_chunks)
                    of = how_many_chunks_to_remove
                    _operation = 'removing'

                    chunk_uuids_to_delete = take(how_many_chunks_to_remove,
                                                 dummy_chunk_uuids)

                    for u in self.delete_some_dummy_chunks(
                                 how_many_chunks_to_remove,
                                 chunk_uuids_to_delete):
                        num += 1

            except Exception as e:
                logger_status_chunks_op_error.error(
                    'The chunk %s failed: %r',
                    _operation, e,
                    extra={'_type': 'chunks_allocation.error',
                           '_exc': e,
                           '_tb': traceback.format_exc()})

            finally:
                # We're done with the chunk allocation.
                # Now stop the timer, and manually report that 100%
                # of the work is done.
                if (not long_task_timer.called and
                    not long_task_timer.cancelled):
                    long_task_timer.cancel()

                if timer_service.running:
                    timer_service.stopService()

                timercb()
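The core of the resize logic above is a three-way comparison between the reserved size and the currently preallocated size, with removals capped by the number of dummy chunks actually on disk. A minimal sketch of that decision under assumed names (resize_plan is hypothetical; the operation labels mirror the listing's _operation values):

def resize_plan(present_mib, reserved_mib, dummy_chunks_available):
    # Decide how many dummy chunks to create or delete so that
    # the preallocated storage matches the reserved size.
    if reserved_mib > present_mib:
        return ('allocation', reserved_mib - present_mib)
    elif reserved_mib < present_mib:
        # We can never delete more dummy chunks than actually exist.
        return ('removing', min(present_mib - reserved_mib,
                                dummy_chunks_available))
    return (None, 0)

print(resize_plan(10, 16, 4))  # ('allocation', 6)
print(resize_plan(16, 10, 4))  # ('removing', 4): capped by available dummies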