Example #1
    def __on_next_iteration_of_file_state_bunch(self):
        assert in_main_thread()
        d = threads.deferToThread(
                lambda: exceptions_logged(logger)(
                            self.__try_save_next_bunch_of_file_states)())
        d.addBoth(lambda ignore:
                      exceptions_logged(logger)(
                          self.__do_next_iteration_of_file_state_bunch)())
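
The pattern above is worth spelling out: the blocking worker is pushed onto Twisted's thread pool with threads.deferToThread, both the worker and the follow-up are wrapped in the project's exceptions_logged decorator so nothing fails silently, and addBoth re-arms the loop whether the iteration succeeded or failed. Below is a minimal self-contained sketch of the same loop; exceptions_logged here is a hypothetical stand-in for the project's decorator, which is not shown in the example.

    import logging
    from functools import wraps

    from twisted.internet import reactor, threads

    logger = logging.getLogger(__name__)


    def exceptions_logged(log):
        """Hypothetical stand-in: log (and re-raise) any exception
        raised by the wrapped callable."""
        def decorator(f):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except Exception:
                    log.exception('Unhandled error in %s', f.__name__)
                    raise
            return wrapper
        return decorator


    @exceptions_logged(logger)
    def try_save_next_bunch():
        pass  # blocking work, safe to run off the reactor thread


    def on_next_iteration():
        # Run the blocking call in the reactor's thread pool; addBoth
        # fires on success *and* on failure, so the loop survives errors.
        d = threads.deferToThread(try_save_next_bunch)
        d.addBoth(lambda _ignore: reactor.callLater(0, on_next_iteration))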
Example #2
    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(UHostApp, self).__init__(*args, **kwargs)

        # The manager that just tracks the FS
        __internal_fsnotify_manager = FSNotifyManager()
        # The manager that supports symlinks at the top level.
        self.__fsnotify_manager = \
            SyncDirFSNotifyProxy(fsnotify_manager=__internal_fsnotify_manager)

        self.__syncer_starter = None
        self.auto_start_sync = True

        on_non_empty_cooling_down_to_store = \
            lambda: logger_status_cooling_down_to_store.info(
                        'Some files cooled down to store recently',
                        extra={'status': True})
        on_empty_cooling_down_to_store = \
            lambda: logger_status_cooling_down_to_store.info(
                        'No more files cooling down to store',
                        extra={'status': False})
        self.__cooling_down_to_store = \
            EventfulDict(_on_non_empty_cb=on_non_empty_cooling_down_to_store,
                         _on_empty_cb=on_empty_cooling_down_to_store)
        self.__cooling_down_to_store_lock = RLock()

        self.__file_states_ready_to_write = {}
        self.__file_states_ready_to_write_lock = Lock()

        self.__ignore_file_paths = dict()
        self.__ignore_file_paths_lock = RLock()

        self.ux = ux.UX()
        ux_handlers = [
            ux.RestoreFSM(self.ux.handle_event),
            ux.BackupFSM(self.ux.handle_event),
            ux.SyncFSM(self.ux.handle_event),
            ux.IdleFSM(self.ux.handle_event),
            ux.NetworkConnectionFSM(self.ux.handle_event),
            ux.OccupationHandler(self.ux.handle_event),
            ux.UXEventForwarder()
        ]
        for ux_handler in ux_handlers:
            self.ux.add_handler(ux_handler)

        self.__network_connection_is_working = True

        logger.debug('Will attempt to create a dataset once every %s',
                     CREATE_DATASET_PERIOD)
        logger.debug('Will publish connection state every %s',
                     PUBLISH_HOST_STATE_PERIOD)
        self.__all_timer_services = [
            # We should not create a dataset until the previous attempt
            # of dataset creation has been completed.
            # Thus DelayServiceForCallback rather than TimerService.
            DelayServiceForCallback(period=CREATE_DATASET_PERIOD,
                                    callback=self.__on_create_dataset_timer),
            TimerService(PUBLISH_HOST_STATE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(self.__publish_host_state))
        ]
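
The comment about DelayServiceForCallback is the interesting design note here: twisted.application.internet.TimerService fires on a fixed clock even if the previous run is still in flight, while DelayServiceForCallback (a project-specific class, not shown) evidently re-arms the timer only once the previous callback has finished. A plausible minimal reconstruction, assuming the callback may return a Deferred:

    from twisted.internet import defer, reactor


    class DelayServiceForCallback(object):
        """Hypothetical reconstruction of the project-specific service:
        arm the next run only after the previous one has completed,
        so slow runs never overlap."""

        def __init__(self, period, callback):
            self.__period = period  # a timedelta, as in the example above
            self.__callback = callback
            self.__delayed_call = None

        def startService(self):
            self.__schedule()

        def stopService(self):
            if self.__delayed_call is not None \
                    and self.__delayed_call.active():
                self.__delayed_call.cancel()

        def __schedule(self):
            self.__delayed_call = reactor.callLater(
                self.__period.total_seconds(), self.__run)

        def __run(self):
            # maybeDeferred accepts both plain and Deferred-returning
            # callbacks; the next run is armed only when this one fires.
            d = defer.maybeDeferred(self.__callback)
            d.addBoth(lambda _ignore: self.__schedule())

With a plain TimerService, a dataset-creation attempt that outlives CREATE_DATASET_PERIOD would overlap the next tick; with this scheme each attempt starts only after the previous one has completed.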
Example #3
    def __init__(self, *args, **kwargs):
        """Constructor."""
        super(UHostApp, self).__init__(*args, **kwargs)

        # The manager that just tracks the FS
        __internal_fsnotify_manager = FSNotifyManager()
        # The manager that supports symlinks at the top level.
        self.__fsnotify_manager = \
            SyncDirFSNotifyProxy(fsnotify_manager=__internal_fsnotify_manager)

        self.__syncer_starter = None
        self.auto_start_sync = True

        on_non_empty_cooling_down_to_store = \
            lambda: logger_status_cooling_down_to_store.info(
                        'Some files cooled down to store recently',
                        extra={'status': True})
        on_empty_cooling_down_to_store = \
            lambda: logger_status_cooling_down_to_store.info(
                        'No more files cooling down to store',
                        extra={'status': False})
        self.__cooling_down_to_store = \
            EventfulDict(_on_non_empty_cb=on_non_empty_cooling_down_to_store,
                         _on_empty_cb=on_empty_cooling_down_to_store)
        self.__cooling_down_to_store_lock = RLock()

        self.__file_states_ready_to_write = {}
        self.__file_states_ready_to_write_lock = Lock()

        self.__ignore_file_paths = dict()
        self.__ignore_file_paths_lock = RLock()

        self.ux = ux.UX()
        ux_handlers = [ux.RestoreFSM(self.ux.handle_event),
                       ux.BackupFSM(self.ux.handle_event),
                       ux.SyncFSM(self.ux.handle_event),
                       ux.IdleFSM(self.ux.handle_event),
                       ux.NetworkConnectionFSM(self.ux.handle_event),
                       ux.OccupationHandler(self.ux.handle_event),
                       ux.UXEventForwarder()]
        for ux_handler in ux_handlers:
            self.ux.add_handler(ux_handler)

        self.__network_connection_is_working = True

        logger.debug('Will attempt to create a dataset once every %s',
                     CREATE_DATASET_PERIOD)
        logger.debug('Will publish connection state every %s',
                     PUBLISH_HOST_STATE_PERIOD)
        self.__all_timer_services = [
            # We should not create a dataset until the previous attempt
            # of dataset creation has been completed.
            # Thus DelayServiceForCallback rather than TimerService.
            DelayServiceForCallback(period=CREATE_DATASET_PERIOD,
                                    callback=self.__on_create_dataset_timer),
            TimerService(PUBLISH_HOST_STATE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(self.__publish_host_state))
        ]
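
Example #3 repeats the constructor above almost verbatim; the dependency worth spelling out is EventfulDict, which, judging by the two callbacks it receives, reports transitions between the empty and non-empty states. A plausible minimal reconstruction (the real class is project-specific; pop(), clear() and update() would need the same treatment):

    class EventfulDict(dict):
        """Hypothetical reconstruction: fire one callback when the dict
        goes empty -> non-empty, and another when it becomes empty again."""

        def __init__(self, _on_non_empty_cb=None, _on_empty_cb=None,
                     *args, **kwargs):
            super(EventfulDict, self).__init__(*args, **kwargs)
            self.__on_non_empty = _on_non_empty_cb
            self.__on_empty = _on_empty_cb

        def __setitem__(self, key, value):
            was_empty = not self
            super(EventfulDict, self).__setitem__(key, value)
            if was_empty and self.__on_non_empty is not None:
                self.__on_non_empty()

        def __delitem__(self, key):
            super(EventfulDict, self).__delitem__(key)
            if not self and self.__on_empty is not None:
                self.__on_empty()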
Example #4
    def __init__(self, server_process):
        assert isinstance(server_process, ServerProcess), \
               repr(server_process)
        self.__server_process = server_process

        self.__restore_timer = \
            TimerService(WEB_RESTORE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(callInThread),
                         self.__poll_restore_requests_in_thread)
Example #5
    def __init__(self, server_process):
        assert isinstance(server_process, ServerProcess), \
               repr(server_process)
        self.__server_process = server_process

        self.__restore_timer = \
            TimerService(WEB_RESTORE_PERIOD.total_seconds(),
                         exceptions_logged(logger)(callInThread),
                         self.__poll_restore_requests_in_thread)
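
Note how the timer is wired: TimerService(step, callable, *args) forwards the extra arguments to the callable on every tick, so each WEB_RESTORE_PERIOD this effectively runs callInThread(self.__poll_restore_requests_in_thread), pushing the (presumably blocking) poll onto the reactor's thread pool instead of stalling the reactor. The same idea in a compressed, self-contained form, with invented names and deferToThread in place of callInThread:

    from twisted.application.internet import TimerService
    from twisted.internet.threads import deferToThread


    def poll_restore_requests():
        pass  # blocking poll, e.g. a synchronous HTTP request


    # Every 60 seconds, run the blocking poll in the thread pool.
    # TimerService forwards the extra argument to the callable, just as
    # the example above forwards the poll method to callInThread.
    restore_timer = TimerService(60.0, deferToThread, poll_restore_requests)
    # restore_timer.startService()  # once attached to a running reactor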
Example #6
    def _no_more_chunks_but_wait_for_progresses(self):
        """
        We are almost done with the transaction,
        but may still need to wait for the PROGRESS transaction(s)
        before we can safely report the backup as completed.

        @precondition: self.__progress_notif_deferredlist is None
        @postcondition: self.__progress_notif_deferredlist is not None
        """
        self.__progress_notif_deferredlist = \
            DeferredList(self.__progress_notif_deferreds)
        self.__progress_notif_deferredlist.addBoth(
            exceptions_logged(logger)(
                lambda ignore: self._no_more_chunks()))
        logger.debug('We have no more chunks to upload, '
                         'but waiting for the progresses: %r',
                     self.__progress_notif_deferreds)
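
The mechanism relies on twisted.internet.defer.DeferredList, which fires only after every Deferred in the given list has fired, with a result or with a failure; that is exactly the "wait for all outstanding PROGRESS notifications" semantics the docstring describes. A tiny demonstration:

    from twisted.internet import defer


    def on_all_done(results):
        # `results` is a list of (success_flag, result_or_failure) pairs.
        print(results)


    progress_notifs = [defer.Deferred(), defer.Deferred()]

    # consumeErrors keeps failures from also being logged as unhandled.
    done = defer.DeferredList(progress_notifs, consumeErrors=True)
    done.addBoth(on_all_done)

    progress_notifs[0].callback('ok')                 # not enough yet...
    progress_notifs[1].errback(RuntimeError('lost'))  # ...now `done` fires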
Example #7
    def __on_next_iteration_of_file_state_bunch(self):
        assert in_main_thread()
        d = threads.deferToThread(lambda: exceptions_logged(logger)(
            self.__try_save_next_bunch_of_file_states)())
        d.addBoth(lambda ignore: exceptions_logged(logger)(
            self.__do_next_iteration_of_file_state_bunch)())
Example #8
    def __try_upload_next_chunks(self):
        """
        We have received the information about which hosts to use;
        now find the next host and try to upload the next chunk
        (or maybe multiple chunks) to it.

        @returns: Whether any chunk was in fact uploaded.
        @rtype: bool

        @todo: If the connection to the url fails, we must delete it
               from the list.
        """
        assert not in_main_thread()

        _dataset = self.dataset

        logger.debug('Trying to upload next chunk(s)...')

        # If we could not proceed with something on this iteration,
        # but there is still other information which may be used,
        # we just retry. We could have called the same function again
        # and again, but that risks failing due to the limited stack depth.
        while True:
            if not self.target_hosts:
                if self.__chunks_by_size_code:
                    # We still have some chunks not uploaded:
                    # the backup transaction failed!
                    self.ack_result_code = BackupMessage.ResultCodes \
                                                        .GENERAL_FAILURE
                    logger.debug('Error: no target hosts, '
                                     'but still some chunks: %r',
                                 self.__chunks_by_size_code)

                return False

            else:
                logger.debug('Backup targets %r', self.target_hosts)
                # Select any next host, pseudo-randomly (but reproducibly)
                target_host = \
                    self.__random.choice(sorted(self.target_hosts.keys()))
                chunk_count_by_size = self.target_hosts[target_host]

                if not chunk_count_by_size:
                    logger.debug('Good! No more chunks allowed for %r!',
                                 target_host)
                    del self.target_hosts[target_host]
                    # Try again, probably with the other host.
                    logger.debug('Remaining %r', self.target_hosts)
                    continue

                assert 0 not in chunk_count_by_size.itervalues(), \
                       repr(chunk_count_by_size)

                # Shall we send a single chunk or all chunks altogether?
                # Analyze chunk(s) to send and put it/them
                # into the try_next_chunks variable.

                logger.debug('We need to upload such chunks to %r: %r',
                             target_host, chunk_count_by_size)

                try_next_chunks = self.__take_next_chunks_to_upload(
                                      chunk_count_by_size)

                # We've found the list of one (or maybe more) chunks to send,
                # let's encrypt and post them.
                try_next_chunks_encrypted = \
                    map(partial(EncryptedChunkFromFiles.from_non_encrypted,
                                self.__cryptographer),
                        try_next_chunks)

                # We collected the chunks to upload!
                # Start the nested CHUNKS transaction then.

                logger.debug('Sending %d chunk(s) in a single batch',
                             len(try_next_chunks_encrypted))

                _message = self.message

                assert target_host.uuid in self.manager.app.known_peers, \
                       u'Host {!r} is not in {!r}' \
                           .format(target_host,
                                   self.manager.app.known_peers.keys())

                # repr(try_next_chunks) might be pretty long,
                # so let's '.verbose()' it.
                logger.verbose('b. Will upload chunks %r to %r',
                               try_next_chunks, target_host.urls)

                c_tr = self.manager.create_new_transaction(
                           name='CHUNKS',
                           src=_message.dst,
                           dst=target_host,
                           parent=self,
                           # CHUNKS-specific
                           chunks=try_next_chunks_encrypted)

                # Do NOT change the following lines to "addCallbacks":
                # "__on_chunks_failure()" must be called even if the internals
                # of "__on_chunks_success" have failed!
                c_tr.completed.addCallback(
                    self.__on_chunks_success,
                    _message.dst, target_host, try_next_chunks)
                c_tr.completed.addErrback(
                    self.__on_chunks_failure,
                    target_host, try_next_chunks)

                c_tr.completed.addBoth(
                    exceptions_logged(logger)(
                        lambda ignore: self.__upload_more_chunks()))
                # Try again for the next chunk
                # only when the started transaction succeeds.
                return True
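
The "do NOT change to addCallbacks" warning reflects a genuine Twisted subtlety: with d.addCallbacks(cb, eb), eb handles only failures arriving from earlier in the chain, never an exception raised inside cb itself, whereas chaining addCallback(cb) and then addErrback(eb) makes eb catch cb's own exceptions too, which is exactly what the transaction code needs. A tiny demonstration:

    from twisted.internet import defer


    def cb(_result):
        raise RuntimeError('callback itself blew up')


    def eb(failure):
        print('errback saw: ' + failure.getErrorMessage())


    def fell_through(failure):
        print('fell through to: ' + failure.getErrorMessage())


    # Variant 1: addCallbacks(cb, eb) -- eb is NOT called for an exception
    # raised inside cb; cb's failure travels further down the chain.
    d1 = defer.Deferred()
    d1.addCallbacks(cb, eb)
    d1.addErrback(fell_through)  # this one fires
    d1.callback('ok')

    # Variant 2: addCallback(cb) then addErrback(eb) -- eb catches cb's
    # own exception, which is why the code above keeps them separate.
    d2 = defer.Deferred()
    d2.addCallback(cb)
    d2.addErrback(eb)  # this one fires
    d2.callback('ok')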