Example #1
    def reload_config_info(self):
        """
        Refresh the configuration values.

        Reload the configuration, get configuration values from self.cfg
        and set them as attributes of self.
        To be called at least once.
        """
        # TODO: merge this method into the constructor, there is no reason to
        # keep it separated anymore.

        self.cfg.load()

        self.client_id = self.cfg.get('User', 'client_id')
        self.username = self.cfg.get('User', 'username')
        self.priv_key = self.cfg.get('Application Paths',
                                     'client_priv_key_file')
        self.host = self.cfg.get('System', 'server_hostname')
        self.port = self.cfg.getint('System', 'server_port')
        self.server_certificate = self.cfg.get('Application Paths',
                                               'server_certificate')
        self.storage_hostname = self.cfg.get('System', 'storage_endpoint')

        self.refused_declare_max = self.cfg.getint('System',
                                                   'refused_declare_max')
        self.refused_declare_waiting_time = self.cfg.getint(
            'System', 'refused_declare_waiting_time')
        self.commit_threshold_seconds = self.cfg.getint(
            'Client', 'commit_threshold_seconds')
        self.commit_threshold_operations = self.cfg.getint(
            'Client', 'commit_threshold_operations')
        self.commit_threshold_bytes = self.cfg.getint(
            'Client', 'commit_threshold_bytes')

        temp = self.cfg.get('Application Paths', 'transaction_cache_db')
        self.transaction_cache = TransactionCache(temp)
        self.integrity_manager = IntegrityManager(None)

        is_first_startup = self._internal_facade.is_first_startup()

        self.cryptoAdapter = Adapter(self.cfg,
                                     self.warebox,
                                     self._input_queue,
                                     self._lockfile_fd,
                                     enc_dir='enc',
                                     first_startup=is_first_startup)

        self.worker_pool = WorkerPool(self.warebox, self, self.cfg,
                                      self.cryptoAdapter)

        self.temp_dir = self.cryptoAdapter.get_enc_dir()
        self._ui_controller.update_config_info(self.cfg)
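
The method above only assumes a ConfigParser-style interface on self.cfg: get(section, option) for strings and getint(section, option) for integers. A minimal sketch of that access pattern, using the standard library ConfigParser module as a stand-in for filerockclient's ConfigManager (an assumption made purely for illustration; the option values are made up):

# Sketch only: ConfigParser stands in for the real ConfigManager, and the
# section/option values below are invented examples.
import ConfigParser

cfg = ConfigParser.SafeConfigParser()
cfg.add_section('User')
cfg.set('User', 'client_id', '42')
cfg.set('User', 'username', 'alice')
cfg.add_section('System')
cfg.set('System', 'server_hostname', 'server.example')
cfg.set('System', 'server_port', '443')

client_id = cfg.get('User', 'client_id')      # returns the string '42'
port = cfg.getint('System', 'server_port')    # returns the integer 443
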
Example #3
class Worker(Thread):
    '''A worker takes something to do and does it, then takes the next
    thing to do, and so on.
    '''

    def __init__(self,
                 warebox,
                 operation_queue,
                 server_session,
                 cfg,
                 cryptoAdapter,
                 worker_pool):
        """
        @param warebox:
                    Instance of filerockclient.warebox.Warebox.
        @param cfg:
                    Instance of filerockclient.config.ConfigManager.
        @param operation_queue:
                    A threading queue, where worker receives the operations
        @param worker_pool:
                    The WorkerPool instance that manages this worker.
        """
        Thread.__init__(self, name=self.__class__.__name__)
        self.cfg = cfg
        self.operation_queue = operation_queue
        self._server_session = server_session
        self.warebox = warebox
        self.child = None

        self.input_queue = None
        self.communicationQueue = None
        self.child_logger = None
        self.child_logs_queue = None
        self.cryptoAdapter = cryptoAdapter
        self.integrity_manager = IntegrityManager(None)

        self._worker_pool = worker_pool
        self.must_die = threading.Event()
        self.last_send = datetime.now()
        self.communicationQueue = None
        self.child_logs_queue = None

    def run(self):
        """
        Serves file operations until termination request is received
        """
        try:
            self.name += "_%s" % self.ident
            self.logger = logging.getLogger("FR.%s" % self.getName())
            self.logger.debug(u'Started.')
            while not self._termination_requested():
                self._serve_file_operations()
            self.logger.debug(u"I'm terminated.")
        finally:
            self._terminate_child()

    def _serve_file_operations(self):
        """
        Blocks on operation queue until a message is received

        If the message is a POISON_PILL, the worker terminates itself.
        If the message is a non-aborted file operation, an abort handler is
        registered on it and the operation is handled.
        """
        try:
            file_operation = self.operation_queue.get()
            self.warebox._check_blacklisted_dir()
            if file_operation == 'POISON_PILL':
                self._on_poison_pill()
            elif file_operation.is_aborted():
                self.logger.debug(u"Got an already aborted operation, "
                                  "giving up: %s" % file_operation)
            else:
                try:
                    self.logger.debug(u"Got an operation to handle: %s"
                                      % file_operation)
                    file_operation.register_abort_handler(self.on_operation_abort)
                    self._handle_file_operation(file_operation)
                except Exception as e:
                    self.logger.error(
                        u"Some problem occurred with the operation : %r" % e)
                    raise e
        finally:
            self.logger.debug("Releasing a worker")
            self._worker_pool.release_worker()

    def _handle_file_operation(self, file_operation):
        if file_operation.verb == 'UPLOAD':
            self._handle_upload_file_operation(file_operation)
        elif file_operation.verb == 'DOWNLOAD':
            self._handle_download_file_operation(file_operation)
        elif file_operation.verb == 'DELETE_LOCAL':
            self._handle_delete_local_file_operation(file_operation)
        elif file_operation.verb == 'CREATE_DIRECTORIES':
            self._handle_operation_create_directories(file_operation)
        elif file_operation.verb == 'RESOLVE_DELETION_CONFLICTS':
            self._handle_operation_resolve_deletion_conflicts(file_operation)
        else:
            self.logger.warning(u"I should not handle a '%s' operation! "
                                "I'm rejecting it", file_operation.verb)
            file_operation.reject()

    def _send_percentage(self, file_operation, status, percentage):
        now = datetime.now()
        delta = now - self.last_send
        if ((delta.seconds + delta.microseconds/1000000.) > 0.5) \
                or (percentage == 100):
            file_operation.notify_pathname_status_change(
                status, {'percentage': percentage})
            self.last_send = now

    def _handle_network_transfer_operation(self, file_operation):
        '''By locking we maintain the following invariant: if the
        EventQueue tries to abort this operation due to a conflicting
        operation, then EventQueue waits until this operation either
        aborts or completes.
        This preserves the ordering of execution for the conflicting
        operations - that is, the EventQueue doesn't emit
        the conflicting operation while this one is still working.
        '''
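        # Illustrative note (an assumption about the aborting side, which is
        # not shown in this file): for the invariant above to hold, the code
        # that aborts a conflicting operation must take the same lock, e.g.
        #
        #     with file_operation.lock:
        #         file_operation.abort()
        #
        # so that an abort can never interleave with the child spawn below.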

        with file_operation.lock:
            if not file_operation.is_aborted():
                self.logger.debug(u"Starting child process to handle file"
                                  " operation: %s" % file_operation)
                try:
                    self._spawn_child(file_operation)
                    self.input_queue.put(('FileOperation', file_operation))
                except Exception as e:
                    self.logger.error(
                        u"Could not spawn a child process: %r" % e)
                    raise OperationRejection(file_operation)
            else:
                self.logger.debug(u"Got an already aborted operation, "
                                  "giving up: %s" % file_operation)
                return False

        if file_operation.verb == 'UPLOAD':
            status = PStatuses.UPLOADING
        else:
            status = PStatuses.DOWNLOADING

        self._send_percentage(file_operation, status, 0)
        termination = False
        max_retry = 3

        while not termination:
            message, content = self.communicationQueue.get()
            self.logger.debug(u'Child sent back %s with content %s'
                              % (message, content))

            if message == 'completed':
                termination = True
                if file_operation.verb == 'DOWNLOAD':
                    return {'actual_etag': content['actual_etag']}
                else:
                    return True

            elif message == 'interrupted':
                self.logger.debug(u"Child has been terminated by "
                                  "Software Operation: %s"
                                  % file_operation)
                file_operation.abort()
                termination = True
                return False

            elif message == 'failed':
                self.logger.error(u"Child has been terminated, "
                                  "Assuming failure for operation: %s"
                                  % file_operation)
                max_retry -= 1
                if max_retry == 0:
                    raise OperationRejection(file_operation)
                self.input_queue.put(('FileOperation', file_operation))

            elif message == 'percentage':
                self._send_percentage(file_operation, status, content)

            elif message == 'log':
                level, msg = content
                self.child_logger[level](msg)

            elif message == 'ShuttingDown':
                self.logger.debug(u"Get a shutting down message from process")
                termination = True
                return False

            elif message == 'DIED':
                self.child = None
                termination = True
                raise OperationRejection(file_operation)

    def _handle_upload_file_operation(self, operation):
        try:
            success = self._handle_network_transfer_operation(operation)
            if success:
                CryptoUtils.clean_env(operation, self.logger)
                self.logger.debug(u"Operation has been completed "
                                  "successfully: %s" % operation)
                self.logger.info(u'Synchronized pathname: %s "%s", which '
                                 'will be persisted after a commit'
                                 % (operation.verb, operation.pathname))
                operation.notify_pathname_status_change(PStatuses.UPLOADED,
                                                        {'percentage': 100})
                operation.complete()

        except Exception as e:
            self.logger.error(u"Error while uploading: %r."
                              " Rejecting the operation: %s" % (e, operation))
            operation.reject()

    def _handle_operation_create_directories(self, task):
        operations = sorted(task.operations, key=lambda op: op.pathname)
        actual_etag = 'd41d8cd98f00b204e9800998ecf8427e'
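        # Note: the etag above is the MD5 digest of the empty string, used as
        # the expected etag for a directory, which carries no content.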

        for operation in operations:

            # Check integrity
            operation.notify_pathname_status_change(PStatuses.DOWNLOADING)
            res = self._check_download_integrity(operation, actual_etag)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_download_integrity_error(
                    operation, res['reason'],
                    res['expected_etag'], res['expected_basis'],
                    res['actual_etag'], res['computed_basis'])
                return

            # The directory is valid, create it
            self.warebox.make_directory(operation.pathname)
            lmtime = self.warebox.get_last_modification_time(operation.pathname)
            operation.lmtime = lmtime
            operation.notify_pathname_status_change(PStatuses.ALIGNED)
            operation.complete()
            self.logger.info(u'Synchronized pathname: %s "%s"'
                             % (operation.verb, operation.pathname))
            self.logger.debug(u"Operation has been completed "
                              "successfully: %s" % operation)

        task.complete()

    def _handle_download_file_operation(self, operation):
        try:
            # Note: it is a normal file, not a directory
            CryptoUtils.set_temp_file(operation, self.cfg)
            success = self._handle_network_transfer_operation(operation)

            if success:

                actual_etag = success['actual_etag']
                res = self._check_download_integrity(operation, actual_etag)
                if not res['valid']:
                    # Detected an integrity error. Badly bad.
                    self._server_session.signal_download_integrity_error(
                        operation, res['reason'],
                        res['expected_etag'], res['expected_basis'],
                        res['actual_etag'], res['computed_basis'])
                    return

                if operation.to_decrypt:
                    # We have not finished yet, leaving the rest to decrypter.
                    # Note: the decrypter duplicates the following ending logic
                    self.cryptoAdapter.put(operation)
                    return

                # It is a valid cleartext file, move it to the warebox
                self.warebox.move(operation.temp_pathname,
                                  operation.pathname,
                                  operation.conflicted)

                self.logger.debug(u"Operation has been completed "
                                  "successfully: %s" % operation)
                self.logger.info(u'Synchronized pathname: %s "%s"'
                                 % (operation.verb, operation.pathname))

                lmtime = self.warebox.get_last_modification_time(operation.pathname)
                operation.lmtime = lmtime
                operation.notify_pathname_status_change(PStatuses.ALIGNED)
                operation.complete()

        except Exception as e:
            self.logger.error(u"Error while downloading: %r."
                              " Rejecting the operation: %s" % (e, operation))
            self.logger.error(u"Stacktrace: %r" % traceback.format_exc())
            operation.reject()

        finally:
            # Just in case the move had failed for any reason
            if not operation.to_decrypt:
                if operation.temp_pathname is not None \
                and os.path.exists(operation.temp_pathname):
                    _try_remove(operation.temp_pathname, self.logger)

    def _check_download_integrity(self, operation, actual_etag):
        pathname = operation.pathname
        proof = operation.download_info['proof']
        basis = operation.download_info['trusted_basis']

        self.integrity_manager.trusted_basis = basis

        result = {}
        result['valid'] = None
        result['reason'] = None
        result['expected_etag'] = operation.storage_etag
        result['expected_basis'] = basis
        result['actual_etag'] = actual_etag
        result['computed_basis'] = None

        if operation.storage_etag != actual_etag:
            # Note: the etag inside the proof will be
            # checked by the integrity manager.
            self.logger.debug(
                u"Invalid etag of download operation. "
                "Expected etag %s but found %s. %s"
                % (operation.storage_etag, actual_etag, operation))
            result['valid'] = False
            result['reason'] = "Expected etag different from actual etag"
            return result

        try:
            self.integrity_manager.addOperation('DOWNLOAD',
                                                pathname,
                                                proof,
                                                actual_etag)
            result['valid'] = True
            result['computed_basis'] = basis

        except MalformedProofException as e:
            self.logger.debug(u"Invalid proof of download operation: "
                              "%s. %r, %r" % (e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        except WrongBasisFromProofException as e:
            self.logger.debug(u"Integrity check of download operation failed."
                              "Basis %s was expected but the proof computed "
                              "%s. Error details: %s. %r, %r" %
                              (basis, e.operation_basis, e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e
            result['computed_basis'] = e.operation_basis

        except Exception as e:
            self.logger.debug(u"Integrity check of download operation failed"
                              " with unknown reason: %s. %r, %r"
                              % (e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        finally:
            self.integrity_manager.clear()

        return result

    def _handle_delete_local_file_operation(self, task):
        pathnames = sorted(task.pathname2proof.keys())
        #self.logger.debug("Going to delete pathnames: %s" % pathnames)

        for pathname in pathnames:
            basis = task.trusted_basis
            proof = task.pathname2proof[pathname]
            res = self._check_deletelocal_integrity(pathname, proof, basis)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_deletelocal_integrity_error(
                    pathname, proof, res['reason'],
                    res['expected_basis'], res['computed_basis'])
                return

        roots = {}
        for pathname in pathnames:
            found_ancestor = False
            for root in roots:
                if pathname.startswith(root):
                    found_ancestor = True
                    break
            if not found_ancestor:
                roots[pathname] = True

        try:
            for pathname in roots.iterkeys():
                self.warebox.delete_tree(pathname)
        except Exception as e:
            self.logger.error(
                u"Caught an operating system exception while "
                u"modifying the filesystem. Are you locking the Warebox? % r"
                % e)
            raise

        task.complete()

    def _check_deletelocal_integrity(self, pathname, proof, trusted_basis):
        self.integrity_manager.trusted_basis = trusted_basis

        result = {}
        result['valid'] = None
        result['reason'] = None
        result['expected_basis'] = trusted_basis
        result['computed_basis'] = None

        try:
            self.integrity_manager.addOperation('DELETE_LOCAL',
                                                pathname,
                                                proof,
                                                None)
            result['valid'] = True
            result['computed_basis'] = trusted_basis

        except MalformedProofException as e:
            self.logger.debug(u"Invalid proof of delete_local operation: "
                              "%s. %s, %s" % (e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        except WrongBasisFromProofException as e:
            self.logger.debug(u"Integrity check of delete_local operation failed."
                              "Basis %s was expected but the proof computed "
                              "%s. Error details: %s. %r, %r" %
                              (trusted_basis, e.operation_basis, e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e
            result['computed_basis'] = e.operation_basis

        except Exception as e:
            self.logger.debug(u"Integrity check of delete_local operation failed"
                              " with unknown reason: %s. %r, %r"
                              % (e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        finally:
            self.integrity_manager.clear()

        return result

    def _find_new_name(self, pathname):
        # TODO: try harder in finding a name that is available
        curr_time = datetime.now().strftime('%Y-%m-%d %H_%M_%S')
        suffix = ' (Conflicted on %s)' % curr_time
        if pathname.endswith('/'):
            new_pathname = pathname[:-1] + suffix + '/'
        else:
            basename, ext = os.path.splitext(pathname)
            new_pathname = basename + suffix + ext
        return new_pathname

    def _rename_conflicting_pathname(self, pathname, prefix=None):
        new_pathname = self.warebox.rename(pathname, pathname, prefix)
        return new_pathname

    def _handle_operation_resolve_deletion_conflicts(self, task):
        """Solve deletion conflicts by renaming the local file to a new
        pathname. The old pathname will result implicitly deleted.

        Side effect on Content_to_upload to add the renamed files.

        Deletion conflicts are tough to resolve. A conflicting pathname:
        a) has been deleted by the server
        b) has an ancestor folder that has been deleted by the server
        c) both
        It must be checked whether it is safe to leave the file in its
        original folder (that is, whether that folder still exists).
        """
        conflicts = task.deletion_conflicts
        content_to_delete_locally = task.content_to_delete_locally

        for pathname in conflicts:
            basis = task.trusted_basis
            proof = task.pathname2proof[pathname]
            res = self._check_deletelocal_integrity(pathname, proof, basis)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_deletelocal_integrity_error(
                    pathname, proof, res['reason'],
                    res['expected_basis'], res['computed_basis'])
                return

        try:
            backupped_folders = {}
            for pathname in conflicts:
                missing_ancestor_folders = filter(lambda p: pathname.startswith(p), content_to_delete_locally)
                # Is it safe leaving the file in its original folder?
                if len(missing_ancestor_folders) > 0:
                    # No, it's been deleted. Backup the whole deleted subtree
                    missing_ancestor_folders = sorted(missing_ancestor_folders)
                    highest_missing_folder = missing_ancestor_folders[0]
                    if not highest_missing_folder in backupped_folders:
                        backup_folder = self._find_new_name(highest_missing_folder)
                        self.warebox.make_directory(backup_folder)
                        backupped_folders[highest_missing_folder] = backup_folder
                    backup_folder = backupped_folders[highest_missing_folder]
                    new_pathname = pathname.replace(highest_missing_folder, backup_folder, 1)
                    self.warebox.make_directories_to(new_pathname)
                    if not self.warebox.is_directory(new_pathname):
                        self.warebox.rename(pathname, new_pathname)
                else:
                    # Yes, just rename the file
                    new_pathname = self._rename_conflicting_pathname(pathname, 'Deleted')
                    self.logger.warning(
                        u"Conflict detected for pathname %r, which has been "
                        u"remotely deleted. Moved the local copy to: %r"
                        % (pathname, new_pathname))

            task.complete()

        except Exception:
            self.logger.error(
                u"Caught an operating system exception while modifying the "
                u"filesystem. Are you locking the Warebox?")
            raise

    def _start_child_logger(self):
        self._stop_child_logger()
        self.child_logs_queue = multiprocessing.Queue()
        self.child_logger = LogsReceiver(self.getName(), self.child_logs_queue)
        self.child_logger.start()
        if self.child_logger is None:
            logger = logging.getLogger(u'FR.WorkerChild of %s' % self.getName())
            self.child_logger = {
                'info': logger.info,
                'debug': logger.debug,
                'warning': logger.warning,
                'error': logger.error,
                'critical': logger.critical
            }

    def _stop_child_logger(self):
        if self.child_logger is not None:
            self.child_logger.stop()
            self.child_logs_queue.put(('log', ('debug', 'Die please!')))
            self.child_logger.join()
            self.child_logger = None

        if self.child_logs_queue is not None:
            self.child_logs_queue.close()
            self.child_logs_queue.join_thread()
            self.child_logs_queue = None

    def _create_multiprocessing_queues(self):
        self._destroy_multiprocessing_queues()
        self.input_queue = Queue.Queue()
        self.communicationQueue = Queue.Queue()

    def _destroy_multiprocessing_queue(self, queue):
        if queue is not None:
            while not queue.empty():
                queue.get_nowait()
#             queue.close()
#             queue.join_thread()
            queue = None

    def _destroy_multiprocessing_queues(self):
        self._destroy_multiprocessing_queue(self.input_queue)
        self._destroy_multiprocessing_queue(self.communicationQueue)

    def _spawn_child(self, file_operation):
        if self.child is None or not self.child.is_alive():
#             self.terminationEvent = multiprocessing.Event()
            self.terminationEvent = threading.Event()
            self._create_multiprocessing_queues()
            self._start_child_logger()
            try:
                self.logger.debug(u"Allocating child process to handle file "
                                  "operation: %s" % file_operation)
                self.child = WorkerChild(self.warebox,
                                         self.input_queue,
                                         self.communicationQueue,
                                         self.terminationEvent,
                                         self._send_percentage,
                                         #self.child_logs_queue,
                                         self.cfg,
                                         self._worker_pool)

                self.child.start()
                self.logger.debug(u"Child process Started to handle file "
                                  "operation: %s" % file_operation)
            except Exception:
                self._stop_child_logger()
                raise

    def on_operation_abort(self, file_operation):
        self.logger.debug(u'Abort detected for the operation I am handling: '
                          '%s. Terminating child process...' % file_operation)
        self.abort_operation()

    def abort_operation(self):
        if self.child is not None and self.child.is_alive():
            try:
                if self.terminationEvent is not None:
                    self.terminationEvent.set()
#                 else:
#                     self.child.terminate()
            except:
                pass

    def _terminate_child(self):
        self.stop_network_transfer()
        if self.child is not None:
            self.input_queue.put(('PoisonPill', None))
            self.child.join(5)
            self.child = None

    def _clean_env(self):
        self._terminate_child()
        self._stop_child_logger()
        self._destroy_multiprocessing_queues()

    def _on_poison_pill(self):
        self.logger.debug(u"Got poison pill.")
        self.must_die.set()
        self._clean_env()

    def terminate(self):
        '''
        Signal the worker that the time has come.
        '''
        self.abort_operation()

    def stop_network_transfer(self):
        self.abort_operation()

    def _termination_requested(self):
        return self.must_die.wait(0.01)
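
Reading the class as a whole, a Worker is used like any other thread: construct it with its collaborators, start() it, feed it operations through operation_queue, and stop it by enqueuing the 'POISON_PILL' sentinel. A minimal shutdown sketch under that reading; _WareboxStub and _PoolStub are hypothetical stand-ins, and a real deployment wires in the actual Warebox, ServerSession, ConfigManager, crypto adapter and WorkerPool instead:

# Sketch only: stub collaborators cover just the calls made on the
# poison-pill path; they are not part of filerockclient.
import Queue

class _WareboxStub(object):
    def _check_blacklisted_dir(self):
        pass

class _PoolStub(object):
    def release_worker(self):
        pass

operation_queue = Queue.Queue()
worker = Worker(warebox=_WareboxStub(),
                operation_queue=operation_queue,
                server_session=None,
                cfg=None,
                cryptoAdapter=None,
                worker_pool=_PoolStub())
worker.start()
# Real file operations would be enqueued here.
operation_queue.put('POISON_PILL')   # the worker sets must_die and exits
worker.join()
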
Example #6
class Worker(Thread):
    '''A worker takes something to do and does it, then takes the next
    thing to do, and so on.
    '''

    def __init__(self,
                 warebox,
                 operation_queue,
                 server_session,
                 cfg,
                 cryptoAdapter,
                 worker_pool):
        """
        @param warebox:
                    Instance of filerockclient.warebox.Warebox.
        @param cfg:
                    Instance of filerockclient.config.ConfigManager.
        @param operation_queue:
                    A threading queue, where worker receives the operations
        @param worker_pool:
                    The WorkerPool instance that manages this worker.
        """
        Thread.__init__(self, name=self.__class__.__name__)
        self.cfg = cfg
        self.operation_queue = operation_queue
        self._server_session = server_session
        self.warebox = warebox
        self.child = None

        self.input_queue = None
        self.communicationQueue = None
        self.child_logger = None
        self.child_logs_queue = None
        self.cryptoAdapter = cryptoAdapter
        self.integrity_manager = IntegrityManager(None)

        self._worker_pool = worker_pool
        self.must_die = threading.Event()
        self.last_send = datetime.now()
        self.communicationQueue = None
        self.child_logs_queue = None

    def run(self):
        """
        Serves file operations until termination request is received
        """
        try:
            self.name += "_%s" % self.ident
            self.logger = logging.getLogger("FR.%s" % self.getName())
            self.logger.debug(u'Started.')
            while not self._termination_requested():
                self._serve_file_operations()
            self.logger.debug(u"I'm terminated.")
        finally:
            self._terminate_child()

    def _serve_file_operations(self):
        """
        Blocks on operation queue until a message is received

        If the message is a POISON_PILL, the worker terminates itself.
        If the message is a non-aborted file operation, an abort handler is
        registered on it and the operation is handled.
        """

        file_operation = self.operation_queue.get()
        if file_operation == 'POISON_PILL':
            self._on_poison_pill()
            return

        assert type(file_operation) in [PathnameOperation,
                                        CreateDirectoriesTask,
                                        DeleteLocalTask,
                                        ResolveDeletionConflictsTask]

        self.logger.debug(u"worker executing: %s", file_operation)
        if __debug__:
            self._worker_pool.track_assign_worker_to_pathname(
                self.ident,
                file_operation.pathname)

        try:
            self.warebox._check_blacklisted_dir()
            if file_operation.is_aborted():
                self.logger.debug(u"Got an already aborted operation, "
                                  "giving up: %s" % file_operation)
            else:
                self.logger.debug(u"Got an operation to handle: %s",
                                  file_operation)
                file_operation.register_abort_handler(self.on_operation_abort)
                self._handle_file_operation(file_operation)

        except Exception:
            self.logger.error(u"Some problem occurred in worker "
                              u"handling operation %r" % file_operation)
            raise

        finally:
            self.logger.debug("Releasing a worker")

            if __debug__:
                self._worker_pool.track_assert_assigned(
                    self.ident, file_operation.pathname)

            self._worker_pool.release_worker()

            if __debug__:
                self._worker_pool.track_release_worker(
                    self.ident,
                    file_operation.pathname)

    def _handle_file_operation(self, file_operation):
        if file_operation.verb == 'UPLOAD':
            self._handle_upload_file_operation(file_operation)
        elif file_operation.verb == 'DOWNLOAD':
            self._handle_download_file_operation(file_operation)
        elif file_operation.verb == 'DELETE_LOCAL':
            self._handle_delete_local_file_operation(file_operation)
        elif file_operation.verb == 'CREATE_DIRECTORIES':
            self._handle_operation_create_directories(file_operation)
        elif file_operation.verb == 'RESOLVE_DELETION_CONFLICTS':
            self._handle_operation_resolve_deletion_conflicts(file_operation)
        else:
            self.logger.warning(u"I should not handle a '%s' operation! "
                                "I'm rejecting it", file_operation.verb)
            file_operation.reject()

    def _send_percentage(self, file_operation, status, percentage):
        now = datetime.now()
        delta = now - self.last_send
        if ((delta.seconds + delta.microseconds/1000000.) > 0.5) \
                or (percentage == 100):
            file_operation.notify_pathname_status_change(
                status, {'percentage': percentage})
            self.last_send = now

    def _handle_network_transfer_operation(self, file_operation):
        '''By locking we maintain the following invariant: if the
        EventQueue tries to abort this operation due to a conflicting
        operation, then EventQueue waits until this operation either
        aborts or completes.
        This preserves the ordering of execution for the conflicting
        operations - that is, the EventQueue doesn't emit
        the conflicting operation while this one is still working.
        '''

        with file_operation.lock:
            if not file_operation.is_aborted():
                self.logger.debug(u"Starting child process to handle file"
                                  " operation: %s" % file_operation)
                try:
                    self._spawn_child(file_operation)
                    self.input_queue.put(('FileOperation', file_operation))
                except Exception as e:
                    self.logger.error(
                        u"Could not spawn a child process: %r" % e)
                    raise OperationRejection(file_operation)
            else:
                self.logger.debug(u"Got an already aborted operation, "
                                  "giving up: %s" % file_operation)
                return False

        if file_operation.verb == 'UPLOAD':
            status = PStatuses.UPLOADING
        else:
            status = PStatuses.DOWNLOADING

        self._send_percentage(file_operation, status, 0)
        termination = False
        max_retry = 3

        while not termination:
            message, content = self.communicationQueue.get()
            self.logger.debug(u'Child sent back %s with content %s'
                              % (message, content))

            if message == 'completed':
                termination = True
                if file_operation.verb == 'DOWNLOAD':
                    return {'actual_etag': content['actual_etag']}
                else:
                    return True

            elif message == 'interrupted':
                self.logger.debug(u"Child has been terminated by "
                                  "Software Operation: %s"
                                  % file_operation)
                file_operation.abort()
                termination = True
                return False

            elif message == 'failed':
                self.logger.error(u"Child has been terminated, "
                                  "Assuming failure for operation: %s"
                                  % file_operation)
                max_retry -= 1
                if max_retry == 0:
                    raise OperationRejection(file_operation)
                self.input_queue.put(('FileOperation', file_operation))

            elif message == 'percentage':
                self._send_percentage(file_operation, status, content)

            elif message == 'log':
                level, msg = content
                self.child_logger[level](msg)

            elif message == 'ShuttingDown':
                self.logger.debug(u"Get a shutting down message from process")
                termination = True
                return False

            elif message == 'DIED':
                self.child = None
                termination = True
                raise OperationRejection(file_operation)

    def _handle_upload_file_operation(self, operation):
        try:
            success = self._handle_network_transfer_operation(operation)
            if success:
                CryptoUtils.clean_env(operation, self.logger)
                self.logger.debug(u"Operation has been completed "
                                  "successfully: %s" % operation)
                self.logger.info(u'Synchronized pathname: %s "%s", which '
                                 'will be persisted after a commit'
                                 % (operation.verb, operation.pathname))
                operation.notify_pathname_status_change(PStatuses.UPLOADED,
                                                        {'percentage': 100})
                operation.complete()

        except Exception as e:
            self.logger.error(u"Error while uploading: %r."
                              " Rejecting the operation: %s" % (e, operation))
            operation.reject()

    def _handle_operation_create_directories(self, task):
        operations = sorted(task.operations, key=lambda op: op.pathname)
        actual_etag = 'd41d8cd98f00b204e9800998ecf8427e'
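        # Note: the etag above is the MD5 digest of the empty string, used as
        # the expected etag for a directory, which carries no content.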

        for operation in operations:

            # Check integrity
            operation.notify_pathname_status_change(PStatuses.DOWNLOADING)
            res = self._check_download_integrity(operation, actual_etag)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_download_integrity_error(
                    operation, res['reason'],
                    res['expected_etag'], res['expected_basis'],
                    res['actual_etag'], res['computed_basis'])
                return

            # The directory is valid, create it
            self._make_directories_to(operation.pathname)
            lmtime = self.warebox.get_last_modification_time(operation.pathname)
            operation.lmtime = lmtime
            operation.notify_pathname_status_change(PStatuses.ALIGNED)
            operation.complete()
            self.logger.info(u'Synchronized pathname: %s "%s"'
                             % (operation.verb, operation.pathname))
            self.logger.debug(u"Operation has been completed "
                              "successfully: %s" % operation)

        task.complete()

    def _handle_download_file_operation(self, operation):
        try:
            # Note: it is a normal file, not a directory
            CryptoUtils.set_temp_file(operation, self.cfg)
            success = self._handle_network_transfer_operation(operation)

            if success:

                actual_etag = success['actual_etag']
                res = self._check_download_integrity(operation, actual_etag)
                if not res['valid']:
                    # Detected an integrity error. Badly bad.
                    self._server_session.signal_download_integrity_error(
                        operation, res['reason'],
                        res['expected_etag'], res['expected_basis'],
                        res['actual_etag'], res['computed_basis'])
                    return

                self._make_directories_to(operation.pathname)

                if operation.to_decrypt:
                    # We have not finished yet, leaving the rest to decrypter.
                    # Note: the decrypter duplicates the following ending logic
                    self.cryptoAdapter.put(operation)
                    return

                # It is a valid cleartext file, move it to the warebox
                self.warebox.move(operation.temp_pathname,
                                  operation.pathname,
                                  operation.conflicted)

                self.logger.debug(u"Operation has been completed "
                                  "successfully: %s" % operation)
                self.logger.info(u'Synchronized pathname: %s "%s"'
                                 % (operation.verb, operation.pathname))

                lmtime = self.warebox.get_last_modification_time(operation.pathname)
                operation.lmtime = lmtime
                operation.notify_pathname_status_change(PStatuses.ALIGNED)
                operation.complete()

        except Exception as e:
            self.logger.error(u"Error while downloading: %r."
                              " Rejecting the operation: %s" % (e, operation))
            self.logger.error(u"Stacktrace: %r" % traceback.format_exc())
            operation.reject()

        finally:
            # Just in case the move had failed for any reason
            if not operation.to_decrypt:
                if operation.temp_pathname is not None \
                and os.path.exists(operation.temp_pathname):
                    _try_remove(operation.temp_pathname, self.logger)

    def _make_directories_to(self, pathname):
        """Make sure that the full path for this file exists. It may be
        either a file or a directory.

        There are a couple of reasons why the full path may not exist:
        1) Data has been loaded on the storage with an external tool
           that doesn't explicitly support directories, so they have
           not been created. Treating these directories as something
           to download is wrong: they wouldn't pass the integrity
           check (they really don't exist in the trusted dataset),
           so it is better to treat them as new local modifications.
        2) The user has deleted, while offline, a directory that is
           needed by a download. We call it a "hierarchy conflict" and
           it is uncovered by the diff algorithm, since case 1 would
           still remain unhandled.
        """
        self.warebox.make_directories_to(pathname)

    def _check_download_integrity(self, operation, actual_etag):
        pathname = operation.pathname
        proof = operation.download_info['proof']
        basis = operation.download_info['trusted_basis']

        self.integrity_manager.trusted_basis = basis

        result = {}
        result['valid'] = None
        result['reason'] = None
        result['expected_etag'] = operation.storage_etag
        result['expected_basis'] = basis
        result['actual_etag'] = actual_etag
        result['computed_basis'] = None

        if operation.storage_etag != actual_etag:
            # Note: the etag inside the proof will be
            # checked by the integrity manager.
            self.logger.debug(
                u"Invalid etag of download operation. "
                "Expected etag %s but found %s. %s"
                % (operation.storage_etag, actual_etag, operation))
            result['valid'] = False
            result['reason'] = "Expected etag different from actual etag"
            return result

        try:
            self.integrity_manager.addOperation('DOWNLOAD',
                                                pathname,
                                                proof,
                                                actual_etag)
            result['valid'] = True
            result['computed_basis'] = basis

        except MalformedProofException as e:
            self.logger.debug(u"Invalid proof of download operation: "
                              "%s. %r, %r" % (e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        except WrongBasisFromProofException as e:
            self.logger.debug(u"Integrity check of download operation failed."
                              "Basis %s was expected but the proof computed "
                              "%s. Error details: %s. %r, %r" %
                              (basis, e.operation_basis, e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e
            result['computed_basis'] = e.operation_basis

        except Exception as e:
            self.logger.debug(u"Integrity check of download operation failed"
                              " with unknown reason: %s. %r, %r"
                              % (e, operation, proof))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        finally:
            self.integrity_manager.clear()

        return result

    def _handle_delete_local_file_operation(self, task):
        pathnames = sorted(task.pathname2proof.keys())
        #self.logger.debug("Going to delete pathnames: %s" % pathnames)

        for pathname in pathnames:
            basis = task.trusted_basis
            proof = task.pathname2proof[pathname]
            res = self._check_deletelocal_integrity(pathname, proof, basis)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_deletelocal_integrity_error(
                    pathname, proof, res['reason'],
                    res['expected_basis'], res['computed_basis'])
                return

        roots = {}
        for pathname in pathnames:
            found_ancestor = False
            for root in roots:
                if pathname.startswith(root):
                    found_ancestor = True
                    break
            if not found_ancestor:
                roots[pathname] = True

        try:
            for pathname in roots.iterkeys():
                self.warebox.delete_tree(pathname)
        except Exception as e:
            self.logger.error(
                u"Caught an operating system exception while "
                u"modifying the filesystem. Are you locking the Warebox? % r"
                % e)
            raise

        task.complete()

    def _check_deletelocal_integrity(self, pathname, proof, trusted_basis):
        self.integrity_manager.trusted_basis = trusted_basis

        result = {}
        result['valid'] = None
        result['reason'] = None
        result['expected_basis'] = trusted_basis
        result['computed_basis'] = None

        try:
            self.integrity_manager.addOperation('DELETE_LOCAL',
                                                pathname,
                                                proof,
                                                None)
            result['valid'] = True
            result['computed_basis'] = trusted_basis

        except MalformedProofException as e:
            self.logger.debug(u"Invalid proof of delete_local operation: "
                              "%s. %s, %s" % (e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        except WrongBasisFromProofException as e:
            self.logger.debug(u"Integrity check of delete_local operation failed."
                              "Basis %s was expected but the proof computed "
                              "%s. Error details: %s. %r, %r" %
                              (trusted_basis, e.operation_basis, e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e
            result['computed_basis'] = e.operation_basis

        except Exception as e:
            self.logger.debug(u"Integrity check of delete_local operation"
                              " failed with unknown reason: %s. %r, %r"
                              % (e, pathname, proof.raw))
            self.logger.debug(traceback.format_exc())
            result['valid'] = False
            result['reason'] = "%s" % e

        finally:
            self.integrity_manager.clear()

        return result

    def _find_new_name(self, pathname):
        # TODO: try harder in finding a name that is available
        curr_time = datetime.now().strftime('%Y-%m-%d %H_%M_%S')
        suffix = ' (Conflicted on %s)' % curr_time
        if pathname.endswith('/'):
            new_pathname = pathname[:-1] + suffix + '/'
        else:
            basename, ext = os.path.splitext(pathname)
            new_pathname = basename + suffix + ext
        return new_pathname

    def _rename_conflicting_pathname(self, pathname, prefix=None):
        new_pathname = self.warebox.rename(pathname, pathname, prefix)
        return new_pathname

    def _handle_operation_resolve_deletion_conflicts(self, task):
        """Solve deletion conflicts by renaming the local file to a new
        pathname. The old pathname will result implicitly deleted.

        Deletion conflicts are tough to resolve. A conflicting pathname:
        a) has been deleted by the server
        b) has an ancestor folder that has been deleted by the server
        c) both
        It must be checked whether it is safe to leave the file in its
        original folder (that is, whether that folder still exists).
        """
        conflicts = task.deletion_conflicts
        content_to_delete_locally = task.content_to_delete_locally

        for pathname in conflicts:
            basis = task.trusted_basis
            proof = task.pathname2proof[pathname]
            res = self._check_deletelocal_integrity(pathname, proof, basis)
            if not res['valid']:
                # Detected an integrity error. Badly bad.
                self._server_session.signal_deletelocal_integrity_error(
                    pathname, proof, res['reason'],
                    res['expected_basis'], res['computed_basis'])
                return

        try:
            backupped_folders = {}
            for pathname in conflicts:
                missing_ancestor_folders = filter(lambda p: pathname.startswith(p), content_to_delete_locally)
                # Is it safe leaving the file in its original folder?
                if len(missing_ancestor_folders) > 0:
                    # No, it's been deleted. Backup the whole deleted subtree
                    missing_ancestor_folders = sorted(missing_ancestor_folders)
                    highest_missing_folder = missing_ancestor_folders[0]
                    if not highest_missing_folder in backupped_folders:
                        backup_folder = self._find_new_name(highest_missing_folder)
                        self.warebox.make_directory(backup_folder)
                        backupped_folders[highest_missing_folder] = backup_folder
                    backup_folder = backupped_folders[highest_missing_folder]
                    new_pathname = pathname.replace(highest_missing_folder, backup_folder, 1)
                    self.warebox.make_directories_to(new_pathname)
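                    # If the backup target already exists as a directory
                    # there is nothing left to move; otherwise the entry is
                    # moved into the backup subtree.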
                    if not self.warebox.is_directory(new_pathname):
                        self.warebox.rename(pathname, new_pathname)
                else:
                    # Yes, just rename the file
                    new_pathname = self._rename_conflicting_pathname(pathname, 'Deleted')
                    self.logger.warning(
                        u"Conflict detected for pathname %r, which has been "
                        u"remotely deleted. Moved the local copy to: %r"
                        % (pathname, new_pathname))

            task.complete()

        except Exception:
            self.logger.error(
                u"Caught an operating system exception while modifying the "
                u"filesystem. Are you locking the Warebox?")
            raise

    def _start_child_logger(self):
        self._stop_child_logger()
        self.child_logs_queue = multiprocessing.Queue()
        self.child_logger = LogsReceiver(self.getName(), self.child_logs_queue)
        self.child_logger.start()
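        # NOTE: the fallback below is unreachable as written, since
        # self.child_logger has just been assigned; it presumably dates back
        # to a version where LogsReceiver could be unavailable.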
        if self.child_logger is None:
            logger = logging.getLogger(u'FR.WorkerChild of %s' % self.getName())
            self.child_logger = {
                'info': logger.info,
                'debug': logger.debug,
                'warning': logger.warning,
                'error': logger.error,
                'critical': logger.critical
            }

    def _stop_child_logger(self):
        if self.child_logger is not None:
            self.child_logger.stop()
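            # One last record is pushed so that the receiver, presumably
            # blocked on the queue, wakes up and lets join() return.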
            self.child_logs_queue.put(('log', ('debug', 'Die please!')))
            self.child_logger.join()
            self.child_logger = None

        if self.child_logs_queue is not None:
            self.child_logs_queue.close()
            self.child_logs_queue.join_thread()
            self.child_logs_queue = None

    def _create_multiprocessing_queues(self):
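        # Despite the name, these are plain thread queues: WorkerChild
        # apparently runs as a thread rather than a separate process (see the
        # commented-out multiprocessing calls below and in _spawn_child).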
        self._destroy_multiprocessing_queues()
        self.input_queue = Queue.Queue()
        self.communicationQueue = Queue.Queue()

    def _destroy_multiprocessing_queue(self, queue):
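        # Drain any leftover items. Note that rebinding the local name
        # 'queue' to None below does not clear the caller's attribute;
        # _create_multiprocessing_queues() replaces the attributes anyway.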
        if queue is not None:
            while not queue.empty():
                queue.get_nowait()
#             queue.close()
#             queue.join_thread()
            queue = None

    def _destroy_multiprocessing_queues(self):
        self._destroy_multiprocessing_queue(self.input_queue)
        self._destroy_multiprocessing_queue(self.communicationQueue)

    def _spawn_child(self, file_operation):
        if self.child is None or not self.child.is_alive():
#             self.terminationEvent = multiprocessing.Event()
            self.terminationEvent = threading.Event()
            self._create_multiprocessing_queues()
            self._start_child_logger()
            try:
                self.logger.debug(u"Allocating child process to handle file "
                                  "operation: %s" % file_operation)
                self.child = WorkerChild(self.warebox,
                                         self.input_queue,
                                         self.communicationQueue,
                                         self.terminationEvent,
                                         self._send_percentage,
                                         #self.child_logs_queue,
                                         self.cfg,
                                         self._worker_pool)

                self.child.start()
                self.logger.debug(u"Child process Started to handle file "
                                  "operation: %s" % file_operation)
            except Exception:
                self._stop_child_logger()
                raise

    def on_operation_abort(self, file_operation):
        self.logger.debug(u'Abort detected for the operation I am handling: '
                          '%s. Terminating child process...' % file_operation)
        self.abort_operation()

    def abort_operation(self):
        if self.child is not None and self.child.is_alive():
            try:
                if self.terminationEvent is not None:
                    self.terminationEvent.set()
#                 else:
#                     self.child.terminate()
            except Exception:
                pass

    def _terminate_child(self):
        self.stop_network_transfer()
        if self.child is not None:
            self.input_queue.put(('PoisonPill', None))
            self.child.join(5)
            self.child = None

    def _clean_env(self):
        self._terminate_child()
        self._stop_child_logger()
        self._destroy_multiprocessing_queues()

    def _on_poison_pill(self):
        self.logger.debug(u"Got poison pill.")
        self.must_die.set()
        self._clean_env()

    def terminate(self):
        """
        Signal the worker that it is time to terminate.
        """
        self.abort_operation()

    def stop_network_transfer(self):
        self.abort_operation()

    def _termination_requested(self):
        return self.must_die.wait(0.01)


class ServerSession(threading.Thread):
    """
    The thread that controls the communication with the server and the
    internal execution of the client.

    ServerSession is the intelligent part of the client application. It
    is implemented as an event-loop machine, receiving events from many
    sources: the user, ServerSession itself, other application
    components and the server, each with a different priority level. It
    decides, for example, when to handle operations for synchronizing
    data and when to answer messages from the server, thus implementing
    the communication protocol.

    The logic is implemented as an object-oriented state machine, which
    decides which events to handle in each state and how to handle them.
    ServerSession is both a container (formally a "context") for "state"
    objects and the only public interface of this component. The context
    gives the states a shared memory space and access to other
    components.

    All the logic is actually implemented in the state objects, limiting
    ServerSession to executing the "current state". State objects also
    decide when to switch to another state, as a reaction to an input
    event. The states are implemented using object-oriented inheritance
    in order to factor out the logic common to several states, making
    ServerSession precisely a "Hierarchical State Machine".
    See the "State" design pattern in the book by Gamma et al., "Design
    Patterns: Elements of Reusable Object-Oriented Software", for a
    reference to the design.
    """

    def __init__(self,
            cfg, warebox, storage_cache,
            startup_synchronization, filesystem_watcher, linker,
            metadata_db, hashes_db, internal_facade, ui_controller,
            lockfile_fd, auto_start, input_queue, scheduler):
        """
        @param cfg:
                    Instance of filerockclient.config.ConfigManager.
        @param warebox:
                    Instance of filerockclient.warebox.Warebox.
        @param storage_cache:
                    Instance of filerockclient.databases.storage_cache.
                    StorageCache.
        @param startup_synchronization:
                    Instance of filerockclient.serversession.
                    startup_synchronization.
        @param filesystem_watcher:
                    Instance of any class in the filerockclient.
                    filesystem_watcher package.
        @param linker:
                    Instance of filerockclient.linker.Linker.
        @param metadata_db:
                    Instance of filerockclient.databases.metadata.
                    MetadataDB.
        @param hashes_db:
                    Instance of filerockclient.databases.hashes.HashesDB.
        @param internal_facade:
                    Instance of filerockclient.internal_facade.
                    InternalFacade.
        @param ui_controller:
                    Instance of filerockclient.ui.ui_controller.
                    UIController.
        @param lockfile_fd:
                    File descriptor of the lock file which ensures there
                    is only one instance of FileRock Client running.
                    Child processes have to close it to avoid stale locks.
        @param auto_start:
                    Boolean flag telling whether ServerSession should
                    connect to the server when started.
        @param input_queue:
                    Instance of filerockclient.util.multi_queue.
                    MultiQueue. It is expected to have the following
                    queues:
                    usercommand: Commands sent by the user
                    sessioncommand: ServerSession internal use commands
                    systemcommand: Commands sent by other client components
                    servermessage: Messages sent by the server.
                    operation: PathnameOperation objects to handle
        @param scheduler:
                    Instance of filerockclient.util.scheduler.Scheduler.
        """

        threading.Thread.__init__(self, name=self.__class__.__name__)
        self.logger = logging.getLogger("FR.%s" % self.__class__.__name__)
        self._input_queue = input_queue
        self.warebox = warebox
        self.startup_synchronization = startup_synchronization
        self.filesystem_watcher = filesystem_watcher
        self._internal_facade = internal_facade
        self._ui_controller = ui_controller
        self.metadataDB = metadata_db
        self.hashesDB = hashes_db
        self.auto_start = auto_start
        self._scheduler = scheduler
        self.storage_cache = storage_cache
        self.linker = linker
        self.cfg = cfg
        self._lockfile_fd = lockfile_fd

        self._started = False
        self.must_die = threading.Event()
        # TODO: this flag exists due to auto-disconnection. It will be removed
        # and replaced by a CONNECTFORCE command as soon as ServerSession
        # stops going automatically to DisconnectedState.
        self.disconnect_other_client = False
        self.operation_responses = {}
        self._pathname2id = {}
        self.output_message_queue = Queue.Queue()
        self.input_keepalive_queue = Queue.Queue()
        self.current_state = None
        self.reconnection_time = 1
        self.num_connection_attempts = 0
        self.max_connection_attempts = MAX_CONNECTION_ATTEMPTS
        self._basis_lock = threading.Lock()
        self.server_basis = None
        self.session_id = None
        self.storage_ip_address = None
        self.refused_declare_count = 0
        self._current_basis = None
        self.id = 0
        self._sync_operations = []

        self.keepalive_timer = ConnectionLifeKeeper(
                                self._input_queue, self.input_keepalive_queue,
                                self.output_message_queue, True)
        self.transaction = Transaction()
        self.transaction_manager = TransactionManager(
                                          self.transaction, self.storage_cache)

        StateRegister.setup(self)

        self.client_id = None
        self.username = None
        self.priv_key = None
        self.host = None
        self.port = None
        self.server_certificate = None
        self.storage_hostname = None
        self.refused_declare_max = None
        self.refused_declare_waiting_time = None
        self.commit_threshold_seconds = None
        self.commit_threshold_operations = None
        self.commit_threshold_bytes = None
        self.transaction_cache = None
        self.integrity_manager = None
        self.cryptoAdapter = None
        self.temp_dir = None
        self.connection_reader = None
        self.connection_writer = None
        self.sock = None
        self.listening_operations = False

        self.reload_config_info()

    def reload_config_info(self):
        """
        Refresh the configuration values.

        Reload the configuration, get configuration values from self.cfg
        and set them as attributes of self.
        To be called at least once.
        """
        # TODO: merge this method into the constructor, there is no reason to
        # keep it separated anymore.

        self.cfg.load()

        self.client_id = self.cfg.get('User', 'client_id')
        self.username = self.cfg.get('User', 'username')
        self.priv_key = self.cfg.get('Application Paths', 'client_priv_key_file')
        self.host = self.cfg.get('System', 'server_hostname')
        self.port = self.cfg.getint('System', 'server_port')
        self.server_certificate = self.cfg.get('Application Paths', 'server_certificate')
        self.storage_hostname = self.cfg.get('System', 'storage_endpoint')

        self.refused_declare_max = self.cfg.getint(
            'System', 'refused_declare_max')
        self.refused_declare_waiting_time = self.cfg.getint(
            'System', 'refused_declare_waiting_time')
        self.commit_threshold_seconds = self.cfg.getint(
            'Client', 'commit_threshold_seconds')
        self.commit_threshold_operations = self.cfg.getint(
            'Client', 'commit_threshold_operations')
        self.commit_threshold_bytes = self.cfg.getint(
            'Client', 'commit_threshold_bytes')

        temp = self.cfg.get('Application Paths', 'transaction_cache_db')
        self.transaction_cache = TransactionCache(temp)
        self.integrity_manager = IntegrityManager(None)

        is_first_startup = self._internal_facade.is_first_startup()

        self.cryptoAdapter = Adapter(self.cfg,
                                     self.warebox,
                                     self._input_queue,
                                     self._lockfile_fd,
                                     enc_dir='enc',
                                     first_startup=is_first_startup)

        self.worker_pool = WorkerPool(self.warebox,
                                      self,
                                      self.cfg,
                                      self.cryptoAdapter)

        self.temp_dir = self.cryptoAdapter.get_enc_dir()
        self._ui_controller.update_config_info(self.cfg)

    def run(self):
        """Implementation of the threading.Thread.run() method."""
        self._started = True
        try:
            self.worker_pool.start_workers()
            self.keepalive_timer.start()
            self.current_state = StateRegister.get('DisconnectedState')
            curr_basis = self.current_state._load_trusted_basis()
            self.integrity_manager.setCurrentBasis(curr_basis)
            self.logger.info(u'Current basis is: %s' % curr_basis)
            self.current_state._on_entering()
            self._internal_facade.set_global_status(GStatuses.NC_STOPPED)
            if self.auto_start:
                self._input_queue.put(Command('CONNECT'), 'sessioncommand')
            self.cryptoAdapter.start()
            self._scheduler.schedule_action(
                self.check_encrypted_folder, name='check_encrypted_folder',
                seconds=5, repeating=True)

            # The event loop
            self._main_loop()

        except UnexpectedMessageException as e:
            self.logger.critical(
                u"Received an unexpected message from the Server while in "
                u"state '%s': %s. Forcing termination."
                % (self.current_state.__class__, str(e)))
            raise

        except ProtocolException as e:
            self.logger.critical(
                u"Detected an unrecoverable error, forcing termination: %s"
                % str(e))
            # Pre-emptive release, just stop before messing up the server
            self.release_network_resources()
            raise

        except Exception as e:
            self.logger.critical(
                u"Forcing termination due to uncaught exception '%s': %s"
                % (e.__class__, e))
            self.logger.debug(
                u"Last error stacktrace:\n%s" % traceback.format_exc())
            # Pre-emptive release, just stop before messing up the server
            self.release_network_resources()
            raise

    def _main_loop(self):
        """
        The event loop.

        A loop that continuously calls the current state of the state
        machine, executing its logic.
        It exits when self.terminate() is called.
        """
        while not self.must_die.is_set():
            next_state = self.current_state.do_execute()
            if next_state != self.current_state:
                self.current_state._on_leaving()
                next_state._on_entering()
                self.current_state = next_state

    def check_encrypted_folder(self):
        """
        Check whether the encryption preconditions are satisfied and
        try to satisfy them otherwise.

        This method is meant to be called asynchronously by a timer;
        more precisely, it is registered into self._scheduler.
        """
        if not self.cryptoAdapter.check_precondition(self._ui_controller):
            self._internal_facade.terminate()

    def acquire_network_resources(self):
        """
        Configure a network connection to the server.

        The resulting socket is handled by two instances of
        ServerConnectionReader and ServerConnectionWriter, which are
        created and run as well. It is possible to send and receive
        messages to and from them through the queues
        self.output_message_queue and self._input_queue (servermessage).

        Note: this method should actually be "protected", visible only
        to the ServerSession states, but Python doesn't have such a
        protection level.
        """
        try:
            self.logger.debug(u"Creating a socket on %s:%s", self.host, self.port)
            sock = socket.create_connection((self.host, self.port), timeout=10)
            ca_chain = os.path.abspath(self.server_certificate)
            self.sock = ssl.wrap_socket(
                sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_chain,
                ssl_version=ssl.PROTOCOL_TLSv1)
            self.sock.setblocking(True)
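            # wrap_socket() verifies the certificate chain (CERT_REQUIRED)
            # but does not check the hostname, hence the explicit
            # match_hostname() call below.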
            match_hostname(self.sock.getpeercert(), self.host)
        except CertificateError as e:
            self.logger.critical(u"SSL certificate validation failed: %s" % e)
            raise ssl.SSLError(e)
        except socket.error as exception:
            self.logger.debug(u"Error opening SSL socket: %s" % exception)
            self.logger.warning(
                u"Unable to connect, re-trying in %s seconds."
                % self.reconnection_time)
            self.reconnection_time = stoppable_exponential_backoff_waiting(
                self.reconnection_time, self.must_die, 10)
            self.num_connection_attempts += 1
            return False
        except socket.timeout as exception:
            self.logger.debug(u"Socket timeout: %s" % exception)
            self.logger.warning(u"Unable to connect, re-trying in %s seconds."
                                % self.reconnection_time)
            self.reconnection_time = stoppable_exponential_backoff_waiting(
                self.reconnection_time, self.must_die, 10)
            self.num_connection_attempts += 1
            return False
        self.connection_reader = ServerConnectionReader(
            self._input_queue, self.input_keepalive_queue, self.sock)
        self.connection_writer = ServerConnectionWriter(
            self._input_queue, self.output_message_queue, self.sock)
        self.connection_reader.start()
        self.connection_writer.start()
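        # Connected successfully: reset the exponential backoff delay.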
        self.reconnection_time = 1
        return True

    def release_network_resources(self):
        """
        Close the active connection to the server, if any.

        Note: this method should actually be "protected", visible only
        to the ServerSession states, but Python doesn't have such a
        protection level.
        """
        try:
            self.connection_reader.terminate()
        except AttributeError:
            pass
        try:
            self.connection_writer.terminate()
        except AttributeError:
            pass
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
        except socket.error:
            # shutdown() yields an error on already-closed sockets
            pass
        except AttributeError:
            pass

    def commit(self):
        """Make the client commit the current transaction."""
        self._input_queue.put(Command('USERCOMMIT'), 'usercommand')

    def connect(self):
        """Make the client connect to the server."""
        self._input_queue.put(Command('CONNECT'), 'usercommand')

    def disconnect(self):
        """Make the client disconnect from the client, if connected."""
        # TODO: this method has been temporarly replaced by PAUSE
        self._input_queue.put(Command('DISCONNECT'), 'usercommand')

    def signal_free_worker(self):
        """
        Tell ServerSession that a worker is free to receive new tasks.

        This method is meant to be called by WorkerPool.
        """
        self._input_queue.put(Command('WORKERFREE'), 'systemcommand')

    def signal_download_integrity_error(
            self, operation, reason,
            expected_etag, expected_basis,
            actual_etag, computed_basis):
        """Tell ServerSession that the integrity check of a downloaded
        file has failed.

        This method is meant to be called by Workers.

        @param operation:
                    Instance of PathnameOperation. Remember that it
                    contains also its Proof object.
        @param reason:
                    String briefly describing the error.
        @param expected_etag:
                    The etag the file was expected to have. It's the one
                    communicated by the server in its file list.
        @param expected_basis:
                    The trusted basis. It's the one the user had
                    accepted when the sync started.
        @param actual_etag:
                    The etag the file turned out to have after being
                    downloaded.
        @param computed_basis:
                    The basis returned by the IntegrityManager when
                    computing the given proof object. Possibly None.
        """
        cmd = Command('INTEGRITYERRORONDOWNLOAD')
        cmd.operation = operation
        cmd.proof = operation.download_info['proof']
        cmd.reason = reason
        cmd.expected_etag = expected_etag
        cmd.expected_basis = expected_basis
        cmd.actual_etag = actual_etag
        cmd.computed_basis = computed_basis
        self._input_queue.put(cmd, 'systemcommand')

    def signal_deletelocal_integrity_error(
            self, pathname, proof, reason, expected_basis, computed_basis):
        """Tell ServerSession that the integrity check of a
        pathname to delete locally has failed.

        This method is meant to be called by Workers.

        @param pathname:
                    String representing the pathname.
        @param reason:
                    String briefly describing the error.
        @param expected_basis:
                    The trusted basis. It's the one the user had
                    accepted when the sync started.
        @param computed_basis:
                    The basis returned by the IntegrityManager when
                    computing the given proof object. Possibly None.
        """
        cmd = Command('INTEGRITYERRORONDELETELOCAL')
        cmd.pathname = pathname
        cmd.proof = proof
        cmd.reason = reason
        cmd.expected_basis = expected_basis
        cmd.computed_basis = computed_basis
        self._input_queue.put(cmd, 'systemcommand')

    def get_current_basis(self):
        """
        Return the current trusted basis.

        @return The current trusted basis.
        """
        with self._basis_lock:
            return self._current_basis

    def print_transaction(self):
        """
        Print the list of operations in the current transaction.

        Debug method: it prints to stdout and thus works only when the
        application is attached to a console.
        """
        self.transaction.print_all()

    def terminate(self):
        """
        Termination routine for this component.

        Stops the running thread and releases any acquired resource.
        """
        self.logger.debug(u"Terminating Server Session...")
        if self._started:
            self.must_die.set()
            self.worker_pool.terminate()
            self._input_queue.put(Command('TERMINATE'), 'usercommand')
            self.transaction.can_be_committed.set()
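            # Do not join if terminate() is being called from the session
            # thread itself.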
            if self is not threading.current_thread():
                self.join()
            self.keepalive_timer.terminate()
            self.release_network_resources()
            self.cryptoAdapter.terminate()
        self.logger.debug(u"Server Session terminanted.")
Example #8
0
class ServerSession(threading.Thread):
    """
    The thread that controls the communication with the server and the
    internal execution of the client.

    ServerSession is the intelligent part of the client application. It
    is implemented as an event-loop machine, receiving events from many
    sources: the user, ServerSession itself, other application
    components and the server, each with a different priority level. It
    decides, for example, when to handle operations for synchronizing
    data and when to answer messages from the server, thus implementing
    the communication protocol.

    The logic is implemented as an object-oriented state machine, which
    decides which events to handle in each state and how to handle them.
    ServerSession is both a container (formally a "context") for "state"
    objects and the only public interface of this component. The context
    gives the states a shared memory space and access to other
    components.

    All the logic is actually implemented in the state objects, limiting
    ServerSession to executing the "current state". State objects also
    decide when to switch to another state, as a reaction to an input
    event. The states are implemented using object-oriented inheritance
    in order to factor out the logic common to several states, making
    ServerSession precisely a "Hierarchical State Machine".
    See the "State" design pattern in the book by Gamma et al., "Design
    Patterns: Elements of Reusable Object-Oriented Software", for a
    reference to the design.
    """
    def __init__(self, cfg, warebox, storage_cache, startup_synchronization,
                 filesystem_watcher, linker, metadata_db, hashes_db,
                 internal_facade, ui_controller, lockfile_fd, auto_start,
                 input_queue, scheduler):
        """
        @param cfg:
                    Instance of filerockclient.config.ConfigManager.
        @param warebox:
                    Instance of filerockclient.warebox.Warebox.
        @param storage_cache:
                    Instance of filerockclient.databases.storage_cache.
                    StorageCache.
        @param startup_synchronization:
                    Instance of filerockclient.serversession.
                    startup_synchronization.
        @param filesystem_watcher:
                    Instance of any class in the filerockclient.
                    filesystem_watcher package.
        @param linker:
                    Instance of filerockclient.linker.Linker.
        @param metadata_db:
                    Instance of filerockclient.databases.metadata.
                    MetadataDB.
        @param hashes_db:
                    Instance of filerockclient.databases.hashes.HashesDB.
        @param internal_facade:
                    Instance of filerockclient.internal_facade.
                    InternalFacade.
        @param ui_controller:
                    Instance of filerockclient.ui.ui_controller.
                    UIController.
        @param lockfile_fd:
                    File descriptor of the lock file which ensures there
                    is only one instance of FileRock Client running.
                    Child processes have to close it to avoid stale locks.
        @param auto_start:
                    Boolean flag telling whether ServerSession should
                    connect to the server when started.
        @param input_queue:
                    Instance of filerockclient.util.multi_queue.
                    MultiQueue. It is expected to have the following
                    queues:
                    usercommand: Commands sent by the user
                    sessioncommand: ServerSession internal use commands
                    systemcommand: Commands sent by other client components
                    servermessage: Messages sent by the server.
                    operation: PathnameOperation objects to handle
        @param scheduler:
                    Instance of filerockclient.util.scheduler.Scheduler.
        """

        threading.Thread.__init__(self, name=self.__class__.__name__)
        self.logger = logging.getLogger("FR.%s" % self.__class__.__name__)
        self._input_queue = input_queue
        self.warebox = warebox
        self.startup_synchronization = startup_synchronization
        self.filesystem_watcher = filesystem_watcher
        self._internal_facade = internal_facade
        self._ui_controller = ui_controller
        self.metadataDB = metadata_db
        self.hashesDB = hashes_db
        self.auto_start = auto_start
        self._scheduler = scheduler
        self.storage_cache = storage_cache
        self.linker = linker
        self.cfg = cfg
        self._lockfile_fd = lockfile_fd

        self._started = False
        self.must_die = threading.Event()
        # TODO: this flag exists due to auto-disconnection. It will be removed
        # and replaced by a CONNECTFORCE command as soon as ServerSession
        # stops going automatically to DisconnectedState.
        self.disconnect_other_client = False
        self.operation_responses = {}
        self._pathname2id = {}
        self.output_message_queue = Queue.Queue()
        self.input_keepalive_queue = Queue.Queue()
        self.current_state = None
        self.reconnection_time = 1
        self.num_connection_attempts = 0
        self.max_connection_attempts = MAX_CONNECTION_ATTEMPTS
        self._basis_lock = threading.Lock()
        self.server_basis = None
        self.session_id = None
        self.storage_ip_address = None
        self.refused_declare_count = 0
        self._current_basis = None
        self.id = 0
        self._sync_operations = []

        self.keepalive_timer = ConnectionLifeKeeper(self._input_queue,
                                                    self.input_keepalive_queue,
                                                    self.output_message_queue,
                                                    True)
        self.transaction = Transaction()
        self.transaction_manager = TransactionManager(self.transaction,
                                                      self.storage_cache)

        StateRegister.setup(self)

        self.client_id = None
        self.username = None
        self.priv_key = None
        self.host = None
        self.port = None
        self.server_certificate = None
        self.storage_hostname = None
        self.refused_declare_max = None
        self.refused_declare_waiting_time = None
        self.commit_threshold_seconds = None
        self.commit_threshold_operations = None
        self.commit_threshold_bytes = None
        self.transaction_cache = None
        self.integrity_manager = None
        self.cryptoAdapter = None
        self.temp_dir = None
        self.connection_reader = None
        self.connection_writer = None
        self.sock = None
        self.listening_operations = False

        self.reload_config_info()

    def reload_config_info(self):
        """
        Refresh the configuration values.

        Reload the configuration, get configuration values from self.cfg
        and set them as attributes of self.
        To be called at least once.
        """
        # TODO: merge this method into the constructor, there is no reason to
        # keep it separated anymore.

        self.cfg.load()

        self.client_id = self.cfg.get('User', 'client_id')
        self.username = self.cfg.get('User', 'username')
        self.priv_key = self.cfg.get('Application Paths',
                                     'client_priv_key_file')
        self.host = self.cfg.get('System', 'server_hostname')
        self.port = self.cfg.getint('System', 'server_port')
        self.server_certificate = self.cfg.get('Application Paths',
                                               'server_certificate')
        self.storage_hostname = self.cfg.get('System', 'storage_endpoint')

        self.refused_declare_max = self.cfg.getint('System',
                                                   'refused_declare_max')
        self.refused_declare_waiting_time = self.cfg.getint(
            'System', 'refused_declare_waiting_time')
        self.commit_threshold_seconds = self.cfg.getint(
            'Client', 'commit_threshold_seconds')
        self.commit_threshold_operations = self.cfg.getint(
            'Client', 'commit_threshold_operations')
        self.commit_threshold_bytes = self.cfg.getint(
            'Client', 'commit_threshold_bytes')

        temp = self.cfg.get('Application Paths', 'transaction_cache_db')
        self.transaction_cache = TransactionCache(temp)
        self.integrity_manager = IntegrityManager(None)

        is_first_startup = self._internal_facade.is_first_startup()

        self.cryptoAdapter = Adapter(self.cfg,
                                     self.warebox,
                                     self._input_queue,
                                     self._lockfile_fd,
                                     enc_dir='enc',
                                     first_startup=is_first_startup)

        self.worker_pool = WorkerPool(self.warebox, self, self.cfg,
                                      self.cryptoAdapter)

        self.temp_dir = self.cryptoAdapter.get_enc_dir()
        self._ui_controller.update_config_info(self.cfg)

    def run(self):
        """Implementation of the threading.Thread.run() method."""
        self._started = True
        try:
            self.worker_pool.start_workers()
            self.keepalive_timer.start()
            self.current_state = StateRegister.get('DisconnectedState')
            curr_basis = self.current_state._load_trusted_basis()
            self.integrity_manager.setCurrentBasis(curr_basis)
            self.logger.info(u'Current basis is: %s' % curr_basis)
            self.current_state._on_entering()
            self._internal_facade.set_global_status(GStatuses.NC_STOPPED)
            if self.auto_start:
                self._input_queue.put(Command('CONNECT'), 'sessioncommand')
            self.cryptoAdapter.start()
            self._scheduler.schedule_action(self.check_encrypted_folder,
                                            name='check_encrypted_folder',
                                            seconds=5,
                                            repeating=True)

            # The event loop
            self._main_loop()

        except UnexpectedMessageException as e:
            self.logger.critical(
                u"Received an unexpected message from the Server while in "
                u"state '%s': %s. Forcing termination." %
                (self.current_state.__class__, str(e)))
            raise

        except ProtocolException as e:
            self.logger.critical(
                u"Detected an unrecoverable error, forcing termination: %s" %
                str(e))
            # Pre-emptive release, just stop before messing up the server
            self.release_network_resources()
            raise

        except Exception as e:
            self.logger.critical(
                u"Forcing termination due to uncaught exception '%s': %s" %
                (e.__class__, e))
            self.logger.debug(u"Last error stacktrace:\n%s" %
                              traceback.format_exc())
            # Pre-emptive release, just stop before messing up the server
            self.release_network_resources()
            raise

    def _main_loop(self):
        """
        The event loop.

        A loop that continuously calls the current state of the state
        machine, executing its logic.
        It exits when self.terminate() is called.
        """
        while not self.must_die.is_set():
            next_state = self.current_state.do_execute()
            if next_state != self.current_state:
                self.current_state._on_leaving()
                next_state._on_entering()
                self.current_state = next_state

    def check_encrypted_folder(self):
        """
        Check whether the encryption preconditions are satisfied and
        try to satisfy them otherwise.

        This method is meant to be called asynchronously by a timer;
        more precisely, it is registered into self._scheduler.
        """
        if not self.cryptoAdapter.check_precondition(self._ui_controller):
            self._internal_facade.terminate()

    def acquire_network_resources(self):
        """
        Configure a network connection to the server.

        The resulting socket is handled by two instances of
        ServerConnectionReader and ServerConnectionWriter, which are
        created and run as well. It is possible to send and receive
        messages to and from them through the queues
        self.output_message_queue and self._input_queue (servermessage).

        Note: this method should actually be "protected", visible only
        to the ServerSession states, but Python doesn't have such a
        protection level.
        """
        try:
            self.logger.debug(u"Creating a socket on %s:%s", self.host,
                              self.port)
            sock = socket.create_connection((self.host, self.port), timeout=10)
            ca_chain = os.path.abspath(self.server_certificate)
            self.sock = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_REQUIRED,
                                        ca_certs=ca_chain,
                                        ssl_version=ssl.PROTOCOL_TLSv1)
            self.sock.setblocking(True)
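            # wrap_socket() verifies the certificate chain (CERT_REQUIRED)
            # but does not check the hostname, hence the explicit
            # match_hostname() call below.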
            match_hostname(self.sock.getpeercert(), self.host)
        except CertificateError as e:
            self.logger.critical(u"SSL certificate validation failed: %s" % e)
            raise ssl.SSLError(e)
        except socket.error as exception:
            self.logger.debug(u"Error opening SSL socket: %s" % exception)
            self.logger.warning(
                u"Unable to connect, re-trying in %s seconds." %
                self.reconnection_time)
            self.reconnection_time = stoppable_exponential_backoff_waiting(
                self.reconnection_time, self.must_die, 10)
            self.num_connection_attempts += 1
            return False
        except socket.timeout as exception:
            self.logger.debug(u"Socket timeout: %s" % exception)
            self.logger.warning(
                u"Unable to connect, re-trying in %s seconds." %
                self.reconnection_time)
            self.reconnection_time = stoppable_exponential_backoff_waiting(
                self.reconnection_time, self.must_die, 10)
            self.num_connection_attempts += 1
            return False
        self.connection_reader = ServerConnectionReader(
            self._input_queue, self.input_keepalive_queue, self.sock)
        self.connection_writer = ServerConnectionWriter(
            self._input_queue, self.output_message_queue, self.sock)
        self.connection_reader.start()
        self.connection_writer.start()
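        # Connected successfully: reset the exponential backoff delay.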
        self.reconnection_time = 1
        return True

    def release_network_resources(self):
        """
        Close the active connection to the server, if any.

        Note: this method should actually be "protected", visible only
        to the ServerSession states, but Python doesn't have such a
        protection level.
        """
        try:
            self.connection_reader.terminate()
        except AttributeError:
            pass
        try:
            self.connection_writer.terminate()
        except AttributeError:
            pass
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
        except socket.error:
            # shutdown() yields an error on already-closed sockets
            pass
        except AttributeError:
            pass

    def commit(self):
        """Make the client commit the current transaction."""
        self._input_queue.put(Command('USERCOMMIT'), 'usercommand')

    def connect(self):
        """Make the client connect to the server."""
        self._input_queue.put(Command('CONNECT'), 'usercommand')

    def disconnect(self):
        """Make the client disconnect from the client, if connected."""
        # TODO: this method has been temporarly replaced by PAUSE
        self._input_queue.put(Command('DISCONNECT'), 'usercommand')

    def signal_free_worker(self):
        """
        Tell ServerSession that a worker is free to receive new tasks.

        This method is meant to be called by WorkerPool.
        """
        self._input_queue.put(Command('WORKERFREE'), 'systemcommand')

    def signal_download_integrity_error(self, operation, reason, expected_etag,
                                        expected_basis, actual_etag,
                                        computed_basis):
        """Tell ServerSession that the integrity check of a downloaded
        file has failed.

        This method is meant to be called by Workers.

        @param operation:
                    Instance of PathnameOperation. Remember that it
                    contains also its Proof object.
        @param reason:
                    String briefly describing the error.
        @param expected_etag:
                    The etag the file was expected to have. It's the one
                    communicated by the server in its file list.
        @param expected_basis:
                    The trusted basis. It's the one the user had
                    accepted when the sync started.
        @param actual_etag:
                    The etag the file turned out to have after being
                    downloaded.
        @param computed_basis:
                    The basis returned by the IntegrityManager when
                    computing the given proof object. Possibly None.
        """
        cmd = Command('INTEGRITYERRORONDOWNLOAD')
        cmd.operation = operation
        cmd.proof = operation.download_info['proof']
        cmd.reason = reason
        cmd.expected_etag = expected_etag
        cmd.expected_basis = expected_basis
        cmd.actual_etag = actual_etag
        cmd.computed_basis = computed_basis
        self._input_queue.put(cmd, 'systemcommand')

    def signal_deletelocal_integrity_error(self, pathname, proof, reason,
                                           expected_basis, computed_basis):
        """Tell ServerSession that the integrity check of a
        pathname to delete locally has failed.

        This method is meant to be called by Workers.

        @param pathname:
                    String representing the pathname.
        @param reason:
                    String briefly describing the error.
        @param expected_basis:
                    The trusted basis. It's the one the user had
                    accepted when the sync started.
        @param computed_basis:
                    The basis returned by the IntegrityManager when
                    computing the given proof object. Possibly None.
        """
        cmd = Command('INTEGRITYERRORONDELETELOCAL')
        cmd.pathname = pathname
        cmd.proof = proof
        cmd.reason = reason
        cmd.expected_basis = expected_basis
        cmd.computed_basis = computed_basis
        self._input_queue.put(cmd, 'systemcommand')

    def get_current_basis(self):
        """
        Return the current trusted basis.

        @return The current trusted basis.
        """
        with self._basis_lock:
            return self._current_basis

    def print_transaction(self):
        """
        Print the list of operations in the current transaction.

        Debug method: it prints to stdout and thus works only when the
        application is attached to a console.
        """
        self.transaction.print_all()

    def terminate(self):
        """
        Termination routine for this component.

        Stops the running thread and releases any acquired resource.
        """
        self.logger.debug(u"Terminating Server Session...")
        if self._started:
            self.must_die.set()
            self.worker_pool.terminate()
            self._input_queue.put(Command('TERMINATE'), 'usercommand')
            self.transaction.can_be_committed.set()
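            # Do not join if terminate() is being called from the session
            # thread itself.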
            if self is not threading.current_thread():
                self.join()
            self.keepalive_timer.terminate()
            self.release_network_resources()
            self.cryptoAdapter.terminate()
        self.logger.debug(u"Server Session terminanted.")