def _temporary_stop_watching(self, seconds, paths):
    """Suspend FS watching of the given paths, re-adding them later.

    The paths are removed from the watcher immediately and scheduled
    to be added back after C{seconds} seconds.

    @type paths: col.Iterable
    """
    # Materialize the iterable: it is consumed twice — once for the
    # immediate removal, and once more by the delayed re-add call.
    path_list = list(paths)
    self._remove_paths_from_watcher(path_list)
    callLater(seconds, self._add_paths_to_watcher, path_list)
def __restore_op_for_path(self, file_, file_blocks, is_whole_dataset_restored,
                          base_dir_id, restore_directory, cryptographer, ds):
    """
    Perform the restore-specific operation for a single file from
    the message.

    The actual operation may cause restoring the file (i.e. writing),
    or deleting.

    @param file_: file to run the restore operation on.
    @type file_: LocalPhysicalFileState

    @param file_blocks: the iterable of blocks used to restore the file
        (if needed).
    @type file_blocks: col.Iterable

    @param is_whole_dataset_restored: whether restoring the whole dataset
        (as a part of sync operation, maybe) or just some single files.

    @param base_dir_id: the ID of the base directory in the DB,
        to improve performance of queries.
    @type base_dir_id: int, NoneType

    @param restore_directory: to what directory should the file
        be restored.
    @type restore_directory: basestring

    @param cryptographer: C{Cryptographer} object to use for decrypting.
    @type cryptographer: Cryptographer

    @param ds: the dataset to bind the states to (or C{None} if not needed).
    @type ds: NoneType, AbstractBasicDatasetInfo
    """
    _app = self.manager.app

    # What file paths should we use as a basis?
    # Store the whole path if selected files were requested
    # (for compatibility), but write the rel_path if the whole dataset
    # was restored.
    _base_file_name = \
        '.' + os.sep + (file_.rel_path if is_whole_dataset_restored
                        else file_.full_path)
    # This path may be overridden later (on a restore conflict).
    file_path = sanitize_path(abspath(os.path.join(restore_directory,
                                                   _base_file_name)))
    try:
        logger.verbose('Ignoring FS changes in %r', file_path)
        # Suppress FS-watcher events caused by our own writes below.
        _app.ignore_fs_events_for_path(file_path)

        if file_.fingerprint is None:
            # A missing fingerprint marks this state as a deletion.
            logger.debug('Removing the file %r at %r', file_, file_path)
            # Deleting the file.
            # UPD: not just deleting, but also may be adding
            # a directory.
            # TODO: ticket:141 - fix accordingly.
            assert file_.crc32 is None and not file_blocks, \
                (file_, file_.crc32, file_blocks)
            with RestoreTransaction_Host._path_operation_lock:
                self.__delete_single_file(file_, file_path)
        else:
            logger.debug('Restoring the file %r at %r (%s bytes)',
                         file_, file_path, file_.size)
            logger.verbose('In RESTORE, receiving file %r: %r',
                           file_, file_blocks)

            if _app.is_path_hot(file_path):
                # "Hot" path: some other operation is writing it now.
                do_path_override = True
            # do not try to write file over dir
            # NOTE(review): this try unconditionally reassigns
            # do_path_override, discarding the is_path_hot() result
            # just above; an `else:` before the try looks intended —
            # confirm against the original source.
            try:
                do_path_override = os.path.isdir(file_path)
            except OSError:
                # can not access path
                do_path_override = True

            _file_path = file_path
            if do_path_override:
                # Find a conflict-free name: keep asking for override
                # candidates until one neither exists nor is "hot".
                ts = datetime.utcnow() if ds is None \
                                       else ds.time_started
                counter = 0
                while (os.path.exists(_file_path) or
                       _app.is_path_hot(_file_path)):
                    _file_path = self.suggest_override_path(
                                     _file_path, ts, counter)
                    counter += 1
                    if counter >= MAX_TRIES_TO_GET_FILE_NAME_OVERRIDE:
                        raise Exception('Too many tries to get override '
                                        'for path {!r}'
                                            .format(file_path))
                logger.info('Restore conflict, writing file %r '
                            'to %r instead of %r',
                            file_, _file_path, file_path)
                file_path = _file_path
                # TODO: between this point and the actual writing,
                # the overriding _file_path may already become
                # inappropriate; in the best case, we need to do this
                # as close to actual writing as possible,
                # ideally atomically find name/open for writing.

            # Writing/updating the file
            assert file_.crc32 is not None, \
                (file_, file_.crc32, file_blocks)
            # @todo: better solution for preventing concurrent
            #        restore
            with RestoreTransaction_Host._path_operation_lock:
                self.__restore_single_file(file_path, file_, file_blocks,
                                           cryptographer)
    except CannotRestoreFile as e:
        # from __restore_single_file
        logger.error('Cannot restore the file, but nothing is broken: %s',
                     e)
    except FileCorrupt as e:
        # from __restore_single_file
        logger.error('Cannot restore the file, and it was corrupted: %s',
                     e)
    except Exception:
        # We don't even know what the problem was!
        # Possibly, at this point, if there was an error during
        # writing the file (partial write), we should restore
        # the previous version of the file...
        logger.exception('Some error has occured during restore '
                         'of the file %r at path %r',
                         file_, file_path)
    else:
        # We've written the file successfully, can now mark it
        # as already backed up: add the state and bind the dataset
        # to it.
        self.__bind_single_file_state_to_file_if_needed(
            ds, base_dir_id, file_)
    finally:
        # by now, we've finished writing the file
        ts = datetime.utcnow()
        # Ignore all events for file_path that occurred before this
        # moment.
        _app.ignore_fs_events_for_path(file_path, ts)
        # Delay the removal from the ignore list:
        # it is required to drop events from FS notify that happened
        # during restore (they can be received after the restore ends).
        callLater(REMOVAL_FROM_FS_IGNORE_TIMEOUT.total_seconds(),
                  _app.stop_ignoring_fs_events_for_path, file_path, ts)
        logger.verbose('Delaying remove from ignore, '
                       'path: %r, ts: %s', file_path, ts)
def __restore_op_for_path(self, file_, file_blocks, is_whole_dataset_restored,
                          base_dir_id, restore_directory, cryptographer, ds):
    """
    Perform the restore-specific operation for a single file from
    the message.

    The actual operation may cause restoring the file (i.e. writing),
    or deleting.

    NOTE(review): this is a token-identical duplicate of a
    C{__restore_op_for_path} definition earlier in this file; the later
    definition shadows the earlier one in the class body — confirm and
    remove one of the copies.

    @param file_: file to run the restore operation on.
    @type file_: LocalPhysicalFileState

    @param file_blocks: the iterable of blocks used to restore the file
        (if needed).
    @type file_blocks: col.Iterable

    @param is_whole_dataset_restored: whether restoring the whole dataset
        (as a part of sync operation, maybe) or just some single files.

    @param base_dir_id: the ID of the base directory in the DB,
        to improve performance of queries.
    @type base_dir_id: int, NoneType

    @param restore_directory: to what directory should the file
        be restored.
    @type restore_directory: basestring

    @param cryptographer: C{Cryptographer} object to use for decrypting.
    @type cryptographer: Cryptographer

    @param ds: the dataset to bind the states to (or C{None} if not needed).
    @type ds: NoneType, AbstractBasicDatasetInfo
    """
    _app = self.manager.app

    # What file paths should we use as a basis?
    # Store the whole path if selected files were requested
    # (for compatibility), but write the rel_path if the whole dataset
    # was restored.
    _base_file_name = \
        '.' + os.sep + (file_.rel_path if is_whole_dataset_restored
                        else file_.full_path)
    # This path may be overridden later (on a restore conflict).
    file_path = sanitize_path(
        abspath(os.path.join(restore_directory, _base_file_name)))
    try:
        logger.verbose('Ignoring FS changes in %r', file_path)
        # Suppress FS-watcher events caused by our own writes below.
        _app.ignore_fs_events_for_path(file_path)

        if file_.fingerprint is None:
            # A missing fingerprint marks this state as a deletion.
            logger.debug('Removing the file %r at %r', file_, file_path)
            # Deleting the file.
            # UPD: not just deleting, but also may be adding
            # a directory.
            # TODO: ticket:141 - fix accordingly.
            assert file_.crc32 is None and not file_blocks, \
                (file_, file_.crc32, file_blocks)
            with RestoreTransaction_Host._path_operation_lock:
                self.__delete_single_file(file_, file_path)
        else:
            logger.debug('Restoring the file %r at %r (%s bytes)',
                         file_, file_path, file_.size)
            logger.verbose('In RESTORE, receiving file %r: %r',
                           file_, file_blocks)

            if _app.is_path_hot(file_path):
                # "Hot" path: some other operation is writing it now.
                do_path_override = True
            # do not try to write file over dir
            # NOTE(review): this try unconditionally reassigns
            # do_path_override, discarding the is_path_hot() result
            # just above; an `else:` before the try looks intended —
            # confirm against the original source.
            try:
                do_path_override = os.path.isdir(file_path)
            except OSError:
                # can not access path
                do_path_override = True

            _file_path = file_path
            if do_path_override:
                # Find a conflict-free name: keep asking for override
                # candidates until one neither exists nor is "hot".
                ts = datetime.utcnow() if ds is None \
                                       else ds.time_started
                counter = 0
                while (os.path.exists(_file_path) or
                       _app.is_path_hot(_file_path)):
                    _file_path = self.suggest_override_path(
                                     _file_path, ts, counter)
                    counter += 1
                    if counter >= MAX_TRIES_TO_GET_FILE_NAME_OVERRIDE:
                        raise Exception('Too many tries to get override '
                                        'for path {!r}'.format(file_path))
                logger.info(
                    'Restore conflict, writing file %r '
                    'to %r instead of %r',
                    file_, _file_path, file_path)
                file_path = _file_path
                # TODO: between this point and the actual writing,
                # the overriding _file_path may already become
                # inappropriate; in the best case, we need to do this
                # as close to actual writing as possible,
                # ideally atomically find name/open for writing.

            # Writing/updating the file
            assert file_.crc32 is not None, \
                (file_, file_.crc32, file_blocks)
            # @todo: better solution for preventing concurrent
            #        restore
            with RestoreTransaction_Host._path_operation_lock:
                self.__restore_single_file(file_path, file_, file_blocks,
                                           cryptographer)
    except CannotRestoreFile as e:
        # from __restore_single_file
        logger.error('Cannot restore the file, but nothing is broken: %s',
                     e)
    except FileCorrupt as e:
        # from __restore_single_file
        logger.error('Cannot restore the file, and it was corrupted: %s',
                     e)
    except Exception:
        # We don't even know what the problem was!
        # Possibly, at this point, if there was an error during
        # writing the file (partial write), we should restore
        # the previous version of the file...
        logger.exception(
            'Some error has occured during restore '
            'of the file %r at path %r',
            file_, file_path)
    else:
        # We've written the file successfully, can now mark it
        # as already backed up: add the state and bind the dataset
        # to it.
        self.__bind_single_file_state_to_file_if_needed(
            ds, base_dir_id, file_)
    finally:
        # by now, we've finished writing the file
        ts = datetime.utcnow()
        # Ignore all events for file_path that occurred before this
        # moment.
        _app.ignore_fs_events_for_path(file_path, ts)
        # Delay the removal from the ignore list:
        # it is required to drop events from FS notify that happened
        # during restore (they can be received after the restore ends).
        callLater(REMOVAL_FROM_FS_IGNORE_TIMEOUT.total_seconds(),
                  _app.stop_ignoring_fs_events_for_path, file_path, ts)
        logger.verbose('Delaying remove from ignore, '
                       'path: %r, ts: %s', file_path, ts)