def create_file(self, event, is_folder, session, excluded_dirs=(),
                initial_sync=False, check_by_uuid=True):
    if check_by_uuid:
        files = self._get_files_by_uuid(session)
        if files:
            file = files[0]
            file.last_skipped_event_id = None
            return file

    if event.file_id:
        return session.query(File).filter(File.id == event.file_id).one()

    try:
        folder = self.find_folder_by_uuid(session, event.folder_uuid)
    except FolderUUIDNotFound:
        folder = None
        logger.warning("No parent for event's %s file", event)

    file = File(name=event.file_name,
                uuid=event.file_uuid,
                is_folder=is_folder)
    file.folder = folder
    if folder:
        file.folder_id = folder.id
    file.excluded = (folder and folder.excluded
                     or is_contained_in_dirs(file.path, excluded_dirs))
    if self._is_smart_sync:
        file.is_offline = folder is not None and folder.is_offline
    return file
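# Sketch (not project code): the assumed semantics of is_contained_in_dirs,
# which is imported from elsewhere in the project and used throughout this
# module. It is taken here to mean "path equals one of the dirs or lies
# somewhere beneath one of them"; the real helper may rely on FilePath's
# containment operator rather than the plain component comparison below.
def _is_contained_in_dirs_sketch(path, dirs):
    """Return True if `path` is one of `dirs` or is nested under one of them."""
    parts = [p for p in str(path).split('/') if p]
    for d in dirs:
        dir_parts = [p for p in str(d).split('/') if p]
        if parts[:len(dir_parts)] == dir_parts:
            return True
    return False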
def delete_directories(self, dirs=None, session=None):
    # `dirs=None` avoids the mutable-default-argument pitfall;
    # an empty or missing value means there is nothing to delete.
    paths_deleted = []
    if not dirs:
        return paths_deleted

    files = session.query(File).all()
    dirs_rel = [self._pc.create_relpath(p) for p in dirs]
    for file in files:
        if is_contained_in_dirs(file.relative_path, dirs_rel):
            if not file.is_folder:
                paths_deleted.append(file.relative_path)
            session.delete(file)
    return paths_deleted
def _count_excluded_events(self, session):
    excluded_uuids = session.query(File.uuid) \
        .filter(File.is_folder) \
        .filter(File.excluded).all()
    excluded_uuids = [u.uuid for u in excluded_uuids]
    if not excluded_uuids:
        return 0

    excluded_events = session.query(Event).from_statement(sql_text(
        """
        select final_e.* from events final_e
        where final_e.id in (
            select max(last_event.id) from events last_event
            where last_event.file_id in (
                select moved_file.id
                from events move_event, files moved_file
                where moved_file.id = move_event.file_id
                    and move_event.id in (
                        select max(event.id)
                        from events event, files file
                        where file.id = event.file_id
                            and file.excluded
                            and event.type == 'move'
                        group by file.id
                    )
                    and (
                        move_event.folder_uuid is null
                        or move_event.folder_uuid not in ({})
                    )
            )
            group by last_event.file_id
        )
        order by final_e.is_folder desc, final_e.id
        """.format(
            ','.join(["'{}'".format(uuid) for uuid in excluded_uuids]),
        ))) \
        .all()
    if not excluded_events:
        return 0

    excluded_count = len(list(filter(
        lambda e: not is_contained_in_dirs(
            self._db.get_path_from_event(e, session),
            self._excluded_dirs),
        excluded_events)))
    return excluded_count
def get_excluded_dirs_to_change(self, excluded_dirs, src_path,
                                dst_path=None):
    src_path = FilePath(src_path)
    if dst_path:
        dst_path = FilePath(dst_path)
    excluded_dirs = list(map(FilePath, excluded_dirs))
    dirs_to_add = []
    dirs_to_delete = list(filter(lambda ed: ed in src_path, excluded_dirs))
    if dst_path is not None and \
            not is_contained_in_dirs(dst_path, excluded_dirs):
        # we have to add new excluded dirs only if folder is not moved
        # to excluded dir
        src_len = len(src_path)
        dirs_to_add = [dst_path + d[src_len:] for d in dirs_to_delete]
    logger.debug(
        "get_excluded_dirs_to_change. "
        "excluded_dirs %s, src_path %s, dst_path %s, "
        "dirs_to_delete %s, dirs_to_add %s",
        excluded_dirs, src_path, dst_path, dirs_to_delete, dirs_to_add)
    return dirs_to_delete, dirs_to_add
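# Sketch (not project code): the prefix-replacement idea behind dirs_to_add
# above, written with plain strings for clarity. It assumes that slicing a
# FilePath (`d[src_len:]`) drops the source prefix, and that `+` re-attaches
# the remainder under the destination path.
def _replace_dir_prefix_sketch(excluded_dir, src_path, dst_path):
    """Rebase an excluded dir from under src_path to under dst_path."""
    assert excluded_dir.startswith(src_path)
    return dst_path + excluded_dir[len(src_path):]

# e.g. _replace_dir_prefix_sketch('/a/b/excluded', '/a/b', '/c') -> '/c/excluded'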
def _restore_last_nonconflicted_state(
        self, session, fs, copies_storage, create_strategy_from_event,
        change_processing_events_count, excluded_dirs):
    assert self.event.file_id
    file = session.query(File).filter(File.id == self.event.file_id).one()
    before_conflict_event = self._get_last_nonconflicted_state(session, fs)
    logger.debug(
        "_get_last_nonconflicted_state. event_id %s, file_hash %s",
        before_conflict_event.id, before_conflict_event.file_hash)
    assert before_conflict_event.id, \
        "Non conflicted state must exist. " \
        "It can be absent only for create event. " \
        "See method overload in create"

    conflict_event_type = self.event.type
    is_folder_move = conflict_event_type == 'move' and self.event.is_folder
    conflict_path = file.path if is_folder_move else ""

    delete_event_id = None
    if not is_folder_move:
        delete_event_id = session.query(Event.id) \
            .filter(Event.file_id == self.event.file_id) \
            .filter(Event.type == 'delete').limit(1).scalar()
        self._restore_nonconflicted_for_non_delete(
            fs, file, session, before_conflict_event.file_size,
            before_conflict_event.file_hash, create_strategy_from_event)

    if conflict_event_type == 'move' and before_conflict_event.file_size:
        copies_storage.add_copy_reference(
            self.event.file_hash,
            reason="process conflict. Event {}. File {}"
            .format(self.event.uuid, self.event.file_name))

    file.event_id = before_conflict_event.id
    file.name = before_conflict_event.file_name
    folder = self.find_folder_by_uuid(session,
                                      before_conflict_event.folder_uuid)
    file.event = before_conflict_event
    file.folder = folder
    file.folder_id = folder.id if folder else None

    file_path = file.path
    is_path_excluded = is_contained_in_dirs(file_path, excluded_dirs)
    file.excluded = is_path_excluded
    if fs.path_exists(file_path, file.is_offline):
        self._rename_or_delete_dst_path(file_path, session, file.is_offline)

    if is_folder_move:
        if (not folder or not folder.is_deleted) and not is_path_excluded:
            try:
                # move folder back
                fs.accept_move(conflict_path, file_path, is_directory=True,
                               events_file_id=file.id)
            except fs.Exceptions.FileNotFound:
                logger.warning(
                    "File not found while moving folder %s back",
                    conflict_path)
        else:
            fs.accept_delete(conflict_path, is_directory=True,
                             events_file_id=file.id)
    elif ((not folder or not folder.is_deleted)
          and (conflict_event_type in ('update', 'move')
               and not delete_event_id and not is_path_excluded)):
        if before_conflict_event.file_size and file.is_offline:
            try:
                fs.restore_file_from_copy(
                    file_name=file_path,
                    copy_hash=before_conflict_event.file_hash,
                    events_file_id=file.id)
            except fs.Exceptions.CopyDoesNotExists:
                logger.warning(
                    "File copy not found when restoring {}, "
                    "make event received".format(conflict_path))
                before_conflict_event.state = "received"
                file.event_id = None
                file.event = None
                change_processing_events_count(remote_inc=1)
        else:
            fs.create_empty_file(
                file_name=file_path,
                file_hash=before_conflict_event.file_hash,
                events_file_id=file.id,
                is_offline=file.is_offline)

    logger.debug("_restore_last_nonconflicted_state event.file.event %s",
                 file.event)
    if is_folder_move:
        session.delete(self.event)
        self.event = None
def _apply_move_if_needed(self, session, fs, excluded_dirs,
                          patches_storage, events_queue):
    event = self.event
    assert event.file_id

    parent_found = True
    folder = self.find_folder_by_uuid(session, event.folder_uuid)
    if folder == event.file.folder and event.file_name == event.file.name:
        return True, parent_found

    move_events = list(filter(
        lambda e: e.server_event_id and e.type == 'move'
        and (not event.server_event_id
             or e.server_event_id > event.server_event_id),
        event.file.events))
    if move_events and not self._force_move and event.is_folder:
        # skip this if we have subsequent moves
        return False, parent_found

    # Calculate object path for further use
    event_path = event.file.path

    if folder and not folder.is_existing and not folder.excluded:
        logger.debug("Parent folder does not exist for %s", event_path)
        parent_found = False
        if self._process_parent_not_found(session):
            fs.accept_delete(event_path, is_directory=event.is_folder,
                             events_file_id=event.file_id,
                             is_offline=event.file.is_offline)
        return True, parent_found

    logger.debug('moving %s', event.file)
    new_path = ('/'.join([folder.path, event.file_name])
                if folder else event.file_name)

    # Check whether event paths are excluded from sync
    is_path_excluded = is_contained_in_dirs(event_path, excluded_dirs)
    is_new_path_excluded = is_contained_in_dirs(new_path, excluded_dirs)

    # Both source and destination paths are excluded
    if is_path_excluded and is_new_path_excluded:
        assert False, 'Excluded-excluded must never occur'
    # Neither source nor destination path is excluded
    elif not is_path_excluded and not is_new_path_excluded:
        # Regular move event processing
        try:
            fs.accept_move(event_path, new_path,
                           is_directory=event.is_folder,
                           events_file_id=event.file_id,
                           is_offline=event.file.is_offline)
        except fs.Exceptions.FileAlreadyExists:
            if event.file.event_id and not event.file.is_deleted:
                if not self._rename_or_delete_dst_path(
                        new_path, session, event.file.is_offline):
                    raise SkipEventForNow()
                else:
                    # retry move after renaming new path
                    return self._apply_move_if_needed(
                        session, fs, excluded_dirs, patches_storage,
                        events_queue)
        except fs.Exceptions.FileNotFound:
            subsequent_local_moves_deletes = list(filter(
                lambda ev: ev.id > event.id
                and ev.type in ('delete', 'move')
                and ev.state in ('occured', 'conflicted', 'sent'),
                event.file.events))
            if not subsequent_local_moves_deletes and \
                    not self.check_previous_delete(
                        session, events_queue, fs):
                # file/folder moved or deleted locally and
                # no events in db for now, so wait
                logger.warning("Source file (folder) %s not found.",
                               event_path)
                raise SkipEventForNow()
        except fs.Exceptions.WrongFileId:
            if not self.event.is_folder or \
                    not self._apply_folder_delete_if_any(session, fs):
                raise SkipEventForNow()
            # retry move after deleting folder
            return self._apply_move_if_needed(session, fs, excluded_dirs,
                                              patches_storage, events_queue)
        except Exception as e:
            # ignore move if file is unavailable
            logger.warning("Can't move file (folder) %s. Reason %s",
                           event_path, e)
            raise SkipEventForNow()

        event.file.name = event.file_name
        event.file.folder = folder
        if folder:
            event.file.folder_id = folder.id
    # Source path is excluded
    elif is_path_excluded and not is_new_path_excluded:
        self.event.file.excluded = False
        self.event.file.folder = folder
        if event.is_folder:
            # Create directory at destination path
            fs.create_directory(new_path, self.event.file_id)
        else:
            # Create file at destination path
            if self.event.file_size:
                self._create_file_from_copy(new_path, fs)
            else:
                fs.create_empty_file(new_path, self.event.file_hash,
                                     self.event.file_id,
                                     is_offline=self.event.file.is_offline)
    # Destination path is excluded
    elif not is_path_excluded and is_new_path_excluded:
        if not hasattr(self, '_excluded_ready') or not self._excluded_ready:
            self._excluded_ready = False
            raise SkipExcludedMove
        self.event.file.excluded = True
        self.event.file.event_id = None
        if not self.event.is_folder:
            self.event.state = 'received'
        else:
            self.db.mark_child_excluded(self.event.file_id, session)
        # Delete object at source path
        fs.accept_delete(event_path, is_directory=event.is_folder,
                         is_offline=event.file.is_offline)

    return True, parent_found
def check_event_path_excluded(self, excluded_dirs):
    event = self.event
    if not event.file:
        return False

    event_path = event.file.path
    return is_contained_in_dirs(event_path, excluded_dirs)
def load_excluded_events(self, session, events_count, exclude_files):
    if events_count <= 0:
        return []

    start_time = time()
    excluded_uuids = session.query(File.uuid) \
        .filter(File.is_folder) \
        .filter(File.excluded).all()
    excluded_uuids = [u.uuid for u in excluded_uuids]

    offset = 0
    excluded_events = []
    limit = min(EVENTS_QUERY_LIMIT, events_count)
    while True:
        excluded_portion = session.query(Event).from_statement(sql_text(
            """
            select final_e.* from events final_e
            where final_e.id in (
                select max(last_event.id) from events last_event
                where last_event.file_id in (
                    select moved_file.id
                    from events move_event, files moved_file
                    where moved_file.id = move_event.file_id
                        and move_event.id in (
                            select max(event.id)
                            from events event, files file
                            where file.id = event.file_id
                                and file.excluded
                                and event.type == 'move'
                            group by file.id
                        )
                        and move_event.file_id not in ({})
                        and (
                            move_event.folder_uuid is null
                            or move_event.folder_uuid not in ({})
                        )
                )
                group by last_event.file_id
            )
            order by final_e.is_folder desc, final_e.id
            limit {}, {}
            """.format(
                ','.join(map(str, exclude_files)),
                ','.join(["'{}'".format(uuid) for uuid in excluded_uuids]),
                offset, EVENTS_QUERY_LIMIT))) \
            .all()
        if not excluded_portion:
            break

        excluded_portion_filtered = [
            e for e in excluded_portion
            if not is_contained_in_dirs(
                self._db.get_path_from_event(e, session),
                self._excluded_dirs)
        ]
        excluded_events.extend(excluded_portion_filtered)
        if len(excluded_portion) < EVENTS_QUERY_LIMIT or \
                len(excluded_events) >= limit:
            break

        offset += EVENTS_QUERY_LIMIT

    if excluded_events:
        logger.debug(
            "excluded_events queried in %s sec: [%s]",
            time() - start_time,
            ', '.join(map(lambda ev: str(ev.id), excluded_events)))
    return excluded_events
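# Sketch (not project code): the offset/limit paging loop used by
# load_excluded_events above, shown in isolation. `fetch_page` stands in for
# the raw SQL query and is an assumption here, not a real project function.
def _paged_fetch_sketch(fetch_page, wanted_count, page_size=1000):
    """Collect rows page by page until enough rows or a short page arrives."""
    collected = []
    offset = 0
    limit = min(page_size, wanted_count)
    while True:
        page = fetch_page(offset, page_size)
        if not page:
            break
        collected.extend(page)
        # a short page means the source is exhausted
        if len(page) < page_size or len(collected) >= limit:
            break
        offset += page_size
    return collected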