def _scan_remote(self, from_state=None):
    """Recursively scan the bound remote folder looking for updates"""
    start_ms = current_milli_time()
    try:
        if from_state is None:
            from_state = self._dao.get_state_from_local('/')
        self._client = self._engine.get_remote_client()
        remote_info = self._client.get_info(from_state.remote_ref)
        self._dao.update_remote_state(from_state, remote_info,
                                      remote_parent_path=from_state.remote_parent_path)
    except NotFound:
        log.debug("Marking %r as remotely deleted.", from_state)
        # Should unbind ?
        # from_state.update_remote(None)
        self._dao.commit()
        self._metrics['last_remote_scan_time'] = current_milli_time() - start_ms
        return
    self._get_changes()
    self._save_changes_state()
    # recursive update
    self._do_scan_remote(from_state, remote_info)
    self._last_remote_full_scan = datetime.utcnow()
    self._dao.update_config('remote_last_full_scan', self._last_remote_full_scan)
    self._dao.clean_scanned()
    self._dao.commit()
    self._metrics['last_remote_scan_time'] = current_milli_time() - start_ms
    log.debug("Remote scan finished in %dms", self._metrics['last_remote_scan_time'])
    self.remoteScanFinished.emit()
def _scan_remote(self, from_state=None):
    """Recursively scan the bound remote folder looking for updates"""
    start_ms = current_milli_time()
    try:
        if from_state is None:
            from_state = self._dao.get_state_from_local('/')
        self._client = self._engine.get_remote_client()
        remote_info = self._client.get_info(from_state.remote_ref)
        self._dao.update_remote_state(from_state, remote_info, from_state.remote_parent_path)
    except NotFound:
        log.debug("Marking %r as remotely deleted.", from_state)
        # Should unbind ?
        # from_state.update_remote(None)
        self._dao.commit()
        self._metrics['last_remote_scan_time'] = current_milli_time() - start_ms
        return
    self._get_changes()
    self._save_changes_state()
    # recursive update
    self._scan_remote_recursive(from_state, remote_info)
    self._last_remote_full_scan = datetime.utcnow()
    self._dao.update_config('remote_last_full_scan', self._last_remote_full_scan)
    self._dao.clean_scanned()
    self._dao.commit()
    self._metrics['last_remote_scan_time'] = current_milli_time() - start_ms
    log.debug("Remote scan finished in %dms", self._metrics['last_remote_scan_time'])
    self.remoteScanFinished.emit()
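# All of the timing in this section goes through a millisecond clock helper.
# A minimal sketch of what `current_milli_time` presumably looks like (the
# real helper lives in nxdrive.utils; this reconstruction simply mirrors the
# inline `int(round(time() * 1000))` pattern used in _execute() further down):
import time

def current_milli_time():
    # Milliseconds since the epoch, as an int
    return int(round(time.time() * 1000))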
def _scan(self):
    log.debug("Full scan started")
    start_ms = current_milli_time()
    self._suspend_queue()
    self._delete_files = dict()
    self._protected_files = dict()
    info = self.client.get_info(u'/')
    self._scan_recursive(info)
    self._scan_handle_deleted_files()
    self._metrics['last_local_scan_time'] = current_milli_time() - start_ms
    log.debug("Full scan finished in %dms", self._metrics['last_local_scan_time'])
    self._local_scan_finished = True
    self._engine.get_queue_manager().resume()
    self.localScanFinished.emit()
def _win_dequeue_delete(self):
    self._win_lock.acquire()
    try:
        delete_events = self._delete_events
        for evt in delete_events.values():
            evt_time = evt[0]
            evt_pair = evt[1]
            if current_milli_time() - evt_time < WIN_MOVE_RESOLUTION_PERIOD:
                log.debug("Win: ignoring delete event as waiting for move resolution period expiration: %r", evt)
                continue
            if not self.client.exists(evt_pair.local_path):
                log.debug("Win: handling watchdog delete for event: %r", evt)
                self._handle_watchdog_delete(evt_pair)
            else:
                remote_id = self.client.get_remote_id(evt_pair.local_path)
                if remote_id == evt_pair.remote_ref or remote_id is None:
                    log.debug("Win: ignoring delete event as file still exists: %r", evt)
                else:
                    log.debug("Win: handling watchdog delete for event: %r", evt)
                    self._handle_watchdog_delete(evt_pair)
            log.debug("Win: dequeuing delete event: %r", evt)
            del self._delete_events[evt_pair.remote_ref]
    except ThreadInterrupt:
        raise
    except Exception as e:
        log.exception(e)
    finally:
        self._win_lock.release()
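# The method above parks Windows delete events as (timestamp, pair) tuples and
# only acts on them once WIN_MOVE_RESOLUTION_PERIOD has elapsed, so a
# delete+create pair produced by a move can be recombined first. A hedged,
# self-contained sketch of that debounce pattern (the class and the constant
# value are illustrative, not the project's actual API):
import time

WIN_MOVE_RESOLUTION_PERIOD_MS = 2000  # assumed value, for the sketch only

class DeleteDebouncer(object):
    def __init__(self):
        self._events = {}  # remote_ref -> (timestamp_ms, payload)

    def park(self, remote_ref, payload):
        self._events[remote_ref] = (int(time.time() * 1000), payload)

    def cancel(self, remote_ref):
        # A matching create event arrived: the "delete" was really a move
        return self._events.pop(remote_ref, None)

    def due(self):
        # Yield payloads whose resolution period has expired
        now = int(time.time() * 1000)
        for ref, (ts, payload) in list(self._events.items()):
            if now - ts >= WIN_MOVE_RESOLUTION_PERIOD_MS:
                del self._events[ref]
                yield payload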
def _scan(self):
    log.debug("Full scan started")
    start_ms = current_milli_time()
    self._delete_files = dict()
    self._protected_files = dict()
    info = self.client.get_info(u'/')
    self._scan_recursive(info)
    for deleted in self._delete_files:
        if deleted in self._protected_files:
            continue
        self._dao.delete_local_state(self._delete_files[deleted])
    self._metrics['last_local_scan_time'] = current_milli_time() - start_ms
    log.debug("Full scan finished in %dms", self._metrics['last_local_scan_time'])
    self._local_scan_finished = True
    self.localScanFinished.emit()
def _handle_watchdog_event_on_known_acquired_pair(self, doc_pair, evt, rel_path):
    if evt.event_type == 'deleted':
        # Delay the delete event on Windows
        if self._windows:
            self._win_lock.acquire()
            log.debug('Add pair to delete events: %r', doc_pair)
            try:
                self._delete_events[doc_pair.remote_ref] = (current_milli_time(), doc_pair)
            finally:
                self._win_lock.release()
        else:
            # Can be an issue on a case-insensitive filesystem
            if self.client.exists(doc_pair.local_path):
                remote_id = self.client.get_remote_id(doc_pair.local_path)
                if remote_id == doc_pair.remote_ref or remote_id is None:
                    # This happens on update; don't do anything
                    return
            self._handle_watchdog_delete(doc_pair)
        return
    if evt.event_type == 'created':
        # NXDRIVE-471 case maybe
        remote_ref = self.client.get_remote_id(rel_path)
        if remote_ref is None:
            log.debug("Created event on a known pair with no remote_ref,"
                      " this should only happen in case of a quick move and copy-paste: %r", doc_pair)
            return
        else:
            # NXDRIVE-509
            log.debug("Created event on a known pair with a remote_ref: %r", doc_pair)
    local_info = self.client.get_info(rel_path, raise_if_missing=False)
    if local_info is not None:
        # Unchanged folder
        if doc_pair.folderish:
            log.debug('Unchanged folder %s (watchdog event [%s]), only update last_local_updated',
                      rel_path, evt.event_type)
            self._dao.update_local_modification_time(doc_pair, local_info)
            return
        if doc_pair.local_state == 'synchronized':
            digest = local_info.get_digest()
            # Unchanged digest, can be the case if only the last modification time
            # or file permissions have been updated
            if doc_pair.local_digest == digest:
                log.debug('Digest has not changed for %s (watchdog event [%s]), only update last_local_updated',
                          rel_path, evt.event_type)
                if local_info.remote_ref is None:
                    self.client.set_remote_id(rel_path, doc_pair.remote_ref)
                self._dao.update_local_modification_time(doc_pair, local_info)
                return
            doc_pair.local_digest = digest
            doc_pair.local_state = 'modified'
        if (AbstractOSIntegration.is_mac() and evt.event_type == 'modified'
                and doc_pair.remote_ref is not None
                and doc_pair.remote_ref != local_info.remote_ref):
            original_pair = self._dao.get_normal_state_from_remote(local_info.remote_ref)
            original_info = None
            if original_pair is not None:
                original_info = self.client.get_info(original_pair.local_path, raise_if_missing=False)
            if original_info is not None and original_info.remote_ref == local_info.remote_ref:
                log.debug("MacOSX has postponed overwriting of xattr, need to reset remote_ref for %r", doc_pair)
                # We are in a copy/paste situation with the OS overriding the xattribute
                self.client.set_remote_id(doc_pair.local_path, doc_pair.remote_ref)
        self._dao.update_local_state(doc_pair, local_info)
def _push_to_scan(self, info):
    if isinstance(info, FileInfo):
        ref = info.path
        super(SimpleWatcher, self)._push_to_scan(info)
        return
    else:
        ref = info
    log.warn("should scan: %s", ref)
    self._to_scan[ref] = current_milli_time()
def finish_action():
    if (current_thread().ident in Action.actions
            and Action.actions[current_thread().ident] is not None):
        Action.actions[current_thread().ident].finished = True
        if isinstance(Action.actions[current_thread().ident], FileAction):
            Action.actions[current_thread().ident].end_time = current_milli_time()
            # Save last file actions
            Action.lastFileActions[current_thread().ident] = Action.actions[current_thread().ident]
    Action.actions[current_thread().ident] = None
def __init__(self, action_type, filepath, filename=None, size=None):
    super(FileAction, self).__init__(action_type, 0)
    self.filepath = filepath
    self.filename = filename or os.path.basename(filepath)
    if size is None:
        self.size = os.path.getsize(filepath)
    else:
        self.size = size
    self.start_time = current_milli_time()
    self.end_time = None
def finish_action():
    thread_id = current_thread().ident
    action = Action.actions.get(thread_id)
    if action:
        Action.actions[thread_id].finished = True
        if isinstance(Action.actions[thread_id], FileAction):
            Action.actions[thread_id].end_time = current_milli_time()
            # Save last file actions
            Action.lastFileActions[thread_id] = Action.actions[thread_id]
    Action.actions[thread_id] = None
def __init__(self, action_type, filepath, filename=None, size=None):
    super(FileAction, self).__init__(action_type, 0)
    self.filepath = filepath
    if filename is None:
        self.filename = os.path.basename(filepath)
    else:
        self.filename = filename
    if size is None:
        self.size = os.path.getsize(filepath)
    else:
        self.size = size
    self.start_time = current_milli_time()
    self.end_time = None
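# A hedged usage sketch of the FileAction timing pattern above: a worker
# registers a FileAction for its thread, does the transfer, then calls
# finish_action() so start_time/end_time bracket the work. Registration is
# shown explicitly here although the real Action constructor may already do
# it; the transfer body and the path argument are placeholders.
from threading import current_thread

def timed_transfer(path):
    Action.actions[current_thread().ident] = FileAction("Upload", path)
    try:
        pass  # ... perform the actual transfer of `path` here ...
    finally:
        finish_action()  # stamps end_time and archives into lastFileActions
    last = Action.lastFileActions[current_thread().ident]
    return last.end_time - last.start_time  # elapsed milliseconds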
def _execute(self):
    try:
        trigger_local_scan = False
        self._init()
        if not self.client.exists('/'):
            self.rootDeleted.emit()
            return
        self._action = Action("Setup watchdog")
        self._watchdog_queue = Queue()
        self._setup_watchdog()
        log.debug("Watchdog setup finished")
        self._action = Action("Full local scan")
        self._scan()
        self._end_action()
        # Check Windows dequeue and folder scan only every 100 loops (every 1s)
        current_time_millis = int(round(time() * 1000))
        self._win_delete_interval = current_time_millis
        self._win_folder_scan_interval = current_time_millis
        i = 0
        while (1):
            self._interact()
            sleep(0.01)
            while (not self._watchdog_queue.empty()):
                # Don't retest if already covered by the local scan
                evt = self._watchdog_queue.get()
                self.handle_watchdog_event(evt)
            # Check to scan
            i += 1
            if i % 100 != 0:
                continue
            i = 0
            threshold_time = current_milli_time() - 1000 * self._scan_delay
            # Need to create a list of paths to scan as the dictionary cannot grow while iterating
            local_scan = []
            for path, last_event_time in self._to_scan.iteritems():
                if last_event_time < threshold_time:
                    local_scan.append(path)
            for path in local_scan:
                self._scan_path(path)
                # Don't delete if the time has changed since the last scan
                if self._to_scan[path] < threshold_time:
                    del self._to_scan[path]
            if (len(self._delete_files)):
                # Enforce scan of all other folders to not lose track of a moved file
                self._scan_handle_deleted_files()
    except ThreadInterrupt:
        raise
    finally:
        self._stop_watchdog()
def handle_watchdog_event(self, evt):
    self._metrics['last_event'] = current_milli_time()
    # For creation and deletion just update the parent folder
    src_path = normalize_event_filename(evt.src_path)
    rel_path = self.client.get_path(src_path)
    file_name = os.path.basename(src_path)
    if self.client.is_temp_file(file_name) or rel_path == '/.partials':
        return
    if evt.event_type == 'moved':
        self.handle_watchdog_move(evt, src_path, rel_path)
        return
    # Don't care about an ignored file, unless it is moved
    if self.client.is_ignored(os.path.dirname(rel_path), file_name):
        return
    log.warn("Got evt: %r", evt)
    if len(rel_path) == 0 or rel_path == '/':
        self._push_to_scan('/')
        return
    # If not modified then we will scan the parent folder later
    if evt.event_type != 'modified':
        log.warn(rel_path)
        parent_rel_path = os.path.dirname(rel_path)
        if parent_rel_path == "":
            parent_rel_path = '/'
        self._push_to_scan(parent_rel_path)
        return
    file_name = os.path.basename(src_path)
    doc_pair = self._dao.get_state_from_local(rel_path)
    if not os.path.exists(src_path):
        log.warn("Event on a disappeared file: %r %s %s", evt, rel_path, file_name)
        return
    if doc_pair is not None and doc_pair.processor > 0:
        log.warn("Don't update as in process %r", doc_pair)
        return
    if isinstance(evt, DirModifiedEvent):
        self._push_to_scan(rel_path)
    else:
        local_info = self.client.get_info(rel_path, raise_if_missing=False)
        if local_info is None or doc_pair is None:
            # Suspicious
            return
        digest = local_info.get_digest()
        if doc_pair.local_state != 'created':
            if doc_pair.local_digest != digest:
                doc_pair.local_state = 'modified'
        doc_pair.local_digest = digest
        log.warn("file is updated: %r", doc_pair)
        self._dao.update_local_state(doc_pair, local_info, versionned=True)
def _execute(self):
    first_pass = True
    try:
        self._init()
        while True:
            self._interact()
            now = current_milli_time()
            if self._next_check < now:
                self._next_check = now + self.server_interval * 1000
                if self._handle_changes(first_pass):
                    first_pass = False
            sleep(0.01)
    except ThreadInterrupt:
        self.remoteWatcherStopped.emit()
        raise
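# The scheduler above boils down to: sleep in 10 ms slices so the thread stays
# interruptible, and only poll the server once per server_interval. The same
# idiom, stripped to a self-contained sketch (names are illustrative):
import time

def poll_loop(handle_changes, interval_s, tick_s=0.01):
    next_check = 0
    while True:
        now = int(round(time.time() * 1000))
        if next_check < now:
            next_check = now + interval_s * 1000
            handle_changes()
        time.sleep(tick_s)  # keep the loop responsive between polls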
def handle_watchdog_event(self, evt):
    self._metrics['last_event'] = current_milli_time()
    # For creation and deletion just update the parent folder
    src_path = normalize_event_filename(evt.src_path)
    rel_path = self.client.get_path(src_path)
    file_name = os.path.basename(src_path)
    if self.client.is_temp_file(file_name) or rel_path == '/.partials':
        return
    if evt.event_type == 'moved':
        self.handle_watchdog_move(evt, src_path, rel_path)
        return
    # Don't care about an ignored file, unless it is moved
    if self.client.is_ignored(os.path.dirname(rel_path), file_name):
        return
    log.warn("Got evt: %r", evt)
    if len(rel_path) == 0 or rel_path == '/':
        self._push_to_scan('/')
        return
    # If not modified then we will scan the parent folder later
    if evt.event_type != 'modified':
        log.warn(rel_path)
        parent_rel_path = os.path.dirname(rel_path)
        if parent_rel_path == "":
            parent_rel_path = '/'
        self._push_to_scan(parent_rel_path)
        return
    file_name = os.path.basename(src_path)
    doc_pair = self._dao.get_state_from_local(rel_path)
    if not os.path.exists(src_path):
        log.warn("Event on a disappeared file: %r %s %s", evt, rel_path, file_name)
        return
    if doc_pair is not None and doc_pair.processor > 0:
        log.warn("Don't update as in process %r", doc_pair)
        return
    if isinstance(evt, DirModifiedEvent):
        self._push_to_scan(rel_path)
    else:
        local_info = self.client.get_info(rel_path, raise_if_missing=False)
        if local_info is None or doc_pair is None:
            # Suspicious; doc_pair is also guarded here as it can still be None
            return
        digest = local_info.get_digest()
        if doc_pair.local_state != 'created':
            if doc_pair.local_digest != digest:
                doc_pair.local_state = 'modified'
        doc_pair.local_digest = digest
        log.warn("file is updated: %r", doc_pair)
        self._dao.update_local_state(doc_pair, local_info, versionned=True)
def _execute(self):
    try:
        self._init()
        if not self.client.exists('/'):
            self.rootDeleted.emit()
            return
        self._action = Action("Setup watchdog")
        self._watchdog_queue = Queue()
        self._setup_watchdog()
        log.debug("Watchdog setup finished")
        self._action = Action("Full local scan")
        self._scan()
        self._end_action()
        # Check Windows dequeue and folder scan only every 100 loops (every 1s)
        current_time_millis = int(round(time() * 1000))
        self._win_delete_interval = current_time_millis
        self._win_folder_scan_interval = current_time_millis
        i = 0
        while (1):
            self._interact()
            sleep(0.01)
            while (not self._watchdog_queue.empty()):
                # Don't retest if already covered by the local scan
                evt = self._watchdog_queue.get()
                self.handle_watchdog_event(evt)
            # Check to scan
            i += 1
            if i % 100 != 0:
                continue
            i = 0
            threshold_time = current_milli_time() - 1000 * self._scan_delay
            # Need to create a list of paths to scan as the dictionary cannot grow while iterating
            local_scan = []
            for path, last_event_time in self._to_scan.iteritems():
                if last_event_time < threshold_time:
                    local_scan.append(path)
            for path in local_scan:
                self._scan_path(path)
                # Don't delete if the time has changed since the last scan
                if self._to_scan[path] < threshold_time:
                    del self._to_scan[path]
            if (len(self._delete_files)):
                # Enforce scan of all other folders to not lose track of a moved file
                self._scan_handle_deleted_files()
    except ThreadInterrupt:
        raise
    finally:
        self._stop_watchdog()
def _check_last_sync(self):
    from nxdrive.engine.watcher.local_watcher import WIN_MOVE_RESOLUTION_PERIOD
    qm_active = self._queue_manager.active()
    qm_size = self._queue_manager.get_overall_size()
    empty_polls = self._remote_watcher.get_metrics()["empty_polls"]
    log.debug('Checking sync completed: queue manager is %s, overall size = %d, empty polls count = %d',
              'active' if qm_active else 'inactive', qm_size, empty_polls)
    local_metrics = self._local_watcher.get_metrics()
    if (qm_size == 0 and not qm_active and empty_polls > 0
            and (current_milli_time() - local_metrics["last_event"]) > WIN_MOVE_RESOLUTION_PERIOD):
        self._dao.update_config("last_sync_date", datetime.datetime.utcnow())
        if local_metrics['last_event'] == 0:
            log.warn("No watchdog event detected but sync is completed")
        if self._sync_started:
            self._sync_started = False
        log.debug('Emitting syncCompleted for engine %s', self.get_uid())
        self.syncCompleted.emit()
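# The "is sync finished?" test above is a conjunction of four signals: no
# queued items, an idle queue manager, at least one empty poll, and a quiet
# watchdog for longer than the Windows move-resolution window. Restated as a
# standalone predicate (argument names are illustrative):
def sync_completed(qm_active, qm_size, empty_polls, last_event_ms, now_ms,
                   move_resolution_period_ms):
    return (qm_size == 0
            and not qm_active
            and empty_polls > 0
            and now_ms - last_event_ms > move_resolution_period_ms)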
def _win_dequeue_folder_scan(self):
    self._win_lock.acquire()
    try:
        folder_scan_events = self._folder_scan_events.values()
        for evt in folder_scan_events:
            evt_time = evt[0]
            evt_pair = evt[1]
            local_path = evt_pair.local_path
            if current_milli_time() - evt_time < self._windows_folder_scan_delay:
                log.debug("Win: ignoring folder to scan as waiting for folder scan delay expiration: %r", local_path)
                continue
            if not self.client.exists(local_path):
                if local_path in self._folder_scan_events:
                    log.debug("Win: dequeuing folder scan event as folder doesn't exist: %r", local_path)
                    del self._folder_scan_events[local_path]
                continue
            local_info = self.client.get_info(local_path, raise_if_missing=False)
            if local_info is None:
                log.trace("Win: dequeuing folder scan event as folder doesn't exist: %r", local_path)
                del self._folder_scan_events[local_path]
                continue
            log.debug("Win: handling folder to scan: %r", local_path)
            self.scan_pair(local_path)
            local_info = self.client.get_info(local_path, raise_if_missing=False)
            if local_info is not None and mktime(local_info.last_modification_time.timetuple()) > evt_time:
                # Re-schedule scan as the folder has been modified since last check
                self._folder_scan_events[local_path] = (mktime(local_info.last_modification_time.timetuple()), evt_pair)
            else:
                log.debug("Win: dequeuing folder scan event: %r", evt)
                del self._folder_scan_events[local_path]
    except ThreadInterrupt:
        raise
    except Exception as e:
        log.exception(e)
    finally:
        self._win_lock.release()
def _debug_show_message(self):
    from nxdrive.utils import current_milli_time
    self.show_message("Debug Systray message",
                      "This is a random message %d" % (current_milli_time()))
def _execute(self):
    self._current_metrics = dict()
    self._current_item = self._get_item()
    soft_lock = None
    while self._continue and self._current_item is not None:
        # Take the clients every time as they are cached in the engine
        local_client = self._engine.get_local_client()
        remote_client = self._engine.get_remote_client()
        doc_pair = None
        try:
            doc_pair = self._dao.acquire_state(self._thread_id, self._current_item.id)
            if doc_pair and doc_pair.last_remote_modifier:
                self._engine._user_name_resolver.refresh_user(doc_pair.last_remote_modifier)
        except:
            log.trace("Cannot acquire state for: %r", self._current_item)
            self._engine.get_queue_manager().push(self._current_item)
            self._current_item = self._get_item()
            continue
        try:
            if doc_pair is None:
                log.trace("Didn't acquire state, dropping %r", self._current_item)
                self._current_item = self._get_item()
                continue
            log.debug('Executing processor on %r(%d)', doc_pair, doc_pair.version)
            self._current_doc_pair = doc_pair
            self._current_temp_file = None
            if (doc_pair.pair_state == 'synchronized'
                    or doc_pair.pair_state == 'unsynchronized'
                    or doc_pair.pair_state is None
                    or doc_pair.pair_state.startswith('parent_')):
                log.trace("Skip as pair is in non-processable state: %r", doc_pair)
                self._current_item = self._get_item()
                if doc_pair.pair_state == 'synchronized':
                    self._handle_readonly(local_client, doc_pair)
                continue
            # TODO Update as the server doesn't take a hash to avoid conflict yet
            if (doc_pair.pair_state.startswith("locally")
                    and doc_pair.remote_ref is not None):
                try:
                    remote_info = remote_client.get_info(doc_pair.remote_ref)
                    if remote_info.digest != doc_pair.remote_digest:
                        doc_pair.remote_state = 'modified'
                    self._refresh_remote(doc_pair, remote_client, remote_info)
                    # Can run into conflict
                    if doc_pair.pair_state == 'conflicted':
                        self._current_item = self._get_item()
                        continue
                    doc_pair = self._dao.get_state_from_id(doc_pair.id)
                    if doc_pair is None:
                        self._current_item = self._get_item()
                        continue
                except NotFound:
                    doc_pair.remote_ref = None
            parent_path = doc_pair.local_parent_path
            if (parent_path == ''):
                parent_path = "/"
            if not local_client.exists(parent_path):
                if doc_pair.remote_state == "deleted":
                    self._dao.remove_state(doc_pair)
                    continue
                self._handle_no_parent(doc_pair, local_client, remote_client)
                self._current_item = self._get_item()
                continue
            self._current_metrics = dict()
            handler_name = '_synchronize_' + doc_pair.pair_state
            self._action = Action(handler_name)
            sync_handler = getattr(self, handler_name, None)
            if sync_handler is None:
                log.debug("Unhandled pair_state: %r for %r", doc_pair.pair_state, doc_pair)
                self.increase_error(doc_pair, "ILLEGAL_STATE")
                self._current_item = self._get_item()
                continue
            else:
                self._current_metrics = dict()
                self._current_metrics["handler"] = doc_pair.pair_state
                self._current_metrics["start_time"] = current_milli_time()
                log.trace("Calling %s on doc pair %r", sync_handler, doc_pair)
                try:
                    soft_lock = self._lock_soft_path(doc_pair.local_path)
                    sync_handler(doc_pair, local_client, remote_client)
                    self._current_metrics["end_time"] = current_milli_time()
                    self.pairSync.emit(doc_pair, self._current_metrics)
                    # TO_REVIEW May have a call to reset_error
                    log.trace("Finish %s on doc pair %r", sync_handler, doc_pair)
                except ThreadInterrupt:
                    raise
                except PairInterrupt:
                    from time import sleep
                    # Wait one second to avoid retrying too quickly
                    self._current_doc_pair = None
                    log.debug("PairInterrupt wait 1s and requeue on %r", doc_pair)
                    sleep(1)
                    self._engine.get_queue_manager().push(doc_pair)
                    continue
                except Exception as e:
                    log.exception(e)
                    self.increase_error(doc_pair, "SYNC HANDLER: %s" % handler_name, exception=e)
                    self._current_item = self._get_item()
                    continue
        except ThreadInterrupt:
            self._engine.get_queue_manager().push(doc_pair)
            raise
        except Exception as e:
            log.exception(e)
            self.increase_error(doc_pair, "EXCEPTION", exception=e)
            raise e
        finally:
            if soft_lock is not None:
                self._unlock_soft_path(soft_lock)
            self._dao.release_state(self._thread_id)
        self._interact()
        self._current_item = self._get_item()
    log.trace('%s processor terminated' if not self._continue else '%s processor finished, queue is empty',
              self.get_name())
def _handle_queues(self):
    uploaded = False
    # Lock any documents
    while (not self._lock_queue.empty()):
        try:
            item = self._lock_queue.get_nowait()
            ref = item[0]
            log.trace('Handling DirectEdit lock queue ref: %r', ref)
        except Empty:
            break
        uid = ""
        try:
            dir_path = os.path.dirname(ref)
            uid, engine, remote_client, _, _ = self._extract_edit_info(ref)
            if item[1] == 'lock':
                remote_client.lock(uid)
                self._local_client.set_remote_id(dir_path, "1", "nxdirecteditlock")
                # Emit the lock signal only when the lock is really set
                self._manager.get_autolock_service().documentLocked.emit(os.path.basename(ref))
            else:
                remote_client.unlock(uid)
                if item[1] == 'unlock_orphan':
                    path = self._local_client._abspath(ref)
                    log.trace("Remove orphan: %s", path)
                    self._manager.get_autolock_service().orphan_unlocked(path)
                    # Clean the folder
                    shutil.rmtree(self._local_client._abspath(path), ignore_errors=True)
                self._local_client.remove_remote_id(dir_path, "nxdirecteditlock")
                # Emit the signal only when the unlock is done - might want to avoid the call on orphan
                self._manager.get_autolock_service().documentUnlocked.emit(os.path.basename(ref))
        except Exception as e:
            # Try again in 30s
            log.debug("Can't %s document '%s': %r", item[1], ref, e, exc_info=True)
            self.directEditLockError.emit(item[1], os.path.basename(ref), uid)
    # Unqueue any errors
    item = self._error_queue.get()
    while (item is not None):
        self._upload_queue.put(item.get())
        item = self._error_queue.get()
    # Handle the upload queue
    while (not self._upload_queue.empty()):
        try:
            ref = self._upload_queue.get_nowait()
            log.trace('Handling DirectEdit queue ref: %r', ref)
        except Empty:
            break
        uid, engine, remote_client, digest_algorithm, digest = self._extract_edit_info(ref)
        # Don't update if the digests are the same
        info = self._local_client.get_info(ref)
        try:
            current_digest = info.get_digest(digest_func=digest_algorithm)
            if current_digest == digest:
                continue
            start_time = current_milli_time()
            log.trace("Local digest: %s is different from the recorded one: %s - modification detected for %r",
                      current_digest, digest, ref)
            # TO_REVIEW Should check if server-side blob has changed ?
            # Update the document - should verify the remote hash - NXDRIVE-187
            remote_info = remote_client.get_info(uid)
            if remote_info.digest != digest:
                # Conflict detected
                log.trace("Remote digest: %s is different from the recorded one: %s - conflict detected for %r",
                          remote_info.digest, digest, ref)
                self.directEditConflict.emit(os.path.basename(ref), ref, remote_info.digest)
                continue
            log.debug('Uploading file %s', self._local_client._abspath(ref))
            remote_client.stream_update(uid, self._local_client._abspath(ref), apply_versioning_policy=True)
            # Update hash value
            dir_path = os.path.dirname(ref)
            self._local_client.set_remote_id(dir_path, current_digest, 'nxdirecteditdigest')
            self._last_action_timing = current_milli_time() - start_time
            self.editDocument.emit(remote_info)
        except ThreadInterrupt:
            raise
        except Exception as e:
            # Try again in 30s
            log.trace("Exception on direct edit: %r", e, exc_info=True)
            self._error_queue.push(ref, ref)
            continue
        uploaded = True
    if uploaded:
        log.debug('Emitting directEditUploadCompleted')
        self.directEditUploadCompleted.emit()
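# The upload path above hinges on a three-way digest comparison: skip when the
# local file matches the recorded digest, flag a conflict when the server copy
# has diverged, otherwise push. Restated as a standalone decision function
# (names are illustrative):
def upload_decision(local_digest, recorded_digest, remote_digest):
    if local_digest == recorded_digest:
        return 'skip'      # no local modification since the last sync
    if remote_digest != recorded_digest:
        return 'conflict'  # the server-side blob changed too (see NXDRIVE-187)
    return 'upload'        # only the local copy changed; safe to push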
def _prepare_edit(self, server_url, doc_id, user=None, download_url=None):
    start_time = current_milli_time()
    engine = self._get_engine(server_url, user=user)
    if engine is None:
        values = dict()
        if user is None:
            values['user'] = '******'
        else:
            values['user'] = user
        values['server'] = server_url
        log.warn("No engine found for server_url=%s, user=%s, doc_id=%s",
                 server_url, user, doc_id)
        self._display_modal("DIRECT_EDIT_CANT_FIND_ENGINE", values)
        return
    # Get document info
    remote_client = engine.get_remote_doc_client()
    # Avoid any link with the engine, remote_doc are not cached so we can do that
    remote_client.check_suspended = self.stop_client
    info = remote_client.get_info(doc_id)
    filename = info.filename
    # Create local structure
    dir_path = os.path.join(self._folder, doc_id)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    log.debug("Editing %r", filename)
    file_path = os.path.join(dir_path, filename)
    # Download the file
    url = None
    if download_url is not None:
        url = server_url
        if not url.endswith('/'):
            url += '/'
        url += download_url
    tmp_file = self._download_content(engine, remote_client, info, file_path, url=url)
    if tmp_file is None:
        log.debug("Download failed")
        return
    # Set the remote_id
    dir_path = self._local_client.get_path(os.path.dirname(file_path))
    self._local_client.set_remote_id(dir_path, doc_id)
    self._local_client.set_remote_id(dir_path, server_url, "nxdirectedit")
    if user is not None:
        self._local_client.set_remote_id(dir_path, user, "nxdirectedituser")
    if info.digest is not None:
        self._local_client.set_remote_id(dir_path, info.digest, "nxdirecteditdigest")
        # Set digest algorithm if not sent by the server
        digest_algorithm = info.digest_algorithm
        if digest_algorithm is None:
            digest_algorithm = guess_digest_algorithm(info.digest)
        self._local_client.set_remote_id(dir_path, digest_algorithm, "nxdirecteditdigestalgorithm")
    self._local_client.set_remote_id(dir_path, filename, "nxdirecteditname")
    # Rename to final filename
    # Under Windows first need to delete target file if exists, otherwise will get a 183 WindowsError
    if sys.platform == 'win32' and os.path.exists(file_path):
        os.unlink(file_path)
    os.rename(tmp_file, file_path)
    self._last_action_timing = current_milli_time() - start_time
    self.openDocument.emit(info)
    return file_path
def _prepare_edit(self, server_url, doc_id, user=None, download_url=None):
    start_time = current_milli_time()
    engine = self._get_engine(server_url, user=user)
    if engine is None:
        values = dict()
        values['user'] = str(user)
        values['server'] = server_url
        log.warning('No engine found for server_url=%s, user=%s, doc_id=%s',
                    server_url, user, doc_id)
        self._display_modal('DIRECT_EDIT_CANT_FIND_ENGINE', values)
        return None
    # Get document info
    remote_client = engine.get_remote_doc_client()
    # Avoid any link with the engine, remote_doc are not cached so we can do that
    remote_client.check_suspended = self.stop_client
    doc = remote_client.fetch(
        doc_id,
        extra_headers={'fetch-document': 'lock'},
        enrichers=['permissions'],
    )
    info = remote_client.doc_to_info(doc, fetch_parent_uid=False)
    if info.lock_owner is not None and info.lock_owner != engine.remote_user:
        log.debug("Doc %s was locked by %s on %s, won't download it for edit",
                  info.name, info.lock_owner, info.lock_created)
        self.directEditLocked.emit(info.name, info.lock_owner, info.lock_created)
        return None
    if info.permissions is not None and 'Write' not in info.permissions:
        log.debug("Doc %s is readonly for %s, won't download it for edit",
                  info.name, user)
        self.directEditReadonly.emit(info.name)
        return None
    filename = info.filename
    # Create local structure
    dir_path = os.path.join(self._folder, doc_id)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    log.debug("Editing %r", filename)
    file_path = os.path.join(dir_path, filename)
    # Download the file
    url = None
    if download_url is not None:
        url = server_url
        if not url.endswith('/'):
            url += '/'
        url += download_url
    tmp_file = self._download_content(engine, remote_client, info, file_path, url=url)
    if tmp_file is None:
        log.debug("Download failed")
        return None
    # Set the remote_id
    dir_path = self._local_client.get_path(os.path.dirname(file_path))
    self._local_client.set_remote_id(dir_path, doc_id)
    self._local_client.set_remote_id(dir_path, server_url, "nxdirectedit")
    if user is not None:
        self._local_client.set_remote_id(dir_path, user, "nxdirectedituser")
    if info.digest is not None:
        self._local_client.set_remote_id(dir_path, info.digest, "nxdirecteditdigest")
        # Set digest algorithm if not sent by the server
        digest_algorithm = info.digest_algorithm
        if digest_algorithm is None:
            digest_algorithm = guess_digest_algorithm(info.digest)
        self._local_client.set_remote_id(dir_path, digest_algorithm, "nxdirecteditdigestalgorithm")
    self._local_client.set_remote_id(dir_path, filename, "nxdirecteditname")
    # Rename to final filename
    # Under Windows first need to delete target file if exists, otherwise will get a 183 WindowsError
    if sys.platform == 'win32' and os.path.exists(file_path):
        os.unlink(file_path)
    os.rename(tmp_file, file_path)
    self._last_action_timing = current_milli_time() - start_time
    self.openDocument.emit(info)
    return file_path
def handle_watchdog_event(self, evt):
    log.trace("watchdog event: %r", evt)
    self._metrics['last_event'] = current_milli_time()
    self._action = Action("Handle watchdog event")
    if evt.event_type == 'moved':
        log.debug("Handling watchdog event [%s] on %s to %s", evt.event_type, evt.src_path, evt.dest_path)
    else:
        log.debug("Handling watchdog event [%s] on %r", evt.event_type, evt.src_path)
    try:
        src_path = normalize_event_filename(evt.src_path)
        rel_path = self.client.get_path(src_path)
        if len(rel_path) == 0 or rel_path == '/':
            self.handle_watchdog_root_event(evt)
            return
        file_name = os.path.basename(src_path)
        parent_path = os.path.dirname(src_path)
        parent_rel_path = self.client.get_path(parent_path)
        doc_pair = self._dao.get_state_from_local(rel_path)
        # Don't care about an ignored file, unless it is moved
        if (self.client.is_ignored(parent_rel_path, file_name)
                and evt.event_type != 'moved'):
            return
        if self.client.is_temp_file(file_name):
            return
        if doc_pair is not None:
            if doc_pair.pair_state == 'unsynchronized':
                log.debug("Ignoring %s as marked unsynchronized", doc_pair.local_path)
                if (evt.event_type == 'deleted'
                        or evt.event_type == 'moved'
                        and not is_office_temp_file(os.path.basename(evt.dest_path))):
                    log.debug('Removing pair state for deleted or moved event: %r', doc_pair)
                    self._dao.remove_state(doc_pair)
                return
            self._handle_watchdog_event_on_known_pair(doc_pair, evt, rel_path)
            return
        if evt.event_type == 'deleted':
            log.debug('Unknown pair deleted: %s', rel_path)
            return
        if (evt.event_type == 'moved'):
            dest_filename = os.path.basename(evt.dest_path)
            if (self.client.is_ignored(parent_rel_path, dest_filename)):
                return
            # Ignore normalization of the filename on the file system
            # See https://jira.nuxeo.com/browse/NXDRIVE-188
            if evt.dest_path == normalize_event_filename(evt.src_path):
                log.debug('Ignoring move from %r to normalized name: %r', evt.src_path, evt.dest_path)
                return
            src_path = normalize_event_filename(evt.dest_path)
            rel_path = self.client.get_path(src_path)
            local_info = self.client.get_info(rel_path, raise_if_missing=False)
            doc_pair = self._dao.get_state_from_local(rel_path)
            # If the file exists but not the pair
            if local_info is not None and doc_pair is None:
                # Check if it is a pair that we lost track of
                if local_info.remote_ref is not None:
                    doc_pair = self._dao.get_normal_state_from_remote(local_info.remote_ref)
                    if doc_pair is not None and not self.client.exists(doc_pair.local_path):
                        log.debug("Pair re-moved detected for %r", doc_pair)
                        # Can be a move inside a folder that has also moved
                        self._handle_watchdog_event_on_known_pair(doc_pair, evt, rel_path)
                        return
                rel_parent_path = self.client.get_path(os.path.dirname(src_path))
                if rel_parent_path == '':
                    rel_parent_path = '/'
                self._dao.insert_local_state(local_info, rel_parent_path)
                # An event can be missed inside a newly created folder as
                # watchdog will put the listener in place after it
                if local_info.folderish:
                    self.scan_pair(rel_path)
                    doc_pair = self._dao.get_state_from_local(rel_path)
                    self._schedule_win_folder_scan(doc_pair)
            return
        # If the pair is modified and not known, consider it as created
        if evt.event_type == 'created' or evt.event_type == 'modified':
            # If doc_pair is not None it means
            # the creation has been caught by the scan
            # As Windows sends a delete / create event for reparenting
            # Ignore .*.nxpart ?
            '''
            for deleted in deleted_files:
                if deleted.local_digest == digest:
                    # Move detected
                    log.info('Detected a file movement %r', deleted)
                    deleted.update_state('moved', deleted.remote_state)
                    deleted.update_local(self.client.get_info(rel_path))
                    continue
            '''
            local_info = self.client.get_info(rel_path, raise_if_missing=False)
            if local_info is None:
                log.trace("Event on a disappeared file: %r %s %s", evt, rel_path, file_name)
                return
            # This might be a move, but Windows doesn't emit this event...
            if local_info.remote_ref is not None:
                moved = False
                from_pair = self._dao.get_normal_state_from_remote(local_info.remote_ref)
                if from_pair is not None:
                    if from_pair.processor > 0 or from_pair.local_path == rel_path:
                        # First condition: the pair is in process
                        # Second condition: a race condition
                        log.trace("Ignore creation or modification as the coming pair is being processed: %r",
                                  rel_path)
                        return
                    # If it is not at the origin anymore, magic teleportation, only on Windows ?
                    if not self.client.exists(from_pair.local_path):
                        log.debug('Move from %r to %r', from_pair.local_path, rel_path)
                        from_pair.local_state = 'moved'
                        self._dao.update_local_state(from_pair, self.client.get_info(rel_path))
                        moved = True
                    else:
                        # Possible move-then-copy case, NXDRIVE-471
                        doc_pair_full_path = self.client._abspath(rel_path)
                        doc_pair_creation_time = self.get_creation_time(doc_pair_full_path)
                        from_pair_full_path = self.client._abspath(from_pair.local_path)
                        from_pair_creation_time = self.get_creation_time(from_pair_full_path)
                        log.trace('doc_pair_full_path=%s, doc_pair_creation_time=%s, from_pair_full_path=%s, version=%d',
                                  doc_pair_full_path, doc_pair_creation_time, from_pair_full_path, from_pair.version)
                        # If the file at the original location is newer, it was moved
                        # to the new location earlier and then copied back (what else can it be?)
                        if (not from_pair_creation_time <= doc_pair_creation_time) and evt.event_type == 'created':
                            log.trace("Found moved file: from_pair: %f doc_pair:%f for %s",
                                      from_pair_creation_time, doc_pair_creation_time, doc_pair_full_path)
                            log.trace("Creation time are: from: %f | new: %f : boolean: %d",
                                      from_pair_creation_time, doc_pair_creation_time,
                                      (not from_pair_creation_time < doc_pair_creation_time))
                            from_pair.local_state = 'moved'
                            self._dao.update_local_state(from_pair, self.client.get_info(rel_path))
                            self._dao.insert_local_state(self.client.get_info(from_pair.local_path),
                                                         os.path.dirname(from_pair.local_path))
                            self.client.remove_remote_id(from_pair.local_path)
                            moved = True
                if self._windows:
                    self._win_lock.acquire()
                    try:
                        if local_info.remote_ref in self._delete_events:
                            log.debug('Found creation in delete event, handle move instead')
                            # Should be cleaned
                            if not moved:
                                doc_pair = self._delete_events[local_info.remote_ref][1]
                                doc_pair.local_state = 'moved'
                                self._dao.update_local_state(doc_pair, self.client.get_info(rel_path))
                            del self._delete_events[local_info.remote_ref]
                            return
                    finally:
                        self._win_lock.release()
                if from_pair is not None:
                    if moved:
                        # Stop the process here
                        return
                    log.debug('Copy paste from %r to %r', from_pair.local_path, rel_path)
            self._dao.insert_local_state(local_info, parent_rel_path)
            # An event can be missed inside a newly created folder as
            # watchdog will put the listener in place after it
            if local_info.folderish:
                self.scan_pair(rel_path)
                doc_pair = self._dao.get_state_from_local(rel_path)
                self._schedule_win_folder_scan(doc_pair)
            return
        log.debug('Unhandled case: %r %s %s', evt, rel_path, file_name)
    except Exception:
        log.error('Watchdog exception', exc_info=True)
    finally:
        self._end_action()
def _handle_queues(self):
    uploaded = False
    # Lock any documents
    while not self._lock_queue.empty():
        try:
            item = self._lock_queue.get_nowait()
        except Empty:
            break
        else:
            ref = item[0]
            log.trace('Handling DirectEdit lock queue ref: %r', ref)
        uid = ''
        dir_path = os.path.dirname(ref)
        try:
            uid, _, remote_client, _, _ = self._extract_edit_info(ref)
            if item[1] == 'lock':
                remote_client.lock(uid)
                self._local_client.set_remote_id(dir_path, '1', 'nxdirecteditlock')
                # Emit the lock signal only when the lock is really set
                self._manager.get_autolock_service().documentLocked.emit(os.path.basename(ref))
            else:
                purge = False
                try:
                    remote_client.unlock(uid)
                except NotFound:
                    purge = True
                if purge or item[1] == 'unlock_orphan':
                    path = self._local_client.abspath(ref)
                    log.trace('Remove orphan: %r', path)
                    self._manager.get_autolock_service().orphan_unlocked(path)
                    shutil.rmtree(path, ignore_errors=True)
                else:
                    self._local_client.remove_remote_id(dir_path, 'nxdirecteditlock')
                    # Emit the signal only when the unlock is done
                    self._manager.get_autolock_service().documentUnlocked.emit(os.path.basename(ref))
        except ThreadInterrupt:
            raise
        except:
            # Try again in 30s
            log.exception('Cannot %s document %r', item[1], ref)
            self.directEditLockError.emit(item[1], os.path.basename(ref), uid)
    # Unqueue any errors
    item = self._error_queue.get()
    while item:
        self._upload_queue.put(item.get())
        item = self._error_queue.get()
    # Handle the upload queue
    while not self._upload_queue.empty():
        try:
            ref = self._upload_queue.get_nowait()
        except Empty:
            break
        else:
            log.trace('Handling DirectEdit queue ref: %r', ref)
        uid, engine, remote_client, digest_algorithm, digest = self._extract_edit_info(ref)
        # Don't update if the digests are the same
        info = self._local_client.get_info(ref)
        try:
            current_digest = info.get_digest(digest_func=digest_algorithm)
            if current_digest == digest:
                continue
            start_time = current_milli_time()
            log.trace('Local digest: %s is different from the recorded one:'
                      ' %s - modification detected for %r',
                      current_digest, digest, ref)
            # TO_REVIEW Should check if server-side blob has changed ?
            # Update the document, should verify the remote hash NXDRIVE-187
            remote_info = remote_client.get_info(uid)
            if remote_info.digest != digest:
                # Conflict detected
                log.trace('Remote digest: %s is different from the recorded'
                          ' one: %s - conflict detected for %r',
                          remote_info.digest, digest, ref)
                self.directEditConflict.emit(os.path.basename(ref), ref, remote_info.digest)
                continue
            os_path = self._local_client.abspath(ref)
            log.debug('Uploading file %r', os_path)
            remote_client.stream_update(uid, os_path, apply_versioning_policy=True)
            # Update hash value
            dir_path = os.path.dirname(ref)
            self._local_client.set_remote_id(dir_path, current_digest, 'nxdirecteditdigest')
            self._last_action_timing = current_milli_time() - start_time
            self.editDocument.emit(remote_info)
        except ThreadInterrupt:
            raise
        except:
            # Try again in 30s
            log.exception('DirectEdit unhandled error for ref %r', ref)
            self._error_queue.push(ref, ref)
            continue
        uploaded = True
    if uploaded:
        log.debug('Emitting directEditUploadCompleted')
        self.directEditUploadCompleted.emit()
    while not self._watchdog_queue.empty():
        evt = self._watchdog_queue.get()
        self.handle_watchdog_event(evt)
def handle_watchdog_event(self, evt):
    log.trace("watchdog event: %r", evt)
    self._metrics['last_event'] = current_milli_time()
    self._action = Action("Handle watchdog event")
    if evt.event_type == 'moved':
        log.debug("Handling watchdog event [%s] on %s to %s", evt.event_type, evt.src_path, evt.dest_path)
    else:
        log.debug("Handling watchdog event [%s] on %r", evt.event_type, evt.src_path)
    try:
        src_path = normalize_event_filename(evt.src_path)
        rel_path = self.client.get_path(src_path)
        if len(rel_path) == 0 or rel_path == '/':
            self.handle_watchdog_root_event(evt)
            return
        file_name = os.path.basename(src_path)
        parent_path = os.path.dirname(src_path)
        parent_rel_path = self.client.get_path(parent_path)
        doc_pair = self._dao.get_state_from_local(rel_path)
        # Don't care about an ignored file, unless it is moved
        if (self.client.is_ignored(parent_rel_path, file_name)
                and evt.event_type != 'moved'):
            return
        if self.client.is_temp_file(file_name):
            return
        if doc_pair is not None:
            if doc_pair.pair_state == 'unsynchronized':
                log.debug("Ignoring %s as marked unsynchronized", doc_pair.local_path)
                return
            self._handle_watchdog_event_on_known_pair(doc_pair, evt, rel_path)
            return
        if evt.event_type == 'deleted':
            log.debug('Unknown pair deleted: %s', rel_path)
            return
        if (evt.event_type == 'moved'):
            dest_filename = os.path.basename(evt.dest_path)
            if (self.client.is_ignored(parent_rel_path, dest_filename)):
                return
            # Ignore normalization of the filename on the file system
            # See https://jira.nuxeo.com/browse/NXDRIVE-188
            if evt.dest_path == normalize_event_filename(evt.src_path):
                log.debug('Ignoring move from %r to normalized name: %r', evt.src_path, evt.dest_path)
                return
            src_path = normalize_event_filename(evt.dest_path)
            rel_path = self.client.get_path(src_path)
            local_info = self.client.get_info(rel_path, raise_if_missing=False)
            doc_pair = self._dao.get_state_from_local(rel_path)
            # If the file exists but not the pair
            if local_info is not None and doc_pair is None:
                rel_parent_path = self.client.get_path(os.path.dirname(src_path))
                if rel_parent_path == '':
                    rel_parent_path = '/'
                self._dao.insert_local_state(local_info, rel_parent_path)
                # An event can be missed inside a newly created folder as
                # watchdog will put the listener in place after it
                if local_info.folderish:
                    self._scan_recursive(local_info)
            return
        # If the pair is modified and not known, consider it as created
        if evt.event_type == 'created' or evt.event_type == 'modified':
            # If doc_pair is not None it means
            # the creation has been caught by the scan
            # As Windows sends a delete / create event for reparenting
            # Ignore .*.nxpart ?
            '''
            for deleted in deleted_files:
                if deleted.local_digest == digest:
                    # Move detected
                    log.info('Detected a file movement %r', deleted)
                    deleted.update_state('moved', deleted.remote_state)
                    deleted.update_local(self.client.get_info(rel_path))
                    continue
            '''
            local_info = self.client.get_info(rel_path)
            # This might be a move, but Windows doesn't emit this event...
            if local_info.remote_ref is not None:
                from_pair = self._dao.get_normal_state_from_remote(local_info.remote_ref)
                if from_pair is not None and (from_pair.processor > 0 or from_pair.local_path == rel_path):
                    # First condition: the pair is in process
                    # Second condition: a race condition
                    log.trace("Ignore creation or modification as the coming pair is being processed")
                    return
                if self._windows:
                    self._win_lock.acquire()
                    try:
                        if local_info.remote_ref in self._delete_events:
                            log.debug('Found creation in delete event, handle move instead')
                            doc_pair = self._delete_events[local_info.remote_ref][1]
                            doc_pair.local_state = 'moved'
                            self._dao.update_local_state(doc_pair, self.client.get_info(rel_path))
                            del self._delete_events[local_info.remote_ref]
                            return
                    finally:
                        self._win_lock.release()
            self._dao.insert_local_state(local_info, parent_rel_path)
            # An event can be missed inside a newly created folder as
            # watchdog will put the listener in place after it
            if local_info.folderish:
                self._scan_recursive(local_info)
            return
        log.debug('Unhandled case: %r %s %s', evt, rel_path, file_name)
    except Exception:
        log.error('Watchdog exception', exc_info=True)
    finally:
        self._end_action()
def _prepare_edit(self, server_url, doc_id, user=None, download_url=None):
    start_time = current_milli_time()
    engine = self._get_engine(server_url, user=user)
    if engine is None:
        values = dict()
        if user is None:
            values['user'] = '******'
        else:
            values['user'] = user
        values['server'] = server_url
        log.warn("No engine found for server_url=%s, user=%s, doc_id=%s",
                 server_url, user, doc_id)
        self._display_modal("DIRECT_EDIT_CANT_FIND_ENGINE", values)
        return
    # Get document info
    remote_client = engine.get_remote_doc_client()
    # Avoid any link with the engine, remote_doc are not cached so we can do that
    remote_client.check_suspended = self.stop_client
    info = remote_client.get_info(doc_id)
    filename = info.filename
    # Create local structure
    dir_path = os.path.join(self._folder, doc_id)
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    log.debug("Editing %r", filename)
    file_path = os.path.join(dir_path, filename)
    # Download the file
    url = None
    if download_url is not None:
        url = server_url
        if not url.endswith('/'):
            url += '/'
        url += download_url
    tmp_file = self._download_content(engine, remote_client, info, file_path, url=url)
    if tmp_file is None:
        log.debug("Download failed")
        return
    # Set the remote_id
    dir_path = self._local_client.get_path(os.path.dirname(file_path))
    self._local_client.set_remote_id(dir_path, doc_id)
    self._local_client.set_remote_id(dir_path, server_url, "nxdriveedit")
    if user is not None:
        self._local_client.set_remote_id(dir_path, user, "nxdriveedituser")
    if info.digest is not None:
        self._local_client.set_remote_id(dir_path, info.digest, "nxdriveeditdigest")
        # Set digest algorithm if not sent by the server
        digest_algorithm = info.digest_algorithm
        if digest_algorithm is None:
            digest_algorithm = guess_digest_algorithm(info.digest)
        self._local_client.set_remote_id(dir_path, digest_algorithm, "nxdriveeditdigestalgorithm")
    self._local_client.set_remote_id(dir_path, filename, "nxdriveeditname")
    # Rename to final filename
    # Under Windows first need to delete target file if exists, otherwise will get a 183 WindowsError
    if sys.platform == 'win32' and os.path.exists(file_path):
        os.unlink(file_path)
    os.rename(tmp_file, file_path)
    self._last_action_timing = current_milli_time() - start_time
    self.openDocument.emit(info)
    return file_path
def _handle_queues(self):
    uploaded = False
    # Lock any documents
    while (not self._lock_queue.empty()):
        try:
            item = self._lock_queue.get_nowait()
            ref = item[0]
            log.trace('Handling DriveEdit lock queue ref: %r', ref)
        except Empty:
            break
        uid = ""
        try:
            dir_path = os.path.dirname(ref)
            uid, engine, remote_client, _, _ = self._extract_edit_info(ref)
            if item[1] == 'lock':
                remote_client.lock(uid)
                self._local_client.set_remote_id(dir_path, "1", "nxdriveeditlock")
                # Emit the lock signal only when the lock is really set
                self._manager.get_autolock_service().documentLocked.emit(os.path.basename(ref))
            else:
                remote_client.unlock(uid)
                if item[1] == 'unlock_orphan':
                    path = self._local_client._abspath(ref)
                    log.trace("Remove orphan: %s", path)
                    self._manager.get_autolock_service().orphan_unlocked(path)
                    # Clean the folder
                    shutil.rmtree(self._local_client._abspath(path), ignore_errors=True)
                self._local_client.remove_remote_id(dir_path, "nxdriveeditlock")
                # Emit the signal only when the unlock is done - might want to avoid the call on orphan
                self._manager.get_autolock_service().documentUnlocked.emit(os.path.basename(ref))
        except Exception as e:
            # Try again in 30s
            log.debug("Can't %s document '%s': %r", item[1], ref, e, exc_info=True)
            self.driveEditLockError.emit(item[1], os.path.basename(ref), uid)
    # Unqueue any errors
    item = self._error_queue.get()
    while (item is not None):
        self._upload_queue.put(item.get())
        item = self._error_queue.get()
    # Handle the upload queue
    while (not self._upload_queue.empty()):
        try:
            ref = self._upload_queue.get_nowait()
            log.trace('Handling DriveEdit queue ref: %r', ref)
        except Empty:
            break
        uid, engine, remote_client, digest_algorithm, digest = self._extract_edit_info(ref)
        # Don't update if the digests are the same
        info = self._local_client.get_info(ref)
        try:
            current_digest = info.get_digest(digest_func=digest_algorithm)
            if current_digest == digest:
                continue
            start_time = current_milli_time()
            log.trace("Local digest: %s is different from the recorded one: %s - modification detected for %r",
                      current_digest, digest, ref)
            # TO_REVIEW Should check if server-side blob has changed ?
            # Update the document - should verify the remote hash - NXDRIVE-187
            remote_info = remote_client.get_info(uid)
            if remote_info.digest != digest:
                # Conflict detected
                log.trace("Remote digest: %s is different from the recorded one: %s - conflict detected for %r",
                          remote_info.digest, digest, ref)
                self.driveEditConflict.emit(os.path.basename(ref), ref, remote_info.digest)
                continue
            log.debug('Uploading file %s', self._local_client._abspath(ref))
            remote_client.stream_update(uid, self._local_client._abspath(ref), apply_versioning_policy=True)
            # Update hash value
            dir_path = os.path.dirname(ref)
            self._local_client.set_remote_id(dir_path, current_digest, 'nxdriveeditdigest')
            self._last_action_timing = current_milli_time() - start_time
            self.editDocument.emit(remote_info)
        except ThreadInterrupt:
            raise
        except Exception as e:
            # Try again in 30s
            log.trace("Exception on drive edit: %r", e, exc_info=True)
            self._error_queue.push(ref, ref)
            continue
        uploaded = True
    if uploaded:
        log.debug('Emitting driveEditUploadCompleted')
        self.driveEditUploadCompleted.emit()
def _handle_watchdog_event_on_known_pair(self, doc_pair, evt, rel_path):
    log.trace("watchdog event %r on known pair: %r", evt, doc_pair)
    if (evt.event_type == 'moved'):
        # Ignore move to Office tmp file
        src_filename = os.path.basename(evt.src_path)
        dest_filename = os.path.basename(evt.dest_path)
        if dest_filename.endswith('.tmp'):
            if dest_filename.startswith('~') or len(dest_filename) == 12:
                # 12 is for Office 813DEFA7.tmp
                log.debug('Ignoring Office tmp file: %r', evt.dest_path)
                return
        # Ignore normalization of the filename on the file system
        # See https://jira.nuxeo.com/browse/NXDRIVE-188
        if evt.dest_path == normalize_event_filename(evt.src_path):
            log.debug('Ignoring move from %r to normalized name: %r', evt.src_path, evt.dest_path)
            return
        src_path = normalize_event_filename(evt.dest_path)
        rel_path = self.client.get_path(src_path)
        # Office weird replacement handling
        if is_office_file(dest_filename):
            pair = self._dao.get_state_from_local(rel_path)
            remote_ref = self.client.get_remote_id(rel_path)
            if pair is not None and pair.remote_ref == remote_ref:
                local_info = self.client.get_info(rel_path, raise_if_missing=False)
                if local_info is not None:
                    digest = local_info.get_digest()
                    # Drop event if the digest hasn't changed, can be the case
                    # if only file permissions have been updated
                    if not doc_pair.folderish and pair.local_digest == digest:
                        log.trace('Dropping watchdog event [%s] as digest has not changed for %s',
                                  evt.event_type, rel_path)
                        self._dao.remove_state(doc_pair)
                        return
                    pair.local_digest = digest
                    pair.local_state = 'modified'
                    self._dao.update_local_state(pair, local_info)
                    self._dao.remove_state(doc_pair)
                    log.debug("Office substitution file: remove pair(%r) mark(%r) as modified", doc_pair, pair)
                    return
        local_info = self.client.get_info(rel_path, raise_if_missing=False)
        if local_info is not None:
            old_local_path = None
            rel_parent_path = self.client.get_path(os.path.dirname(src_path))
            if rel_parent_path == '':
                rel_parent_path = '/'
            # Ignore inner movement
            remote_parent_ref = self.client.get_remote_id(rel_parent_path)
            if (doc_pair.remote_name == local_info.name
                    and doc_pair.remote_parent_ref == remote_parent_ref):
                # The pair was moved but it has been canceled manually
                doc_pair.local_state = 'synchronized'
            elif not (local_info.name == doc_pair.local_name
                      and doc_pair.remote_parent_ref == remote_parent_ref):
                log.debug("Detect move for %r (%r)", local_info.name, doc_pair)
                if doc_pair.local_state != 'created':
                    doc_pair.local_state = 'moved'
                    old_local_path = doc_pair.local_path
            self._dao.update_local_state(doc_pair, local_info, versionned=False)
            if self._windows and old_local_path is not None:
                self._win_lock.acquire()
                try:
                    if old_local_path in self._folder_scan_events:
                        log.debug('Update queue of folders to scan: move from %r to %r', old_local_path, rel_path)
                        del self._folder_scan_events[old_local_path]
                        self._folder_scan_events[rel_path] = (
                            mktime(local_info.last_modification_time.timetuple()), doc_pair)
                finally:
                    self._win_lock.release()
        return
    if doc_pair.processor > 0:
        log.trace("Don't update as in process %r", doc_pair)
        return
    if evt.event_type == 'deleted':
        # Delay the delete event on Windows
        if self._windows:
            self._win_lock.acquire()
            log.debug('Add pair to delete events: %r', doc_pair)
            try:
                self._delete_events[doc_pair.remote_ref] = (current_milli_time(), doc_pair)
            finally:
                self._win_lock.release()
        else:
            # Can be an issue on a case-insensitive filesystem
            if self.client.exists(doc_pair.local_path):
                remote_id = self.client.get_remote_id(doc_pair.local_path)
                if remote_id == doc_pair.remote_ref or remote_id is None:
                    # This happens on update; don't do anything
                    return
            self._handle_watchdog_delete(doc_pair)
        return
    local_info = self.client.get_info(rel_path, raise_if_missing=False)
    if local_info is not None:
        if doc_pair.local_state == 'synchronized':
            digest = local_info.get_digest()
            # Drop event if the digest hasn't changed, can be the case
            # if only file permissions have been updated
            if not doc_pair.folderish and doc_pair.local_digest == digest:
                log.debug('Dropping watchdog event [%s] as digest has not changed for %s',
                          evt.event_type, rel_path)
                return
            doc_pair.local_digest = digest
            doc_pair.local_state = 'modified'
        queue = not (evt.event_type == 'modified' and doc_pair.folderish and doc_pair.local_state == 'modified')
        # No need to change anything on the sync folder
        if (evt.event_type == 'modified' and doc_pair.folderish and doc_pair.local_state == 'modified'):
            doc_pair.local_state = 'synchronized'
        self._dao.update_local_state(doc_pair, local_info, queue=queue)