def tearDown(self):
    # Force to clean all observers
    self.controller_1.synchronizer.stop_observers(raise_on_error=False)
    self.controller_2.synchronizer.stop_observers(raise_on_error=False)

    # Note that unbinding a server revokes the related token if needed,
    # see Controller.unbind_server()
    self.controller_1.unbind_all()
    self.controller_2.unbind_all()

    # No need to revoke tokens for the file system remote clients
    # since they use the same users as the remote document clients
    self.root_remote_client.execute("NuxeoDrive.TearDownIntegrationTests")

    if os.path.exists(self.upload_tmp_dir):
        shutil.rmtree(safe_long_path(self.upload_tmp_dir))

    if os.path.exists(self.local_test_folder_1):
        self.controller_1.dispose()
        try:
            shutil.rmtree(safe_long_path(self.local_test_folder_1))
        except Exception:
            pass

    if os.path.exists(self.local_test_folder_2):
        self.controller_2.dispose()
        try:
            shutil.rmtree(safe_long_path(self.local_test_folder_2))
        except Exception:
            pass
def _abspath(self, ref):
    """Absolute path on the operating system"""
    if not ref.startswith(u'/'):
        raise ValueError("LocalClient expects ref starting with '/'")
    path_suffix = ref[1:].replace('/', os.path.sep)
    path = normalized_path(os.path.join(self.base_folder, path_suffix))
    return safe_long_path(path)
def test_long_path(self):
    self.engine_1.start()
    self.wait_sync(wait_for_async=True)
    parent_path = os.path.join(self.local_1.abspath('/'),
                               FOLDER_A, FOLDER_B, FOLDER_C, FOLDER_D)
    log.info("Creating folder with path: %s", parent_path)
    if sys.platform == 'win32' and not os.path.exists(parent_path):
        log.debug('Add \\\\?\\ prefix to path %r', parent_path)
        parent_path = safe_long_path(parent_path)
    os.makedirs(parent_path)

    if sys.platform == 'win32':
        log.info("Convert path of FOLDER_D\File2.txt to short path format")
        parent_path = win32api.GetShortPathName(parent_path)

    new_file = os.path.join(parent_path, "File2.txt")
    log.info("Creating file with path: %s", new_file)
    with open(new_file, "w") as f:
        f.write("Hello world")
    self.wait_sync(wait_for_async=True, timeout=45, fail_if_timeout=False)

    remote_children_of_c = self.remote_1.get_children_info(self.folder_c)
    children_names = [item.name for item in remote_children_of_c]
    log.warn("Verify if FOLDER_D is uploaded to server")
    self.assertIn(FOLDER_D, children_names)

    folder_d = [item.uid for item in remote_children_of_c
                if item.name == FOLDER_D][0]
    remote_children_of_d = self.remote_1.get_children_info(folder_d)
    children_names = [item.name for item in remote_children_of_d]
    log.warn("Verify if FOLDER_D\File2.txt is uploaded to server")
    self.assertIn('File2.txt', children_names)
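# A minimal sketch of the safe_long_path() helper that every snippet here
# relies on. This is an assumption inferred from the "Add \\?\ prefix to path"
# log message in test_long_path() above: on Windows, paths longer than
# MAX_PATH (260 characters) need the extended-length '\\?\' prefix to be
# accepted by the Win32 API, while other platforms can use the path unchanged.
# The real helper's exact signature and edge-case handling may differ.
import os
import sys

def safe_long_path(path):
    """Return an extended-length path on Windows, the path unchanged elsewhere."""
    if sys.platform == 'win32' and not path.startswith(u'\\\\?\\'):
        # Prepend the \\?\ prefix to lift the 260-character path limit
        path = u'\\\\?\\' + os.path.abspath(path)
    return path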
def get_info(self, ref, raise_if_missing=True):
    os_path = self._abspath(ref)
    if not os.path.exists(os_path):
        if raise_if_missing:
            raise NotFound("Could not find file '%s' under '%s'" % (
                ref, self.base_folder))
        else:
            return None
    folderish = os.path.isdir(os_path)
    stat_info = os.stat(os_path)
    if folderish:
        size = 0
    else:
        size = stat_info.st_size
    mtime = datetime.utcfromtimestamp(stat_info.st_mtime)
    # Strip the base folder to get a client-relative path, unix style
    path = u'/' + os_path[len(safe_long_path(self.base_folder)) + 1:]
    path = path.replace(os.path.sep, u'/')
    # TODO Do we need to load it every time?
    remote_ref = self.get_remote_id(ref)
    # On unix we could use the inode for file move detection but that won't
    # work on Windows. To reduce complexity of the code and the possibility
    # to have Windows specific bugs, let's not use the unix inode at all.
    # uid = str(stat_info.st_ino)
    return FileInfo(self.base_folder, path, folderish, mtime,
                    digest_func=self._digest_func,
                    check_suspended=self.check_suspended,
                    remote_ref=remote_ref,
                    size=size)
def clean_dir(_dir, retry=1, max_retries=5):
    # type: (unicode, int, int) -> None

    if not os.path.exists(_dir):
        return

    log.debug('%d/%d Removing directory %r', retry, max_retries, _dir)
    to_remove = safe_long_path(_dir)
    test_data = os.environ.get('TEST_SAVE_DATA')
    if test_data:
        shutil.move(to_remove, test_data)
        return

    try:
        for dirpath, folders, filenames in os.walk(to_remove):
            for folder in folders:
                BaseClient.unset_path_readonly(os.path.join(dirpath, folder))
            for filename in filenames:
                BaseClient.unset_path_readonly(os.path.join(dirpath, filename))
        shutil.rmtree(to_remove)
    except Exception:
        if retry < max_retries:
            time.sleep(2)
            clean_dir(_dir, retry=retry + 1)
def tearDown(self):
    self.controller_1.unbind_all()
    self.controller_2.unbind_all()
    self.remote_document_client_1.revoke_token()
    self.remote_document_client_2.revoke_token()

    # No need to revoke tokens for the file system remote clients
    # since they use the same users as the remote document clients
    self.root_remote_client.execute(u"NuxeoDrive.TearDownIntegrationTests")
    self.root_remote_client.revoke_token()

    if os.path.exists(self.local_test_folder_1):
        self.controller_1.dispose()
        shutil.rmtree(safe_long_path(self.local_test_folder_1))

    if os.path.exists(self.local_test_folder_2):
        self.controller_2.dispose()
        shutil.rmtree(safe_long_path(self.local_test_folder_2))
def clean_dir(_dir):
    if os.path.exists(_dir):
        to_remove = safe_long_path(_dir)
        try:
            for dirpath, dirnames, filenames in os.walk(to_remove):
                for dirname in dirnames:
                    BaseClient.unset_path_readonly(os.path.join(dirpath, dirname))
                for filename in filenames:
                    BaseClient.unset_path_readonly(os.path.join(dirpath, filename))
            shutil.rmtree(to_remove)
        except Exception as e:
            if isinstance(e, WindowsError):
                # Fall back to the shell, quoting the path in case it contains spaces
                os.system('rmdir /S /Q "%s"' % to_remove)
def get_digest(self):
    """Lazy computation of the digest"""
    if self.folderish:
        return None
    digester = getattr(hashlib, self._digest_func, None)
    if digester is None:
        raise ValueError('Unknown digest method: ' + self._digest_func)
    h = digester()
    with open(safe_long_path(self.filepath), 'rb') as f:
        while True:
            buffer_ = f.read(BUFFER_SIZE)
            if not buffer_:
                break
            h.update(buffer_)
    return h.hexdigest()
def stop(self):
    """Stop the Nuxeo Drive synchronization thread

    As the process asking the synchronization to stop might not be the
    same as the process running the synchronization (especially when used
    from the command line without the graphical user interface and its
    tray icon menu), we use a simple empty marker file as a cross-platform
    way to pass the stop message between the two.

    """
    pid = self.synchronizer.check_running(process_name="sync")
    if pid is not None:
        # Create a stop file marker for the running synchronization
        # process
        log.info("Telling synchronization process %d to stop." % pid)
        stop_file = os.path.join(self.config_folder, "stop_%d" % pid)
        open(safe_long_path(stop_file), 'wb').close()
    else:
        log.info("No running synchronization process to stop.")
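# Hedged sketch of the consumer side of the stop-marker protocol described in
# the stop() docstring above: the running synchronization process would
# periodically look for a "stop_<pid>" file in its configuration folder and
# shut down when one appears. The function name should_stop() and its shape
# are hypothetical; only the marker-file naming and the config_folder location
# come from the snippet above.
import os

def should_stop(config_folder, pid):
    """Return True if a stop marker was dropped for this process, consuming it."""
    stop_file = safe_long_path(os.path.join(config_folder, "stop_%d" % pid))
    if os.path.exists(stop_file):
        # Remove the marker so a later synchronization run is not affected
        os.remove(stop_file)
        return True
    return False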
def get_digest(self):
    """Lazy computation of the digest"""
    if self.folderish:
        return None
    digester = getattr(hashlib, self._digest_func, None)
    if digester is None:
        raise ValueError('Unknown digest method: ' + self._digest_func)
    h = digester()
    with open(safe_long_path(self.filepath), 'rb') as f:
        while True:
            # Check if synchronization thread was suspended
            if self.check_suspended is not None:
                self.check_suspended('Digest computation: %s' % self.filepath)
            buffer_ = f.read(FILE_BUFFER_SIZE)
            if not buffer_:
                break
            h.update(buffer_)
    return h.hexdigest()
def clean_dir(_dir: Path, retry: int = 1, max_retries: int = 5) -> None:
    _dir = safe_long_path(_dir)
    if not _dir.exists():
        return

    test_data = os.environ.get("TEST_SAVE_DATA")
    if test_data:
        shutil.move(_dir, test_data)
        return

    try:
        for path, folders, filenames in os.walk(_dir):
            dirpath = normalized_path(path)
            for folder in folders:
                unset_path_readonly(dirpath / folder)
            for filename in filenames:
                unset_path_readonly(dirpath / filename)
        shutil.rmtree(_dir)
    except Exception:
        if retry < max_retries:
            sleep(2)
            clean_dir(_dir, retry=retry + 1)
def get_digest(self, digest_func=None):
    """Lazy computation of the digest"""
    if self.folderish:
        return None

    digest_func = digest_func if digest_func is not None else self._digest_func
    digester = getattr(hashlib, digest_func, None)
    if digester is None:
        raise ValueError("Unknown digest method: " + digest_func)

    h = digester()
    try:
        with open(safe_long_path(self.filepath), "rb") as f:
            while True:
                # Check if synchronization thread was suspended
                if self.check_suspended is not None:
                    self.check_suspended("Digest computation: %s" % self.filepath)
                buffer_ = f.read(FILE_BUFFER_SIZE)
                if not buffer_:
                    break
                h.update(buffer_)
    except IOError:
        return UNACCESSIBLE_HASH
    return h.hexdigest()
def get_digest(self, digest_func=None):
    """Lazy computation of the digest"""
    if self.folderish:
        return None

    digest_func = digest_func if digest_func is not None else self._digest_func
    digester = getattr(hashlib, digest_func, None)
    if digester is None:
        raise ValueError('Unknown digest method: ' + digest_func)

    h = digester()
    try:
        with open(safe_long_path(self.filepath), 'rb') as f:
            while True:
                # Check if synchronization thread was suspended
                if self.check_suspended is not None:
                    self.check_suspended('Digest computation: %s' % self.filepath)
                buffer_ = f.read(FILE_BUFFER_SIZE)
                if not buffer_:
                    break
                h.update(buffer_)
    except IOError:
        return UNACCESSIBLE_HASH
    return h.hexdigest()