def _start_background_subprocess(self, job_parameters):
    """Intermediate child process: drop inherited locks, spawn the real
    background process and terminate immediately via os._exit()."""
    try:
        # Even this short living intermediate process needs to get rid of
        # inherited file descriptors as soon as possible, to prevent race
        # conditions with locks inherited from the parent process. The
        # job_initialization.lock caused exactly such a problem when
        # background jobs were initialized while apache was being
        # stopped / restarted.
        #
        # daemon.closefrom() caused problems during the tests, so instead
        # of closing all fds only the lock fds are released explicitly —
        # this also keeps the logging related fds open.
        store.release_all_locks()
        self._jobstatus.update_status({"ppid": os.getpid()})
        process = self._background_process_class(job_parameters)
        process.start()
    except Exception as exc:
        self._logger.error("Error while starting subprocess: %s", exc, exc_info=True)
        # _exit() (not sys.exit()) so no parent cleanup handlers run twice.
        os._exit(1)
    os._exit(0)
def test_release_all_locks_already_closed(locked_file, path_type):
    """release_all_locks() must cope with a lock fd that was closed behind
    the lock registry's back."""
    lock_path = path_type(locked_file)
    assert store.have_lock(lock_path) is False
    store.aquire_lock(lock_path)
    assert store.have_lock(lock_path) is True

    # Close the underlying fd directly, then release everything.
    lock_fd = store._locks._get_lock(str(lock_path))
    os.close(lock_fd)
    store.release_all_locks()
    assert store.have_lock(lock_path) is False
def test_release_all_locks_already_closed(locked_file, path_type):
    """Releasing all locks must not blow up when a lock fd was already
    closed out-of-band."""
    lock_path = path_type(locked_file)
    assert store.have_lock(lock_path) is False
    store.aquire_lock(lock_path)
    assert store.have_lock(lock_path) is True

    # Close the fd behind the registry's back before releasing.
    fd = store._locks._get_lock(str(lock_path))  # pylint:disable=no-value-for-parameter
    os.close(fd)
    store.release_all_locks()
    assert store.have_lock(lock_path) is False
def test_release_all_locks_already_closed(tmp_path):
    """release_all_locks() must tolerate an already-closed lock fd."""
    lock_file = tmp_path / "locked_file"
    lock_file.write_text(u"", encoding="utf-8")
    lock_path = str(lock_file)

    assert store.have_lock(lock_path) is False
    store.aquire_lock(lock_path)
    assert store.have_lock(lock_path) is True

    # Close the fd directly, bypassing the store bookkeeping.
    os.close(store._acquired_locks[lock_path])
    store.release_all_locks()
    assert store.have_lock(lock_path) is False
def test_release_all_locks_already_closed(tmpdir):
    """release_all_locks() must tolerate an already-closed lock fd."""
    lock_file = tmpdir.join("locked_file")
    lock_file.write("")
    lock_path = str(lock_file)

    assert store.have_lock(lock_path) is False
    store.aquire_lock(lock_path)
    assert store.have_lock(lock_path) is True

    # Close the fd directly, bypassing the store bookkeeping.
    os.close(store._acquired_locks[lock_path])
    store.release_all_locks()
    assert store.have_lock(lock_path) is False
def cleanup_locks():
    """Release all memorized locks at the end of the block.

    This is a hack which should be removed. In order to make this happen,
    every lock shall only be used as a context-manager.
    """
    try:
        yield
    finally:
        # Run even when the wrapped block raised, so no lock outlives
        # the request.
        try:
            store.release_all_locks()
        except Exception:
            # Log with traceback, then re-raise: a failed release must
            # not be swallowed silently.
            logger.exception("error releasing locks after WSGI request")
            raise
def test_release_all_locks(tmp_path):
    """release_all_locks() drops every lock acquired so far."""
    lock_paths = []
    for name in ("locked_file1", "locked_file2"):
        lock_file = tmp_path / name
        lock_file.write_text(u"", encoding="utf-8")
        lock_paths.append(str(lock_file))
    path1, path2 = lock_paths

    assert store.have_lock(path1) is False
    store.aquire_lock(path1)
    assert store.have_lock(path1) is True
    assert store.have_lock(path2) is False
    store.aquire_lock(path2)
    assert store.have_lock(path2) is True

    store.release_all_locks()
    assert store.have_lock(path1) is False
    assert store.have_lock(path2) is False
def test_release_all_locks(tmpdir):
    """release_all_locks() drops every lock acquired so far."""
    lock_paths = []
    for name in ("locked_file1", "locked_file2"):
        lock_file = tmpdir.join(name)
        lock_file.write("")
        lock_paths.append(str(lock_file))
    path1, path2 = lock_paths

    assert store.have_lock(path1) is False
    store.aquire_lock(path1)
    assert store.have_lock(path1) is True
    assert store.have_lock(path2) is False
    store.aquire_lock(path2)
    assert store.have_lock(path2) is True

    store.release_all_locks()
    assert store.have_lock(path1) is False
    assert store.have_lock(path2) is False
def cleanup_locks():
    """Release all memorized locks at the end of the block.

    Bug fix: the previous version released locks only on the success
    path — if the wrapped block raised, release_all_locks() was never
    reached and the locks leaked until process exit. The release now
    happens in a ``finally`` clause so it runs on both the normal and
    the exception path, matching the hardened variant of this helper.
    """
    try:
        yield
    finally:
        store.release_all_locks()
def initialize_environment(self):
    """Prepare the job environment and re-acquire the WATO lock when the
    job status requests it."""
    super(WatoBackgroundProcess, self).initialize_environment()

    status = self._jobstatus.get_status_from_file()
    if status.get("lock_wato"):
        # Locks inherited from the parent process are useless here:
        # drop them all and take a fresh exclusive WATO lock.
        store.release_all_locks()
        store.lock_exclusive()