def run(self):
    """Entry point of the forked activate-changes helper process.

    Detaches from the apache parent process, drops inherited resources and
    then performs the actual activation via _do_run(). Errors are logged
    instead of propagating, because there is no caller left to handle them.
    """
    # Ensure this process is not detected as apache process by the apache init script
    daemon.set_procname("cmk-activate-changes")

    # Detach from parent (apache) -> Remain running when apache is restarted
    os.setsid()

    # Cleanup existing livestatus connections (may be opened later when needed)
    cmk.gui.sites.disconnect()

    # Cleanup file descriptors inherited from apache. os.closerange()
    # ignores errors for descriptors that are not open, which matches the
    # previous manual loop that swallowed EBADF.
    os.closerange(3, 256)

    # Reinitialize logging targets
    log.init_logging()

    try:
        self._do_run()
    except Exception:
        # Logger.exception() requires a message argument; calling it with
        # no arguments would raise a TypeError inside the handler and the
        # original error would be lost.
        logger.exception("error activating changes")
def run(self):
    # type: () -> None
    """Main of the forked background job process.

    Detaches from the parent process, drops the inherited standard streams
    and file descriptors, then executes the job function while keeping the
    persisted job status up to date.
    """
    # Become session leader so the job survives a restart of the parent and
    # give the process a recognizable name.
    os.setsid()
    daemon.set_procname(six.ensure_binary(BackgroundJobDefines.process_name))
    sys.stdin.close()
    sys.stdout.close()
    # NOTE: sys.stderr is intentionally left open. With Python 3 under
    # mod_wsgi, closing it made the exit functions fail with
    # "RuntimeError: log object has expired" (raised from the logging
    # shutdown in multiprocessing's _bootstrap). Keep it open until it has
    # been verified that closing is safe.
    #sys.stderr.close()
    daemon.closefrom(0)

    try:
        self.initialize_environment()
        self._logger.log(VERBOSE, "Initialized background job (Job ID: %s)",
                         self._job_parameters["job_id"])

        self._jobstatus.update_status({
            "pid": self.pid,
            "state": JobStatusStates.RUNNING,
        })

        # Run the job payload
        self._execute_function()

        # Derive the final state from the persisted job log info
        status = self._jobstatus.get_status_from_file()
        had_exception = bool(status.get("loginfo", {}).get("JobException"))
        self._jobstatus.update_status({
            "state": JobStatusStates.EXCEPTION if had_exception else JobStatusStates.FINISHED,
            "duration": time.time() - status["started"],
        })
    except MKTerminate:
        self._logger.warning("Job was stopped")
        self._jobstatus.update_status({"state": JobStatusStates.STOPPED})
    except Exception:
        self._logger.error("Exception while preparing background function environment",
                           exc_info=True)
        self._jobstatus.update_status({"state": JobStatusStates.EXCEPTION})
def _detach_from_parent(self):
    # type: () -> None
    """Detach the background job process from its parent.

    Makes the process a session leader, sets a recognizable process name
    and drops the inherited standard input and file descriptors.
    """
    # Detach from parent and cleanup inherited file descriptors
    os.setsid()
    daemon.set_procname(BackgroundJobDefines.process_name.encode())
    sys.stdin.close()
    # NOTE
    # When forking off from an mod_wsgi process, these handles are not the standard stdout and
    # stderr handles but rather proxies to internal data-structures of mod_wsgi. If these are
    # closed then mod_wsgi will trigger a "RuntimeError: log object has expired" if you want to
    # use them again, as this is considered a programming error. The logging framework
    # installs an "atexit" callback which flushes the logs upon the process exiting. This
    # tries to write to the now closed fake stdout/err handles and triggers the RuntimeError.
    # This will happen even if sys.stdout and sys.stderr are reset to their originals because
    # the logging.StreamHandler will still hold a reference to the mod_wsgi stdout/err handles.
    # sys.stdout.close()
    # sys.stderr.close()
    daemon.closefrom(0)
def run(self):
    # type: () -> None
    """Main of the forked background job process.

    Detaches from the parent process, cleans up inherited file descriptors,
    then executes the job function while keeping the persisted job status
    up to date.
    """
    # Detach from parent and cleanup inherited file descriptors
    os.setsid()
    daemon.set_procname(six.ensure_binary(BackgroundJobDefines.process_name))
    sys.stdin.close()
    sys.stdout.close()
    # NOTE: Do not close sys.stderr here. Under mod_wsgi the standard
    # streams are proxies to internal mod_wsgi data structures; closing
    # them makes the logging atexit flush fail on process exit with
    # "RuntimeError: log object has expired".
    daemon.closefrom(0)

    try:
        self.initialize_environment()
        self._logger.log(VERBOSE, "Initialized background job (Job ID: %s)",
                         self._job_parameters["job_id"])
        self._jobstatus.update_status({
            "pid": self.pid,
            "state": JobStatusStates.RUNNING,
        })

        # The actual function call
        self._execute_function()

        # Final status update: a logged JobException marks the job as failed
        job_status = self._jobstatus.get_status_from_file()
        if job_status.get("loginfo", {}).get("JobException"):
            final_state = JobStatusStates.EXCEPTION
        else:
            final_state = JobStatusStates.FINISHED

        self._jobstatus.update_status({
            "state": final_state,
            "duration": time.time() - job_status["started"],
        })
    except MKTerminate:
        self._logger.warning("Job was stopped")
        self._jobstatus.update_status({"state": JobStatusStates.STOPPED})
    except Exception:
        self._logger.error("Exception while preparing background function environment",
                           exc_info=True)
        self._jobstatus.update_status({"state": JobStatusStates.EXCEPTION})
def run(self):
    """Main of the forked background job process.

    Detaches from the parent process, drops the inherited standard streams
    and file descriptors, then executes the job function while keeping the
    persisted job status up to date.
    """
    # Become session leader so the job survives a restart of the parent and
    # give the process a recognizable name.
    os.setsid()
    daemon.set_procname(BackgroundJobDefines.process_name)
    sys.stdin.close()
    sys.stdout.close()
    sys.stderr.close()
    daemon.closefrom(0)

    try:
        self.initialize_environment()
        self._logger.verbose("Initialized background job (Job ID: %s)" %
                             self._job_parameters["job_id"])

        self._jobstatus.update_status({
            "pid": self.pid,
            "state": JobStatus.state_running,
        })

        # Run the job payload
        self._execute_function()

        # Derive the final state from the persisted job log info
        status = self._jobstatus.get_status()
        had_exception = bool(status.get("loginfo", {}).get("JobException"))
        self._jobstatus.update_status({
            "state": JobStatus.state_exception if had_exception else JobStatus.state_finished,
            "duration": time.time() - status["started"],
        })
    except MKTerminate:
        self._logger.warning("Job was stopped")
        self._jobstatus.update_status({"state": JobStatus.state_stopped})
    except Exception:
        self._logger.error("Exception while preparing background function environment",
                           exc_info=True)
        self._jobstatus.update_status({"state": JobStatus.state_exception})