def main():
    options = parseOptions()
    if options.pidFile:
        from daemon import pidlockfile
        from lockfile import LockFailed
        pidfile = pidlockfile.PIDLockFile(options.pidFile)
        try:
            # Probe the lock up front so a failure surfaces before we fork.
            pidfile.acquire(timeout=1.0)
            pidfile.release()
        except LockFailed:
            raise
    else:
        pidfile = None

    if options.daemonMode:
        from daemon import DaemonContext
        with DaemonContext(pidfile=pidfile,
                           working_directory=options.working_directory):
            run(options)
    elif pidfile:
        with pidfile:
            run(options)
    else:
        run(options)
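The acquire/release probe above is a useful pre-flight check: it surfaces a held lock while stderr is still attached to the console, before DaemonContext forks. A minimal self-contained sketch of the same pattern (the pidfile path and run() body are placeholders, and it assumes the older python-daemon layout used throughout these snippets, where pidlockfile ships inside the daemon package and a held lock raises lockfile.LockTimeout):

import sys

from daemon import DaemonContext, pidlockfile
from lockfile import LockTimeout


def run():
    pass  # placeholder for the real service loop


if __name__ == "__main__":
    pidfile = pidlockfile.PIDLockFile("/tmp/example.pid")  # hypothetical path
    try:
        # Probe before forking so the error reaches the console.
        pidfile.acquire(timeout=1.0)
        pidfile.release()
    except LockTimeout:
        sys.exit("already running (lock held on %s)" % pidfile.path)
    with DaemonContext(pidfile=pidfile):
        run()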
def setup_daemon(options, settings, pidfile):
    logfile = options.logfile
    daemon_settings = settings["daemon"]
    # Map the configured level name onto the logging constants.
    level = logging.DEBUG
    if daemon_settings["logging_level"] == "DEBUG":
        level = logging.DEBUG
    if daemon_settings["logging_level"] == "WARNING":
        level = logging.WARNING
    if daemon_settings["logging_level"] == "ERROR":
        level = logging.ERROR
    if daemon_settings["logging_level"] == "INFO":
        level = logging.INFO
    client_logger, handler = get_daemon_logger(
        None if options.foreground else logfile,
        loglevel=level,
        log_format=(None if options.journal else
                    '%(asctime)s:%(levelname)s:%(name)s:%(message)s'),
        journal=options.journal)
    # noinspection PyArgumentList
    lockfile = pidlockfile.PIDLockFile(pidfile)
    if lockfile.is_locked():
        logging.error("PIDFile %s already locked", pidfile)
        sys.exit(os.EX_OSERR)
    context = daemon.DaemonContext(
        detach_process=not options.foreground,
        working_directory=os.getcwd(),
        pidfile=lockfile,
        files_preserve=None if options.journal else [handler.stream],
        stderr=sys.stderr if options.journal else handler.stream,
        stdout=sys.stdout if options.journal else handler.stream)
    return context
def daemonize(name, main):
    run_dir = user_data_dir + sep + 'run'
    pidf = pidlockfile.PIDLockFile(run_dir + sep + name + '.pid')
    # Probe the lock first: acquire/release fails fast while the error
    # can still reach the console.
    pidf.acquire(timeout=1.0)
    pidf.release()
    with DaemonContext(pidfile=pidf):
        _main_wrapper(name, main)
def set_pidlockfile_scenario(testcase, scenario_name, clear_tracker=True):
    """ Set up the test case to the specified scenario. """
    testcase.scenario = testcase.pidlockfile_scenarios[scenario_name]
    setup_lockfile_method_mocks(
        testcase, testcase.scenario, "LinkLockFile")
    testcase.pidlockfile_args = dict(path=testcase.scenario['path'])
    testcase.test_instance = pidlockfile.PIDLockFile(
        **testcase.pidlockfile_args)
    if clear_tracker:
        testcase.mock_tracker.clear()
def run(self):
    logger.info('Startup!')
    self.read_patterns()
    self.read_config_dir()
    pid_dirname = os.path.dirname(self.logshipper_config.pidfile)
    if not os.path.exists(pid_dirname):
        logger.debug('Creating pid dir at %r', pid_dirname)
        os.mkdir(pid_dirname, 0o750)
    pidfile = pidlockfile.PIDLockFile(self.logshipper_config.pidfile)
    if pidfile.is_locked():
        lockfile_pid = pidfile.read_pid()
        my_pid = os.getpid()
        if lockfile_pid != my_pid:
            raise RuntimeError(
                'There is already a pid file at %s with pid %s.'
                % (self.logshipper_config.pidfile, lockfile_pid))
    context = daemon.DaemonContext(
        detach_process=self.foreground is False,
        pidfile=pidfile,
        uid=username2uid(self.logshipper_config.user),
        gid=groupname2gid(self.logshipper_config.group),
        stdout=sys.stdout if self.foreground is True else None,
        stderr=sys.stderr if self.foreground is True else None)
    # Keep every log handler's descriptor open across the daemon fork.
    files_preserve = []
    for handler in logging.root.handlers:
        files_preserve.append(handler.stream.fileno())
    context.files_preserve = files_preserve
    context.signal_map = {
        signal.SIGTERM: self.shutdown,
        signal.SIGINT: self.shutdown,
        signal.SIGHUP: self.reload,
    }
    with context:
        setproctitle.setproctitle(' '.join(sys.argv))
        # self.scheduler = LogshipperScheduler(
        #     threadpool=ThreadPool(
        #         max_threads=len(self.logfile_configs),
        #     )
        # )
        self.prepare_scheduler()
        self.prepare_fs_notifer()
        self.start_scheduler()
        self.start_notifier()
        for path, config in self.logfile_configs.items():
            self.add_job_to_scheduler(path=path, config=config)
        while not self.stop_event.is_set():
            sleep(0.1)
def handle(self, *args, **options):
    """
    Takes the options and starts a daemon context from them.

    Example::

        python manage.py linkconsumer --pidfile=/var/run/cb_link.pid
            --stdout=/var/log/cb/links.out --stderr=/var/log/cb/links.err
    """
    context = daemon.DaemonContext()
    context.chroot_directory = self.get_option_value(
        options, 'chroot_directory')
    context.working_directory = self.get_option_value(
        options, 'working_directory', '/')
    context.umask = self.get_option_value(options, 'umask', 0)
    context.detach_process = self.get_option_value(
        options, 'detach_process')
    context.prevent_core = self.get_option_value(
        options, 'prevent_core', True)

    # Get file objects
    stdin = self.get_option_value(options, 'stdin')
    if stdin is not None:
        context.stdin = open(stdin, "r")
    stdout = self.get_option_value(options, 'stdout')
    if stdout is not None:
        context.stdout = open(stdout, "a+")
    stderr = self.get_option_value(options, 'stderr')
    if stderr is not None:
        context.stderr = open(stderr, "a+")

    # Make pid lock file
    pidfile = self.get_option_value(options, 'pidfile')
    if pidfile is not None:
        context.pidfile = pidlockfile.PIDLockFile(pidfile)

    uid = self.get_option_value(options, 'uid')
    if uid is not None:
        context.uid = uid
    gid = self.get_option_value(options, 'gid')
    if gid is not None:
        context.gid = gid

    context.open()
    self.handle_daemon(*args, **options)
def start(self):
    """ main loop. """

    def main_loop():
        while True:
            threadnames = [thread.name for thread in threading.enumerate()]
            for job_name, concrete_job in self.jobs.items():
                if job_name not in threadnames:
                    new_thread = Executor(
                        name=job_name,
                        job=concrete_job['method'],
                        logger=self.logger,
                        interval=concrete_job['interval'])
                    new_thread.start()
                    new_thread.join(1)
                else:
                    # The job already has a live worker; wait on it briefly
                    # instead of spawning a duplicate.
                    running = next(
                        t for t in threading.enumerate()
                        if t.name == job_name)
                    running.join(1)

    if not self.args.debug_mode:
        pid_file = pidlockfile.PIDLockFile(self.args.pid_file)
        self.logger.info(
            'blackbird {0} : starting main process'.format(__version__))
        with DaemonContext(
                files_preserve=[logger.get_handler_fp(self.logger)],
                detach_process=self.args.detach_process,
                uid=self.config['global']['user'],
                gid=self.config['global']['group'],
                stdout=None,
                stderr=None,
                pidfile=pid_file):
            main_loop()
    else:
        self.logger.info(
            'blackbird {0} : started main process in debug mode'.format(
                __version__))
        main_loop()
def daemonize(pid, chdir, chroot, umask, files_preserve=None, do_open=True):
    """
    Uses python-daemon to do all the junk needed to make a server a server.

    It supports all the features DaemonContext has, except that chroot
    probably won't work at all without some serious configuration on the
    system.
    """
    context = daemon.DaemonContext()
    context.pidfile = pidlockfile.PIDLockFile(pid)
    context.stdout = open(os.path.join(chdir, "logs/salmon.out"), "a+")
    context.stderr = open(os.path.join(chdir, "logs/salmon.err"), "a+")
    context.files_preserve = files_preserve or []
    context.working_directory = os.path.expanduser(chdir)

    if chroot:
        context.chroot_directory = os.path.expanduser(chroot)
    # umask=False is the sentinel for "leave the umask alone".
    if umask != False:  # noqa: E712
        context.umask = umask

    if do_open:
        context.open()

    return context
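A hedged usage sketch for this helper, with illustrative arguments (the pidfile path and serve() loop are made up, and a logs/ directory is assumed to exist under the chosen chdir; chroot=None skips the chroot branch and umask=False is the sentinel that leaves the umask untouched):

# Detach, write run/smtp.pid, and append stdio under ./logs/.
context = daemonize("run/smtp.pid", ".", chroot=None, umask=False)
try:
    serve()  # hypothetical main loop
finally:
    context.close()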
def daemonise(pidfile, logfile):
    global child  # pylint: disable=global-statement
    client_logger, watched_file_handler = getDaemonLogger(
        logfile, loglevel=logging.DEBUG)
    if isinstance(client_logger, Exception):
        print("Fatal error creating client_logger: " + str(client_logger))
        sys.exit(os.EX_OSERR)
    # noinspection PyArgumentList
    lockfile = pidlockfile.PIDLockFile(pidfile)
    if lockfile.is_locked():
        logging.error("PIDFile %s already locked", pidfile)
        sys.exit(os.EX_OSERR)
    context = daemon.DaemonContext(
        detach_process=True,
        working_directory=os.getcwd(),
        pidfile=lockfile,
        files_preserve=[watched_file_handler.stream],
        stderr=watched_file_handler.stream,
        stdout=watched_file_handler.stream)
    context.signal_map = {
        signal.SIGTERM: signal_handler,
        signal.SIGHUP: signal_handler
    }
    # pass the args down to the process to be run in the daemon context
    args = sys.argv
    args.pop(0)
    args.insert(
        0, 'lava-server')  # each daemon shares a call to the django wrapper
    arg_str = " ".join(args)
    with context:
        logging.info("Running LAVA Daemon")
        child = Popen(args)
        # lazy logging does not populate the strings at this point.
        logging.debug("LAVA Daemon: %s pid: %d" % (arg_str, child.pid))  # pylint: disable=logging-not-lazy
        child.communicate()
    logging.info("Closing LAVA Daemon.")
    return 0
print("No such directory for specified logfile '%s'" % logfile) open(logfile, "w").close() level = logging.INFO if options.loglevel == "DEBUG": level = logging.DEBUG if options.loglevel == "WARNING": level = logging.WARNING if options.loglevel == "ERROR": level = logging.ERROR client_logger, watched_file_handler = getDaemonLogger(logfile, loglevel=level) if isinstance(client_logger, Exception): print("Fatal error creating client_logger: " + str(client_logger)) sys.exit(os.EX_OSERR) # noinspection PyArgumentList lockfile = pidlockfile.PIDLockFile(pidfile) if lockfile.is_locked(): logging.error("PIDFile %s already locked" % pidfile) sys.exit(os.EX_OSERR) context = daemon.DaemonContext( detach_process=True, working_directory=os.getcwd(), pidfile=lockfile, files_preserve=[watched_file_handler.stream], stderr=watched_file_handler.stream, stdout=watched_file_handler.stream, ) starter = { "coordinator": True, "logging_level": options.loglevel, "host": settings["coordinator_hostname"],
def handle(self, *args, **options):
    """
    Takes the options and starts a daemon context from them.

    Example::

        python manage.py linkconsumer --pidfile=/var/run/cb_link.pid
            --stdout=/var/log/cb/links.out --stderr=/var/log/cb/links.err
    """
    context = daemon.DaemonContext()
    context.chroot_directory = self.get_option_value(
        options, 'chroot_directory')
    context.working_directory = self.get_option_value(
        options, 'working_directory', '/')
    context.umask = self.get_option_value(options, 'umask', 0)
    context.detach_process = self.get_option_value(
        options, 'detach_process')
    context.prevent_core = self.get_option_value(
        options, 'prevent_core', True)

    # Get file objects
    stdin = self.get_option_value(options, 'stdin')
    if stdin is not None:
        context.stdin = open(stdin, "r")
    stdout = self.get_option_value(options, 'stdout')
    if stdout is not None:
        context.stdout = open(stdout, "a+")
    stderr = self.get_option_value(options, 'stderr')
    if stderr is not None:
        context.stderr = open(stderr, "a+")

    # Make pid lock file
    pidfile = self.get_option_value(options, 'pidfile')
    if pidfile is not None:
        context.pidfile = pidlockfile.PIDLockFile(pidfile)

    uid = self.get_option_value(options, 'uid')
    if uid is not None:
        context.uid = uid
    gid = self.get_option_value(options, 'gid')
    if gid is not None:
        context.gid = gid

    encoding = self.get_option_value(options, 'encoding')
    if encoding is not None:
        context.encoding = encoding
    scheduler_cls = self.get_option_value(options, 'scheduler_cls')
    if scheduler_cls is not None:
        context.scheduler_cls = scheduler_cls
    queues = self.get_option_value(options, 'queues')
    if queues is not None:
        context.queues = queues
    logfile = self.get_option_value(options, 'logfile')
    if logfile is not None:
        context.logfile = logfile

    context.open()
    self.handle_daemon(*args, **options)
# kwds += self.wrongKey(s)
# kwds += self.skipLetter(s)
# kwds += self.doubleLetter(s)
# kwds += self.reverseLetter(s)
# kwds += self.wrongVowel(s)
# kwds += self.synonymSubstitution(s)
# return kwds


class SimpleThreadedXMLRPCServer(SocketServer.ThreadingMixIn,
                                 SimpleXMLRPCServer.SimpleXMLRPCServer):
    pass


if __name__ == "__main__":
    pidlock = pidlockfile.PIDLockFile(PID_FILE)
    if pidlock.is_locked():
        sys.exit(1)
    context = daemon.DaemonContext()
    context.pidfile = pidlock
    context.open()
    try:
        server = SimpleThreadedXMLRPCServer((LISTEN, PORT))
        server.register_instance(TypoGenerator())
        server.serve_forever()
    finally:
        context.close()
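Across all of these snippets the skeleton is the same: build the lock, refuse to start if it is already held, then let DaemonContext acquire it on entry and release it on exit. A minimal sketch of that skeleton under the same assumptions as above (legacy daemon.pidlockfile layout; the pidfile path and loop body are placeholders; note that is_locked() only tests for the file's existence, so a stale pidfile will also block startup):

import sys
import time

import daemon
from daemon import pidlockfile

PID_FILE = "/tmp/skeleton.pid"  # hypothetical path


def main():
    pidlock = pidlockfile.PIDLockFile(PID_FILE)
    if pidlock.is_locked():
        # Another instance (or a stale file) holds the lock; bail out.
        sys.exit("pid %s already holds %s" % (pidlock.read_pid(), PID_FILE))
    # DaemonContext acquires the pidfile on __enter__ and releases it
    # on __exit__, so no manual cleanup is needed.
    with daemon.DaemonContext(pidfile=pidlock):
        while True:
            time.sleep(1)


if __name__ == "__main__":
    main()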