def nsDaemonStartInt(ns, *args, **kw):
    """Detach into a daemon context and hand control to nsDaemonMain.

    The context keeps the process's current stdout/stderr, runs in the
    application home directory taken from the namespace, and drops to the
    configured uid.  All extra positional and keyword arguments are
    forwarded unchanged to nsDaemonMain.
    """
    ctx = daemon.DaemonContext(
        chroot_directory=None,
        working_directory=nsGet(ns, "/sys/env/apphome"),
        uid=nsGet(ns, "/sys/env/uid"),
        stdout=sys.stdout,
        stderr=sys.stderr,
        prevent_core=True,
    )
    with ctx:
        nsDaemonMain(ns, *args, **kw)
def __init__(self, app, parser_obj):
    """Set up the parameters of a new runner.

    NOTE: this method intentionally does NOT invoke DaemonRunner.__init__();
    it calls DaemonRunner's *parent* initializer and re-implements the setup
    so that additional actions and argparse-based parsing can be supported.

    :param app: the application instance; see below.
    :param parser_obj: argparse parser that receives the daemon actions.
    :return: ``None``.

    The `app` argument must have the following attributes:

    * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem paths to open
      and replace the existing `sys.stdin`, `sys.stdout`, `sys.stderr`.
    * `pidfile_path`: Absolute filesystem path to a file that will be used
      as the PID file for the daemon. If ``None``, no PID file will be used.
    * `pidfile_timeout`: Used as the default acquisition timeout value
      supplied to the runner's PID lock file.
    * `run`: Callable that will be invoked when the daemon is started.
    """
    # Deliberately skips DaemonRunner.__init__ (see docstring above).
    super(runner.DaemonRunner, self).__init__()
    # update action_funcs to support more operations
    self.update_action_funcs()
    daemonize_parser(parser_obj, self.action_funcs, ExtendedDaemonRunner.help_menu)
    args = parser_obj.parse_args()
    self.action = str(args.action)
    self.app = app
    self.daemon_context = daemon.DaemonContext()
    self.daemon_context.stdin = open(app.stdin_path, 'rt')
    try:
        self.daemon_context.stdout = open(app.stdout_path, 'w+t')
    except IOError as err:
        # catch 'tty' error when launching server from remote location;
        # fall back to discarding stdout entirely.
        app.stdout_path = "/dev/null"
        self.daemon_context.stdout = open(app.stdout_path, 'w+t')
    # NOTE(review): buffering=0 is only valid for binary-mode files on
    # Python 3; 'a+t' with buffering=0 raises ValueError there -- confirm
    # the intended interpreter version.
    self.daemon_context.stderr = open(app.stderr_path, 'a+t', buffering=0)
    self.pidfile = None
    if app.pidfile_path is not None:
        self.pidfile = make_pidlockfile(app.pidfile_path, app.pidfile_timeout)
    self.daemon_context.pidfile = self.pidfile
def main():
    """Entry point: refuse to run twice, then run in foreground or as a daemon.

    Fix: the original used the Python 2 ``print >> sys.stderr`` statement,
    which is a syntax error under Python 3 (this file elsewhere uses
    f-strings, i.e. Python 3 syntax).
    """
    lock = PIDLock(PIDFILE)
    if lock.is_locked():
        # Another instance appears to hold the PID file.
        print("virt-who seems to be already running. If not, remove %s" % PIDFILE,
              file=sys.stderr)
        sys.exit(1)

    logger, options = parseOptions()
    if options.background:
        # Do a daemon initialization; keep the logger's stream open across the fork.
        with daemon.DaemonContext(pidfile=lock,
                                  files_preserve=[logger.handlers[0].stream]):
            _main(logger, options)
    else:
        with lock:
            _main(logger, options)
def main():
    """Tiny start/stop/restart CLI wrapper around the translate daemon.

    Fix: the original used Python 2 ``print`` statements, which are syntax
    errors under Python 3 (this file elsewhere uses f-strings).
    """
    if len(sys.argv) < 2:
        print(sys.argv[0] + " start|stop|restart ")
        sys.exit(1)
    cmd = sys.argv[1]
    context = daemon.DaemonContext(pidfile=PIDLockFile('/tmp/translate.pid'),
                                   working_directory='/tmp')
    if cmd == "start":
        with context:
            translate.main()
    elif cmd == "stop":
        # NOTE(review): this closes a context that was never opened, so it
        # does not actually stop a running daemon -- confirm intended behavior.
        context.close()
    elif cmd == "restart":
        print("todo: implement")
    else:
        print("start, stop, restart")
def __init__(self, conf): self._conf = conf # Set up signal handlers before daemonizing. self.__setup_signal_handlers() self.__daemon_context = daemon.DaemonContext( working_directory='/var/run') if self._conf.daemon: self.__daemon_context.open() # Patch system modules to be greenthread-friendly. Must be after # daemonizing. eventlet.monkey_patch() self._pool = eventlet.GreenPool(size=self._conf.concurrency) self.__run_gt = None # Set up logging. Must be after daemonizing. if self._conf.debug: lvl = logging.DEBUG self.__spawn_backdoor_server() else: lvl = getattr(logging, self._conf.loglevel) if self._conf.logdest in { LogDestination.SYSLOG, LogDestination.STDOUT }: self._logger = utils.get_logger(self._conf.node_type, self._conf.logdest) else: self._logger = utils.get_logger(self._conf.node_type, LogDestination.LOGFILE, filehandler_args={ 'filename': self._conf.logdest, 'maxBytes': self._conf.logfilesize, 'backupCount': self._conf.logbackupcount }) self._logger.setLevel(lvl) self._mgmtserver = mgmt.MgmtServer(self._conf.udsfile, self._process, self._logger) self.__pidfd = utils.Pidfile(self._conf.pidfile, self._conf.node_type, uuid=self._conf.node_id)
def exec(main, cfg, *args, **kwargs):
    """Run ``main(cfg, *args, **kwargs)``, daemonizing first when cfg.daemon is set.

    In daemon mode, stdout/stderr are re-pointed at cfg.logfile after the
    context is entered, so stray print() output ends up in the log file.
    """
    if cfg.daemon:
        # NOTE(review): looks like a workaround for a previously-closed
        # stdin confusing DaemonContext -- confirm why __stdin__ is
        # rebound here.
        if sys.stdin and sys.__stdin__ and sys.__stdin__.closed:
            sys.__stdin__ = sys.stdin
        from daemon import daemon
        with daemon.DaemonContext(
            umask=0o022,
            chroot_directory=None,
            working_directory=os.getcwd(),
            stdout=sys.stdout,
            stderr=sys.stderr,
        ) as ctx:
            init(cfg)
            with open(cfg.logfile, 'a') as log:
                # XXX redirect stdout in case of existing print()
                daemon.redirect_stream(ctx.stdout, log)
                daemon.redirect_stream(ctx.stderr, log)
                main(cfg, *args, **kwargs)
    else:
        init(cfg)
        main(cfg, *args, **kwargs)
from tornado.web import RequestHandler, Application, url
import tornado.httpserver


class HelloHandler(RequestHandler):
    """Respond to GET / with a fixed greeting."""

    def get(self):
        self.write("hello,world")


class IndexHandler(RequestHandler):
    """Echo a numeric path segment back, prefixed by an optional ?greeting=."""

    def get(self, input):
        greeting = self.get_argument('greeting', 'hi')
        self.write(greeting + ',man!!! your No. is ' + input)

    def write_error(self, status_code, **kwargs):
        # Custom error page instead of Tornado's default error output.
        self.write("so sorry!!!you got a %d error" % status_code)


if __name__ == "__main__":
    # Daemonize with all standard streams pointed at /dev/null.
    daemonctx = daemon.DaemonContext()
    daemonctx.stdin = open('/dev/null', 'r')
    daemonctx.stdout = open('/dev/null', 'w+')
    # Fix: buffering=0 is only valid for binary files on Python 3; the
    # target is /dev/null, so default buffering is equivalent.
    daemonctx.stderr = open('/dev/null', 'w+')
    daemonctx.working_directory = os.getcwd()
    # Fix: 022 is the Python-2-only octal literal; Python 3 requires 0o022.
    daemonctx.umask = 0o022
    daemonctx.open()

    app = Application([(r"/", HelloHandler), (r"/hi/([0-9]+)", IndexHandler)])
    # NOTE(review): app.listen() is what binds port 8888; http_server is
    # created but never started -- confirm whether it is needed at all.
    http_server = tornado.httpserver.HTTPServer(app)
    app.listen(8888)
    IOLoop.current().start()
# Command-line options for the poller.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--no-headless",
    action="store_false",
    default=True,
    help=
    "Whether or not to run Selenium in headless mode. Don't enable if debugging"
)
parser.add_argument(
    "--local",
    action="store_true",
    default=False,
    # Fix: help text was copy-pasted from --no-headless; this flag actually
    # selects foreground (non-daemon) execution.
    help=
    "Run the poll loop in the foreground instead of daemonizing"
)
args = parser.parse_args()

# Timestamped log files for this run's stdout/stderr capture.
datestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
with open(os.path.join(ACTION_DIRECTORY, "{}_stdout.txt".format(datestamp)), "w") as stdout_log, \
        open(os.path.join(ACTION_DIRECTORY, "{}_stderr.txt".format(datestamp)), "w") as stderr_log:
    if args.local:
        # Foreground run, useful for debugging.
        poll_loop(None, args.no_headless)
    else:
        # Detach; keep the current uid/gid and route output to the log files.
        with daemon.DaemonContext(stdout=stdout_log,
                                  stderr=stderr_log,
                                  uid=os.getuid(),
                                  gid=os.getgid()):
            poll_action_network(headless=args.no_headless)
            print "west toggle"
            west.toggle()
            print "waiting..."
            # debounce / guard against long presses
            time.sleep(0.5)
            continue
        # right button pressed (GPIO 110 pulled low)
        if int(open("/sys/class/gpio/gpio110/value", "r").read().split("\n")[0]) == 0:
            print "heater toggle"
            heater.toggle()
            print "waiting..."
            # debounce / guard against long presses
            time.sleep(0.5)
            continue
        print "nothing loop"
        time.sleep(0.2)


# Entry point when executed as a script.
if __name__ == '__main__':
    # When running as a daemon: the pid file must match the one named in
    # /etc/init.d/wemo.
    with daemon.DaemonContext(pidfile=PIDLockFile('/var/run/wemo.pid')):
        # When started as a daemon the network may not be up yet, in which
        # case Environment() and friends raise and the process would exit.
        # As a workaround, every exception is swallowed and wemo_main() is
        # retried after a short pause.
        while True:
            try:
                wemo_main()
            except:
                time.sleep(3)
def main():
    """virt-who entry point: parse options, build configs, run the main loop.

    Fix: the original used the Python 2 ``print >> sys.stderr`` statement,
    which is a syntax error under Python 3 (this file elsewhere uses
    f-strings, i.e. Python 3 syntax).
    """
    try:
        logger, options = parseOptions()
    except OptionError as e:
        print(str(e), file=sys.stderr)
        exit(1, status="virt-who can't be started: %s" % str(e))

    lock = PIDLock(PIDFILE)
    if lock.is_locked():
        msg = "virt-who seems to be already running. If not, remove %s" % PIDFILE
        print(msg, file=sys.stderr)
        exit(1, status=msg)

    # Never retry more slowly than the polling interval itself.
    global RetryInterval
    if options.interval < RetryInterval:
        RetryInterval = options.interval

    global virtWho
    try:
        virtWho = VirtWho(logger, options)
    except (InvalidKeyFile, InvalidPasswordFormat) as e:
        logger.error(str(e))
        exit(1, "virt-who can't be started: %s" % str(e))

    if options.virtType is not None:
        config = Config("env/cmdline", options.virtType, **options.__dict__)
        config.checkOptions(options.smType, logger)
        virtWho.configManager.addConfig(config)
    for conffile in options.configs:
        try:
            virtWho.configManager.readFile(conffile)
        except Exception as e:
            logger.error('Config file "%s" skipped because of an error: %s' % (conffile, str(e)))

    if len(virtWho.configManager.configs) == 0:
        # In order to keep compatibility with older releases of virt-who,
        # fallback to using libvirt as default virt backend
        logger.info("No configurations found, using libvirt as backend")
        virtWho.configManager.addConfig(Config("env/cmdline", "libvirt"))

    for config in virtWho.configManager.configs:
        if config.name is None:
            logger.info(
                'Using commandline or sysconfig configuration ("%s" mode)',
                config.type)
        else:
            logger.info('Using configuration "%s" ("%s" mode)' % (config.name, config.type))

    # The pre-daemon logger is closed; a queue-backed one is created after
    # the fork below.
    log.closeLogger(logger)
    if options.background:
        locker = lambda: daemon.DaemonContext(pidfile=lock)
    else:
        locker = lambda: lock

    with locker():
        signal.signal(signal.SIGHUP, reload)
        signal.signal(signal.SIGTERM, atexit_fn)
        virtWho.logger = logger = log.getLogger(options, queue=True)
        sd_notify("READY=1\nMAINPID=%d" % os.getpid())
        while True:
            try:
                _main(virtWho)
                break
            except ReloadRequest:
                logger.info("Reloading")
                continue
def main():
    """
    Main execution function.  Script will exit with a non-zero value based on the following:
        1: Configuration problem
        2: Yara rule validation problem
        3: User interrupt
        4: Unexpected Yara scan exception
    """
    args = handle_arguments()

    # check for extended logging
    if args.debug:
        logger.setLevel(logging.DEBUG)

    # check for additional log file
    if args.log_file:
        use_log_file = os.path.abspath(os.path.expanduser(args.log_file))
        formatter = logging.Formatter(logging_format)
        handler = logging.handlers.RotatingFileHandler(
            use_log_file, maxBytes=10 * 1000000, backupCount=10
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # Verify the configuration file and load up important global variables
    try:
        ConfigurationInit(args.config_file, args.output_file)
    except Exception as err:
        logger.error(f"Unable to continue due to a configuration problem: {err}")
        sys.exit(1)

    if args.validate_yara_rules:
        """ RULE VALIDATION MODE OF OPERATION """
        logger.info(f"Validating yara rules in directory: {globals.g_yara_rules_dir}")
        yara_rule_map = generate_rule_map(globals.g_yara_rules_dir)
        try:
            yara.compile(filepaths=yara_rule_map)
            logger.info("All yara rules compiled successfully")
        except Exception as err:
            logger.error(f"There were errors compiling yara rules: {err}")
            sys.exit(2)
        sys.exit()
    else:
        # Doing a real run

        # Exit condition and queues for doing work
        exit_event = Event()
        hash_queue = Queue()
        scanning_results_queue = Queue()
        write_pid_file(args.pid_file)
        # noinspection PyUnusedLocal
        # used for local minion handling in some scenarios
        local_minion = None
        exit_rc = 0
        try:
            """
            3 modes of operation
            1) task primary
            2) standalone minion
            3) minion+primary unit
            """
            if args.daemon:
                logger.debug("RUNNING AS DEMON")
                # Get working dir setting
                working_dir = os.path.abspath(os.path.expanduser(args.working_dir))
                # Mark files to be preserved
                files_preserve = get_log_file_handles(logger)
                # NOTE(review): args.log_file / args.output_file are path
                # strings, not file handles; DaemonContext.files_preserve
                # expects open files or fds -- confirm this works as intended.
                files_preserve.extend([args.log_file, args.output_file])
                context = daemon.DaemonContext(
                    working_directory=working_dir,
                    files_preserve=files_preserve,
                    # Only keep console output when debugging.
                    stdout=sys.stdout if args.debug else None,
                    stderr=sys.stderr if args.debug else None
                )

                # Operating mode - are we the primary?
                run_as_primary = "master" in globals.g_mode or "primary" in globals.g_mode

                # noinspection PyBroadException
                try:
                    if run_as_primary and not test_database_conn():
                        sys.exit(1)
                except Exception as ex:
                    logger.error(F"Failed database connection test: {ex}")
                    sys.exit(1)

                # Signal handler: both TERM and QUIT set the exit event.
                sig_handler = partial(handle_sig, exit_event)
                context.signal_map = {
                    signal.SIGTERM: sig_handler,
                    signal.SIGQUIT: sig_handler,
                }

                # Make sure we close the deamon context at the end
                threads = []
                with context:
                    # Re-write the pid file: daemonizing forked a new pid.
                    write_pid_file(args.pid_file)
                    # only connect to cbr if we're the primary
                    if run_as_primary:
                        # initialize local resources
                        init_local_resources()

                        # start working threads
                        threads = start_minions(
                            exit_event, hash_queue, scanning_results_queue
                        )

                        # start local celeryD worker if working mode is local
                        if "worker" in globals.g_mode or "minion" in globals.g_mode:
                            local_minion = worker(app=app)
                            threads.append(
                                start_celery_worker_thread(
                                    local_minion, globals.g_celery_worker_kwargs, args.config_file
                                )
                            )
                    else:
                        # otherwise, we must start a celeryD worker since we are not the master
                        local_minion = worker(app=app)
                        threads.append(
                            start_celery_worker_thread(
                                local_minion, globals.g_celery_worker_kwargs, args.config_file
                            )
                        )

                    # run until the service/daemon gets a quitting sig
                    try:
                        logger.debug("Started as demon OK")
                        run_to_exit_signal(exit_event)
                    except Exception as e:
                        logger.exception(f"Error while executing: {e}")
                    finally:
                        try:
                            wait_all_worker_exit_threads(threads, timeout=4.0)
                        finally:
                            logger.info("Yara connector shutdown")
                            # noinspection PyProtectedMember
                            # Hard exit: skips interpreter cleanup inside the
                            # daemon context.
                            os._exit(exit_rc)
            else:
                # | | | BATCH MODE | | |
                logger.debug("BATCH MODE")
                init_local_resources()

                # start necessary worker threads
                threads = start_minions(
                    exit_event, hash_queue, scanning_results_queue, run_only_once=True
                )

                # Start a celery worker if we need one
                if "worker" in globals.g_mode or "minion" in globals.g_mode:
                    local_minion = worker(app=app)
                    threads.append(
                        start_celery_worker_thread(
                            local_minion, globals.g_celery_worker_kwargs, args.config_file
                        )
                    )
                run_to_exit_signal(exit_event)
                wait_all_worker_exit_threads(threads, timeout=4.0)
        except KeyboardInterrupt:
            logger.info("\n\n##### Interrupted by user!\n")
            exit_rc = 3
        except Exception as err:
            logger.error(f"There were errors executing Yara rules: {err}")
            exit_rc = 4
        finally:
            # Always signal workers to stop before exiting.
            exit_event.set()
            sys.exit(exit_rc)
def start():
    """Daemonize, record the daemon's pid, and start the server.

    Fix: the pid-log file was opened via ``open(...).write(...)`` and never
    closed, leaking the file handle inside the daemon.
    """
    print("Start")
    with daemon.DaemonContext():
        # Record the daemonized pid; the with-block closes the handle.
        with open(WORKDIR + "/createDaemon.log", "w") as pid_log:
            pid_log.write(str(os.getpid()) + "\n")
        server = Server()
        syslog.syslog("Server has been started.")
def daemon_run() -> None:
    """Run the service, detaching into a daemon first when configured to."""
    if not ConfigControl.daemonize:
        # Foreground mode: just run in the current process.
        run()
        return
    with daemon.DaemonContext():
        run()
# kwds += self.wrongKey(s) # kwds += self.skipLetter(s) # kwds += self.doubleLetter(s) # kwds += self.reverseLetter(s) # kwds += self.wrongVowel(s) # kwds += self.synonymSubstitution(s) # return kwds class SimpleThreadedXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): pass if __name__ == "__main__": pidlock = pidlockfile.PIDLockFile(PID_FILE) if pidlock.is_locked(): sys.exit(1) context = daemon.DaemonContext() context.pidfile = pidlock context.open() try: server = SimpleThreadedXMLRPCServer((LISTEN, PORT)) server.register_instance(TypoGenerator()) server.serve_forever() finally: context.close()