def start(ss):
    """Start sucklesync: verify log/rsync/ssh access, then run it,
    daemonizing first when ss.daemonize is set.

    NOTE(review): ss is the sucklesync config/state object; its structure
    is inferred from the attribute access below -- confirm against caller.
    ss.debugger.critical() is assumed to abort the process.
    """
    ss.debugger.warning("starting sucklesync")
    sucklesync.sucklesync_instance = ss
    # test that we can write to the log
    try:
        with open(ss.logging["filename"], "w"):
            ss.debugger.info("successfully writing to logfile")
    except IOError:
        ss.debugger.critical("failed to write to logfile: %s", (ss.logging["filename"], ))
    # test rsync -- run a NOP, only success returns
    command = ss.local["rsync"] + " -qh"
    _rsync(command)
    ss.debugger.info("successfully tested local rsync: %s", (command, ))
    # test ssh -- run a NOP find, only success returns
    # NOTE(review): ss.remote["find"] appears twice in this command; looks
    # suspicious -- confirm the second occurrence is not meant to be a
    # remote directory path.
    command = ss.local["ssh"] + " " + ss.remote["hostname"] + " " + ss.local["ssh_flags"] + " " + ss.remote["find"] + " " + ss.remote["find"] + " -type d"
    _ssh(command, True)
    ss.debugger.info("successfully tested ssh to remote server: %s", (command, ))
    if ss.daemonize:
        # Import lazily so non-daemon runs don't need the dependency.
        try:
            import daemonize
        except Exception as e:
            ss.debugger.error("fatal exception: %s", (e, ))
            ss.debugger.critical("failed to import daemonize (as user %s), try 'pip install daemonize', exiting", (ss.debugger.whoami()))
        ss.debugger.info("successfully imported daemonize")
        # test that we can write to the pidfile
        try:
            with open(ss.logging["pidfile"], "w"):
                ss.debugger.info("successfully writing to pidfile")
        except IOError:
            ss.debugger.critical("failed to write to pidfile: %s", (ss.logging["pidfile"], ))
        ss.debugger.warning("daemonizing, output redirected to log file: %s", (ss.logging["filename"], ))
        try:
            ss.debugger.logToFile()
            # keep_fds keeps the log handler's descriptor open across the fork
            daemon = daemonize.Daemonize(app="sucklesync", pid=ss.logging["pidfile"], action=sucklesync, keep_fds=[ss.debugger.handler.stream.fileno()], logger=ss.logger, verbose=True)
            daemon.start()
        except Exception as e:
            ss.debugger.critical("Failed to daemonize: %s, exiting", (e, ))
    else:
        sucklesync()
def main():
    """
    Main entry point into mochad_dispatch.  Processes command line
    arguments then hands off to Daemonize and MochadClient
    """
    global args, dispatcher_type, daemon

    # Command-line interface.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', default="127.0.0.1",
                        help="IP/host of server running mochad (default 127.0.0.1)")
    parser.add_argument('-f', '--foreground', action='store_true', default=False,
                        help="Don't fork; run in foreground (for debugging)")
    parser.add_argument('--cafile',
                        help="File containing trusted CA certificates")
    parser.add_argument('dispatch_uri', help='dispatch messages to this URI')
    args = parser.parse_args()

    # Choose a dispatcher implementation from the URI scheme.
    parsed = urllib.parse.urlparse(args.dispatch_uri)
    if parsed.scheme == 'mqtt':
        dispatcher_type = MqttDispatcher
    else:
        errordie("unsupported URI scheme '{}'".format(parsed.scheme))

    # Daemonize; the pidfile is keyed on the launching process's pid.
    pidfile = "/tmp/mochad_dispatch-{}.pid".format(os.getpid())
    daemon = daemonize.Daemonize(app="mochad_dispatch",
                                 pid=pidfile,
                                 foreground=args.foreground,
                                 action=daemon_main)
    daemon.start()
def _background_proc(self, pidfile, quiet, stop_after):
    """Run this operation as a detached background process."""
    import daemonize

    def run_in_foreground():
        self._foreground_proc(quiet, stop_after)

    proc = daemonize.Daemonize(app="guild_op", action=run_in_foreground, pid=pidfile)
    # Log before forking; the terminal fds are closed once daemonized.
    if not quiet:
        log.info("Operation started in background (pidfile is %s)", pidfile)
    proc.start()
def daemon(self):
    """Detach and run this task's run() as a daemon process."""
    log.debug('Daemonizing task %s with pidfile %s', self.task.name, self.pid)
    daemonize.Daemonize(app=str(self), pid=self.pid, action=self.run).start()
def fork_daemon(daemon, timeout=5, lock='/tmp/vigilant.pid'):
    """
    Fork the stats Daemon as a real system daemon to run in the background

    :param daemon: The StatServerDaemon from StatsAsyncCore
    :param timeout: optional keyword argument for the timeout to wait on daemon ready signal
    :param lock: optional specify the location of the daemon lock file
    :raise Exception: Raises an exception if the timeout elapses before daemon ready
    """
    DaemonState.STATS_DAEMON_SERVER = daemon
    # The daemonized child signals readiness with SIGUSR1.
    signal.signal(signal.SIGUSR1, _daemon_ready_handler)

    if os.fork() == 0:
        # Child: become the real daemon, then exit this intermediate process.
        child = daemonize.Daemonize(app=DaemonState.STATS_DAEMON_APP, pid=lock,
                                    action=_daemonize_daemon)
        child.start()
        sys.exit(0)

    # Parent: poll once per second until the ready flag flips or we time out.
    for _ in range(timeout):
        if DaemonState.STATS_DAEMON_READY:
            break
        time.sleep(1)
    if DaemonState.STATS_DAEMON_READY is False:
        raise Exception('Timeout of [%i] seconds, failed waiting for daemon to come alive' % timeout)
def as_daemon(method, name, *args, **kwds):
    '''Run long-standing process as daemon.

    method is retried forever on connection errors (sleeping sleep_time,
    popped from kwds, between attempts); any other error is logged and
    re-raised, ending the daemon.
    '''
    sleep_time = kwds.pop('sleep_time', DEFAULT_SLEEP_TIME)

    def main():
        '''Wrapper for the method. Handles connection loss and other errors.'''
        while True:
            try:
                # If the method completes, break. Otherwise,
                # just exit.
                method(*args, **kwds)
                break
            except tweepy.TweepError as error:
                LOGGER.error(error.reason)
                # Only connection-type Twitter errors are retried.
                if api.is_connection_error(error):
                    handle_connection_error(sleep_time)
                else:
                    raise
            except requests.exceptions.RequestException as error:
                LOGGER.error(str(error))
                handle_connection_error(sleep_time)
            except Exception as error:
                LOGGER.critical(str(error))
                raise

    # Close inherited DB connections before forking; the daemon reopens its own.
    close_database_connections()
    pid = get_pid(name)
    daemon = daemonize.Daemonize(app=APP_NAME, pid=pid, action=main, keep_fds=KEEP_FDS)
    daemon.start()
def tornado():
    """Run the tardisremote server, daemonized when args.daemon is set.

    NOTE(review): relies on module-level `args` and `logger` being
    configured before this is called -- confirm against caller.
    """
    setup()
    if args.daemon:
        user = args.user
        group = args.group
        pidfile = args.pidfile
        # Keep only stream-handler fds open across the daemon fork.
        fds = [h.stream.fileno() for h in logger.handlers if isinstance(h, logging.StreamHandler)]
        logger.info("About to daemonize")
        try:
            daemon = daemonize.Daemonize(app="tardisremote", pid=pidfile, action=run_server, user=user, group=group, keep_fds=fds)
            daemon.start()
        except Exception as e:
            logger.critical("Caught Exception on Daemonize call: {}".format(e))
            # Full traceback only when explicitly requested.
            if args.exceptions:
                logger.exception(e)
    else:
        try:
            run_server()
        except KeyboardInterrupt:
            # Ctrl-C is a normal way to stop a foreground run.
            pass
        except Exception as e:
            logger.critical("Unable to run server: {}".format(e))
            if args.exceptions:
                logger.exception(e)
def daemon():
    """Daemonize the DNS main loop with a pidfile in the temp directory."""
    pid = os.path.join(tempfile.gettempdir(), 'dns.pid')
    # NOTE(review): `logger=logging` passes the logging *module*, not a
    # Logger instance -- works only if Daemonize just calls logging.info
    # etc.; confirm intended.
    _daemon = daemonize.Daemonize(app='dns', pid=pid, logger=logging, action=main)
    # NOTE(review): when a stale pidfile exists, exit() is presumably used
    # to clean it up before starting -- verify Daemonize.exit() semantics.
    if os.path.exists(pid):
        _daemon.exit()
    _daemon.start()
def start_daemon():
    """Fork the periodic updater into the background."""
    # Preserve every log handler's fd across the fork.
    open_fds = [h.stream.fileno() for h in logger.handlers]
    daemonize.Daemonize(
        app='snh48live-stats',
        pid=PIDFILE,
        action=periodic_updater,
        keep_fds=open_fds,
        logger=logger,
    ).start()
def start(self):
    """Run the bot, daemonizing first when the config asks for it."""
    # dict.get() already returns None (falsy) when the key is absent, so
    # the original's separate `'daemon' in self.config` test was redundant.
    if self.config.get('daemon'):
        import daemonize
        pid_file = self.get_pid_file()
        daemon = daemonize.Daemonize(app='Slack-SuperBot', pid=pid_file,
                                     action=self._start)
        daemon.start()
    else:
        self._start()
def main():
    """CLI entry point: parse docopt args, unlock the database, run the
    keepass-http server (optionally as a daemon)."""
    # avoid: UnboundLocalError: local variable '__doc__' referenced before assignment
    doc_ = __doc__
    kpconf = Conf()
    # Advertise --gui in the usage text only when GUI deps are installed.
    if has_gui_support():
        doc_ += " --gui".ljust(28) + "Use QT (PySide) for a graphical interface"
    # handle arguments
    arguments = docopt.docopt(doc_)
    is_daemon = arguments["--daemon"]
    database_path = arguments["<database_path>"]
    host = arguments["--host"]
    port = arguments["--port"]
    assert port.isdigit()
    loglevel = arguments["--loglevel"]
    gui = arguments.get("--gui", False)
    if gui:
        ui = Conf.UI.GUI
    else:
        ui = Conf.UI.CLI
    kpconf.select_ui(ui)
    kpconf.set_loglevel(loglevel)
    if not has_gui_support():
        log.debug("\nIt seems that you don't have GUI support installed. Install it with:\n" " $ pip install -e '.[GUI]'\n" "or\n" " $ pip install keepass_http[GUI]\n")
    # backend
    backend = backends.BaseBackend.get_by_filepath(database_path)
    kpconf.set_backend(backend)
    # Prompt for the passphrase, allowing up to MAX_TRY_COUNT attempts.
    success = kpconf.get_selected_ui().RequireDatabasePassphraseUi.do(MAX_TRY_COUNT)
    if success is False:
        sys.exit("Wrong or no passphrase")
    # config daemon
    run_server = partial(app.run, debug=False, host=host, port=int(port))
    if is_daemon:
        pid_file = os.path.join(kpconf.confdir, "process.pid")
        log.info("Server started as daemon on %s:%s" % (host, port))
        daemon = daemonize.Daemonize(app=APP_NAME, pid=pid_file, action=run_server, keep_fds=get_logging_filehandlers_streams_to_keep())
        daemon.start()
    else:
        log.info("Server started on %s:%s" % (host, port))
        run_server()
def _background_proc(self, pidfile, quiet, stop_after):
    """Detach and run this operation in the background."""
    assert self._run
    import daemonize

    def run_in_foreground():
        self._foreground_proc(quiet, stop_after)

    proc = daemonize.Daemonize(app="guild_op", action=run_in_foreground, pid=pidfile)
    # Log before forking; the terminal fds are closed once daemonized.
    if not quiet:
        log.info("%s started in background as %s (pidfile %s)",
                 self.opdef.opref.to_opspec(), self._run.id, pidfile)
    proc.start()
def _background_proc(self, pidfile):
    """Detach and run this operation in the background."""
    import daemonize
    runner = lambda: self._foreground_proc()
    proc = daemonize.Daemonize(app="tracker_op", action=runner, pid=pidfile)
    # Log before forking; output fds are closed once daemonized.
    log.info("Operation started in background (pidfile is %s)", pidfile)
    proc.start()
def run_server(server, config):
    """Daemonize *server* and run it forever.

    :param server: socket server exposing serve_forever() and fileno()
    :param config: dict with 'app' and 'pidfile' keys
    :returns: True when the daemonized run exits cleanly (SystemExit code 0)
    :raises SystemExit: re-raised when the exit code is non-zero
    """
    daemon = daemonize.Daemonize(
        app=config['app'],
        pid=config['pidfile'],
        action=server.serve_forever,
        # keep open stdin, stdout, stderr and socket file
        keep_fds=[0, 1, 2, server.fileno()])
    try:
        daemon.start()
    # this is required to do some stuff after server is daemonized
    except SystemExit as e:
        # BUG FIX: original used `e.code is 0` -- an identity test that only
        # "works" because CPython caches small ints; compare by value.
        if e.code == 0:
            return True
        raise
def start(ng):
    """Start netgrasp: validate privileges and config, then run
    netgrasp.main, daemonizing first when ng.daemonize is set.

    NOTE(review): ng is the netgrasp config/state object; structure
    inferred from attribute access below. ng.debugger.critical() is
    assumed to abort the process.
    """
    import os
    pid = ng.is_running()
    if pid:
        ng.debugger.critical("Netgrasp is already running with pid %d.", (pid,))
    ng.debugger.warning("Starting netgrasp...")
    # Raw packet capture requires root.
    if os.getuid() != 0:
        ng.debugger.critical("netgrasp must be run as root (currently running as %s), exiting", (ng.debugger.whoami()))
    netgrasp.netgrasp_instance = ng
    # @TODO: use pcap to set and test interface
    if not ng.listen["interface"]:
        ng.debugger.critical("Required [Listen] 'interface' not defined in configuration file, exiting.")
    if not ng.database["filename"]:
        ng.debugger.critical("Required [Database] 'filename' not defined in configuration file, exiting.")
    # Start netgrasp.
    if ng.daemonize:
        # Test that we can write to the log.
        try:
            with open(ng.logging["filename"], "w"):
                ng.debugger.info("successfully opened logfile for writing")
        except Exception as e:
            ng.debugger.dump_exception("start() exception")
            ng.debugger.critical("failed to open logfile '%s' for writing: %s", (ng.logging["filename"], e))
        import daemonize
        # Test that we can write to the pidfile.
        try:
            with open(ng.logging["pidfile"], "w"):
                ng.debugger.info("successfully opened pidfile for writing")
        except IOError as e:
            ng.debugger.critical("failed to open pidfile '%s' for writing: %s", (ng.logging["pidfile"], e))
        ng.debugger.info("daemonizing app=netgrasp, pidfile=%s, user=%s, group=%s, verbose=True", (ng.logging["pidfile"], ng.security["user"], ng.security["group"]))
        ng.debugger.warning("daemonizing, output redirected to log file: %s", (ng.logging["filename"],))
        try:
            ng.debugger.logToFile()
            # privileged_action runs before privileges drop to user/group --
            # used here to open the pcap handle while still root.
            daemon = daemonize.Daemonize(app="netgrasp", pid=ng.logging["pidfile"], privileged_action=netgrasp.get_pcap, user=ng.security["user"], group=ng.security["group"], action=netgrasp.main, keep_fds=[ng.debugger.handler.stream.fileno()], logger=ng.logger, verbose=True)
            daemon.start()
        except Exception as e:
            ng.debugger.critical("Failed to daemonize: %s, exiting", (e,))
    else:
        netgrasp.main()
def main():
    """Entry point: configure logging, then run the monitor loop either
    in the foreground or as a daemon."""
    cli_args = get_cli_args()
    setup_loggers(conf.dict("log_levels"),
                  log_to_stdout=cli_args.debug,
                  log_dir_path="/var/log",
                  log_file="ps_orca_logger.log")
    if not cli_args.no_daemonize:
        daemonize.Daemonize(app="Orca Group Change",
                            pid=PID_FILE,
                            action=monitor_loop,
                            verbose=True).start()
        return
    # Foreground mode (--no-daemonize): run the loop directly.
    monitor_loop(cli_args.sleep_time, cli_args.debug)
def _start_in_background(run, op, pidfile, quiet, stop_after, extra_env):
    """Detach and execute *run* in a daemonized child process."""
    import daemonize

    def launch():
        _run(run, op, quiet, stop_after, extra_env)

    proc = daemonize.Daemonize(app="guild_op", action=launch, pid=pidfile)
    # Need to log before starting daemon, otherwise output isn't
    # visible.
    if not quiet:
        log.info("%s started in background as %s (pidfile %s)",
                 run.opref.to_opspec(config.cwd()), run.id, pidfile)
    try:
        proc.start()
    except SystemExit:
        # Daemonize failed to fork/lock -- drop the pending marker so the
        # run isn't left stuck, then propagate the exit.
        op_util.clear_run_pending(run)
        raise
def _start(name, f, log):
    """Daemonize callable *f* under *name*, refusing if already running."""
    import daemonize
    pidfile = var.pidfile(name)
    if os.path.exists(pidfile):
        raise Running(name, pidfile)
    util.ensure_dir(os.path.dirname(pidfile))
    # Save original log level to workaround issue with daemonization
    # (see note in _run).
    log_level = log.getEffectiveLevel()

    def run_target():
        _run(f, log, log_level)

    daemonize.Daemonize(app=name,
                        action=run_target,
                        pid=pidfile,
                        keep_fds=_log_fds(log)).start()
def main():
    """CLI entry point: parse docopt args, unlock the database, run the
    keepass-http server (optionally as a daemon)."""
    # avoid: UnboundLocalError: local variable '__doc__' referenced before assignment
    doc_ = __doc__
    kpconf = Conf()
    # Advertise --gui in the usage text only when GUI deps are installed.
    if has_gui_support():
        doc_ += " --gui Use TKinter for a graphical interface"
    # handle arguments
    arguments = docopt.docopt(doc_)
    is_daemon = arguments["--daemon"]
    database_path = arguments["<database_path>"]
    host = arguments["--host"]
    port = arguments["--port"]
    assert port.isdigit()
    loglevel = arguments["--loglevel"]
    gui = arguments.get("--gui", False)
    if gui:
        ui = Conf.UI.GUI
    else:
        ui = Conf.UI.CLI
    kpconf.select_ui(ui)
    kpconf.set_loglevel(loglevel)
    # backend
    backend = backends.BaseBackend.get_by_filepath(database_path)
    kpconf.set_backend(backend)
    # Prompt for the passphrase, allowing up to MAX_TRY_COUNT attempts.
    success = kpconf.get_selected_ui().OpenDatabase.open(MAX_TRY_COUNT)
    if success is False:
        sys.exit("Wrong passphrase after %d attempts" % MAX_TRY_COUNT)
    # config daemon
    run_server = partial(app.run, debug=False, host=host, port=int(port))
    if is_daemon:
        pid_file = os.path.join(kpconf.confdir, "process.pid")
        log.info("Server started as daemon on %s:%s" % (host, port))
        daemon = daemonize.Daemonize(app=APP_NAME, pid=pid_file, action=run_server, keep_fds=get_logging_filehandlers_streams_to_keep())
        daemon.start()
    else:
        log.info("Server started on %s:%s" % (host, port))
        run_server()
def start(
        profile=None,
        configdir=None,
        tempdir=None,
        debug=None,
        restore=None,
        daemon=False):
    """Create and start a Core instance; optionally daemonize it."""
    profiledir = _mkdprofile(profile, configdir)
    inst = Core(profiledir, tempdir, debug, restore)
    inst.start()
    if daemon:
        # mkstemp returns (fd, path); only the path is needed here.
        _, pidfile = mkstemp(
            suffix='.pid',
            prefix='daemon-',
            dir=inst.cachedir)
        daemonize.Daemonize("pyLoad", pidfile, inst.join, logger=inst.log).start()
    return inst  # returns process instance
def start():
    """Schedule the periodic DB backup job and daemonize the main loop."""
    # Keep the log file FD open when daemonizing
    keep_fds = []
    for handler in logger.handlers:
        keep_fds.append(handler.stream.fileno())

    # Register the backup job at the configured interval/unit
    # (e.g. interval=1, unit="day" -> schedule.every(1).day).
    scheduler = schedule.default_scheduler
    job = schedule.Job(current_app.config["DB_BACKUP_INTERVAL"], scheduler)
    unit = current_app.config["DB_BACKUP_TIME_UNIT"]
    getattr(job, unit).do(backup_db_job)

    daemonize.Daemonize(app='webcomicsd',
                        pid=PID_FILE,
                        keep_fds=keep_fds,
                        action=main).start()
def daemon_spawner():
    """Daemonize the worker, preserving every file-backed log handler fd."""
    app.logger.info("Daemonizing worker process with UUID: " + process_uuid)
    fds = []
    for h in app.logger.handlers:
        # Handlers without a real stream (no fileno) are skipped.
        try:
            fds.append(h.stream.fileno())
        except AttributeError:
            app.logger.debug("Incompatible handler")
    worker_daemon = d.Daemonize(
        app="iis_flask_worker-"+process_uuid,
        pid=pid,
        action=task_wrapper,
        keep_fds=fds
    )
    worker_daemon.start()
def main():
    """CLI entry point for the tailon log simulator: start/stop a set of
    fake growing log files, optionally as a daemon."""
    def tuple_or_int(value):
        # "1,2,3" -> [1, 2, 3]; "5" -> 5
        if ',' in value:
            return [int(i) for i in value.split(',')]
        else:
            return int(value)
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--update-msec', default=1000, metavar='msec', type=tuple_or_int)
    arg('--truncate-msec', default=10000, metavar='msec', type=tuple_or_int)
    arg('--rate', default=1, metavar='msec', type=tuple_or_int)
    arg('--daemon', action='store_true')
    arg('--pid', default='/tmp/python-tailon-logsim.pid')
    arg('--seed', default=str(time()))
    arg('action', choices=['start', 'stop'])
    arg('files', nargs=argparse.REMAINDER)
    opts = parser.parse_args()
    opts.files = [os.path.abspath(fn) for fn in opts.files]
    # Print the seed so a run can be reproduced later.
    print('using random seed: %s' % opts.seed)
    seed(opts.seed)
    def run():
        try:
            lf = LogFiles(opts.files, opts.rate, opts.update_msec, opts.truncate_msec)
            lf.start()
            lf.join()
        except KeyboardInterrupt:
            lf.stop()
    # NOTE(review): 'stop' is only honored together with --daemon; a bare
    # 'stop' falls through and does nothing -- confirm intended.
    if opts.daemon:
        import daemonize
        if opts.action == 'start':
            daemon = daemonize.Daemonize(app='tailon-logsim', pid=opts.pid, action=run)
            daemon.start()
        elif opts.action == 'stop':
            # Signal the previously-daemonized instance via its pidfile.
            if os.path.exists(opts.pid):
                pid = open(opts.pid).read().strip()
                os.kill(int(pid), signal.SIGTERM)
    elif opts.action == 'start':
        run()
def main():
    """Parse CLI args, set up logging, and daemonize the fetch loop."""
    parser = argparse.ArgumentParser(description="Collect Mrm's wisdom nonstop.")
    parser.add_argument('-v', '--version', action='version',
                        version='MrMsay %s' % __version__)
    parser.parse_args()

    # logger_init returns the log file descriptor, which must survive the fork.
    fd = logger.logger_init(logfile=LOGFILE, level=logging.DEBUG)
    daemon = daemonize.Daemonize(
        app='mrmd',
        pid=PIDFILE,
        action=fetch_loop,
        keep_fds=[fd],
        logger=logger.logger,
    )
    daemon.start()
def start(profile=None, configdir=None, debug=0, refresh=0, webui=None, rpc=None, update=True, daemon=False):
    """Create and start a Core instance; optionally daemonize it."""
    p = Core(profile, configdir, debug, refresh, webui, rpc, update)
    p.start()
    if daemon:
        # mkstemp returns (fd, path); only the path is needed.
        _, pidfile = tempfile.mkstemp(suffix='.pid', prefix='daemon-', dir=p.tmpdir)
        daemonize.Daemonize("pyLoad", pidfile, p.join, logger=p.log).start()
    return p  #: returns process instance
def __init__(self, name, chromedriver_path='chromedriver'):
    """Initialize worker state and, when configured, prepare (but do not
    start) a Daemonize wrapper around _start_internal."""
    # Setup objects
    self._run_event = threading.Event()
    self._stop_event = threading.Event()
    self._thread = None
    self._chromedriver_path = chromedriver_path
    self._daemon = None
    self._name = name
    self._started = False
    # Parse config
    self._config = self._parse_config()
    if self._config.daemon:
        # Only root can drop to another user/group; otherwise leave both
        # as None so Daemonize keeps the current identity.
        user = self._config.user if os.geteuid() == 0 else None
        group = self._config.group if os.geteuid() == 0 else None
        self._daemon = daemonize.Daemonize(name,
                                           pid=self._config.pidfile,
                                           action=self._start_internal,
                                           user=user,
                                           group=group)
def start(profile=None, configdir=None, debug=0, refresh=0, webui=None, rpc=None, daemon=False):
    """Create and start a Core instance; optionally daemonize it."""
    #: Use virtualenv
    # from .. import setup
    # setup.run_venv()
    p = Core(profile, configdir, debug, refresh, webui, rpc)
    p.start()
    if daemon:
        # mkstemp returns (fd, path); only the path is needed.
        _, pidfile = tempfile.mkstemp(suffix='.pid', prefix='daemon-', dir=p.tmpdir)
        daemonize.Daemonize("pyLoad", pidfile, p.join, logger=p.log).start()
    return p  #: returns process instance
# Script bootstrap: wire up logging, then run the worker (daemonized
# when --daemon was given).
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
keep_fds = None
if args.log:
    # Log to file; its fd must survive the daemon fork.
    handler = logging.FileHandler(args.log, 'a')
    keep_fds = [handler.stream.fileno()]
else:
    handler = logging.StreamHandler()
handler.setFormatter(formatter)
config.logger.addHandler(handler)
# Daemonize chdirs to the script's own directory.
chdir = Path(__file__).parent
if args.daemon:
    # A pidfile is mandatory in daemon mode.
    if not args.pidfile:
        print('PID file is required for daemon mode!')
        exit(1)
    d = daemonize.Daemonize(app='speech_tools_worker', pid=args.pidfile, action=worker.run, keep_fds=keep_fds, user=args.user, group=args.group, chdir=chdir)
    d.start()
else:
    worker.run()
print("could not find configuration file", file=sys.stderr) sys.exit(1) for section in ["theme", "window_theme", "location"]: error_on_missing_section(section, config) latitude = config["location"].get("latitude") longitude = config["location"].get("longitude") signal.signal(signal.SIGUSR1, handle_sigusr1) # Setting last_is_day to not is_daytime() forces an update on app startup. last_is_day = not is_daytime(latitude, longitude) while True: update_if_needed() time.sleep(10 * 60) # 10 minutes (time.sleep arguments are in seconds) if __name__ == "__main__": pidfile = "/run/user/{}/azimutheme.pid".format(os.getuid()) parser = argparse.ArgumentParser( description="change the GTK theme based on current daylight state") parser.add_argument("-d", "--daemonize", dest="daemonize", action="store_true", help="fork into the background") args = parser.parse_args() if args.daemonize: daemon = daemonize.Daemonize(app="azimutheme", pid=pidfile, action=main) daemon.start() else: main()
r = requests.get(BASE_URL, params=args) data = r.json() for blob in data["data"]: img_path = blob["path"] theoretical_path = DOWNLOAD_DIR / img_path.split("/")[-1] if not theoretical_path.exists(): logger.info(f"Downloading {img_path}") img = requests.get(img_path) theoretical_path.write_bytes(img.content) fetched.append(theoretical_path) if len(fetched) >= how_many_to_fetch: break return fetched if __name__ == "__main__": if not DOWNLOAD_DIR.exists(): os.mkdir(DOWNLOAD_DIR) if PIDFILE.exists(): print("Daemon OFF") os.kill(int(PIDFILE.read_text()), signal.SIGINT) else: print("Daemon ON") daemon = daemonize.Daemonize(app="wallpapers", pid=PIDFILE, action=main) daemon.start()