def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--db', type=str, help='SQLite3 DB to write to')
    parser.add_argument('-z', '--zmq', type=str, help='ZMQ to listen for', required=True)
    parser.add_argument('-L', '--log', type=str, help='Log file (will cause it to daemonise)')
    parser.add_argument('-P', '--pidfile', type=str, help='PID file (only used with --log)')
    args = parser.parse_args()

    if args.db is not None:
        db = sqlite3.connect(args.db)
        c = db.cursor()
        c.execute('CREATE VIRTUAL TABLE IF NOT EXISTS pages_fts USING fts4(pid, msg);')
        c.execute('''
            CREATE TABLE IF NOT EXISTS pages(
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                type TEXT,
                chfreq REAL,
                captime INTEGER,
                msg TEXT
            );
        ''')
        c.execute('CREATE INDEX IF NOT EXISTS pages_id ON pages(id)')
        c.execute('CREATE INDEX IF NOT EXISTS pages_captime ON pages(captime)')
        c.execute('''
            CREATE TABLE IF NOT EXISTS pocsag_pages(
                pid INTEGER,
                rate INTEGER,
                address INTEGER,
                func INTEGER,
                ptype TEXT,
                FOREIGN KEY(pid) REFERENCES pages(id)
            );
        ''')
        c.execute('CREATE INDEX IF NOT EXISTS pocsag_pages_id ON pocsag_pages(pid)')
        c.execute('''
            CREATE TABLE IF NOT EXISTS flex_pages(
                pid INTEGER,
                msgtime INTEGER,
                baud INTEGER,
                level INTEGER,
                phaseno CHAR(1),
                cycleno INTEGER,
                frameno INTEGER,
                capcode INTEGER,
                FOREIGN KEY(pid) REFERENCES pages(id)
            );
        ''')
        c.execute('CREATE INDEX IF NOT EXISTS flex_pages_id ON flex_pages(pid)')
        c.execute('''
            CREATE TABLE IF NOT EXISTS frequencies (
                freq REAL,
                name TEXT UNIQUE
            );
        ''')

    # Configure logging
    global logger
    logger = logging.getLogger('zmqlog')
    logger.setLevel(logging.DEBUG)
    if args.log is not None:
        lh = logging.handlers.WatchedFileHandler(args.log)
    else:
        lh = logging.StreamHandler()
    lh.setFormatter(logging.Formatter('%(asctime)s %(name)s:%(levelname)s: %(message)s',
                                      '%Y/%m/%d %H:%M:%S'))
    logger.addHandler(lh)

    # Daemonise if we have a log file
    if args.log is not None:
        pidfile = None
        if args.pidfile is not None:
            pidfile = daemon.pidfile.PIDLockFile(args.pidfile)
        ctxmgr = daemon.DaemonContext(pidfile=pidfile, files_preserve=[lh.stream])
    else:
        ctxmgr = NullContextManager()

    with ctxmgr:
        try:
            dologging(args.zmq, args.db)
        # catch OperationalError before its base class sqlite3.Error,
        # otherwise the more specific handler is unreachable
        except sqlite3.OperationalError as e:
            logger.error("SQL operational error: " + str(e.args[0]))
        except sqlite3.Error as e:
            logger.error("SQL error: " + str(e.args[0]))
        except:
            e = sys.exc_info()[0]
            logger.error('Exception: ' + str(e))
        logger.error('Exiting')
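The NullContextManager used in the non-daemonised branch is not shown in this snippet; a minimal no-op sketch of such a helper (an assumption here, not the project's actual implementation) could look like the following. On Python 3.7+, contextlib.nullcontext() serves the same purpose.

# Hypothetical stand-in for the NullContextManager referenced above:
# a no-op context manager for the non-daemonised code path.
class NullContextManager(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False  # do not suppress exceptions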
def run():
    with daemon.DaemonContext(
            working_directory='/usr/local/games/dr_evil',
            pidfile=lockfile.FileLock('/usr/local/games/dr_evil/dr_evil.pid')):
        do_evil()
options, _ = parser.parse_args()
if options.query_timeout:
    TIMEOUT = float(options.query_timeout)
if options.dns_servers:
    DHOSTS = options.dns_servers.strip(" ,").split(',')
if options.cache:
    LRUCACHE = lrucache(100)

print '>> TCP DNS Proxy, https://github.com/henices/Tcp-DNS-proxy'
print '>> DNS Servers:\n%s' % ('\n'.join(DHOSTS))
print '>> Query Timeout: %f' % (TIMEOUT)
print '>> Enable Cache: %r' % (options.cache)
print '>> Now you can set dns server to 127.0.0.1'

if options.daemon:
    if os.name == 'nt':
        raise Exception("Windows doesn't support daemon process")
    else:
        try:
            import daemon
            print '>>> Run code in daemon process'
        except ImportError:
            print '*** Please install python-daemon'

try:
    with daemon.DaemonContext(detach_process=True):
        main()
except:
    main()
    else:
        logging.debug('pidfile (%s) is not locked', constants.NSLCD_PIDFILE)
        sys.exit(1)

    # normal check for pidfile locked
    if pidfile.is_locked():
        logging.error('daemon may already be active, cannot acquire lock (%s)',
                      constants.NSLCD_PIDFILE)
        sys.exit(1)

    # daemonize
    if debugging or nofork:
        ctx = pidfile
    else:
        ctx = daemon.DaemonContext(
            pidfile=pidfile,
            signal_map={
                signal.SIGTERM: u'terminate',
                signal.SIGINT: u'terminate',
                signal.SIGPIPE: None,
            })

    # start daemon
    with ctx:
        try:
            # start normal logging as configured
            if not debugging:
                for method, level in cfg.logs:
                    if method == 'syslog':
                        handler = MySysLogHandler()
                        handler.setFormatter(
                            MyFormatter('%(prefix)s%(message)s'))
                    else:
                        handler = logging.FileHandler(method, encoding='utf-8')
                        handler.setFormatter(
    return outputs

# main program
# loadconfiguration('/etc/mm3d/mm3d.ini')
# loadenvirchars('/etc/mm3d/envir.ini')
loadconfiguration('/usr/local/etc/mm3d/mm3d.ini')
loadenvirchars('/usr/local/etc/mm3d/envir.ini')
initports()
first = 1
exttemp = 18
prevtemperature = 0
prevhumidity = 0
previnputs = ""
prevoutputs = ""

with daemon.DaemonContext() as context:
    try:
        while True:
            # read input data from sensor
            writetodebuglog("i", "Measuring T/RH.")
            humidity, temperature = Adafruit_DHT.read_retry(sensor, prt_sens)
            if humidity is not None and temperature is not None:
                wrongvalues = 0
            else:
                wrongvalues = 1
                temperature = 18
                humidity = 72
            temperature = round(temperature)
            humidity = round(humidity)
            writetodebuglog("i", "Measure is done.")
            blinkactled()
if option.verbose:
    level = cfg.log.level
else:
    level = logging.ERROR

logging.basicConfig(level=level,
                    format='%(asctime)s %(levelname)s %(message)s',
                    stream=logfile)

# As the default try to run as daemon. Silently degrade to running as a normal
# application if this fails, unless the user explicitly defined what he
# expected with the -a / -d parameter.
try:
    if option.force_app:
        raise ImportError  # Pretend that we couldn't import the daemon lib
    import daemon
except ImportError:
    if option.force_daemon:
        print('Fatal error, could not daemonize process due to missing "daemon" library, '
              'please install the missing dependency and restart the authenticator',
              file=sys.stderr)
        sys.exit(1)
    do_main_program()
else:
    context = daemon.DaemonContext(working_directory=sys.path[0],
                                   stderr=logfile)
    context.__enter__()
    try:
        do_main_program()
    finally:
        context.__exit__(None, None, None)
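The manual __enter__/__exit__ calls above do what a with-statement would do (DaemonContext.__enter__ calls open(), __exit__ calls close()); a sketch of the equivalent form, assuming the surrounding logfile and do_main_program from the snippet:

# Equivalent with-statement form of the manual __enter__/__exit__ above
# (sketch; assumes logfile and do_main_program from the surrounding code).
with daemon.DaemonContext(working_directory=sys.path[0], stderr=logfile):
    do_main_program()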
def ipfsDaemonRun(self):
    log = open("%s/%s" % (self.datadir, "ipfs.log"), 'w+')
    with daemon.DaemonContext(stdout=log, stderr=log):
        subprocess.Popen(self.runCmdList)
if not conf.get('ARCHIVE_SERVER'):
    sys.stderr.write('Archive server settings are missing from config file\n')
    sys.exit(1)

# HubProxy will try to log some stuff, even though we haven't configured our
# logging handlers yet. So we send logs to stderr temporarily here, and
# configure it again below.
log_to_stream(sys.stderr, level=logging.WARNING)
try:
    transfer = Watchdog(conf=conf)
except Exception, ex:
    sys.stderr.write("Error starting beaker-transfer: %s\n" % ex)
    sys.exit(1)

if opts.foreground:
    log_to_stream(sys.stderr, level=logging.DEBUG)
    main_loop(transfer=transfer, conf=conf)
else:
    # See BZ#977269
    transfer.close()
    with daemon.DaemonContext(
            pidfile=pidfile.TimeoutPIDLockFile(pid_file, acquire_timeout=0),
            detach_process=True):
        log_to_syslog('beaker-transfer')
        main_loop(transfer=transfer, conf=conf)

if __name__ == '__main__':
    main()
def main():
    args = parse_argv(sys.argv[1:])

    if args.version:
        print('Sendria %s' % __version__)
        sys.exit(0)

    # Do we just want to stop a running daemon?
    if args.stop:
        logger.get().msg(
            'stopping Sendria',
            debug='enabled' if args.debug else 'disabled',
            pidfile=str(args.pidfile) if args.pidfile else None,
        )
        stop(args.pidfile)
        sys.exit(0)

    logger.get().msg(
        'starting Sendria',
        debug='enabled' if args.debug else 'disabled',
        pidfile=str(args.pidfile) if args.pidfile else None,
        db=str(args.db),
        foreground='true' if args.foreground else 'false',
    )

    # Check if the static folder is writable
    if args.autobuild_assets and not os.access(STATIC_DIR, os.W_OK):
        exit_err('autobuilding assets requires write access to %s' % STATIC_DIR)

    if not args.autobuild_assets and (not ASSETS_DIR.exists() or not list(ASSETS_DIR.glob('*'))):
        exit_err('assets not found. Generate assets using: '
                 'webassets -m sendria.build_assets build', 0)

    daemon_kw = {}

    if args.foreground:
        # Do not detach and keep std streams open
        daemon_kw.update({
            'detach_process': False,
            'stdin': sys.stdin,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
        })

    if args.pidfile:
        if args.pidfile.exists():
            try:
                pid = read_pidfile(args.pidfile)
            except Exception as exc:
                exit_err(f'Cannot read pid file: {exc}', 1)
            if not pid_exists(pid):
                logger.get().msg(
                    'deleting obsolete PID file (process %s does not exist)' % pid,
                    pid=pid)
                args.pidfile.unlink()
        daemon_kw['pidfile'] = TimeoutPIDLockFile(str(args.pidfile), 5)

    # Unload threading module to avoid error on exit (it's loaded by lockfile)
    if 'threading' in sys.modules:
        del sys.modules['threading']

    context = daemon.DaemonContext(**daemon_kw)
    with context:
        loop = asyncio.get_event_loop()
        run_sendria_servers(loop, args)
        loop.run_forever()

    logger.get().msg('stop signal received')
    loop.close()

    logger.get().msg('terminating')
    sys.exit(0)
try:
    os.makedirs(args['data_dir'])
except OSError:
    pass

lockname = os.path.join(args['data_dir'], LOCK_FILE)
lock = lockfile.FileLock(lockname)
if lock.is_locked():
    print '%s is locked! I am probably already running.' % lockname
    print 'If you can find no selfspy process running, it is a stale lock and you can safely remove it.'
    print 'Shutting down.'
    sys.exit(1)

context = daemon.DaemonContext(working_directory=args['data_dir'],
                               pidfile=lock,
                               stderr=sys.stderr)
context.signal_map = {
    signal.SIGTERM: 'terminate',
    signal.SIGHUP: 'terminate'
}

if args['no_text']:
    args['password'] = ""

if args['password'] is None:
    args['password'] = get_password(verify=check_with_encrypter)

encrypter = make_encrypter(args['password'])
def main():
    print('Launching gerritbot')
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'vhc:f',
                                     ['verbose', 'help', 'config'])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))
        usage()
        sys.exit(2)

    verbose = False
    foreground = False
    config_file = '/etc/gerritbot/gerritbot.yaml'
    for (o, a) in opts:
        if o in ('-v', '--verbose'):
            verbose = True
        elif o in ('-h', '--help'):
            usage()
            sys.exit(1)
        elif o in ('-c', '--config'):
            config_file = a
        elif o == '-f':
            foreground = True

    if not os.path.exists(config_file):
        raise Exception('Unable to read config file at %s' % config_file)
    print('Reading config from ' + config_file)
    config = yaml.load(open(config_file))
    gcfg = config['general']

    # read config files if they are not inline
    for filekey, confkey in {
            'channel_config': 'channels',
            'log_config': 'logging',
            'bot_config': 'ircbot',
            'gerrit_config': 'gerrit'}.iteritems():
        if filekey in gcfg and gcfg[filekey] != 'inline':
            config[confkey] = yaml.load(open(gcfg[filekey]))

    logconfig = config['logging']
    if verbose:
        l = logconfig['loggers']
        print("Turning verbosity up to 11")
        maxdebug = {
            'handlers': ['file', 'syslog', 'console'],
            'level': logging.DEBUG,
            'qualname': None
        }
        for k in logconfig['handlers'].keys():
            logconfig['handlers'][k]['level'] = logging.DEBUG
        for qualname in logging.Logger.manager.loggerDict.keys():
            logger = logging.getLogger(qualname)
            logger.handlers = []
            logger.propagate = False
            l[qualname] = maxdebug.copy()
            l[qualname]['qualname'] = qualname
        for qualname in l.keys():
            l[qualname] = maxdebug.copy()
            l[qualname]['qualname'] = qualname
        # l['root']['level'] = logging.CRITICAL
        del l['irc']

    logging.config.dictConfig(logconfig)
    for qualname in logging.Logger.manager.loggerDict.keys():
        print('qualname: ' + qualname)

    global log
    log = logging.getLogger('gerritbot')
    log.propagate = False
    log.debug('Logging initialized for gerritbot')

    if foreground is False:
        pidfile = '/tmp/gerritbot.pid'
        if 'pid' in config['ircbot']:
            pidfile = config['ircbot']['pid']
        log.debug('PID path: ' + pidfile)
        ensure_dir(pidfile)
        pid = pid_file_module.TimeoutPIDLockFile(pidfile, 10)
        log.debug('starting daemonized')
        with daemon.DaemonContext(pidfile=pid):
            _main(config)
    else:
        log.debug('starting in foreground')
        _main(config)
args = parser.parse_args()
private_data_dir = args.private_data_dir
pidfile = os.path.join(private_data_dir, 'pid')

if args.command == 'start':
    # create a file to log stderr in case the daemonized process throws
    # an exception before it gets to `pexpect.spawn`
    stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
    if not os.path.exists(stderr_path):
        os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
    stderr = open(stderr_path, 'w+')

    import daemon
    from daemon.pidfile import TimeoutPIDLockFile
    context = daemon.DaemonContext(
        pidfile=TimeoutPIDLockFile(pidfile),
        stderr=stderr
    )
    with context:
        __run__(private_data_dir)
    sys.exit(0)

try:
    with open(pidfile, 'r') as f:
        pid = int(f.readline())
except IOError:
    sys.exit(1)

if args.command == 'stop':
    try:
        with open(os.path.join(private_data_dir, 'args'), 'r') as args:
            handle_termination(pid, json.load(args), 'bwrap')
    except IOError:
def main():
    global handler_logger

    (opts, args) = parse_arguments(sys.argv[1:])

    # Initialize logger
    lvl = logging.DEBUG if opts.debug else logging.INFO
    logger = logging.getLogger("ganeti.eventd")
    logger.setLevel(lvl)
    formatter = logging.Formatter(
        "%(asctime)s %(module)s[%(process)d] %(levelname)s: %(message)s",
        "%Y-%m-%d %H:%M:%S")
    handler = logging.FileHandler(opts.log_file)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    handler_logger = logger

    # Rename this process so 'ps' output looks like this is a native
    # executable. Cannot separate command-line arguments from the actual
    # name of the executable by NUL bytes, so only show the name of the
    # executable instead.
    # setproctitle.setproctitle("\x00".join(sys.argv))
    setproctitle.setproctitle(sys.argv[0])

    # Create pidfile
    pidf = daemon.pidlockfile.TimeoutPIDLockFile(opts.pid_file, 10)

    # Remove any stale PID files, left behind by previous invocations
    if daemon.runner.is_pidfile_stale(pidf):
        logger.warning("Removing stale PID lock file %s", pidf.path)
        pidf.break_lock()

    # Become a daemon:
    # Redirect stdout and stderr to handler.stream to catch
    # early errors in the daemonization process [e.g., pidfile creation]
    # which will otherwise go to /dev/null.
    daemon_context = daemon.DaemonContext(pidfile=pidf,
                                          umask=022,
                                          stdout=handler.stream,
                                          stderr=handler.stream,
                                          files_preserve=[handler.stream])
    try:
        daemon_context.open()
    except (daemon.pidlockfile.AlreadyLocked, LockTimeout):
        logger.critical("Failed to lock pidfile %s, another instance running?",
                        pidf.path)
        sys.exit(1)

    logger.info("Became a daemon")

    # Catch signals to ensure graceful shutdown
    signal(SIGINT, fatal_signal_handler)
    signal(SIGTERM, fatal_signal_handler)

    # Monitor the Ganeti job queue, create and push notifications
    wm = pyinotify.WatchManager()
    mask = (pyinotify.EventsCodes.ALL_FLAGS["IN_MOVED_TO"] |
            pyinotify.EventsCodes.ALL_FLAGS["IN_CLOSE_WRITE"])

    cluster_name = find_cluster_name()

    handler = JobFileHandler(logger, cluster_name)
    notifier = pyinotify.Notifier(wm, handler)

    try:
        # Fail if adding the inotify() watch fails for any reason
        res = wm.add_watch(constants.QUEUE_DIR, mask)
        if res[constants.QUEUE_DIR] < 0:
            raise Exception("pyinotify add_watch returned negative descriptor")

        logger.info("Now watching %s of %s" % (constants.QUEUE_DIR, cluster_name))

        while True:  # loop forever
            # process the queue of events as explained above
            notifier.process_events()
            if notifier.check_events():
                # read notified events and enqueue them
                notifier.read_events()
    except SystemExit:
        logger.info("SystemExit")
    except:
        logger.exception("Caught exception, terminating")
    finally:
        # destroy the inotify's instance on this interrupt (stop monitoring)
        notifier.stop()
        raise
" org.apache.phoenix.tracingwebapp.http.Main " + args if command == 'start': if not daemon_supported: print >> sys.stderr, "daemon mode not supported on this platform" sys.exit(-1) # run in the background d = os.path.dirname(out_file_path) if not os.path.exists(d): os.makedirs(d) with open(out_file_path, 'a+') as out: context = daemon.DaemonContext( pidfile=daemon.PidFile( pid_file_path, 'Trace Server already running, PID file found: %s' % pid_file_path), stdout=out, stderr=out, ) print 'starting Trace Server, logging to %s' % log_file_path with context: # this block is the main() for the forked daemon process child = None cmd = java_cmd % { 'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': phoenix_log_dir, 'log_file': phoenix_log_file } # notify the child when we're killed
def main():
    _parser = argparse.ArgumentParser()
    _parser.add_argument("-d", dest="daemonize", default=False, action="store_true",
                         help="daemonize process [%(default)s]")
    _parser.add_argument("--progname", default="", type=str,
                         help="program name for sys.argv [%(default)s]")
    _parser.add_argument("--modname", default="", type=str,
                         help="python module to load [%(default)s]")
    _parser.add_argument("--main-name", default="main", type=str,
                         help="name of main function [%(default)s]")
    _parser.add_argument("--exename", default="", type=str,
                         help="exe to start [%(default)s]")
    _parser.add_argument("--proctitle", default="", type=str,
                         help="process title to set [%(default)s]")
    _parser.add_argument("--user", type=str, default="root",
                         help="user to use for the process [%(default)s]")
    _parser.add_argument("--group", type=str, default="root",
                         help="group to use for the process [%(default)s]")
    _parser.add_argument("--groups", type=str, default="",
                         help="comma-separated list of groups for the process [%(default)s]")
    _parser.add_argument("--nice", type=int, default=0,
                         help="set nice level of new process [%(default)d]")
    _parser.add_argument("--debug", default=False, action="store_true",
                         help="enable debug mode (modify sys.path), [%(default)s]")
    _parser.add_argument("extra_args", nargs="*",
                         help="extra arguments for module [%(default)s]")
    opts = _parser.parse_args()

    if opts.exename:
        _mode = "exe"
        _args = [opts.exename]
    else:
        _mode = "python"
        _args = [opts.progname]

    if opts.user != "root":
        uid = get_uid_from_name(opts.user)[0]
    else:
        uid = 0
    if opts.group != "root":
        gid = get_gid_from_name(opts.group)[0]
    else:
        gid = 0
    if opts.groups.strip():
        gids = [get_gid_from_name(_gid)[0] for _gid in opts.groups.strip().split(",")]
    else:
        gids = []

    _daemon_context = daemon.DaemonContext(
        detach_process=True,
        uid=uid,
        gid=gid,
        # gids=gids,  # valid with python-daemonize-2.1.2
        # init_groups=False
    )
    if opts.nice:
        os.nice(opts.nice)

    if opts.daemonize:
        try:
            _daemon_context.open()
        except:
            # catastrophe
            from initat.tools import logging_tools, process_tools
            for _line in process_tools.icswExceptionInfo().log_lines:
                logging_tools.my_syslog(_line, logging_tools.LOG_LEVEL_ERROR)
    else:
        if gids:
            os.setgroups(gids)
        if uid or gid:
            os.setgid(gid)
            os.setuid(uid)

    if opts.extra_args:
        _args.extend(opts.extra_args)

    if _mode == "python":
        os.environ["LC_LANG"] = "en_us.UTF_8"
        # python path
        if opts.debug:
            # check via commandline args, do NOT import anything below init.at here
            abs_path = os.path.dirname(__file__)
            abs_path = os.path.split(os.path.split(abs_path)[0])[0]
            sys.path.insert(0, abs_path)
        sys.argv = _args
        setproctitle.setproctitle(opts.proctitle)
        main_module = importlib.import_module(opts.modname)
        if opts.daemonize:
            # redirect IO-streams
            from initat.logging_server.constants import icswLogHandleTypes
            from initat.tools.io_stream_helper import icswIOStream
            sys.stdout = icswIOStream(icswLogHandleTypes.log_py)
            sys.stderr = icswIOStream(icswLogHandleTypes.err_py)
        getattr(main_module, opts.main_name)()  # was: main_module.main()
    else:
        # path for standard exe (munge, redis)
        setproctitle.setproctitle(opts.proctitle)
        os.execv(_args[0], _args)
def execute(self, *args, **options):
    """
    Takes the options and starts a daemon context from them.

    Example::

        python manage.py linkconsumer --pidfile=/var/run/cb_link.pid
            --stdout=/var/log/cb/links.out --stderr=/var/log/cb/links.err

    """
    # print 20130610, 'execute', __file__
    # print options

    if daemon is not None:
        context = daemon.DaemonContext()

        context.chroot_directory = self.get_option_value(options, 'chroot_directory')
        context.working_directory = self.get_option_value(options, 'working_directory', '/')
        context.umask = self.get_option_value(options, 'umask', 0)
        context.detach_process = self.get_option_value(options, 'detach_process')
        context.prevent_core = self.get_option_value(options, 'prevent_core', True)

        if self.preserve_loggers:
            context.files_preserve = get_logger_files(self.preserve_loggers)

        # Get file objects
        # stdin = self.get_option_value(options, 'stdin')
        stdin = options.pop('stdin', None)
        if stdin is not None:
            options['stdin'] = context.stdin = open(stdin, "r")

        # stdout = self.get_option_value(options, 'stdout')
        stdout = options.pop('stdout', None)
        if stdout is not None:
            options['stdout'] = context.stdout = open(stdout, "a+")

        # stderr = self.get_option_value(options, 'stderr')
        stderr = options.pop('stderr', None)
        if stderr is not None:
            self.stderr = options['stderr'] = context.stderr = open(stderr, "a+")
            # self.stderr is needed in case there is an exception during
            # execute. Django then would try to write to sys.stderr which
            # is None because we are a daemon

        # Make pid lock file
        pidfile = self.get_option_value(options, 'pidfile')
        if pidfile is not None:
            # context.pidfile = pidlockfile.PIDLockFile(pidfile)
            context.pidfile = pidlockfile.TimeoutPIDLockFile(pidfile, 0)

        uid = self.get_option_value(options, 'uid')
        if uid is not None:
            context.uid = int(uid)

        gid = self.get_option_value(options, 'gid')
        if gid is not None:
            context.gid = int(gid)

        context.open()

        # Django 1.5.1 needs them:
        # for k in ('stdout', 'stderr'):
        #     options[k] = getattr(context, k, None)

    # self.handle_daemon(*args, **options)
    BaseCommand.execute(self, *args, **options)
def run(self): """Startup the processing and go!""" self.terminate = threading.Event() atexit.register(self.shutdown) self.context = daemon.DaemonContext(detach_process=True) iter = 0 if Bcfg2.Options.setup.daemon: self.logger.debug("Daemonizing") self.context.pidfile = TimeoutPIDLockFile( Bcfg2.Options.setup.daemon, acquire_timeout=5) # Attempt to ensure lockfile is able to be created and not stale try: self.context.pidfile.acquire() except LockFailed: self.logger.error("Failed to daemonize: %s" % sys.exc_info()[1]) self.shutdown() return except LockTimeout: try: # attempt to break the lock os.kill(self.context.pidfile.read_pid(), 0) except (OSError, TypeError): # No process with locked PID self.context.pidfile.break_lock() else: self.logger.error("Failed to daemonize: " "Failed to acquire lock on %s" % Bcfg2.Options.setup.daemon) self.shutdown() return else: self.context.pidfile.release() self.context.open() self.logger.info("Starting daemon") self.transport.start_monitor(self) while not self.terminate.isSet(): try: interaction = self.transport.fetch() if not interaction: continue if self.semaphore: self.semaphore.acquire() store_thread = ReportingStoreThread(interaction, self.storage, semaphore=self.semaphore) store_thread.start() self.children.append(store_thread) iter += 1 if iter >= self.cleanup_threshold: self.reap_children() iter = 0 except (SystemExit, KeyboardInterrupt): self.logger.info("Shutting down") self.shutdown() except: self.logger.error("Unhandled exception in main loop %s" % sys.exc_info()[1])
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')
    parser.add_argument('--version', action='version', version=VERSION)
    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])
    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc)')
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")
    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")
    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")
    parser.add_argument("-i", "--ident", default=uuid4(),
                        help="An identifier that will be used when generating the "
                             "artifacts directory and can be used to uniquely identify a playbook run")
    parser.add_argument("--rotate-artifacts", default=0, type=int,
                        help="Automatically clean up old artifact directories after a given "
                             "number has been created, the default is 0 which disables rotation")
    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")
    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")
    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional path for the artifact root directory, "
                             "by default it is located inside the private data dir")
    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")
    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")
    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")
    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable Runner debug output logging")
    parser.add_argument("--logfile",
                        help="Log output messages to a file")
    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:
            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=')
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json,
                          ignore_logging=False,
                          rotate_artifacts=args.rotate_artifacts)
            if args.artifact_dir:
                kwargs['artifact_dir'] = args.artifact_dir

            project_path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
            project_exists = os.path.exists(project_path)

            env_path = os.path.join(args.private_data_dir, 'env')
            env_exists = os.path.exists(env_path)

            envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
            envvars_exists = os.path.exists(envvars_path)

            if args.cmdline:
                kwargs['cmdline'] = args.cmdline

            playbook = None
            tmpvars = None

            rc = 255
            errmsg = None

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), project_path, filename)
                kwargs['playbook'] = playbook
                output.debug('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(
                        os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    if not os.path.exists(inventory_file):
                        raise AnsibleRunnerException('location specified by --inventory does not exist')
                    kwargs['inventory'] = inventory_file
                    output.debug('using inventory file %s' % inventory_file)

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                envvars = {}
                if envvars_exists:
                    with open(envvars_path, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)

                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            except AnsibleRunnerException as exc:
                errmsg = str(exc)

            finally:
                if not project_exists and os.path.exists(project_path):
                    logger.debug('removing dynamically generated project folder')
                    shutil.rmtree(project_path)
                elif playbook and os.path.isfile(playbook):
                    logger.debug('removing dynamically generated playbook')
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(envvars_path, 'wb') as f:
                        f.write(tmpvars)
                elif not envvars_exists and os.path.exists(envvars_path):
                    logger.debug('removing dynamically generated envvars folder')
                    os.remove(envvars_path)

                # since ansible-runner created the env folder, remove it
                if not env_exists and os.path.exists(env_path):
                    logger.debug('removing dynamically generated env folder')
                    shutil.rmtree(env_path)

            if errmsg:
                print('ansible-runner: ERROR: %s' % errmsg)
            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               quiet=args.quiet,
                               rotate_artifacts=args.rotate_artifacts,
                               ignore_logging=False,
                               json_mode=args.json)
            if args.hosts is not None:
                run_options.update(inventory=args.hosts)
            if args.cmdline:
                run_options['cmdline'] = args.cmdline
            if args.artifact_dir:
                # was kwargs['artifact_dir'], which is undefined on this
                # code path; the option belongs in run_options
                run_options['artifact_dir'] = args.artifact_dir

            res = run(**run_options)
        sys.exit(res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid, json.load(args), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
def main(device_type):
    """Run ssh-agent using given hardware client factory."""
    args = create_agent_parser(device_type=device_type).parse_args()
    util.setup_logging(verbosity=args.verbose, filename=args.log_file)

    public_keys = None
    filename = None
    if args.identity.startswith('/'):
        filename = args.identity
        contents = open(filename, 'rb').read().decode('utf-8')
        # Allow loading previously exported SSH public keys
        if filename.endswith('.pub'):
            public_keys = list(import_public_keys(contents))
        identities = list(parse_config(contents))
    else:
        identities = [
            device.interface.Identity(identity_str=args.identity,
                                      curve_name=args.ecdsa_curve_name)
        ]

    for index, identity in enumerate(identities):
        identity.identity_dict['proto'] = 'ssh'
        log.info('identity #%d: %s', index, identity.to_string())

    # override default PIN/passphrase entry tools (relevant for TREZOR/Keepkey):
    device_type.ui = device.ui.UI(device_type=device_type, config=vars(args))

    conn = JustInTimeConnection(
        conn_factory=lambda: client.Client(device_type()),
        identities=identities, public_keys=public_keys)

    sock_path = _get_sock_path(args)
    command = args.command
    context = _dummy_context()
    if args.connect:
        command = ['ssh'] + ssh_args(conn) + args.command
    elif args.mosh:
        command = ['mosh'] + mosh_args(conn) + args.command
    elif args.daemonize:
        out = 'SSH_AUTH_SOCK={0}; export SSH_AUTH_SOCK;\n'.format(sock_path)
        sys.stdout.write(out)
        sys.stdout.flush()
        context = daemon.DaemonContext()
        log.info('running the agent as a daemon on %s', sock_path)
    elif args.foreground:
        log.info('running the agent on %s', sock_path)

    use_shell = bool(args.shell)
    if use_shell:
        command = os.environ['SHELL']
        sys.stdin.close()

    if command or args.daemonize or args.foreground:
        with context:
            return run_server(conn=conn, command=command, sock_path=sock_path,
                              debug=args.debug, timeout=args.timeout)
    else:
        for pk in conn.public_keys():
            sys.stdout.write(pk)
        return 0  # success exit code
if newpid == 0:
    print("Mining monitor started.")
    sys.exit(0)

try:
    # if 'fork failed: 1 (Operation not permitted)', it's just due to python's
    # exception when you are already the group-leader ...
    os.setsid()
except OSError:
    pass
os.umask(0)

### Ref: https://dpbl.wordpress.com/2017/02/12/a-tutorial-on-python-daemon/
with daemon.DaemonContext(chroot_directory=None,
                          working_directory='/var/mining',
                          files_preserve=[logFH.stream],
                          signal_map={signal.SIGTERM: shutdown,
                                      signal.SIGTSTP: shutdown},
                          pidfile=lockfile.LockFile(config.PIDFILE)):
    pid = os.getpid()
    with open(config.PIDFILE, 'w') as fh:
        fh.write(str(pid))

    schedule.every(5).minutes.do(monitor_miners)
    schedule.every().day.at("4:30").do(cap_diff)
    logger.info("Miners Monitor started, pid=%i", pid)

    while True:
        schedule.run_pending()
        time.sleep(1)
def start_daemon():
    import daemon
    with daemon.DaemonContext():
        watch()
        # the current next_val
        previous_values[pool.name] = next_val.copy()
        with lock:
            zpoolio_one = rv.copy()
            zpoolio_all = previous_values.copy()

if __name__ == '__main__':
    pidfile = PidFile(PIDFILE)
    context = daemon.DaemonContext(
        working_directory='/root',
        umask=0o002,
        pidfile=pidfile,
        stdout=sys.stdout,
        stdin=sys.stdin,
        stderr=sys.stderr,
        detach_process=True,
        signal_map={signal.SIGTERM: cust_terminate},
    )
    with context:
        setproctitle('freenas-snmpd')
        loop_thread = Loop_Sockserver()
        loop_thread.start()
        z1 = zilOneWorker()
        z5 = zilFiveWorker()
        z10 = zilTenWorker()
        zio = zpoolioWorker()
        z1.start()
        z5.start()
        GPIO.output(self.in1_right, GPIO.LOW)
        GPIO.output(self.in2_right, GPIO.HIGH)
        self.p_left.ChangeDutyCycle(75)
        self.p_right.ChangeDutyCycle(75)

    def joystick(self, right, left):
        if right >= 0:
            GPIO.output(self.in1_right, GPIO.LOW)
            GPIO.output(self.in2_right, GPIO.HIGH)
        else:
            GPIO.output(self.in1_right, GPIO.HIGH)
            # was in1_right, which would immediately undo the line above;
            # the reverse direction drives in2 low, mirroring the left motor
            GPIO.output(self.in2_right, GPIO.LOW)
        if left >= 0:
            GPIO.output(self.in1_left, GPIO.LOW)
            GPIO.output(self.in2_left, GPIO.HIGH)
        else:
            GPIO.output(self.in1_left, GPIO.HIGH)
            GPIO.output(self.in2_left, GPIO.LOW)
        self.p_left.ChangeDutyCycle(abs(left))
        self.p_right.ChangeDutyCycle(abs(right))

    def exit(self):
        GPIO.cleanup()
        print("GPIO Clean up")

speedy = Speedy()

if __name__ == '__main__':
    with daemon.DaemonContext():
        speedy.run()
print >> sys.stderr, "daemon mode not supported on this platform" sys.exit(-1) if filename == '': print >> sys.stderr, "Need config file as input" sys.exit(-1) # run in the background d = os.path.dirname(out_file_path) if not os.path.exists(d): os.makedirs(d) with open(out_file_path, 'a+') as out: context = daemon.DaemonContext( pidfile=pidfile.PIDLockFile(pid_file_path), stdout=out, stderr=out, ) with context: # this block is the main() for the forked daemon process child = None cmd = java_cmd % {'java': java} print >> sys.stderr, cmd # notify the child when we're killed def handler(signum, frame): if child: child.send_signal(signum) sys.exit(0) signal.signal(signal.SIGTERM, handler)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="YOGA")
    parser.add_argument('--pid', default='/var/run/yoga.pid')
    parser.add_argument('--log', default='/var/log/yoga/')
    parser.add_argument('--port', default=5005)
    parser.add_argument('--host', default='0.0.0.0')
    parser.add_argument('--debug', default=False)
    parser.add_argument('action', default="foreground")
    args = parser.parse_args()

    if args.action == "start":
        with daemon.DaemonContext(
                working_directory=".",
                stdout=open(os.path.join(args.log, "out.log"), "w"),
                stderr=open(os.path.join(args.log, "err.log"), "w"),
                pidfile=pidfile.TimeoutPIDLockFile(args.pid)):
            app.debug = args.debug
            app.run(host=args.host, port=args.port)
    elif args.action == "status":
        if os.path.isfile(args.pid):
            with open(args.pid, "r") as f:
                pid = int(f.read())
            p = psutil.Process(pid)
            status = "running (pid: %i, name: %s, cmd: %s)" % (
                pid, p.name(), p.cmdline())
        else:
            status = "not running (reason: PID file not found!)"
        print("status: %s" % (status))
def main():
    global SETUID_USER, SETGID_GROUP
    options = parse_args()
    SETUID_USER = options.user
    SETGID_GROUP = options.group
    root = desktop.lib.paths.get_run_root()
    log_dir = os.path.join(root, options.log_dir)

    if options.show_supervisees:
        for name, supervisee in get_supervisees().iteritems():
            if name not in options.supervisee_exclusions:
                print(name)
        sys.exit(0)

    # Let our children know
    os.environ['DESKTOP_LOG_DIR'] = log_dir

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    setup_user_info()

    pid_file = os.path.abspath(os.path.join(root, options.pid_file))
    pidfile_context = TimeOutPIDLockFile(pid_file, LOCKFILE_TIMEOUT)

    existing_pid = pidfile_context.read_pid()
    if existing_pid:
        cmdline = get_pid_cmdline(existing_pid)
        if not cmdline.strip():
            # pid is not actually running
            pidfile_context.break_lock()
        else:
            LOG.error("Pid file %s indicates that Hue is already running (pid %d)" %
                      (pid_file, existing_pid))
            sys.exit(1)
    elif pidfile_context.is_locked():
        # If there's no pidfile but there is a lock, it's a strange situation,
        # but we should break the lock because it doesn't seem to be actually running
        logging.warn("No existing pid file, but lock exists. Breaking lock.")
        pidfile_context.break_lock()

    if options.daemonize:
        outfile = file(os.path.join(log_dir, 'supervisor.out'), 'a+', 0)
        context = daemon.DaemonContext(
            working_directory=root,
            pidfile=pidfile_context,
            stdout=outfile,
            stderr=outfile,
        )
        context.signal_map = {
            signal.SIGTERM: sig_handler,
        }
        context.open()
    os.umask(022)

    # Log initialization must come after daemonization, which closes all open
    # files. Log statements before this point go to stderr.
    _init_log(log_dir)

    sups = []
    try:
        for name, supervisee in get_supervisees().iteritems():
            if name in options.supervisee_exclusions:
                continue

            if supervisee.drop_root:
                preexec_fn = drop_privileges
            else:
                preexec_fn = None

            if options.daemonize:
                log_stdout = file(os.path.join(log_dir, name + '.out'), 'a+', 0)
                log_stderr = log_stdout
            else:
                # Passing None to subprocess.Popen later makes the subprocess
                # inherit the standard fds from the supervisor
                log_stdout = None
                log_stderr = None
            sup = Supervisor(supervisee.cmdv,
                             stdout=log_stdout,
                             stderr=log_stderr,
                             preexec_fn=preexec_fn)
            sup.start()
            sups.append(sup)

        wait_loop(sups, options)
    except MyBaseException, ex:
        LOG.exception("Exception in supervisor main loop")
        shutdown(sups)  # shutdown() exits the process
# exit main routine and program

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Split sFlow data based on destination AS number")
    parser.add_argument("-c", "--configfile", help="Configuration file")
    parser.add_argument("-d", "--nodaemon", default=False, action='store_true',
                        help="Do not enter daemon mode")
    parser.add_argument("-v", "--verbose", default=False,
                        help="Show action of playlist player")
    # (options, args) = parser.parse_args()
    options = parser.parse_args()

    if options.configfile is not None:
        config['configfile'] = options.configfile

    cfg = read_config(config, config['configfile'], 'common')

    fileout = open(cfg['outfile'], "w")
    if not options.nodaemon:
        with daemon.DaemonContext(stderr=fileout, stdout=fileout):
            mainroutine()
        fileout.close()
    else:
        mainroutine()
def main(): """ Runs MQTT loop to publish LifeSOS states. """ # Display application header print("{} v{} - {}\n".format(PROJECT_NAME, PROJECT_VERSION, PROJECT_DESCRIPTION)) # Parse command line arguments parser = argparse.ArgumentParser(prog="{}".format(PROJECT_NAME)) group = parser.add_mutually_exclusive_group() group.add_argument('-e', '--devices', help="list devices enrolled on base unit and exit", action='store_true') parser.add_argument('-v', '--verbose', help="display all logging output.", action='store_true') group.add_argument( '-d', '--daemon', help="put the application into the background after starting", action='store_true') parser.add_argument( '-w', '--workdir', help="work directory used to store config, log and pid files " "(default: %(default)s)", default=DEFAULT_WORKDIR) parser.add_argument('-c', '--configfile', help="configuration file name (default: %(default)s)", default=DEFAULT_CONFIGFILE) parser.add_argument( '-l', '--logfile', help="if specified, will write to a daily rolling log file " "(default: %(default)s)", nargs='?', const=DEFAULT_LOGFILE) parser.add_argument( '-p', '--pidfile', help="if specified, file will be create to record the process ID and " "is used for locking (default: %(default)s)", nargs='?', const=DEFAULT_PIDFILE) args = parser.parse_args() # Change to the work directory; create if needed workdir = os.path.expanduser(os.path.expandvars(args.workdir)) if not os.path.isdir(workdir): os.mkdir(workdir) os.chdir(workdir) # Create log handlers; attach daily rolling log file handler if needed handlers = [logging.StreamHandler()] logfile_handler = None if args.logfile: logfile = os.path.expanduser(os.path.expandvars(args.logfile)) logfile_handler = logging.handlers.TimedRotatingFileHandler( logfile, when='midnight', backupCount=5) handlers.append(logfile_handler) # Configure logger format, and set Debug level if verbose arg specified logging.basicConfig( format= "%(asctime)s %(levelname)-5s (%(threadName)s) [%(name)s] %(message)s", datefmt='%Y-%m-%d %H:%M:%S', level=None if not args.verbose else logging.DEBUG, handlers=handlers) # The python-daemin library isn't compatible with Windows if args.daemon and os.name == 'nt': _LOGGER.error("Daemon is not supported on Windows") sys.exit(EX_CONFIG) # Load the configuration file, or create default if none exists config = Config.load(args.configfile) if not config: sys.exit(EX_CONFIG) elif config.is_default: print("\nA default configuration file has been created:\n{}\n\n" "Please edit any settings as needed then restart.".format( os.path.abspath(args.configfile))) sys.exit(EX_CONFIG) # Apply the config settings to logger system _apply_logger_config(config, args) # List devices only and exit if args.devices: _list_devices(config) sys.exit(EX_OK) # Run the translator if args.daemon: import daemon from daemon.pidfile import TimeoutPIDLockFile print("Starting daemon.") with daemon.DaemonContext( working_directory=workdir, stderr=None if not args.verbose else sys.stderr, pidfile=None if not args.pidfile else \ TimeoutPIDLockFile(args.pidfile, acquire_timeout=-1), files_preserve=[None if not logfile_handler else \ logfile_handler.stream.fileno()], ): _run_translator(config) else: _run_translator(config)
def run(self, handle_opts=True):
    if handle_opts:
        opts = self.handle_opts()
        errors = self.config.merge_opts(opts)
        if errors:
            self.die(errors)
        if opts.gen_config:
            self.config.write()
            sys.exit(0)
        if opts.config_path:
            errors = self.config.read_file(opts.config_path)
            if errors:
                self.die(errors)
    (uid, gid) = self.get_uid_gid()
    self.uid = uid
    self.gid = gid

    if self.config.logging_method == 'file':
        self._check_create_log_file(uid, gid)
        try:
            self.log_file = open(self.config.log_file, 'a')
            self.on_stop(lambda: self.log_file.flush())
            self.on_stop(lambda: self.log_file.close())
            self.on_hup(lambda: self._reopen_log_files())
        except IOError:
            self.die("Could not access log file %s" % self.config.log_file)
        self._log_fmt = '\t'.join(
            ('fakemtpd', '%(asctime)s', socket.gethostname(), '%(process)s',
             '%(name)s', '%(levelname)s', '%(message)s'))
    else:
        self.log_file = None
        if self.config.logging_method == 'syslog':
            self._log_fmt = 'fakemtpd[%(process)d]: %(message)s'
        else:
            self._log_fmt = '\t'.join(
                ('fakemtpd', '%(asctime)s', socket.gethostname(), '%(process)s',
                 '%(name)s', '%(levelname)s', '%(message)s'))

    if self.config.pid_file:
        pidfile = BetterLockfile(os.path.realpath(self.config.pid_file))
        try:
            pidfile.acquire()
        except lockfile.AlreadyLocked:
            self.die("%s is already locked; another instance running?" %
                     self.config.pid_file)
        pidfile.release()
    else:
        pidfile = None

    # Do this before daemonizing so that the user can see any errors
    # that may occur
    sock = self.bind()
    self.config.merge_sock(sock)

    if self.config.daemonize:
        d = daemon.DaemonContext(
            files_preserve=[pidfile.file, self.log_file, sock],
            pidfile=pidfile,
            stdout=self.log_file,
            stderr=self.log_file)
        self.on_stop_user(d.close)
        d.open()
    elif self.config.log_file:
        os.dup2(self.log_file.fileno(), sys.stdout.fileno())
        os.dup2(self.log_file.fileno(), sys.stderr.fileno())

    if self.config.pid_file:
        self.on_stop_user(pidfile.destroy)

    signal.signal(signal.SIGINT, lambda signum, frame: self._signal_stop())
    signal.signal(signal.SIGTERM, lambda signum, frame: self._signal_stop())
    signal.signal(signal.SIGHUP, lambda signum, frame: self._signal_hup())

    # This needs to happen after daemonization
    self._setup_logging()
    logging.info("Bound on port %d", self.config.port)

    if pidfile:
        print >> pidfile.file, os.getpid()
        pidfile.file.flush()

    io_loop = self.create_loop(sock)
    self.maybe_drop_privs()
    self.on_stop(io_loop.stop)
    logging.getLogger().handlers[0].flush()
    self._start(io_loop)
def run():
    with daemon.DaemonContext():
        do_something()
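Taken together, the examples above keep combining the same few DaemonContext ingredients: a pidfile for single-instance locking, redirected std streams, files_preserve for handles that must survive daemonisation, and a signal_map for clean shutdown. A minimal self-contained sketch of that recurring pattern (hypothetical paths and a trivial main loop, not taken from any of the sources above):

import signal
import sys
import time

import daemon
from daemon.pidfile import TimeoutPIDLockFile


def shutdown(signum, frame):
    # invoked by python-daemon via signal_map on SIGTERM
    sys.exit(0)


def run():
    # placeholder main loop
    while True:
        time.sleep(60)


log = open('/tmp/mydaemon.log', 'a+')
with daemon.DaemonContext(
        working_directory='/tmp',
        pidfile=TimeoutPIDLockFile('/tmp/mydaemon.pid', acquire_timeout=5),
        stdout=log,
        stderr=log,
        files_preserve=[log],  # keep the log handle open across the fork
        signal_map={signal.SIGTERM: shutdown}):
    run()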