def main():
    """Entry point for the libra node-management service.

    Parses common options, configures logging, then runs the ``Server``
    either in the foreground or detached inside a daemon context.

    Returns:
        int: 0 on normal shutdown.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')

    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    server = Server()

    if not CONF['daemon']:
        # Foreground mode: run directly and return.
        server.main()
        return 0

    # Daemon mode: take (or reclaim) the pid lock before detaching.
    pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['mgm']['pid'], 10)
    if daemon.runner.is_pidfile_stale(pidfile):
        pidfile.break_lock()
    preserved = get_descriptors()
    ctx = daemon.DaemonContext(
        working_directory='/',
        umask=0o022,
        pidfile=pidfile,
        files_preserve=preserved,
    )
    # Drop privileges when a user/group is configured.
    if CONF['user']:
        ctx.uid = pwd.getpwnam(CONF['user']).pw_uid
    if CONF['group']:
        ctx.gid = grp.getgrnam(CONF['group']).gr_gid
    ctx.open()
    server.main()
    return 0
def main():
    """Run the libra management service, optionally as a daemon.

    Returns:
        int: always 0 once the server's main loop finishes.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')
    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    server = Server()
    if not CONF['daemon']:
        server.main()
    else:
        # Reclaim a stale pid lock left by a crashed instance, if any.
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['mgm']['pid'], 10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        context = daemon.DaemonContext(working_directory='/',
                                       umask=0o022,
                                       pidfile=pidfile,
                                       files_preserve=get_descriptors())
        # Optionally switch to an unprivileged user/group before detaching.
        if CONF['user']:
            context.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            context.gid = grp.getgrnam(CONF['group']).gr_gid
        context.open()
        server.main()
    return 0
def run(sleep_interval): pidfile = daemon.pidfile.PIDLockFile(path="/tmp/scheduler_daemon.pid") if pidfile.is_locked(): print "Existing lock file found" try: os.kill(pidfile.read_pid(), signal.SIG_DFL) print "An instance of scheduler daemon is already running. If you wish to restart, use the 'restart' " \ "command" return except: pidfile.break_lock() now_seconds = str(time.time()) stdout = open("/tmp/scheduler_daemon_%s.log" % now_seconds, "w+") stderr = open("/tmp/scheduler_daemon_error_%s.log" % now_seconds, "w+") print "Running scheduler daemon with refresh interval of %s seconds" % sleep_interval daemon_context = daemon.DaemonContext(stdout=stdout, stderr=stderr, detach_process=True, pidfile=pidfile, working_directory=os.getcwd()) with daemon_context: do_work(sleep_interval)
def _run_swarm_manager(args):
    """Resolve swarm-manager configuration and run it, daemonized or not."""
    containers_config_file = _containers_config_file(args)
    containers_conf = parse_containers_config(containers_config_file)
    container_conf = _container_conf(containers_conf, args.swarm)
    swarm_manager_conf = _swarm_manager_conf(container_conf)
    _configure_logging(args, swarm_manager_conf)
    docker_interface = build_container_interfaces(
        containers_config_file,
        containers_conf=containers_conf)[args.swarm]
    pidfile = _swarm_manager_pidfile(swarm_manager_conf)

    if not args.foreground:
        # Background mode: the daemon helper takes over pidfile handling.
        _swarm_manager_daemon(pidfile, swarm_manager_conf['log_file'],
                              swarm_manager_conf, docker_interface)
        return

    # Foreground mode: idle-termination would kill an interactive session.
    if swarm_manager_conf['terminate_when_idle']:
        log.info('running in the foreground, disabling automatic swarm '
                 'manager termination')
        swarm_manager_conf['terminate_when_idle'] = False
    else:
        log.info("running in the foreground")

    try:
        pidfile.acquire()
    except lockfile.AlreadyLocked:
        other_pid = pidfile.read_pid()
        try:
            # Signal 0 probes for the process without affecting it.
            os.kill(other_pid, 0)
            log.warning("swarm manager is already running in pid %s",
                        other_pid)
            return
        except OSError:
            log.warning("removing stale lockfile: %s", pidfile.path)
            pidfile.break_lock()
            pidfile.acquire()

    try:
        _swarm_manager(swarm_manager_conf, docker_interface)
    finally:
        pidfile.release()
def start_daemon():
    """Start the EyeLink (elda) logging daemon.

    Reads ``./config.cfg``, wires up rotating-file and stream logging,
    guards against a second instance via a PID lock file, then detaches
    and logs test messages in an endless loop.
    """
    # NOTE (translated): configuration should eventually be handled via
    # globals.
    config = configparser.ConfigParser()
    config.read('./config.cfg')
    cfg_server = config['SERVER_INFO']  # currently unused, kept for parity
    cfg_default = config['DEFAULT_INFO']

    # make logger instance
    logger = logging.getLogger("DA_daemonLog")

    # make formatter
    formatter = logging.Formatter(
        '[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')

    # make handler to output Log for stream and file
    fileMaxByte = 1024 * 1024 * 100  # 100 MB
    fileHandler = logging.handlers.RotatingFileHandler(
        cfg_default['test_path'], maxBytes=fileMaxByte, backupCount=10)
    # fileHandler = logging.FileHandler(cfg_default['logging_path'])
    streamHandler = logging.StreamHandler()

    # specify formatter to each handler
    fileHandler.setFormatter(formatter)
    streamHandler.setFormatter(formatter)

    # attach stream and file handler to logger instance
    logger.addHandler(fileHandler)
    logger.addHandler(streamHandler)

    pidfile = PIDLockFile(cfg_default['test_pid_path'])
    try:
        pidfile.acquire()
    except AlreadyLocked:
        try:
            # Signal 0 only checks whether the PID is alive.
            os.kill(pidfile.read_pid(), 0)
            print('Process already running!')
            exit(1)
        except OSError:  # No process with locked PID
            pidfile.break_lock()

    # NOTE(review): this context uses a hardcoded pidfile path that differs
    # from the lock acquired above (cfg_default['test_pid_path']) — confirm
    # this is intentional.
    daemon_context = daemon.DaemonContext(
        working_directory='/home/Toven/da/elda',
        umask=0o002,
        pidfile=PIDLockFile('/home/Toven/da/elda_daemon.pid'),
    )

    print("Start daemon for EyeLink in python")
    with daemon_context:
        while True:
            logger.setLevel(logging.INFO)
            logger.info("==========================")
            logger.debug("Debug message")
            logger.info("Info message")
            # BUG FIX: ``logger.warn`` is a deprecated alias; use
            # ``logger.warning`` (same behavior).
            logger.warning("Warning message")
            logger.error("Error message")
            logger.critical("critical debug message")
            logger.info("==========================")
def main():
    """Entry point for the libra admin API service.

    Configures logging, opens the listening socket (before daemonizing so
    the descriptor can be preserved), optionally detaches, starts the
    maintenance stat drivers and serves the WSGI application.

    Returns:
        int: 0 on normal shutdown; None if the gearman SSL check fails.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')

    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    drivers = []
    pc = get_pecan_config()
    sock = server.make_socket(CONF['admin_api']['host'],
                              CONF['admin_api']['port'],
                              CONF['admin_api']['ssl_keyfile'],
                              CONF['admin_api']['ssl_certfile'])

    if CONF['daemon']:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(
            CONF['admin_api']['pid'], 10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        descriptors = get_descriptors()
        # Keep the already-bound socket alive across the detach.
        descriptors.append(sock.fileno())
        context = daemon.DaemonContext(
            working_directory='/',
            umask=0o022,
            pidfile=pidfile,
            files_preserve=descriptors,
        )
        if CONF['user']:
            context.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            context.gid = grp.getgrnam(CONF['group']).gr_gid
        context.open()

    try:
        check_gearman_ssl_files()
    except Exception as e:
        LOG.critical(str(e))
        return

    # Use the root logger due to lots of services using logger
    LOG.info('Starting on %s:%d', CONF.admin_api.host, CONF.admin_api.port)
    api = setup_app(pc)

    for driver in CONF['admin_api']['stats_driver']:
        drivers.append(importutils.import_class(known_drivers[driver]))
    MaintThreads(drivers)

    sys.stderr = LogStdout()
    wsgi.server(sock, api, keepalive=False)
    return 0
def main():
    """Run the libra admin API: socket setup, optional daemonization,
    stat-driver maintenance threads and the WSGI server loop.

    Returns:
        int: 0 on shutdown; None if the SSL file check fails.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')
    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    drivers = []
    pc = get_pecan_config()
    # Bind before a possible detach so the fd can be preserved below.
    sock = server.make_socket(CONF['admin_api']['host'],
                              CONF['admin_api']['port'],
                              CONF['admin_api']['ssl_keyfile'],
                              CONF['admin_api']['ssl_certfile'])

    if CONF['daemon']:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['admin_api']['pid'],
                                                    10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        keep_fds = get_descriptors()
        keep_fds.append(sock.fileno())
        ctx = daemon.DaemonContext(working_directory='/',
                                   umask=0o022,
                                   pidfile=pidfile,
                                   files_preserve=keep_fds)
        if CONF['user']:
            ctx.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            ctx.gid = grp.getgrnam(CONF['group']).gr_gid
        ctx.open()

    try:
        check_gearman_ssl_files()
    except Exception as e:
        LOG.critical(str(e))
        return

    # Use the root logger due to lots of services using logger
    LOG.info('Starting on %s:%d', CONF.admin_api.host, CONF.admin_api.port)
    api = setup_app(pc)

    for name in CONF['admin_api']['stats_driver']:
        drivers.append(importutils.import_class(known_drivers[name]))
    MaintThreads(drivers)

    sys.stderr = LogStdout()
    wsgi.server(sock, api, keepalive=False)
    return 0
def main():
    """Entry point for the libra public API service.

    Returns:
        int: 0 on normal shutdown; None if the gearman SSL check fails.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')

    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    pc = get_pecan_config()
    # NOTE: Let's not force anyone to actually have to use SSL, it shouldn't
    # be up to us to decide.
    sock = server.make_socket(CONF['api']['host'],
                              CONF['api']['port'],
                              CONF['api']['ssl_keyfile'],
                              CONF['api']['ssl_certfile'])

    if CONF['daemon']:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['api']['pid'], 10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        descriptors = get_descriptors()
        # Preserve the already-bound socket across the detach.
        descriptors.append(sock.fileno())
        context = daemon.DaemonContext(
            working_directory='/',
            umask=0o022,
            pidfile=pidfile,
            files_preserve=descriptors,
        )
        if CONF['user']:
            context.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            context.gid = grp.getgrnam(CONF['group']).gr_gid
        context.open()

    try:
        check_gearman_ssl_files()
    except Exception as e:
        LOG.critical(str(e))
        return

    LOG.info('Starting on %s:%d', CONF.api.host, CONF.api.port)
    api = setup_app(pc)
    sys.stderr = LogStdout()
    wsgi.server(sock, api, keepalive=False, debug=CONF['debug'])
    return 0
def main():
    """Run the libra public API service (optionally daemonized).

    Returns:
        int: 0 on shutdown; None when the SSL file check fails.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')
    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    pc = get_pecan_config()
    # NOTE: Let's not force anyone to actually have to use SSL, it shouldn't
    # be up to us to decide.
    sock = server.make_socket(CONF['api']['host'], CONF['api']['port'],
                              CONF['api']['ssl_keyfile'],
                              CONF['api']['ssl_certfile'])

    if CONF['daemon']:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['api']['pid'], 10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        keep_fds = get_descriptors()
        keep_fds.append(sock.fileno())
        ctx = daemon.DaemonContext(working_directory='/',
                                   umask=0o022,
                                   pidfile=pidfile,
                                   files_preserve=keep_fds)
        if CONF['user']:
            ctx.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            ctx.gid = grp.getgrnam(CONF['group']).gr_gid
        ctx.open()

    try:
        check_gearman_ssl_files()
    except Exception as e:
        LOG.critical(str(e))
        return

    LOG.info('Starting on %s:%d', CONF.api.host, CONF.api.port)
    api = setup_app(pc)
    sys.stderr = LogStdout()
    wsgi.server(sock, api, keepalive=False, debug=CONF['debug'])
    return 0
def handler_proxy_run(options):
    """Start a proxy-pool node, detaching into a daemon when requested."""
    if not options.damon:
        # Foreground run.
        return proxy.start(options.redis)

    pidfile = daemon.pidfile.PIDLockFile(
        os.path.join(options.log, 'proxy.pid'))
    if pidfile.is_locked():
        pid = pidfile.read_pid()
        if psutil.pid_exists(pid):
            return f"已有实例正在运行,pid={pid}"
        # Lock holder is gone: reclaim the stale lock.
        pidfile.break_lock()
    print("OK")  # NOTE(review): looks like leftover debug output — confirm
    with daemon.DaemonContext(pidfile=pidfile, stderr=sys.stderr):
        set_file_logger(options, "proxy")
        return proxy.start(options.redis)
def handler_run(options):
    """Run the crawler.

    Validates the configured proxies and the spider entry point, optionally
    clears queued/proxy data, then starts the worker either in the
    foreground or detached as a daemon.
    """
    db = redis.StrictRedis.from_url(options.redis)
    spider_configs = load_spider_configs(options.spider)
    proxies = parse_args(options.proxy)
    if proxies:
        # Every requested proxy must already be registered in redis.
        for proxy_name in proxies:
            if not db.hexists("proxy:configs", proxy_name):
                return f"ERR: 未找到代理'{proxy_name}'"
        # BUG FIX: the original passed ``proxies`` as a lazy logging arg but
        # the format string had no placeholder, which makes the logging
        # module report a formatting error and drop the message.
        logger.info("使用代理运行 %s", proxies)
    if not os.path.exists(os.path.join(options.spider, '__init__.py')):
        return "ERR: 未找到爬虫入口:'__init__.py'"
    if options.clear:
        logger.info("清空队列与代理数据")
        Spider(db, options.name).clear_proxy()
        Spider(db, options.name).clear_queue()
    if options.damon:
        pidfile = daemon.pidfile.PIDLockFile(
            os.path.join(options.log, f'{options.name}.pid'))
        if pidfile.is_locked():
            pid = pidfile.read_pid()
            if psutil.pid_exists(pid):
                return f"已有实例正在运行,pid={pid}"
            # Stale lock from a dead process: reclaim it.
            pidfile.break_lock()
        logger.info("转入后台运行")
        with daemon.DaemonContext(pidfile=pidfile, stderr=sys.stderr):
            set_file_logger(options, options.name)
            return worker.start(options.spider, options.redis,
                                spider_configs, proxies,
                                options.processes, options.threads)
    else:
        return worker.start(options.spider, options.redis, spider_configs,
                            proxies, options.processes, options.threads)
def shutdown(): pidfile = daemon.pidfile.PIDLockFile(path="/tmp/scheduler_daemon.pid") if pidfile.is_locked(): pid = pidfile.read_pid() try: os.kill(pid, signal.SIG_DFL) except OSError: print "There doesn't seem to be any instance of scheduler daemon running but the lock file exists" print "Breaking lock file" pidfile.break_lock() return 0 try: print "Shutting down scheduler daemon (%d)" % pid __kill_and_wait(pid) pidfile.break_lock() print "Scheduler daemon (%d) successfully terminated" % pid return 1 except OSError: print traceback.format_exc() else: print "There doesn't seem to be any instance of scheduler daemon running" return 0
def _add_options(options):
    """Register all mgm command line / config file options on *options*."""
    options.parser.add_argument(
        '--api_server', action='append', metavar='HOST:PORT',
        help='a list of API servers to connect to (for HP REST API driver)'
    )
    options.parser.add_argument(
        '--datadir', dest='datadir',
        help='directory to store data files'
    )
    options.parser.add_argument(
        '--az', type=int,
        help='The az number the node will reside in (to be passed to the API'
             ' server)'
    )
    options.parser.add_argument(
        '--nodes', type=int, default=1,
        help='number of nodes'
    )
    options.parser.add_argument(
        '--check_interval', type=int, default=5,
        help='how often to check if new nodes are needed (in minutes)'
    )
    options.parser.add_argument(
        '--failed_interval', type=int, default=15,
        help='how often to retest nodes that failed to get added to the API'
             ' server (in minutes)'
    )
    options.parser.add_argument(
        '--driver', dest='driver', choices=known_drivers.keys(),
        default='hp_rest',
        help='type of device to use'
    )
    options.parser.add_argument(
        '--node_basename', dest='node_basename',
        help='prepend the name of all nodes with this'
    )
    options.parser.add_argument(
        '--nova_auth_url',
        help='the auth URL for the Nova API'
    )
    options.parser.add_argument(
        '--nova_user',
        help='the username for the Nova API'
    )
    options.parser.add_argument(
        '--nova_pass',
        help='the password for the Nova API'
    )
    options.parser.add_argument(
        '--nova_region',
        help='the region to use for the Nova API'
    )
    options.parser.add_argument(
        '--nova_tenant',
        help='the tenant for the Nova API'
    )
    options.parser.add_argument(
        '--nova_keyname',
        help='the key name for new nodes spun up in the Nova API'
    )
    options.parser.add_argument(
        '--nova_secgroup',
        help='the security group for new nodes spun up in the Nova API'
    )
    options.parser.add_argument(
        '--nova_image',
        help='the image ID or name to use for new nodes spun up in the'
             ' Nova API'
    )
    options.parser.add_argument(
        '--nova_image_size',
        help='the image size ID (flavor ID) or name to use for new nodes spun'
             ' up in the Nova API'
    )


def _count_missing_required(args):
    """Report each unset required option to stderr; return how many."""
    required_args = [
        'datadir', 'az', 'nova_image', 'nova_image_size', 'nova_secgroup',
        'nova_keyname', 'nova_tenant', 'nova_region', 'nova_user',
        'nova_pass', 'nova_auth_url'
    ]
    # NOTE(LinuxJedi): We are checking for required args here because the
    # parser can't yet check both command line and config file to see if an
    # option has been set
    missing_args = 0
    for req in required_args:
        if getattr(args, req) is None:
            missing_args += 1
            sys.stderr.write(
                '{app}: error: argument --{test_var} is required\n'
                .format(app=os.path.basename(sys.argv[0]), test_var=req))
    return missing_args


def main():
    """Entry point for the node management (mgm) daemon.

    Builds the option set, validates required arguments, then runs the
    ``Server`` in the foreground or detached inside a daemon context.

    Returns:
        int: 0 on success, 1 on daemon setup failure, 2 on missing args.
    """
    options = Options('mgm', 'Node Management Daemon')
    _add_options(options)
    args = options.run()

    if _count_missing_required(args):
        return 2

    if not args.api_server:
        # NOTE(shrews): Can't set a default in argparse method because the
        # value is appended to the specified default.
        # BUG FIX: with action='append' and no default, args.api_server is
        # None here, so the original ``args.api_server.append(...)`` raised
        # AttributeError. Assign a fresh one-element list instead.
        args.api_server = ['localhost:8889']
    elif not isinstance(args.api_server, list):
        # NOTE(shrews): The Options object cannot intelligently handle
        # creating a list from an option that may have multiple values.
        # We convert it to the expected type here.
        args.api_server = args.api_server.split()

    logger = setup_logging('libra_mgm', args)
    server = Server(logger, args)

    if args.nodaemon:
        server.main()
    else:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(args.pid, 10)
        if daemon.runner.is_pidfile_stale(pidfile):
            logger.warning("Cleaning up stale PID file")
            pidfile.break_lock()
        context = daemon.DaemonContext(
            working_directory='/',
            umask=0o022,
            pidfile=pidfile,
            files_preserve=[logger.handlers[0].stream]
        )
        if args.user:
            try:
                context.uid = pwd.getpwnam(args.user).pw_uid
            except KeyError:
                logger.critical("Invalid user: %s" % args.user)
                return 1
            # NOTE(LinuxJedi): we are switching user so need to switch
            # the ownership of the log file for rotation
            os.chown(logger.handlers[0].baseFilename, context.uid, -1)
        if args.group:
            try:
                context.gid = grp.getgrnam(args.group).gr_gid
            except KeyError:
                logger.critical("Invalid group: %s" % args.group)
                return 1
        try:
            context.open()
        except lockfile.LockTimeout:
            logger.critical(
                "Failed to lock pidfile %s, another instance running?",
                args.pid
            )
            return 1
        server.main()
    return 0
directory )) config = yaml.load(file('rtmbot.conf', 'r')) debug = config["DEBUG"] bot = RtmBot(config["SLACK_TOKEN"]) site_plugins = [] files_currently_downloading = [] job_hash = {} if config.has_key("DAEMON"): if config["DAEMON"]: import daemon pidfile = daemon.pidfile.PIDLockFile('/tmp/alfred.pid') pidfile.timeout = 2 try: pidfile.acquire() except LockTimeout: try: os.kill(pidfile.read_pid(), 0) print 'Process already running!' exit(1) except OSError: pidfile.break_lock() with daemon.DaemonContext(pidfile=pidfile ,working_directory=os.getcwd()): main_loop() main_loop()
def main():
    """Command line entry point for the ELM327-emulator."""
    # ---- Option handling ----
    parser = argparse.ArgumentParser(
        epilog='ELM327-emulator v' + __version__ +
        ' - ELM327 OBD-II adapter emulator')
    parser.prog = "elm"
    parser.add_argument(
        '-V', "--version", dest='version', action='store_true',
        help="print ELM327-emulator version and exit")
    if os.name != 'nt':
        # Daemon handling is POSIX-only.
        parser.add_argument(
            '-t', "--terminate", dest='terminate', action='store_true',
            help="terminate the daemon process sending SIGTERM")
        parser.add_argument(
            "-d", "--daemon", dest="daemon_mode", action='store_true',
            help="Run ELM327-emulator in daemon mode. ")
    parser.add_argument(
        "-b", "--batch", dest="batch_mode", type=argparse.FileType('w'),
        help="Run ELM327-emulator in batch mode. "
             "Argument is the output file. "
             "The first line in that file will be the virtual serial device",
        default=0, nargs=1, metavar='FILE')
    parser.add_argument(
        '-p', '--port', dest='serial_port',
        help="Set the com0com serial port listened by ELM327-emulator "
             "when running under windows OS. Default is COM3.",
        default=['COM3'], nargs=1, metavar='PORT')
    parser.add_argument(
        '-a', '--baudrate', dest='serial_baudrate', type=int,
        help="Set the serial device baud rate used by ELM327-emulator.",
        default=None, nargs=1, metavar='BAUDRATE')
    parser.add_argument(
        '-s', '--scenario', dest='scenario',
        help="Set the scenario used by ELM327-emulator.",
        default=[''], nargs=1, metavar='SCENARIO')
    parser.add_argument(
        '-n', '--net', dest='net_port', type=int,
        help="Set the INET socket port used by ELM327-emulator.",
        default=None, nargs=1, metavar='INET_PORT')
    parser.add_argument(
        '-H', '--forward_host', dest='forward_net_host',
        help="Set the INET host used by ELM327-emulator."
             "when forwarding the client interaction to a remote OBD-II port.",
        default=None, nargs=1, metavar='INET_FORWARD_HOST')
    parser.add_argument(
        '-N', '--forward_port', dest='forward_net_port', type=int,
        help="Set the INET socket port used by ELM327-emulator "
             "when forwarding the client interaction to a remote OBD-II port.",
        default=None, nargs=1, metavar='INET_FORWARD_PORT')
    parser.add_argument(
        '-S', '--forward_serial_port', dest='forward_serial_port',
        help="Set the serial device port used by ELM327-emulator "
             "when forwarding the client interaction to a serial device.",
        default=None, nargs=1, metavar='FORWARD_SERIAL_PORT')
    parser.add_argument(
        '-B', '--forward_serial_baudrate', dest='forward_serial_baudrate',
        type=int,
        help="Set the device baud rate used by ELM327-emulator "
             "when forwarding the client interaction to a serial device.",
        default=None, nargs=1, metavar='FORWARD_SERIAL_BAUDRATE')
    parser.add_argument(
        '-T', '--forward_timeout', dest='forward_timeout', type=float,
        help="Set forward timeout as floating number "
             "(default is 5 seconds).",
        default=None, nargs=1, metavar='FORWARD_TIMEOUT')
    args = parser.parse_args()

    if args.version:
        print(f'ELM327-emulator version {__version__}.')
        sys.exit(0)

    # Redirect stdout to the batch output file when it is not a terminal.
    if args.batch_mode and not args.batch_mode[0].isatty():
        sys.stdout = args.batch_mode[0]

    # Instantiate the class; daemon features are disabled on Windows.
    if os.name == 'nt':
        args.daemon_mode = False
        args.terminate = False
    emulator = Elm(
        batch_mode=args.batch_mode or args.daemon_mode,
        serial_port=args.serial_port[0],
        serial_baudrate=args.serial_baudrate[0]
        if args.serial_baudrate else None,
        net_port=args.net_port[0] if args.net_port else None,
        forward_net_host=args.forward_net_host[0]
        if args.forward_net_host else None,
        forward_net_port=args.forward_net_port[0]
        if args.forward_net_port else None,
        forward_serial_port=args.forward_serial_port[0]
        if args.forward_serial_port else None,
        forward_serial_baudrate=args.forward_serial_baudrate[0]
        if args.forward_serial_baudrate else None,
        forward_timeout=args.forward_timeout[0]
        if args.forward_timeout else None)

    if os.name != 'nt':
        # Pidfile location depends on whether we run as root.
        if os.getuid() == 0:
            daemon_pid_fname = DAEMON_PIDFILE_DIR_ROOT + DAEMON_PIDFILE
        else:
            daemon_pid_fname = DAEMON_PIDFILE_DIR_NON_ROOT + DAEMON_PIDFILE
        pidfile = daemon.pidfile.PIDLockFile(daemon_pid_fname)
        pid = pidfile.read_pid()

        if args.terminate:
            # Stop a previously started daemon and exit.
            if pid:
                print(f'Terminating daemon process {pid}.')
                try:
                    Ret = os.kill(pid, signal.SIGTERM)
                except Exception as e:
                    print(f'Error while terminating daemon process {pid}: {e}.')
                    sys.exit(1)
                if Ret:
                    print(f'Error while terminating daemon process {pid}.')
                    sys.exit(1)
                else:
                    sys.exit(0)
            else:
                print('Cannot terminate daemon process: not running.')
                sys.exit(0)

        if args.batch_mode and args.daemon_mode:
            # Batch + daemon: run attached, printing the pty first.
            try:
                print(emulator.get_pty())
                print('ELM327-emulator service STARTED')
                if args.scenario[0]:
                    set_scenario(emulator, args.scenario[0])
                emulator.run()
            except (KeyboardInterrupt, SystemExit):
                emulator.terminate()
            print("\nELM327-emulator service ENDED")
            sys.exit(0)

        if args.daemon_mode and not args.batch_mode:
            # Full daemon: guard against a second instance via the lockfile.
            if pid:
                try:
                    pidfile.acquire()
                    pidfile.release()
                # this might occur only in rare contention cases
                except AlreadyLocked:
                    try:
                        os.kill(pid, 0)
                        print(f'Process {pid} already running. '
                              f'Check lockfile "{daemon_pid_fname}".')
                        sys.exit(1)
                    except OSError:  # No process with locked PID
                        pidfile.break_lock()
                        print(f"Previous process {pid} terminated abnormally.")
                except NotLocked:
                    print("Internal error: lockfile", daemon_pid_fname)
            context = daemon.DaemonContext(
                working_directory=DAEMON_DIR,
                umask=DAEMON_UMASK,
                pidfile=pidfile,
                detach_process=True,
                stdout=sys.stdout,
                stderr=sys.stderr,
                signal_map={
                    signal.SIGTERM: lambda signum, frame: emulator.terminate(),
                    signal.SIGINT: lambda signum, frame: emulator.terminate()
                }
            )
            try:
                with context:
                    print('ELM327-emulator daemon service STARTED on ',
                          emulator.get_pty())
                    if args.scenario[0]:
                        set_scenario(emulator, args.scenario[0])
                    emulator.run()
                    print("\nELM327-emulator daemon service ENDED")
            except LockFailed as e:
                print('Internal error: cannot start daemon', e)
                sys.exit(1)
            sys.exit(0)

    if os.name != 'nt' and pid:
        print(f'Warning: lockfile "{daemon_pid_fname}" reports pid {pid}.')

    # ---- Interactive / batch foreground run ----
    p_elm = None
    pty_name = None
    try:
        with emulator as session:
            # Wait until the emulator thread has finished starting up.
            while session.threadState == session.THREAD.STARTING:
                time.sleep(0.1)
            if args.net_port:
                pty_name = "TCP network port " + str(args.net_port[0]) + "."
            else:
                pty_name = session.get_pty()
            if args.batch_mode:
                print(pty_name)
                sys.stdout.flush()
            if session.threadState == session.THREAD.TERMINATED:
                print('\nELM327-emulator cannot run. Exiting.\n')
                os._exit(1)  # does not raise SystemExit
            if args.scenario[0]:
                set_scenario(session, args.scenario[0])
            if pty_name is None:
                print("\nCannot start ELM327-emulator.\n")
                os._exit(1)  # does not raise SystemExit
            p_elm = Interpreter(session, args)
            if args.batch_mode:
                p_elm.cmdloop_with_keyboard_interrupt(
                    'ELM327-emulator batch mode STARTED\n'
                    'Begin batch commands.')
            else:
                p_elm.cmdloop_with_keyboard_interrupt(
                    'Welcome to the ELM327 OBD-II adapter emulator.\n'
                    'ELM327-emulator is running on %s\n'
                    'Type help or ? to list commands.\n' % pty_name)
    except (KeyboardInterrupt, SystemExit):
        if not args.batch_mode and p_elm:
            p_elm.postloop()
            print('\n\nExiting.\n')
        else:
            print("\nELM327-emulator batch mode ENDED")
        sys.exit(1)
def main():
    """Command line entry point for hostp2pd."""
    # ---- Option handling ----
    parser = argparse.ArgumentParser(
        epilog=f"hostp2pd v.{__version__} - The Wi-Fi Direct "
        " Session Manager. wpa_cli controller of Wi-Fi "
        " Direct connections handled by wpa_supplicant.")
    parser.prog = "hostp2pd"
    parser.add_argument(
        "-V", "--version", dest="version", action="store_true",
        help="print hostp2pd version and exit",
    )
    parser.add_argument(
        "-v", "--verbosity", dest="verbosity", action="store_true",
        help="print execution logging",
    )
    parser.add_argument(
        "-vv", "--debug", dest="debug", action="store_true",
        help="print debug logging information",
    )
    parser.add_argument(
        "-t", "--terminate", dest="terminate", action="store_true",
        help="terminate a daemon process sending SIGTERM",
    )
    parser.add_argument(
        "-r", "--reload", dest="reload", action="store_true",
        help="reload configuration of a daemon process sending SIGHUP",
    )
    parser.add_argument(
        "-c", "--config", dest="config_file", type=argparse.FileType("r"),
        help="Configuration file.",
        default=0, nargs=1, metavar="CONFIG_FILE",
    )
    parser.add_argument(
        "-d", "--daemon", dest="daemon_mode", action="store_true",
        help="Run hostp2pd in daemon mode. ",
    )
    parser.add_argument(
        "-b", "--batch", dest="batch_mode", type=argparse.FileType("w"),
        help="Run hostp2pd in batch mode. "
             "Argument is the output file. "
             "Use an hyphen (-) for standard output.",
        default=0, nargs=1, metavar="FILE",
    )
    parser.add_argument(
        "-i", "--interface", dest="interface",
        help="Set the interface managed by hostp2pd.",
        default=["auto"], nargs=1, metavar="INTERFACE",
    )
    parser.add_argument(
        "-p", "--run_program", dest="run_program",
        help="Name of the program to run with start and stop arguments. ",
        default=[""], nargs=1, metavar="RUN_PROGRAM",
    )
    args = parser.parse_args()

    if args.version:
        print(f"hostp2pd version {__version__}.")
        sys.exit(0)

    # Redirect stdout to the batch output file when it is not a tty.
    if args.batch_mode and not args.batch_mode[0].isatty():
        sys.stdout = args.batch_mode[0]

    # Configuration file
    if args.config_file and args.config_file[0].name:
        config_file = args.config_file[0].name
    else:
        config_file = None

    # Debug / verbosity level
    force_logging = None
    if args.verbosity:
        force_logging = logging.INFO
    if args.debug:
        force_logging = logging.DEBUG

    # Instantiate the class
    hostp2pd = HostP2pD(config_file, args.interface[0],
                        args.run_program[0], force_logging)

    # Per-interface pidfile; location depends on root vs non-root.
    if os.getuid() == 0:
        daemon_pid_fname = (DAEMON_PIDFILE_DIR_ROOT +
                            DAEMON_PIDFILE_BASE +
                            args.interface[0] + ".pid")
    else:
        daemon_pid_fname = (DAEMON_PIDFILE_DIR_NON_ROOT +
                            DAEMON_PIDFILE_BASE +
                            args.interface[0] + ".pid")
    pidfile = daemon.pidfile.PIDLockFile(daemon_pid_fname)
    pid = pidfile.read_pid()

    if args.terminate:
        # Stop a previously started daemon and exit.
        if pid:
            print(f"Terminating daemon process {pid}.")
            try:
                Ret = os.kill(pid, signal.SIGTERM)
            except Exception as e:
                print(f"Error while terminating daemon process {pid}: {e}.")
                sys.exit(1)
            if Ret:
                print(f"Error while terminating daemon process {pid}.")
                sys.exit(1)
            else:
                sys.exit(0)
        else:
            print("Cannot terminate daemon process: not running.")
            sys.exit(0)

    if args.reload:
        # Ask a running daemon to re-read its configuration and exit.
        if pid:
            print(f"Reloading configuration file for daemon process {pid}.")
            try:
                Ret = os.kill(pid, signal.SIGHUP)
            except Exception as e:
                print(f"Error while reloading configuration file "
                      f"for daemon process {pid}: {e}.")
                sys.exit(1)
            if Ret:
                print(f"Error while reloading configuration file "
                      f"for daemon process {pid}.")
                sys.exit(1)
            else:
                sys.exit(0)
        else:
            print("Cannot reload the configuration of the daemon process: "
                  "not running.")
            sys.exit(0)

    if args.daemon_mode and not args.batch_mode:
        # Full daemon: guard against a second instance via the lockfile.
        if pid:
            try:
                pidfile.acquire()
                pidfile.release()  # this might occur only in rare contention cases
            except AlreadyLocked:
                try:
                    os.kill(pid, 0)
                    print(f"Process {pid} already running"
                          f' on the same interface "{args.interface[0]}". '
                          f'Check lockfile "{daemon_pid_fname}".')
                    sys.exit(1)
                except OSError:  # No process with locked PID
                    pidfile.break_lock()
                    print(f"Previous process {pid} terminated abnormally.")
            except NotLocked:
                print("Internal error: lockfile", daemon_pid_fname)
        context = daemon.DaemonContext(
            working_directory=DAEMON_DIR,
            umask=DAEMON_UMASK,
            pidfile=pidfile,
            detach_process=True,
            stdin=sys.stdin if args.debug else None,
            stdout=sys.stdout if args.debug else None,
            stderr=sys.stderr if args.debug else None,
            signal_map={
                signal.SIGTERM: lambda signum, frame: hostp2pd.terminate(),
                signal.SIGINT: lambda signum, frame: hostp2pd.terminate(),
                signal.SIGHUP: lambda signum, frame:
                hostp2pd.read_configuration(
                    configuration_file=hostp2pd.config_file,
                    do_activation=True),
            },
        )
        try:
            with context:
                print("hostp2pd daemon service STARTED")
                hostp2pd.run()
                print("\nhostp2pd daemon service ENDED")
        except LockFailed as e:
            print("Internal error: cannot start daemon", e)
            sys.exit(1)
        sys.exit(0)

    if pid:
        print(f'Warning: lockfile "{daemon_pid_fname}" reports pid {pid}.')

    if args.batch_mode and args.daemon_mode:
        # Batch + daemon: run attached; SIGHUP reloads the configuration.
        print("hostp2pd service STARTED")
        signal.signal(
            signal.SIGHUP,
            lambda signum, frame: hostp2pd.read_configuration(
                configuration_file=hostp2pd.config_file,
                do_activation=True),
        )
        try:
            hostp2pd.run()
        except (KeyboardInterrupt, SystemExit):
            hostp2pd.terminate()
        print("\nhostp2pd service ENDED")
        sys.exit(0)
    else:
        # Interactive / batch foreground run.
        w_p2p_interpreter = None
        try:
            with hostp2pd as session:
                if hostp2pd.process is None or hostp2pd.process.pid is None:
                    print("\nCannot start hostp2pd.\n")
                    os._exit(1)  # does not raise SystemExit
                # Wait until the worker thread has finished starting up.
                while hostp2pd.threadState == hostp2pd.THREAD.STARTING:
                    time.sleep(0.1)
                if not args.batch_mode:
                    logging.info(f"\n\nhostp2pd (v{__version__}) "
                                 "started in interactive mode.\n")
                sys.stdout.flush()
                w_p2p_interpreter = Interpreter(hostp2pd, args)
                if args.batch_mode:
                    w_p2p_interpreter.cmdloop_with_keyboard_interrupt(
                        "hostp2pd batch mode STARTED\n"
                        "Begin batch commands.")
                else:
                    w_p2p_interpreter.cmdloop_with_keyboard_interrupt(
                        "Welcome to hostp2pd - "
                        "The Wi-Fi Direct Session Manager.\n"
                        "Copyright (c) Ircama 2021 - CC BY-NC-SA 4.0.\n"
                        "https://github.com/Ircama/hostp2pd\n"
                        "hostp2pd is running in interactive mode.\n"
                        "Type help or ? to list commands.\n")
        except (KeyboardInterrupt, SystemExit):
            if not args.batch_mode and w_p2p_interpreter:
                w_p2p_interpreter.postloop()
                print("\nExiting.\n")
            else:
                print("hostp2pd batch mode ENDED.")
            sys.exit(1)
def main():
    """
    Main Python entry point for the worker utility.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')

    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    # Import the device driver we are going to use. This will be sent
    # along to the Gearman task that will use it to communicate with
    # the device.
    selected_driver = CONF['worker']['driver']
    driver_class = importutils.import_class(known_drivers[selected_driver])

    if selected_driver == 'haproxy':
        user = CONF['user'] if CONF['user'] else getpass.getuser()
        group = CONF['group'] if CONF['group'] else None
        haproxy_service = CONF['worker:haproxy']['service']
        haproxy_logfile = CONF['worker:haproxy']['logfile']
        driver = driver_class(haproxy_services[haproxy_service],
                              user, group,
                              haproxy_logfile=haproxy_logfile)
    else:
        driver = driver_class()

    server = EventServer()

    # Tasks to execute in parallel
    task_list = [(config_thread, (driver,))]

    if not CONF['daemon']:
        server.main(task_list)
        return 0

    # Daemon mode: reclaim a stale pid lock, then detach.
    pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['worker']['pid'], 10)
    if daemon.runner.is_pidfile_stale(pidfile):
        pidfile.break_lock()
    context = daemon.DaemonContext(
        working_directory='/etc/haproxy',
        umask=0o022,
        pidfile=pidfile,
        files_preserve=get_descriptors(),
    )
    if CONF['user']:
        context.uid = pwd.getpwnam(CONF['user']).pw_uid
    if CONF['group']:
        context.gid = grp.getgrnam(CONF['group']).gr_gid
    context.open()
    server.main(task_list)
    return 0
def main():
    """
    Main Python entry point for the worker utility.
    """
    add_common_opts()
    CONF(project='libra', version=__version__)
    logging.setup('libra')
    LOG.debug('Configuration:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    # Import the device driver we are going to use. This will be sent
    # along to the Gearman task that will use it to communicate with
    # the device.
    selected_driver = CONF['worker']['driver']
    driver_class = importutils.import_class(known_drivers[selected_driver])

    if selected_driver == 'haproxy':
        if CONF['user']:
            user = CONF['user']
        else:
            user = getpass.getuser()
        if CONF['group']:
            group = CONF['group']
        else:
            group = None
        svc = CONF['worker:haproxy']['service']
        logfile = CONF['worker:haproxy']['logfile']
        driver = driver_class(haproxy_services[svc], user, group,
                              haproxy_logfile=logfile)
    else:
        driver = driver_class()

    server = EventServer()
    # Tasks to execute in parallel
    task_list = [(config_thread, (driver,))]

    if not CONF['daemon']:
        server.main(task_list)
    else:
        pidfile = daemon.pidfile.TimeoutPIDLockFile(CONF['worker']['pid'],
                                                    10)
        if daemon.runner.is_pidfile_stale(pidfile):
            pidfile.break_lock()
        keep_fds = get_descriptors()
        ctx = daemon.DaemonContext(working_directory='/etc/haproxy',
                                   umask=0o022,
                                   pidfile=pidfile,
                                   files_preserve=keep_fds)
        if CONF['user']:
            ctx.uid = pwd.getpwnam(CONF['user']).pw_uid
        if CONF['group']:
            ctx.gid = grp.getgrnam(CONF['group']).gr_gid
        ctx.open()
        server.main(task_list)
    return 0