}) try: resp, content = client.request(base_url + "/services", "POST", body=data) data = json.loads(content) except: print("Communication problem with stashboard") #def writeConfig(): #def readConfig(): #def pushToStash(): def run(self): ip = self.get_arduino_ip() server, port, device, baud = self.get_settings() print(ip) print(server) print(port) print(device) print(baud) #t = Timer(1, self.serialCom(device, baud)) #t.start() #self.connectToStash(ip, server, port) ardstash = ArdStash() daemon = runner.DaemonRunner(ardstash) daemon.do_action()
import time
import grp
import pwd
# Bugfix: `time` was imported twice in the original; one import removed.

from daemon import runner


class Daemon(object):
    """Minimal python-daemon application: idles forever once daemonized."""

    def __init__(self):
        """Initialize Daemon."""
        # All standard streams are detached; this daemon produces no output.
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/null'
        self.stderr_path = '/dev/null'
        self.pidfile_path = '/tmp/pydaemon.pid'
        self.pidfile_timeout = 1

    def run(self):
        """Daemon body executed by DaemonRunner: sleep in a loop forever."""
        while True:
            time.sleep(1)


if __name__ == '__main__':
    pydaemon = Daemon()
    daemon_runner = runner.DaemonRunner(pydaemon)
    # Drop privileges to the 'sbradley' account before daemonizing.
    # NOTE(review): hardcoded user/group name - confirm it exists on the host.
    daemon_gid = grp.getgrnam('sbradley').gr_gid
    daemon_uid = pwd.getpwnam('sbradley').pw_uid
    daemon_runner.daemon_context.gid = daemon_gid
    daemon_runner.daemon_context.uid = daemon_uid
    daemon_runner.do_action()
#! /usr/bin/env python
# encoding: utf-8

from daemon import runner

from common.AgentDaemon import AgentDaemon

if __name__ == "__main__":
    # Hand the agent to python-daemon's runner, which reads the
    # start/stop/restart action from the command line.
    agent = AgentDaemon()
    agent_runner = runner.DaemonRunner(agent)
    agent_runner.do_action()
logger.info('database user name or password - connected OK') except mysql.connector.Error as err: if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: logger.error( 'error :: something is wrong with your database user name or password' ) elif err.errno == errorcode.ER_BAD_DB_ERROR: logger.error('error :: the %s database does not exist' % settings.PANORAMA_DATABASE) else: logger.error('error :: mysql error - %s' % str(err)) except: try: if configuration_error: print('The database is not available') except: print('The database is not available') sys.exit(1) if not mysql_up: sys.exit(1) ionosphere = IonosphereAgent() if len(sys.argv) > 1 and sys.argv[1] == 'run': ionosphere.run() else: daemon_runner = runner.DaemonRunner(ionosphere) daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
def main(provisioning_service_id, training_files):
    """Run App for the given provisioning service as a background daemon."""
    application = App(provisioning_service_id, training_files)
    app_runner = runner.DaemonRunner(application)
    app_runner.do_action()
def run():
    """
    Check that all the `ALGORITHMS` can be run, set up logging and then
    start (or stop) the AnalyzerDevAgent via python-daemon.
    """
    if not isdir(settings.PID_PATH):
        print('pid directory does not exist at %s' % settings.PID_PATH)
        sys.exit(1)

    if not isdir(settings.LOG_PATH):
        print('log directory does not exist at %s' % settings.LOG_PATH)
        sys.exit(1)

    if len(sys.argv) > 1 and sys.argv[1] == 'stop':
        do_not_overwrite_log = True
        # This should hopefully take care of a TODO from the bin files,
        # TODO: write a real kill script
        # as above @earthgecko 20160520
        pidfile_path = settings.PID_PATH + '/' + skyline_app + '.pid'
        pid = int(open(pidfile_path).read())
        try:
            kill(pid, signal.SIGTERM)
            print('%s pid %s stopped' % (skyline_app, str(pid)))
            sys.exit(0)
        except OSError as e:
            print('Failed to kill pid %s - OSError - %s' % (str(pid), str(e)))
            sys.exit(1)

    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s",
                                  datefmt="%Y-%m-%d %H:%M:%S")

    # The stop action logs to a separate append-mode file so the rotating
    # daemon log is not overwritten.
    if len(sys.argv) > 1 and sys.argv[1] == 'stop':
        handler = logging.FileHandler(
            settings.LOG_PATH + '/' + skyline_app + '.stop.log',
            mode='a', delay=False)
    else:
        handler = logging.handlers.TimedRotatingFileHandler(
            logfile, when="midnight", interval=1, backupCount=5)

    memory_handler = logging.handlers.MemoryHandler(
        256, flushLevel=logging.DEBUG, target=handler)
    handler.setFormatter(formatter)
    logger.addHandler(memory_handler)

    # Validate settings variables
    valid_settings = validate_settings_variables(skyline_app)
    if not valid_settings:
        print('error :: invalid variables in settings.py - cannot start')
        sys.exit(1)

    if len(sys.argv) > 1 and sys.argv[1] == 'stop':
        do_not_overwrite_log = True
    else:
        # Make sure we can run all the algorithms against a synthetic
        # 24h/1s-resolution timeseries before daemonizing.
        try:
            # from analyzer import algorithms
            import algorithms_dev
            logger.info('Testing algorithms')
            timeseries = map(
                list,
                zip(map(float, range(int(time()) - 86400, int(time()) + 1)),
                    [1] * 86401))
            # ensemble = [globals()[algorithm](timeseries) for algorithm in settings.ALGORITHMS]
            ensemble = [
                getattr(algorithms_dev, algorithm)(timeseries)
                for algorithm in settings.ALGORITHMS
            ]
            logger.info('Tested algorithms OK')
            logger.info('ensemble: %s' % str(ensemble))
        except KeyError as e:
            print(
                'Algorithm %s deprecated or not defined; check settings.ALGORITHMS'
                % e)
            sys.exit(1)
        except Exception as e:
            print('Algorithm test run failed.')
            traceback.print_exc()
            sys.exit(1)

        logger.info('Tested algorithms')
        del timeseries
        del ensemble

    analyzer = AnalyzerDevAgent()

    if len(sys.argv) > 1 and sys.argv[1] == 'stop':
        do_not_overwrite_log = True
    else:
        logger.info('starting analyzer_dev.run')
        # Bugfix: the original read `memory_handler.flush` without calling
        # it - a no-op attribute access, so buffered records were never
        # flushed here.
        memory_handler.flush()

    if len(sys.argv) > 1 and sys.argv[1] == 'run':
        analyzer.run()
    else:
        daemon_runner = runner.DaemonRunner(analyzer)
        # Keep the log file handle open across daemonization.
        daemon_runner.daemon_context.files_preserve = [handler.stream]
        daemon_runner.do_action()

    if len(sys.argv) > 1 and sys.argv[1] == 'stop':
        do_not_overwrite_log = True
    else:
        logger.info('stopped analyzer_dev')
def main():
    """Daemon caller.

    Builds the StratuxScreen application and dispatches the
    start/stop/restart action via python-daemon's runner.
    """
    screen = StratuxScreen()
    screen_runner = runner.DaemonRunner(screen)
    screen_runner.do_action()
outline=255, fill=255) # Top left, bottom right. # Draw the current (left) and max (right) numbers. draw.text((pad, 34), str(es_current), font=font2, fill=255) draw.text(((2 * pad) + text_margin + status_bar_width_max, 34), str(es_max), font=font2, fill=255) # Other stats. seq = (n / 5) % 2 t = "" if seq == 0: t = "CPU: %0.1fC, Towers: %d" % (CPUTemp, NumTowers) if seq == 1: t = "GPS Sat: %d/%d/%d" % ( getStatusData["GPS_satellites_locked"], getStatusData["GPS_satellites_seen"], getStatusData["GPS_satellites_tracked"]) if getStatusData[ "GPS_solution"] == "GPS + SBAS (WAAS / EGNOS)": t = t + " (WAAS)" #print t draw.text((pad, 45), t, font=font2, fill=255) n = n + 1 stratuxscreen = StratuxScreen() daemon_runner = runner.DaemonRunner(stratuxscreen) daemon_runner.do_action()
def shutdown(self, signum, frame): logger.critical("starting shutdown by %d" % signum) self.httpserver.shutdown() logger.critical("finished shutdown by %d" % signum) return try: alarmclock_daemon = AlarmClockDaemon() if sys.argv[1] == "test": stderrHandler = logging.StreamHandler(sys.stderr) stderrHandler.setFormatter(formatter) logger.propagate = False logger.addHandler(stderrHandler) logger.info("running in test mode, logging to stderr") alarmclock_daemon.run() else: daemonHandler = logging.handlers.RotatingFileHandler(LOGFILE, maxBytes=100000, backupCount=5) daemonHandler.setFormatter(formatter) logger.addHandler(daemonHandler) daemon_runner = runner.DaemonRunner(alarmclock_daemon) daemon_runner.daemon_context.files_preserve = [daemonHandler.stream] daemon_runner.do_action() pass except Exception as e: logger.error("failed: \"%s\"" % str(e)) pass
self.stdout_path = '/dev/tty' self.stderr_path = '/dev/tty' self.pidfile_path = '/var/run/vinyldealbot/vinyldealbot.pid' self.pidfile_timeout = 5 self.reddit = praw.Reddit('VinylDealBot') def run(self): while True: try: print(os.path.dirname(os.path.realpath(__file__))) conn = sqlite3.connect('/home/ec2-user/vinyldealbot/alerts.db') c = conn.cursor() logging.basicConfig(filename="vinylbot.log", level=logging.INFO, format="%(asctime)s - %(message)s") logging.info("Launching VinylDealBot...") subreddit = self.reddit.subreddit("vinyldeals") while True: logging.info("Reading posts") readPosts(conn, c, self.reddit, subreddit) logging.info("Checking alerts") alert(conn, c, self.reddit, subreddit) except Exception as e: logging.info("Error: " + traceback.format_exc()) vinyldealbot = VinylDealBot() daemon_runner = runner.DaemonRunner(vinyldealbot) daemon_runner.do_action()
def main():
    """Entry point: run App in the background.

    This application is envisioned to be run as a daemon.
    """
    application = App()
    app_runner = runner.DaemonRunner(application)
    app_runner.do_action()
settings = 'settings.ini' log_file = os.path.join(working_directory, log) settings_file = os.path.join(working_directory, settings) # creating daemon folder try: if not os.path.isdir(working_directory): os.mkdir(working_directory) except (IOError, OSError): print("can't create directory {0} for a daemon".format(working_directory)) exit(1) # create logger logger = logging.getLogger("subscribe_daemon") logger.setLevel(logging.DEBUG) formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s") handler = logging.FileHandler(log_file) handler.setFormatter(formatter) logger.addHandler(handler) # creating daemon object daemon = Subscriber(settings_file, working_directory, logger) # starting daemon daemon_runner = runner.DaemonRunner(daemon) #This ensures that the logger file handle does not get closed during daemonization daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ from daemon import runner import meaner4 class Meaner4App: def __init__(self): self.stdin_path = "/dev/null" self.stdout_path = "/var/log/szarp/meaner4.stdin.log" self.stderr_path = "/var/log/szarp/meaner4.stderr.log" self.pidfile_path = "/var/run/meaner4.pid" self.pidfile_timeout = 5 def run(self): meaner4.go() if __name__ == "__main__": app = runner.DaemonRunner(Meaner4App()) try: app.do_action() except runner.DaemonRunnerStopFailureError as ex: # if the script was not running, the pid file won't be locked # and we don't want the stop action to fail if str(ex).find("not locked") < 0: raise ex
second_order_resolution_seconds = 86400 ensemble = [globals()[algorithm](timeseries, second_order_resolution_seconds) for algorithm in settings.MIRAGE_ALGORITHMS] except KeyError as e: print "Algorithm %s deprecated or not defined; check settings.MIRAGE_ALGORITHMS" % e sys.exit(1) except Exception as e: print "Algorithm test run failed." traceback.print_exc() sys.exit(1) mirage = MirageAgent() logger = logging.getLogger("MirageLog") logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") handler = logging.handlers.TimedRotatingFileHandler( settings.LOG_PATH + '/mirage.log', when="midnight", interval=1, backupCount=5) handler.setFormatter(formatter) logger.addHandler(handler) if len(sys.argv) > 1 and sys.argv[1] == 'run': mirage.run() else: daemon_runner = runner.DaemonRunner(mirage) daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
def run(): """ Start the Boundary agent. """ if not isdir(settings.PID_PATH): print('pid directory does not exist at %s' % settings.PID_PATH) sys.exit(1) if not isdir(settings.LOG_PATH): print('log directory does not exist at %s' % settings.LOG_PATH) sys.exit(1) logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") handler = logging.handlers.TimedRotatingFileHandler(logfile, when="midnight", interval=1, backupCount=5) memory_handler = logging.handlers.MemoryHandler(256, flushLevel=logging.DEBUG, target=handler) handler.setFormatter(formatter) logger.addHandler(memory_handler) # Validate settings variables valid_settings = validate_settings_variables(skyline_app) if not valid_settings: print('error :: invalid variables in settings.py - cannot start') sys.exit(1) # Make sure all the BOUNDARY_ALGORITHMS are valid try: if settings.BOUNDARY_ALGORITHMS: configuration_error = False for algorithm in settings.BOUNDARY_ALGORITHMS: valid = True if not isinstance(algorithm, str): valid = False if not valid: configuration_error = True print('configuration error in tuple, expected: str') print( 'configuration error in BOUNDARY_ALGORITHMS tuple: %s' % str(algorithm)) except: try: if configuration_error: print( 'There are configuration issues in BOUNDARY_ALGORITHMS in settings.py' ) except: print( 'There are no BOUNDARY_ALGORITHMS in settings.py. 
try adding some, nothing to do' ) sys.exit(1) # Make sure we can run all the algorithms try: timeseries = map( list, zip(map(float, range(int(time()) - 86400, int(time()) + 1)), [1] * 86401)) # @added 20191021 - Branch #3262: py3 # Convert map to list if python_version == 3: if isinstance(timeseries, map): timeseries = list(timeseries) ensemble = [ globals()[algorithm](timeseries, 'test', 3600, 100, 300, 1) for algorithm in settings.BOUNDARY_ALGORITHMS ] except KeyError as e: print( 'Algorithm %s deprecated or not defined; check settings.BOUNDARY_ALGORITHMS' % e) sys.exit(1) except Exception as e: print('Algorithm test run failed.') traceback.print_exc() sys.exit(1) # Make sure all the BOUNDARY_METRICS are valid try: if settings.BOUNDARY_METRICS: configuration_error = False for metric in settings.BOUNDARY_METRICS: valid = True strings = [] strings.append(metric[0]) strings.append(metric[1]) strings.append(metric[7]) for string in strings: if not isinstance(string, str): valid = False values = [] values.append(metric[2]) values.append(metric[3]) values.append(metric[4]) values.append(metric[5]) values.append(metric[6]) for value in values: if not isinstance(value, int): valid = False alert_via = metric[7] for alerter in alert_via.split("|"): if not isinstance(alerter, str): valid = False if not valid: configuration_error = True print( 'configuration error in tuple, expected: str, str, int, int, int, int, str' ) print('configuration error in BOUNDARY_METRICS tuple: %s' % str(metric)) except: if configuration_error: print( 'There are configuration issues in BOUNDARY_METRICS in settings.py' ) else: print( 'There are no BOUNDARY_METRICS in settings.py. 
try adding some, nothing to do' ) sys.exit(1) logger.info('Tested algorithms') del timeseries del ensemble del strings del values boundary = BoundaryAgent() if len(sys.argv) > 1 and sys.argv[1] == 'run': boundary.run() else: daemon_runner = runner.DaemonRunner(boundary) daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
class RxCmdDaemon(): def __init__(self): self.stdin_path = '/dev/null' # self.stdout_path = '/dev/tty' self.stdout_path = '/home/robot/pydir/daemon.log' self.stderr_path = '/home/robot/pydir/daemon.log' # self.stderr_path = '/dev/tty' self.pidfile_path = '/tmp/RxCmdDaemon.pid' self.pidfile_timeout = 5 def run(self): while True: server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM) port = 1 server_sock.bind(("", port)) server_sock.listen(1) client_sock, address = server_sock.accept() print "Accepted connection from ", address try: while True: data = client_sock.recv(1024) print "received [%s]" % data os.system(data) except Exception as e: logging.exception(e) rxCmdDaemon = RxCmdDaemon() daemon_runner = runner.DaemonRunner(rxCmdDaemon) daemon_runner.do_action()
zip(map(float, range(int(time()) - 86400, int(time()) + 1)), [1] * 86401)) ensemble = [ globals()[algorithm](timeseries) for algorithm in settings.ALGORITHMS ] except KeyError as e: print "Algorithm %s deprecated or not defined; check settings.ALGORITHMS" % e sys.exit(1) except Exception as e: print "Algorithm test run failed." traceback.print_exc() sys.exit(1) analyzer = AnalyzerAgent() logger = logging.getLogger("AnalyzerLog") logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") handler = logging.FileHandler(settings.LOG_PATH + '/analyzer.log') handler.setFormatter(formatter) logger.addHandler(handler) if len(sys.argv) > 1 and sys.argv[1] == 'run': analyzer.run() else: daemon_runner = runner.DaemonRunner(analyzer) daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
from daemon import runner


class App():
    """Test daemon that logs an incrementing heartbeat every 10 seconds."""

    def __init__(self):
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/tty'
        self.stderr_path = '/dev/tty'
        self.pidfile_path = '/var/run/test.pid'
        self.pidfile_timeout = 5

    def run(self):
        """Daemon body: emit a numbered log message forever."""
        i = 0
        while True:
            # Bugfix: the original wrote "message %s" + str(i), leaving the
            # literal '%s' in the output ("message %s0"). Pass the counter
            # as a lazy %-style logging argument instead.
            logger.info("message %s", i)
            i += 1
            time.sleep(10)


if __name__ == '__main__':
    app = App()
    logger = logging.getLogger("testlog")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
    handler = logging.FileHandler("test.log")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    serv = runner.DaemonRunner(app)
    # Keep the log file handle open across daemonization.
    serv.daemon_context.files_preserve = [handler.stream]
    serv.do_action()
def entry_point(): print(CONST.META.epilog) possible_loglevels = map(logging.getLevelName, range(0, 51, 10)) possible_daemon_commands = ['start', 'stop', 'restart'] parser = argparse.ArgumentParser( description=' - '.join([CONST.META.prog, CONST.META.project])) parser.add_argument("daemon", default='NONE', choices=possible_daemon_commands + ['NONE'], help="daemon control - NONE to start as application", nargs='?') parser.add_argument('-c', '--configfile', default=CONST.CONFIG_DEFAULT_FILENAME, dest="config_file", help='configfile in json') parser.add_argument('--log_level', default=CONST.LOG_LEVEL, action='store', dest="log_level", choices=possible_loglevels, help="Set the logging level for console output") parser.add_argument('--logfile', default=CONST.LOG_DEFAULT_FILENAME, dest="log_file", help='Logfile with rotating file handler') parser.add_argument("--logfile_level", default=CONST.LOG_LEVEL, action="store", dest="logfile_level", choices=possible_loglevels, help="Set the logging level for the logfile") parser.add_argument("--logfile_maxBytes", default=5000000, dest="logfile_max_bytes", type=int, help="Set the logfile max size (each rotated file)") parser.add_argument("--logfile_maxFiles", default=10, dest="logfile_max_files", type=int, help="Set the logfile max rotation") parser.add_argument( "--skip_sudo_check", default=False, action="store_true", dest="skip_sudo_check", help= "if set DoorPi will not check sudo or root access and you have to configure the rights by yourself" ) parser.add_argument( "--install_daemon", default=False, action="store_true", dest="install_daemon", help= "install daemonfile, pip modul 'python-daemon' and register the daemonfile" ) #parser.add_argument("--use_last_known_config", default = False, action = "store_true", dest = "use_last_known_config", help = "use the last known working config") args = parser.parse_args() if os.geteuid() != 0 and args.skip_sudo_check is False: raise SystemExit( "DoorPi must run with sudo rights - maybe 
use --skip_sudo_check to skip this check" ) CONST.LOG_LEVEL = args.log_level logging.getLogger('').setLevel(CONST.LOG_LEVEL) DOORPI.prepare(args) DOORPI.logger.setLevel(CONST.LOG_LEVEL) DOORPI.logger.setFormatter(logging.Formatter(CONST.LOG_FORMAT)) logging.getLogger('').addHandler(DOORPI.logger) if args.log_file: try: logrotating = logging.handlers.RotatingFileHandler( args.log_file, maxBytes=args.logfile_max_bytes, backupCount=args.logfile_max_files) logrotating.setLevel(args.logfile_level) logrotating.setFormatter(logging.Formatter(CONST.LOG_FORMAT)) logging.getLogger('').addHandler(logrotating) except IOError as exp: logging.exception("Managed exception while open logfile %s" % exp) logger = init_own_logger(__name__) logger.debug('loaded with arguments: %s', str(args)) if args.daemon in possible_daemon_commands or args.install_daemon: if args.daemon in sys.argv and sys.argv[1] != args.daemon: sys.argv.remove(args.daemon) sys.argv = [sys.argv[0], args.daemon] + sys.argv[1:] if not DAEMON.DAEMON_AVAILABLE or args.install_daemon: try: if args.install_daemon: sys.argv.remove('--install_daemon') auto_uninstall('resources.daemon') auto_install('resources.daemon') logger.info('installed daemon for DoorPi - restart DoorPi now') os.execv(sys.argv[0], sys.argv) except OSError as exp: raise SystemExit( "restart of DoorPi failed - please restart it") except Exception as exp: raise SystemExit("error during autoinstaller: '%s'" % exp) from daemon import runner from daemon.runner import DaemonRunnerInvalidActionError from daemon.runner import DaemonRunnerStartFailureError from daemon.runner import DaemonRunnerStopFailureError from resources.functions.filesystem import files_preserve_by_path daemon_runner = runner.DaemonRunner(DOORPI) if args.log_file: daemon_runner.daemon_context.files_preserve = files_preserve_by_path( args.log_file) else: from resources.daemon import DaemonRunnerInvalidActionError from resources.daemon import DaemonRunnerStartFailureError from 
resources.daemon import DaemonRunnerStopFailureError try: if args.daemon in possible_daemon_commands: daemon_runner.do_action() else: DOORPI.start(start_as_daemon=False) except DaemonRunnerStartFailureError as ex: logger.error( "can't start DoorPi daemon - maybe it's running already? (Message: %s)", ex) except DaemonRunnerStopFailureError as ex: logger.error( "can't stop DoorPi daemon - maybe it's not running? (Message: %s)", ex) except KeyboardInterrupt: logger.info("KeyboardInterrupt -> DoorPi will shutdown") except CorruptConfigFileException as ex: logger.exception("CorruptConfigFileException: %s", ex) except Exception as ex: logger.exception("Exception: %s", ex) finally: DOORPI.stop() logger.info('finished')
if not os.path.exists(logDir): os.makedirs(logDir) handler = logging.FileHandler(logDir+'/message.log', "a", encoding = "UTF-8") formatter = logging.Formatter(logFormat) handler.setFormatter(formatter) root_logger = logging.getLogger() root_logger.addHandler(handler) root_logger.setLevel(logLevel) if sys.argv[1] == 'start': action = asteriskRESTActions(config) with check_mnp(config, action) as instance: daemon_runner = runner.DaemonRunner(instance) daemon_runner.daemon_context.files_preserve=[handler.stream] daemon_runner.do_action() elif sys.argv[1] == 'stop': instance = daemonApp() daemon_runner = runner.DaemonRunner(instance) daemon_runner.daemon_context.files_preserve=[handler.stream] daemon_runner.do_action() else: logging.basicConfig(format = logFormat, level = logLevel) action = asteriskRESTActions(config) with check_mnp(config, action) as instance:
def run(): """ Start the Crucible agent and ensure all the required directories exist, creating the crucible directories if they do not exist """ if not isdir(settings.PID_PATH): print('pid directory does not exist at %s' % settings.PID_PATH) sys.exit(1) if not isdir(settings.LOG_PATH): print('log directory does not exist at %s' % settings.LOG_PATH) sys.exit(1) # Make sure the required directories exists if not os.path.exists(settings.CRUCIBLE_CHECK_PATH): try: os.makedirs(settings.CRUCIBLE_CHECK_PATH, mode=0o755) except: print('failed to create directory - %s' % settings.CRUCIBLE_CHECK_PATH) sys.exit(1) if not os.path.exists(settings.CRUCIBLE_DATA_FOLDER): try: os.makedirs(settings.CRUCIBLE_DATA_FOLDER, mode=0o755) except: print('failed to create directory - %s' % settings.CRUCIBLE_DATA_FOLDER) sys.exit(1) failed_checks_dir = settings.CRUCIBLE_DATA_FOLDER + '/failed_checks' if not os.path.exists(failed_checks_dir): try: os.makedirs(failed_checks_dir, mode=0o755) except: print('failed to create directory - %s' % failed_checks_dir) sys.exit(1) logger.setLevel(logging.DEBUG) formatter = logging.Formatter("%(asctime)s :: %(process)s :: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") handler = logging.handlers.TimedRotatingFileHandler(logfile, when="midnight", interval=1, backupCount=5) memory_handler = logging.handlers.MemoryHandler(256, flushLevel=logging.DEBUG, target=handler) handler.setFormatter(formatter) logger.addHandler(memory_handler) # Validate settings variables try: valid_settings = validate_settings_variables(skyline_app) except Exception as e: print('error :: validate_settings_variables failed - %s' % e) sys.exit(1) if not valid_settings: print('error :: invalid variables in settings.py - cannot start') sys.exit(1) crucible = CrucibleAgent() if len(sys.argv) > 1 and sys.argv[1] == 'run': logger.info('starting skyline crucible via run') crucible.run() else: logger.info('starting skyline crucible via daemon') daemon_runner = runner.DaemonRunner(crucible) 
daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
POLL_INTERVAL = 30 DEBUG = False if __name__ == "__main__": if len(sys.argv) == 3: DEBUG = True if len(sys.argv) >= 2: if 'status' == sys.argv[1]: running, pid = is_running(PID_FILE) if running: print '%s is running as pid %s' % (sys.argv[0], pid) else: print '%s is not running.' % sys.argv[0] elif 'stop' == sys.argv[1] and not is_running(PID_FILE)[0]: print '%s is not running.' % sys.argv[0] else: collector = MetricsCollector(PID_FILE, poll_interval=POLL_INTERVAL, db_path=DB_FILE, debug=DEBUG) daemon = runner.DaemonRunner(collector) daemon.do_action() # start|stop|restart as sys.argv[1] running, pid = is_running(PID_FILE) sys.exit(0) else: print "Usage: %s start|stop|restart|status" % sys.argv[0] sys.exit(2) else: print "%s can't be included in another program." % sys.argv[0] sys.exit(1)
status = api.PostUpdate(message)  # NOTE(review): tail of a function truncated above this chunk


class DaemonApp():
    """python-daemon wrapper that runs main() inside the daemon context."""

    def __init__(self, pidFilePath, stdout_path='/dev/null', stderr_path='/dev/null'):
        self.stdin_path = '/dev/null'
        self.stdout_path = stdout_path
        self.stderr_path = stderr_path
        self.pidfile_path = pidFilePath
        self.pidfile_timeout = 1

    def run(self):
        """Daemon body: invoke the module main with the CLI arguments."""
        main(__file__, sys.argv[1:])


if __name__ == '__main__':
    # NOTE(review): main() runs once in the foreground here before the
    # daemon is started - confirm the double invocation is intended.
    main(__file__, sys.argv[1:])
    # Bugfix: os.path.basename() yields only the script *filename*; the
    # daemon working directory must be the containing directory.
    workingDirectory = os.path.dirname(os.path.realpath(__file__))
    stdout_path = '/dev/null'
    stderr_path = '/dev/null'
    # Bugfix: os.path.split() returns (directory, filename); the original
    # names were swapped, producing a nonsensical pid file path. Derive the
    # pid file name from the script name without its extension.
    scriptDir, scriptName = os.path.split(os.path.realpath(__file__))
    pidFilePath = os.path.join(workingDirectory,
                               os.path.splitext(scriptName)[0] + '.pid')
    from daemon import runner
    dRunner = runner.DaemonRunner(DaemonApp(pidFilePath, stdout_path, stderr_path))
    dRunner.daemon_context.working_directory = workingDirectory
    dRunner.daemon_context.umask = 0o002
    dRunner.daemon_context.signal_map = {
        signal.SIGTERM: 'terminate',
        # Bugfix: signal.SIGUP does not exist (AttributeError at runtime);
        # SIGHUP was intended.
        signal.SIGHUP: 'terminate'
    }
    dRunner.do_action()
def main():
    """Create the App and run it under python-daemon's action runner."""
    application = App()
    app_runner = runner.DaemonRunner(application)
    app_runner.do_action()
if qid in image_dict: ans += '<br /><img class="answer-image" src="' + image_dict[qid] + '" />' print 'ans is',ans connection.send( ans ) else: rand_idx = random.randint(0, len(default_ans)-1) log_file.write( " A:[" + default_ans[rand_idx] + "] sim:0" ) print 'ans is',default_ans[rand_idx] connection.send( default_ans[rand_idx] ) log_file.write( " compute_time:" + str(answer_finish_time-answer_start_time) ) log_file.write( " compare_entry:" + str(search_time) ) log_file.write( '\n' ) except Exception as e: print "error at",sys.exc_traceback.tb_lineno connection.send("Error " + str(e)) finally: connection.close() except Exception as e: print "Error",e,"at line",sys.exc_traceback.tb_lineno finally: pass # Start daemon serv = AsukuServer() daemon_runner = runner.DaemonRunner(serv) daemon_runner.do_action()
if __name__ == '__main__':
    from daemon import runner

    class Runner(object):
        """python-daemon application wrapper for the snapshot Manager."""

        def __init__(self):
            """Configure stream paths and the pid file for the daemon."""
            # Detach all standard streams; the Manager does its own logging.
            self.stdin_path = '/dev/null'
            self.stdout_path = '/dev/null'
            self.stderr_path = '/dev/null'
            self.pidfile_path = '/var/run/zfs-snap-manager.pid'
            self.pidfile_timeout = 5

        def run(self):
            """Daemon body: start the Manager (may block)."""
            _ = self
            Manager.start()

    daemon_app = Runner()
    app_runner = runner.DaemonRunner(daemon_app)
    app_runner.do_action()
def start_polling():
    """Launch the log-management daemon via python-daemon's runner."""
    daemon_app = LogMgmtDaemon()
    app_runner = runner.DaemonRunner(daemon_app)
    # umask 022: files created by the daemon are group/world readable.
    app_runner.daemon_context.umask = 0o022
    app_runner.do_action()
print ('The database is not available') # @modified 20191031 - Feature #3310: gracefully handle db failure # Branch 3262: py3 # sys.exit(1) if start_if_no_db: logger.warn('warning :: mysql_up is %s but START_IF_NO_DB is %s, so starting' % ( str(mysql_up), str(start_if_no_db))) mysql_up = True else: sys.exit(1) # @added 20191031 - Feature #3310: gracefully handle db failure # Branch 3262: py3 if start_if_no_db: if not mysql_up: logger.warn('warning :: mysql_up is %s but START_IF_NO_DB is %s, so starting' % ( str(mysql_up), str(start_if_no_db))) mysql_up = True if not mysql_up: sys.exit(1) panorama = PanoramaAgent() if len(sys.argv) > 1 and sys.argv[1] == 'run': panorama.run() else: daemon_runner = runner.DaemonRunner(panorama) daemon_runner.daemon_context.files_preserve = [handler.stream] daemon_runner.do_action()
if len(sys.argv) > 1: if 'start' in sys.argv: print "Starting %s in daemon mode" % sys.argv[0] elif 'stop' in sys.argv: print "Stopping %s daemon mode" % sys.argv[0] elif 'status' in sys.argv: try: # TODO make this a directive in the config file pf = file('/var/run/gd-ssl-sync.pid', 'r') pid = int(pf.read().strip()) pf.close() except OSError as e: if e.errno != errno.ENOENT: raise # re-raise exception if a different error occured except IOError: pid = None except SystemExit: pid = None if pid: print "%s is running as pid %s" % (sys.argv[0], pid) sys.exit(0) else: print "%s is not running." % sys.argv[0] sys.exit(1) app = App() daemon_runner = runner.DaemonRunner(app) daemon_runner.do_action() # else: # parser.print_help()
self.stderr_path = '/opt/hpfeeds/broker/logs/err.log' self.pidfile_path = '/opt/hpfeeds/broker/pid/broker.pid' self.pidfile_timeout = 5 self.logfile = '/opt/hpfeeds/broker/logs/broker.log' def run(self): logging.basicConfig( format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", filename=self.logfile, level=logging.DEBUG if config.DEBUG else logging.INFO) try: while True: log.info("broker starting up...") s = Server() s.serve_forever() except (SystemExit, KeyboardInterrupt): pass except: log.exception("Exception") finally: log.info("broker shutting down...") if __name__ == '__main__': broker_runner = runner.DaemonRunner(Broker()) # Docker causes is_detach_process_context_required() to return False # Explicitly set detach_process to True (override None) broker_runner.daemon_context.detach_process = True broker_runner.do_action()