def verify_singleton():
    """Ensure this is the only running navtopology process.

    Exits this process with status 1 if another navtopology process
    already holds the pidfile; otherwise writes our own pidfile and
    registers its removal at interpreter exit.
    """
    try:
        daemon.justme(PIDFILE_PATH)
    except daemon.AlreadyRunningError as err:
        message = "navtopology is already running (%d)" % err.pid
        print(message, file=sys.stderr)
        sys.exit(1)

    # We are alone: claim the pidfile and clean it up on exit.
    daemon.writepidfile(PIDFILE_PATH)
    atexit.register(daemon.daemonexit, PIDFILE_PATH)
def verify_singleton(quiet=False):
    """Ensure this is the only running logengine process.

    If another logengine process already holds the pidfile, exit: with
    status 0 when *quiet* is true, otherwise with a message on stderr
    and status 1. On success, write our pidfile and register its
    removal at interpreter exit.
    """
    # Not a daemon, but we still use a pidfile to prevent multiple
    # simultaneous logengine processes.
    try:
        daemon.justme(PID_FILE)
    except daemon.AlreadyRunningError as error:
        if quiet:
            sys.exit(0)
        print("logengine is already running (%d)" % error.pid,
              file=sys.stderr)
        sys.exit(1)

    daemon.writepidfile(PID_FILE)
    atexit.register(daemon.daemonexit, PID_FILE)
# NOTE(review): this leading fragment appears to be cut out of an enclosing
# function (its opening `def` is not visible here) — indentation restored at
# top level; confirm against the full file.
unused_prefixes = Prefix.objects.filter(
    gwportprefix__isnull=True).exclude(holy_vlans)

if unused_prefixes:
    _logger.info("deleting unused prefixes: %s",
                 ", ".join(p.net_address for p in unused_prefixes))
    cursor = django.db.connection.cursor()
    # Use raw SQL to avoid Django's emulated cascading deletes;
    # psycopg adapts the tuple parameter into a parenthesized IN-list.
    cursor.execute('DELETE FROM prefix WHERE prefixid IN %s',
                   (tuple(p.id for p in unused_prefixes),))


def verify_singleton():
    """Verifies that we are the single running navtopology process.

    If a navtopology process is already running, we exit this process.
    """
    try:
        daemon.justme(PIDFILE_PATH)
    # Python 3 syntax (`except ... as`), consistent with the rest of the file;
    # the old `except E, e` / `print >>` forms do not parse under Python 3.
    except daemon.AlreadyRunningError as error:
        print("navtopology is already running (%d)" % error.pid,
              file=sys.stderr)
        sys.exit(1)

    daemon.writepidfile(PIDFILE_PATH)
    atexit.register(daemon.daemonexit, PIDFILE_PATH)


if __name__ == '__main__':
    main()
""" # Create a pidfile and delete it automagically when the process exits. # Although we're not a daemon, we do want to prevent multiple simultaineous # logengine processes. pidfile = os.path.join(localstatedir, 'run', 'logengine.pid') try: daemon.justme(pidfile) except daemon.AlreadyRunningError, e: if quiet: sys.exit(0) else: print >> sys.stderr, "logengine is already running (%d)" % e.pid sys.exit(1) daemon.writepidfile(pidfile) atexit.register(daemon.daemonexit, pidfile) def get_categories(cursor): categories = {} cursor.execute("select category from category") for category, in cursor.fetchall(): if not categories.has_key(category): categories[category] = category return categories def get_origins(cursor): origins = {} cursor.execute("select origin, name from origin")
def main():
    """Start snmptrapd: bind trap-listener sockets, drop root privileges,
    load the configured trap handler modules, and listen for SNMP traps
    either as a daemon or in the foreground.
    """
    bootstrap_django('snmptrapd')

    # Verify that subsystem exists, if not insert it into database
    verify_subsystem()

    # Initialize and read startupconfig
    global config
    config = SnmptrapdConfig()

    # Create parser and define options
    opts = parse_args()

    # When binding to a port < 1024 we need to be root
    minport = min(port for addr, port in opts.address)
    if minport < 1024:
        if os.geteuid() != 0:
            sys.exit("Must be root to bind to ports < 1024, exiting")

    # Check if already running (pidfile is presumably a module-level
    # constant — not visible in this chunk)
    try:
        daemon.justme(pidfile)
    except daemon.DaemonError as why:
        sys.exit(why)

    # Create SNMP agent object; bind sockets while still root
    server = agent.TrapListener(*opts.address)
    server.open()

    # We have bound to a port and can safely drop privileges
    runninguser = NAV_CONFIG['NAV_USER']
    try:
        if os.geteuid() == 0:
            daemon.switchuser(runninguser)
    except daemon.DaemonError as why:
        # Privilege drop failed: release the bound sockets before exiting
        server.close()
        sys.exit(why)

    global handlermodules

    nav.logs.init_stderr_logging()
    _logger.debug("using %r as SNMP backend", agent.BACKEND)

    # Load handlermodules named in the [snmptrapd] config section
    try:
        _logger.debug('Trying to load handlermodules')
        handlermodules = load_handler_modules(
            config.get('snmptrapd', 'handlermodules').split(','))
    except ModuleLoadError as why:
        _logger.error("Could not load handlermodules %s" % why)
        sys.exit(1)

    addresses_text = ", ".join(
        address_to_string(*addr) for addr in opts.address)

    if not opts.foreground:
        # Daemonize and listen for traps
        try:
            _logger.debug("Going into daemon mode...")
            # logfile_path is presumably module-level — confirm in full file
            logfile = open(logfile_path, 'a')
            daemon.daemonize(pidfile, stderr=logfile, stdout=logfile)
        except daemon.DaemonError as why:
            _logger.error("Could not daemonize: %s", why)
            server.close()
            sys.exit(1)

        # Daemonized
        _logger.info('snmptrapd is now running in daemon mode')

        # Reopen lost db connection
        # This is a workaround for a double free bug in psycopg 2.0.7
        # which is why we don't need to keep the return value
        getConnection('default')

        # Reopen log files on SIGHUP
        _logger.debug(
            'Adding signal handler for reopening log files on SIGHUP.')
        signal.signal(signal.SIGHUP, signal_handler)

        # Exit on SIGTERM
        signal.signal(signal.SIGTERM, signal_handler)

        _logger.info("Snmptrapd started, listening on %s", addresses_text)
        try:
            server.listen(opts.community, trap_handler)
        except SystemExit:
            # Let deliberate exits propagate untouched
            raise
        except Exception as why:
            # Top-level catch-all for the daemon: log with full traceback
            _logger.critical("Fatal exception ocurred", exc_info=True)
    else:
        # Foreground mode: write pidfile ourselves (daemonize does it above)
        daemon.writepidfile(pidfile)

        # Start listening and exit cleanly if interrupted.
        try:
            _logger.info("Listening on %s", addresses_text)
            server.listen(opts.community, trap_handler)
        except KeyboardInterrupt as why:
            _logger.error("Received keyboard interrupt, exiting.")
            server.close()
""" # Create a pidfile and delete it automagically when the process exits. # Although we're not a daemon, we do want to prevent multiple simultaineous # logengine processes. pidfile = os.path.join(localstatedir, 'run', 'logengine.pid') try: daemon.justme(pidfile) except daemon.AlreadyRunningError, e: if quiet: sys.exit(0) else: print >> sys.stderr, "logengine is already running (%d)" % e.pid sys.exit(1) daemon.writepidfile(pidfile) atexit.register(daemon.daemonexit, pidfile) def get_categories(cursor): categories = {} cursor.execute("select category from category") for category, in cursor.fetchall(): if not categories.has_key(category): categories[category] = category return categories def get_origins(cursor): origins = {} cursor.execute("select origin, name from origin") for origin, name in cursor.fetchall():