def warning(*args):
    """Emit a WARNING-level message built from *args*.

    The message always goes to syslog (opened and closed per call); when
    stderr is attached to a terminal it is also mirrored through the
    ``logging`` module for interactive visibility.
    """
    text = make_msg(args)
    if sys.stderr.isatty():
        logging.warning(text)
    syslog.openlog(ident, syslog.LOG_NDELAY, facility)
    syslog.syslog(syslog.LOG_WARNING, text)
    syslog.closelog()
def _print_line(self, data, use_syslog, splunk, idstring, cmd="compute"):
    """Emit one hypervisor-statistics record.

    Depending on the flags the record goes either to syslog (optionally in
    splunk key=value form) or to the application's stdout as a CSV line.

    data      -- mapping with hypervisor stats (memory_mb, vcpus, ...)
    use_syslog -- True to write via syslog instead of stdout
    splunk    -- True to format the syslog message as key=value pairs
    idstring  -- optional prefix prepended to the record
    cmd       -- label of the command that produced the record
    """
    if use_syslog is True:
        if os.uname()[0] == "Darwin":
            # presumably needed so macOS logs under a fixed ident — TODO confirm
            syslog.openlog("Python")
        if splunk is True:
            # Splunk-friendly key=value form.
            message = ("memory=%s,workload=%s,vcpus=%s,instances=%s,"
                       "vcpu_used=%s,memory_used=%s,hypervisor=%s,"
                       "cmd=%s" % (
                           data['memory_mb'], data['current_workload'],
                           data['vcpus'], data['running_vms'],
                           data['vcpus_used'], data['memory_mb_used'],
                           data['hypervisor_hostname'], cmd))
        else:
            # Plain comma-separated values in the same field order.
            message = ("%s,%s,%s,%s,%s,%s,%s,%s" % (
                data['memory_mb'], data['current_workload'],
                data['vcpus'], data['running_vms'],
                data['vcpus_used'], data['memory_mb_used'],
                data['hypervisor_hostname'], cmd))
        if idstring is not None:
            message = ("%s,%s" % (idstring, message))
        syslog.syslog(syslog.LOG_ALERT, message)
    else:
        # stdout path: timestamp first, then the same fields as above.
        self.app.stdout.write(str(datetime.now()) + ",")
        if idstring is not None:
            self.app.stdout.write(idstring + ",")
        self.app.stdout.write(str(data['memory_mb']) + ",")
        self.app.stdout.write(str(data['current_workload']) + ",")
        self.app.stdout.write(str(data['vcpus']) + ",")
        self.app.stdout.write(str(data['running_vms']) + ",")
        self.app.stdout.write(str(data['vcpus_used']) + ",")
        self.app.stdout.write(str(data['memory_mb_used']) + ",")
        self.app.stdout.write(str(data['hypervisor_hostname']) + ",")
        self.app.stdout.write(str("%s\n" % cmd))
def __init__(self, ident=None):
    """Initialize the logger; opens syslog on LOG_LOCAL0.

    ident -- optional syslog ident; when set, the syslog path re-opens
             the log per message (see the companion _log_syslog method).
    """
    # Buffer for the most recently formatted message.
    self.__last_message = ''
    self._log = None
    self._form = None
    self.__ident = ident
    self.__fac = syslog.LOG_LOCAL0
    # Open with the default ident; a custom ident is applied per call.
    syslog.openlog(facility=self.__fac)
def debug(*args):
    """Emit a DEBUG-level message built from *args*.

    Sent to syslog in an openlog/closelog pair; additionally echoed via
    the ``logging`` module when stderr is a terminal.
    """
    text = make_msg(args)
    if sys.stderr.isatty():
        logging.debug(text)
    syslog.openlog(ident, syslog.LOG_NDELAY, facility)
    syslog.syslog(syslog.LOG_DEBUG, text)
    syslog.closelog()
def _log_syslog(self, prio):
    """Send the buffered message to syslog at priority *prio*."""
    if self.__ident:
        # A per-instance ident can only be set via openlog(), so wrap the
        # call in an openlog/closelog pair.
        syslog.openlog(ident=self.__ident, facility=self.__fac)
        syslog.syslog(prio, self.__last_message)
        syslog.closelog()
    else:
        # Default ident: encode the facility into the priority word instead
        # of re-opening the log.
        syslog.syslog(prio | self.__fac, self.__last_message)
def __init__(self, ltype, loglevel=0, ident=None, filename=None,
             format=loggingFormat, filemode='a+', maxBytes=10485760,
             backupCount=3):
    """Create a logger backed either by syslog or by a rotating log file.

    ltype       -- 'syslog' or 'logging'
    loglevel    -- index into the backend's level table
    ident       -- syslog ident (defaults to 'isdlogger')
    filename    -- required for ltype 'logging'
    format      -- logging formatter string
    filemode    -- file open mode for the rotating handler
    maxBytes    -- rotation size threshold
    backupCount -- number of rotated files to keep

    Raises Exception for a missing filename or an unknown ltype.
    """
    self.loggingLevel = loglevel
    self.ident = ident
    if ltype == 'syslog':
        self.log_ = self.syslogLog
        self.levels = syslogLevels
        if not ident:
            self.ident = 'isdlogger'
        syslog.openlog(self.ident)
    elif ltype == 'logging':
        self.log_ = self.loggingLog
        self.levels = loggingLevels
        #self.fflush = fflush
        #if not format: self.loggingFormat = loggingFormat
        #else: self.loggingFormat = format
        #if not filemode: self.filemode = 'a+'
        #else: self.filemode = filemode
        if not filename:
            raise Exception('Please specify the filename!')
        #else: self.filename = filename
        #logging.config
        # Configure the root logger with a size-based rotating file handler.
        logging.root.setLevel(self.levels[loglevel])
        rtHdlr = handlers.RotatingFileHandler(filename, mode=filemode,
                                              maxBytes=maxBytes,
                                              backupCount=backupCount)
        rtHdlr.setFormatter(logging.Formatter(format))
        logging.root.addHandler(rtHdlr)
        #logging.basicConfig(level=self.levels[loglevel], format=self.loggingFormat, filename=self.filename, filemode=self.filemode)
    else:
        raise Exception('Unknown logger type!')
def get_logger():
    """Return the process-wide SyslogWrapper, creating it on first use."""
    global logger
    global config
    if not logger:
        # First call: open the syslog channel before building the wrapper.
        syslog.openlog(caller, syslog.LOG_PID, syslog.LOG_DAEMON)
        logger = SyslogWrapper(config)
    return logger
def run():
    """Squid URL-rewriter loop: swap HTTP image URLs for placekitten images.

    Reads one redirector request line from stdin per iteration, forever.
    For GET requests of http .png/.jpg/.gif resources that fetch with
    HTTP 200 and decode as an image, writes a "302:<url>" redirect to a
    same-sized placekitten.com image; otherwise writes an empty line
    (pass the request through unchanged).
    """
    syslog.openlog(logoption=syslog.LOG_PID)
    syslog.syslog('Processing started')
    while True:
        line = sys.stdin.readline().strip()
        # First whitespace-separated token of a redirector line is the URL.
        url = line.split(' ', 1)[0]
        spliturl = urlparse.urlparse(url)
        if " GET" in line:
            if spliturl.scheme == 'http':
                lpath = spliturl.path.lower()
                if lpath[-4:] in ('.png', '.jpg', '.gif'):
                    response = urllib2.urlopen(url)
                    if response.code == 200:
                        image = StringIO(response.read())
                        try:
                            pilimage = Image.open(image)
                            width, height = pilimage.size
                        except Exception:
                            # Undecodable image: fall through and pass the
                            # request along unchanged.  (Was a bare
                            # "except:", which also swallowed
                            # KeyboardInterrupt/SystemExit.)
                            width = 0
                            height = 0
                        if width and height:
                            newurl = 'http://placekitten.com/%d/%d' % (width, height)
                            syslog.syslog(newurl)
                            sys.stdout.write('302:%s\n' % newurl)
                            sys.stdout.flush()
                            continue
        # Default: no rewrite for this request.
        sys.stdout.write('\n')
        sys.stdout.flush()
def log(msg, level=syslog.LOG_NOTICE):
    """Log *msg* either to stderr (console mode) or to syslog at *level*."""
    if USE_STDERR:
        # Console/debug mode: bypass syslog entirely.
        print >> sys.stderr, msg
    else:
        syslog.openlog('xs-activity-server', 0, syslog.LOG_USER)
        syslog.syslog(level, msg)
        syslog.closelog()
def _log(priority, msg): syslog.openlog("slander", logoption=syslog.LOG_PID) syslog.syslog(priority, msg) syslog.closelog() if sys.stdout.isatty(): print(msg)
def syslog(message, ident="", priority="info", facility="syslog", options=()):
    """Send a string to syslog and return that same string.

    message  -- text to log; multi-line messages are split and continuation
                lines are indented by one space
    ident    -- syslog ident; defaults to the basename of the running script
    priority -- symbolic priority name ("emerg" .. "debug")
    facility -- symbolic facility name ("kern" .. "local7")
    options  -- iterable of symbolic openlog option names
                ("pid", "cons", "ndelay", "nowait", "perror")

    Unknown names silently map to 0, matching the original behaviour.
    """
    priority = {
        "emerg": SYSLOG.LOG_EMERG, "alert": SYSLOG.LOG_ALERT,
        "crit": SYSLOG.LOG_CRIT, "err": SYSLOG.LOG_ERR,
        "warning": SYSLOG.LOG_WARNING, "notice": SYSLOG.LOG_NOTICE,
        "info": SYSLOG.LOG_INFO, "debug": SYSLOG.LOG_DEBUG
    }.get(str(priority).lower(), 0)
    facility = {
        "kern": SYSLOG.LOG_KERN, "user": SYSLOG.LOG_USER,
        "mail": SYSLOG.LOG_MAIL, "daemon": SYSLOG.LOG_DAEMON,
        "auth": SYSLOG.LOG_AUTH, "lpr": SYSLOG.LOG_LPR,
        "news": SYSLOG.LOG_NEWS, "uucp": SYSLOG.LOG_UUCP,
        "cron": SYSLOG.LOG_CRON, "syslog": SYSLOG.LOG_SYSLOG,
        "local0": SYSLOG.LOG_LOCAL0, "local1": SYSLOG.LOG_LOCAL1,
        "local2": SYSLOG.LOG_LOCAL2, "local3": SYSLOG.LOG_LOCAL3,
        "local4": SYSLOG.LOG_LOCAL4, "local5": SYSLOG.LOG_LOCAL5,
        "local6": SYSLOG.LOG_LOCAL6, "local7": SYSLOG.LOG_LOCAL7
    }.get(str(facility).lower(), 0)
    option = 0
    for opt in options:
        # OR the flag bits together.  The original used "+=", which
        # double-counted a flag listed twice and produced a bogus option
        # word; "|=" is idempotent.
        option |= {
            "pid": SYSLOG.LOG_PID, "cons": SYSLOG.LOG_CONS,
            "ndelay": SYSLOG.LOG_NDELAY, "nowait": SYSLOG.LOG_NOWAIT,
            "perror": SYSLOG.LOG_PERROR
        }.get(str(opt).lower(), 0)
    message = str(message)
    ident = str(ident)
    if not ident:
        ident = os.path.basename(sys.argv[0])
    SYSLOG.openlog(ident=ident, logoption=option, facility=facility)
    add = ""
    for line in message.split("\n"):
        if line:
            SYSLOG.syslog(priority, add + line)
            # Continuation lines are prefixed with a space.
            add = " "
    SYSLOG.closelog()
    return message
def main():  # main entry point
    # Open the error-log output file for this run.
    logs = open("FilesOutput/CompleteTables_Logs_error.txt", 'w')
    # Open syslog so the script's activity is recorded.
    syslog.openlog("CompleteTables.py", syslog.LOG_USER)
    syslog.syslog(syslog.LOG_INFO, "this script has created the next output files CompleteTables_Logs_error.txt")
    # Read the database connection details from the CSV path in argv[1].
    information_DB = ReadCSV(sys.argv[1])
    syslog.syslog(syslog.LOG_INFO, "Completando datos en tablas no relacionadas")
    # Open the import- and export-side database connections.
    data_connected_import = ConnectDB(information_DB['import'][0], information_DB['import'][1],
                                      information_DB['import'][2], information_DB['import'][3])
    data_connected_export = ConnectDB(information_DB['export'][0], information_DB['export'][1],
                                      information_DB['export'][2], information_DB['export'][3])
    # The same copy process is repeated for each of the unrelated tables.
    # Insert rows into table "tipodispositivo".
    CompleteProcessInsertion(data_connected_export[1], data_connected_export[0],
                             data_connected_import[1], data_connected_import[0],
                             "tipo_dispositivo", "tipodispositivo", "tipo",
                             "tipodispositivo_id_seq", 1)
    # Insert rows into table "marca".
    CompleteProcessInsertion(data_connected_export[1], data_connected_export[0],
                             data_connected_import[1], data_connected_import[0],
                             "marca", "marca", "marca", "marca_id_seq", 1)
    # Insert rows into table "tipousuario".
    CompleteProcessInsertion(data_connected_export[1], data_connected_export[0],
                             data_connected_import[1], data_connected_import[0],
                             "tipo_usuario", "tipousuario", "tipo",
                             "tipousuario_id_seq", 0)
    # Insert rows into tables "estadodispositivo" and "estadousuario".
    CompleteProcessInsertion(data_connected_export[1], data_connected_export[0],
                             data_connected_import[1], data_connected_import[0],
                             "estado", "estadodispositivo", "descripcion",
                             "estadodispositivo_id_seq", 1)
    logs.write("ok")
    logs.close()
    syslog.closelog()  # close syslog
    return 0
def main():
    """Feed the snmp_xen MIB tree and start listening for snmp's passpersist"""
    global pp
    syslog.openlog(sys.argv[0], syslog.LOG_PID)
    retry_timestamp = int(time.time())
    retry_counter = MAX_RETRY
    # NOTE(review): retry_counter is never decremented and retry_timestamp is
    # never read, so this loop only exits via sys.exit() — confirm intended.
    while retry_counter > 0:
        try:
            syslog.syslog(syslog.LOG_INFO, "Starting PostgreSQL Performance gathering...")
            # Load helpers
            pp = snmp.PassPersist(OID_BASE)
            pp.start(update_data, POOLING_INTERVAL)  # Should'nt return (except if updater thread has died)
        except KeyboardInterrupt:
            print "Exiting on user request."
            sys.exit(0)
        except IOError, e:
            if e.errno == errno.EPIPE:
                # snmpd closed our stdin/stdout pipe: normal shutdown.
                syslog.syslog(syslog.LOG_INFO, "Snmpd had close the pipe, exiting...")
                sys.exit(0)
            else:
                syslog.syslog(syslog.LOG_WARNING, "Updater thread has died: IOError: %s" % (e))
        except Exception, e:
            syslog.syslog(syslog.LOG_WARNING, "Main thread has died: %s: %s" % (e.__class__.__name__, e))
def run(self):
    """Supervise the watched command: start it, restart on death, exit on signal."""
    syslog.openlog("vpnc-watch", syslog.LOG_PID, syslog.LOG_DAEMON)
    # Refuse to start a second supervisor for an already-running command.
    pids = pidof(self.cmd)
    if pids:
        pids = ", ".join(map(str, pids))
        raise Error, "%s already running (%s)" % (self.cmd, pids)
    self.start()
    self.detach()
    try:
        # SIGHUP/SIGTERM are routed to self.signal, which sets the
        # do_exit / do_restart flags polled below.
        signal.signal(signal.SIGHUP, self.signal)
        signal.signal(signal.SIGTERM, self.signal)
        self.do_exit = False
        while not self.do_exit:
            self.do_restart = False
            time.sleep(1)
            running = self.isrunning()
            if not running:
                syslog.syslog(syslog.LOG_WARNING, "%s died" % self.name)
            elif self.do_exit or self.do_restart:
                self.stop()
            # Restart either on explicit request or after an unexpected death.
            if self.do_restart or not running:
                self.start()
        syslog.syslog(syslog.LOG_INFO, "exiting")
    except Error, e:
        syslog.syslog(syslog.LOG_ERR, "error: " + str(e))
        sys.exit(1)
def daemonize(self):
    """Makeself a daemon process. Double fork, close standard pipes, start a new session and open logs. """
    pid = os.fork()
    if pid == 0:
        # first child: become session leader, then fork again so the
        # daemon can never reacquire a controlling terminal.
        os.setsid()
        pid = os.fork()
        if pid == 0:
            # second child
            # Can't chdir to root if we have relative paths to
            # conffile and other modules
            #os.chdir('/')
            os.umask(0)
        else:
            os._exit(0)
    else:
        os._exit(0)
    # close stdin, stdout and stderr ...
    for fd in range(3):
        try:
            os.close(fd)
        except OSError:
            pass
    # ... and replace them with /dev/null (open() reuses fd 0, dup() fds 1 and 2)
    os.open('/dev/null', os.O_RDWR)
    os.dup(0)
    os.dup(0)
    syslog.openlog('hip-mgmt-iface', syslog.LOG_PID | syslog.LOG_NDELAY,
                   syslog.LOG_DAEMON)
    syslog.syslog('FirewallController started.')
def main():
    """Poll the REST API every *interval* seconds and restart it when unresponsive.

    A failing HTTP probe decrements the global *graceleft* counter; only
    when the grace budget is exhausted is the runit service kill-restarted.
    """
    syslog.openlog(b"restwatchdog")
    syslog.syslog(syslog.LOG_INFO,
                  "REST API watchdog checking %s, every %d seconds" % (checkurl, interval))
    while True:
        time.sleep(interval)
        status = runit_status(service)
        if status != 0:
            # Service not running at all: report and wait for runit to act.
            syslog.syslog(syslog.LOG_WARNING, "REST API not running (sv status: %d)" % (status, ))
            continue
        else:
            try:
                f = urllib2.urlopen(checkurl, timeout=timeout, context=sslctx)
                f.read()
            except Exception:
                global graceleft
                syslog.syslog(syslog.LOG_WARNING, "REST API not responding")
                if graceleft <= 0:
                    syslog.syslog(syslog.LOG_ERR, "Killing unresponsive REST service")
                    runit_kill_restart(service)
                    # Reset the grace budget after a restart.
                    graceleft = grace
                else:
                    graceleft -= 1
def config():
    """Load docker-covenant.yml into the global *conf* and open syslog.

    Exits with status 1 when the config file is missing.  The syslog ident
    defaults to "docker-covenant" when not configured.
    """
    global conf
    confFile = 'docker-covenant.yml'
    if os.path.isfile(confFile):
        with open(confFile, 'r') as f:
            conf = yaml.safe_load(f)
        # .get() tolerates an absent key; the original indexed
        # conf["debug"] / conf["syslog_ident"] and crashed with KeyError
        # when the option was missing from the YAML file.
        if conf.get("debug"):
            print("configuration file: ", confFile)
    else:
        print("Config file ", confFile, " doesn't exist.")
        sys.exit(1)
    try:
        logident = conf.get("syslog_ident") or "docker-covenant"
        syslog.openlog(ident=logident)
        if conf.get("debug"):
            print("syslog_ident ", logident)
    except NameError:
        pass
    try:
        if conf.get("debug"):
            print("Docker daemon info:\n", client.info())
    except NameError:
        # *client* is optional; skip the info dump when it isn't defined.
        pass
def log(self, msg, priority=None):
    """Log *msg* at *priority* (a level-name string) via syslog.

    The message is tagged with the priority and the module name, and is
    only forwarded to syslog when its level is at least as severe as
    ``self.min_priority``.  With no priority given, NOTICE is used.

    args    : msg ... message to be logged
              priority ... one of EMERG, ALERT, CRIT, ERR, WARNING,
              NOTICE, INFO or DEBUG
    return  : none
    """
    # The stdlib syslog module ignores the numeric priority here, so the
    # level is encoded into the message text and filtered manually.
    if priority is None:
        priority = Logger.NOTICE
    tagged = "{0}: [{1}] {2}".format(priority, self.module, msg)
    if self.case[priority] <= self.case[self.min_priority]:
        syslog.openlog(self.program, syslog.LOG_PID)
        syslog.syslog(tagged)
        syslog.closelog()
def infomsg(msg):
    """Log *msg* at INFO level under the "potiron" ident.

    NOTE(review): the third openlog() argument is the *facility*, but
    syslog.LOG_INFO is a *priority* constant — presumably a facility such
    as syslog.LOG_USER was intended; confirm before changing.
    """
    if logconsole:
        # LOG_PERROR mirrors every message to stderr for console runs.
        syslog.openlog("potiron", syslog.LOG_PID | syslog.LOG_PERROR, syslog.LOG_INFO)
    else:
        syslog.openlog("potiron", syslog.LOG_PID, syslog.LOG_INFO)
    syslog.syslog("[INFO] " + msg)
def main(args):
    ''' Notify the user '''
    # Parse "-f <database>"; any positional argument is a usage error.
    try:
        options, arguments = getopt.getopt(args[1:], 'f:')
    except getopt.error:
        sys.exit('Usage: neubot notifier [-f database]\n')
    if arguments:
        sys.exit('Usage: neubot notifier [-f database]\n')
    database = '/var/neubot/database.sqlite3'
    for name, value in options:
        if name == '-f':
            database = value
    syslog.openlog('neubot_notify', syslog.LOG_PID, syslog.LOG_USER)
    # Poll forever: re-check sooner (SHORT interval) while the privacy
    # settings still need the user's attention.
    while True:
        if __should_adjust_privacy(database):
            __notify_adjust_privacy()
            privacy_interval = SHORT_PRIVACY_INTERVAL
        else:
            privacy_interval = LONG_PRIVACY_INTERVAL
        time.sleep(privacy_interval)
def log(txt, force=False):
    """Write *txt* to syslog under "skolesys-backup".

    Suppressed while the module-level *log_enabled* flag is off, unless
    *force* is set.
    """
    global log, log_enabled
    if log_enabled or force:
        syslog.openlog("skolesys-backup")
        syslog.syslog(txt)
        syslog.closelog()
def __init__(self, **kwargs):
    """Initialise the DNS build driver; every option may be overridden via kwargs."""
    # Default build options; kwargs take precedence key by key.
    defaults = {
        'STAGE_DIR': STAGE_DIR,
        'PROD_DIR': PROD_DIR,
        'BIND_PREFIX': BIND_PREFIX,
        'LOCK_FILE': LOCK_FILE,
        'STOP_UPDATE_FILE': STOP_UPDATE_FILE,
        'LAST_RUN_FILE': LAST_RUN_FILE,
        'STAGE_ONLY': False,
        'NAMED_CHECKZONE_OPTS': NAMED_CHECKZONE_OPTS,
        'CLOBBER_STAGE': False,
        'PUSH_TO_PROD': False,
        'BUILD_ZONES': True,
        'PRESERVE_STAGE': False,
        'LOG_SYSLOG': True,
        'DEBUG': False,
        'FORCE': False,
        'bs': DNSBuildRun()  # Build statistic
    }
    for k, default in defaults.iteritems():
        setattr(self, k, kwargs.get(k, default))
    # This is very specific to python 2.6
    syslog.openlog('dnsbuild', 0, syslog.LOG_LOCAL6)
    self.lock_fd = None
def log_warning(msg, syslog_identifier, also_print_to_console=False):
    """Record *msg* at WARNING level under *syslog_identifier*.

    When *also_print_to_console* is set, the message is echoed to stdout.
    """
    syslog.openlog(syslog_identifier)
    syslog.syslog(syslog.LOG_WARNING, msg)
    syslog.closelog()
    if also_print_to_console:
        print(msg)
def log_error(msg, syslog_identifier, also_print_to_console=False):
    """Record *msg* at ERR level under *syslog_identifier*.

    When *also_print_to_console* is set, the message is echoed to stdout.
    """
    syslog.openlog(syslog_identifier)
    syslog.syslog(syslog.LOG_ERR, msg)
    syslog.closelog()
    if also_print_to_console:
        print(msg)
def open(self, verbosity=syslog.LOG_NOTICE):
    """Open the daemon's syslog channel and clamp logging to *verbosity*.

    Returns self so the call can be chained.
    """
    flags = syslog.LOG_PID | syslog.LOG_CONS | syslog.LOG_NDELAY
    if self.options.debug:
        # In debug mode, mirror every message to stderr as well.
        flags |= syslog.LOG_PERROR
    syslog.openlog("conreality", logoption=flags, facility=syslog.LOG_DAEMON)
    syslog.setlogmask(syslog.LOG_UPTO(verbosity))
    return self
def main():
    """Feed the snmp_xen MIB tree and start listening for snmp's passpersist"""
    global pp
    global node
    syslog.openlog(sys.argv[0], syslog.LOG_PID)
    retry_timestamp = int(time.time())
    retry_counter = MAX_RETRY
    # NOTE(review): retry_counter is never decremented and retry_timestamp is
    # never read — the loop only exits via sys.exit(); confirm this is intended.
    while retry_counter > 0:
        try:
            syslog.syslog(syslog.LOG_INFO, "Starting Xen monitoring...")
            # Load helpers
            pp = snmp.PassPersist(OID_BASE)
            node = cxm.node.Node(platform.node())
            # Set statics data
            pp.add_str('1.1.0', node.get_hostname())
            pp.add_gau('1.2.0', int(node.metrics.get_host_nr_cpus()))
            oid = pp.encode("Domain-0")
            pp.add_str('1.9.1.' + oid, 'Domain-0')
            pp.add_int('1.9.2.' + oid, 0)
            pp.add_gau('1.9.4.' + oid, 2)  # Always 2 VPCU for Dom0 (TODO: ask the Xen-API)
            pp.start(update_data, POOLING_INTERVAL)  # Should'nt return (except if updater thread has died)
        except IOError, e:
            if e.errno == errno.EPIPE:
                # snmpd closed our pipe: normal shutdown.
                syslog.syslog(syslog.LOG_INFO, "Snmpd had close the pipe, exiting...")
                sys.exit(0)
        except Exception, e:
            syslog.syslog(syslog.LOG_WARNING, "Main thread as died: %s" % (e))
def error(*args):
    """Emit an ERROR-level message built from *args*.

    Always sent to syslog (openlog/closelog per call); also mirrored via
    the ``logging`` module when stderr is attached to a terminal.
    """
    text = make_msg(args)
    if sys.stderr.isatty():
        logging.error(text)
    syslog.openlog(ident, syslog.LOG_NDELAY, facility)
    syslog.syslog(syslog.LOG_ERR, text)
    syslog.closelog()
def setUp(self):
    """Per-test setup: configure syslog, restore cwd, load config, wipe old HTML output."""
    global config_path
    global cwd
    weewx.debug = 1
    syslog.openlog('test_templates', syslog.LOG_CONS)
    syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
    # Save and set the current working directory in case some service changes it.
    if not cwd:
        cwd = os.getcwd()
    else:
        os.chdir(cwd)
    try:
        self.config_dict = configobj.ConfigObj(config_path, file_error=True)
    except IOError:
        sys.stderr.write("Unable to open configuration file %s" % config_path)
        # Reraise the exception (this will eventually cause the program to exit)
        raise
    except configobj.ConfigObjError:
        sys.stderr.write("Error while parsing configuration file %s" % config_path)
        raise
    # Remove the old directory:
    try:
        test_html_dir = os.path.join(self.config_dict['WEEWX_ROOT'],
                                     self.config_dict['StdReport']['HTML_ROOT'])
        shutil.rmtree(test_html_dir)
    except OSError, e:
        if os.path.exists(test_html_dir):
            # NOTE(review): "%s" here is never interpolated — the comma makes
            # print emit the format string and the path separately; confirm.
            print >> sys.stderr, "\nUnable to remove old test directory %s", test_html_dir
            print >> sys.stderr, "Reason:", e
            print >> sys.stderr, "Aborting"
            exit(1)
def main(): global isy, programs, syslogUse, syslogFacility, logfile # Setup syslog if requested if syslogUse: syslog.openlog(logoption=syslog.LOG_PID, facility=syslogFacilities[syslogFacility]) # Open logfile if requested if logfile: try: logfile = open(logfile, 'ab+') except IOError: usage('ERROR: Failed to open logfile! %s' % sys.exc_info()[1]) # Dump status on sigusr1 signal.signal(signal.SIGUSR1,status_dump) # Connect to ISY try: isy = Isy(addr=isyHost, userl=isyUser, userp=isyPass, debug=isyDebug) except: print "ERROR: Connection to ISY failed!" sys.exit(1) programs = get_proginfo(isy) # Get info about programs for trigger logging server = ISYEvent() server.subscribe(addr=isyHost, userl=isyUser, userp=isyPass, debug=isyDebug) server.set_process_func(parse_event, "") try: #print('Use Control-C to exit') server.events_loop() #no return except KeyboardInterrupt: print('Exiting')
def exception():
    """Log the current exception's traceback at ERROR level.

    Must be called from inside an ``except`` block.  The formatted
    traceback goes to syslog and, on a TTY, to the logging module.
    """
    text = traceback.format_exc()
    if sys.stderr.isatty():
        logging.error(text)
    syslog.openlog(ident, syslog.LOG_NDELAY, facility)
    syslog.syslog(syslog.LOG_ERR, text)
    syslog.closelog()
config_dict['StdWXCalculate']['Calculations']['dewpoint'] = 'hardware' # Define a main entry point for basic testing without the weewx engine. # Invoke this as follows from the weewx root dir: # # PYTHONPATH=bin python bin/weewx/drivers/wmr9x8.py if __name__ == '__main__': import optparse usage = """Usage: %prog --help %prog --version %prog --gen-packets [--port=PORT]""" syslog.openlog('wmr9x8', syslog.LOG_PID | syslog.LOG_CONS) syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG)) weewx.debug = 2 parser = optparse.OptionParser(usage=usage) parser.add_option('--version', dest='version', action='store_true', help='Display driver version') parser.add_option('--port', dest='port', metavar='PORT', help='The port to use. Default is %s' % DEFAULT_PORT, default=DEFAULT_PORT) parser.add_option('--gen-packets', dest='gen_packets',
#!/usr/bin/env python import ldap import sys import syslog adserver = 'sinatra.lwpca.net' adport = '389' binduser = '******' bindpw = 'w56aUH#8^+>jMxBG$v' # open a syslog handle syslog.openlog(facility=syslog.LOG_AUTH) # check that we got a username on the command line, abort if one # was not supplied and write a note in the auth.log. try: search_target = sys.argv[1] if "@" in search_target: search_target = search_target.split("@")[0] except IndexError: syslog.syslog(syslog.LOG_ERR, "User to check in LDAP not specified. Exiting with error.") sys.exit(1) # write an info line in the syslog that we're going to try authenticating. syslog.syslog( syslog.LOG_INFO, "running %s for authentication of %s." % (sys.argv[0], search_target)) # Initialize an LDAP handle and bind to the directory. Abort if we can't bind
def log(txt, lvl=syslog.LOG_NOTICE):
    """Forward *txt* both to syslog (ident 'golumn') and to wx's debug log."""
    syslog.openlog('golumn')
    syslog.syslog(lvl, txt)
    # Mirror into the wxWidgets debug log for GUI-side diagnostics.
    wx.LogDebug(txt)
# The driver to use: driver = weewx.drivers.acurite """ # define a main entry point for basic testing of the station without weewx # engine and service overhead. invoke this as follows from the weewx root dir: # # PYTHONPATH=bin python bin/weewx/drivers/acurite.py if __name__ == '__main__': import optparse usage = """%prog [options] [--help]""" syslog.openlog('acurite', syslog.LOG_PID | syslog.LOG_CONS) syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG)) parser = optparse.OptionParser(usage=usage) parser.add_option('--version', dest='version', action='store_true', help='display driver version') (options, args) = parser.parse_args() if options.version: print "acurite driver version %s" % DRIVER_VERSION exit(0) test_r1 = True test_r2 = True test_r3 = False
# the pieces of time to iterate over (days, hours, minutes, etc) # - the first piece in each tuple is the suffix (d, h, w) # - the second piece is the length in seconds (a day is 60s * 60m * 24h) parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52), (suffixes[1], 60 * 60 * 24 * 7), (suffixes[2], 60 * 60 * 24), (suffixes[3], 60 * 60), (suffixes[4], 60), (suffixes[5], 1)] # for each time piece, grab the value and remaining seconds, and add it to # the time string for suffix, length in parts: value = seconds / length if value > 0: seconds = seconds % length time.append('%s%s' % (str(value), (suffix, (suffix, suffix + 's')[value > 1])[add_s])) if seconds < 1: break return separator.join(time) if __name__ == '__main__': syslog.openlog('garage_controller') config_file = open('config.json') controller = Controller(json.load(config_file)) config_file.close() controller.run()
# config file is optional here try: cf = basicconfig.get_config("logging.conf") except basicconfig.ConfigReadError as err: warn(err, "Using default values.") FACILITY = "USER" LEVEL = "WARNING" else: FACILITY = cf.FACILITY LEVEL = cf.LEVEL del cf del basicconfig syslog.openlog(sys.argv[0].split("/")[-1], syslog.LOG_PID, getattr(syslog, "LOG_" + FACILITY)) _oldloglevel = syslog.setlogmask( syslog.LOG_UPTO(getattr(syslog, "LOG_" + LEVEL))) def close(): syslog.closelog() def debug(msg): syslog.syslog(syslog.LOG_DEBUG, _encode(msg)) def info(msg): syslog.syslog(syslog.LOG_INFO, _encode(msg))
def log_err(msg, also_print_to_console=False):
    """Log *msg* to syslog under "sfputil" at ERR level.

    The *also_print_to_console* parameter was previously accepted but
    silently ignored; it now echoes the message to stdout, matching the
    sibling log helpers.
    """
    syslog.openlog("sfputil")
    syslog.syslog(syslog.LOG_ERR, msg)
    syslog.closelog()
    if also_print_to_console:
        print(msg)
def log_info(msg, also_print_to_console=False):
    """Log *msg* to syslog under "sfputil" at INFO level.

    The *also_print_to_console* parameter was previously accepted but
    silently ignored; it now echoes the message to stdout, matching the
    sibling log helpers.
    """
    syslog.openlog("sfputil")
    syslog.syslog(syslog.LOG_INFO, msg)
    syslog.closelog()
    if also_print_to_console:
        print(msg)
def run():
    """Start the Ajenti server: load plugins, bind the listener, serve until exit.

    Also handles the restart-by-request path (re-exec of the process) and
    registers debug console helpers when ajenti.debug is set.
    """
    ajenti.init()
    reload(sys)
    sys.setdefaultencoding('utf8')
    try:
        locale.setlocale(locale.LC_ALL, '')
    except:
        logging.warning('Couldn\'t set default locale')
    logging.info('Ajenti %s running on platform: %s' % (ajenti.version, ajenti.platform))
    if not ajenti.platform in ['debian', 'centos', 'freebsd', 'mageia']:
        logging.warn('%s is not officially supported!' % ajenti.platform)

    if ajenti.debug:
        # Interactive-console helpers, registered with exconsole below.
        def cmd_list_instances(ctx=None):
            import pprint
            if not ctx:
                from ajenti.plugins import manager
                ctx = manager.context
            pprint.pprint(ctx._get_all_instances())

        def cmd_sessions():
            import pprint
            sessions = SessionMiddleware.get().sessions
            return sessions

        def cmd_list_instances_session():
            cmd_list_instances(cmd_sessions().values()[0].appcontext)

        exconsole.register(commands=[
            ('_manager', 'PluginManager', ajenti.plugins.manager),
            ('_instances', 'return all @plugin instances', cmd_list_instances),
            ('_sessions', 'return all Sessions', cmd_sessions),
            ('_instances_session', 'return all @plugin instances in session #0', cmd_list_instances_session),
        ])

    # Load plugins
    ajenti.plugins.manager.load_all()
    Inflater.get().precache()

    bind_spec = (ajenti.config.tree.http_binding.host, ajenti.config.tree.http_binding.port)
    if ':' in bind_spec[0]:
        # IPv6 host: resolve into a full sockaddr tuple.
        addrs = socket.getaddrinfo(bind_spec[0], bind_spec[1], socket.AF_INET6, 0, socket.SOL_TCP)
        bind_spec = addrs[0][-1]

    # Fix stupid socketio bug (it tries to do *args[0][0])
    socket.socket.__getitem__ = lambda x, y: None

    logging.info('Starting server on %s' % (bind_spec, ))
    if bind_spec[0].startswith('/'):
        # A path-like host means a UNIX domain socket.
        if os.path.exists(bind_spec[0]):
            os.unlink(bind_spec[0])
        listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            listener.bind(bind_spec[0])
        except:
            logging.error('Could not bind to %s' % bind_spec[0])
            sys.exit(1)
        listener.listen(10)
    else:
        listener = socket.socket(
            socket.AF_INET6 if ':' in bind_spec[0] else socket.AF_INET,
            socket.SOCK_STREAM)
        if not ajenti.platform in ['freebsd', 'osx']:
            try:
                listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
            except:
                logging.warn('Could not set TCP_CORK')
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            listener.bind(bind_spec)
        except:
            logging.error('Could not bind to %s' % (bind_spec, ))
            sys.exit(1)
        listener.listen(10)

    # Middleware stack, outermost first.
    stack = [SessionMiddleware.get(), AuthenticationMiddleware.get(), CentralDispatcher.get()]

    ssl_args = {}
    if ajenti.config.tree.ssl.enable:
        ssl_args['certfile'] = ajenti.config.tree.ssl.certificate_path
        ## https://github.com/ajenti/ajenti/pull/796 >>
        ssl_args['ssl_version'] = gevent.ssl.PROTOCOL_TLSv1
        ## << https://github.com/ajenti/ajenti/pull/796
        logging.info('SSL enabled: %s' % ssl_args['certfile'])

    ajenti.server = SocketIOServer(
        listener,
        log=open(os.devnull, 'w'),
        application=HttpRoot(stack).dispatch,
        policy_server=False,
        handler_class=RootHttpHandler,
        resource='ajenti:socket',
        transports=[
            str('websocket'),
            str('flashsocket'),
            str('xhr-polling'),
            str('jsonp-polling'),
        ],
        **ssl_args)

    # auth.log
    try:
        syslog.openlog(
            ident=str(b'ajenti'),
            facility=syslog.LOG_AUTH,
        )
    except:
        # Older syslog bindings without keyword support.
        syslog.openlog(b'ajenti')

    try:
        gevent.signal(signal.SIGINT, lambda: sys.exit(0))
        gevent.signal(signal.SIGTERM, lambda: sys.exit(0))
    except:
        pass

    ajenti.feedback.start()
    ajenti.ipc.IPCServer.get().start()
    ajenti.licensing.Licensing.get()

    ajenti.server.serve_forever()

    if hasattr(ajenti.server, 'restart_marker'):
        logging.warn('Restarting by request')

        fd = 20
        # Close all descriptors. Creepy thing
        while fd > 2:
            try:
                os.close(fd)
                logging.debug('Closed descriptor #%i' % fd)
            except:
                pass
            fd -= 1

        # Replace the current process with a fresh copy of itself.
        os.execv(sys.argv[0], sys.argv)
    else:
        logging.info('Stopped by request')
sys.exit(-1) prog = sys.argv.pop(0) optslist, args = getopt.getopt(sys.argv, 'tcp:f:o:') dopts = dict(optslist) save_path = None filename = None offset = 0 if dopts.has_key("-c"): send = send_console # print "logging to console" elif dopts.has_key("-t"): print "logging to test syslog" prio = syslog.LOG_DEBUG syslog.openlog('JOBSDATA', syslog.LOG_NOWAIT, syslog.LOG_USER) else: syslog.openlog('JOBSDATA', syslog.LOG_NOWAIT, syslog.LOG_USER) if dopts.has_key("-p"): save_path = dopts["-p"] checkCacheDir(save_path) if dopts.has_key("-f"): filename = dopts["-f"] if dopts.has_key("-o"): offset = int(dopts["-o"]) run_all(save_path, filename, offset)
if self.mc.set('%s:%s' % (self.blacklist, host), 1, blacklist_timeout): return True else: syslog.syslog("Failed to set blacklist timeout for host %s" % host) return False def usage(cmd = None): print "Usage:\t%s [options]" % (cmd, ) print "Options:" print "\t-h or --help\t\tRead this help and exit" print "\t-c or --config\t\tUse specified config file instead of default" print "\t-v or --verbose\t\tBe a little bit more verbose than usual" if __name__ == "__main__": # Priority levels (high to low): # LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR, LOG_WARNING, LOG_NOTICE, # LOG_INFO, LOG_DEBUG # # Facilities: # LOG_KERN, LOG_USER, LOG_MAIL, LOG_DAEMON, LOG_AUTH, LOG_LPR, LOG_NEWS, # LOG_UUCP, LOG_CRON and LOG_LOCAL0 to LOG_LOCAL7 # # Log options: # LOG_PID, LOG_CONS, LOG_NDELAY, LOG_NOWAIT and LOG_PERROR # if defined in <syslog.h>. syslog.openlog('ZabbixSync', syslog.LOG_PID, syslog.LOG_NOTICE) syslog.syslog('Starting ZabbixSwitchSync') sys.exit(main())
import os import sys import time, datetime import json import re from threading import Thread import random, atexit, signal, inspect from threading import Lock import subprocess, collections, argparse, grp, pwd, shutil import ConfigParser import smtplib import socket import urllib import syslog syslog.openlog('blocky', logoption=syslog.LOG_PID, facility=syslog.LOG_LOCAL0) config = ConfigParser.ConfigParser() es = None hostname = socket.gethostname() if hostname.find(".apache.org") == -1: hostname = hostname + ".apache.org" syslog.syslog(syslog.LOG_INFO, "Starting blocky on %s" % hostname) class Daemonize: """A generic daemon class. Usage: subclass the daemon class and override the run() method.""" def __init__(self, pidfile):
return node_doc["alerting_state"] # Prepare for subprocess timeouts class Alarm(Exception): pass def alarm_handler(signum, frame): raise Alarm signal.signal(signal.SIGALRM, alarm_handler) # Prepare syslog syslog.openlog(os.path.basename(sys.argv[0]), syslog.LOG_NOWAIT, syslog.LOG_DAEMON) # Are we on a compute node? hostname = os.uname()[1] match = re.match("^n\d+", hostname) if match is None: sys.stderr.write("Not a compute node, exiting.\n") sys.exit(1) node = re.sub("^n", "", hostname) node = int(node) #
print "example: /dev/ttyUSB0 or /dev/ttyS0 or /dev/cua0." port = self._prompt('port', Station.DEFAULT_PORT) return {'port': port} # define a main entry point for basic testing. invoke this as follows from # the weewx root dir: # # PYTHONPATH=bin python bin/weewx/drivers/wmr89.py if __name__ == '__main__': import optparse usage = """%prog [options] [--help]""" syslog.openlog('wmr89', syslog.LOG_PID | syslog.LOG_CONS) syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG)) parser = optparse.OptionParser(usage=usage) parser.add_option('--version', dest='version', action='store_true', help='display driver version') parser.add_option('--port', dest='port', metavar='PORT', help='serial port to which the station is connected', default=Station.DEFAULT_PORT) (options, args) = parser.parse_args() if options.version: print "%s driver version %s" % (DRIVER_NAME, DRIVER_VERSION) exit(0) with Station(options.port) as station: for pkt in station.get_data():
#!/usr/bin/python import serial import csv import RPi.GPIO as GPIO import time import syslog ser = serial.Serial('/dev/ttyUSB2') # Use this for debugging #ser = serial.serial_for_url('spy:///dev/ttyUSB2') syslog.openlog(facility=syslog.LOG_DAEMON) # GPIO pin 4 is actually pin 7 on the GPIO header. # It's a good choice since pin 5 is a ground. reset_pin = 4 def doCommand(command): ser.flushInput() ser.write(command + "\r") ser.flush() out = [] while True: line = ser.readline().rstrip() if (line.startswith("ERROR")): raise Exception("command returned error") if (line.startswith("OK")): break out.append(line) return out[1:] # Skip over the echo of the command
def main(TASK, o=False, s=False, q=False):
    """Post TASK to a pScheduler lead, wait for its first run, report results.

    Parameters:
        TASK: task specification dict; its test spec's "source" (if present)
              selects the lead host, otherwise localhost is used.
        o:    if True, print the JSON result to stdout.
        s:    if True, record the run's result URL via syslog.
        q:    if True, publish the JSON result to RabbitMQ.
    """
    if "source" in TASK["test"]["spec"]:
        LEAD = TASK["test"]["spec"]["source"]
    else:
        LEAD = "localhost"

    tasks_url = "https://%s/pscheduler/tasks" % (LEAD)

    try:
        status, task_url = url_post(tasks_url, data=json_dump(TASK))
    except Exception as ex:
        fail("Unable to post task: %s" % (str(ex)))

    # -------------------------------------------------------------------------
    #
    # Fetch the posted task with extra details.
    #
    try:
        status, task_data = url_get(task_url, params={"detail": True})
        if status != 200:
            raise Exception(task_data)
    except Exception as ex:
        # BUG FIX: this step *fetches* the task; the old message said "post".
        fail("Failed to fetch task: %s" % (str(ex)))

    try:
        first_run_url = task_data["detail"]["first-run-href"]
    except KeyError:
        fail("Server returned incomplete data.")

    if VERBOSE:
        print(" ")
        print(" ")
        print("Task with server-added detail:")
        print(" ")
        print(json_dump(task_data))

    # -------------------------------------------------------------------------
    #
    # Get first run and make sure we have what we need to function.  The
    # server will wait until the first run has been scheduled before
    # returning a result.
    #
    status, run_data = url_get(first_run_url)
    if status == 404:
        fail("The server never scheduled a run for the task.")
    if status != 200:
        fail("Error %d: %s" % (status, run_data))

    for key in ["start-time", "end-time", "result-href"]:
        if key not in run_data:
            fail("Server did not return %s with run data" % (key))

    if VERBOSE:
        print(" ")
        print("Data about first run:")
        print(" ")
        print(json_dump(run_data))

    # -------------------------------------------------------------------------
    #
    # Wait for the end time to pass
    #
    try:
        # The end time comes back as ISO 8601.  Parse it.
        end_time = dateparse.parse(run_data["end-time"])
    except ValueError as ex:
        fail("Server did not return a valid end time for the task: %s"
             % (str(ex)))

    now = datetime.datetime.now(tzlocal())
    sleep_time = end_time - now if end_time > now else datetime.timedelta()
    # total_seconds() replaces the manual days/seconds/microseconds arithmetic.
    # The extra 15 seconds is margin so the run can finish before we poll.
    sleep_seconds = sleep_time.total_seconds() + 15

    time.sleep(sleep_seconds)

    # -------------------------------------------------------------------------
    #
    # Wait for the result to be produced and fetch it.
    #
    status, result_data = url_get(run_data["result-href"],
                                  params={"wait-merged": True})
    if status != 200:
        fail("Did not get a result: %s" % (result_data))

    # -------------------------------------------------------------------------
    #
    # If the run succeeded, fetch a plain-text version of the result.
    #
    # This fetches the same endpoint as above but doesn't wait for the
    # merged (finished) result and asks for it in text format.  Supported
    # formats are application/json, text/plain and text/html.  Note that
    # not all tests generate proper text/html.
    #
    if not result_data["succeeded"]:
        fail("Test failed to run properly.")

    status, result_text = url_get(run_data["result-href"],
                                  params={"format": "text/plain"},
                                  json=False)
    if status != 200:
        fail("Did not get a result: %s" % (result_text))

    if o:  # stdout print
        print("JSON Result:")
        print(json_dump(result_data))

    if s:  # syslog print
        syslog.openlog("urlsjson", 0, syslog.LOG_LOCAL3)
        syslog.syslog(syslog.LOG_DEBUG, run_data["result-href"])
        syslog.closelog()

    if q:  # rabbitmq: publish the JSON result object
        message = json_dump(result_data)
        connection = pika.BlockingConnection(
            pika.URLParameters("amqp://*****:*****@elastic"))
        channel = connection.channel()
        channel.basic_publish(exchange="",
                              routing_key="pscheduler_raw",
                              body=message)
        channel.close()
        connection.close()
'blacklist download : unable to download file from %s (status_code: %d)' % (uri, req.status_code)) if __name__ == '__main__': # check for a running download process, this may take a while so it's better to check... try: lck = open('/tmp/unbound-download_blacklists.tmp', 'w+') fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: # already running, exit status 99 sys.exit(99) startup_time = time.time() syslog.openlog('unbound', logoption=syslog.LOG_DAEMON, facility=syslog.LOG_LOCAL4) blacklist_items = set() if os.path.exists('/var/unbound/etc/blacklists.ini'): cnf = ConfigParser() cnf.read('/var/unbound/etc/blacklists.ini') # exclude (white) lists, compile to regex to be used to filter blacklist entries if cnf.has_section('exclude'): exclude_list = set() for exclude_item in cnf['exclude']: try: re.compile(cnf['exclude'][exclude_item], re.IGNORECASE) exclude_list.add(cnf['exclude'][exclude_item]) except re.error: syslog.syslog( syslog.LOG_ERR,
def Main():
    """Daemonize this process and run an iperf-like TCP or UDP server.

    Double-forks into the background, detaches from the controlling
    terminal, redirects the standard streams to /dev/null, then parses
    the command line and dispatches to ServerTCP or ServerUDP.
    """
    syslog.openlog("iperf_daemon",
                   syslog.LOG_CONS | syslog.LOG_PID | syslog.LOG_NDELAY,
                   syslog.LOG_LOCAL7)
    syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    # First fork: the parent exits so the child runs in the background.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
        print('after first fork')
    except OSError as err:
        # BUG FIX: the old message '...failed !'.format(err) had no {}
        # placeholder, so the actual error was silently dropped.
        sys.stderr.write('First fork failed: {}\n'.format(err))
        sys.exit(1)

    # Second fork: prevents the daemon from ever reacquiring a terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
        print('after second fork')
    except OSError as err:
        sys.stderr.write('Second fork failed: {}\n'.format(err))
        sys.exit(1)

    try:
        os.chdir("/")
        os.setsid()
        os.umask(0)  # Any permission may be set (read, write, execute)
        sys.stdout.flush()  # write everything from stdout buffer on the terminal
        sys.stderr.flush()  # write everything from stderr buffer on the terminal
        si = open(os.devnull, 'r')
        so = open(os.devnull, 'a+')
        se = open(os.devnull, 'a+')
        # Duplicate the null streams over stdin/stdout/stderr; other modules
        # may already hold references to the original sys.* objects.
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
    except Exception as msg:
        # BUG FIX: syslog.syslog() requires a str; passing the exception
        # object directly raises TypeError.
        syslog.syslog(syslog.LOG_ERR, str(msg))

    pid = str(os.getpid())
    syslog.syslog(syslog.LOG_NOTICE, 'daemon started: ' + pid)

    parser = argparse.ArgumentParser(
        description=
        'IPERF like script, IP address of socket on/to which you want to connect is REQUIRED, also option if you want to run SERVER or CLIENT'
    )
    parser.add_argument(
        '-i', '--ip',
        help=
        'host/serv ip address, if you write ip address on server, server will expect connection only from this particular IP, if multicast is enabled and we '
        ' pass something here, server will bind with mcastgrp addr ! ',
        nargs='?', type=str, default='0.0.0.0')
    parser.add_argument('-p', '--port',
                        help='Port number, default port nr. is 8888',
                        type=int, default=8888, nargs='?')
    parser.add_argument(
        '-l', '--len',
        help='Lenght of buffers to read and write, default = 128000',
        nargs='?', default=128000, type=int)
    parser.add_argument(
        '-bs', '--buffsize',
        help=
        'Option which controls amound of data that is transmitted every datagram, default = 8000',
        nargs='?', default=8000, type=int)
    parser.add_argument(
        '-t', '--time',
        help=
        'ONLY SERVER option -> how much time measurment lasts, default = 10s',
        default=10, nargs='?', type=int)
    parser.add_argument('-m', '--multicast', help='Turning on multicast ',
                        action='store_true', default=False)
    parser.add_argument('-ttl', '--timetolive',
                        help='Select the ttl for your packets, default = 20',
                        nargs='?', default=20, type=int)
    parser.add_argument(
        '-mp', '--mcastport',
        help='Port number of multicast, default port nr. is 8000',
        type=int, default=8000, nargs='?')
    parser.add_argument(
        '-mg', '--mcastgrp',
        help=
        'IP addr of multicast , default ipv4 => 224.0.0.1 ipv6 => ff15:7079:7468:6f6e:6465:6d6f:6d63:6173',
        type=str, default='224.0.0.1', nargs='?')
    parser.add_argument('-6', '--IPV6',
                        help='Use of IPV6, default one is IPv4 !',
                        action='store_true', default=False)
    parser.add_argument('-T', '--TCP', help='If you want to use TCP ',
                        action='store_true', default=False)
    parser.add_argument('-U', '--UDP', help='If you want to use UDP',
                        action='store_true', default=False)
    parser.add_argument('-n', '--nagle', help='Turning off Nagle algorithm',
                        action='store_false', default=True)
    args = parser.parse_args()

    # Dispatch: exactly one of TCP/UDP must be selected.
    if args.TCP and not args.UDP:
        ServerTCP(args.port, args.len, args.buffsize, args.nagle, args.ip,
                  args.IPV6)
    elif args.UDP and not args.TCP:
        ServerUDP(args.port, args.mcastport, args.mcastgrp, args.len,
                  args.buffsize, args.timetolive, args.ip, args.multicast,
                  args.IPV6)
    elif args.TCP and args.UDP:
        print('You should chose either TCP or UDP ! ')
    else:
        print('Something went wrong ! ')
def emit(self, record):
    """Send *record* to syslog using the handler's facility and priority map.

    The priority is looked up in ``self.priority_names`` by the record's
    lowercased level name; unknown levels fall back to ``syslog.LOG_DEBUG``.
    """
    # openlog() returns None; the old code bound it to an unused name `hand`.
    syslog.openlog(facility=self.facility)
    msg = self.format(record)
    # BUG FIX: the old fallback was the *string* "debug", which
    # syslog.syslog() rejects (the priority must be an int).
    priority = self.priority_names.get(record.levelname.lower(),
                                       syslog.LOG_DEBUG)
    syslog.syslog(priority, msg)
    syslog.closelog()
def log_tvi_msg(msg):
    """Write *msg* to the system log, tagged 'nagios' with the PID included."""
    syslog.openlog('nagios', syslog.LOG_PID)
    syslog.syslog(msg)
syslog.syslog(syslog.LOG_INFO, "rsyncupload: rsync reported errors: %s" % stroutput) rsync_message = "rsync executed in %0.2f seconds" t2 = time.time() syslog.syslog(syslog.LOG_INFO, "rsyncupload: " + rsync_message % (t2 - t1)) if __name__ == '__main__': import weewx import configobj weewx.debug = 1 syslog.openlog('rsyncupload', syslog.LOG_PID | syslog.LOG_CONS) syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG)) if len(sys.argv) < 2: print """Usage: rsyncupload.py path-to-configuration-file [path-to-be-rsync'd]""" exit() try: config_dict = configobj.ConfigObj(sys.argv[1], file_error=True) except IOError: print "Unable to open configuration file ", sys.argv[1] raise if len(sys.argv) == 2: try: rsync_dir = os.path.join(config_dict['WEEWX_ROOT'],
#!/usr/bin/env python3 import os import sys import time import syslog def _prefixed(level, message): now = time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime()) return "%s %-8s %-6d %s" % (now, level, os.getpid(), message) syslog.openlog("ExaBGP") # When the parent dies we are seeing continual newlines, so we only access so many before stopping counter = 0 while True: try: line = sys.stdin.readline().strip() if line == "": counter += 1 if counter > 100: break continue counter = 0 syslog.syslog(syslog.LOG_ALERT, _prefixed('INFO', line)) except KeyboardInterrupt:
def __init__(self, loglevel, name):
    """Store the numeric log level and open a syslog channel tagged with *name*."""
    self.loglevel = int(loglevel)
    syslog.openlog('[TUNNELKING] %s' % name)
def doAuth(pamh):
    """Starts authentication in a separate process.

    Runs the Howdy face-recognition compare script as a python3 subprocess
    and maps its exit status onto a PAM return code for *pamh*.
    """
    # Abort if Howdy is disabled
    if config.getboolean("core", "disabled"):
        return pamh.PAM_AUTHINFO_UNAVAIL

    # Abort if we're in a remote SSH env
    if config.getboolean("core", "ignore_ssh"):
        if "SSH_CONNECTION" in os.environ or "SSH_CLIENT" in os.environ or "SSHD_OPTS" in os.environ:
            return pamh.PAM_AUTHINFO_UNAVAIL

    # Abort if lid is closed
    if config.getboolean("core", "ignore_closed_lid"):
        if any("closed" in open(f).read() for f in glob.glob("/proc/acpi/button/lid/*/state")):
            return pamh.PAM_AUTHINFO_UNAVAIL

    # Set up syslog
    syslog.openlog("[HOWDY]", 0, syslog.LOG_AUTH)

    # Alert the user that we are doing face detection
    if config.getboolean("core", "detection_notice"):
        pamh.conversation(
            pamh.Message(pamh.PAM_TEXT_INFO, "Attempting face detection"))

    syslog.syslog(
        syslog.LOG_INFO,
        "Attempting facial authentication for user " + pamh.get_user())

    # Run compare as python3 subprocess to circumvent python version and import issues
    status = subprocess.call([
        "/usr/bin/python3",
        os.path.dirname(os.path.abspath(__file__)) + "/compare.py",
        pamh.get_user()
    ])

    # Status 10 means we couldn't find any face models
    if status == 10:
        if not config.getboolean("core", "suppress_unknown"):
            pamh.conversation(
                pamh.Message(pamh.PAM_ERROR_MSG, "No face model known"))
        syslog.syslog(syslog.LOG_NOTICE, "Failure, no face model known")
        syslog.closelog()
        return pamh.PAM_USER_UNKNOWN

    # Status 11 means we exceeded the maximum retry count
    elif status == 11:
        pamh.conversation(
            pamh.Message(pamh.PAM_ERROR_MSG, "Face detection timeout reached"))
        syslog.syslog(syslog.LOG_INFO, "Failure, timeout reached")
        syslog.closelog()
        return pamh.PAM_AUTH_ERR

    # Status 12 means we aborted
    elif status == 12:
        syslog.syslog(syslog.LOG_INFO, "Failure, general abort")
        syslog.closelog()
        return pamh.PAM_AUTH_ERR

    # Status 13 means the image was too dark
    elif status == 13:
        syslog.syslog(syslog.LOG_INFO, "Failure, image too dark")
        syslog.closelog()
        pamh.conversation(
            pamh.Message(pamh.PAM_ERROR_MSG, "Face detection image too dark"))
        return pamh.PAM_AUTH_ERR

    # Status 0 is a successful exit
    elif status == 0:
        # Show the success message if it isn't suppressed
        if not config.getboolean("core", "no_confirmation"):
            pamh.conversation(
                pamh.Message(pamh.PAM_TEXT_INFO,
                             "Identified face as " + pamh.get_user()))
        syslog.syslog(syslog.LOG_INFO, "Login approved")
        syslog.closelog()
        return pamh.PAM_SUCCESS

    # Otherwise, we can't describe what happened, but it wasn't successful
    pamh.conversation(
        pamh.Message(pamh.PAM_ERROR_MSG, "Unknown error: " + str(status)))
    syslog.syslog(syslog.LOG_INFO, "Failure, unknown error" + str(status))
    syslog.closelog()
    return pamh.PAM_SYSTEM_ERR
while mqtt.connected != True: # Wait for connection time.sleep(0.1) while mqtt.connected == True: # Wait for connection time.sleep(0.1) mqtt.close() syslog.syslog(syslog.LOG_ERR, "reflect-domotizc stopped **") _fh_lock = 0 def run_once(): global _fh_lock fh = open(os.path.realpath(__file__), 'r') try: fcntl.flock(fh, fcntl.LOCK_EX | fcntl.LOCK_NB) except: os._exit(1) if __name__ == '__main__': run_once() syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_DAEMON) main(sys.argv) try: main(sys.argv) except Exception as e: syslog.syslog(syslog.LOG_ERR, "** exception %s" % (str(e)))
rn = options.reactor + "reactor" getattr(__import__("twisted.internet", fromlist=[rn]), rn).install() if options.logfile: args1 += ["--logfile", options.logfile] if options.pidfile: args1 += ["--pidfile", options.pidfile] if options.syslog: args1 += ["--syslog"] if options.profile: args1 += ["--profile", options.profile] if options.profiler: args1 += ["--profiler", options.profiler] if options.syslog and options.syslog_prefix: import syslog syslog.openlog(options.syslog_prefix) if not options.tracebacks: args2.append("-n") if options.clogfile: args2 += ["--logfile", options.clogfile] if options.sslcertificate and options.sslkey: args2 += [ "--certificate", options.sslcertificate, "--privkey", options.sslkey, "--https", options.port ] if options.sslchain: args2 += ["--certificate-chain", options.sslchain] else: args2 += ["--port", options.port]
netaddr.IPNetwork(prefix): HierDict(defaults, info) for prefix, info in config_dict['prefixes'].items() } for zone in prefixes: from IPy import IP prefixes[zone]['version'] = IP(str(zone.cidr)).version() if not 'domain' in prefixes[zone]: prefixes[zone]['domain'] = IP(str(zone.cidr)).reverseName()[:-1] rtree = radix.Radix() for prefix in prefixes.keys(): node = rtree.add(str(prefix)) node.data['prefix'] = prefix return prefixes, rtree if __name__ == '__main__': syslog.openlog(os.path.basename(sys.argv[0]), syslog.LOG_PID) if len(sys.argv) > 1: config_path = sys.argv[1] if len(sys.argv) > 2: LOGLEVEL = int(sys.argv[2]) else: config_path = CONFIG prefixes, rtree = parse_config(config_path) sys.exit(parse(prefixes, rtree, sys.stdin, sys.stdout))
action='store_true', default=False, help="Import the Email as-is.") parser.add_argument("-e", "--event", default=False, help="Add indicators to this MISP event.") parser.add_argument("-u", "--urlsonly", default=False, action='store_true', help="Extract only URLs.") parser.add_argument('infile', nargs='?', type=argparse.FileType('rb')) args = parser.parse_args() syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER) syslog.syslog("Job started.") os.chdir(Path(__file__).parent) configmodule = Path(__file__).name.replace('.py', '_config') if Path(f'{configmodule}.py').exists(): config = importlib.import_module(configmodule) try: misp_url = config.misp_url misp_key = config.misp_key misp_verifycert = config.misp_verifycert debug = config.debug except Exception as e: syslog.syslog(str(e)) print(