def prepare_logging(options, args, tango_args, start_time=None,
                    log_messages=None):
    """Configure console and (optionally) rotating-file logging.

    :param options: parsed command line options; reads ``log_level``,
        ``log_file_level``, ``without_log_file`` and ``log_file_name``
    :param args: process execution arguments; ``args[0]`` is the binary
        name and ``args[-1]`` the instance name (used for the default
        log file path)
    :param tango_args: tango-specific execution arguments (only logged)
    :param start_time: optional start timestamp (only logged)
    :param log_messages: optional list of message tuples, each logged
        with info level via ``taurus.info(*message)``
    """
    taurus.setLogLevel(taurus.Debug)
    root = Logger.getRootLog()

    # output logger configuration
    log_output_level = options.log_level
    log_level_map = {
        "0": taurus.Critical, "critical": taurus.Critical,
        "1": taurus.Error, "error": taurus.Error,
        "2": taurus.Warning, "warning": taurus.Warning,
        "3": taurus.Info, "info": taurus.Info,
        "4": taurus.Debug, "debug": taurus.Debug,
        "5": taurus.Trace, "trace": taurus.Trace,
    }
    log_output_level = log_level_map[log_output_level]
    root.handlers[0].setLevel(log_output_level)

    if not options.without_log_file:
        log_file_level = log_level_map[options.log_file_level]

        # Create a file handler
        if options.log_file_name is None:
            _, ds_name = os.path.split(args[0])
            ds_name, _ = os.path.splitext(ds_name)
            ds_instance = args[-1].lower()
            import getpass
            try:
                # include the user name to avoid permission errors
                tangodir = 'tango-%s' % getpass.getuser()
            except Exception:
                # BUG FIX: the original fallback was
                # "'tango' % getpass.getuser()" which itself raises
                # TypeError ('tango' contains no format placeholder)
                tangodir = 'tango'
            path = os.path.join(os.sep, "tmp", tangodir, ds_name,
                                ds_instance)
            log_file_name = os.path.join(path, 'log.txt')
        else:
            log_file_name = options.log_file_name
            path = os.path.dirname(log_file_name)

        # because some versions of python have a bug in logging.shutdown
        # (this function is not protected against deleted handlers) we
        # store the handlers we create to make sure a strong reference
        # exists when the logging.shutdown is called
        taurus._handlers = handlers = []
        try:
            if not os.path.exists(path):
                # 0o777 spelling is valid on python 2.6+ and python 3
                # (the original "0777" literal is a py3 syntax error)
                os.makedirs(path, 0o777)

            from sardana import sardanacustomsettings
            maxBytes = getattr(sardanacustomsettings, 'LOG_FILES_SIZE', 1E7)
            backupCount = getattr(sardanacustomsettings, 'LOG_BCK_COUNT', 5)

            fmt = Logger.getLogFormat()
            f_h = logging.handlers.RotatingFileHandler(
                log_file_name, maxBytes=maxBytes, backupCount=backupCount)
            f_h.setFormatter(fmt)
            f_h.setLevel(log_file_level)
            root.addHandler(f_h)
            handlers.append(f_h)

            if start_time is not None:
                taurus.info("Started at %s", start_time)
            else:
                taurus.info("Starting up...")
            taurus.info("Log is being stored in %s", log_file_name)
        except Exception:
            # best effort: if the log directory/file cannot be created,
            # keep running with console-only logging
            if start_time is not None:
                taurus.info("Started at %s", start_time)
            else:
                taurus.info("Starting up...")
            taurus.warning(
                "'%s' could not be created. Logs will not be stored",
                log_file_name)
            taurus.debug("Error description", exc_info=1)

    if log_messages is None:
        log_messages = []
    for log_message in log_messages:
        taurus.info(*log_message)

    taurus.debug("Start args=%s", args)
    taurus.debug("Start tango args=%s", tango_args)
    taurus.debug("Start options=%s", options)
    taurus.debug("Using PyTango %s from %s", PyTango.Release.version,
                 PyTango.__path__[0])
    taurus.debug("Using taurus %s from %s", taurus.Release.version,
                 taurus.__path__[0])
    taurus.debug("Using sardana %s from %s", sardana.Release.version,
                 sardana.__path__[0])
def prepare_logstash(args):
    """Prepare logstash handler based on the configuration stored in the
    Tango database.

    :param args: process execution arguments
    :type args: list<str>

    :return: list of message tuples describing what was configured, to be
        logged once logging is fully set up

    .. note::
        The prepare_logstash function has been included in Sardana on a
        provisional basis. Backwards incompatible changes (up to and
        including its removal) may occur if deemed necessary by the core
        developers.
    """
    log_messages = []
    try:
        import logstash
    except ImportError:
        msg = ("Unable to import logstash. Skipping logstash "
               + "configuration...", )
        log_messages.append(msg, )
        return log_messages

    def get_logstash_conf(dev_name):
        # device properties come back as {name: [values]}; a missing
        # property yields an empty list, hence the IndexError fallbacks
        try:
            props = db.get_device_property(dev_name, "LogstashHost")
            host = props["LogstashHost"][0]
        except IndexError:
            host = None
        try:
            props = db.get_device_property(dev_name, "LogstashPort")
            port = int(props["LogstashPort"][0])
        except IndexError:
            port = 12345
        return host, port

    db = Database()
    bin_name = args[0]
    try:
        instance_name = args[1]
    except IndexError:
        # ROBUSTNESS FIX: the original indexed args[1] unguarded and
        # crashed when no instance name was given; sibling versions of
        # this function guard it this way (note the trailing comma: each
        # entry is a tuple later unpacked with taurus.info(*msg))
        msg = ("Unknown %s instance name. " % bin_name
               + "Skipping logstash configuration...", )
        log_messages.append(msg, )
        return log_messages
    server_name = bin_name + "/" + instance_name
    if bin_name in ["Pool", "MacroServer"]:
        class_name = bin_name
        dev_name = get_dev_from_class_server(db, class_name, server_name)[0]
        host, port = get_logstash_conf(dev_name)
    else:
        # other servers: try the Pool device first, then the MacroServer
        dev_name = get_dev_from_class_server(db, "Pool", server_name)[0]
        host, port = get_logstash_conf(dev_name)
        if host is None:
            dev_name = get_dev_from_class_server(db, "MacroServer",
                                                 server_name)[0]
            host, port = get_logstash_conf(dev_name)
    if host is not None:
        root = Logger.getRootLog()
        handler = logstash.TCPLogstashHandler(host, port, version=1)
        root.addHandler(handler)
        msg = ("Log is being sent to logstash listening on %s:%d",
               host, port)
        log_messages.append(msg)
    return log_messages
def prepare_logging(options, args, tango_args, start_time=None,
                    log_messages=None):
    """Set up the root logger: console level, optional rotating log file,
    and a batch of deferred startup messages.

    :param options: parsed command line options; reads ``log_level``,
        ``log_file_level``, ``without_log_file`` and ``log_file_name``
    :param args: process execution arguments; ``args[0]`` is the binary
        name and ``args[-1]`` the instance name (default log path parts)
    :param tango_args: tango-specific execution arguments (only logged)
    :param start_time: optional start timestamp (only logged)
    :param log_messages: optional list of message tuples, each logged
        with info level via ``taurus.info(*message)``
    """
    taurus.setLogLevel(taurus.Debug)
    root = Logger.getRootLog()

    # output logger configuration
    log_output_level = options.log_level
    log_level_map = {
        "0": taurus.Critical, "critical": taurus.Critical,
        "1": taurus.Error, "error": taurus.Error,
        "2": taurus.Warning, "warning": taurus.Warning,
        "3": taurus.Info, "info": taurus.Info,
        "4": taurus.Debug, "debug": taurus.Debug,
        "5": taurus.Trace, "trace": taurus.Trace,
    }
    log_output_level = log_level_map[log_output_level]
    root.handlers[0].setLevel(log_output_level)

    if not options.without_log_file:
        log_file_level = log_level_map[options.log_file_level]

        # Create a file handler
        if options.log_file_name is None:
            _, ds_name = os.path.split(args[0])
            ds_name, _ = os.path.splitext(ds_name)
            ds_instance = args[-1].lower()
            import getpass
            try:
                # include the user name to avoid permission errors
                tangodir = 'tango-%s' % getpass.getuser()
            except Exception:
                # BUG FIX: the original fallback
                # ("'tango' % getpass.getuser()") raised TypeError because
                # 'tango' contains no format placeholder
                tangodir = 'tango'
            path = os.path.join(os.sep, "tmp", tangodir, ds_name,
                                ds_instance)
            log_file_name = os.path.join(path, 'log.txt')
        else:
            log_file_name = options.log_file_name
            path = os.path.dirname(log_file_name)

        # because some versions of python have a bug in logging.shutdown
        # (this function is not protected against deleted handlers) we
        # store the handlers we create to make sure a strong reference
        # exists when the logging.shutdown is called
        taurus._handlers = handlers = []
        try:
            if not os.path.exists(path):
                # 0o777 spelling is valid on python 2.6+ and python 3
                # (the original "0777" literal is a py3 syntax error)
                os.makedirs(path, 0o777)

            from sardana import sardanacustomsettings
            maxBytes = getattr(sardanacustomsettings, 'LOG_FILES_SIZE', 1E7)
            backupCount = getattr(sardanacustomsettings, 'LOG_BCK_COUNT', 5)

            fmt = Logger.getLogFormat()
            f_h = logging.handlers.RotatingFileHandler(
                log_file_name, maxBytes=maxBytes, backupCount=backupCount)
            f_h.setFormatter(fmt)
            f_h.setLevel(log_file_level)
            root.addHandler(f_h)
            handlers.append(f_h)

            if start_time is not None:
                taurus.info("Started at %s", start_time)
            else:
                taurus.info("Starting up...")
            taurus.info("Log is being stored in %s", log_file_name)
        except Exception:
            # best effort: keep running with console-only logging if the
            # log directory/file cannot be created
            if start_time is not None:
                taurus.info("Started at %s", start_time)
            else:
                taurus.info("Starting up...")
            taurus.warning(
                "'%s' could not be created. Logs will not be stored",
                log_file_name)
            taurus.debug("Error description", exc_info=1)

    if log_messages is None:
        log_messages = []
    for log_message in log_messages:
        taurus.info(*log_message)

    taurus.debug("Start args=%s", args)
    taurus.debug("Start tango args=%s", tango_args)
    taurus.debug("Start options=%s", options)
    taurus.debug("Using PyTango %s from %s", PyTango.Release.version,
                 PyTango.__path__[0])
    taurus.debug("Using taurus %s from %s", taurus.Release.version,
                 taurus.__path__[0])
    taurus.debug("Using sardana %s from %s", sardana.Release.version,
                 sardana.__path__[0])
def prepare_logstash(args):
    """Prepare logstash handler based on the configuration stored in the
    Tango database.

    :param args: process execution arguments
    :type args: list<str>

    :return: list of message tuples describing what was configured, to be
        logged once logging is fully set up

    .. note::
        The prepare_logstash function has been included in Sardana on a
        provisional basis. Backwards incompatible changes (up to and
        including its removal) may occur if deemed necessary by the core
        developers.
    """
    log_messages = []
    try:
        from logstash_async.handler import AsynchronousLogstashHandler
    except ImportError:
        msg = ("Unable to import logstash_async. Skipping logstash "
               + "configuration...", )
        log_messages.append(msg, )
        return log_messages

    def get_logstash_conf(dev_name):
        # device properties come back as {name: [values]}; a missing
        # property yields an empty list, hence the IndexError fallbacks
        try:
            props = db.get_device_property(dev_name, "LogstashHost")
            host = props["LogstashHost"][0]
        except IndexError:
            host = None
        try:
            props = db.get_device_property(dev_name, "LogstashPort")
            port = int(props["LogstashPort"][0])
        except IndexError:
            port = None
        try:
            props = db.get_device_property(dev_name, "LogstashCacheDbPath")
            cache_db_path = props["LogstashCacheDbPath"][0]
        except IndexError:
            cache_db_path = None
        return host, port, cache_db_path

    db = Database()
    bin_name = args[0]
    try:
        instance_name = args[1]
    except IndexError:
        # BUG FIX: msg must be a one-element tuple (note the trailing
        # comma) because entries are later unpacked with
        # taurus.info(*msg); the original appended a bare string, which
        # would be unpacked character by character
        msg = ("Unknown %s instance name. " % bin_name
               + "Skipping logstash configuration...", )
        log_messages.append(msg, )
        return log_messages
    server_name = bin_name + "/" + instance_name
    if bin_name in ["Pool", "MacroServer"]:
        class_name = bin_name
        dev_name = get_dev_from_class_server(db, class_name, server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
    else:
        # other servers: try the Pool device first, then the MacroServer
        dev_name = get_dev_from_class_server(db, "Pool", server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
        if host is None:
            dev_name = get_dev_from_class_server(db, "MacroServer",
                                                 server_name)[0]
            host, port, cache = get_logstash_conf(dev_name)
    if host is not None:
        root = Logger.getRootLog()
        handler = AsynchronousLogstashHandler(host, port,
                                              database_path=cache)
        # don't use full path for program_name
        # NOTE(review): this relies on private logstash_async API
        # (_create_formatter_if_necessary / formatter._program_name) and
        # may break on a library upgrade
        handler._create_formatter_if_necessary()
        _, handler.formatter._program_name = os.path.split(
            handler.formatter._program_name)
        root.addHandler(handler)
        msg = ("Log is being sent to logstash listening on %s:%d",
               host, port)
        log_messages.append(msg)
    return log_messages
def prepare_logstash(args):
    """Prepare logstash handler based on the configuration stored in the
    Tango database.

    :param args: process execution arguments
    :type args: list<str>

    :return: list of message tuples describing what was configured, to be
        logged once logging is fully set up

    .. note::
        The prepare_logstash function has been included in Sardana on a
        provisional basis. Backwards incompatible changes (up to and
        including its removal) may occur if deemed necessary by the core
        developers.
    """
    log_messages = []
    try:
        from logstash_async.handler import AsynchronousLogstashHandler
    except ImportError:
        msg = ("Unable to import logstash_async. Skipping logstash "
               + "configuration...", )
        log_messages.append(msg,)
        return log_messages

    def get_logstash_conf(dev_name):
        # device properties come back as {name: [values]}; a missing
        # property yields an empty list, hence the IndexError fallbacks
        try:
            props = db.get_device_property(dev_name, "LogstashHost")
            host = props["LogstashHost"][0]
        except IndexError:
            host = None
        try:
            props = db.get_device_property(dev_name, "LogstashPort")
            port = int(props["LogstashPort"][0])
        except IndexError:
            port = None
        try:
            props = db.get_device_property(dev_name, "LogstashCacheDbPath")
            cache_db_path = props["LogstashCacheDbPath"][0]
        except IndexError:
            cache_db_path = None
        return host, port, cache_db_path

    db = Database()
    bin_name = args[0]
    try:
        instance_name = args[1]
    except IndexError:
        # BUG FIX: msg must be a one-element tuple (note the trailing
        # comma) because entries are later unpacked with
        # taurus.info(*msg); the original appended a bare string, which
        # would be unpacked character by character
        msg = ("Unknown %s instance name. " % bin_name
               + "Skipping logstash configuration...", )
        log_messages.append(msg, )
        return log_messages
    server_name = bin_name + "/" + instance_name
    if bin_name in ["Pool", "MacroServer"]:
        class_name = bin_name
        dev_name = get_dev_from_class_server(db, class_name, server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
    else:
        # other servers: try the Pool device first, then the MacroServer
        dev_name = get_dev_from_class_server(db, "Pool", server_name)[0]
        host, port, cache = get_logstash_conf(dev_name)
        if host is None:
            dev_name = get_dev_from_class_server(db, "MacroServer",
                                                 server_name)[0]
            host, port, cache = get_logstash_conf(dev_name)
    if host is not None:
        root = Logger.getRootLog()
        handler = AsynchronousLogstashHandler(host, port,
                                              database_path=cache)
        # don't use full path for program_name
        # NOTE(review): this relies on private logstash_async API
        # (_create_formatter_if_necessary / formatter._program_name) and
        # may break on a library upgrade
        handler._create_formatter_if_necessary()
        _, handler.formatter._program_name = os.path.split(
            handler.formatter._program_name)
        root.addHandler(handler)
        msg = ("Log is being sent to logstash listening on %s:%d",
               host, port)
        log_messages.append(msg)
    return log_messages