Example #1
def Logging(console, syslog, logf):
    import logging, os, sys
    # set up logging to file with logging format
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)s: %(levelname)-12s %(message)s',
                        datefmt='%d-%m-%Y %H:%M:%S',
                        filename=logf,
                        filemode='a'
                        )

    procname = ' * ' + os.path.basename(sys.argv[0])
    logger = logging.getLogger(procname)

    if console:
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(name)s: [%(levelname)s] %(message)s')
        console.setFormatter(formatter)
        logger.addHandler(console)

    if syslog:
        from logging.handlers import SysLogHandler

        syslog = SysLogHandler(address='/dev/log')
        syslog.setLevel(logging.INFO)
        formatter = logging.Formatter('%(name)s: %(levelname)-12s %(message)s')
        syslog.setFormatter(formatter)
        logger.addHandler(syslog)

    return logger
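
# A quick usage sketch for this helper (the log path below is hypothetical;
# 'logf' is the file the root logger appends to):
log = Logging(console=True, syslog=False, logf='/var/log/myapp.log')
log.info('application started')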
Example #2
def _syslog_handler(facility):
    handler = SysLogHandler(_syslog_address(), facility)
    # The default SysLogHandler appends a zero-terminator,
    # which vmsyslogd does not consume and puts in the log file.
    handler.log_format_string = "<%d>%s"
    formatter = logging.Formatter(SYSLOG_FORMAT)
    return handler, formatter
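
# Note: 'log_format_string' comes from the Python 2 era of SysLogHandler.
# On Python 3 the trailing zero byte is controlled by the handler's
# 'append_nul' attribute instead, so a rough sketch of the equivalent fix is:
handler = SysLogHandler(_syslog_address(), facility)
handler.append_nul = False  # suppress the zero-terminator vmsyslogd would log verbatim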
Example #3
    def init_app(cls, app):
        ProductionConfig.init_app(app)
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
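
# For reference: SysLogHandler() with no arguments sends UDP datagrams to
# localhost:514. A sketch of the explicit equivalent:
from logging.handlers import SYSLOG_UDP_PORT, SysLogHandler

syslog_handler = SysLogHandler(address=('localhost', SYSLOG_UDP_PORT))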
Example #4
def init(LOG_FILE=False, LOG_CONSOLE=False, LOG_SYSLOG=False):

    # remove all root handlers (iterate over a copy, since removeHandler
    # mutates the list)
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    # Log to file
    if LOG_FILE:
        logging.basicConfig(
            filename=LOG_FILE,
            level=logging.INFO,
            format="%(asctime)-15s %(levelname)s:%(filename)s:%(lineno)d -- %(message)s",
        )

    # Log to console
    if LOG_CONSOLE:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(logging.Formatter("%(levelname)s:%(filename)s:%(lineno)d -- %(message)s"))
        logging.getLogger().addHandler(console)

    # Log to syslog
    if LOG_SYSLOG:
        from logging.handlers import SysLogHandler

        syslog = SysLogHandler(address="/dev/log")
        syslog.setFormatter(logging.Formatter("%(asctime)-15s %(levelname)s:%(filename)s:%(lineno)d -- %(message)s"))
        logging.getLogger().addHandler(syslog)
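
# A usage sketch for init() (the path and flags are illustrative only):
init(LOG_FILE='/var/log/myapp.log', LOG_CONSOLE=True)
logging.info('handlers configured')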
Example #5
def get_instance(plugin):
    name = plugin.get_name()
    logger.info("Get a Syslog broker for plugin %s" % (name))

    # syslog.syslog priority defaults to (LOG_INFO | LOG_USER)
    facility = syslog.LOG_USER
    priority = syslog.LOG_INFO

    # Get configuration values, if any
    if hasattr(plugin, 'facility'):
        facility = plugin.facility
    if hasattr(plugin, 'priority'):
        priority = plugin.priority

    # Ensure config values have a string type compatible with
    # SysLogHandler.encodePriority
    if isinstance(facility, bytes):
        facility = facility.decode()
    if isinstance(priority, bytes):
        priority = priority.decode()

    # Convert facility / priority (integers or strings) to aggregated
    # priority value
    sh = SysLogHandler()
    try:
        priority = sh.encodePriority(facility, priority)
    except TypeError as e:
        logger.error("[%s] Couldn't get syslog priority, "
                     "reverting to defaults" % (name))
Example #6
    def __init__(self, session=None, port=None, inputname='', input=None,
                 announce=False, authorize=True, **kwargs):
        # TODO: avoid duplication with __init__ above
        if inputname:
            # raise if no session
            input = session.get_input_by_name(inputname)
        if input:
            self.inputobj = input
            try:
                port = input.port
                self.inputname = input.name
            except AttributeError:
                raise ValueError("This doesn't look like a syslog input")
            if authorize:
                if port == 514:
                    # raise if no session
                    session._api_help('api/inputs/%s/add514' % input.id)
                else:
                    session._api_help('api/inputs/%s/adddevice' % input.id,
                                      method='POST')
            if ('tcp' in input.service['name'] and sys.version_info >= (2, 7)
                    and 'socktype' not in kwargs):
                kwargs['socktype'] = socket.SOCK_STREAM
        self.port = port
        session = session or LogglySession
        SysLogHandler.__init__(self, address=(session.proxy, port),
                               **kwargs)
Example #7
def setupLogging(agentConfig):
    """Configure logging to use syslog whenever possible.
    Also controls debug_mode."""
    if agentConfig['debug_mode']:
        logFile = "/tmp/dd-agent.log"
        logging.basicConfig(filename=logFile, filemode='w', level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logging.info("Logging to %s" % logFile)
    else:
        try:
            from logging.handlers import SysLogHandler
            rootLog = logging.getLogger()
            rootLog.setLevel(logging.INFO)

            sys_log_addr = "/dev/log"

            # Special-case macs
            if sys.platform == 'darwin':
                sys_log_addr = "/var/run/syslog"

            handler = SysLogHandler(address=sys_log_addr, facility=SysLogHandler.LOG_DAEMON)
            formatter = logging.Formatter("dd-agent - %(name)s - %(levelname)s - %(message)s")
            handler.setFormatter(formatter)
            rootLog.addHandler(handler)
            logging.info('Logging to syslog is set up')
        except Exception as e:
            sys.stderr.write("Error while setting up syslog logging (%s). No logging available" % str(e))
            logging.disable(logging.ERROR)
Example #8
def setup_logging(app, logfile, debug=False):
    cmdslog = TimedRotatingFileHandler(logfile,
                                       when='D',
                                       interval=1,
                                       backupCount=7)
    if debug:
        env = logging.DEBUG
        cmdslog.setFormatter(logging.Formatter(
            "%(name)s: [%(levelname)s] %(filename)s:%(lineno)d - %(message)s"))
    else:
        env = logging.INFO
        cmdslog.setFormatter(logging.Formatter(
            "%(name)s: [%(levelname)s] %(message)s"))

    cmdslog.setLevel(env)

    logger = logging.getLogger(app)
    logger.setLevel(env)
    logger.addHandler(cmdslog)
    if os.path.exists('/dev/log'):
        st_mode = os.stat('/dev/log').st_mode
        if stat.S_ISSOCK(st_mode):
            syslog_h = SysLogHandler(address='/dev/log')
            syslog_h.set_name(app)
            logger.addHandler(syslog_h)

    return _log(app, logger)
Example #9
    def __custom(hostname='localhost', udp_port=514, name=hex(hash(random.random())), level=logging.INFO,
                 formatter=None):
        """
        Create a custom syslog.
        Private, called by Logger.__init__
        :param hostname: IP or DNS name for syslog host
        :type hostname: str
        :param udp_port: syslog listening port - UDP only, no TCP
        :type udp_port: int
        :param name: Unique name for logger
        :type name: str
        :param formatter: custom log record formatter; a sensible default is used when None
        :type formatter: logging.Formatter
        :param level: minimum event level to log (e.g. logging.DEBUG for verbose output)
        :type level: int
        :return: the configured logger
        :rtype: logging.Logger
        """
        if formatter is None:
            formatter = logging.Formatter(
                '%(asctime)s.%(msecs)03d PID%(process)d:%(levelname)-8s:%(filename)s:%(funcName)-15s:%(message)s',
                '%Y-%m-%dT%H:%M:%S')

        # Create remote syslog destination - incl. for system logging
        custom_handler = SysLogHandler(address=(hostname, udp_port))
        custom_handler.setFormatter(formatter)
        # Create a new instance named custom - separate from root and customize
        custom = logging.getLogger(name)
        # Attach the syslog handler to this logger
        custom.addHandler(custom_handler)
        # Explicitly set the minimum logging level
        custom.setLevel(level)
        return custom
Example #10
    def __init__(self):
        """
        Constructor for Utilities class
        """

        # Initiate Config Parser instance
        self.parser = config_parser()

        # Read config file inside
        self.parser.read(CONFIG_FILE)

        # Set Config object by config parser sections
        CONFIG.update(self.parser._sections)

        # Set global NOW value to be used in any where in application
        self.now = strftime(self.config_get("locale", "datetime_format"))

        # Set log destination
        # Convert JSON string into DotDict/OrderedDict object
        log_destination = json.loads(self.config_get("logging", "destination"))

        # Initiate Syslog handler with log destination regarding to the system architecture
        syslog = SysLogHandler(address=log_destination[sys.platform])

        # Set syslog format
        syslog.setFormatter(
            logging.Formatter(self.config_get("logging", "format"))
        )

        # Attach the handler to this instance's logger
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(syslog)
Example #11
    def emit(self, record):
        original_msg = record.msg
        # this is needed to properly show the "initial-setup" prefix
        # for log messages in syslog/Journal
        record.msg = '%s: %s' % (self.tag, original_msg)
        SysLogHandler.emit(self, record)
        record.msg = original_msg
Example #12
    def run(self):
        """ Configures logging and runs the command. Overridden from the super-class.
        """
        log_file_config = self.config_mod.log_file_config

        if log_file_config:
            logging.config.fileConfig(log_file_config)
        else:
            syslog_facility = self.config_mod.syslog_facility
            log_level = self.config_mod.log_level
            syslog_address = self.config_mod.syslog_address

            log_level = logging.getLevelName(log_level)

            handler = SysLogHandler(syslog_address, syslog_facility)
            handler.setFormatter(LoggingFormatter())

            logger = logging.getLogger('')
            logger.addHandler(handler)
            logger.setLevel(log_level)

        object_name = 'https_proxy_class' if self.is_https else 'http_proxy_class'

        proxy_class = self.app_ctx.get_object(object_name)
        proxy_class(self.config_mod, self.app_ctx).serve_forever()
Example #13
def start_logger(logger_name, log_level, container_id):
    """

    Initialize logging of this process and set logger format

    :param logger_name: The name to report with
    :param log_level: The verbosity level. This should be selected
    :param container_id: container id
    """
    logging.raiseExceptions = False
    log_level = log_level.upper()

    # NOTE(takashi): currently logging.WARNING is defined as the same value
    #                as logging.WARN, so we can properly handle WARNING here
    try:
        level = getattr(logging, log_level)
    except AttributeError:
        level = logging.ERROR

    logger = logging.getLogger("CONT #" + container_id + ": " + logger_name)

    if log_level == 'OFF':
        logging.disable(logging.CRITICAL)
    else:
        logger.setLevel(level)

    log_handler = SysLogHandler('/dev/log')
    str_format = '%(name)-12s: %(levelname)-8s %(funcName)s' + \
                 ' %(lineno)s [%(process)d, %(threadName)s]' + \
                 ' %(message)s'
    formatter = logging.Formatter(str_format)
    log_handler.setFormatter(formatter)
    log_handler.setLevel(level)
    logger.addHandler(log_handler)
    return logger
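
# A usage sketch (the logger name, level, and container id are placeholders):
logger = start_logger('my-middleware', 'debug', 'c01')
logger.debug('container logger ready')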
Example #14
def patch_settings():
    """
    Patches default project settings.
    Don't know if it's the best way to do it... but it works...
    I do that because I don't want to force users to manually edit lots of settings, except 'skissh' in INSTALLED_APPS.
    """
    settings.LOGIN_URL = "/skwissh/login"

    settings.LANGUAGES = (
        ('fr', _(u'Français')),
        ('en', _(u'Anglais')),
    )

    settings.CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'skwissh-cache'
        }
    }

    settings.MIDDLEWARE_CLASSES += (
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
        'django.middleware.gzip.GZipMiddleware',
        'django.middleware.http.ConditionalGetMiddleware',
    )

    logger = logging.getLogger('skwissh')
    logger.setLevel(logging.DEBUG)
    syslog = SysLogHandler(address='/dev/log')
    formatter = logging.Formatter('%(name)s: %(levelname)s %(message)s')
    syslog.setFormatter(formatter)
    logger.addHandler(syslog)
Example #15
    def add_syslog_handler(address):
        """Send events to a remote syslog."""
        from logging.handlers import SysLogHandler

        handler = SysLogHandler(address)
        handler.setFormatter(logging.Formatter(Logger.SYSLOG_FORMAT))
        Logger.logger.addHandler(handler)
Example #16
    def remote(secondary=False):
        settings = configs["cirta"]["settings"]

        facilityCode = getattr(SysLogHandler, "LOG_%s" % settings["SYSLOG_FACILITY"].upper())
        if settings["SYSLOG_PROTOCOL"].lower() == "tcp":
            sock = socket.SOCK_STREAM
        elif settings["SYSLOG_PROTOCOL"].lower() == "udp":
            sock = socket.SOCK_DGRAM
        else:
            log.error("Unsupported syslog protocol configuration: %s" % settings["SYSLOG_PROTOCOL"])
        log.debug('msg="Unsupported syslog protocol configuration" protocol="%s"' % settings["SYSLOG_PROTOCOL"])
            exit()

        try:
            if secondary:
                sysHandler = SysLogHandler(
                    address=(settings["SYSLOG_SECONDARY_SERVER"], int(settings["SYSLOG_PORT"])),
                    facility=facilityCode,
                    socktype=sock,
                )
            else:
                sysHandler = SysLogHandler(
                    address=(settings["SYSLOG_SERVER"], int(settings["SYSLOG_PORT"])),
                    facility=facilityCode,
                    socktype=sock,
                )
            sysHandler.addFilter(MultilineFilter())

            return sysHandler
        except Exception:
            return None
Example #17
def main():
    """Prepares the ephemeral devices"""
    logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
    log.setLevel(logging.INFO)
    syslog = SysLogHandler(address='/dev/log')
    formatter = logging.Formatter(
        'manage_instance_storage.py: %(name)s: %(levelname)s %(message)s')
    syslog.setFormatter(formatter)
    log.addHandler(syslog)

    devices = get_ephemeral_devices()
    if not devices:
        # no ephemeral devices, nothing to do, quit
        log.info('no ephemeral devices found')
        return
    lvm_devices = [d for d in devices if not needs_pvcreate(d)]
    if lvm_devices:
        maybe_fix_lvm_devices(lvm_devices)
    devices = [d for d in devices if needs_pvcreate(d)]
    if not devices:
        log.info('Ephemeral devices already in LVM')
        return

    root_vg = query_vg_name()
    root_lv = query_lv_path()
    for dev in devices:
        pvcreate(dev)
        run_cmd(["vgextend", root_vg, dev])

    run_cmd(["lvextend", "-l", "100%VG", root_lv])
    run_cmd(["resize2fs", root_lv])
Example #18
def get_logger(name,
               level=logging.INFO,
               verbose=False,
               debug=False,
               syslog=False):
    global syslogh

    log = logging.getLogger(name)

    if verbose or debug:
        log.setLevel(level if not debug else logging.DEBUG)

        channel = logging.StreamHandler(sys.stdout if debug else sys.stderr)
        channel.setFormatter(logging.Formatter('%(asctime)s - '
                                               '%(levelname)s - %(message)s'))
        channel.setLevel(level if not debug else logging.DEBUG)
        log.addHandler(channel)

    if syslog:
        log.setLevel(level)
        syslogh = SysLogHandler(address='/dev/log')

        syslogh.setFormatter(logging.Formatter('%(message)s'))
        syslogh.setLevel(logging.INFO)
        log.addHandler(syslogh)

    return log
Example #19
    def __init__(self, logfile='', scrlog=True, syslog='1', loglevel=logging.INFO):
        self.logfile = logfile
        self.scrlog = scrlog
        self.syslog = syslog
        self.loglevel = loglevel
        self.format = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s', datefmt=None)
        self.syslog_format = logging.Formatter(fmt='NSCAweb: %(message)s', datefmt=None)
        self.log = logging.getLogger(__name__)
        self.log.setLevel(self.loglevel)
        if logfile != '':
            self.file_handler = logging.FileHandler(logfile)
            self.file_handler.setFormatter(self.format)
            self.log.addHandler(self.file_handler)
        if scrlog:
            self.scr_handler = logging.StreamHandler()
            self.scr_handler.setFormatter(self.format)
            self.log.addHandler(self.scr_handler)
        if syslog == '1':
            from logging.handlers import SysLogHandler
            if os.path.exists('/var/run/syslog'):
                self.sys_handler = SysLogHandler(address='/var/run/syslog')
            else:
                self.sys_handler = SysLogHandler(address='/dev/log')

            self.sys_handler.setFormatter(self.syslog_format)
            self.log.addHandler(self.sys_handler)
Example #20
    def __setupSysLogOutput(self):
        """Sets up the handler for the local syslog daemon."""
        syslogHandler = SysLogHandler("/dev/log", SysLogHandler.LOG_DAEMON)
        syslogHandler.setLevel(logging.INFO)
        formatter = logging.Formatter('vNode: %(levelname)s %(funcName)s: %(message)s')
        syslogHandler.setFormatter(formatter)
        logging.getLogger('').addHandler(syslogHandler)
Example #21
def setup_logging():
    global log

    progname = basename(argv[0])
    log = getLogger()
    log.setLevel(DEBUG)

    handlers = []
    buildlog_handler = FileHandler(getenv("HOME") + "/build.log")
    buildlog_handler.setFormatter(
        Log8601Formatter("%(asctime)s " + progname + " %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(buildlog_handler)

    stderr_handler = StreamHandler(stderr)
    stderr_handler.setFormatter(
        Log8601Formatter("%(asctime)s %(name)s %(levelname)s " +
                         "%(filename)s:%(lineno)s: %(message)s"))
    handlers.append(stderr_handler)
    
    if exists("/dev/log"):
        syslog_handler = SysLogHandler(
            address="/dev/log", facility=LOG_LOCAL1)
        syslog_handler.setFormatter(
            Log8601Formatter(progname +
                             " %(asctime)s %(levelname)s: %(message)s"))
        handlers.append(syslog_handler)


    log.addHandler(MultiHandler(handlers))

    getLogger("boto").setLevel(INFO)
    getLogger("boto3").setLevel(INFO)
    getLogger("botocore").setLevel(INFO)
    return
Example #22
def main():
    """
    Main application loop.
    """

    env = os.environ

    try:
        host = env['SYSLOG_SERVER']
        port = int(env['SYSLOG_PORT'])
        socktype = socket.SOCK_DGRAM if env['SYSLOG_PROTO'] == 'udp' \
            else socket.SOCK_STREAM
    except KeyError:
        sys.exit("SYSLOG_SERVER, SYSLOG_PORT and SYSLOG_PROTO are required.")

    handler = SysLogHandler(
        address=(host, port),
        socktype=socktype,
    )
    handler.setFormatter(PalletFormatter())

    for event_headers, event_data in supervisor_events(sys.stdin, sys.stdout):
        event = logging.LogRecord(
            name=event_headers['processname'],
            level=logging.INFO,
            pathname=None,
            lineno=0,
            msg=event_data,
            args=(),
            exc_info=None,
        )
        event.process = int(event_headers['pid'])
        handler.handle(event)
Example #23
def bootstrap_logger(name):
    l = logging.getLogger(name)
    l.setLevel(logging.DEBUG)

    #Catchall for all uncaught Exceptions
    def handleUncaughtException(excType, excValue, traceback):
        l.error("Uncaught exception", exc_info=(excType, excValue, traceback))
    sys.excepthook = handleUncaughtException

    prefix = "{0}[{1}] ".format(name, str(getpid()))

    #Console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(logging.Formatter(prefix + LOG_FORMAT))
    l.addHandler(ch)

    #Syslog handler
    sh = SysLogHandler(address='/dev/log')
    sh.setLevel(logging.DEBUG)
    sh.setFormatter(logging.Formatter(prefix + LOG_FORMAT))
    l.addHandler(sh)

    l.debug("Starting {0} script.".format(name))
    return l
Example #24
def syslogs_sender():
    # Initialize SysLogHandler
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    syslog = SysLogHandler(address=(args.host, args.port))
    logger.addHandler(syslog)

    for message in range(1, args.count+1):
        # Randomize some fields
        time_output = time.strftime("%b %d %H:%M:%S")
        random_host = random.choice(range(1,11))
        random_tag = random.choice(tag)
        random_level = random.choice(syslog_level)
        fqdn = "{0}{1}{2}".format(hostname, random_host, domain_name)
        random_pid = random.choice(range(500,9999))

        message = open_sample_log(args.file)
        fields = {'host_field': fqdn, 'date_field': time_output,
                  'tag_field': random_tag}

        formatter = logging.Formatter(
            '%(date_field)s %(host_field)s {0}[{1}]: %(message)s'
            .format(random_tag, random_pid))
        syslog.setFormatter(formatter)

        print("[+] Sent: {0}: {1}".format(time_output, message), end='')

        getattr(logger, random_level)(message, extra=fields)

    logger.removeHandler(syslog)
    syslog.close()
Example #25
def configure_syslog(request=None, logger=None, exceptions=False):
    """
    Configure syslog logging channel.
    It is turned on by setting `syslog_host` in the config file.
    The port defaults to 514 and can be overridden by setting `syslog_port`.

    :param request: tornado.httputil.HTTPServerRequest instance
    :param logger: optional logger to attach the handler to; defaults
        to the root logger
    :param exceptions: boolean - This indicates if we should raise
        exceptions encountered in the logging system.
    """
    syslog_host = getattr(options, 'syslog_host', None)
    if not syslog_host:
        return

    sys.modules["logging"].raiseExceptions = exceptions
    handler = SysLogHandler(address=(syslog_host, options.syslog_port))
    formatter = log_formatter(request)
    handler.setFormatter(formatter)

    if request:
        handler.addFilter(RequestFilter(request))

    if logger:
        logger.addHandler(handler)
    else:
        logging.getLogger().addHandler(handler)
Example #26
    def addSysLogHandler(self, logger, host, port=SYSLOG_UDP_PORT,
                         minLevel=DEFAULT_LEVEL):
        fmt = logging.Formatter("%(levelname)-8s %(message)s")
        syslogHandler = SysLogHandler((host, port))
        syslogHandler.setLevel(minLevel)
        syslogHandler.setFormatter(fmt)
        logger.addHandler(syslogHandler)
Example #27
def create_log(tofile=False, file_level=1, file_name=None, toconsole=False, console_level=1, tosyslog=False, syslog_level=1, syslog_address='/dev/log'):
    levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]

    logger = logging.getLogger(prg_name())
    logger.setLevel(logging.DEBUG)

    fmt_general = logging.Formatter('%(asctime)s - %(name)s - %(thread)d - %(threadName)s - %(levelname)s - %(message)s')
    fmt_syslog = logging.Formatter('%(name)s: %(threadName)s; %(levelname)s; %(message)s')

    # logs to a file
    if tofile:
        if os.path.isdir(os.path.dirname(file_name)):
            fh = logging.FileHandler(file_name)
            fh.setLevel(levels[file_level])
            fh.setFormatter(fmt_general)
            logger.addHandler(fh)
        else:
            sys.stderr.write("\nLog file directory '%s' not found.\nCan't continue. Quitting...\n\n" % (os.path.dirname(file_name)))
            quit()

    # logs to the console
    if toconsole:
        ch = logging.StreamHandler()
        ch.setLevel(levels[console_level])
        ch.setFormatter(fmt_general)
        logger.addHandler(ch)

    # logs to syslog
    if tosyslog:
        sh = SysLogHandler(address=syslog_address)
        sh.setLevel(levels[syslog_level])
        sh.setFormatter(fmt_syslog)
        logger.addHandler(sh)

    return logger
Example #28
    def emit(self, record):
        """
        Emit the specified log record.
        Provides the following:
        - Replace newlines with spaces per syslog RFCs.
        - Emit stack traces in following log records.
        :param record: A log record.
        :type record: LogRecord
        """
        records = [record]
        message = record.getMessage()
        record.msg = LogHandler.clean(message)
        record.args = tuple()
        if record.exc_info:
            msg = self.formatter.formatException(record.exc_info)
            for line in msg.split('\n'):
                _record = LogRecord(
                    name=record.name,
                    level=record.levelno,
                    pathname=record.pathname,
                    lineno=record.lineno,
                    msg=line,
                    args=tuple(),
                    exc_info=None)
                records.append(_record)
            record.exc_info = None
        for r in records:
            SysLogHandler.emit(self, r)
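
    # LogHandler.clean is not part of this excerpt; given the docstring's note
    # about replacing newlines per the syslog RFCs, a plausible sketch of it
    # (an assumption, not the project's actual code) is:
    @staticmethod
    def clean(message):
        # Collapse embedded newlines so each record stays one syslog line.
        return message.replace('\n', ' ')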
Example #29
def get_logger(name):
    """
    Set up loggers for this class. There are two loggers in use. StreamLogger prints information on the screen with
    the default level ERROR (INFO if the verbose flag is set). FileLogger logs INFO entries to the report.log file.
    report.log is never purged, but information from new runs is appended to the end of the file.
    :return:
    """
    def _exception_hook(excType, excValue, traceback, logger):
        logger.error("", exc_info=(excType, excValue, traceback))

    logger = logging.getLogger(name)
    sys.excepthook = _exception_hook
    formatter = logging.Formatter('%(asctime)s - %(message)s')

    if is_console:
        stream_logger = logging.StreamHandler(sys.stdout)
        stream_logger.setLevel(logging_level)
        logger.addHandler(stream_logger)
    else:
        syslog_logger = SysLogHandler()
        syslog_logger.setLevel(logging_level)
        syslog_logger.setFormatter(formatter)
        logger.addHandler(syslog_logger)

    if is_filelog:
        file_logger = logging.FileHandler("log.txt") # os.path.join(get_config_dir(),
        file_logger.setLevel(logging_level)
        file_logger.setFormatter(formatter)
        logger.addHandler(file_logger)

    logger.setLevel(logging_level)

    return logger
Example #30
    def _createLogger(self):
        self.watchedfiles = []
        logger = logging.getLogger(self.name)
        logger.setLevel(logging.NOTSET)
        logger.propagate = False
        # Create log formatter.
        format_dict = {'version': '1',
                       'timestamp': '%(asctime)s',
                       'hostname': config['hostname'],
                       'appname': self.files['tag'],
                       'procid': '-',
                       'msgid': '-',
                       'structured_data': '-',
                       'msg': '%(message)s'
                       }
        log_format = rfc5424_format.format(**format_dict)
        formatter = logging.Formatter(log_format, date_format)
        # Add a log handler for each server.
        for server in self.servers:
            port = server.get('port', 514)
            syslog = SysLogHandler((server["host"], port))
            syslog.setFormatter(formatter)
            logger.addHandler(syslog)
        self.logger = logger
        # Create WatchedFile objects from the list of files.
        for name in self.files['files']:
            self.watchedfiles.append(WatchedFile(name))
Example #31
# File modes:
# r - read mode - read a file.
# w - write mode - write to a file; if the file does not exist it is created,
#     if it exists it is truncated to zero length.
# a - append mode - append contents to the file.

# Advanced logging.

# create logger
# One of the limitations of basicConfig is that we cannot set up a named
# logger; the logger is root by default.
logger = logging.getLogger('disk Monitor')  # logger
logger.setLevel(logging.DEBUG)  # Filter for your logger

# create console handler and set level to debug
# https://docs.python.org/2/howto/logging.html#useful-handlers
ch = SysLogHandler(address="/dev/log")  # handler - SysLogHandler.
ch.setLevel(logging.DEBUG)  # filter for the handler.

# create formatter
# logging.Formatter?
formatter = logging.Formatter(' - %(name)s - %(levelname)s - %(message)s')

# add formatter to ch
ch.setFormatter(formatter)  # handler and formatter

# add ch to logger
logger.addHandler(ch)  # logger and handlers

# Let's make this more automated: find the disk usage percentage of /.
# df -h /|tail -n 1|awk '{print $5}'|sed -e 's#%##g'
# https://docs.python.org/2/library/subprocess.html
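
# A sketch of the automation described above, shelling out to the quoted df
# pipeline and logging the usage of / (the 80% warning threshold is an
# arbitrary choice for illustration):
import subprocess

cmd = "df -h /|tail -n 1|awk '{print $5}'|sed -e 's#%##g'"
used = int(subprocess.check_output(cmd, shell=True).decode().strip())
if used > 80:
    logger.warning('disk / usage at %d%%', used)
else:
    logger.info('disk / usage at %d%%', used)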
Example #32
def _prepare_logging(daemon, level=logging.INFO):
    syslog_handler = SysLogHandler(address='/dev/log')
    handlers = (syslog_handler,) if daemon else \
        (logging.StreamHandler(), syslog_handler,)
    logging.basicConfig(level=level, handlers=handlers)
Example #33
    def mapPriority(self, level):
        """Map the priority level to a syslog level."""
        return self.levelMap.get(level, SysLogHandler.mapPriority(self, level))
Example #34
    def emit(self, record):
        original_msg = record.msg
        record.msg = '%s: %s' % (self.tag, original_msg)
        SysLogHandler.emit(self, record)
        record.msg = original_msg
Example #35
    def __init__(self,
                 address=('localhost', SYSLOG_UDP_PORT),
                 facility=SysLogHandler.LOG_USER,
                 tag=''):
        self.tag = tag
        SysLogHandler.__init__(self, address, facility)
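
# Examples #34 and #35 read like two pieces of one handler subclass; assembled,
# it might look like this sketch (the class name is hypothetical):
class TaggedSysLogHandler(SysLogHandler):
    def __init__(self,
                 address=('localhost', SYSLOG_UDP_PORT),
                 facility=SysLogHandler.LOG_USER,
                 tag=''):
        self.tag = tag
        SysLogHandler.__init__(self, address, facility)

    def emit(self, record):
        # Prefix each message with the tag, then restore the original.
        original_msg = record.msg
        record.msg = '%s: %s' % (self.tag, original_msg)
        SysLogHandler.emit(self, record)
        record.msg = original_msg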
Example #36
    mail_handler = SMTPHandler(
        app.config['SMTP_SERVER'], app.config['MAIL_FROM'],
        app.config['ADMINS'],
        "[{}] Website error".format(app.config['STATION_NAME']))
    mail_handler.setFormatter(
        logging.Formatter('''
Message type:       %(levelname)s
Time:               %(asctime)s

%(message)s
'''))
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)

    if 'SYSLOG_ADDRESS' in app.config:
        syslog_handler = SysLogHandler(address=app.config['SYSLOG_ADDRESS'])
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)


def init_app():
    from wuvt import admin
    app.register_blueprint(admin.bp, url_prefix='/admin')

    app.register_blueprint(auth_manager.bp, url_prefix='/auth')

    from wuvt import blog
    app.register_blueprint(blog.bp)

    if app.config['DONATE_ENABLE']:
        from wuvt import donate
Example #37
]

# Custom exception reporter to include some details
DEFAULT_EXCEPTION_REPORTER_FILTER = "weblate.trans.debug.WeblateExceptionReporterFilter"

# Default logging of Weblate messages
# - to syslog in production (if available)
# - otherwise to console
# - you can also choose "logfile" to log into separate file
#   after configuring it below

# Detect if we can connect to syslog
HAVE_SYSLOG = False
if platform.system() != "Windows":
    try:
        handler = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_LOCAL2)
        handler.close()
        HAVE_SYSLOG = True
    except IOError:
        HAVE_SYSLOG = False

if DEBUG or not HAVE_SYSLOG:
    DEFAULT_LOG = "console"
else:
    DEFAULT_LOG = "syslog"
DEFAULT_LOGLEVEL = "DEBUG" if DEBUG else "INFO"

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/stable/topics/logging for
# more details on how to customize your logging configuration.
Example #38
def main():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es = ElasticsearchClient(['{0}'.format(s) for s in options.esservers])
        s = requests.Session()
        s.headers.update({'Accept': 'application/json'})
        s.headers.update({'Content-type': 'application/json'})
        s.headers.update({'Authorization':'SSWS {0}'.format(options.apikey)})

        #capture the time we start running so next time we catch any events created while we run.
        state = State(options.state_file)
        lastrun = toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')

        r = s.get('https://{0}/api/v1/events?startDate={1}&limit={2}'.format(
            options.oktadomain,
            toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            options.recordlimit
        ))

        if r.status_code == 200:
            oktaevents = json.loads(r.text)
            for event in oktaevents:
                if 'published' in event.keys():
                    if toUTC(event['published']) > toUTC(state.data['lastrun']):
                        try:
                            mozdefEvent = dict()
                            mozdefEvent['utctimestamp']=toUTC(event['published']).isoformat()
                            mozdefEvent['receivedtimestamp']=toUTC(datetime.now()).isoformat()
                            mozdefEvent['category'] = 'okta'
                            mozdefEvent['tags'] = ['okta']
                            if 'action' in event.keys() and 'message' in event['action'].keys():
                                mozdefEvent['summary'] = event['action']['message']
                            mozdefEvent['details'] = event
                            # Actor parsing
                            # While there are various objectTypes attributes, we just take any attribute that matches,
                            # in case Okta changes its structure around a bit.
                            # This means the last instance of each attribute in all actors will be recorded in mozdef
                            # while others will be discarded,
                            # which ends up working out well in Okta's case.
                            if 'actors' in event.keys():
                                for actor in event['actors']:
                                    if 'ipAddress' in actor.keys():
                                        if netaddr.valid_ipv4(actor['ipAddress']):
                                            mozdefEvent['details']['sourceipaddress'] = actor['ipAddress']
                                    if 'login' in actor.keys():
                                        mozdefEvent['details']['username'] = actor['login']
                                    if 'requestUri' in actor.keys():
                                        mozdefEvent['details']['source_uri'] = actor['requestUri']

                            # We are renaming action to activity because there are
                            # currently mapping problems with the details.action field
                            mozdefEvent['details']['activity'] = mozdefEvent['details']['action']
                            mozdefEvent['details'].pop('action')

                            jbody=json.dumps(mozdefEvent)
                            res = es.save_event(doc_type='okta',body=jbody)
                            logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
                else:
                    logger.error('Okta event does not contain published date: {0}'.format(event))
            state.data['lastrun'] = lastrun
            state.write_state_file()
        else:
            logger.error('Could not get Okta events HTTP error code {} reason {}'.format(r.status_code, r.reason))
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #39
def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])

    if not app.config['DEBUG'] and not app.config['TESTING']:
        # configure logging for production

        # email errors to the administrators
        if app.config.get('MAIL_ERROR_RECIPIENT') is not None:
            import logging
            from logging.handlers import SMTPHandler
            credentials = None
            secure = None
            if app.config.get('MAIL_USERNAME') is not None:
                credentials = (app.config['MAIL_USERNAME'],
                               app.config['MAIL_PASSWORD'])
                if app.config['MAIL_USE_TLS'] is not None:
                    secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr=app.config['MAIL_DEFAULT_SENDER'],
                toaddrs=[app.config['MAIL_ERROR_RECIPIENT']],
                subject='[Talks] Application Error',
                credentials=credentials,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        # send standard logs to syslog
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    # app.config['BOOTSTRAP_SERVE_LOCAL'] = True

    bootstrap.init_app(app)
    db.init_app(app)
    moment.init_app(app)
    pagedown.init_app(app)
    mail.init_app(app)
    login_manager.init_app(app)

    from .posts import posts as posts_blueprint
    app.register_blueprint(posts_blueprint)

    from .tags import tag as tag_blueprint
    app.register_blueprint(tag_blueprint)

    from .messages import message as message_blueprint
    app.register_blueprint(message_blueprint)

    from .comments import comment as comment_blueprint
    app.register_blueprint(comment_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    from .api_1_0 import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/1.0')

    with app.app_context():
        db.create_all()

    # from app.emails import start_email_thread
    # @app.before_first_request
    # def before_first_request():
    #     start_email_thread()

    return app
Example #40
def set_signal_globals(t):
    global signal_t
    signal_t = t

def signal_handler(signal, frame):
    logger.debug("terminating")
    if signal_t is not None:
        signal_t.cleanup_mounts()
        signal_t.stop_udisks()
    exit(0)

if __name__ == '__main__':
    logger = logging.getLogger()
    handler = SysLogHandler(address = '/dev/log', 
                            facility = SysLogHandler.LOG_LOCAL6)
    formatter = logging.Formatter('MyDLP mountfs: %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    
    if len(argv) != 2:
        print('usage: %s  <trace_device>' % argv[0])
        logger.error("Incorrect parameters")
        exit(1)
    
    trace_device = argv[1]
    
    logger.debug("Tracing mounts on " + trace_device)

    t = PartitionTracer(trace_device)
Example #41
                           required=True,
                           help='bdb hostname/ip address')
    req_group.add_argument('--bdb-port',
                           type=int,
                           required=True,
                           help='bdb port number')
    req_group.add_argument('--public-key',
                           type=str,
                           required=True,
                           help='api service public key')
    req_group.add_argument('--private-key',
                           type=str,
                           required=True,
                           help='api service private key')
    args = parser.parse_args()
    # set up logging
    logger = getLogger('telemetry_service')
    logger.setLevel(DEBUG)
    # local syslog
    local_formatter = Formatter(
        "%(name)s %(threadName)s %(levelname)s -- %(message)s",
        datefmt='%Y-%m-%d %H:%M:%S')
    local_syslog = SysLogHandler(address='/dev/log',
                                 facility=SysLogHandler.LOG_SYSLOG)
    local_syslog.setFormatter(local_formatter)
    logger.addHandler(local_syslog)
    init_system(app, args.bdb_ip, args.bdb_port, args.public_key,
                args.private_key)
    app.run(debug=True, host='0.0.0.0')
# end main
Example #42
    'weblate.trans.context_processors.weblate_context',
)

# Custom exception reporter to include some details
DEFAULT_EXCEPTION_REPORTER_FILTER = \
    'weblate.trans.debug.WeblateExceptionReporterFilter'

# Default logging of Weblate messages
# - to syslog in production (if available)
# - otherwise to console
# - you can also choose 'logfile' to log into separate file
#   after configuring it below

# Detect if we can connect to syslog
try:
    SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_LOCAL2)
    HAVE_SYSLOG = True
except IOError:
    HAVE_SYSLOG = False

if DEBUG or not HAVE_SYSLOG:
    DEFAULT_LOG = 'console'
else:
    DEFAULT_LOG = 'syslog'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/stable/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
Example #43
def execute():
    p = argparse.ArgumentParser(description=docker_hostdns.__description__)
    p.add_argument('--zone', default="docker", help="Dns zone to update, defaults to \"docker\".")
    p.add_argument('--dns-server', default='127.0.0.1', action="store", help="Address of DNS server which will be updated, defaults to 127.0.0.1.")
    p.add_argument('--dns-key-secret', action="store", help="DNS Server key secret for use when updating zone. Use '-' to read from stdin.")
    p.add_argument('--dns-key-name', action="store", help="DNS Server key name for use when updating zone.")
    p.add_argument('--name', action="store", help="Name to differentiate between multiple instances inside same dns zone, defaults to current hostname.")
    
    if _has_daemon:
        p.add_argument('--daemonize', '-d', metavar="PIDFILE", action="store", default=None, help="Daemonize after start and store PID at given path.")
    
    p.add_argument('--verbose', '-v', default=0, action="count", help="Give more output. Option is additive, and can be used up to 3 times.")
    p.add_argument('--syslog', default=False, action="store_true", help="Enable logging to syslog.")
    
    conf = p.parse_args()
    
    keyring = None
    
    if conf.dns_key_name and conf.dns_key_secret:
        secret = conf.dns_key_secret
        
        if secret == "-":
            secret = sys.stdin.readline().strip()
        
        keyring={conf.dns_key_name: secret}
    
    levels = [
        logging.ERROR,
        logging.WARNING,
        logging.INFO,
        logging.DEBUG
    ]
    
    handlers = None
    
    if conf.syslog:
        h = SysLogHandler(facility=SysLogHandler.LOG_DAEMON, address='/dev/log')
        formatter = logging.Formatter(p.prog+' [%(name)s] %(message)s', '%b %e %H:%M:%S')
        h.setFormatter(formatter)
        handlers = [h]
    
    logging.basicConfig(level=levels[min(conf.verbose, len(levels)-1)], handlers=handlers)
    
    dns_updater = NamedUpdater(conf.zone, conf.dns_server, keyring, conf.name)
    d = DockerHandler(dns_updater)
    
    dns_updater.setup()
    d.setup()
    
    def run():
        signal.signal(signal.SIGTERM, do_quit)
        signal.signal(signal.SIGINT, do_quit)
        logger = logging.getLogger('console')
        try:
            d.run()
        except Exception as e:
            logger.exception(e)
            raise e
    
    if _has_daemon and conf.daemonize:
        pid_writer = PidWriter(os.path.realpath(conf.daemonize))
        with daemon.DaemonContext(pidfile=pid_writer):
            run()
    else:
        run()
Example #44
def initialize_logging(logger_name):
    try:
        logging_config = get_logging_config()

        logging.basicConfig(
            format=get_log_format(logger_name),
            level=logging_config['log_level'] or logging.INFO,
        )

        log_file = logging_config.get('%s_log_file' % logger_name)
        if log_file is not None and not logging_config['disable_file_logging']:
            # make sure the log directory is writeable
            # NOTE: the entire directory needs to be writable so that rotation works
            if os.access(os.path.dirname(log_file), os.R_OK | os.W_OK):
                file_handler = logging.handlers.RotatingFileHandler(
                    log_file, maxBytes=LOGGING_MAX_BYTES, backupCount=1)
                formatter = logging.Formatter(get_log_format(logger_name),
                                              get_log_date_format())
                file_handler.setFormatter(formatter)

                root_log = logging.getLogger()
                root_log.addHandler(file_handler)
            else:
                sys.stderr.write("Log file is unwritable: '%s'\n" % log_file)

        # set up syslog
        if logging_config['log_to_syslog']:
            try:
                from logging.handlers import SysLogHandler

                if logging_config['syslog_host'] is not None and logging_config[
                        'syslog_port'] is not None:
                    sys_log_addr = (logging_config['syslog_host'],
                                    logging_config['syslog_port'])
                else:
                    sys_log_addr = "/dev/log"
                    # Special-case BSDs
                    if Platform.is_darwin():
                        sys_log_addr = "/var/run/syslog"
                    elif Platform.is_freebsd():
                        sys_log_addr = "/var/run/log"

                handler = SysLogHandler(address=sys_log_addr,
                                        facility=SysLogHandler.LOG_DAEMON)
                handler.setFormatter(
                    logging.Formatter(get_syslog_format(logger_name),
                                      get_log_date_format()))
                root_log = logging.getLogger()
                root_log.addHandler(handler)
            except Exception as e:
                sys.stderr.write("Error setting up syslog: '%s'\n" % str(e))
                traceback.print_exc()

        # Setting up logging in the event viewer for windows
        if get_os() == 'windows' and logging_config['log_to_event_viewer']:
            try:
                from logging.handlers import NTEventLogHandler
                nt_event_handler = NTEventLogHandler(
                    logger_name,
                    get_win32service_file('windows', 'win32service.pyd'),
                    'Application')
                nt_event_handler.setFormatter(
                    logging.Formatter(get_syslog_format(logger_name),
                                      get_log_date_format()))
                nt_event_handler.setLevel(logging.ERROR)
                app_log = logging.getLogger(logger_name)
                app_log.addHandler(nt_event_handler)
            except Exception as e:
                sys.stderr.write(
                    "Error setting up Event viewer logging: '%s'\n" % str(e))
                traceback.print_exc()

    except Exception as e:
        sys.stderr.write("Couldn't initialize logging: %s\n" % str(e))
        traceback.print_exc()

        # if config fails entirely, enable basic stdout logging as a fallback
        logging.basicConfig(
            format=get_log_format(logger_name),
            level=logging.INFO,
        )

    # re-get the log after logging is initialized
    global log
    log = logging.getLogger(__name__)
Example #45
# Pipelines

WAREHOUSE_URL = os.environ['WAREHOUSE_URL']
TEST_WAREHOUSE_URL = os.environ.get('TEST_WAREHOUSE_URL', None)
ITEM_PIPELINES = {
    'collectors.base.pipelines.Warehouse': 100,
}

# Logging

logging.basicConfig(level=logging.DEBUG)
if os.environ.get('LOGGING_URL', None):
    root_logger = logging.getLogger()
    host, port = os.environ['LOGGING_URL'].split(':')
    syslog_handler = SysLogHandler(address=(host, int(port)))
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

# ICTRP

ICTRP_USER = os.environ.get('ICTRP_USER', None)
ICTRP_PASS = os.environ.get('ICTRP_PASS', None)

# HRA

HRA_ENV = os.environ.get('HRA_ENV', None)
HRA_URL = os.environ.get('HRA_URL', None)
HRA_USER = os.environ.get('HRA_USER', None)
HRA_PASS = os.environ.get('HRA_PASS', None)
Example #46
    def __init__(self, msg_max_length=STD_MSG_LENGTH_LIMIT, *args, **kwargs):
        if msg_max_length >= self.MIN_MSG_LENGTH_LIMIT:
            self.max_length = msg_max_length
        else:
            self.max_length = self.STD_MSG_LENGTH_LIMIT
        SysLogHandler.__init__(self, *args, **kwargs)
Example #47
import os
import logging
from logging.handlers import SysLogHandler

SYSLOG_ADDRESS = (
    os.environ.get('SYSLOG_HOST', 'localhost'),
    int(os.environ.get('SYSLOG_PORT', 514)),
)

# Add a special logger to log related occurrences in settings
formatter = logging.Formatter('SETTINGS %(levelname)-8s %(message)s')
settings_logger = logging.getLogger('settings')

if not os.environ.get('CONSOLE_LOGS'):
    handler = SysLogHandler(address=SYSLOG_ADDRESS)
    handler.setFormatter(formatter)
    settings_logger.addHandler(handler)

# Log settings also in stdout
handler = logging.StreamHandler()
handler.setFormatter(formatter)
settings_logger.addHandler(handler)

settings_logger.setLevel(logging.INFO)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
Example #48
if not app.debug:
    logyaml = ""
    with open(data_file('config/log.yml'), 'r') as f:
        logyaml = yaml.safe_load(f)
    try:
        formatter = logging.Formatter('%(asctime)s - %(message)s')
        if logyaml['type'] == "file":
            from logging.handlers import RotatingFileHandler
            file_handler = RotatingFileHandler(
                logyaml['logfile'], backupCount=logyaml['backupCount'])
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(formatter)
            app.logger.addHandler(file_handler)
        elif logyaml['type'] == 'syslog':
            from logging.handlers import SysLogHandler
            syslog_handler = SysLogHandler()
            syslog_handler.setLevel(logging.INFO)
            syslog_handler.setFormatter(formatter)
            app.logger.addHandler(syslog_handler)
    except Exception:
        pass


# Decorator to return JSON easily
def jsonify(f):
    def inner(*args, **kwargs):
        jsonstring = json.dumps(f(*args, **kwargs), default=json_fixup)
        return Response(jsonstring, mimetype='application/json')
    return inner

Example #49
def main():
    """The main for the oresat linux updater daemon"""

    ret = 0
    pid_file = "/run/oresat-linux-updaterd.pid"

    parser = ArgumentParser()
    parser.add_argument("-d",
                        "--daemon",
                        action="store_true",
                        help="daemonize the process")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="enable debug log messaging")
    parser.add_argument("-w",
                        "--work-dir",
                        dest="work_dir",
                        default=WORK_DIR,
                        help="override the working directory")
    parser.add_argument("-c",
                        "--cache-dir",
                        dest="cache_dir",
                        default=CACHE_DIR,
                        help="override the update archive cache directory")
    args = parser.parse_args()

    if args.daemon:
        _daemonize(pid_file)
        log_handler = SysLogHandler(address="/dev/log")
    else:
        log_handler = logging.StreamHandler(sys.stderr)

    # turn on logging for debug messages
    if args.verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO

    logging.basicConfig(level=level, handlers=[log_handler])

    log = logging.getLogger('oresat-linux-updater')

    # make updater
    updater = DBusServer(args.work_dir, args.cache_dir, log)

    # set up dbus wrapper
    bus = SystemBus()
    bus.publish(DBUS_INTERFACE_NAME, updater)
    loop = GLib.MainLoop()

    try:
        updater.run()
        loop.run()
    except KeyboardInterrupt:
        updater.quit()
        loop.quit()
    except Exception as exc:  # this should not happen
        log.critical(exc)
        updater.quit()
        loop.quit()
        ret = 1

    if args.daemon:
        os.remove(pid_file)  # clean up daemon

    return ret
Example #50
from config import config

app = Flask(__name__)
app.config.from_object(config[os.getenv('FLASK_CONFIG') or 'default'])
__author__ = 'sonnyhcl'

import logging

log_path = os.path.join(app.config['PROJECT_PATH'], 'logs')
if not os.path.exists(log_path):
    os.mkdir(log_path)
web_log_file = os.path.join(log_path, app.config['LOG_FILE_NAME'])
logging.basicConfig(level=logging.DEBUG,
                    format='%(message)s',
                    filename=web_log_file,
                    filemode='a')
from logging.handlers import SysLogHandler

syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.DEBUG)
app.logger.addHandler(syslog_handler)

db_log_file = os.path.join(log_path, 'db_operator.log')
handler = logging.FileHandler(db_log_file)
app.logger.addHandler(handler)

from mylog import log
from webapp import views
# from webapp import auth
# from webapp import db
Example #51
class SKABaseDevice(with_metaclass(DeviceMeta, Device)):
    """
    A generic base device for SKA.
    """
    # PROTECTED REGION ID(SKABaseDevice.class_variable) ENABLED START #
    global logger
    logger = logging.getLogger(__name__)
    syslogs = SysLogHandler(address='/dev/log', facility='syslog')
    formatter = logging.Formatter(
        '%(name)s: %(levelname)s %(module)s %(message)r')
    syslogs.setFormatter(formatter)
    logger.addHandler(syslogs)

    def _get_device_json(self, args_dict):
        """
        Returns device configuration in JSON format.
        :param args_dict:
        :return:
        """
        try:

            device_dict = {
                'component': self.get_name(),
            }
            if args_dict.get('with_metrics') or args_dict.get(
                    'with_attributes'):
                device_dict['attributes'] = self.get_device_attributes(
                    with_value=args_dict.get('with_value'),
                    with_metrics=args_dict.get('with_metrics'),
                    with_attributes=args_dict.get('with_attributes'),
                    with_context=False)
            if args_dict.get('with_commands') is True:
                device_dict['commands'] = self.get_device_commands(
                    with_context=False)
            return device_dict

        except Exception as ex:
            logger.fatal(str(ex), exc_info=True)
            raise

    def _parse_argin(self, argin, defaults=None, required=None):
        """
        Parses the argument passed to it and returns them in a dictionary form.
        :param argin: The argument to parse
        :param defaults:
        :param required:
        :return: Dictionary containing passed arguments.
        """
        args_dict = defaults.copy() if defaults else {}
        try:
            if argin:
                args_dict.update(json.loads(argin))
        except ValueError as ex:
            logger.fatal(str(ex), exc_info=True)
            raise

        missing_args = []
        if required:
            missing_args = set(required) - set(args_dict.keys())
        if missing_args:
            msg = ("Missing arguments: {}".format(', '.join(
                [str(m_arg) for m_arg in missing_args])))
            raise Exception(msg)
        return args_dict

    def get_device_commands(self, with_context=True):
        """ Get device proxy commands"""
        ### TBD - Why use DeviceProxy?
        ### Can this not be known through self which is a Device
        commands = []
        device_proxy = DeviceProxy(self.get_name())
        cmd_config_list = device_proxy.command_list_query()
        for device_cmd_config in cmd_config_list:
            commands.append(
                get_dp_command(device_proxy.dev_name(), device_cmd_config,
                               with_context))
        return commands

    def get_device_attributes(self,
                              with_value=False,
                              with_context=True,
                              with_metrics=True,
                              with_attributes=True,
                              attribute_name=None):
        """ Get device attributes"""

        multi_attribute = self.get_device_attr()
        attr_list = multi_attribute.get_attribute_list()

        attributes = {}

        # Cannot loop over the attr_list object (not python-wrapped): raises TypeError:
        # No to_python (by-value) converter found for C++ type: Tango::Attribute*
        for index in range(len(attr_list)):

            attrib = attr_list[index]
            attr_name = attrib.get_name()

            if attribute_name is not None:
                if attr_name != attribute_name:
                    continue

            attr_dict = {
                'name': attr_name,
                'polling_frequency': attrib.get_polling_period()
            }

            try:
                attr_dict['min_value'] = attrib.get_min_value()
            except AttributeError as attr_err:
                logger.info(str(attr_err), exc_info=True)
            except DevFailed as derr:
                logger.info(str(derr), exc_info=True)

            try:
                attr_dict['max_value'] = attrib.get_max_value()
            except AttributeError as attr_err:
                logger.info(str(attr_err), exc_info=True)
            except DevFailed as derr:
                logger.info(str(derr), exc_info=True)

            attr_dict['readonly'] = (attrib.get_writable() not in [
                AttrWriteType.READ_WRITE, AttrWriteType.WRITE,
                AttrWriteType.READ_WITH_WRITE
            ])

            # TODO (KM 2017-10-30): Add the data type of the attribute in the dict.

            if with_context:
                device_type, device_id = get_tango_device_type_id(
                    self.get_name())
                attr_dict['component_type'] = device_type
                attr_dict['component_id'] = device_id

            if with_value:
                # To get the values for the State and Status attributes, we need to call
                # their get methods, respectively. The device does not implement the
                # read_<attribute_name> methods for them.
                if attr_name in ['State', 'Status']:
                    attr_dict['value'] = coerce_value(
                        getattr(self, 'get_{}'.format(attr_name.lower()))())
                else:
                    attr_dict['value'] = coerce_value(
                        getattr(self, 'read_{}'.format(attr_name))())

                attr_dict['is_alarm'] = (
                    attrib.get_quality() == AttrQuality.ATTR_ALARM)

            # Define attribute type
            if attr_name in self.MetricList:
                attr_dict['attribute_type'] = 'metric'
            else:
                attr_dict['attribute_type'] = 'attribute'

            # Add to return attribute dict
            if (with_metrics and attr_dict['attribute_type'] == 'metric'
                    or with_attributes
                    and attr_dict['attribute_type'] == 'attribute'):
                attributes[attr_name] = attr_dict

        return attributes

    def dev_logging(self, dev_log_msg, dev_log_level):
        # Map each Tango log level to its device stream and storage logger
        # method; lower LogLevel values are more severe (FATAL=1 .. DEBUG=5).
        dispatch = {
            int(tango.LogLevel.LOG_FATAL): (self.fatal_stream, logger.fatal),
            int(tango.LogLevel.LOG_ERROR): (self.error_stream, logger.error),
            int(tango.LogLevel.LOG_WARN): (self.warn_stream, logger.warning),
            int(tango.LogLevel.LOG_INFO): (self.info_stream, logger.info),
            int(tango.LogLevel.LOG_DEBUG): (self.debug_stream, logger.debug),
        }
        if dev_log_level not in dispatch:
            return
        stream, storage = dispatch[dev_log_level]

        # Element Level Logging
        if self._element_logging_level >= dev_log_level:
            stream(dev_log_msg)

        # Central Level Logging
        if self._central_logging_level >= dev_log_level:
            stream(dev_log_msg)

        # Storage Level Logging
        if self._storage_logging_level >= dev_log_level:
            storage(dev_log_msg)

    # PROTECTED REGION END #    //  SKABaseDevice.class_variable

    # -----------------
    # Device Properties
    # -----------------

    SkaLevel = device_property(dtype='int16', default_value=4)

    MetricList = device_property(
        dtype=('str', ),
        default_value=["healthState", "adminMode", "controlMode"])

    GroupDefinitions = device_property(dtype=('str', ), )

    CentralLoggingTarget = device_property(dtype='str', )

    ElementLoggingTarget = device_property(dtype='str', )

    StorageLoggingTarget = device_property(dtype='str',
                                           default_value="localhost")

    # ----------
    # Attributes
    # ----------

    buildState = attribute(
        dtype='str',
        doc="Build state of this device",
    )

    versionId = attribute(
        dtype='str',
        doc="Version Id of this device",
    )

    centralLoggingLevel = attribute(
        dtype='uint16',
        access=AttrWriteType.READ_WRITE,
        doc="Current logging level to Central logging target for this device - "
        "\ninitialises to CentralLoggingLevelDefault on startup",
    )

    elementLoggingLevel = attribute(
        dtype='uint16',
        access=AttrWriteType.READ_WRITE,
        doc="Current logging level to Element logging target for this device - "
        "\ninitialises to ElementLoggingLevelDefault on startup",
    )

    storageLoggingLevel = attribute(
        dtype='uint16',
        access=AttrWriteType.READ_WRITE,
        memorized=True,
        doc="Current logging level to Syslog for this device - "
        "initialises from  StorageLoggingLevelDefault on first "
        "execution of device.Needs to be READ_WRITE To make it"
        " memorized - but writing this attribute should do the "
        "same as command SetStorageLoggingLevel to ensure the "
        "targets and adjustmentsare made correctly",
    )

    healthState = attribute(
        dtype='DevEnum',
        doc="The health state reported for this device. "
        "It interprets the current device"
        " condition and condition of all managed devices to set this. "
        "Most possibly an aggregate attribute.",
        enum_labels=[
            "OK",
            "DEGRADED",
            "FAILED",
            "UNKNOWN",
        ],
    )

    adminMode = attribute(
        dtype='DevEnum',
        access=AttrWriteType.READ_WRITE,
        memorized=True,
        doc=
        "The admin mode reported for this device. It may interpret the current "
        "device condition and condition of all managed devices to set this. "
        "Most possibly an aggregate attribute.",
        enum_labels=[
            "ON-LINE",
            "OFF-LINE",
            "MAINTENANCE",
            "NOT-FITTED",
            "RESERVED",
        ],
    )

    controlMode = attribute(
        dtype='DevEnum',
        access=AttrWriteType.READ_WRITE,
        memorized=True,
        doc="The control mode of the device. REMOTE, LOCAL"
        "\nTANGO Device accepts only from a ‘local’ client and ignores commands and "
        "queries received from TM or any other ‘remote’ clients. The Local clients"
        " has to release LOCAL control before REMOTE clients can take control again.",
        enum_labels=[
            "REMOTE",
            "LOCAL",
        ],
    )

    simulationMode = attribute(
        dtype='bool',
        access=AttrWriteType.READ_WRITE,
        memorized=True,
        doc=
        "Reports the simulation mode of the device. \nSome devices may implement "
        "both modes, while others will have simulators that set simulationMode "
        "to True while the real devices always set simulationMode to False.",
    )

    testMode = attribute(
        dtype='str',
        access=AttrWriteType.READ_WRITE,
        memorized=True,
        doc="The test mode of the device. \n"
        "Either no test mode (empty string) or an "
        "indication of the test mode.",
    )

    # ---------------
    # General methods
    # ---------------

    def init_device(self):
        """
        Method that initializes the tango device after startup.
        :return: None
        """
        Device.init_device(self)
        # PROTECTED REGION ID(SKABaseDevice.init_device) ENABLED START #

        # Initialize attribute values.
        self._build_state = '{}, {}, {}'.format(release.name, release.version,
                                                release.description)
        self._version_id = release.version
        self._central_logging_level = int(tango.LogLevel.LOG_OFF)
        self._element_logging_level = int(tango.LogLevel.LOG_OFF)
        self._storage_logging_level = int(tango.LogLevel.LOG_OFF)
        self._health_state = 0
        self._admin_mode = 0
        self._control_mode = 0
        self._simulation_mode = False
        self._test_mode = ""

        # create TANGO Groups objects dict, according to property
        self.debug_stream("Groups definitions: {}".format(
            self.GroupDefinitions))
        try:
            self.groups = get_groups_from_json(self.GroupDefinitions)
            self.info_stream("Groups loaded: {}".format(
                sorted(self.groups.keys())))
        except GroupDefinitionsError:
            self.info_stream("No Groups loaded for device: {}".format(
                self.get_name()))

        # PROTECTED REGION END #    //  SKABaseDevice.init_device

    def always_executed_hook(self):
        # PROTECTED REGION ID(SKABaseDevice.always_executed_hook) ENABLED START #
        """
        Method that is always executed before any device command gets executed.
        :return: None
        """
        pass
        # PROTECTED REGION END #    //  SKABaseDevice.always_executed_hook

    def delete_device(self):
        # PROTECTED REGION ID(SKABaseDevice.delete_device) ENABLED START #
        """
        Method to cleanup when device is stopped.
        :return: None
        """

        pass
        # PROTECTED REGION END #    //  SKABaseDevice.delete_device

    # ------------------
    # Attributes methods
    # ------------------

    def read_buildState(self):
        # PROTECTED REGION ID(SKABaseDevice.buildState_read) ENABLED START #
        """
        Reads the Build State of the device.
        :return: Build State of the device
        """
        return self._build_state
        # PROTECTED REGION END #    //  SKABaseDevice.buildState_read

    def read_versionId(self):
        # PROTECTED REGION ID(SKABaseDevice.versionId_read) ENABLED START #
        """
        Reads the Version Id of the device.
        :return: Version Id of the device
        """
        return self._version_id
        # PROTECTED REGION END #    //  SKABaseDevice.versionId_read

    def read_centralLoggingLevel(self):
        # PROTECTED REGION ID(SKABaseDevice.centralLoggingLevel_read) ENABLED START #
        """
        Reads the central logging level of the device.
        :return: Central logging level of the device
        """
        return self._central_logging_level
        # PROTECTED REGION END #    //  SKABaseDevice.centralLoggingLevel_read

    def write_centralLoggingLevel(self, value):
        # PROTECTED REGION ID(SKABaseDevice.centralLoggingLevel_write) ENABLED START #
        """
        Sets central logging level of the device
        :param value: Logging level for Central Logger
        :return: None
        """
        self._central_logging_level = value
        # PROTECTED REGION END #    //  SKABaseDevice.centralLoggingLevel_write

    def read_elementLoggingLevel(self):
        # PROTECTED REGION ID(SKABaseDevice.elementLoggingLevel_read) ENABLED START #
        """
        Reads element logging level of the device.
        :return: Element logging level of the device.
        """
        return self._element_logging_level
        # PROTECTED REGION END #    //  SKABaseDevice.elementLoggingLevel_read

    def write_elementLoggingLevel(self, value):
        # PROTECTED REGION ID(SKABaseDevice.elementLoggingLevel_write) ENABLED START #
        """
        Sets element logging level of the device
        :param value: Logging Level for Element Logger
        :return: None
        """
        self._element_logging_level = value
        # PROTECTED REGION END #    //  SKABaseDevice.elementLoggingLevel_write

    def read_storageLoggingLevel(self):
        # PROTECTED REGION ID(SKABaseDevice.storageLoggingLevel_read) ENABLED START #
        """
        Reads storage logging level of the device.
        :return: Storage logging level of the device.
        """
        return self._storage_logging_level
        # PROTECTED REGION END #    //  SKABaseDevice.storageLoggingLevel_read

    def write_storageLoggingLevel(self, value):
        # PROTECTED REGION ID(SKABaseDevice.storageLoggingLevel_write) ENABLED START #
        """
        Sets logging level at storage.
        :param value: Logging Level for storage logger
        :return: None
        """
        self._storage_logging_level = value
        if self._storage_logging_level == int(tango.LogLevel.LOG_FATAL):
            logger.setLevel(logging.FATAL)
        elif self._storage_logging_level == int(tango.LogLevel.LOG_ERROR):
            logger.setLevel(logging.ERROR)
        elif self._storage_logging_level == int(tango.LogLevel.LOG_WARN):
            logger.setLevel(logging.WARNING)
        elif self._storage_logging_level == int(tango.LogLevel.LOG_INFO):
            logger.setLevel(logging.INFO)
        elif self._storage_logging_level == int(tango.LogLevel.LOG_DEBUG):
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.DEBUG)
        # PROTECTED REGION END #    //  SKABaseDevice.storageLoggingLevel_write

    def read_healthState(self):
        # PROTECTED REGION ID(SKABaseDevice.healthState_read) ENABLED START #
        """
        Reads Health State of the device.
        :return: Health State of the device
        """
        return self._health_state
        # PROTECTED REGION END #    //  SKABaseDevice.healthState_read

    def read_adminMode(self):
        # PROTECTED REGION ID(SKABaseDevice.adminMode_read) ENABLED START #
        """
        Reads Admin Mode of the device.
        :return: Admin Mode of the device
        """
        return self._admin_mode
        # PROTECTED REGION END #    //  SKABaseDevice.adminMode_read

    def write_adminMode(self, value):
        # PROTECTED REGION ID(SKABaseDevice.adminMode_write) ENABLED START #
        """
        Sets Admin Mode of the device.
        :param value: Admin Mode of the device.
        :return: None
        """
        self._admin_mode = value
        # PROTECTED REGION END #    //  SKABaseDevice.adminMode_write

    def read_controlMode(self):
        # PROTECTED REGION ID(SKABaseDevice.controlMode_read) ENABLED START #
        """
        Reads Control Mode of the device.
        :return: Control Mode of the device
        """
        return self._control_mode
        # PROTECTED REGION END #    //  SKABaseDevice.controlMode_read

    def write_controlMode(self, value):
        # PROTECTED REGION ID(SKABaseDevice.controlMode_write) ENABLED START #
        """
        Sets Control Mode of the device.
        :param value: Control mode value
        :return: None
        """
        self._control_mode = value
        # PROTECTED REGION END #    //  SKABaseDevice.controlMode_write

    def read_simulationMode(self):
        # PROTECTED REGION ID(SKABaseDevice.simulationMode_read) ENABLED START #
        """
        Reads Simulation Mode of the device.
        :return: Simulation Mode of the device.
        """
        return self._simulation_mode
        # PROTECTED REGION END #    //  SKABaseDevice.simulationMode_read

    def write_simulationMode(self, value):
        # PROTECTED REGION ID(SKABaseDevice.simulationMode_write) ENABLED START #
        """
        Sets Simulation Mode of the device
        :param value: SimulationMode
        :return: None
        """
        self._simulation_mode = value
        # PROTECTED REGION END #    //  SKABaseDevice.simulationMode_write

    def read_testMode(self):
        # PROTECTED REGION ID(SKABaseDevice.testMode_read) ENABLED START #
        """
        Reads Test Mode of the device.
        :return: Test Mode of the device
        """
        return self._test_mode
        # PROTECTED REGION END #    //  SKABaseDevice.testMode_read

    def write_testMode(self, value):
        # PROTECTED REGION ID(SKABaseDevice.testMode_write) ENABLED START #
        """
        Sets Test Mode of the device.
        :param value: Test Mode
        :return: None
        """
        self._test_mode = value
        # PROTECTED REGION END #    //  SKABaseDevice.testMode_write

    # --------
    # Commands
    # --------

    @command(
        dtype_out='str', )
    @DebugIt()
    def GetMetrics(self):
        # PROTECTED REGION ID(SKABaseDevice.GetMetrics) ENABLED START #
        ### TBD - read the value of each of the attributes in the MetricList
        with exception_manager(self):
            args_dict = {
                'with_value': True,
                'with_commands': False,
                'with_metrics': True,
                'with_attributes': False
            }
            device_dict = self._get_device_json(args_dict)
            argout = json.dumps(device_dict)

        return argout
        # PROTECTED REGION END #    //  SKABaseDevice.GetMetrics

    @command(
        dtype_in='str',
        doc_in=
        "Requests the JSON string representing this device, can be filtered "
        "\nby with_commands, with_metrics, with_attributes and \nwith_value. Defaults for empty string "
        "argin are:\n{`with_value`:false, `with_commands`:true, with_metrics`:true,"
        " `with_attributes`:false}",
        dtype_out='str',
        doc_out=
        "The JSON string representing this device, \nfiltered as per the input argument flags.",
    )
    @DebugIt()
    def ToJson(self, argin):
        # PROTECTED REGION ID(SKABaseDevice.ToJson) ENABLED START #

        # TBD - see how to use fandango's export_device_to_dict
        with exception_manager(self):
            defaults = {
                'with_value': False,
                'with_commands': True,
                'with_metrics': True,
                'with_attributes': False
            }
            args_dict = self._parse_argin(argin, defaults=defaults)
            device_dict = self._get_device_json(args_dict)
            argout = json.dumps(device_dict)
        return argout
        # PROTECTED REGION END #    //  SKABaseDevice.ToJson

    @command(
        dtype_out=('str', ),
        doc_out="[ name: EltTelState ]",
    )
    @DebugIt()
    def GetVersionInfo(self):
        # PROTECTED REGION ID(SKABaseDevice.GetVersionInfo) ENABLED START #
        """
        Returns the version information of the device.
        :return: Version details of the device.
        """
        return [
            '{}, {}'.format(self.__class__.__name__, self.read_buildState())
        ]
        # PROTECTED REGION END #    //  SKABaseDevice.GetVersionInfo

    @command()
    @DebugIt()
    def Reset(self):
        # PROTECTED REGION ID(SKABaseDevice.Reset) ENABLED START #
        """
        Reset device to its default state.
        :return: None
        """
        pass
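To actually start a device class like this one, PyTango's standard server entry point can be used; a minimal sketch, assuming the usual tango.server runner:

from tango.server import run

if __name__ == '__main__':
    run((SKABaseDevice,))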
Example #52
def main():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    state = State(options.state_file_name)
    try:
        # capture the time we start running so next time we catch any events
        # created while we run.
        lastrun = toUTC(datetime.now()).isoformat()

        # get our credentials
        mozdefClient = json.loads(open(options.jsoncredentialfile).read())
        client_email = mozdefClient['client_email']
        private_key = mozdefClient['private_key']

        # set the oauth scope we will request
        scope = [
            'https://www.googleapis.com/auth/admin.reports.audit.readonly',
            'https://www.googleapis.com/auth/admin.reports.usage.readonly'
        ]

        # authorize our http object
        # we do this as a 'service account' so it's important
        # to specify the correct 'sub' option
        # or you will get access denied even with correct delegations/scope

        credentials = SignedJwtAssertionCredentials(client_email,
                                                    private_key,
                                                    scope=scope,
                                                    sub=options.impersonate)
        http = Http()
        credentials.authorize(http)

        # build a request to the admin sdk
        api = build('admin', 'reports_v1', http=http)
        response = api.activities().list(
            userKey='all',
            applicationName='login',
            startTime=toUTC(
                state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'),
            maxResults=options.recordlimit).execute()

        # fix up the event craziness to a flatter format
        events = []
        if 'items' in response.keys():
            for i in response['items']:
                # flatten the sub dict/lists to pull out the good parts
                event = dict(category='google')
                event['tags'] = ['google', 'authentication']
                event['severity'] = 'INFO'
                event['summary'] = 'google authentication: '

                details = dict()
                for keyValue in flattenDict(i):
                    # change key/values like:
                    # actor.email=value
                    # to actor_email=value

                    key, value = keyValue.split('=')
                    key = key.replace('.', '_').lower()
                    details[key] = value

                # find important keys
                # and adjust their location/name
                if 'ipaddress' in details.keys():
                    # it's the source ip
                    details['sourceipaddress'] = details['ipaddress']
                    del details['ipaddress']

                if 'id_time' in details.keys():
                    event['timestamp'] = details['id_time']
                    event['utctimestamp'] = details['id_time']
                if 'events_name' in details.keys():
                    event['summary'] += details['events_name'] + ' '
                if 'actor_email' in details.keys():
                    event['summary'] += details['actor_email'] + ' '

                event['details'] = details
                events.append(event)

        # post events to mozdef
        logger.debug('posting {0} google events to mozdef'.format(len(events)))
        for e in events:
            requests.post(options.url, data=json.dumps(e))

        # record the time we started as
        # the start time for next time.
        state.data['lastrun'] = lastrun
        state.write_state_file()
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #53
def esRotateIndexes():
    if options.output == 'syslog':
        logger.addHandler(
            SysLogHandler(address=(options.sysloghostname,
                                   options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    try:
        es = ElasticsearchClient(
            (list('{0}'.format(s) for s in options.esservers)))

        indices = es.get_indices()

        # calc dates for use in index names events-YYYYMMDD, alerts-YYYYMM, etc.
        odate_day = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m%d')
        odate_month = date.strftime(
            toUTC(datetime.now()) - timedelta(days=1), '%Y%m')
        ndate_day = date.strftime(toUTC(datetime.now()), '%Y%m%d')
        ndate_month = date.strftime(toUTC(datetime.now()), '%Y%m')
        # examine each index in the .conf file
        # for rotation settings
        for (index, dobackup, rotation,
             pruning) in zip(options.indices, options.dobackup,
                             options.rotation, options.pruning):
            try:
                if rotation != 'none':
                    oldindex = index
                    newindex = index
                    if rotation == 'daily':
                        oldindex += '-%s' % odate_day
                        newindex += '-%s' % ndate_day
                    elif rotation == 'monthly':
                        oldindex += '-%s' % odate_month
                        newindex += '-%s' % ndate_month
                        # do not rotate before the month ends
                        if oldindex == newindex:
                            logger.debug(
                                'do not rotate %s index, month has not changed yet'
                                % index)
                            continue
                    if newindex not in indices:
                        index_settings = {}
                        if 'events' in newindex:
                            index_settings = {
                                "index": {
                                    "refresh_interval":
                                    options.refresh_interval,
                                    "number_of_shards":
                                    options.number_of_shards,
                                    "number_of_replicas":
                                    options.number_of_replicas,
                                    "search.slowlog.threshold.query.warn":
                                    options.slowlog_threshold_query_warn,
                                    "search.slowlog.threshold.fetch.warn":
                                    options.slowlog_threshold_fetch_warn,
                                    "mapping.total_fields.limit":
                                    options.mapping_total_fields_limit
                                }
                            }
                        elif 'alerts' in newindex:
                            index_settings = {"index": {"number_of_shards": 1}}
                        default_mapping_contents['settings'] = index_settings
                        logger.debug('Creating %s index' % newindex)
                        es.create_index(newindex, default_mapping_contents)
                    # set aliases: events to events-YYYYMMDD
                    # and events-previous to events-YYYYMMDD-1
                    logger.debug('Setting {0} alias to index: {1}'.format(
                        index, newindex))
                    es.create_alias(index, newindex)
                    if oldindex in indices:
                        logger.debug(
                            'Setting {0}-previous alias to index: {1}'.format(
                                index, oldindex))
                        es.create_alias('%s-previous' % index, oldindex)
                    else:
                        logger.debug(
                            'Old index %s is missing, do not change %s-previous alias'
                            % (oldindex, index))
            except Exception as e:
                logger.error(
                    "Unhandled exception while rotating %s, terminating: %r" %
                    (index, e))

        indices = es.get_indices()
        # Create weekly aliases for certain indices
        week_ago_date = toUTC(datetime.now()) - timedelta(weeks=1)
        week_ago_str = week_ago_date.strftime('%Y%m%d')
        current_date = toUTC(datetime.now())
        for index in options.weekly_rotation_indices:
            weekly_index_alias = '%s-weekly' % index
            logger.debug('Trying to re-alias {0} to indices since {1}'.format(
                weekly_index_alias, week_ago_str))
            existing_weekly_indices = []
            for day_obj in daterange(week_ago_date, current_date):
                day_str = day_obj.strftime('%Y%m%d')
                day_index = index + '-' + str(day_str)
                if day_index in indices:
                    existing_weekly_indices.append(day_index)
                else:
                    logger.debug("%s not found, so can't assign weekly alias" %
                                 day_index)
            if existing_weekly_indices:
                logger.debug('Creating {0} alias for {1}'.format(
                    weekly_index_alias, existing_weekly_indices))
                es.create_alias_multiple_indices(weekly_index_alias,
                                                 existing_weekly_indices)
            else:
                logger.warning(
                    'No indices within the past week to assign events-weekly to'
                )
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #54
        self.streamer.load_pipelines("pipelines.gst", "Test")

        ret = self.streamer.pipeline_get_parameter(
            {"realpipeline": {
                "fakesrc0": {
                    "name": None
                }
            }})
        self.assertEqual(ret['realpipeline']['fakesrc0']['name'], 'fakesrc0')

    def tearDown(self):
        self.streamer.fini()
        del self.streamer


if __name__ == '__main__':
    from logging.handlers import SysLogHandler

    #    sysloghandler = SysLogHandler(address='/dev/log')
    sysloghandler = SysLogHandler()
    sysloghandler.setLevel(logging.DEBUG)
    sysloghandler.setFormatter(
        logging.Formatter('%(levelname)s:%(filename)s:%(lineno)s: %(msg)s'))

    logger = logging.getLogger('')
    logger.addHandler(sysloghandler)
    logger.setLevel(logging.DEBUG)

    unittest.main()
Example #55
#! /usr/bin/python

import logging
from logging.handlers import SysLogHandler

if __name__ == "__main__":
    sh = SysLogHandler(address='/dev/log')
    sh.setFormatter(logging.Formatter('test: %(levelname)s: %(message)s'))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(sh)
    logging.info("test")

Example #56
import sys
import os
import logging
import httplib
import traceback
from flask import jsonify
from logging import Formatter, StreamHandler
from logging.handlers import SysLogHandler

if not app.config.get('TESTING'):
    newrelic.agent.initialize('newrelic.ini')
os.environ['TZ'] = 'US/Eastern'

# Initialize logging
streamhandler = StreamHandler(sys.stdout)
sysloghandler = SysLogHandler(address=(PAPERTRAIL_URL, PAPERTRAIL_PORT))
formatter = Formatter(LOG_FORMAT)
streamhandler.setFormatter(formatter)
sysloghandler.setFormatter(formatter)
app.logger.addHandler(sysloghandler)
app.logger.addHandler(streamhandler)
app.logger.setLevel(logging.DEBUG)


def get_credentials(request_info):
    """Get credentials from request."""
    try:
        return getcredentials.get_credentials(request_info.get('args'))
    except ValueError as err:
        print "ValueError in credentials: " + err.message
        if DEBUG:
Example #57
#!/usr/bin/env python

import sys
import vici
import daemon
import logging
from logging.handlers import SysLogHandler
import subprocess
import resource


logger = logging.getLogger('updownLogger')
handler = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter('charon-updown: %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)


def handle_interfaces(ike_sa, up):
    if_id_in = int(ike_sa['if-id-in'], 16)
    if_id_out = int(ike_sa['if-id-out'], 16)
    ifname_in = "xfrm-{}-in".format(if_id_in)
    ifname_out = "xfrm-{}-out".format(if_id_out)

    if up:
        logger.info("add XFRM interfaces %s and %s", ifname_in, ifname_out)
        subprocess.call(["/usr/local/libexec/ipsec/xfrmi", "-n", ifname_out,
                         "-i", str(if_id_out), "-d", "eth0"])
        subprocess.call(["/usr/local/libexec/ipsec/xfrmi", "-n", ifname_in,
                         "-i", str(if_id_in), "-d", "eth0"])
        subprocess.call(["ip", "link", "set", ifname_out, "up"])
Example #58
def certidude_app(log_handlers=[]):
    import os

    import falcon

    from certidude import authority, config
    from .signed import SignedCertificateDetailResource
    from .request import RequestListResource, RequestDetailResource
    from .lease import LeaseResource, LeaseDetailResource
    from .script import ScriptResource
    from .tag import TagResource, TagDetailResource
    from .attrib import AttributeResource
    from .bootstrap import BootstrapResource
    from .token import TokenResource
    from .builder import ImageBuilderResource

    app = falcon.API(middleware=NormalizeMiddleware())
    app.req_options.auto_parse_form_urlencoded = True
    #app.req_options.strip_url_path_trailing_slash = False

    # Certificate authority API calls
    app.add_route("/api/certificate/", CertificateAuthorityResource())
    app.add_route("/api/signed/{cn}/", SignedCertificateDetailResource(authority))
    app.add_route("/api/request/{cn}/", RequestDetailResource(authority))
    app.add_route("/api/request/", RequestListResource(authority))
    app.add_route("/api/", SessionResource(authority))

    if config.USER_ENROLLMENT_ALLOWED: # TODO: add token enable/disable flag for config
        app.add_route("/api/token/", TokenResource(authority))

    # Extended attributes for scripting etc.
    app.add_route("/api/signed/{cn}/attr/", AttributeResource(authority, namespace="machine"))
    app.add_route("/api/signed/{cn}/script/", ScriptResource(authority))

    # API calls used by pushed events on the JS end
    app.add_route("/api/signed/{cn}/tag/", TagResource(authority))
    app.add_route("/api/signed/{cn}/lease/", LeaseDetailResource(authority))

    # API call used to delete existing tags
    app.add_route("/api/signed/{cn}/tag/{tag}/", TagDetailResource(authority))

    # Gateways can submit leases via this API call
    app.add_route("/api/lease/", LeaseResource(authority))

    # Bootstrap resource
    app.add_route("/api/bootstrap/", BootstrapResource(authority))

    # LEDE image builder resource
    app.add_route("/api/build/{profile}/{suggested_filename}", ImageBuilderResource())

    # Add CRL handler if we have any whitelisted subnets
    if config.CRL_SUBNETS:
        from .revoked import RevocationListResource
        app.add_route("/api/revoked/", RevocationListResource(authority))

    # Add SCEP handler if we have any whitelisted subnets
    if config.SCEP_SUBNETS:
        from .scep import SCEPResource
        app.add_route("/api/scep/", SCEPResource(authority))

    # Add sink for serving static files
    app.add_sink(StaticResource(os.path.join(__file__, "..", "..", "static")))

    if config.OCSP_SUBNETS:
        from .ocsp import OCSPResource
        app.add_sink(OCSPResource(authority), prefix="/api/ocsp")

    # Set up log handlers
    if config.LOGGING_BACKEND == "sql":
        from certidude.mysqllog import LogHandler
        from certidude.api.log import LogResource
        uri = config.cp.get("logging", "database")
        log_handlers.append(LogHandler(uri))
        app.add_route("/api/log/", LogResource(uri))
    elif config.LOGGING_BACKEND == "syslog":
        from logging.handlers import SysLogHandler
        log_handlers.append(SysLogHandler())
        # Browsing syslog via HTTP is obviously not possible out of the box
    elif config.LOGGING_BACKEND:
        raise ValueError("Invalid logging.backend = %s" % config.LOGGING_BACKEND)

    return app
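Since certidude_app() returns a plain WSGI application (falcon.API implements the WSGI interface), it can be mounted on any WSGI server; a sketch with wsgiref, where the bind address and port are arbitrary:

from wsgiref.simple_server import make_server

log_handlers = []
application = certidude_app(log_handlers)
make_server('127.0.0.1', 8080, application).serve_forever()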
Example #59
def get_logger(
    conf,
    name=None,
    log_to_console=False,
    log_route=None,
    fmt='%(server)s %(message)s',
):
    """
    Get the current system logger using config settings.

    **Log config and defaults**::

        log_facility = LOG_LOCAL0
        log_level = INFO
        log_name = gate
        log_udp_host = (disabled)
        log_udp_port = logging.handlers.SYSLOG_UDP_PORT
        log_address = /dev/log
        log_statsd_host = (disabled)
        log_statsd_port = 8125
        log_statsd_default_sample_rate = 1.0
        log_statsd_sample_rate_factor = 1.0
        log_statsd_metric_prefix = (empty-string)

    :param conf: Configuration dict to read settings from
    :param name: Name of the logger
    :param log_to_console: Add handler which writes to console on stderr
    :param log_route: Route for the logging, not emitted to the log, just used
                      to separate logging configurations
    :param fmt: Override log format
    """

    if not conf:
        conf = {}
    if name is None:
        name = conf.get('log_name', 'gate')
    if not log_route:
        log_route = name
    logger = logging.getLogger(log_route)
    logger.propagate = False

    # all new handlers will get the same formatter

    formatter = GateLogFormatter(fmt)

    # get_logger will only ever add one SysLog Handler to a logger

    if not hasattr(get_logger, 'handler4logger'):
        get_logger.handler4logger = {}
    if logger in get_logger.handler4logger:
        logger.removeHandler(get_logger.handler4logger[logger])

    # facility for this logger will be set by last call wins

    facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
                       SysLogHandler.LOG_LOCAL0)
    udp_host = conf.get('log_udp_host')
    if udp_host:
        udp_port = int(
            conf.get('log_udp_port', logging.handlers.SYSLOG_UDP_PORT))
        handler = SysLogHandler(address=(udp_host, udp_port),
                                facility=facility)
    else:
        log_address = conf.get('log_address', '/dev/log')
        try:
            handler = SysLogHandler(address=log_address, facility=facility)
        except socket.error as e:

            # Either /dev/log isn't a UNIX socket or it does not exist at all

            if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
                raise e
            handler = SysLogHandler(facility=facility)
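The fallback chain above (UNIX socket first, then plain UDP) is a common defensive pattern; distilled into a small helper (the helper name is ours):

import socket
from logging.handlers import SysLogHandler


def syslog_handler_or_fallback(address='/dev/log',
                               facility=SysLogHandler.LOG_LOCAL0):
    try:
        return SysLogHandler(address=address, facility=facility)
    except (OSError, socket.error):
        # /dev/log missing or not a socket: fall back to UDP localhost:514
        return SysLogHandler(facility=facility)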
Example #60
__status__ = "Production"

import socket, struct, sys, time, logging, re, mysql.connector, syslog, errno, signal, threading, unicodedata, json
from logging.handlers import SysLogHandler

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)

try:
    with open('saslquota.json') as json_data_file:
        _conf = json.load(json_data_file)
except (IOError, ValueError):
    sys.stderr.write("can't open or parse saslquota.json\n")
    sys.exit(1)

logger = logging.getLogger()
# name the handler distinctly so it does not shadow the imported syslog module
syslog_handler = SysLogHandler(address='/dev/log',
                               facility=str(_conf["_logfacility"]))
formatter = logging.Formatter('postfix/%(module)s[%(process)d]:%(message)s')
syslog_handler.setFormatter(formatter)
logger.addHandler(syslog_handler)
logger.setLevel(logging.getLevelName(_conf["_loglevel"]))

class Job(threading.Thread):

    def __init__(self,sock,name):
        threading.Thread.__init__(self)
        self.starttime = time.time()
        self.shutdown_flag = threading.Event()
        self.sock = sock
        self.name = name
        self.__sasl_username = None
        self.__recipient = None