Exemplo n.º 1
0
    def setup(self, level='warn', namespace_levels=None, text_file=sys.stderr,
              time_format='%H:%M:%S.%f', handle_stdlib=True, stdlib_level='notset',
              stdlib_prefix='stdlib.'):
        """
        Initiate the twisted.logger system.

        :param level: default log level name (e.g. 'warn', 'info').
        :param namespace_levels: optional dict mapping namespaces to level names.
        :param text_file: file object the log is written to.
        :param time_format: timestamp format, per datetime.strftime.
        :param handle_stdlib: whether stdlib logging should be captured too.
        :param stdlib_level: level name above which stdlib records are handled.
        :param stdlib_prefix: prefix prepended to stdlib logger names.
        """
        self._predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(level),
        )
        # Per-namespace overrides are layered on top of the default level.
        for namespace, level_name in (namespace_levels or {}).items():
            self._predicate.setLogLevelForNamespace(
                namespace, LogLevel.levelWithName(level_name))

        file_observer = textFileLogObserver(text_file, timeFormat=time_format)
        globalLogBeginner.beginLoggingTo([self._filtered_observer(file_observer)])

        if handle_stdlib:
            self._handle_stdlib(stdlib_level, stdlib_prefix)
Exemplo n.º 2
0
    def emit(self, level, format=None, **kwargs):
        """Build a log event for *level* and hand it to the observer."""
        if level not in LogLevel.iterconstants():
            # Report the bad level through the logger itself, then bail out.
            self.failure(
                "Got invalid log level {invalidLevel!r} in {logger}.emit().",
                Failure(InvalidLogLevelError(level)),
                invalidLevel=level,
                logger=self,
            )
            return

        event = dict(
            kwargs,
            log_logger=self,
            log_level=level,
            log_namespace=self.namespace,
            log_source=self.source,
            log_format=format,
            log_time=time.time(),
        )

        # ---------------------------------8<---------------------------------
        # this is a workaround for the mess between twisted's legacy log system
        # and twistd's --syslog option.
        event["system"] = "%s#%s" % (self.namespace, level.name)
        # ---------------------------------8<---------------------------------

        if "log_trace" in event:
            event["log_trace"].append((self, self.observer))

        self.observer(event)
Exemplo n.º 3
0
    def _set_log_level(self, level):
        """
        Enable logging methods up to *level* and stub out the rest.

        Methods at or below the desired level are bound to real emitters;
        methods above it are replaced with the no-op placeholder.
        """
        desired_index = log_levels.index(level)

        for idx, name in enumerate(log_levels):
            # 'none' is a sentinel level, not an actual logging method.
            if name == 'none':
                continue

            if idx <= desired_index:
                # Only (re)bind methods that are missing or stubbed out.
                if getattr(self, name, None) in (_no_op, None):
                    if name == 'trace':
                        setattr(self, 'trace', self._trace)
                    else:
                        setattr(self, name,
                                partial(self._log, LogLevel.lookupByName(name)))
                    # 'failure' mirrors 'error' and is bound alongside it.
                    if name == 'error':
                        setattr(self, 'failure', self._failure)
            else:
                current = getattr(self, name, None)
                if not current == _no_op or current is None:
                    setattr(self, name, _no_op)
                if name == 'error':
                    setattr(self, 'failure', _no_op)

        self._log_level = level
Exemplo n.º 4
0
def setLogLevel(namespace=None, levelStr='info'):
    '''
    Apply a new log level to the given namespace.

    levelStr is one of: 'critical', 'error', 'warn', 'info', 'debug'.
    '''
    logLevelFilterPredicate.setLogLevelForNamespace(
        namespace=namespace,
        level=LogLevel.levelWithName(levelStr),
    )
Exemplo n.º 5
0
    def _set_log_level(self, level):
        """Bind real log methods up to *level*; stub out the more verbose ones."""
        # up to the desired level, we don't do anything, as we're a
        # "real" Twisted new-logger; for methods *after* the desired
        # level, we bind to the no_op method
        desired_index = log_levels.index(level)

        for (idx, name) in enumerate(log_levels):
            # 'none' is a sentinel level, not an actual logging method.
            if name == 'none':
                continue

            if idx > desired_index:
                # Too verbose for the desired level: replace with the no-op
                # (unless it already is the no-op).
                current = getattr(self, name, None)
                if not current == _no_op or current is None:
                    setattr(self, name, _no_op)
                # 'failure' mirrors 'error' and is stubbed alongside it.
                if name == 'error':
                    setattr(self, 'failure', _no_op)

            else:
                # Within the desired level: (re)bind only methods that are
                # currently missing or stubbed out.
                if getattr(self, name, None) in (_no_op, None):

                    if name == 'trace':
                        setattr(self, "trace", self._trace)
                    else:
                        setattr(self, name,
                                partial(self._log, LogLevel.lookupByName(name)))

                    if name == 'error':
                        setattr(self, "failure", self._failure)

        self._log_level = level
 def __init__(self,
              logger_name,
              log_level="debug",
              log_format="json",
              log_output="stdout"):
     """Configure the logger's output target and event formatter.

     log_output may be a file-like object, or one of the strings "none"
     (discard events), "stdout", "buffer" (in-memory StringIO), or a
     filename.  log_format selects a "<format>_format" method when one
     exists, otherwise the classic text formatter is used.
     """
     self._start = time.time()
     self.logger_name = logger_name
     self._filename = None
     self._log_level = LogLevel.lookupByName(log_level)
     self._output = None
     # A non-string log_output is treated as an already-open stream.
     if not isinstance(log_output, str):
         self._output = log_output
     else:
         if log_output.lower() == "none":
             # Discard events entirely; no output target is configured.
             self.format_event = self.null_format
             return
         elif log_output.lower() == "stdout":
             self._output = sys.stdout
         elif log_output.lower() == "buffer":
             self._output = io.StringIO()
         else:
             # Any other string is interpreted as a log file path.
             self._filename = log_output
     # Pick the formatter named "<log_format>_format" if defined on this
     # class; otherwise fall back to the classic text formatter.
     try:
         self.format_event = getattr(self, "{}_format".format(log_format))
     except AttributeError:
         self.format_event = formatEventAsClassicLogText
Exemplo n.º 7
0
def make_wrapped_observer(observer, log_level_name):
    """
    Wrap a legacy observer and filter it by the named log level.

    Returns a FilteringLogObserver around the (wrapped) observer that only
    passes events at or above *log_level_name* (case-insensitive).
    """
    predicate = LogLevelFilterPredicate(
        defaultLogLevel=LogLevel.lookupByName(log_level_name.lower()))
    wrapped = wrap_observer(LegacyLogObserverWrapper(observer.emit))
    return FilteringLogObserver(wrapped, [predicate])
Exemplo n.º 8
0
 def __init__(self, logger_name, log_level="debug", log_format="json",
              log_output="stdout", sentry_dsn=None,
              firehose_delivery_stream=None):
     """Configure output target, formatter, Sentry and Firehose delivery.

     log_output is "stdout", "none", or a file path.  log_format "json"
     selects JSON output; any other value uses the classic text formatter.
     """
     # Logger name is suffixed with the installed autopush version.
     self.logger_name = "-".join([
         logger_name,
         pkg_resources.get_distribution("autopush").version
     ])
     self._filename = None
     self.log_level = LogLevel.lookupByName(log_level)
     if log_output == "stdout":
         self._output = sys.stdout
     elif log_output == "none":
         self._output = None
     else:
         # Any other value is a filename; "file" presumably acts as a marker
         # consumed by the event writer — confirm against callers.
         self._filename = log_output
         self._output = "file"
     if log_format == "json":
         self.format_event = self.json_format
     else:
         self.format_event = formatEventAsClassicLogText
     if sentry_dsn:
         self.raven_client = raven.Client(
             release=raven.fetch_package_version("autopush"))
     else:
         self.raven_client = None
     if firehose_delivery_stream:
         self.firehose = FirehoseProcessor(
             stream_name=firehose_delivery_stream)
     else:
         self.firehose = None
Exemplo n.º 9
0
	def startService(self):
		"""Bring the IRCd up: config, storage, timers, modules, ports."""
		self.log.info("Starting up...")
		self.startupTime = now()
		self.log.info("Loading configuration...")
		self.config.reload()
		self.name = self.config["server_name"]
		self.serverID = self.config["server_id"]
		self.log.info("Loading storage...")
		# writeback=True lets mutated values be persisted by the periodic sync.
		self.storage = shelve.open(self.config["datastore_path"], writeback=True)
		self.storageSyncer = LoopingCall(self.storage.sync)
		self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False)
		self.log.info("Starting processes...")
		self.pruneRecentlyQuit = LoopingCall(self.pruneQuit)
		self.pruneRecentlyQuit.start(10, now=False)
		self.pruneRecentChannels = LoopingCall(self.pruneChannels)
		self.pruneRecentChannels.start(15, now=False)
		self.log.info("Loading modules...")
		self._loadModules()
		self.log.info("Binding ports...")
		self._bindPorts()
		self.log.info("txircd started!")
		# Apply the configured log level; fall back to 'warn' when the key is
		# missing or names an invalid level.
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn)
		self.runActionStandard("startup")
Exemplo n.º 10
0
 def __init__(self,
              logger_name,
              log_level="debug",
              log_format="json",
              log_output="stdout",
              sentry_dsn=None,
              firehose_delivery_stream=None):
     self.logger_name = "-".join(
         [logger_name,
          pkg_resources.get_distribution("autopush").version])
     self._filename = None
     self.log_level = LogLevel.lookupByName(log_level)
     if log_output == "stdout":
         self._output = sys.stdout
     elif log_output == "none":
         self._output = None
     else:
         self._filename = log_output
         self._output = "file"
     if log_format == "json":
         self.format_event = self.json_format
     else:
         self.format_event = formatEventAsClassicLogText
     if sentry_dsn:
         self.raven_client = raven.Client(
             release=raven.fetch_package_version("autopush"))
     else:
         self.raven_client = None
     if firehose_delivery_stream:
         self.firehose = FirehoseProcessor(
             stream_name=firehose_delivery_stream)
     else:
         self.firehose = None
Exemplo n.º 11
0
def setLogLevel(namespace=None, levelStr='info'):
    '''
    Set a new log level for a given namespace.

    levelStr is one of: 'critical', 'error', 'warn', 'info', 'debug'.
    '''
    level = LogLevel.levelWithName(levelStr)
    logLevelFilterPredicate.setLogLevelForNamespace(namespace=namespace, level=level)
def get_json_log_observer():
    """Build a level-filtered JSON observer writing to a rotated log file."""
    rotated_log = logfile.LogFile(
        "carbon_forwarder.log", log_dir,
        rotateLength=log_rotate_length,
        maxRotatedFiles=max_rotated_log_files)
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    return FilteringLogObserver(jsonFileLogObserver(rotated_log), [predicate])
Exemplo n.º 13
0
 def opt_log_level(self, levelName: str) -> None:
     """
     Set default log level.
     (options: {options}; default: "{default}")
     """
     # Parse the level name first; unknown names are a usage error.
     try:
         level = LogLevel.levelWithName(levelName)
     except InvalidLogLevelError:
         raise UsageError(f"Invalid log level: {levelName}")
     self["logLevel"] = level
Exemplo n.º 14
0
 def opt_log_level(self, levelName):
     """
     Set default log level.
     (options: {options}; default: "{default}")
     """
     # Translate the level name; reject unknown names as a usage error.
     try:
         parsed = LogLevel.levelWithName(levelName)
     except InvalidLogLevelError:
         raise UsageError("Invalid log level: {}".format(levelName))
     else:
         self["logLevel"] = parsed
Exemplo n.º 15
0
 def opt_log_level(self, levelName):
     """
     Set default log level.
     (options: {options}; default: "{default}")
     """
     try:
         self["logLevel"] = LogLevel.levelWithName(levelName)
     except InvalidLogLevelError:
         # Surface the bad name to the user as a command-line usage error.
         message = "Invalid log level: {}".format(levelName)
         raise UsageError(message)
Exemplo n.º 16
0
 def opt_log_level(self, levelName: str) -> None:
     """
     Set default log level.
     (options: {options}; default: "{default}")
     """
     try:
         self["logLevel"] = LogLevel.levelWithName(levelName)
     except InvalidLogLevelError:
         # Unknown level names become a command-line usage error.
         error = f"Invalid log level: {levelName}"
         raise UsageError(error)
Exemplo n.º 17
0
    def emit(self, level, *args, **kwargs):
        """Dispatch a log call named by *level*, honouring the current level."""
        # Drop events that are more verbose than the configured level.
        if log_levels.index(level) > log_levels.index(self._log_level):
            return

        # 'trace' is not a real Twisted LogLevel; it has a dedicated path.
        if level == "trace":
            return self._trace(*args, **kwargs)

        return self._log(LogLevel.lookupByName(level), *args, **kwargs)
Exemplo n.º 18
0
    def emit(self, level, *args, **kwargs):
        """Dispatch a log call named by *level*, honouring the current level."""

        # Drop events that are more verbose than the configured level.
        if log_levels.index(self._log_level) < log_levels.index(level):
            return

        # 'trace' is not a real Twisted LogLevel; it has a dedicated path.
        if level == "trace":
            return self._trace(*args, **kwargs)

        level = LogLevel.lookupByName(level)
        return self._log(level, *args, **kwargs)
Exemplo n.º 19
0
    def startLogging(cls, logOutput, levelStr='debug'):
        """
        Install a global filtering text-file log observer.

        :param logOutput: a writable file object, or a path string; a path
            has its parent directory created and is opened in append mode.
        :param levelStr: default log level name (default: 'debug').
        """
        if isinstance(logOutput, str):
            # exist_ok avoids the check-then-create race of the previous
            # os.path.exists + makedirs pair; also stop shadowing builtin dir.
            parent = os.path.dirname(logOutput)
            if parent:
                os.makedirs(parent, exist_ok=True)
            # NOTE(review): the file handle is intentionally left open for
            # the lifetime of the process — it is the log sink.
            logOutput = open(logOutput, 'a+')

        predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(levelStr))
        observer = FilteringLogObserver(
            textFileLogObserver(outFile=logOutput), [predicate])
        globalLogPublisher.addObserver(observer)
Exemplo n.º 20
0
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    # Quieten the noisy HTTP/1.1 client factory regardless of the level.
    predicate.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    observer = FilteringLogObserver(textFileLogObserver(sys.stdout),
                                    [predicate])
    globalLogPublisher.addObserver(observer)
Exemplo n.º 21
0
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    log_level_filter = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level))
    # Quieten the noisy HTTP/1.1 client factory regardless of the level.
    log_level_filter.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    log_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [log_level_filter])
    globalLogPublisher.addObserver(log_observer)
Exemplo n.º 22
0
    def opt_log_level(self, levelName):
        """
        Set default log level to one of: {levelNames}.

        (default: info)
        """
        # Parse the level name; unknown names terminate with a usage error.
        try:
            level = LogLevel.levelWithName(levelName)
        except InvalidLogLevelError:
            exit(
                ExitStatus.EX_USAGE,
                "Invalid log level: {}".format(levelName)
            )
        else:
            self["logLevel"] = level
Exemplo n.º 23
0
class GlobalLoggerSettings:
    """Process-wide switches for the various global log observers."""

    # Default level consulted by the observers that honour this setting.
    log_level = LogLevel.levelWithName("info")
    _json_ipc = False  # TODO: Oh no... #1754

    @classmethod
    def set_log_level(cls, log_level_name):
        """Replace the global log level with the named one."""
        cls.log_level = LogLevel.levelWithName(log_level_name)

    @classmethod
    def start_console_logging(cls):
        globalLogPublisher.addObserver(console_observer)

    @classmethod
    def stop_console_logging(cls):
        globalLogPublisher.removeObserver(console_observer)

    @classmethod
    @contextmanager
    def pause_all_logging_while(cls):
        """
        Temporarily detach every observer; re-attach them on exit.

        Bug fix: the re-attachment now happens in a ``finally`` block, so
        logging is restored even when the body of the ``with`` raises —
        previously an exception left all observers permanently removed.
        """
        former_observers = tuple(globalLogPublisher._observers)
        for observer in former_observers:
            globalLogPublisher.removeObserver(observer)
        try:
            yield
        finally:
            for observer in former_observers:
                globalLogPublisher.addObserver(observer)

    @classmethod
    def start_text_file_logging(cls):
        globalLogPublisher.addObserver(get_text_file_observer())

    @classmethod
    def stop_text_file_logging(cls):
        globalLogPublisher.removeObserver(get_text_file_observer())

    @classmethod
    def start_json_file_logging(cls):
        globalLogPublisher.addObserver(get_json_file_observer())

    @classmethod
    def stop_json_file_logging(cls):
        globalLogPublisher.removeObserver(get_json_file_observer())

    @classmethod
    def start_sentry_logging(cls, dsn: str):
        _SentryInitGuard.init(dsn)
        globalLogPublisher.addObserver(sentry_observer)

    @classmethod
    def stop_sentry_logging(cls):
        globalLogPublisher.removeObserver(sentry_observer)
Exemplo n.º 24
0
    def __init__(self, udp_registry, prometheus_port=8000, gateway_port=8888, log_level='INFO'):
        """Collect metrics from the UDP registry and set up a filtered logger.

        :param udp_registry: registry whose "normal" metrics are collected.
        :param prometheus_port: port for the Prometheus endpoint.
        :param gateway_port: port for the gateway.
        :param log_level: level name, case-insensitive (default 'INFO').
        """
        # TODO: add available log levels

        # TODO: change name to collector?
        self._metrics = udp_registry.get_normal_metrics()

        self._prometheus_port = prometheus_port
        self._gateway_port = gateway_port

        self._log_level = log_level

        # Dedicated logger writing to stdout, filtered at the requested level.
        level_predicate = LogLevelFilterPredicate(LogLevel.lookupByName(self._log_level.lower()))
        log_observer = FilteringLogObserver(textFileLogObserver(sys.stdout), predicates=[level_predicate])
        self.log = Logger(observer=log_observer)
Exemplo n.º 25
0
class TestObserveTwistedInternetTCP_Other(MAASTestCase):
    """Tests for `observe_tftp` with non-informational messages."""

    # One scenario per log level, except `info` (covered elsewhere).
    scenarios = tuple((log_level.name, {
        "log_level": log_level
    }) for log_level in LogLevel.iterconstants()
                      if log_level is not LogLevel.info)

    def test__propagates_other_events(self):
        # Non-info events must pass through observe_tftp unchanged.
        event = make_event(log_level=self.log_level)
        with TwistedLoggerFixture() as logger:
            observe_tftp(event)
        self.assertThat(logger.events, Contains(event))
        self.assertThat(event["log_level"], Is(self.log_level))
Exemplo n.º 26
0
    def set_level(self, namespace=None, level_name=None):
        """
        Change the logging level of namespace to level_name.

        With no namespace, every namespace is reset to level_name.
        With no level_name, the predicate's default log level is used.
        """
        level = (LogLevel.levelWithName(level_name) if level_name
                 else self._predicate.defaultLogLevel)
        if namespace:
            self._predicate.setLogLevelForNamespace(namespace, level)
        else:
            # New default level, and drop all per-namespace overrides.
            self._predicate.defaultLogLevel = level
            self._predicate.clearLogLevels()
Exemplo n.º 27
0
def boot(options=None):
    """Boot the application: read config, set up logging, start loaders.

    :param options: optional dict of docopt-style command line options;
        missing debug flags default to False.
    :return: the started KotoriBootloader instance.
    """

    options = options or {}
    options.setdefault('--debug', False)
    options.setdefault('--debug_mqtt', False)
    options.setdefault('--debug_mqtt_driver', False)
    options.setdefault('--debug_io', False)
    options.setdefault('--debug_influx', False)

    setup_logging()
    log.info(u'Starting ' + APP_NAME)

    # Read settings from configuration file
    # NOTE(review): options['--config'] is accessed without a default above —
    # callers must supply it; confirm this is intentional.
    configfile = get_configuration_file(options['--config'])
    log.info("Using configuration file {configfile}", configfile=configfile)
    settings = get_configuration(configfile)

    # Apply default settings
    apply_default_settings(settings)

    # Merge command line options into settings
    settings.setdefault('options', Bunch())
    for key, value in options.items():
        # lstrip(u'--') strips *all* leading '-' characters, not the literal
        # two-char prefix; equivalent here for docopt-style '--name' keys.
        key = key.lstrip(u'--')
        key = key.replace(u'-', u'_')
        settings.options[key] = value

    # Setup the logging subsystem
    log_level = 'info'
    if settings.options.debug:
        log_level = 'debug'
    startLogging(settings,
                 stream=sys.stderr,
                 level=LogLevel.levelWithName(log_level))

    # Boot all enabled applications and vendors
    loader = KotoriBootloader(settings=settings)
    loader.boot_applications()
    loader.boot_vendors()

    # Boot web configuration GUI
    if 'config-web' in settings:
        boot_frontend(settings, debug=settings.options.debug)

    return loader
Exemplo n.º 28
0
def setup_logging(log_level, log_name, log_directory=""):
    """
    Configure the logger to use the specified log file and log level
    """
    log_filter = LogLevelFilterPredicate()
    log_filter.setLogLevelForNamespace(
        "orscanner", LogLevel.levelWithName(log_level.lower()))

    # One observer for the daily-rotated file, one for the console; both
    # share the same namespace filter.
    file_observer = FileLogObserver(DailyLogFile(log_name, log_directory),
                                    log_event_format)
    console_observer = FileLogObserver(sys.stdout, log_event_format)

    for observer in (file_observer, console_observer):
        globalLogPublisher.addObserver(
            FilteringLogObserver(observer, (log_filter, )))
Exemplo n.º 29
0
	def rehash(self):
		"""
		Reloads the configuration file and applies changes.
		"""
		self.log.info("Rehashing...")
		self.config.reload()
		d = self._unbindPorts() # Unbind the ports that are bound
		if d: # And then bind the new ones
			DeferredList(d).addCallback(lambda result: self._bindPorts())
		else:
			self._bindPorts()
		
		# Re-apply the configured log level, keeping the old one on failure.
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			pass # If we can't set a new log level, we'll keep the old one
		
		# NOTE(review): dict.itervalues() is Python 2 only.
		for module in self.loadedModules.itervalues():
			module.rehash()
Exemplo n.º 30
0
def reset_log_file():
    """
    Replace the global log observer with one honouring the configured level.

    Removes the previously-installed observer (if any), then installs a new
    filtering observer writing to the configured log file or stderr, and
    records it in the module-level ``log_observer`` so the next call can
    remove it again.
    """
    global log_observer
    if log_observer:
        print('removing log observer')
        globalLogPublisher.removeObserver(log_observer)
    log_level = parsed_args.log_level or config['log_level']
    info_predicate = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level.lower()))
    if mlog_file_path:
        mlog_file = open(mlog_file_path, 'a+')
    else:
        mlog_file = sys.stderr

    new_observer = FilteringLogObserver(textFileLogObserver(mlog_file),
                                        predicates=[info_predicate])
    globalLogPublisher.addObserver(new_observer)
    # Bug fix: remember the new observer in the global so a later call can
    # remove it.  Previously it was stored only in a local variable, so the
    # global stayed stale and observers stacked up on every call.
    log_observer = new_observer

    # logger.info('resetting log output file')
    return
Exemplo n.º 31
0
def run():
    """Application entry point: configure, boot loaders, run the reactor."""

    setup_logging()
    log.info(u'Starting ' + APP_NAME)

    # Read commandline options
    # TODO: Do it the Twisted way
    options = docopt(__doc__, version=APP_NAME)

    # Read settings from configuration file
    configfile = get_configuration_file(options['--config'])
    log.info("Using configuration file {configfile}", configfile=configfile)
    settings = get_configuration(configfile)

    # Apply default settings
    apply_default_settings(settings)

    # Merge command line options into settings
    settings.setdefault('options', Bunch())
    # NOTE(review): dict.iteritems() is Python 2 only.
    for key, value in options.iteritems():
        # lstrip(u'--') strips all leading '-' characters; equivalent to
        # removing the '--' prefix for docopt-style option names.
        key = key.lstrip(u'--')
        key = key.replace(u'-', u'_')
        settings.options[key] = value

    # Setup the logging subsystem
    log_level = 'info'
    if settings.options.debug:
        log_level = 'debug'
    startLogging(settings,
                 stream=sys.stderr,
                 level=LogLevel.levelWithName(log_level))

    # Boot all enabled applications and vendors
    loader = KotoriBootloader(settings=settings)
    loader.boot_applications()
    loader.boot_vendors()

    # Boot web configuration GUI
    if 'config-web' in settings:
        boot_frontend(settings, debug=settings.options.debug)

    # Enter Twisted reactor loop
    reactor.run()
Exemplo n.º 32
0
class GlobalConsoleLogger:
    """Singleton-style switch for the global console/text-file observer."""

    log_level = LogLevel.levelWithName("info")
    started = False

    @classmethod
    def set_log_level(cls, log_level_name):
        """Update the level and make sure the observer is installed."""
        cls.log_level = LogLevel.levelWithName(log_level_name)
        cls.start_if_not_started()

    @classmethod
    def start(cls):
        """Attach the text-file observer to the global publisher."""
        globalLogPublisher.addObserver(getTextFileObserver())
        cls.started = True

    @classmethod
    def start_if_not_started(cls):
        """Idempotently install the observer."""
        if not cls.started:
            cls.start()
Exemplo n.º 33
0
def run():
    """Application entry point: configure, boot loaders, run the reactor."""

    setup_logging()
    log.info(u'Starting ' + APP_NAME)

    # Read commandline options
    # TODO: Do it the Twisted way
    options = docopt(__doc__, version=APP_NAME)

    # Read settings from configuration file
    configfile = get_configuration_file(options['--config'])
    log.info("Using configuration file {configfile}", configfile=configfile)
    settings = get_configuration(configfile)

    # Apply default settings
    apply_default_settings(settings)

    # Merge command line options into settings
    settings.setdefault('options', Bunch())
    # NOTE(review): dict.iteritems() is Python 2 only.
    for key, value in options.iteritems():
        # lstrip(u'--') strips all leading '-' characters; equivalent to
        # removing the '--' prefix for docopt-style option names.
        key = key.lstrip(u'--')
        key = key.replace(u'-', u'_')
        settings.options[key] = value

    # Setup the logging subsystem
    log_level = 'info'
    if settings.options.debug:
        log_level = 'debug'
    startLogging(settings, stream=sys.stderr, level=LogLevel.levelWithName(log_level))

    # Boot all enabled applications and vendors
    loader = KotoriBootloader(settings=settings)
    loader.boot_applications()
    loader.boot_vendors()

    # Boot web configuration GUI
    if 'config-web' in settings:
        boot_frontend(settings, debug=settings.options.debug)

    # Enter Twisted reactor loop
    reactor.run()
Exemplo n.º 34
0
class GlobalLoggerSettings:

    log_level = LogLevel.levelWithName("info")

    @classmethod
    def set_log_level(cls, log_level_name):
        cls.log_level = LogLevel.levelWithName(log_level_name)

    @classmethod
    def start_console_logging(cls):
        globalLogPublisher.addObserver(console_observer)

    @classmethod
    def stop_console_logging(cls):
        globalLogPublisher.removeObserver(console_observer)

    @classmethod
    def start_text_file_logging(cls):
        globalLogPublisher.addObserver(get_text_file_observer())

    @classmethod
    def stop_text_file_logging(cls):
        globalLogPublisher.removeObserver(get_text_file_observer())

    @classmethod
    def start_json_file_logging(cls):
        globalLogPublisher.addObserver(get_json_file_observer())

    @classmethod
    def stop_json_file_logging(cls):
        globalLogPublisher.removeObserver(get_json_file_observer())

    @classmethod
    def start_sentry_logging(cls, dsn: str):
        _SentryInitGuard.init(dsn)
        globalLogPublisher.addObserver(sentry_observer)

    @classmethod
    def stop_sentry_logging(cls):
        globalLogPublisher.removeObserver(sentry_observer)
Exemplo n.º 35
0
    def __init__(self, interface: bytes, config_dict: Dict[str, Any]) -> None:
        """Set up the server protocol from the loaded configuration.

        Configures logging, reads game/team/ban options, starts optional
        services (SSH, IRC, status server, ban publishing), binds the
        underlying ServerProtocol, and kicks off the map rotation plus the
        periodic ban-vacuum loop.
        """
        # --- logging -------------------------------------------------------
        # logfile path relative to config dir if not abs path
        log_filename = logfile.get()
        if log_filename.strip():  # catches empty filename
            if not os.path.isabs(log_filename):
                log_filename = os.path.join(config.config_dir, log_filename)
            ensure_dir_exists(log_filename)
            if logging_rotate_daily.get():
                logging_file = DailyLogFile(log_filename, '.')
            else:
                logging_file = open(log_filename, 'a')
            # Same level filter applied to both stderr and the log file.
            predicate = LogLevelFilterPredicate(
                LogLevel.levelWithName(loglevel.get()))
            observers = [
                FilteringLogObserver(textFileLogObserver(sys.stderr),
                                     [predicate]),
                FilteringLogObserver(textFileLogObserver(logging_file),
                                     [predicate])
            ]
            globalLogBeginner.beginLoggingTo(observers)
            log.info('piqueserver started on %s' % time.strftime('%c'))

        # --- general game configuration -----------------------------------
        self.config = config_dict
        if random_rotation.get():
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle
        self.default_time_limit = default_time_limit.get()
        self.default_cap_limit = cap_limit.get()
        self.advance_on_win = int(advance_on_win.get())
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()

        # attempt to load a saved bans list
        try:
            with open(os.path.join(config.config_dir, bans_file.get()),
                      'r') as f:
                self.bans.read_list(json.load(f))
            log.debug("loaded {count} bans", count=len(self.bans))
        except FileNotFoundError:
            log.debug("skip loading bans: file unavailable",
                      count=len(self.bans))
        except IOError as e:
            log.error('Could not read bans.txt: {}'.format(e))
        except ValueError as e:
            log.error('Could not parse bans.txt: {}'.format(e))

        self.hard_bans = set()  # possible DDoS'ers are added here
        self.player_memory = deque(maxlen=100)
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            log.warn('(server name too long; it will be truncated to "%s")' %
                     (self.name[:MAX_SERVER_NAME_SIZE]))
        self.respawn_time = respawn_time_option.get()
        self.respawn_waves = respawn_waves.get()

        # since AoS only supports CTF and TC at a protocol level, we need to get
        # the base game mode if we are using a custom game mode.
        game_mode_name = game_mode.get()
        if game_mode_name == 'ctf':
            self.game_mode = CTF_MODE
        # NOTE(review): re-reads game_mode.get() instead of using
        # game_mode_name — presumably equivalent; confirm option reads
        # are stable.
        elif game_mode.get() == 'tc':
            self.game_mode = TC_MODE
        # NOTE(review): for a custom mode this relies on self.game_mode
        # having been set elsewhere (e.g. by the custom game-mode script or a
        # class attribute); otherwise an AttributeError would be raised here
        # before the intended ValueError — confirm.
        elif self.game_mode not in [CTF_MODE, TC_MODE]:
            raise ValueError(
                'invalid game mode: custom game mode "{}" does not set '
                'protocol.game_mode to one of TC_MODE or CTF_MODE. Are '
                'you sure the thing you have specified is a game mode?'.format(
                    game_mode_name))

        # --- team and gameplay options ------------------------------------
        self.game_mode_name = game_mode.get().split('.')[-1]
        self.team1_name = team1_name.get()[:9]
        self.team2_name = team2_name.get()[:9]
        self.team1_color = tuple(team1_color.get())
        self.team2_color = tuple(team2_color.get())
        self.friendly_fire = friendly_fire.get()
        self.friendly_fire_on_grief = friendly_fire_on_grief.get()
        self.friendly_fire_time = grief_friendly_fire_time.get()
        self.spade_teamkills_on_grief = spade_teamkills_on_grief.get()
        self.fall_damage = fall_damage.get()
        self.teamswitch_interval = teamswitch_interval.get()
        self.teamswitch_allowed = teamswitch_allowed.get()
        self.max_players = max_players.get()
        self.melee_damage = melee_damage.get()
        self.max_connections_per_ip = max_connections_per_ip.get()
        self.passwords = passwords.get()
        self.server_prefix = server_prefix.get()
        self.time_announcements = time_announcements.get()
        self.balanced_teams = balanced_teams.get()
        self.login_retries = login_retries.get()

        # voting configuration
        self.default_ban_time = default_ban_duration.get()

        self.speedhack_detect = speedhack_detect.get()
        self.rubberband_distance = rubberband_distance.get()
        if user_blocks_only.get():
            self.user_blocks = set()
        self.set_god_build = set_god_build.get()
        self.debug_log = debug_log_enabled.get()
        if self.debug_log:
            # TODO: make this configurable
            pyspades.debug.open_debug_log(
                os.path.join(config.config_dir, 'debug.log'))
        # --- optional services --------------------------------------------
        if ssh_enabled.get():
            from piqueserver.ssh import RemoteConsole
            self.remote_console = RemoteConsole(self)
        irc = irc_options.get()
        if irc.get('enabled', False):
            from piqueserver.irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        if status_server_enabled.get():
            from piqueserver.statusserver import StatusServer
            self.status_server = StatusServer(self)
            ensureDeferred(self.status_server.listen())
        if ban_publish.get():
            from piqueserver.banpublish import PublishServer
            self.ban_publish = PublishServer(self, ban_publish_port.get())
        if bans_urls.get():
            from piqueserver import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self)
        self.start_time = time.time()
        self.end_calls = []
        # TODO: why is this here?
        create_console(self)

        # Grant configured command rights per user type.
        for user_type, func_names in rights.get().items():
            for func_name in func_names:
                commands.add_rights(user_type, func_name)

        # --- network / rotation bootstrap ---------------------------------
        self.port = port_option.get()
        ServerProtocol.__init__(self, self.port, interface)
        self.host.intercept = self.receive_callback

        try:
            self.set_map_rotation(self.config['rotation'])
        except MapNotFound as e:
            log.critical('Invalid map in map rotation (%s), exiting.' % e.map)
            raise SystemExit

        map_load_d = self.advance_rotation()
        # discard the result of the map advance for now
        map_load_d.addCallback(lambda x: self._post_init())

        ip_getter = ip_getter_option.get()
        if ip_getter:
            ensureDeferred(as_deferred(self.get_external_ip(ip_getter)))

        self.new_release = None
        notify_new_releases = config.option("release_notifications",
                                            default=True)
        if notify_new_releases.get():
            ensureDeferred(as_deferred(self.watch_for_releases()))

        self.vacuum_loop = LoopingCall(self.vacuum_bans)
        # Run the vacuum every 6 hours, and kick it off it right now
        self.vacuum_loop.start(60 * 60 * 6, True)

        reactor.addSystemEventTrigger('before', 'shutdown',
                                      lambda: ensureDeferred(self.shutdown()))
Exemplo n.º 36
0
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting and is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :param source: Optional source object for the logger (defaults to logname).
    :param json: Optional flag requesting JSON output (currently unused).
    :return: logger object
    """
    global loggers

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    global configCache

    source = kwargs.get('source', logname)
    # BUGFIX: this previously read kwargs['source'] again, so the json flag
    # could never be set independently of source.
    json = kwargs.get('json', False)

    # Determine the logging level: on first use, load per-namespace levels
    # from the [logging] section of yombo.ini into the shared configCache.
    if len(loggers) == 0:
        config_parser = ConfigParser.SafeConfigParser()
        try:
            fp = open('yombo.ini')
            config_parser.readfp(fp)
            ini = config_parser
            for option in ini.options('logging'):
                value = ini.get('logging', option)
                configCache[option] = value
            fp.close()
        except IOError:
            # No yombo.ini present; fall back to default levels.
            pass
        except ConfigParser.NoSectionError:
            # yombo.ini exists but has no [logging] section.
            pass

    logFilter = LogLevelFilterPredicate()
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(logname, LogLevel.levelWithName(iniLogLevel))
        invalidLogLevel = False
    except InvalidLogLevelError:
        # Unknown level name in yombo.ini; fall back to INFO for this namespace.
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging: console observer filtered by the per-namespace predicate.
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))

    logger = Logger(namespace=logname, source=source, observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level.
    # BUGFIX: this previously ran before loggers[logname] was assigned, so it
    # raised KeyError instead of emitting the warning.
    if invalidLogLevel:
        logger.warn("yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
                    invalidLevel=configCache[logname].lower())

    global logFirstRun
    if logFirstRun is True:
        logFirstRun = False
        # This doesn't appear to be working yet...
#      globalLogPublisher.addObserver(jsonFileLogObserver(io.open("yombo.log.json", "a")))

    return loggers[logname]
Exemplo n.º 37
0
        url: ws://master.example.com:9000/ws
        """
        self.url = url
        self.realm = realm
        self.session_class = session_class
        self.config = config

    def make(self):
        """Connect to the crossbar router/broker and start the WAMP session.

        Stores the runner and its deferred on the instance; on failure the
        reactor is stopped and the failure is re-raised.
        """
        self.runner = ApplicationRunner(self.url, self.realm, extra=dict(self.config))

        # Launch the application session; the reactor is driven elsewhere.
        self.deferred = self.runner.run(self.session_class, start_reactor=False)

        def on_failure(ex, *args):
            # Report the problem, tear down the reactor, then propagate.
            message = 'Problem in {name}, please check if "crossbar" WAMP broker is running. args={args}'.format(
                name=self.__class__.__name__, args=args)
            log.error(message)
            log.error("{ex}, args={args!s}", ex=ex.getTraceback(), args=args)
            reactor.stop()
            raise ex

        self.deferred.addErrback(on_failure)


if __name__ == '__main__':
    # Enable debug-level logging to stdout, build the WAMP application
    # against a local crossbar broker, and run the reactor until stopped.
    startLogging(sys.stdout, level=LogLevel.levelWithName('debug'))
    app = WampApplication(url=u'ws://localhost:9000/ws')
    app.make()
    reactor.run()
Exemplo n.º 38
0
class TwistOptions(Options):
    """
    Command line options for C{twist}.
    """

    # Defaults applied in __init__ and referenced by the opt_* help text below.
    defaultReactorName = "default"
    defaultLogLevel = LogLevel.info

    def __init__(self):
        Options.__init__(self)

        # Seed defaults; the opt_* handlers may overwrite these during parsing.
        self["reactorName"] = self.defaultReactorName
        self["logLevel"] = self.defaultLogLevel
        self["logFile"] = stdout

    def getSynopsis(self):
        """
        Extend the base synopsis with the plugin positional arguments.
        """
        return "{} plugin [plugin_options]".format(Options.getSynopsis(self))

    def opt_version(self):
        """
        Print version and exit.
        """
        exit(ExitStatus.EX_OK, "{}".format(version))

    def opt_reactor(self, name):
        """
        The name of the reactor to use.
        (options: {options})
        """
        # Actually actually actually install the reactor right at this very
        # moment, before any other code (for example, a sub-command plugin)
        # runs and accidentally imports and installs the default reactor.
        try:
            self["reactor"] = self.installReactor(name)
        except NoSuchReactor:
            raise UsageError("Unknown reactor: {}".format(name))
        else:
            self["reactorName"] = name

    # Interpolate the installed reactor names into --reactor's help text.
    # This runs at class-creation time, so it must follow the method above.
    opt_reactor.__doc__ = dedent(opt_reactor.__doc__ or "").format(
        options=", ".join('"{}"'.format(rt.shortName)
                          for rt in getReactorTypes()), )

    def installReactor(self, name):
        """
        Install the reactor.
        """
        if name == self.defaultReactorName:
            # Importing twisted.internet.reactor installs the default reactor.
            from twisted.internet import reactor

            return reactor
        else:
            return installReactor(name)

    def opt_log_level(self, levelName):
        """
        Set default log level.
        (options: {options}; default: "{default}")
        """
        try:
            self["logLevel"] = LogLevel.levelWithName(levelName)
        except InvalidLogLevelError:
            raise UsageError("Invalid log level: {}".format(levelName))

    # Interpolate the valid level names and the default into the help text.
    opt_log_level.__doc__ = dedent(opt_log_level.__doc__ or "").format(
        options=", ".join('"{}"'.format(constant.name)
                          for constant in LogLevel.iterconstants()),
        default=defaultLogLevel.name,
    )

    def opt_log_file(self, fileName):
        """
        Log to file. ("-" for stdout, "+" for stderr; default: "-")
        """
        if fileName == "-":
            self["logFile"] = stdout
            return

        if fileName == "+":
            self["logFile"] = stderr
            return

        try:
            # Append so repeated runs don't clobber earlier log output.
            self["logFile"] = openFile(fileName, "a")
        except EnvironmentError as e:
            exit(
                ExitStatus.EX_IOERR,
                "Unable to open log file {!r}: {}".format(fileName, e),
            )

    def opt_log_format(self, format):
        """
        Log file format.
        (options: "text", "json"; default: "text" if the log file is a tty,
        otherwise "json")
        """
        format = format.lower()

        if format == "text":
            self["fileLogObserverFactory"] = textFileLogObserver
        elif format == "json":
            self["fileLogObserverFactory"] = jsonFileLogObserver
        else:
            raise UsageError("Invalid log format: {}".format(format))
        self["logFormat"] = format

    opt_log_format.__doc__ = dedent(opt_log_format.__doc__ or "")

    def selectDefaultLogObserver(self):
        """
        Set C{fileLogObserverFactory} to the default appropriate for the
        chosen C{logFile}.
        """
        if "fileLogObserverFactory" not in self:
            logFile = self["logFile"]

            # Human-readable text for terminals, JSON for files/pipes.
            if hasattr(logFile, "isatty") and logFile.isatty():
                self["fileLogObserverFactory"] = textFileLogObserver
                self["logFormat"] = "text"
            else:
                self["fileLogObserverFactory"] = jsonFileLogObserver
                self["logFormat"] = "json"

    def parseOptions(self, options=None):
        """
        Parse command-line options, filling in log-observer and reactor
        defaults for anything the user did not specify.
        """
        self.selectDefaultLogObserver()

        Options.parseOptions(self, options=options)

        if "reactor" not in self:
            self["reactor"] = self.installReactor(self["reactorName"])

    @property
    def plugins(self):
        """
        Mapping of tap name to IServiceMaker plugin, discovered lazily
        and cached on first access.
        """
        if "plugins" not in self:
            plugins = {}
            for plugin in getPlugins(IServiceMaker):
                plugins[plugin.tapname] = plugin
            self["plugins"] = plugins

        return self["plugins"]

    @property
    def subCommands(self):
        """
        Yield a usage.Options sub-command entry for each discovered plugin.
        """
        plugins = self.plugins
        for name in sorted(plugins):
            plugin = plugins[name]
            yield (
                plugin.tapname,
                None,
                # Avoid resolving the options attribute right away, in case
                # it's a property with a non-trivial getter (eg, one which
                # imports modules).
                lambda plugin=plugin: plugin.options(),
                plugin.description,
            )

    def postOptions(self):
        """
        Validate parsed options; a plugin sub-command is mandatory.
        """
        Options.postOptions(self)

        if self.subCommand is None:
            raise UsageError("No plugin specified.")
Exemplo n.º 39
0
 def __init__(self, message='Item received: {item}', level='debug'):
     """Remember the message template and resolve the level name to a LogLevel."""
     self.message = message
     self.level = LogLevel.levelWithName(level)
Exemplo n.º 40
0
if __name__ == "__main__":
    # Parse the command line arguments
    parser = argparse.ArgumentParser(description="A modular Twisted IRC bot.")
    parser.add_argument("-c", "--config", help="The configuration file to use", type=str, default="heufybot.yaml")
    parser.add_argument("-f", "--logfile", help="The file the log will be written to", type=str, default="heufybot.log")
    parser.add_argument("-l", "--loglevel", help="The logging level the bot will use", type=str, default="INFO")
    options = parser.parse_args()

    # Start the bot
    heufybot = HeufyBot(options.config)

    # Determine the logging level
    logFilter = LogLevelFilterPredicate()
    try:
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.levelWithName(options.loglevel.lower()))
        invalidLogLevel = False
    except InvalidLogLevelError:
        # Unknown level name on the command line; fall back to INFO.
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    # BUGFIX: honor the -f/--logfile option; the file name was previously
    # hard-coded to "heufybot.log", silently ignoring the user's choice.
    logFile = DailyLogFile(options.logfile, "")
    fileObserver = FileLogObserver(logFile, logFormat)
    fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter,))
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))
    heufybot.log = Logger("heufybot")
    globalLogPublisher.addObserver(fileFilterObserver)
    globalLogPublisher.addObserver(consoleFilterObserver)

    # BUGFIX: invalidLogLevel was computed but never reported; tell the user
    # now that logging is wired up.
    if invalidLogLevel:
        heufybot.log.warn("Invalid log level {invalidLevel}; defaulting to INFO.",
                          invalidLevel=options.loglevel)

    heufybot.log.info("Starting bot...")
Exemplo n.º 41
0
class IMSOptions(Options):
    """
    Command line options for all IMS commands.
    """

    log: ClassVar[Logger] = Logger()
    defaultLogLevel: ClassVar = LogLevel.info

    # Sub-commands dispatched by twisted.python.usage.Options.
    subCommands: ClassVar = [
        ["server", None, ServerOptions, "Run the IMS server"],
        ["export", None, ExportOptions, "Export data"],
        ["import", None, ImportOptions, "Import data"],
        ["compare", None, CompareOptions, "Compare two export files"],
    ]

    # defaultSubCommand = "server"

    def getSynopsis(self) -> str:
        """
        Extend the base synopsis with the command positional arguments.
        """
        return f"{Options.getSynopsis(self)} command [command_options]"

    def opt_config(self, path: str) -> None:
        """
        Location of configuration file.
        """
        # The cast works around Options' untyped mapping interface.
        cast(MutableMapping, self)["configFile"] = Path(path)

    def opt_log_level(self, levelName: str) -> None:
        """
        Set default log level.
        (options: {options}; default: "{default}")
        """
        try:
            self["logLevel"] = LogLevel.levelWithName(levelName)
        except InvalidLogLevelError:
            raise UsageError(f"Invalid log level: {levelName}")

    # Interpolate the valid level names and default into the help text.
    # Runs at class-creation time, so it must follow the method definition.
    opt_log_level.__doc__ = dedent(cast(str, opt_log_level.__doc__)).format(
        options=", ".join(f'"{l.name}"' for l in LogLevel.iterconstants()),
        default=defaultLogLevel.name,
    )

    def opt_log_file(self, fileName: str) -> None:
        """
        Log to file. ("-" for stdout, "+" for stderr; default: "-")
        """
        # Only record the name here; initLogFile() opens it after parsing.
        self["logFileName"] = fileName

    def opt_log_format(self, logFormatName: str) -> None:
        """
        Log file format.
        (options: "text", "json"; default: "text" if the log file is a tty,
        otherwise "json")
        """
        try:
            logFormat = LogFormat[logFormatName.lower()]
        except KeyError:
            raise UsageError(f"Invalid log format: {logFormatName}")

        if logFormat is LogFormat.text:
            self["fileLogObserverFactory"] = textFileLogObserver
        elif logFormat is LogFormat.json:
            self["fileLogObserverFactory"] = jsonFileLogObserver
        else:
            raise AssertionError(f"Unhandled LogFormat: {logFormat}")

        self["logFormat"] = logFormat

    opt_log_format.__doc__ = dedent(cast(str, opt_log_format.__doc__))

    def opt_option(self, arg: str) -> None:
        """
        Set a configuration option.
        Format is "[section]name=value", eg: "[Core]Host=0.0.0.0".
        """
        try:
            if arg.startswith("["):
                section, rest = arg[1:].split("]", 1)
            else:
                # No explicit section: default to Core.
                section = "Core"
                rest = arg
            name, value = rest.split("=", 1)
        except ValueError:
            raise UsageError(f"Invalid option specifier: {arg}")

        if "overrides" not in self:
            self["overrides"] = []

        self["overrides"].append(
            Override(section=section, name=name, value=value))

    def initConfig(self) -> None:
        """
        Load the configuration file and merge it with command-line
        options; command-line values take precedence over file values.
        Exits with EX_CONFIG on any failure.
        """
        try:
            configFile = cast(Optional[Path],
                              cast(Mapping, self).get("configFile"))

            if configFile and not configFile.is_file():
                self.log.info("Config file not found.")
                configFile = None

            configuration = Configuration.fromConfigFile(configFile)

            options = cast(MutableMapping, self)

            if "overrides" in options:
                for _override in options["overrides"]:
                    raise NotImplementedError("Option overrides unimplemented")

            # Command-line log file wins over the configured path.
            if "logFileName" in options:
                configuration = configuration.replace(
                    logFilePath=Path(options["logFileName"]))

            self.opt_log_file(str(configuration.logFilePath))

            # Command-line log format wins; otherwise adopt the config's.
            if "logFormat" in options:
                configuration = configuration.replace(
                    logFormat=options["logFormat"])
            elif configuration.logFormat is not None:
                self.opt_log_format(configuration.logFormat.name)

            # Command-line log level wins; otherwise adopt the config's.
            if "logLevel" in options:
                configuration = configuration.replace(
                    logLevelName=options["logLevel"].name)
            elif configuration.logLevelName is not None:
                self.opt_log_level(configuration.logLevelName)

            options["configuration"] = configuration

        except Exception as e:
            exit(ExitStatus.EX_CONFIG, str(e))

    def initLogFile(self) -> None:
        """
        Open the selected log file for appending.
        """
        self["logFile"] = openFile(self["logFileName"], "a")

    def selectDefaultLogObserver(self) -> None:
        """
        Set :func:`fileLogObserverFactory` to the default appropriate for the
        chosen log file.
        """
        if "fileLogObserverFactory" not in self:
            logFile = self["logFile"]

            # Human-readable text for terminals, JSON for files/pipes.
            if hasattr(logFile, "isatty") and logFile.isatty():
                self["fileLogObserverFactory"] = textFileLogObserver
                self["logFormat"] = "text"
            else:
                self["fileLogObserverFactory"] = jsonFileLogObserver
                self["logFormat"] = "json"

    def parseOptions(self, options: Optional[Sequence[str]] = None) -> None:
        """
        Parse options, then open the log file and pick the observer default.
        """
        Options.parseOptions(self, options=options)

        self.initLogFile()
        self.selectDefaultLogObserver()

    def postOptions(self) -> None:
        """
        Finish option processing by loading the configuration.
        """
        Options.postOptions(self)

        self.initConfig()
Exemplo n.º 42
0
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting and is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :param source: Optional source object for the logger (defaults to logname).
    :param json: Optional flag requesting JSON output (currently unused).
    :return: logger object
    """
    global loggers
    global observers
    global configCache
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    source = kwargs.get('source', logname)
    # BUGFIX: this previously read kwargs['source'] again, so the json flag
    # could never be set independently of source.
    json = kwargs.get('json', False)

    # Determine the logging level: on first use, load per-namespace levels
    # from the [logging] section of yombo.ini into the shared configCache.
    if len(loggers) == 0:
        # SafeConfigParser and readfp() were deprecated aliases removed in
        # Python 3.12; use ConfigParser and read_file() instead.
        config_parser = configparser.ConfigParser()
        try:
            with open('yombo.ini') as fp:
                config_parser.read_file(fp)
            for option in config_parser.options('logging'):
                configCache[option] = config_parser.get('logging', option)
        except IOError:
            # No yombo.ini present; fall back to default levels.
            pass
        except configparser.NoSectionError:
            # yombo.ini exists but has no [logging] section.
            pass

    logFilter = LogLevelFilterPredicate()
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(
                logname, LogLevel.levelWithName(iniLogLevel))
        invalidLogLevel = False
    except InvalidLogLevelError:
        # Unknown level name in yombo.ini; fall back to INFO for this namespace.
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging: console observer filtered by the per-namespace predicate.
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (logFilter, ))

    logger = Logger(namespace=logname,
                    source=source,
                    observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level.
    # BUGFIX: this previously ran before loggers[logname] was assigned, so it
    # raised KeyError instead of emitting the warning.
    if invalidLogLevel:
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
            invalidLevel=configCache[logname].lower())

    # NOTE(review): file observers (json/text) and log rotation were left
    # disabled in the original; re-enable via globalLogPublisher when ready.

    return loggers[logname]
Exemplo n.º 43
0
 def set_log_level(cls, log_level_name):
     """Resolve *log_level_name* to a LogLevel, apply it, and ensure
     the class-level logging machinery has been started."""
     cls.log_level = LogLevel.levelWithName(log_level_name)
     if cls.started:
         return
     cls.start()
Exemplo n.º 44
0
def stdlib_log_level_to_twisted(level: str) -> LogLevel:
    """
    Convert a stdlib log level name to Twisted's log level.

    Twisted names its warning level "warn", so that substring is
    rewritten before looking the name up.
    """
    normalized = level.lower()
    normalized = normalized.replace("warning", "warn")
    return LogLevel.levelWithName(normalized)