Example #1
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    log_level_filter = LogLevelFilterPredicate(
        LogLevel.levelWithName(log_level))
    log_level_filter.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    log_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [log_level_filter])
    globalLogPublisher.addObserver(log_observer)
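# A minimal usage sketch, not part of the original source: every imported
# name is a public twisted.logger API, and "myapp" is a hypothetical namespace.
import sys
from twisted.logger import (Logger, LogLevel, LogLevelFilterPredicate,
                            FilteringLogObserver, textFileLogObserver,
                            globalLogPublisher)

init_logging("info")
Logger(namespace="myapp").info("logging initialised")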
Example #2
def getLogger(level):
    loglevel = getattr(LogLevel, level)
    filter_ = LogLevelFilterPredicate(defaultLogLevel=loglevel)
    if loglevel > LogLevel.debug:
        filter_.setLogLevelForNamespace('stdout', LogLevel.warn)
    observer = FilteringLogObserver(stdoutFileLogObserver(), [filter_])
#     observer = FilteringLogObserver(globalLogPublisher, [filter_])
#     log = Logger()

#     globalLogBeginner.beginLoggingTo([observer])
    globalLogPublisher.addObserver(observer)
    return lambda event: None
Example #3
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
Example #4
class Logger(_Logger):
    """
    Logging object.
    """

    filterPublisher = None
    filterObserver = None
    filterPredicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)

    logBeginner = None

    @classmethod
    def makeFilteredFileLogObserver(cls, stream, withTime=True):
        """
        For a child process that has its stdout captured by the master process to be logged by the master,
        we strip out the time from the log entry since the master process will always add one. Setting
        C{withTime} to L{False} will ensure no time is generated.
        """
        assert (cls.filterPublisher is None
                and cls.filterObserver is None), "Only call this once"

        timeFormat = formatTime if withTime else lambda _: u""
        cls.filterObserver = FileLogObserver(
            stream,
            lambda event: formatEventAsClassicLogText(event,
                                                      formatTime=timeFormat))
        cls.filterPublisher = LogPublisher(cls.filterObserver)
        return cls.filterPublisher

    @classmethod
    def addFilteredObserver(cls, observer):
        log.addObserver(FilteringLogObserver(observer, [cls.filterPredicate]))

    @classmethod
    def beginLoggingTo(cls, observers, redirectStandardIO=True):
        if cls.logBeginner:
            cls.logBeginner.beginLoggingTo(
                observers, redirectStandardIO=redirectStandardIO)

    def emit(self, level, format=None, **kwargs):
        """
        Fix {Logger.emit} to work with our legacy use and handle utf-8 properly.
        """

        # For old-style logging, kwargs will be empty but the format string
        # could contain "{}", so we turn the format string into a kwarg.
        if len(kwargs) == 0:
            kwargs["msg"] = format
            format = "{msg}"

        # Assume that any L{bytes} in the format or kwargs are utf-8 encoded strings
        if format is not None and isinstance(format, bytes):
            format = format.decode("utf-8")
        for k, v in kwargs.items():
            if isinstance(v, bytes):
                kwargs[k] = v.decode("utf-8")

        super(Logger, self).emit(level, format=format, **kwargs)

    def levels(self):
        """
        Get the L{LogLevelFilterPredicate} associated with this logger.
        """
        return self.filterPredicate
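# A short demonstration sketch, not from the original source, of what the
# emit() override above buys: old-style calls that pass a bare message work,
# and byte strings are decoded as UTF-8 before formatting.
log = Logger(namespace="demo")
log.emit(LogLevel.info, b"caf\xc3\xa9")          # decoded to u"café" first
log.emit(LogLevel.warn, "plain legacy message")  # wrapped as format="{msg}"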
Example #5
def startLogging(settings, stream=None, level=LogLevel.debug):
    global predicate

    fileObserver = logObserver(stream)
    predicate    = LogLevelFilterPredicate(defaultLogLevel=level)

    if settings.options.debug_mqtt:
        predicate.setLogLevelForNamespace('kotori.daq.services.mig', LogLevel.debug)
        predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit', LogLevel.debug)

    if settings.options.debug_mqtt_driver:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug)
    else:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info)

    if settings.options.debug_influx:
        predicate.setLogLevelForNamespace('kotori.daq.storage.influx', LogLevel.debug)

    if settings.options.debug_io:
        predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug)

    observers    = [ FilteringLogObserver(observer=fileObserver, predicates=[predicate]) ]
    globalLogBeginner.beginLoggingTo(observers)
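# A standalone sketch (not from the original source) of how the per-namespace
# levels above interact with the default: the most specific configured
# namespace prefix wins, and everything else falls back to defaultLogLevel.
from twisted.logger import LogLevel, LogLevelFilterPredicate

p = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
p.setLogLevelForNamespace('kotori.daq.intercom.mqtt', LogLevel.debug)
assert p.logLevelForNamespace('kotori.daq.intercom.mqtt') is LogLevel.debug
assert p.logLevelForNamespace('kotori.daq.intercom.mqtt.sub') is LogLevel.debug
assert p.logLevelForNamespace('kotori.io') is LogLevel.info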
Example #6
def logger():
    predicate = LogLevelFilterPredicate(LogLevel.debug)
    return FilteringLogObserver(FileLogObserver(sys.stdout, formator),
                                [predicate])
Example #7
def FileObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    file_observer = textFileLogObserver(io.open(path, 'at'))
    return FilteringLogObserver(file_observer, [
        LogLevelFilterPredicate(log_level),
    ])
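# Hypothetical wiring for the factory above (not part of the original snippet;
# the path is made up, and DEFAULT_LOG_FILE is assumed to be defined alongside
# the function):
from twisted.logger import globalLogPublisher

globalLogPublisher.addObserver(FileObserver('/tmp/app.log', LogLevel.info))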
Example #8

def main(config_path=None):
    """
    main.

    @type config_path: String
    @param config_path: optional alternative path to a config file
    """
    log.debug("Welcome to the jungle!")

    try:
        cfg = load_config(config_path) if config_path else load_config()
    except ConfigObjError as e:
        log.failure(str(e))

    site_plugins = load_site_plugins(cfg['sites'])
    db_plugin = load_database_plugin(cfg['database'])
    plugins = {'sites': site_plugins, 'database': db_plugin}
    import_data(plugins)


if __name__ == '__main__':
    log_filter = LogLevelFilterPredicate(LogLevel.info)
    all_observer = textFileLogObserver(open("spider.log", 'w'))
    filtered_observer = FilteringLogObserver(textFileLogObserver(sys.stdout),
                                             [log_filter])
    globalLogPublisher.addObserver(all_observer)
    globalLogPublisher.addObserver(filtered_observer)
    sys.exit(main())
Example #9
def hendrixObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    json_observer = jsonFileLogObserver(io.open(path, 'a'))
    return FilteringLogObserver(json_observer, [
        LogLevelFilterPredicate(log_level),
    ])
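# A follow-on sketch, not from the original source: logs written by
# jsonFileLogObserver can be replayed with twisted.logger's matching reader.
import io
from twisted.logger import eventsFromJSONLogFile, formatEvent

with io.open(DEFAULT_LOG_FILE) as f:
    for event in eventsFromJSONLogFile(f):
        print(formatEvent(event))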
Example #10
    def __init__(self, interface: bytes, config_dict: Dict[str, Any]) -> None:
        # logfile path relative to config dir if not abs path
        log_filename = logfile.get()
        if log_filename.strip():  # catches empty filename
            if not os.path.isabs(log_filename):
                log_filename = os.path.join(config.config_dir, log_filename)
            ensure_dir_exists(log_filename)
            if logging_rotate_daily.get():
                logging_file = DailyLogFile(log_filename, '.')
            else:
                logging_file = open(log_filename, 'a')
            predicate = LogLevelFilterPredicate(
                LogLevel.levelWithName(loglevel.get()))
            observers = [
                FilteringLogObserver(
                    textFileLogObserver(sys.stderr), [predicate]),
                FilteringLogObserver(
                    textFileLogObserver(logging_file), [predicate])
            ]
            globalLogBeginner.beginLoggingTo(observers)
            log.info('piqueserver started on %s' % time.strftime('%c'))

        self.config = config_dict
        if random_rotation.get():
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle
        self.default_time_limit = default_time_limit.get()
        self.default_cap_limit = cap_limit.get()
        self.advance_on_win = int(advance_on_win.get())
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()

        # attempt to load a saved bans list
        try:
            with open(os.path.join(config.config_dir, bans_file.get()), 'r') as f:
                self.bans.read_list(json.load(f))
            log.debug("loaded {count} bans", count=len(self.bans))
        except FileNotFoundError:
            log.debug("skip loading bans: file unavailable",
                      count=len(self.bans))
        except IOError as e:
            log.error('Could not read bans file ({}): {}'.format(bans_file.get(), e))
        except ValueError as e:
            log.error('Could not parse bans file ({}): {}'.format(bans_file.get(), e))

        self.hard_bans = set()  # possible DDoS'ers are added here
        self.player_memory = deque(maxlen=100)
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            log.warn('(server name too long; it will be truncated to "%s")' % (
                self.name[:MAX_SERVER_NAME_SIZE]))
        self.respawn_time = respawn_time_option.get()
        self.respawn_waves = respawn_waves.get()

        # since AoS only supports CTF and TC at a protocol level, we need to get
        # the base game mode if we are using a custom game mode.
        game_mode_name = game_mode.get()
        if game_mode_name == 'ctf':
            self.game_mode = CTF_MODE
        elif game_mode.get() == 'tc':
            self.game_mode = TC_MODE
        elif self.game_mode not in [CTF_MODE, TC_MODE]:
            raise ValueError(
                'invalid game mode: custom game mode "{}" does not set '
                'protocol.game_mode to one of TC_MODE or CTF_MODE. Are '
                'you sure the thing you have specified is a game mode?'.format(
                    game_mode_name))

        self.game_mode_name = game_mode.get().split('.')[-1]
        self.team1_name = team1_name.get()[:9]
        self.team2_name = team2_name.get()[:9]
        self.team1_color = tuple(team1_color.get())
        self.team2_color = tuple(team2_color.get())
        self.friendly_fire = friendly_fire.get()
        self.friendly_fire_on_grief = friendly_fire_on_grief.get()
        self.friendly_fire_time = grief_friendly_fire_time.get()
        self.spade_teamkills_on_grief = spade_teamkills_on_grief.get()
        self.fall_damage = fall_damage.get()
        self.teamswitch_interval = teamswitch_interval.get()
        self.teamswitch_allowed = teamswitch_allowed.get()
        self.max_players = max_players.get()
        self.melee_damage = melee_damage.get()
        self.max_connections_per_ip = max_connections_per_ip.get()
        self.passwords = passwords.get()
        self.server_prefix = server_prefix.get()
        self.time_announcements = time_announcements.get()
        self.balanced_teams = balanced_teams.get()
        self.login_retries = login_retries.get()

        # voting configuration
        self.default_ban_time = default_ban_duration.get()

        self.speedhack_detect = speedhack_detect.get()
        self.rubberband_distance = rubberband_distance.get()
        if user_blocks_only.get():
            self.user_blocks = set()
        self.set_god_build = set_god_build.get()
        self.debug_log = debug_log_enabled.get()
        if self.debug_log:
            # TODO: make this configurable
            pyspades.debug.open_debug_log(
                os.path.join(config.config_dir, 'debug.log'))
        if ssh_enabled.get():
            from piqueserver.ssh import RemoteConsole
            self.remote_console = RemoteConsole(self)
        irc = irc_options.get()
        if irc.get('enabled', False):
            from piqueserver.irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        if status_server_enabled.get():
            from piqueserver.statusserver import StatusServer
            self.status_server = StatusServer(self)
            ensureDeferred(self.status_server.listen())
        if ban_publish.get():
            from piqueserver.banpublish import PublishServer
            self.ban_publish = PublishServer(self, ban_publish_port.get())
        if bans_config_urls.get():
            from piqueserver import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self)
        self.start_time = time.time()
        self.end_calls = []
        # TODO: why is this here?
        create_console(self)

        for user_type, func_names in rights.get().items():
            for func_name in func_names:
                commands.add_rights(user_type, func_name)

        if everyone_is_admin.get():
            self.everyone_is_admin = True

        self.port = port_option.get()
        ServerProtocol.__init__(self, self.port, interface)
        self.host.intercept = self.receive_callback

        try:
            self.set_map_rotation(self.config['rotation'])
        except MapNotFound as e:
            log.critical('Invalid map in map rotation (%s), exiting.' % e.map)
            raise SystemExit

        map_load_d = self.advance_rotation()
        # discard the result of the map advance for now
        map_load_d.addCallback(lambda x: self._post_init())

        ip_getter = ip_getter_option.get()
        if ip_getter:
            ensureDeferred(as_deferred(self.get_external_ip(ip_getter)))

        self.new_release = None
        notify_new_releases = config.option(
            "release_notifications", default=True)
        if notify_new_releases.get():
            ensureDeferred(as_deferred(self.watch_for_releases()))

        self.vacuum_loop = LoopingCall(self.vacuum_bans)
        # Run the vacuum every 6 hours, and kick it off it right now
        self.vacuum_loop.start(60 * 60 * 6, True)

        reactor.addSystemEventTrigger(
            'before', 'shutdown', lambda: ensureDeferred(self.shutdown()))
Example #11
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    global configCache

    loglevel = None
    source = kwargs.get('source', logname)
    json = kwargs.get('json', False)

    # Determine the logging level
    if len(loggers) == 0:
        config_parser = ConfigParser.SafeConfigParser()
        try:
            fp = open('yombo.ini')
            config_parser.readfp(fp)
            ini = config_parser
            for option in ini.options('logging'):
                value =  ini.get('logging', option)
                configCache[option] = value
            fp.close()
        except IOError:
            pass
        except ConfigParser.NoSectionError:
            pass

    logFilter = LogLevelFilterPredicate()
    invalidLogLevel = False
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(logname, LogLevel.levelWithName(iniLogLevel))
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))

    logger = Logger(namespace=logname, source=source, observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level (deferred until
    # the logger exists in the cache)
    if invalidLogLevel:
        logger.warn("yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
                    invalidLevel=configCache[logname].lower())

    global logFirstRun
    if logFirstRun is True:
        logFirstRun = False
        # This doesn't appear to be working yet...
        # globalLogPublisher.addObserver(jsonFileLogObserver(io.open("yombo.log.json", "a")))

    return loggers[logname]
Example #12
    def makeService(store):
        return UpgraderService(store, options, output, reactor, config)

    def onlyUpgradeEvents(eventDict):
        text = formatEvent(eventDict)
        output.write(
            formatTime(eventDict.get("log_time", time.time())) + " " + text +
            "\n")
        output.flush()

    if not options["status"] and not options["check"]:
        # When doing an upgrade, always send L{LogLevel.warn} and above to the tool output
        log.observer.addObserver(
            FilteringLogObserver(onlyUpgradeEvents, [
                LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn),
            ]))

    def customServiceMaker():
        customService = CalDAVServiceMaker()
        customService.doPostImport = options["postprocess"]
        return customService

    def _patchConfig(config):
        config.FailIfUpgradeNeeded = options["status"] or options["check"]
        config.CheckExistingSchema = options["check"]
        if options["prefix"]:
            config.UpgradeHomePrefix = options["prefix"]
        if not options["status"] and not options["check"]:
            config.DefaultLogLevel = "debug"
Example #13
def get_logger(logname="yombolog", **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    global observers
    global log_levels
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    source = kwargs.get("source", logname)

    # Determine the logging level
    if len(loggers) == 0:
        try:
            from yombo.core.settings import logger_settings
            log_levels = logger_settings
        except ImportError:
            log_levels = []

    ini_log_level = "info"
    log_filter = LogLevelFilterPredicate()
    logname_search = copy(logname).lower()

    while len(logname_search) > 0 and len(log_levels):
        try:
            ini_log_level = log_levels[logname_search]
            break
        except KeyError:
            pass
        # Strip the last dotted component to search the parent namespace.
        logname_search = ".".join(logname_search.rsplit(".")[:-1])

    if isinstance(ini_log_level, str) is False:
        ini_log_level = "info"

    invalid_log_level = None
    try:
        log_filter.setLogLevelForNamespace(logname,
                                           LogLevel.levelWithName(ini_log_level))
    except InvalidLogLevelError:
        # Fall back to INFO; yell at the user once the logger exists.
        log_filter.setLogLevelForNamespace(logname, LogLevel.info)
        invalid_log_level = ini_log_level

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (log_filter, ))

    logger = Logger(namespace=logname,
                    source=source,
                    observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level
    if invalid_log_level is not None:
        logger.warn(
            "yombo.toml file contained invalid log level {invalidLevel}, "
            "level has been set to INFO instead.",
            invalidLevel=invalid_log_level)

    return loggers[logname]
Example #14
    def __init__(self, interface: bytes, config_dict: Dict[str, Any]) -> None:
        # logfile path relative to config dir if not abs path
        log_filename = logfile.get()
        if log_filename.strip():  # catches empty filename
            if not os.path.isabs(log_filename):
                log_filename = os.path.join(config.config_dir, log_filename)
            ensure_dir_exists(log_filename)
            if logging_rotate_daily.get():
                logging_file = DailyLogFile(log_filename, '.')
            else:
                logging_file = open(log_filename, 'a')
            predicate = LogLevelFilterPredicate(
                LogLevel.levelWithName(loglevel.get()))
            observers = [
                FilteringLogObserver(textFileLogObserver(sys.stderr),
                                     [predicate]),
                FilteringLogObserver(textFileLogObserver(logging_file),
                                     [predicate])
            ]
            globalLogBeginner.beginLoggingTo(observers)
            log.info('piqueserver started on %s' % time.strftime('%c'))

        self.config = config_dict
        if random_rotation.get():
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle  # pylint: disable=redefined-variable-type
        self.default_time_limit = default_time_limit.get()
        self.default_cap_limit = cap_limit.get()
        self.advance_on_win = int(advance_on_win.get())
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()

        # attempt to load a saved bans list
        try:
            with open(os.path.join(config.config_dir, bans_file.get()),
                      'r') as f:
                self.bans.read_list(json.load(f))
            log.debug("loaded {count} bans", count=len(self.bans))
        except FileNotFoundError:
            log.debug("skip loading bans: file unavailable",
                      count=len(self.bans))
        except IOError as e:
            log.error('Could not read bans.txt: {}'.format(e))
        except ValueError as e:
            log.error('Could not parse bans.txt: {}'.format(e))

        self.hard_bans = set()  # possible DDoS'ers are added here
        self.player_memory = deque(maxlen=100)
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            log.warn('(server name too long; it will be truncated to "%s")' %
                     (self.name[:MAX_SERVER_NAME_SIZE]))
        self.respawn_time = respawn_time_option.get()
        self.respawn_waves = respawn_waves.get()
        if game_mode.get() == 'ctf':
            self.game_mode = CTF_MODE
        elif game_mode.get() == 'tc':
            self.game_mode = TC_MODE
        elif self.game_mode is None:
            raise NotImplementedError('invalid game mode: %s' % game_mode)
        self.game_mode_name = game_mode.get().split('.')[-1]
        self.team1_name = team1_name.get()[:9]
        self.team2_name = team2_name.get()[:9]
        self.team1_color = tuple(team1_color.get())
        self.team2_color = tuple(team2_color.get())
        self.friendly_fire = friendly_fire.get()
        self.friendly_fire_on_grief = friendly_fire_on_grief.get()
        self.friendly_fire_time = grief_friendly_fire_time.get()
        self.spade_teamkills_on_grief = spade_teamkills_on_grief.get()
        self.fall_damage = fall_damage.get()
        self.teamswitch_interval = teamswitch_interval.get()
        self.teamswitch_allowed = teamswitch_allowed.get()
        self.max_players = max_players.get()
        self.melee_damage = melee_damage.get()
        self.max_connections_per_ip = max_connections_per_ip.get()
        self.passwords = passwords.get()
        self.server_prefix = server_prefix.get()
        self.time_announcements = time_announcements.get()
        self.balanced_teams = balanced_teams.get()
        self.login_retries = login_retries.get()

        # voting configuration
        self.default_ban_time = default_ban_duration.get()

        self.speedhack_detect = speedhack_detect.get()
        if user_blocks_only.get():
            self.user_blocks = set()
        self.set_god_build = set_god_build.get()
        self.debug_log = debug_log_enabled.get()
        if self.debug_log:
            # TODO: make this configurable
            pyspades.debug.open_debug_log(
                os.path.join(config.config_dir, 'debug.log'))
        if ssh_enabled.get():
            from piqueserver.ssh import RemoteConsole
            self.remote_console = RemoteConsole(self)
        irc = irc_options.get()
        if irc.get('enabled', False):
            from piqueserver.irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        if status_server_enabled.get():
            from piqueserver.statusserver import StatusServerFactory
            self.status_server = StatusServerFactory(self)
        if ban_publish.get():
            from piqueserver.banpublish import PublishServer
            self.ban_publish = PublishServer(self, ban_publish_port.get())
        if bans_urls.get():
            from piqueserver import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self)
        self.start_time = reactor.seconds()
        self.end_calls = []
        # TODO: why is this here?
        create_console(self)

        for user_type, func_names in rights.get().items():
            for func_name in func_names:
                commands.add_rights(user_type, func_name)

        port = self.port = port_option.get()
        ServerProtocol.__init__(self, port, interface)
        self.host.intercept = self.receive_callback
        try:
            self.set_map_rotation(self.config['rotation'])
        except MapNotFound as e:
            log.critical('Invalid map in map rotation (%s), exiting.' % e.map)
            raise SystemExit

        self.update_format()
        self.tip_frequency = tip_frequency.get()
        if self.tips is not None and self.tip_frequency > 0:
            reactor.callLater(self.tip_frequency * 60, self.send_tip)

        self.master = register_master_option.get()
        self.set_master()

        self.http_agent = web_client.Agent(reactor)

        ip_getter = ip_getter_option.get()
        if ip_getter:
            self.get_external_ip(ip_getter)
Example #15
class _LogManager(object):

    """
    Twisted logger manager: tracks filtering predicates to support dynamic
    per-namespace log level changes at runtime and supports adding/removing
    observers at runtime.
    """

    def __init__(self):

        self._predicate = None

        # Track dynamically added/removed observers:
        # - keys: observers
        # - values: FilteringLogObserver instances
        self._observers = {}


    def setup(self, level='warn', namespace_levels=None, text_file=sys.stderr,
              time_format='%H:%M:%S.%f', handle_stdlib=True, stdlib_level='notset',
              stdlib_prefix='stdlib.'):

        """
        Initiates the twisted.logger system:
        - level: default log level as a string (ie: 'warn', 'info', ....).
        - namespace_levels: a dict of namespaces/log level names.
        - text_file: where to write the log to.
        - time_format: as supported by datetime.strftime.
        - handle_stdlib: True/False.
        - stdlib_level: level name, above which stdlib logging is handled.
        - stdlib_prefix: added to stdlib logger name, used as namespace.
        """

        file_observer = textFileLogObserver(text_file, timeFormat=time_format)
        self._predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(level),
        )
        if namespace_levels:
            for namespace, level_name in namespace_levels.items():
                level = LogLevel.levelWithName(level_name)
                self._predicate.setLogLevelForNamespace(namespace, level)
        globalLogBeginner.beginLoggingTo([self._filtered_observer(file_observer)])

        if handle_stdlib:
            self._handle_stdlib(stdlib_level, stdlib_prefix)


    def _filtered_observer(self, observer):

        # Wraps `observer` in a t.l.FilteringLogObserver using the
        # shared t.l.LogLevelFilterPredicate in `self._predicate`

        return FilteringLogObserver(observer, [self._predicate])


    @staticmethod
    def _handle_stdlib(level_name, prefix):

        """
        Directs standard library logging records to twisted.logger.
        Standard library log recods will be handled at `level_name` or
        above and logged to a namespace prefixed by `prefix`.
        """

        stdlib_root_logger = logging.getLogger()
        try:
            level = getattr(logging, level_name.upper())
        except AttributeError:
            raise ValueError(level_name)
        stdlib_root_logger.setLevel(level)
        handler = _TwistedLoggerHandler(prefix)
        stdlib_root_logger.addHandler(handler)


    def set_level(self, namespace=None, level_name=None):

        """
        Change the logging level of namespace to level.
        If namespace is None, sets all namespaces to level_name.
        If level_name is None, uses the default log level.
        """

        if level_name:
            level = LogLevel.levelWithName(level_name)
        else:
            level = self._predicate.defaultLogLevel
        if namespace:
            self._predicate.setLogLevelForNamespace(namespace, level)
        else:
            self._predicate.defaultLogLevel = level
            self._predicate.clearLogLevels()


    def add_observer(self, observer):

        """
        Wraps `observer` in a FilteringLogObserver and adds it to the global
        log publisher (filtering agrees with `setup` and `set_level` calls).
        """

        filtered_observer = self._filtered_observer(observer)
        globalLogPublisher.addObserver(filtered_observer)
        self._observers[observer] = filtered_observer


    def remove_observer(self, observer):

        """
        Locates the FilteringLogObserver wrapping `observer` and removes it
        from the global log publisher.
        """

        filtered_observer = self._observers[observer]
        globalLogPublisher.removeObserver(filtered_observer)
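# A usage sketch under assumptions (the namespaces are hypothetical; the
# module presumably creates a single shared manager instance):
manager = _LogManager()
manager.setup(level='warn', namespace_levels={'myapp.db': 'debug'})
manager.set_level('myapp.net', 'info')  # adjust one namespace at runtime
manager.set_level()                     # reset every namespace to the default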
Example #16
def get_logger(logname="yombolog", **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    global observers
    global log_levels
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    source = kwargs.get("source", logname)

    # Determine the logging level
    if len(loggers) == 0:
        import yombo.core.settings as settings
        if settings.yombo_ini is not False and "logging" in settings.yombo_ini:
            log_levels = settings.yombo_ini["logging"]

    log_filter = LogLevelFilterPredicate()
    log_name_search = copy(logname)
    ini_log_level = "info"
    while len(log_name_search) > 0:
        if log_name_search in log_levels:
            ini_log_level = log_levels[log_name_search].lower()
            break
        # Strip the last dotted component to search the parent namespace.
        log_name_search = ".".join(log_name_search.rsplit(".")[:-1])

    invalid_log_level = None
    try:
        log_filter.setLogLevelForNamespace(
            logname, LogLevel.levelWithName(ini_log_level))
    except InvalidLogLevelError:
        # Fall back to INFO; yell at the user once the logger exists.
        log_filter.setLogLevelForNamespace(logname, LogLevel.info)
        invalid_log_level = ini_log_level

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (log_filter, ))

    logger = Logger(namespace=logname,
                    source=source,
                    observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level
    if invalid_log_level is not None:
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
            invalidLevel=invalid_log_level)

    # global logFirstRun
    # if logFirstRun is True:
    #   logFirstRun = False
    # This doesn't appear to be working yet...
    #   observers['json'] = jsonFileLogObserver(io.open("usr/log/yombo.json", "a"))
    #   globalLogPublisher.addObserver(observers['json'])
    #   observers['text'] = textFileLogObserver(io.open("usr/log/yombo.text", "a"))
    #   globalLogPublisher.addObserver(observers['text'])
    #
    #   # globalLogPublisher.addObserver(jsonFileLogObserver(io.open("usr/log/yombo.json", "a")))
    #   # globalLogPublisher.addObserver(textFileLogObserver(io.open("usr/log/yombo.text", "a")))
    #
    #
    # if get_logger.rotate_loop is None:
    #     get_logger.rotate_loop = LoopingCall(rotate_logs)
    #     get_logger.rotate_loop.start(5, False)  # about every 10 minutes
    #     # get_logger.rotate_loop.start(615, False)  # about every 10 minutes

    return loggers[logname]
Example #17
def divert_logger():
    # noinspection PyPackageRequirements
    from twisted.logger import FilteringLogObserver, LogLevel, LogLevelFilterPredicate, STDLibLogObserver, globalLogBeginner
    showwarning = warnings.showwarning
    globalLogBeginner.beginLoggingTo([FilteringLogObserver(STDLibLogObserver(), [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])], redirectStandardIO=False)
    warnings.showwarning = showwarning  # twisted's beginLoggingTo() will divert python warnings to its own logging system. here we undo that.
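# A minimal call-pattern sketch (an assumption, not from the original source):
# with the predicate above, only LogLevel.critical events reach the stdlib
# root logger.
import logging

logging.basicConfig(level=logging.INFO)
divert_logger()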
Example #18
def get_observers(main_config, twistd_user, log_group):
    log_debug = main_config.get_bool("debug", False)
    log_to_file = main_config.get_bool("log_file", False)
    log_stdout = main_config.get_bool("log_stdout", False)
    log_syslog = main_config.get_bool("log_syslog", False)
    log_auth_events = main_config.get_bool("log_auth_events", False)
    log_sso_events = main_config.get_bool("log_sso_events", True)

    # Log to file if nothing else is turned on
    log_to_file = log_to_file or not (log_to_file or log_syslog or log_stdout)

    log_dir = main_config.get_str("log_dir", "log")
    log_max_size = main_config.get_int("log_max_size", 10 * (1 << 20))
    log_max_files = main_config.get_int("log_max_files", 6)
    if log_max_files == 0:
        # we need to pass None explicitly if we want there to be no limit
        # 0 would just mean no logfiles would get kept...
        log_max_files = None

    observers = []
    if log_to_file:
        log_txt = create_log_file(
            "authproxy.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        text_observer = textFileLogObserver(log_txt)
        text_filter = FilteringLogObserver(text_observer, [only_default_log_predicate])
        observers.append(text_filter)

    if log_stdout:
        stdout_observer = textFileLogObserver(sys.stdout)
        filtered_stdout = FilteringLogObserver(
            stdout_observer, [only_default_log_predicate]
        )
        observers.append(filtered_stdout)

    if log_syslog:
        if syslog is None:
            raise config_error.ConfigError("syslog not supported on Windows")

        facility_dict = {
            "LOG_KERN": pySyslog.LOG_KERN,
            "LOG_USER": pySyslog.LOG_USER,
            "LOG_MAIL": pySyslog.LOG_MAIL,
            "LOG_DAEMON": pySyslog.LOG_DAEMON,
            "LOG_AUTH": pySyslog.LOG_AUTH,
            "LOG_LPR": pySyslog.LOG_LPR,
            "LOG_NEWS": pySyslog.LOG_NEWS,
            "LOG_UUCP": pySyslog.LOG_UUCP,
            "LOG_CRON": pySyslog.LOG_CRON,
            "LOG_SYSLOG": pySyslog.LOG_SYSLOG,
            "LOG_LOCAL0": pySyslog.LOG_LOCAL0,
            "LOG_LOCAL1": pySyslog.LOG_LOCAL1,
            "LOG_LOCAL2": pySyslog.LOG_LOCAL2,
            "LOG_LOCAL3": pySyslog.LOG_LOCAL3,
            "LOG_LOCAL4": pySyslog.LOG_LOCAL4,
            "LOG_LOCAL5": pySyslog.LOG_LOCAL5,
            "LOG_LOCAL6": pySyslog.LOG_LOCAL6,
            "LOG_LOCAL7": pySyslog.LOG_LOCAL7,
        }
        syslog_facilitystr = main_config.get_str("syslog_facility", "LOG_USER")
        syslog_facility = facility_dict.get(syslog_facilitystr, None)
        if syslog_facility is None:
            raise config_error.ConfigError(
                "Unknown syslog_facility: {0}".format(syslog_facilitystr)
            )

        syslog_observer = syslog.SyslogObserver("Authproxy", facility=syslog_facility)
        wrapped_syslog_observer = LegacyLogObserverWrapper(syslog_observer.emit)
        syslog_filtering_observer = FilteringLogObserver(
            wrapped_syslog_observer, [only_default_log_predicate],
        )
        observers.append(syslog_filtering_observer)

    if log_debug:
        debug_predicate = LogLevelFilterPredicate(LogLevel.debug)
        for i, observer in enumerate(observers):
            observers[i] = FilteringLogObserver(observer, [debug_predicate])

    if log_auth_events:
        auth_log_file = create_log_file(
            "authevents.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        auth_observer = jsonFileLogObserver(auth_log_file, "")
        observers.append(FilteringLogObserver(auth_observer, [auth_type_predicate]))

    if log_sso_events:
        sso_log_file = create_log_file(
            "ssoevents.log",
            log_dir,
            log_max_size,
            log_max_files,
            twistd_user,
            log_group,
        )
        sso_observer = jsonFileLogObserver(sso_log_file, "")
        observers.append(FilteringLogObserver(sso_observer, [sso_type_predicate]))

    return observers
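# A clarifying sketch, not from the original source: wrapping an already
# filtered observer in another FilteringLogObserver (as the log_debug branch
# above does) stacks the predicates, so an event must pass every layer before
# it reaches the innermost observer.
import sys
from twisted.logger import (FilteringLogObserver, LogLevel,
                            LogLevelFilterPredicate, textFileLogObserver)

inner = FilteringLogObserver(textFileLogObserver(sys.stdout),
                             [LogLevelFilterPredicate(LogLevel.info)])
outer = FilteringLogObserver(inner,
                             [LogLevelFilterPredicate(LogLevel.debug)])
# outer lets debug-and-above through, but inner still drops anything below info.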
Example #19
    username = "******" + tester_id

    # Extract values for the IP address and the filename of the wav
    # file to play
    address = sys.argv[1]
    in_filename = sys.argv[2]

    # Setup logging
    logfile = open(f"client-{tester_id}.log", 'w')
    logtargets = []

    # Set up the log observer for stdout.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(sys.stdout),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)] # was: warn
        )
    )

    # Set up the log observer for our log file. "debug" is the most verbose level.
    logtargets.append(
        FilteringLogObserver(
            textFileLogObserver(logfile),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]
        )
    )

    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(logtargets)

    # Start a logger with a namespace for a particular subsystem of our application.
Example #20
class AmcrestRealm:
    def requestAvatar(self, avatarId, mind, *interfaces):
        if IFTPShell in interfaces:
            return (IFTPShell, FTPShell(filepath.FilePath(VIDEOROOT)),
                    lambda: None)
        if IMessageDelivery in interfaces:
            return (IMessageDelivery, ConsoleMessageDelivery(), lambda: None)

        raise NotImplementedError(
            "Unable to provide avatar for interfaces provided ({})".format(
                interfaces))


log = Logger("amcrest")
predicate = LogLevelFilterPredicate(LogLevel.warn)
predicate.setLogLevelForNamespace("amcrest", LogLevel.debug)
globalLogPublisher.addObserver(
    FilteringLogObserver(textFileLogObserver(sys.stderr), (predicate, )))

portal = Portal(AmcrestRealm(), [
    FilePasswordDB(
        os.path.join(os.path.dirname(os.path.abspath(__file__)),
                     "passwords.txt"))
])

ftpfactory = FTPFactory(portal)
ftpfactory.allowAnonymous = False
ftpfactory.protocol = MonitoredFTP

hafactory = HAFactory()
Example #21
def get_logger(logname='yombolog', **kwargs):
    """
    Returns a logger object that allows logging of error messages.

    **Usage**:

    .. code-block:: python

       from yombo.core.log import get_logger

       logger = get_logger("module.ModuleName")
       logger.debug("Some status line, debug level items.")
       logger.info("ModuleName has finished starting is ready.")
       logger.warn("A warning!!")
       logger.error("Something really bad happened! I should quit.")

    :param logname: Name of the module or library.
    :type logname: string
    :return: logger object
    """
    global loggers
    global observers
    global configCache
    global open_files

    # A simple cache of existing loggers...
    if logname in loggers:
        return loggers[logname]

    loglevel = None
    source = kwargs.get('source', logname)
    json = kwargs.get('json', False)

    # Determine the logging level
    if len(loggers) == 0:
        config_parser = configparser.ConfigParser()
        try:
            fp = open('yombo.ini')
            config_parser.read_file(fp)
            ini = config_parser
            for option in ini.options('logging'):
                value = ini.get('logging', option)
                configCache[option] = value
            fp.close()
        except IOError:
            pass
        except configparser.NoSectionError:
            pass

    logFilter = LogLevelFilterPredicate()
    invalidLogLevel = False
    try:
        if logname in configCache:
            iniLogLevel = configCache[logname].lower()
            logFilter.setLogLevelForNamespace(
                logname, LogLevel.levelWithName(iniLogLevel))
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace(logname, LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (logFilter, ))

    logger = Logger(namespace=logname,
                    source=source,
                    observer=consoleFilterObserver)
    loggers[logname] = logger

    # Yell at the user if they specified an invalid log level (deferred
    # until the logger exists in the cache)
    if invalidLogLevel:
        logger.warn(
            "yombo.ini file contained invalid log level {invalidLevel}, level has been set to INFO instead.",
            invalidLevel=configCache[logname].lower())

    # global logFirstRun
    # if logFirstRun is True:
    #   logFirstRun = False
    # This doesn't appear to be working yet...
    #   observers['json'] = jsonFileLogObserver(io.open("usr/log/yombo.json", "a"))
    #   globalLogPublisher.addObserver(observers['json'])
    #   observers['text'] = textFileLogObserver(io.open("usr/log/yombo.text", "a"))
    #   globalLogPublisher.addObserver(observers['text'])
    #
    #   # globalLogPublisher.addObserver(jsonFileLogObserver(io.open("usr/log/yombo.json", "a")))
    #   # globalLogPublisher.addObserver(textFileLogObserver(io.open("usr/log/yombo.text", "a")))
    #
    #
    # if get_logger.rotate_loop is None:
    #     get_logger.rotate_loop = LoopingCall(rotate_logs)
    #     get_logger.rotate_loop.start(5, False)  # about every 10 minutes
    #     # get_logger.rotate_loop.start(615, False)  # about every 10 minutes

    return loggers[logname]
Example #22
class Readiness(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        data = {"msg": "Readiness checking"}
        request.setResponseCode(200)
        return json.dumps(data)


def getAppService(port, interface):
    root = resource.Resource()
    root.putChild("liveness", Liveness())
    root.putChild("readiness", Readiness())
    site = server.Site(root)
    return internet.TCPServer(port, site, interface=interface)


application = service.Application('app')
log_file = LogFile('daemon.log',
                   './',
                   rotateLength=1024 * 1024 * 1024,
                   maxRotatedFiles=2)
log_observer = FilteringLogObserver(textFileLogObserver(log_file),
                                    predicates=[
                                        LogLevelFilterPredicate(
                                            LogLevel.debug),
                                    ])
application.setComponent(ILogObserver, log_observer)
app_service = getAppService(5000, '0.0.0.0')
app_service.setServiceParent(application)
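# Deployment note (an assumption based on standard twistd behaviour, not
# stated in the snippet): setting the ILogObserver component on the
# Application is what makes twistd route its log events through the filtering
# observer, e.g.:
#
#     twistd -ny thisfile.py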
Example #23
import argparse


if __name__ == "__main__":
    # Parse the command line arguments
    parser = argparse.ArgumentParser(description="A modular Twisted IRC bot.")
    parser.add_argument("-c", "--config", help="The configuration file to use", type=str, default="heufybot.yaml")
    parser.add_argument("-f", "--logfile", help="The file the log will be written to", type=str, default="heufybot.log")
    parser.add_argument("-l", "--loglevel", help="The logging level the bot will use", type=str, default="INFO")
    options = parser.parse_args()

    # Start the bot
    heufybot = HeufyBot(options.config)

    # Determine the logging level
    logFilter = LogLevelFilterPredicate()
    try:
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.levelWithName(options.loglevel.lower()))
        invalidLogLevel = False
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace("heufybot", LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    logFile = DailyLogFile(options.logfile, "")
    fileObserver = FileLogObserver(logFile, logFormat)
    fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter,))
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver, (logFilter,))
    heufybot.log = Logger("heufybot")
    globalLogPublisher.addObserver(fileFilterObserver)
    globalLogPublisher.addObserver(consoleFilterObserver)
Example #24
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
        logBeginner: The log beginner to register the observers with.
        redirect_stdlib_logging: Whether to redirect stdlib logging to Twisted.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = StoppableLogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
Example #25
def ConsoleObserver(log_level=LogLevel.warn):
    console_observer = textFileLogObserver(sys.stdout)
    return FilteringLogObserver(console_observer, [
        LogLevelFilterPredicate(log_level),
    ])
Example #26
def buildLogger(self):
    LOG_LEVEL = LogLevel.debug
    observer = textFileLogObserver(sys.stdout, "%Y-%m-%dT%H:%M:%S,%fZ%z")
    filteringObs = LowLevelFilteringLogObserver(
        observer, [LogLevelFilterPredicate(defaultLogLevel=LOG_LEVEL)])
    return Logger(observer=filteringObs)
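# LowLevelFilteringLogObserver is presumably a project-local subclass; a
# hedged sketch of the equivalent wiring with the stock twisted.logger class:
import sys
from twisted.logger import (FilteringLogObserver, Logger, LogLevel,
                            LogLevelFilterPredicate, textFileLogObserver)

observer = textFileLogObserver(sys.stdout, timeFormat="%Y-%m-%dT%H:%M:%S%z")
filteringObs = FilteringLogObserver(
    observer, [LogLevelFilterPredicate(defaultLogLevel=LogLevel.debug)])
log = Logger(observer=filteringObs)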
Example #27
class IRCd(Service):
	def __init__(self, configFileName):
		self.config = Config(self, configFileName)
		
		self.boundPorts = {}
		self.loadedModules = {}
		self._loadedModuleData = {}
		self._unloadingModules = {}
		self.commonModules = set()
		self.userCommands = {}
		self.serverCommands = {}
		self.channelModes = ({}, {}, {}, {})
		self.channelStatuses = {}
		self.channelStatusSymbols = {}
		self.channelStatusOrder = []
		self.channelModeTypes = {}
		self.userModes = ({}, {}, {}, {})
		self.userModeTypes = {}
		self.actions = {}
		self.storage = None
		self.storageSyncer = None
		self.dataCache = {}
		self.functionCache = {}
		
		self.serverID = None
		self.name = None
		self.isupport_tokens = {
			"CASEMAPPING": "strict-rfc1459",
			"CHANTYPES": "#",
		}
		self._uid = self._genUID()
		
		self.users = {}
		self.userNicks = CaseInsensitiveDictionary()
		self.channels = CaseInsensitiveDictionary(WeakValueDictionary)
		self.servers = {}
		self.serverNames = CaseInsensitiveDictionary()
		self.recentlyQuitUsers = {}
		self.recentlyQuitServers = {}
		self.recentlyDestroyedChannels = CaseInsensitiveDictionary()
		self.pruneRecentlyQuit = None
		self.pruneRecentChannels = None
		
		self._logFilter = LogLevelFilterPredicate()
		filterObserver = FilteringLogObserver(globalLogPublisher, (self._logFilter,))
		self.log = Logger("txircd", observer=filterObserver)
		
		self.startupTime = None
	
	def startService(self):
		self.log.info("Starting up...")
		self.startupTime = now()
		self.log.info("Loading configuration...")
		self.config.reload()
		self.name = self.config["server_name"]
		self.serverID = self.config["server_id"]
		self.log.info("Loading storage...")
		self.storage = shelve.open(self.config["datastore_path"], writeback=True)
		self.storageSyncer = LoopingCall(self.storage.sync)
		self.storageSyncer.start(self.config.get("storage_sync_interval", 5), now=False)
		self.log.info("Starting processes...")
		self.pruneRecentlyQuit = LoopingCall(self.pruneQuit)
		self.pruneRecentlyQuit.start(10, now=False)
		self.pruneRecentChannels = LoopingCall(self.pruneChannels)
		self.pruneRecentChannels.start(15, now=False)
		self.log.info("Loading modules...")
		self._loadModules()
		self.log.info("Binding ports...")
		self._bindPorts()
		self.log.info("txircd started!")
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.warn)
		self.runActionStandard("startup")
	
	def stopService(self):
		stopDeferreds = []
		self.log.info("Disconnecting servers...")
		serverList = self.servers.values() # Take the list of server objects
		self.servers = {} # And then destroy the server dict to inhibit server objects generating lots of noise
		for server in serverList:
			if server.nextClosest == self.serverID:
				stopDeferreds.append(server.disconnectedDeferred)
				allUsers = list(self.users.keys())
				for user in allUsers:
					if user[:3] == server.serverID:
						del self.users[user]
				server.transport.loseConnection()
		self.log.info("Disconnecting users...")
		userList = self.users.values() # Basically do the same thing I just did with the servers
		self.users = {}
		for user in userList:
			if user.transport:
				stopDeferreds.append(user.disconnectedDeferred)
				user.transport.loseConnection()
		self.log.info("Unloading modules...")
		moduleList = self.loadedModules.keys()
		for module in moduleList:
			self._unloadModule(module, False) # Incomplete unload is done to save time and because side effects are destroyed anyway
		self.log.info("Stopping processes...")
		if self.pruneRecentlyQuit.running:
			self.pruneRecentlyQuit.stop()
		if self.pruneRecentChannels.running:
			self.pruneRecentChannels.stop()
		self.log.info("Closing data storage...")
		if self.storageSyncer.running:
			self.storageSyncer.stop()
		self.storage.close() # a close() will sync() also
		self.log.info("Releasing ports...")
		stopDeferreds.extend(self._unbindPorts())
		return DeferredList(stopDeferreds)
	
	def _loadModules(self):
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name in self.loadedModules:
				continue
			if module.core or module.name in self.config["modules"]:
				self._loadModuleData(module)
		for moduleName in self.config["modules"]:
			if moduleName not in self.loadedModules:
				self.log.warn("The module {module} failed to load.", module=moduleName)
	
	def loadModule(self, moduleName):
		"""
		Loads a module of the specified name.
		Raises ModuleLoadError if the module cannot be loaded.
		If the specified module is currently being unloaded, returns the
		DeferredList specified by the module when it was unloading with a
		callback to try to load the module again once it succeeds.
		"""
		if moduleName in self._unloadingModules:
			deferList = self._unloadingModules[moduleName]
			deferList.addCallback(self._tryLoadAgain, moduleName)
			return deferList
		for module in getPlugins(IModuleData, txircd.modules):
			if module.name == moduleName:
				rebuild(importlib.import_module(module.__module__)) # getPlugins doesn't recompile modules, so let's do that ourselves.
				self._loadModuleData(module)
				self.log.info("Loaded module {module}.", module=moduleName)
				break
	
	def _tryLoadAgain(self, _, moduleName):
		self.loadModule(moduleName)
	
	def _loadModuleData(self, module):
		if not IModuleData.providedBy(module):
			raise ModuleLoadError ("???", "Module does not implement module interface")
		if not module.name:
			raise ModuleLoadError ("???", "Module did not provide a name")
		if module.name in self.loadedModules:
			self.log.debug("Not loading {module.name} because it's already loaded", module=module)
			return
		
		self.log.debug("Beginning to load {module.name}...", module=module)
		module.hookIRCd(self)
		try:
			module.verifyConfig(self.config)
		except ConfigError as e:
			raise ModuleLoadError(module.name, e)
		
		self.log.debug("Loading hooks from {module.name}...", module=module)
		moduleData = {
			"channelmodes": module.channelModes(),
			"usermodes": module.userModes(),
			"actions": module.actions(),
			"usercommands": module.userCommands(),
			"servercommands": module.serverCommands()
		}
		newChannelModes = ({}, {}, {}, {})
		newChannelStatuses = {}
		newUserModes = ({}, {}, {}, {})
		newActions = {}
		newUserCommands = {}
		newServerCommands = {}
		common = False
		self.log.debug("Processing hook data from {module.name}...", module=module)
		for mode in moduleData["channelmodes"]:
			if mode[0] in self.channelModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement channel mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a channel mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			if mode[1] == ModeType.Status:
				if mode[4] in self.channelStatusSymbols:
					raise ModuleLoadError (module.name, "Tries to create a channel rank with symbol {} when that symbol is already in use.".format(mode[4]))
				try:
					newChannelStatuses[mode[0]] = (mode[4], mode[3], mode[2])
				except IndexError:
					raise ModuleLoadError (module.name, "Specifies channel status mode {} without a rank or symbol".format(mode[0]))
			else:
				newChannelModes[mode[1]][mode[0]] = mode[2]
			common = True
		for mode in moduleData["usermodes"]:
			if mode[0] in self.userModeTypes:
				raise ModuleLoadError (module.name, "Tries to implement user mode +{} when that mode is already implemented.".format(mode[0]))
			if not IMode.providedBy(mode[2]):
				raise ModuleLoadError (module.name, "Returns a user mode object (+{}) that doesn't implement IMode.".format(mode[0]))
			newUserModes[mode[1]][mode[0]] = mode[2]
			common = True
		for action in moduleData["actions"]:
			if action[0] not in newActions:
				newActions[action[0]] = [(action[2], action[1])]
			else:
				newActions[action[0]].append((action[2], action[1]))
		for command in moduleData["usercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a user command object ({}) that doesn't implement ICommand.".format(command[0]))
			if command[0] not in newUserCommands:
				newUserCommands[command[0]] = []
			newUserCommands[command[0]].append((command[2], command[1]))
		for command in moduleData["servercommands"]:
			if not ICommand.providedBy(command[2]):
				raise ModuleLoadError (module.name, "Returns a server command object ({}) that doesnt implement ICommand.".format(command[0]))
			if command[0] not in newServerCommands:
				newServerCommands[command[0]] = []
			newServerCommands[command[0]].append((command[2], command[1]))
			common = True
		if not common:
			common = module.requiredOnAllServers

		self.log.debug("Loaded data from {module.name}; committing data and calling hooks...", module=module)
		
		module.load()
		
		self.loadedModules[module.name] = module
		self._loadedModuleData[module.name] = moduleData
		if common:
			self.commonModules.add(module.name)
		
		self.runActionStandard("moduleload", module.name)
		
		for modeType, typeSet in enumerate(newChannelModes):
			for mode, implementation in typeSet.iteritems():
				self.channelModeTypes[mode] = modeType
				self.channelModes[modeType][mode] = implementation
		for mode, data in newChannelStatuses.iteritems():
			self.channelModeTypes[mode] = ModeType.Status
			self.channelStatuses[mode] = data
			self.channelStatusSymbols[data[0]] = mode
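			# channelStatusOrder stays sorted from highest to lowest rank, so
			# the PREFIX token in generateISupportList() lists statuses in rank order.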
			for index, status in enumerate(self.channelStatusOrder):
				if self.channelStatuses[status][1] < data[1]:
					self.channelStatusOrder.insert(index, mode)
					break
			else:
				self.channelStatusOrder.append(mode)
		for modeType, typeSet in enumerate(newUserModes):
			for mode, implementation in typeSet.iteritems():
				self.userModeTypes[mode] = modeType
				self.userModes[modeType][mode] = implementation
		for action, actionList in newActions.iteritems():
			if action not in self.actions:
				self.actions[action] = []
			for actionData in actionList:
				for index, handlerData in enumerate(self.actions[action]):
					if handlerData[1] < actionData[1]:
						self.actions[action].insert(index, actionData)
						break
				else:
					self.actions[action].append(actionData)
		for command, dataList in newUserCommands.iteritems():
			if command not in self.userCommands:
				self.userCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.userCommands[command]):
					if cmd[1] < data[1]:
						self.userCommands[command].insert(index, data)
						break
				else:
					self.userCommands[command].append(data)
		for command, dataList in newServerCommands.iteritems():
			if command not in self.serverCommands:
				self.serverCommands[command] = []
			for data in dataList:
				for index, cmd in enumerate(self.serverCommands[command]):
					if cmd[1] < data[1]:
						self.serverCommands[command].insert(index, data)
						break
				else:
					self.serverCommands[command].append(data)
		
		self.log.debug("Module {module.name} is now fully loaded.", module=module)
	
	def unloadModule(self, moduleName):
		"""
		Unloads the loaded module with the given name. Raises ValueError
		if the module cannot be unloaded because it's a core module.
		"""
		self._unloadModule(moduleName, True)
		self.log.info("Unloaded module {module}.", module=moduleName)
	
	def _unloadModule(self, moduleName, fullUnload):
		unloadDeferreds = []
		if moduleName not in self.loadedModules:
			return
		module = self.loadedModules[moduleName]
		if fullUnload and module.core:
			raise ValueError ("The module you're trying to unload is a core module.")
		moduleData = self._loadedModuleData[moduleName]
		d = module.unload()
		if d is not None:
			unloadDeferreds.append(d)
		
		if fullUnload:
			d = module.fullUnload()
			if d is not None:
				unloadDeferreds.append(d)
		
		for modeData in moduleData["channelmodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.Status:
					for channel in self.channels.itervalues():
						removeFromChannel = []
						for user, userData in channel.users.iteritems():
							if modeData[0] in userData["status"]:
								removeFromChannel.append((False, modeData[0], user.uuid))
						channel.setModes(removeFromChannel, self.serverID)
				elif modeData[1] == ModeType.List:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							removeFromChannel = []
							for paramData in channel.modes[modeData[0]]:
								removeFromChannel.append((False, modeData[0], paramData[0]))
							channel.setModes(removeFromChannel, self.serverID)
				else:
					for channel in self.channels.itervalues():
						if modeData[0] in channel.modes:
							channel.setModes([(False, modeData[0], channel.modes[modeData[0]])], self.serverID)
			
			if modeData[1] == ModeType.Status:
				del self.channelStatuses[modeData[0]]
				del self.channelStatusSymbols[modeData[4]]
				self.channelStatusOrder.remove(modeData[0])
			else:
				del self.channelModes[modeData[1]][modeData[0]]
			del self.channelModeTypes[modeData[0]]
		for modeData in moduleData["usermodes"]:
			if fullUnload: # Unset modes on full unload
				if modeData[1] == ModeType.List:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							removeFromUser = []
							for paramData in user.modes[modeData[0]]:
								removeFromUser.append((False, modeData[0], paramData[0]))
							user.setModes(removeFromUser, self.serverID)
				else:
					for user in self.users.itervalues():
						if modeData[0] in user.modes:
							user.setModes([(False, modeData[0], user.modes[modeData[0]])], self.serverID)
			
			del self.userModes[modeData[1]][modeData[0]]
			del self.userModeTypes[modeData[0]]
		for actionData in moduleData["actions"]:
			self.actions[actionData[0]].remove((actionData[2], actionData[1]))
			if not self.actions[actionData[0]]:
				del self.actions[actionData[0]]
		for commandData in moduleData["usercommands"]:
			self.userCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.userCommands[commandData[0]]:
				del self.userCommands[commandData[0]]
		for commandData in moduleData["servercommands"]:
			self.serverCommands[commandData[0]].remove((commandData[2], commandData[1]))
			if not self.serverCommands[commandData[0]]:
				del self.serverCommands[commandData[0]]
		
		del self.loadedModules[moduleName]
		del self._loadedModuleData[moduleName]
		
		if fullUnload:
			self.runActionStandard("moduleunload", module.name)
		
		if unloadDeferreds:
			deferList = DeferredList(unloadDeferreds)
			self._unloadingModules[moduleName] = deferList
			deferList.addCallback(self._removeFromUnloadingList, moduleName)
			return deferList
	
	def _removeFromUnloadingList(self, _, moduleName):
		del self._unloadingModules[moduleName]
	
	def reloadModule(self, moduleName):
		"""
		Reloads the module with the given name.
		Returns a DeferredList if the module unloads with one or more Deferreds.
		May raise ModuleLoadError if the module cannot be loaded.
		"""
		deferList = self._unloadModule(moduleName, False)
		if deferList is None:
			deferList = self.loadModule(moduleName)
		else:
			deferList.addCallback(lambda result: self.loadModule(moduleName))
		return deferList

	def verifyConfig(self, config):
		# IRCd
		if "server_name" not in config:
			raise ConfigValidationError("server_name", "required item not found in configuration file.")
		if not isinstance(config["server_name"], basestring):
			raise ConfigValidationError("server_name", "value must be a string")
		if len(config["server_name"]) > 64:
			config["server_name"] = config["server_name"][:64]
			self.logConfigValidationWarning("server_name", "value is too long and has been truncated", config["server_name"])
		if not re.match(r"^[a-zA-Z0-9.-]+\.[a-zA-Z0-9.-]+$", config["server_name"]):
			raise ConfigValidationError("server_name", "server name must look like a valid hostname.")
		if "server_id" in config:
			if not isinstance(config["server_id"], basestring):
				raise ConfigValidationError("server_id", "value must be a string")
			else:
				config["server_id"] = config["server_id"].upper()
		else:
			randFromName = random.Random(config["server_name"])
			serverID = randFromName.choice(string.digits) + randFromName.choice(string.digits + string.ascii_uppercase) + randFromName.choice(string.digits + string.ascii_uppercase)
			config["server_id"] = serverID
		if len(config["server_id"]) != 3 or not config["server_id"].isalnum() or not config["server_id"][0].isdigit():
			raise ConfigValidationError("server_id", "value must be a 3-character alphanumeric string starting with a number.")
		if "server_description" not in config:
			raise ConfigValidationError("server_description", "required item not found in configuration file.")
		if not isinstance(config["server_description"], basestring):
			raise ConfigValidationError("server_description", "value must be a string")
		if not config["server_description"]:
			raise ConfigValidationError("server_description", "value must not be an empty string")
		if len(config["server_description"]) > 255:
			config["server_description"] = config["server_description"][:255]
			self.logConfigValidationWarning("server_description", "value is too long and has been truncated", config["server_description"])
		if "network_name" not in config:
			raise ConfigValidationError("network_name", "required item not found in configuration file.")
		if not isinstance(config["network_name"], basestring):
			raise ConfigValidationError("network_name", "value must be a string")
		if not config["network_name"]:
			raise ConfigValidationError("network_name", "value must not be an empty string")
		if " " in config["network_name"]:
			raise ConfigValidationError("network_name", "value cannot have spaces")
		if len(config["network_name"]) > 32:
			config["network_name"] = config["network_name"][:32]
			self.logConfigValidationWarning("network_name", "value is too long", config["network_name"])
		if "bind_client" not in config:
			config["bind_client"] = [ "tcp:6667:interface={::}" ]
			self.logConfigValidationWarning("bind_client", "no default client binding specified", "[ \"tcp:6667:interface={::}\" ]")
		if not isinstance(config["bind_client"], list):
			raise ConfigValidationError("bind_client", "value must be a list")
		for bindDesc in config["bind_client"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_client", "every entry must be a string")
		if "bind_server" not in config:
			config["bind_server"] = []
		if not isinstance(config["bind_server"], list):
			raise ConfigValidationError("bind_server", "value must be a list")
		for bindDesc in config["bind_server"]:
			if not isinstance(bindDesc, basestring):
				raise ConfigValidationError("bind_server", "every entry must be a string")
		if "modules" not in config:
			config["modules"] = []
		if not isinstance(config["modules"], list):
			raise ConfigValidationError("modules", "value must be a list")
		for module in config["modules"]:
			if not isinstance(module, basestring):
				raise ConfigValidationError("modules", "every entry must be a string")
		if "links" in config:
			if not isinstance(config["links"], dict):
				raise ConfigValidationError("links", "value must be a dictionary")
			for desc, server in config["links"].iteritems():
				if not isinstance(desc, basestring):
					raise ConfigValidationError("links", "\"{}\" is an invalid server description".format(desc))
				if not isinstance(server, dict):
					raise ConfigValidationError("links", "values for \"{}\" must be a dictionary".format(desc))
				if "connect_descriptor" not in server:
					raise ConfigValidationError("links", "server \"{}\" must contain a \"connect_descriptor\" value".format(desc))
				if "in_password" in server:
					if not isinstance(server["in_password"], basestring):
						config["links"][desc]["in_password"] = str(server["in_password"])
				if "out_password" in server:
					if not isinstance(server["out_password"], basestring):
						config["links"][desc]["out_password"] = str(server["out_password"])
		if "datastore_path" not in config:
			config["datastore_path"] = "data.db"
		if "storage_sync_interval" in config and not isinstance(config["storage_sync_interval"], int):
			raise ConfigValidationError("storage_sync_interval", "invalid number")

		# Channels
		if "channel_name_length" in config:
			if not isinstance(config["channel_name_length"], int) or config["channel_name_length"] < 0:
				raise ConfigValidationError("channel_name_length", "invalid number")
			elif config["channel_name_length"] > 64:
				config["channel_name_length"] = 64
				self.logConfigValidationWarning("channel_name_length", "value is too large", 64)
		if "modes_per_line" in config:
			if not isinstance(config["modes_per_line"], int) or config["modes_per_line"] < 0:
				raise ConfigValidationError("modes_per_line", "invalid number")
			elif config["modes_per_line"] > 20:
				config["modes_per_line"] = 20
				self.logConfigValidationWarning("modes_per_line", "value is too large", 20)
		if "channel_listmode_limit" in config:
			if not isinstance(config["channel_listmode_limit"], int) or config["channel_listmode_limit"] < 0:
				raise ConfigValidationError("channel_listmode_limit", "invalid number")
			if config["channel_listmode_limit"] > 256:
				config["channel_listmode_limit"] = 256
				self.logConfigValidationWarning("channel_listmode_limit", "value is too large", 256)

		# Users
		if "user_registration_timeout" in config:
			if not isinstance(config["user_registration_timeout"], int) or config["user_registration_timeout"] < 0:
				raise ConfigValidationError("user_registration_timeout", "invalid number")
			elif config["user_registration_timeout"] < 10:
				config["user_registration_timeout"] = 10
				self.logConfigValidationWarning("user_registration_timeout", "timeout could be too short for clients to register in time", 10)
		if "user_ping_frequency" in config and (not isinstance(config["user_ping_frequency"], int) or config["user_ping_frequency"] < 0):
			raise ConfigValidationError("user_ping_frequency", "invalid number")
		if "hostname_length" in config:
			if not isinstance(config["hostname_length"], int) or config["hostname_length"] < 0:
				raise ConfigValidationError("hostname_length", "invalid number")
			elif config["hostname_length"] > 64:
				config["hostname_length"] = 64
				self.logConfigValidationWarning("hostname_length", "value is too large", 64)
			elif config["hostname_length"] < 4:
				config["hostname_length"] = 4
				self.logConfigValidationWarning("hostname_length", "value is too small", 4)
		if "ident_length" in config:
			if not isinstance(config["ident_length"], int) or config["ident_length"] < 0:
				raise ConfigValidationError("ident_length", "invalid number")
			elif config["ident_length"] > 12:
				config["ident_length"] = 12
				self.logConfigValidationWarning("ident_length", "value is too large", 12)
			elif config["ident_length"] < 1:
				config["ident_length"] = 1
				self.logConfigValidationWarning("ident_length", "value is too small", 1)
		if "gecos_length" in config:
			if not isinstance(config["gecos_length"], int) or config["gecos_length"] < 0:
				raise ConfigValidationError("gecos_length", "invalid number")
			elif config["gecos_length"] > 128:
				config["gecos_length"] = 128
				self.logConfigValidationWarning("gecos_length", "value is too large", 128)
			elif config["gecos_length"] < 1:
				config["gecos_length"] = 1
				self.logConfigValidationWarning("gecos_length", "value is too small", 1)
		if "user_listmode_limit" in config:
			if not isinstance(config["user_listmode_limit"], int) or config["user_listmode_limit"] < 0:
				raise ConfigValidationError("user_listmode_limit", "invalid number")
			if config["user_listmode_limit"] > 256:
				config["user_listmode_limit"] = 256
				self.logConfigValidationWarning("user_listmode_limit", "value is too large", 256)

		# Servers
		if "server_registration_timeout" in config:
			if not isinstance(config["server_registration_timeout"], int) or config["server_registration_timeout"] < 0:
				raise ConfigValidationError("server_registration_timeout", "invalid number")
			elif config["server_registration_timeout"] < 10:
				config["server_registration_timeout"] = 10
				self.logConfigValidationWarning("server_registration_timeout", "timeout could be too short for servers to register in time", 10)
		if "server_ping_frequency" in config and (not isinstance(config["server_ping_frequency"], int) or config["server_ping_frequency"] < 0):
			raise ConfigValidationError("server_ping_frequency", "invalid number")

		for module in self.loadedModules.itervalues():
			module.verifyConfig(config)

	def logConfigValidationWarning(self, key, message, default):
		self.log.warn("Config value \"{configKey}\" is invalid ({message}); the value has been set to a default of \"{default}\".", configKey=key, message=message, default=default)

	def rehash(self):
		"""
		Reloads the configuration file and applies changes.
		"""
		self.log.info("Rehashing...")
		self.config.reload()
		d = self._unbindPorts() # Unbind the ports that are bound
		if d: # And then bind the new ones
			DeferredList(d).addCallback(lambda result: self._bindPorts())
		else:
			self._bindPorts()
		
		try:
			self._logFilter.setLogLevelForNamespace("txircd", LogLevel.levelWithName(self.config["log_level"]))
		except (KeyError, InvalidLogLevelError):
			pass # If we can't set a new log level, we'll keep the old one
		
		for module in self.loadedModules.itervalues():
			module.rehash()
	
	def _bindPorts(self):
		for bindDesc in self.config["bind_client"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(UserFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "client")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
		for bindDesc in self.config["bind_server"]:
			try:
				endpoint = serverFromString(reactor, unescapeEndpointDescription(bindDesc))
			except ValueError as e:
				self.log.error(e)
				continue
			listenDeferred = endpoint.listen(ServerListenFactory(self))
			listenDeferred.addCallback(self._savePort, bindDesc, "server")
			listenDeferred.addErrback(self._logNotBound, bindDesc)
	
	def _unbindPorts(self):
		deferreds = []
		for port in self.boundPorts.itervalues():
			d = port.stopListening()
			if d:
				deferreds.append(d)
		return deferreds
	
	def _savePort(self, port, desc, portType):
		self.boundPorts[desc] = port
		self.log.debug("Bound endpoint '{endpointDescription}' for {portType} connections.", endpointDescription=desc, portType=portType)
	
	def _logNotBound(self, err, desc):
		self.log.error("Could not bind '{endpointDescription}': {errorMsg}", endpointDescription=desc, errorMsg=err)
	
	def createUUID(self):
		"""
		Gets the next UUID for a new client.
		"""
		newUUID = self.serverID + self._uid.next()
		while newUUID in self.users: # It'll take over 1.5 billion connections to loop around, but we still
			newUUID = self.serverID + self._uid.next() # want to be extra safe and avoid collisions
		self.log.debug("Generated new UUID {uuid}", uuid=newUUID)
		return newUUID
	
	def _genUID(self):
		uid = "AAAAAA"
		while True:
			yield uid
			uid = self._incrementUID(uid)
	
	def _incrementUID(self, uid):
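		# Each position counts A..Z and then 0..9 before carrying into the
		# position to its left; e.g. "AAAAAZ" -> "AAAAA0", "AAAAA9" -> "AAAABA".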
		if uid == "Z": # The first character must be a letter
			return "A" # So wrap that around
		if uid[-1] == "9":
			return self._incrementUID(uid[:-1]) + "A"
		if uid[-1] == "Z":
			return uid[:-1] + "0"
		return uid[:-1] + chr(ord(uid[-1]) + 1)
	
	def pruneQuit(self):
		compareTime = now() - timedelta(seconds=10)
		remove = []
		for uuid, timeQuit in self.recentlyQuitUsers.iteritems():
			if timeQuit < compareTime:
				remove.append(uuid)
		for uuid in remove:
			del self.recentlyQuitUsers[uuid]
		
		remove = []
		for serverID, timeQuit in self.recentlyQuitServers.iteritems():
			if timeQuit < compareTime:
				remove.append(serverID)
		for serverID in remove:
			del self.recentlyQuitServers[serverID]
	
	def pruneChannels(self):
		removeChannels = []
		for channel, remove in self.recentlyDestroyedChannels.iteritems():
			if remove:
				removeChannels.append(channel)
			elif channel not in self.channels:
				self.recentlyDestroyedChannels[channel] = True
		for channel in removeChannels:
			del self.recentlyDestroyedChannels[channel]
	
	def generateISupportList(self):
		isupport = self.isupport_tokens.copy()
		statusSymbolOrder = "".join([self.channelStatuses[status][0] for status in self.channelStatusOrder])
		isupport["CHANMODES"] = ",".join(["".join(modes) for modes in self.channelModes])
		isupport["CHANNELLEN"] = self.config.get("channel_name_length", 64)
		isupport["NETWORK"] = self.config["network_name"]
		isupport["PREFIX"] = "({}){}".format("".join(self.channelStatusOrder), statusSymbolOrder)
		isupport["STATUSMSG"] = statusSymbolOrder
		isupport["USERMODES"] = ",".join(["".join(modes) for modes in self.userModes])
		self.runActionStandard("buildisupport", isupport)
		isupportList = []
		for key, val in isupport.iteritems():
			if val is None:
				isupportList.append(key)
			else:
				isupportList.append("{}={}".format(key, val))
		return isupportList
	
	def connectServer(self, name):
		"""
		Connect a server with the given name in the configuration.
		Returns a Deferred for the connection when we can successfully connect
		or None if the server is already connected or if we're unable to find
		information for that server in the configuration.
		"""
		if name in self.serverNames:
			return None
		if name not in self.config.get("links", {}):
			return None
		serverConfig = self.config["links"][name]
		endpoint = clientFromString(reactor, unescapeEndpointDescription(serverConfig["connect_descriptor"]))
		d = endpoint.connect(ServerConnectFactory(self))
		d.addCallback(self._completeServerConnection, name)
		return d
	
	def _completeServerConnection(self, result, name):
		self.log.info("Connected to server {serverName}", serverName=name)
		self.runActionStandard("initiateserverconnection", result)
	
	def broadcastToServers(self, fromServer, command, *params, **kw):
		"""
		Broadcasts a message to all connected servers. The fromServer parameter
		should be the server from which the message came; if this server is the
		originating server, specify None for fromServer.
		"""
		for server in self.servers.itervalues():
			if server.nextClosest == self.serverID and server != fromServer:
				server.sendMessage(command, *params, **kw)
	
	def _getActionModes(self, actionName, *params, **kw):
		users = []
		channels = []
		if "users" in kw:
			users = kw["users"]
		if "channels" in kw:
			channels = kw["channels"]
		
		functionList = []
		
		if users:
			genericUserActionName = "modeactioncheck-user-{}".format(actionName)
			genericUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}".format(actionName)
			for modeType in self.userModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					# Because Python doesn't properly capture variables in lambdas, we have to force static capture
					# by wrapping lambdas in more lambdas.
					# I wish Python wasn't this gross.
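					# For example, without the outer lambda each closure below
					# would look up `action` and `mode` only when called, after
					# the loop had already moved on to later values.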
					for action in self.actions.get("modeactioncheck-user", []):
						actionList.append(((lambda action, actionName, mode: lambda user, *params: action[0](actionName, mode, user, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-user-withchannel", []):
						for channel in channels:
							actionList.append(((lambda action, actionName, mode, channel: lambda user, *params: action[0](actionName, mode, user, channel, *params))(action, actionName, mode, channel), action[1]))
					for action in self.actions.get(genericUserActionName, []):
						actionList.append(((lambda action, mode: lambda user, *params: action[0](mode, user, *params))(action, mode), action[1]))
					for action in self.actions.get(genericUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, mode, channel: lambda user, *params: action[0](mode, user, channel, *params))(action, mode, channel), action[1]))
					modeUserActionName = "modeactioncheck-user-{}-{}".format(mode, actionName)
					modeUserActionNameWithChannel = "modeactioncheck-user-withchannel-{}-{}".format(mode, actionName)
					for action in self.actions.get(modeUserActionNameWithChannel, []):
						for channel in channels:
							actionList.append(((lambda action, channel: lambda user, *params: action[0](user, channel, *params))(action, channel), action[1]))
					actionList = sorted(self.actions.get(modeUserActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyUsers = []
					for user in users:
						for action in actionList:
							param = action[0](user, *params)
							if param is not None:
								if param is not False:
									applyUsers.append((user, param))
								break
					for user, param in applyUsers:
						functionList.append(((lambda modeObj, actionName, user, param: lambda *params: modeObj.apply(actionName, user, param, *params))(modeObj, actionName, user, param), priority))
		
		if channels:
			genericChannelActionName = "modeactioncheck-channel-{}".format(actionName)
			genericChannelActionNameWithUser = "modeactioncheck-channel-withuser-{}".format(actionName)
			for modeType in self.channelModes:
				for mode, modeObj in modeType.iteritems():
					if actionName not in modeObj.affectedActions:
						continue
					priority = modeObj.affectedActions[actionName]
					actionList = []
					for action in self.actions.get("modeactioncheck-channel", []):
						actionList.append(((lambda action, actionName, mode: lambda channel, *params: action[0](actionName, mode, channel, *params))(action, actionName, mode), action[1]))
					for action in self.actions.get("modeactioncheck-channel-withuser", []):
						for user in users:
							actionList.append(((lambda action, actionName, mode, user: lambda channel, *params: action[0](actionName, mode, channel, user, *params))(action, actionName, mode, user), action[1]))
					for action in self.actions.get(genericChannelActionName, []):
						actionList.append(((lambda action, mode: lambda channel, *params: action[0](mode, channel, *params))(action, mode), action[1]))
					for action in self.actions.get(genericChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, mode, user: lambda channel, *params: action[0](mode, channel, user, *params))(action, mode, user), action[1]))
					modeChannelActionName = "modeactioncheck-channel-{}-{}".format(mode, actionName)
					modeChannelActionNameWithUser = "modeactioncheck-channel-withuser-{}-{}".format(mode, actionName)
					for action in self.actions.get(modeChannelActionNameWithUser, []):
						for user in users:
							actionList.append(((lambda action, user: lambda channel, *params: action[0](channel, user, *params))(action, user), action[1]))
					actionList = sorted(self.actions.get(modeChannelActionName, []) + actionList, key=lambda action: action[1], reverse=True)
					applyChannels = []
					for channel in channels:
						for action in actionList:
							param = action[0](channel, *params)
							if param is not None:
								if param is not False:
									applyChannels.append((channel, param))
								break
					for channel, param in applyChannels:
						functionList.append(((lambda modeObj, actionName, channel, param: lambda *params: modeObj.apply(actionName, channel, param, *params))(modeObj, actionName, channel, param), priority))
		return functionList
	
	def _getActionFunctionList(self, actionName, *params, **kw):
		functionList = self.actions.get(actionName, [])
		functionList = functionList + self._getActionModes(actionName, *params, **kw)
		return sorted(functionList, key=lambda action: action[1], reverse=True)
	
	def _combineActionFunctionLists(self, actionLists):
		"""
		Combines multiple lists of action functions into one.
		Assumes all lists are sorted.
		Takes a dict mapping action names to their action function lists.
		Returns a list in priority order (highest to lowest) of (actionName, function) tuples.
		"""
		fullActionList = []
		for actionName, actionList in actionLists.iteritems():
			insertPos = 0
			for action in actionList:
				try:
					# Carry the priority as a third element so it can be
					# compared while merging.
					while fullActionList[insertPos][2] > action[1]:
						insertPos += 1
					fullActionList.insert(insertPos, (actionName, action[0], action[1]))
				except IndexError:
					fullActionList.append((actionName, action[0], action[1]))
				insertPos += 1
		return [(actionName, actionFunc) for actionName, actionFunc, priority in fullActionList]
	
	def runActionStandard(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			action[0](*params)
	
	def runActionUntilTrue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a true value. Returns True
		when one of the functions returned True. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				return True
		return False
	
	def runActionUntilFalse(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a false value. Returns True
		when one of the functions returned False. Accepts the 'users' and
		'channels' keyword arguments to determine which mode handlers should be
		included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if not action[0](*params):
				return True
		return False
	
	def runActionUntilValue(self, actionName, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until one of them returns a non-None value. Returns the
		value returned by the function that returned a non-None value. Accepts
		the 'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			value = action[0](*params)
			if value is not None:
				return value
		return None
	
	def runActionFlagTrue(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a true
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsTrue = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if action[0](*params):
				oneIsTrue = True
		return oneIsTrue
	
	def runActionFlagFalse(self, actionName, *params, **kw):
		"""
		Calls all functions for a given action with the given parameters in
		priority order. Returns True when one of the functions returns a false
		value. Accepts the 'users' and 'channels' keyword arguments to
		determine which mode handlers should be included.
		"""
		oneIsFalse = False
		actionList = self._getActionFunctionList(actionName, *params, **kw)
		for action in actionList:
			if not action[0](*params):
				oneIsFalse = True
		return oneIsFalse
	
	def runActionProcessing(self, actionName, data, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (the data
		parameter becomes empty). Accepts 'users' and 'channels' keyword
		arguments to determine which mode handlers should be included.
		"""
		actionList = self._getActionFunctionList(actionName, data, *params, **kw)
		for action in actionList:
			action[0](data, *params)
			if not data:
				return
	
	def runActionProcessingMultiple(self, actionName, dataList, *params, **kw):
		"""
		Calls functions for a given action with the given parameters in
		priority order until the provided data is all processed (all of the
		data structures in the dataList parameter become empty). Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		paramList = dataList + list(params)
		actionList = self._getActionFunctionList(actionName, *paramList, **kw)
		for action in actionList:
			action[0](*paramList)
			for data in dataList:
				if data:
					break
			else:
				return
	
	def runComboActionStandard(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
	
	def runComboActionUntilTrue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a true value. Actions
		are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a true value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilFalse(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a false value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if one of the functions returned a false value. Accepts
		'users' and 'channels' keyword arguments to determine which mode
		handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				return True
		return False
	
	def runComboActionUntilValue(self, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until one of the functions returns a non-None value.
		Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns the value returned by the function that returned a non-None
		value. Accepts 'users' and 'channels' keyword arguments to determine
		which mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			value = actionFunc(*actionParameters[actionName])
			if value is not None:
				return value
		return None
	
	def runComboActionFlagTrue(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a true value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsTrue = False
		for actionName, actionFunc in funcList:
			if actionFunc(*actionParameters[actionName]):
				oneIsTrue = True
		return oneIsTrue
	
	def runComboActionFlagFalse(self, actionList, **kw):
		"""
		Calls all functions for the given actions with the given parameters in
		priority order. Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Returns True if any of the functions called returned a false value.
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = action[1:]
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		oneIsFalse = False
		for actionName, actionFunc in funcList:
			if not actionFunc(*actionParameters[actionName]):
				oneIsFalse = True
		return oneIsFalse
	
	def runComboActionProcessing(self, data, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (the data
		parameter becomes empty). Actions are specified as a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = [data] + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			if not data:
				break
	
	def runComboActionProcessingMultiple(self, dataList, actionList, **kw):
		"""
		Calls functions for the given actions with the given parameters in
		priority order until the data given has been processed (all the data
		items in the dataList parameter become empty). Actions are specified as
		a list of tuples:
		[ ("action1", param1, param2, ...), ("action2", param1, param2, ...) ]
		Accepts 'users' and 'channels' keyword arguments to determine which
		mode handlers should be included.
		"""
		actionFuncLists = {}
		actionParameters = {}
		for action in actionList:
			parameters = dataList + list(action[1:])
			actionParameters[action[0]] = parameters
			actionFuncLists[action[0]] = self._getActionFunctionList(action[0], *parameters, **kw)
		funcList = self._combineActionFunctionLists(actionFuncLists)
		for actionName, actionFunc in funcList:
			actionFunc(*actionParameters[actionName])
			for data in dataList:
				if data:
					break
			else:
				return
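All of the runAction*/runComboAction* helpers above share one dispatch shape: handlers are (function, priority) tuples run from highest to lowest priority. A minimal standalone sketch of that shape (illustrative names only, not part of txircd):

actions = {"check": [(lambda x: x > 10, 20), (lambda x: x < 0, 10)]}

def run_until_true(name, *params):
    # Mirrors runActionUntilTrue above: try handlers in descending
    # priority order and stop at the first true result.
    for func, priority in sorted(actions.get(name, []), key=lambda a: a[1], reverse=True):
        if func(*params):
            return True
    return False

print(run_until_true("check", 42))  # True: the priority-20 handler fires first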
Example #29
                        "--logfile",
                        help="The file the log will be written to",
                        type=str,
                        default="desertbot.log")
    parser.add_argument("-l",
                        "--loglevel",
                        help="The logging level the bot will use",
                        type=str,
                        default="INFO")
    options = parser.parse_args()

    # Start the bot
    desertbot = DesertBot(options.config)

    # Determine the logging level
    logFilter = LogLevelFilterPredicate()
    try:
        logFilter.setLogLevelForNamespace(
            "desertbot", LogLevel.levelWithName(options.loglevel.lower()))
        invalidLogLevel = False
    except InvalidLogLevelError:
        logFilter.setLogLevelForNamespace("desertbot", LogLevel.info)
        invalidLogLevel = True

    # Set up logging
    logFile = DailyLogFile(options.logfile, "")
    fileObserver = FileLogObserver(logFile, logFormat)
    fileFilterObserver = FilteringLogObserver(fileObserver, (logFilter, ))
    consoleFilterObserver = FilteringLogObserver(consoleLogObserver,
                                                 (logFilter, ))
    desertbot.log = Logger("desertbot")
Example #30
# ---------------

import sys

from twisted.logger import (
    Logger, LogLevel, globalLogBeginner, textFileLogObserver,
    FilteringLogObserver, LogLevelFilterPredicate)

# ----------------
# Module constants
# ----------------

# -----------------------
# Module global variables
# -----------------------

# Global predicate used to control log levels per namespace
logLevelFilterPredicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)

# ------------------------
# Module Utility Functions
# ------------------------

def startLogging(console=True, filepath=None):
    '''
    Starts the global Twisted logger subsystem, writing to stdout
    and/or to a file specified in the config file.
    '''
    global logLevelFilterPredicate

    observers = []
    if console:
        observers.append( FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
                                               predicates=[logLevelFilterPredicate] ))
    if filepath is not None and filepath != "":
        observers.append( FilteringLogObserver(observer=textFileLogObserver(open(filepath, 'a')),
                                               predicates=[logLevelFilterPredicate] ))
    globalLogBeginner.beginLoggingTo(observers)
Example #31
import sys

from twisted.logger import (Logger, LogLevel, LogLevelFilterPredicate,
                            FilteringLogObserver, textFileLogObserver,
                            globalLogPublisher)

__all__ = ["logger"]

predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.debug)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])
observer._encoding = "utf-8"
globalLogPublisher.addObserver(observer)

logger = Logger()
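
A one-line usage sketch of the module above (the import path is hypothetical and depends on where the file lives):

from mypackage.logsetup import logger  # hypothetical module path

logger.info("starting up on port {port}", port=8080)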