Example #1
0
def init_logging(context):
    """Begin Twisted logging to stdout and a per-session log file."""
    session_dir = context["session_files"].session_dir

    # The file handle stays open for the process lifetime; the observer
    # below writes to it.
    log_output = open(session_dir / "singt.log", 'w')

    # One observer per destination, each filtered at "debug" (was: warn
    # for stdout); "debug" is the most permissive level.
    stdout_observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout),
        predicates=[LogLevelFilterPredicate(LogLevel.debug)]
    )
    file_observer = FilteringLogObserver(
        textFileLogObserver(log_output),
        predicates=[LogLevelFilterPredicate(LogLevel.debug)]
    )

    # Route Twisted's global log stream to both observers.
    globalLogBeginner.beginLoggingTo([stdout_observer, file_observer])

    # ASCII-art title
    title = art.text2art("Singt Client")
    log.info("\n"+title)
Example #2
0
    def __init__(self, logger, url, header=None, **kwargs):
        """X-Ray WebSocket client base class.

        Configures Twisted/txaio logging from the stdlib logger's
        effective level, then initiates the WebSocket connection.

        Arguments:
          logger: stdlib logger used for level decisions and as the
            sink for Twisted log events
          url: The URI of the endpoint where the device is connected
          header: optional headers forwarded to the factory

        """
        # Derive host/port from the URL; default port follows the
        # scheme (80 for "ws", otherwise 443).
        u = urlparse(url)

        self.host = u.hostname
        if u.port:
            self.port = u.port
        else:
            if u.scheme == "ws":
                self.port = 80
            else:
                self.port = 443

        self.ws_factory = None
        self._logger = logger
        self._is_shutdown = False

        # By default only pass Twisted events at "error" or above.
        predicate = LogLevelFilterPredicate(LogLevel.error)

        try:
            if logger.isEnabledFor(logging.DEBUG):
                # Turn on Deferred debugging and lower the filter.
                setDebugging(True)
                predicate = LogLevelFilterPredicate(LogLevel.debug)
                if logger.isEnabledFor(LOG_PROTOCOL_TRACE):
                    txaio.set_global_log_level('trace')
                else:
                    txaio.set_global_log_level('debug')
            else:
                txaio.set_global_log_level('info')
        except Exception as exc:
            logger.error(exc)

        # Forward filtered Twisted log events into the stdlib logger.
        globalLogPublisher.addObserver(
            FilteringLogObserver(STDLibLogObserver(name=logger.name),
                                 predicates=[predicate]))

        self.ws_factory = self.get_factory(url, header)
        self.ws_factory.d.addErrback(self._eb)

        # TLS context only for secure ("wss") factories.
        if self.ws_factory.isSecure:
            contextFactory = ssl.ClientContextFactory()
        else:
            contextFactory = None

        def cleanup():
            # Cancel the factory's pending Deferred on reactor shutdown.
            self.ws_factory.d.cancel()

        reactor.addSystemEventTrigger('after', 'shutdown', cleanup)

        connectWS(self.ws_factory, contextFactory)
Example #3
0
def run_client(address, username):
    """Start the Singt TCP/UDP client and run the reactor."""
    title = art.text2art("Singt")
    print(title)

    # Logging: one observer for stdout, one for a per-user log file,
    # both filtered at "debug" (was: warn for stdout).
    log_output = open(f"client-{username}.log", 'w')
    observers = [
        FilteringLogObserver(
            textFileLogObserver(sys.stdout),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]),
        FilteringLogObserver(
            textFileLogObserver(log_output),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]),
    ]
    globalLogBeginner.beginLoggingTo(observers)

    # Namespaced logger for the client subsystem.
    log = Logger("client")

    # TCP connection to the server.
    endpoint = TCP4ClientEndpoint(reactor, address, 1234)
    tcp_client = TCPClient(username)
    connection = connectProtocol(endpoint, tcp_client)

    def on_error(failure):
        print("An error occurred:", failure)

    connection.addErrback(on_error)

    # UDP: port 0 lets the OS pick any free local port.
    udp_client = UDPClient(address, 12345)
    reactor.listenUDP(0, udp_client)

    # Run the reactor until it is stopped.
    print("Running reactor")
    reactor.run()

    print("Finished.")
Example #4
0
def make_wrapped_observer(observer, log_level_name):
    """Wrap a legacy observer and filter it at the named log level."""
    threshold = LogLevel.lookupByName(log_level_name.lower())
    wrapped = wrap_observer(LegacyLogObserverWrapper(observer.emit))
    return FilteringLogObserver(
        wrapped, [LogLevelFilterPredicate(defaultLogLevel=threshold)])
Example #5
0
def init(outFile):
    """Install a filtering text-file observer at the configured level."""
    log_level = levels[config.LOG_LEVEL]
    globalLogPublisher.addObserver(
        FilteringLogObserver(
            textFileLogObserver(outFile=outFile),
            [LogLevelFilterPredicate(defaultLogLevel=log_level)]))
    log.info("Start logging with {l}", l=log_level)
Example #6
0
    def __init__(self, config):
        """Set up logging, game state, and the main game loop.

        Args:
            config: passed-in game configuration object, stored as-is.
        """
        # Dictionary to hold active clients.
        self.clients = {}

        # Set up logging.
        # TODO: Use config parameters here.
        self.log = Logger()

        # Logging target.
        log_observer = textFileLogObserver(sys.stdout)
        # Filter out levels to the specified severity.
        logging_level_predicate = [LogLevelFilterPredicate(LogLevel.debug)]
        # Set up an Observer to actually perform the filtering.
        # (Fix: reuse the observer built above instead of constructing a
        # second, identical textFileLogObserver and leaving the first unused.)
        log_filter = FilteringLogObserver(log_observer,
                                          predicates=logging_level_predicate)
        # And register global logging for the filtering observer.
        globalLogBeginner.beginLoggingTo([log_filter])

        # Passed-in game configuration.
        self.configuration = config

        # Game data.
        self.game = game.GameData()

        # Init main game loop with a 30-second interval.
        self.game_loop = task.LoopingCall(self.GameLoop)
        self.game_loop.start(30)

        # Holds a (cancelable! - just "self.shutdown.cancel()") callback for shutting down the server as needed.
        self.shutdown = None
Example #7
0
    def _log_summary_for_results(self, config_results, connectivity_results,
                                 logger):
        """
        Logs the summaries of the config and connectivity results for the
        tested sections

        Args:
            config_results (dict): map of section name to section result for the config validation
            connectivity_results (dict): map of section name to section result for the connectivity validation
            logger (Logger): the Twisted logger to write to

        """

        # Wrap the logger's observer inside a FilteringLogObserver so we can
        # control the minimum log level that gets printed via a predicate.
        filtering_predicate = LogLevelFilterPredicate()
        original_observer = logger.observer
        logger.observer = FilteringLogObserver(original_observer,
                                               [filtering_predicate])
        try:
            # Print the cross config results first.
            cross_config_result = config_results.get(self.CROSS_CONFIG_KEY)
            if cross_config_result and self._has_summary_to_print(
                    cross_config_result):
                self._log_section_summary([cross_config_result], logger,
                                          filtering_predicate,
                                          "Cross-section results")

            # Then log the summaries for the real config sections
            self._log_section_summaries(config_results, connectivity_results,
                                        logger, filtering_predicate)
        finally:
            # Put the logger's observer back to its original value.
            # (Fix: previously an exception part-way through would leave
            # the temporary filtering observer installed permanently.)
            logger.observer = original_observer
Example #8
0
def startLogging(settings, stream=None, level=LogLevel.debug):
    """Install a filtering file observer honoring per-namespace debug flags.

    Args:
        settings: object whose ``options`` carry the ``debug_*`` switches.
        stream: forwarded to ``logObserver()`` to build the file observer.
        level: default LogLevel for namespaces without an override.
    """
    # The predicate is module-global so other code can adjust it later.
    global predicate

    fileObserver = logObserver(stream)
    predicate = LogLevelFilterPredicate(defaultLogLevel=level)

    if settings.options.debug_mqtt:
        predicate.setLogLevelForNamespace('kotori.daq.services.mig',
                                          LogLevel.debug)
        predicate.setLogLevelForNamespace('kotori.daq.application.mqttkit',
                                          LogLevel.debug)

    # MQTT driver namespaces: debug when requested, otherwise pinned to info.
    if settings.options.debug_mqtt_driver:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt',
                                          LogLevel.debug)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.debug)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.debug)
    else:
        predicate.setLogLevelForNamespace('kotori.daq.intercom.mqtt',
                                          LogLevel.info)
        predicate.setLogLevelForNamespace('mqtt', LogLevel.info)
        predicate.setLogLevelForNamespace('paho.mqtt', LogLevel.info)

    if settings.options.debug_influx:
        predicate.setLogLevelForNamespace('kotori.daq.storage.influx',
                                          LogLevel.debug)

    if settings.options.debug_io:
        predicate.setLogLevelForNamespace('kotori.io', LogLevel.debug)

    # NOTE(review): this peeks at a private Twisted attribute — presumably
    # a guard so beginLoggingTo() is only called once; confirm against the
    # twisted.logger internals before relying on it.
    if globalLogBeginner._temporaryObserver is not None:
        observers = [
            FilteringLogObserver(observer=fileObserver, predicates=[predicate])
        ]
        globalLogBeginner.beginLoggingTo(observers)
Example #9
0
    def setup(self, level='warn', namespace_levels=None, text_file=sys.stderr,
              time_format='%H:%M:%S.%f', handle_stdlib=True, stdlib_level='notset',
              stdlib_prefix='stdlib.'):

        """
        Initiates the twisted.logger system:
        - level: default log level as a string (ie: 'warn', 'info', ....).
        - namespace_levels: a dict of namespaces/log level names.
        - text_file: where to write the log to.
        - time_format: as supported by datetime.strftime.
        - handle_stdlib: True/False.
        - stdlib_level: level name, above which stdlib logging is handled.
        - stdlib_prefix: added to stdlib logger name, used as namespace.
        """
        # Build the file observer and the default-level predicate.
        file_observer = textFileLogObserver(text_file, timeFormat=time_format)
        self._predicate = LogLevelFilterPredicate(
            defaultLogLevel=LogLevel.levelWithName(level),
        )
        # Apply any per-namespace level overrides.
        if namespace_levels:
            for namespace, level_name in namespace_levels.items():
                self._predicate.setLogLevelForNamespace(
                    namespace, LogLevel.levelWithName(level_name))
        globalLogBeginner.beginLoggingTo([self._filtered_observer(file_observer)])

        if handle_stdlib:
            self._handle_stdlib(stdlib_level, stdlib_prefix)
Example #10
0
    def test_doubleEncodingError(self):
        """
        If it is not possible to encode a response to the request (for example,
        because L{xmlrpclib.dumps} raises an exception when encoding a
        L{Fault}) the exception which prevents the response from being
        generated is logged and the request object is finished anyway.
        """
        # Capture only critical-level events so the expected failure is
        # the sole entry seen by the observer.
        logObserver = EventLoggingObserver()
        filtered = FilteringLogObserver(
            logObserver,
            [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
        globalLogPublisher.addObserver(filtered)
        self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))
        d = self.proxy().callRemote("echo", "")

        # *Now* break xmlrpclib.dumps.  Hopefully the client already used it.
        def fakeDumps(*args, **kwargs):
            raise RuntimeError("Cannot encode anything at all!")

        self.patch(xmlrpclib, "dumps", fakeDumps)

        # It doesn't matter how it fails, so long as it does.  Also, it happens
        # to fail with an implementation detail exception right now, not
        # something suitable as part of a public interface.
        d = self.assertFailure(d, Exception)

        def cbFailed(ignored):
            # The fakeDumps exception should have been logged.
            # (Fix: assertEquals is a deprecated alias of assertEqual.)
            self.assertEqual(1, len(logObserver))
            self.assertIsInstance(logObserver[0]["log_failure"].value,
                                  RuntimeError)
            self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)

        d.addCallback(cbFailed)
        return d
def get_json_log_observer():
    """Return a JSON file observer filtered at the configured log level."""
    rotating_file = logfile.LogFile(
        "carbon_forwarder.log", log_dir,
        rotateLength=log_rotate_length,
        maxRotatedFiles=max_rotated_log_files)
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    return FilteringLogObserver(jsonFileLogObserver(rotating_file),
                                [predicate])
Example #12
0
def start_logging(session_files):
    """Begin Twisted logging to stdout and the session's server.log."""
    log_output = open(session_files.session_dir / "server.log", 'w')

    # Both observers filter at "debug", the most permissive level
    # (was: warn for stdout).
    targets = [
        FilteringLogObserver(
            textFileLogObserver(sys.stdout),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]),
        FilteringLogObserver(
            textFileLogObserver(log_output),
            predicates=[LogLevelFilterPredicate(LogLevel.debug)]),
    ]

    # Direct the Twisted Logger to log to both of our observers.
    globalLogBeginner.beginLoggingTo(targets)
Example #13
0
    def startLogging(cls, logOutput, levelStr='debug'):
        """Install a filtering text-file log observer.

        Args:
            logOutput: a writable file object, or a path string (opened in
                append mode; missing parent directories are created).
            levelStr: name of the minimum LogLevel to emit.
        """
        if isinstance(logOutput, str):
            # Fix: the local was named `dir`, shadowing the builtin.
            parent_dir = os.path.dirname(logOutput)
            if parent_dir and not os.path.exists(parent_dir):
                os.makedirs(parent_dir)
            logOutput = open(logOutput, 'a+')

        level = LogLevel.levelWithName(levelStr)
        predicate = LogLevelFilterPredicate(defaultLogLevel=level)
        observer = FilteringLogObserver(textFileLogObserver(outFile=logOutput), [predicate])
        globalLogPublisher.addObserver(observer)
Example #14
0
def divert_logger():
    """Route Twisted log events into stdlib logging, critical-only."""
    # noinspection PyPackageRequirements
    from twisted.logger import FilteringLogObserver, LogLevel, LogLevelFilterPredicate, STDLibLogObserver, globalLogBeginner
    saved_showwarning = warnings.showwarning
    observer = FilteringLogObserver(
        STDLibLogObserver(),
        [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
    globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
    warnings.showwarning = saved_showwarning  # twisted's beginLoggingTo() will divert python warnings to its own logging system. here we undo that.
Example #15
0
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    # Quieten Twisted's HTTP client factory: warn and above only.
    predicate.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
    globalLogPublisher.addObserver(
        FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate]))
Example #16
0
def getLogger(level):
    """Attach a filtered stdout observer; returns a no-op callable."""
    threshold = getattr(LogLevel, level)
    predicate = LogLevelFilterPredicate(defaultLogLevel=threshold)
    # Above debug, keep the "stdout" namespace at warn or higher.
    if threshold > LogLevel.debug:
        predicate.setLogLevelForNamespace('stdout', LogLevel.warn)
    globalLogPublisher.addObserver(
        FilteringLogObserver(stdoutFileLogObserver(), [predicate]))
    return lambda event: None
Example #17
0
    def __init__(self, udp_registry, prometheus_port=8000, gateway_port=8888, log_level='INFO'):
        """Collector wiring: metrics registry, ports, and a filtered logger."""
        # TODO: add available log levels
        # TODO: change name to collector?
        self._metrics = udp_registry.get_normal_metrics()

        self._prometheus_port = prometheus_port
        self._gateway_port = gateway_port
        self._log_level = log_level

        # Logger writing to stdout, filtered at the configured level.
        predicate = LogLevelFilterPredicate(
            LogLevel.lookupByName(self._log_level.lower()))
        observer = FilteringLogObserver(
            textFileLogObserver(sys.stdout), predicates=[predicate])
        self.log = Logger(observer=observer)
Example #18
0
    def test_errors(self):
        """
        Verify that for each way a method exposed via XML-RPC can fail, the
        correct 'Content-type' header is set in the response and that the
        client-side Deferred is errbacked with an appropriate C{Fault}
        instance.
        """
        logObserver = EventLoggingObserver()
        filtered = FilteringLogObserver(
            logObserver,
            [LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)])
        globalLogPublisher.addObserver(filtered)
        self.addCleanup(lambda: globalLogPublisher.removeObserver(filtered))
        dl = []
        # Each pair is (expected fault code, remote method name).
        for code, methodName in [
            (666, "fail"),
            (666, "deferFail"),
            (12, "fault"),
            (23, "noSuchMethod"),
            (17, "deferFault"),
            (42, "SESSION_TEST"),
        ]:
            d = self.proxy().callRemote(methodName)
            d = self.assertFailure(d, xmlrpc.Fault)
            d.addCallback(
                lambda exc, code=code: self.assertEqual(exc.faultCode, code))
            dl.append(d)
        d = defer.DeferredList(dl, fireOnOneErrback=True)

        def cb(ign):
            for factory in self.factories:
                self.assertEqual(factory.headers[b"content-type"],
                                 b"text/xml; charset=utf-8")
            # (Fix: assertEquals is a deprecated alias of assertEqual.)
            self.assertEqual(2, len(logObserver))
            f1 = logObserver[0]["log_failure"].value
            f2 = logObserver[1]["log_failure"].value

            # The two logged failures may arrive in either order.
            if isinstance(f1, TestValueError):
                self.assertIsInstance(f2, TestRuntimeError)
            else:
                self.assertIsInstance(f1, TestRuntimeError)
                self.assertIsInstance(f2, TestValueError)

            self.flushLoggedErrors(TestRuntimeError, TestValueError)

        d.addCallback(cb)
        return d
Example #19
0
 def configure(verbose, quiet, shutup):
     """Configure stdlib and Twisted logging from verbosity flags."""
     client._HTTP11ClientFactory.noisy = False
     if not logging.getLogger().handlers:
         handler = logging.StreamHandler(sys.stdout)
         handler.setFormatter(logging.Formatter(LOG_FORMAT))
         logging.getLogger().addHandler(handler)
         if verbose:
             from twisted.logger import (
                 LogLevel, globalLogBeginner, textFileLogObserver,
                 FilteringLogObserver, LogLevelFilterPredicate)
             # Twisted defaults to warn; the noisy namespaces are pinned.
             twisted_log_filter = LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn)
             twisted_log_filter.setLogLevelForNamespace(namespace="stdout", level=LogLevel.critical)
             twisted_log_filter.setLogLevelForNamespace(namespace="twisted", level=LogLevel.warn)
             twisted_log_filter.setLogLevelForNamespace(namespace="mqtt", level=LogLevel.warn)
             globalLogBeginner.beginLoggingTo(
                 [FilteringLogObserver(observer=textFileLogObserver(sys.stdout),
                                       predicates=[twisted_log_filter])],
                 redirectStandardIO=False)
     # FATAL if shutup, ERROR if quiet, DEBUG if verbose, else INFO.
     logging.getLogger().setLevel(logging.FATAL if shutup else logging.ERROR if quiet else logging.DEBUG if verbose else logging.INFO)
Example #20
0
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        # Build the file observer from the configured factory and file.
        observer = self.fileLogObserverFactory(self.logFile)

        # Filter at the configured default level.
        predicate = LogLevelFilterPredicate(
            defaultLogLevel=self.defaultLogLevel)

        globalLogBeginner.beginLoggingTo(
            [FilteringLogObserver(observer, [predicate])])
Example #21
0
    def startLogging(self):
        """Begin logging to the configured (or default) destination."""
        destination = self.options.get("logFile", sys.stderr)
        factory = self.options.get(
            "fileLogObserverFactory", textFileLogObserver)

        # Filter at the configured level, defaulting to info.
        predicate = LogLevelFilterPredicate(
            defaultLogLevel=self.options.get("logLevel", LogLevel.info))

        globalLogBeginner.beginLoggingTo(
            [FilteringLogObserver(factory(destination), [predicate])])
Example #22
0
def setup_logging(log_level, log_name, log_directory=""):
    """
    Configure the logger to use the specified log file and log level
    """
    # Restrict the "orscanner" namespace to the requested severity.
    predicate = LogLevelFilterPredicate()
    predicate.setLogLevelForNamespace(
        "orscanner", LogLevel.levelWithName(log_level.lower()))

    # One observer for a daily-rotated file, one for the console.
    daily_file = DailyLogFile(log_name, log_directory)
    observers = (
        FileLogObserver(daily_file, log_event_format),
        FileLogObserver(sys.stdout, log_event_format),
    )
    for observer in observers:
        globalLogPublisher.addObserver(
            FilteringLogObserver(observer, (predicate, )))
Example #23
0
    def startLogging(self):
        """
        Start the L{twisted.logger} logging system.
        """
        destination = self.options.get(RunnerOptions.logFile, stderr)
        factory = self.options.get(
            RunnerOptions.fileLogObserverFactory, textFileLogObserver)

        # Filter at the configured default level, falling back to info.
        predicate = LogLevelFilterPredicate(
            defaultLogLevel=self.options.get(RunnerOptions.defaultLogLevel,
                                             LogLevel.info))

        globalLogBeginner.beginLoggingTo(
            [FilteringLogObserver(factory(destination), [predicate])])
Example #24
0
def reset_log_file():
    """Swap the global log observer for one targeting the current file."""
    global log_observer
    if log_observer:
        print('removing log observer')
        globalLogPublisher.removeObserver(log_observer)

    # Level comes from the command line, falling back to the config file.
    level_name = parsed_args.log_level or config['log_level']
    predicate = LogLevelFilterPredicate(
        LogLevel.levelWithName(level_name.lower()))

    # Append to the configured file, or fall back to stderr.
    if mlog_file_path:
        mlog_file = open(mlog_file_path, 'a+')
    else:
        mlog_file = sys.stderr

    mlog_observer = FilteringLogObserver(textFileLogObserver(mlog_file),
                                         predicates=[predicate])
    globalLogPublisher.addObserver(mlog_observer)

    # logger.info('resetting log output file')
Example #25
0
    def start(self, console, logfile, debug):
        """Configure and start logging based on user preferences

        Args:
            console (bool): Console logging enabled
            logfile (str): Logfile path
            debug (bool): Debugging flag

        Returns:
            bool: True once logging has begun, False if the logfile is
            unusable.
        """
        global predicate

        # Set logging level
        level = LogLevel.debug if debug else LogLevel.info
        predicate = LogLevelFilterPredicate(defaultLogLevel=level)

        # Log to console option
        if console:
            f = sys.stdout

        # Log to file option
        else:
            # Check the file is valid and can be opened in append mode.
            # (Fix: the original used Python 2 print statements, which are
            # syntax errors on Python 3; use the print() function.)
            if os.path.exists(logfile) and not os.path.isfile(logfile):
                print("Logfile %s is not a valid file. Exiting." % logfile)
                return False
            try:
                f = open(logfile, 'a')
            except IOError:
                print("Can't open logfile %s. Exiting." % logfile)
                return False

        # Set the observer
        observer = textFileLogObserver(f)
        observers = [
            FilteringLogObserver(observer=observer, predicates=[predicate])
        ]
        # Begin logging
        globalLogBeginner.beginLoggingTo(observers)
        return True
Example #26
0
    def __init__(self,
                 port=8080,
                 encrypt=None,
                 config=0,
                 auth=LDAP_AUTH_SIMPLE,
                 validate=False):
        """Thread running an LDAP server, logging to ldaptor.log at info."""
        threading.Thread.__init__(self)
        self.is_running = False

        logfile = open("ldaptor.log", 'a')

        level = LogLevel.info
        predicate = LogLevelFilterPredicate(defaultLogLevel=level)
        observer = FilteringLogObserver(textFileLogObserver(logfile),
                                        [predicate])
        # remove std logger to stderr in case of Failure
        # (Fix: guard against an empty observer list, which previously
        # raised IndexError; note this relies on a private attribute.)
        if globalLogPublisher._observers:
            globalLogPublisher.removeObserver(globalLogPublisher._observers[0])
        globalLogPublisher.addObserver(observer)

        registerAdapter(lambda x: x.root, LDAPServerFactory,
                        IConnectedLDAPEntry)

        self._createListner(port, encrypt, config, auth, validate)
Example #27
0
    def __init__(self, interface: bytes, config_dict: Dict[str, Any]) -> None:
        """Configure and start the game server from config options.

        Sets up logging, loads bans, reads all game/team options, starts
        optional services (SSH, IRC, status server, ban publishing), then
        initializes the underlying ServerProtocol and kicks off the map
        rotation and periodic ban vacuuming.
        """
        # logfile path relative to config dir if not abs path
        log_filename = logfile.get()
        if log_filename.strip():  # catches empty filename
            if not os.path.isabs(log_filename):
                log_filename = os.path.join(config.config_dir, log_filename)
            ensure_dir_exists(log_filename)
            if logging_rotate_daily.get():
                logging_file = DailyLogFile(log_filename, '.')
            else:
                logging_file = open(log_filename, 'a')
            # Log to both stderr and the log file, filtered at the
            # configured level.
            predicate = LogLevelFilterPredicate(
                LogLevel.levelWithName(loglevel.get()))
            observers = [
                FilteringLogObserver(textFileLogObserver(sys.stderr),
                                     [predicate]),
                FilteringLogObserver(textFileLogObserver(logging_file),
                                     [predicate])
            ]
            globalLogBeginner.beginLoggingTo(observers)
            log.info('piqueserver started on %s' % time.strftime('%c'))

        self.config = config_dict
        if random_rotation.get():
            self.map_rotator_type = random_choice_cycle
        else:
            self.map_rotator_type = itertools.cycle
        self.default_time_limit = default_time_limit.get()
        self.default_cap_limit = cap_limit.get()
        self.advance_on_win = int(advance_on_win.get())
        self.win_count = itertools.count(1)
        self.bans = NetworkDict()

        # attempt to load a saved bans list
        try:
            with open(os.path.join(config.config_dir, bans_file.get()),
                      'r') as f:
                self.bans.read_list(json.load(f))
            log.debug("loaded {count} bans", count=len(self.bans))
        except FileNotFoundError:
            log.debug("skip loading bans: file unavailable",
                      count=len(self.bans))
        except IOError as e:
            log.error('Could not read bans.txt: {}'.format(e))
        except ValueError as e:
            log.error('Could not parse bans.txt: {}'.format(e))

        self.hard_bans = set()  # possible DDoS'ers are added here
        self.player_memory = deque(maxlen=100)
        if len(self.name) > MAX_SERVER_NAME_SIZE:
            log.warn('(server name too long; it will be truncated to "%s")' %
                     (self.name[:MAX_SERVER_NAME_SIZE]))
        self.respawn_time = respawn_time_option.get()
        self.respawn_waves = respawn_waves.get()

        # since AoS only supports CTF and TC at a protocol level, we need to get
        # the base game mode if we are using a custom game mode.
        game_mode_name = game_mode.get()
        if game_mode_name == 'ctf':
            self.game_mode = CTF_MODE
        elif game_mode.get() == 'tc':
            self.game_mode = TC_MODE
        # NOTE(review): if game_mode_name is neither 'ctf' nor 'tc',
        # self.game_mode has not been assigned yet, so reading it here
        # looks like it would raise AttributeError (unless a class-level
        # default exists elsewhere) — confirm; possibly this was meant to
        # test game_mode_name instead.
        elif self.game_mode not in [CTF_MODE, TC_MODE]:
            raise ValueError(
                'invalid game mode: custom game mode "{}" does not set '
                'protocol.game_mode to one of TC_MODE or CTF_MODE. Are '
                'you sure the thing you have specified is a game mode?'.format(
                    game_mode_name))

        self.game_mode_name = game_mode.get().split('.')[-1]
        self.team1_name = team1_name.get()[:9]
        self.team2_name = team2_name.get()[:9]
        self.team1_color = tuple(team1_color.get())
        self.team2_color = tuple(team2_color.get())
        self.friendly_fire = friendly_fire.get()
        self.friendly_fire_on_grief = friendly_fire_on_grief.get()
        self.friendly_fire_time = grief_friendly_fire_time.get()
        self.spade_teamkills_on_grief = spade_teamkills_on_grief.get()
        self.fall_damage = fall_damage.get()
        self.teamswitch_interval = teamswitch_interval.get()
        self.teamswitch_allowed = teamswitch_allowed.get()
        self.max_players = max_players.get()
        self.melee_damage = melee_damage.get()
        self.max_connections_per_ip = max_connections_per_ip.get()
        self.passwords = passwords.get()
        self.server_prefix = server_prefix.get()
        self.time_announcements = time_announcements.get()
        self.balanced_teams = balanced_teams.get()
        self.login_retries = login_retries.get()

        # voting configuration
        self.default_ban_time = default_ban_duration.get()

        self.speedhack_detect = speedhack_detect.get()
        self.rubberband_distance = rubberband_distance.get()
        if user_blocks_only.get():
            self.user_blocks = set()
        self.set_god_build = set_god_build.get()
        self.debug_log = debug_log_enabled.get()
        if self.debug_log:
            # TODO: make this configurable
            pyspades.debug.open_debug_log(
                os.path.join(config.config_dir, 'debug.log'))
        if ssh_enabled.get():
            from piqueserver.ssh import RemoteConsole
            self.remote_console = RemoteConsole(self)
        irc = irc_options.get()
        if irc.get('enabled', False):
            from piqueserver.irc import IRCRelay
            self.irc_relay = IRCRelay(self, irc)
        if status_server_enabled.get():
            from piqueserver.statusserver import StatusServer
            self.status_server = StatusServer(self)
            ensureDeferred(self.status_server.listen())
        if ban_publish.get():
            from piqueserver.banpublish import PublishServer
            self.ban_publish = PublishServer(self, ban_publish_port.get())
        if bans_urls.get():
            from piqueserver import bansubscribe
            self.ban_manager = bansubscribe.BanManager(self)
        self.start_time = time.time()
        self.end_calls = []
        # TODO: why is this here?
        create_console(self)

        # Grant each configured user type its command rights.
        for user_type, func_names in rights.get().items():
            for func_name in func_names:
                commands.add_rights(user_type, func_name)

        self.port = port_option.get()
        ServerProtocol.__init__(self, self.port, interface)
        self.host.intercept = self.receive_callback

        try:
            self.set_map_rotation(self.config['rotation'])
        except MapNotFound as e:
            log.critical('Invalid map in map rotation (%s), exiting.' % e.map)
            raise SystemExit

        map_load_d = self.advance_rotation()
        # discard the result of the map advance for now
        map_load_d.addCallback(lambda x: self._post_init())

        ip_getter = ip_getter_option.get()
        if ip_getter:
            ensureDeferred(as_deferred(self.get_external_ip(ip_getter)))

        self.new_release = None
        notify_new_releases = config.option("release_notifications",
                                            default=True)
        if notify_new_releases.get():
            ensureDeferred(as_deferred(self.watch_for_releases()))

        self.vacuum_loop = LoopingCall(self.vacuum_bans)
        # Run the vacuum every 6 hours, and kick it off it right now
        self.vacuum_loop.start(60 * 60 * 6, True)

        reactor.addSystemEventTrigger('before', 'shutdown',
                                      lambda: ensureDeferred(self.shutdown()))
Example #28
0
    def makeService(store):
        """Build the UpgraderService for the opened store."""
        return UpgraderService(store, options, output, reactor, config)

    def onlyUpgradeEvents(eventDict):
        """Format a log event and write it, timestamped, to the tool output."""
        text = formatEvent(eventDict)
        output.write(
            formatTime(eventDict.get("log_time", time.time())) + " " + text +
            "\n")
        output.flush()

    if not options["status"] and not options["check"]:
        # When doing an upgrade always send L{LogLevel.warn} logging to the tool output
        log.observer.addObserver(
            FilteringLogObserver(onlyUpgradeEvents, [
                LogLevelFilterPredicate(defaultLogLevel=LogLevel.warn),
            ]))

    def customServiceMaker():
        """Return a CalDAVServiceMaker honoring the postprocess option."""
        customService = CalDAVServiceMaker()
        customService.doPostImport = options["postprocess"]
        return customService

    def _patchConfig(config):
        """Adjust config flags for status/check runs and optional prefix."""
        config.FailIfUpgradeNeeded = options["status"] or options["check"]
        config.CheckExistingSchema = options["check"]
        if options["prefix"]:
            config.UpgradeHomePrefix = options["prefix"]
        if not options["status"] and not options["check"]:
            # Real upgrades log at debug for maximum detail.
            config.DefaultLogLevel = "debug"
Example #29
0
from function.handler import root

from twisted.internet import endpoints
from twisted.internet import reactor
from twisted.logger import FilteringLogObserver
from twisted.logger import LogLevel
from twisted.logger import LogLevelFilterPredicate
from twisted.logger import globalLogBeginner
from twisted.logger import textFileLogObserver
from twisted.web.server import Site


def main():
    """Serve the function handler over HTTP on port 5000."""
    site = Site(root)
    endpoint = endpoints.serverFromString(reactor, 'tcp:port=5000')
    endpoint.listen(site)


if __name__ == '__main__':
    # Fix: `sys` is used below but does not appear in this module's
    # imports; import it here so sys.stderr resolves.
    import sys

    # Log everything (debug and up) to stderr.
    log_filter = LogLevelFilterPredicate(LogLevel.debug)
    output = textFileLogObserver(sys.stderr)
    log_observer = FilteringLogObserver(observer=output,
                                        predicates=[log_filter])
    globalLogBeginner.beginLoggingTo([log_observer])

    reactor.callWhenRunning(main)
    reactor.run()
Example #30
0
from twisted.internet import reactor, task
from twisted.internet.endpoints import clientFromString
from twisted.application.internet import ClientService, backoffPolicy

from twisted.logger import (Logger, LogLevel, globalLogBeginner,
                            textFileLogObserver, FilteringLogObserver,
                            LogLevelFilterPredicate)

from mqtt.client.factory import MQTTFactory

# ----------------
# Global variables
# ----------------

# Global object to control globally namespace logging
# (a shared LogLevelFilterPredicate; startLogging() below declares it
# global, presumably to reconfigure it — body not fully visible here).
logLevelFilterPredicate = LogLevelFilterPredicate(
    defaultLogLevel=LogLevel.info)

# MQTT broker endpoint string, in Twisted client-endpoint syntax.
BROKER = "tcp:103.200.97.197:1883"

# -----------------
# Utility Functions
# -----------------


def startLogging(console=True, filepath=None):
    '''
    Starts the global Twisted logger subsystem with maybe
    stdout and/or a file specified in the config file
    '''
    global logLevelFilterPredicate