def test_rate_limit_messages_automatically(self):
    """Messages differing only in a variable part are throttled as one group."""
    rate_filter = RateLimitingFilter(rate=1, per=1, burst=1, match='auto')

    logged = []

    for index in range(20):
        varying_record = Mock()
        varying_record.msg = 'a rate limited varying message: {varying}'.format(
            varying=index)

        identical_record = Mock()
        identical_record.msg = 'a completely different message'

        # Auto-matching treats the varying messages as the same message,
        # so only 2 of them are let through.
        if rate_filter.filter(varying_record):
            logged.append(varying_record.msg)
        # The identical messages are likewise limited to 2.
        if rate_filter.filter(identical_record):
            logged.append(identical_record.msg)
        self.mock_time.return_value += 0.1

    varying_count = sum('a rate limited varying message' in m for m in logged)
    identical_count = sum('a completely different message' in m for m in logged)
    self.assertEqual(varying_count, 2)
    self.assertEqual(identical_count, 2)
    def test_rate_limit_messages_matching_substring(self):
        """Only messages containing the configured substring are throttled."""
        rate_filter = RateLimitingFilter(rate=1, per=1, burst=1,
                                         match=['rate limited'])

        matching_record = Mock()
        matching_record.msg = 'a rate limited test message'

        non_matching_record = Mock()
        non_matching_record.msg = 'a different test message'

        logged = []

        for _ in range(20):
            # Contains the 'rate limited' substring, so only 2 get through.
            if rate_filter.filter(matching_record):
                logged.append(matching_record.msg)
            # No substring match, so all 20 are logged unthrottled.
            if rate_filter.filter(non_matching_record):
                logged.append(non_matching_record.msg)
            self.mock_time.return_value += 0.1

        matched = [m for m in logged if 'a rate limited test message' in m]
        self.assertEqual(len(matched), 2)
        self.assertEqual(logged.count('a different test message'), 20)
    def test_limit_to_two_records_per_two_seconds_with_initial_burst_of_three(
            self):
        """A burst of 3 plus a 2-per-2s rate admits 4 records in two seconds."""
        rate_filter = RateLimitingFilter(rate=2, per=2, burst=3)

        outcomes = self._filter_twenty_records_over_two_seconds(rate_filter)

        self.assertEqual(outcomes.count(True), 4)
Beispiel #4
0
def init_logging():
    """Configure the ``synch`` logger.

    Sets the logger level from ``Settings.debug()`` (DEBUG when enabled,
    INFO otherwise), attaches a stdout stream handler, and — when a
    ``mail`` section is configured — an ERROR-level SMTP handler whose
    emails are rate limited to at most one per minute.

    :return: None
    """
    base_logger = logging.getLogger("synch")
    debug = Settings.debug()
    base_logger.setLevel(logging.DEBUG if debug else logging.INFO)
    fmt = logging.Formatter(
        fmt="%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(fmt)
    base_logger.addHandler(stream_handler)
    mail = Settings.get("mail")
    if mail:
        # Throttle error emails: per=60 limits to the filter's default rate
        # once every 60 seconds.
        rate_limit = RateLimitingFilter(per=60)
        # Use a distinct name rather than reusing the stream handler's
        # variable, so the two handlers cannot be confused.
        smtp_handler = logging.handlers.SMTPHandler(
            mailhost=mail.get("mailhost"),
            fromaddr=mail.get("fromaddr"),
            toaddrs=mail.get("toaddrs"),
            subject=mail.get("subject"),
            credentials=(mail.get("user"), mail.get("password")),
        )
        smtp_handler.setLevel(logging.ERROR)
        smtp_handler.setFormatter(fmt)
        smtp_handler.addFilter(rate_limit)
        base_logger.addHandler(smtp_handler)
Beispiel #5
0
    def _setup_logging(self, config):
        """Configure process logging from the file named in ``config['log']['config_file']``.

        After loading the logging configuration, a shared rate limiting
        filter (rate/per/burst from ``config['log']``) is attached to every
        registered handler.

        Raises:
            PanoptesConfigurationError: if ``logging.config.fileConfig``
                fails to load the configuration file.
        """
        log_config_file = config[u'log'][u'config_file']
        self._logger.info(u'Logging configuration file: ' + log_config_file)

        try:
            logging.config.fileConfig(log_config_file)
        except Exception:
            # Print the traceback to stderr before re-raising as a domain
            # error, so the failure is visible even if logging is broken.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type,
                                      exc_value,
                                      exc_traceback,
                                      limit=2,
                                      file=sys.stderr)
            raise PanoptesConfigurationError(
                u'Could not instantiate logger with logging configuration provided in file "%s": (%s) %s'
                % (log_config_file, exc_type, exc_value))

        # Create a filter to rate limit logs so that a misconfiguration or failure does not make the disk I/O go
        # berserk or fill up the disk space. We do this in code instead of configuration for two reasons:
        # - It enforces a filter on every handler, so no chance of messing them up in configuration
        # - We use fileConfig (not dictConfig) to setup our logging and fileConfig does not support filter configuration
        throttle = RateLimitingFilter(rate=config[u'log'][u'rate'],
                                      per=config[u'log'][u'per'],
                                      burst=config[u'log'][u'burst'])

        # Apply the filter to all handlers. Note that this would be a shared filter across ALL logs generated by this
        # process and thus the rate/burst should be set appropriately high.
        # NOTE(review): logging._handlerList is a private CPython attribute —
        # confirm it exists on every Python version this must support.
        for handler in logging._handlerList:
            # _handlerList is a list of weakrefs, so the object returned has to be dereferenced
            handler().addFilter(throttle)
    def test_append_num_limited_records_to_message(self):
        """Records passed after throttling report how many were suppressed."""
        passed = []
        rate_filter = RateLimitingFilter(rate=1, per=1, burst=4)

        for _ in range(30):
            record = Mock()
            record.msg = 'test message'
            if rate_filter.filter(record):
                passed.append(record)
            self.mock_time.return_value += 0.1

        # Burst of 4 plus 1/sec over ~3 seconds lets 6 records through.
        self.assertEqual(len(passed), 6)
        fifth_suffix = os.linesep + '... 6 additional messages suppressed'
        sixth_suffix = os.linesep + '... 9 additional messages suppressed'
        self.assertTrue(passed[4].msg.endswith(fifth_suffix))
        self.assertTrue(passed[5].msg.endswith(sixth_suffix))
Beispiel #7
0
    def test_log_exception(self):
        """Logging an exception through a throttled handler must not raise."""
        logger = logging.getLogger('test')
        handler = logging.StreamHandler()
        throttle = RateLimitingFilter(rate=1, per=1, burst=1)
        handler.addFilter(throttle)
        logger.addHandler(handler)

        try:
            logger.error('First')
            raise RuntimeError('Expected exception')
        except RuntimeError as exc:
            # The single-record burst was consumed by 'First', so this
            # exception record is throttled rather than emitted.
            logger.exception(exc)
Beispiel #8
0
 def __init__(self,
              image_name,
              server_name,
              data_bind,
              container_cmd,
              host_only,
              fixed_tag=None,
              registry='docker.io',
              proxies=None):
     """Initialize the client.

     Stores the given docker/container configuration, attaches a rate
     limiting filter so repeated docker connection errors are logged at
     most once every 5 minutes, probes the docker service once via
     ``is_running()``, and starts two 5-second QTimer heartbeats (docker
     service status and container status).

     :param fixed_tag: optional image tag; ``None`` means a tag is
         determined later (set up by ``is_running()``).
     :param registry: docker registry name (default ``'docker.io'``) —
         not yet used, see the TODO below.
     """
     self.image_name = image_name
     self.server_name = server_name
     self.data_bind = data_bind
     self.container_cmd = container_cmd
     self.host_only = host_only
     self.fixed_tag = fixed_tag
     self.registry = registry
     self.proxies = proxies
     # TODO: plumb in registry
     self.logger = labslauncher.get_named_logger("DckrClnt")
     # throttle connection errors to once every 5 minutes
     spam = [
         'Could not create docker client', 'Failed to query docker client'
     ]
     self.logger.addFilter(
         RateLimitingFilter(rate=1, per=300, burst=1, match=spam))
     self.logger.info("""Creating docker client with options:
        image name: {}
        server name: {}
        data bind: {}
        command: {}
        host only: {}
        fixed tag: {}
        proxies: {}""".format(image_name, server_name, data_bind,
                              container_cmd, host_only, fixed_tag, proxies))
     # Lazily created docker client plus bookkeeping for the last failure.
     self._client = None
     self.total_size = None
     self.final_stats = None
     self.last_failure = "Unknown error"
     self.last_failure_type = None
     self.is_running()  # sets up tag, status, and available
     # docker service heartbeat
     self.dheartbeat = QTimer()
     self.dheartbeat.setInterval(1000 * 5)  # 5 seconds
     self.dheartbeat.start()
     self.dheartbeat.timeout.connect(self.is_running)
     # container status heartbeat
     self.cheartbeat = QTimer()
     self.cheartbeat.setInterval(1000 * 5)  # 5 seconds
     self.cheartbeat.start()
     self.cheartbeat.timeout.connect(self.set_status)
    def test_limit_to_no_records_per_second(self):
        """A zero rate with no burst blocks every record."""
        rate_filter = RateLimitingFilter(rate=0, per=0, burst=0)

        outcomes = self._filter_twenty_records_over_two_seconds(rate_filter)

        self.assertEqual(outcomes.count(True), 0)
    def test_limit_to_one_record_per_two_seconds(self):
        """One record per 2 seconds admits a single record over the window."""
        rate_filter = RateLimitingFilter(rate=1, per=2, burst=1)

        outcomes = self._filter_twenty_records_over_two_seconds(rate_filter)

        self.assertEqual(outcomes.count(True), 1)
Beispiel #11
0
import logging
import time

from ratelimitingfilter import RateLimitingFilter

logger = logging.getLogger('substring_example')
logger.setLevel(logging.DEBUG)

# Console handler that prints every record with a timestamped format.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Throttle only records whose message contains the 'rate limited' substring:
# 1 record per second with a burst of 1.
throttle = RateLimitingFilter(rate=1, per=1, burst=1, match=['rate limited'])
ch.addFilter(throttle)

for _ in range(31):
    # Substring match -> subject to the rate limit.
    logger.debug('a message that is rate limited')
    # No substring match -> always logged.
    logger.debug('some message')
    time.sleep(0.1)
Beispiel #12
0
if not app.debug:
    # Only pass SMTP credentials when a username or password is configured.
    credentials = None
    if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
        credentials = (app.config['MAIL_USERNAME'],
                       app.config['MAIL_PASSWORD'])

    # Email unhandled exceptions to the configured admins.
    mail_handler = SMTPHandler(
        (app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
        app.config['MAIL_FROM'],
        app.config['ADMINS'],
        'webfaf exception',
        credentials,
    )
    mail_handler.setLevel(logging.ERROR)

    # Throttle exception emails per the configured rate/timeframe/burst.
    rate_limiter = RateLimitingFilter(app.config['THROTTLING_RATE'],
                                      app.config['THROTTLING_TIMEFRAME'],
                                      app.config['THROTTLING_BURST'])
    mail_handler.addFilter(rate_limiter)
    app.logger.addHandler(mail_handler)  # pylint: disable=no-member


@app.errorhandler(403)
def forbidden(_) -> Tuple[str, int]:
    """Render the 403 error page with a Forbidden status code."""
    page = flask.render_template("403.html")
    return page, 403


@app.errorhandler(404)
def not_found(_) -> Tuple[str, int]:
    """Render the 404 error page with a Not Found status code."""
    page = flask.render_template("404.html")
    return page, 404
import logging
import time

from ratelimitingfilter import RateLimitingFilter

logger = logging.getLogger('console_example')
logger.setLevel(logging.DEBUG)

# Console handler that prints every record with a timestamped format.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Rate limiting filter on the console handler:
# 1 record per second, allowing an initial burst of 3.
throttle = RateLimitingFilter(rate=1, per=1, burst=3)
ch.addFilter(throttle)

for i in range(31):
    # This attempts to log 31 messages in a little over 3 seconds, equally
    # spaced 1/10 second apart. The burst admits the first 3 messages in the
    # first second, then the 1-per-second rate admits 1 message in each of
    # the 2nd, 3rd and 4th seconds, for a total of 6.
    logger.debug('This is message {0}'.format(i))
    time.sleep(0.1)
Beispiel #14
0
        # Rotating log file capped at LOG_FILE_MAX_SIZE_MB megabytes with
        # LOG_FILE_BACKUP_COUNT rotated backups.
        file_handler = RotatingFileHandler(logFile,
                                           mode='a',
                                           maxBytes=LOG_FILE_MAX_SIZE_MB *
                                           1024 * 1024,
                                           backupCount=LOG_FILE_BACKUP_COUNT,
                                           encoding=None,
                                           delay=0)
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(logging.INFO)

        # Send email on critical errors.
        # NOTE(review): the subject misspells "occurred"; it is a runtime
        # string, so it is left unchanged here.
        smtp_handler = SMTPHandler(
            mailhost=(EMAIL_HOST, EMAIL_PORT),
            fromaddr=EMAIL_FROM,
            toaddrs=EMAIL_TO,
            credentials=(EMAIL_USER, EMAIL_PASSWORD),
            subject=u"Anemio station: a critical error has occured.")
        smtp_handler.setLevel(logging.CRITICAL)
        # Throttle critical-error emails to EMAIL_RATE per EMAIL_RATE_SECONDS,
        # allowing an initial burst of EMAIL_RATE_BURST.
        ratelimit = RateLimitingFilter(rate=EMAIL_RATE,
                                       per=EMAIL_RATE_SECONDS,
                                       burst=EMAIL_RATE_BURST)
        smtp_handler.addFilter(ratelimit)

        # The 'anemio' logger writes INFO+ to the rotating file and emails
        # CRITICAL records via the throttled SMTP handler.
        logger = logging.getLogger('anemio')
        logger.setLevel(logging.INFO)
        logger.addHandler(file_handler)
        logger.addHandler(smtp_handler)

    # Run the radio daemon with the logger configured above.
    radio_daemon = RadioDaemon(logger=logger)
    radio_daemon.run()
import logging.handlers
import time

from ratelimitingfilter import RateLimitingFilter

logger = logging.getLogger('throttled_smtp_example')

# SMTP handler that emails ERROR-and-above records.
smtp = logging.handlers.SMTPHandler(mailhost='relay.somehost.net',
                                    fromaddr='*****@*****.**',
                                    toaddrs='*****@*****.**',
                                    subject='An error has occurred')
smtp.setLevel(logging.ERROR)

# A RateLimitingFilter with default settings throttles the handler.
throttle = RateLimitingFilter()
smtp.addFilter(throttle)

# Timestamped message format for the emails.
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
smtp.setFormatter(formatter)

logger.addHandler(smtp)

# Errors are attempted every 2 seconds but restricted to 1 every 30 seconds.
while True:
    logger.error('An error message')
    time.sleep(2)