Example #1
    def test_upstream_response_timed_out(self):
        line = '2015/07/14 08:42:57 [error] 28386#28386: *38698 upstream timed out ' + \
               '(110: Connection timed out) while reading response header from upstream, ' + \
               'client: 127.0.0.1, server: localhost, request: "GET /1.0/ HTTP/1.0", ' + \
               'upstream: "uwsgi://127.0.0.1:3131", host: "localhost:5000"'

        parser = NginxErrorLogParser()
        parsed = parser.parse(line)
        assert_that(parsed, equal_to('nginx.upstream.response.failed'))
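The tests in this listing only pin down the parse() contract: a line describing a known upstream problem maps to one of the counter names, and anything unrecognised maps to None (see Example #7). A minimal sketch of such a mapping, assuming plain substring matching; the patterns and the helper name below are illustrative and not the actual NginxErrorLogParser internals:

def parse_error_line(line):
    # Illustrative only: map well-known nginx error-log phrases to the
    # counter names asserted in the tests; return None for everything else.
    if 'upstream timed out' in line and 'reading response header' in line:
        return 'nginx.upstream.response.failed'
    if 'an upstream response is buffered' in line:
        return 'nginx.upstream.response.buffered'
    if 'a client request body is buffered' in line:
        return 'nginx.http.request.buffered'
    return None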
Example #2
    def __init__(self,
                 filename=None,
                 level=None,
                 log_format=None,
                 tail=None,
                 **kwargs):
        super(NginxErrorLogsCollector, self).__init__(**kwargs)
        self.filename = filename
        self.level = level
        self.parser = NginxErrorLogParser()
        self.tail = tail if tail is not None else FileTail(filename)
Example #3
    def test_upstream_response_buffered(self):
        line = '2015/07/15 05:56:33 [warn] 28386#28386: *94149 an upstream response is buffered ' + \
               'to a temporary file /var/cache/nginx/proxy_temp/4/08/0000000084 while reading upstream, ' + \
               'client: 85.141.232.177, server: *.compute.amazonaws.com, request: ' + \
               '"POST /api/metrics/query/timeseries/ HTTP/1.1", upstream: ' + \
               '"http://127.0.0.1:3000/api/metrics/query/timeseries/", host: ' + \
               '"ec2-54-78-3-178.eu-west-1.compute.amazonaws.com:4000", referrer: ' + \
               '"http://ec2-54-78-3-178.eu-west-1.compute.amazonaws.com:4000/"'

        parser = NginxErrorLogParser()
        parsed = parser.parse(line)
        assert_that(parsed, equal_to('nginx.upstream.response.buffered'))
Example #4
class NginxErrorLogsCollector(AbstractCollector):

    short_name = 'nginx_elog'

    counters = (
        'nginx.http.request.buffered',
        'nginx.upstream.response.buffered',
        'nginx.upstream.request.failed',
        'nginx.upstream.response.failed',
    )

    def __init__(self,
                 filename=None,
                 level=None,
                 log_format=None,
                 tail=None,
                 **kwargs):
        super(NginxErrorLogsCollector, self).__init__(**kwargs)
        self.filename = filename
        self.level = level
        self.parser = NginxErrorLogParser()
        self.tail = tail if tail is not None else FileTail(filename)

    def init_counters(self):
        for counter in self.counters:
            self.object.statsd.incr(counter, value=0)

    def collect(self):
        # If log_level is <= warn (e.g. debug, info, notice, warn)
        if ERROR_LOG_LEVELS.index(self.level) <= 3:
            self.init_counters()  # set all error counters to 0

        count = 0
        for line in self.tail:
            count += 1
            try:
                error = self.parser.parse(line)
            except:
                context.log.debug('could not parse line %s' % line,
                                  exc_info=True)
                error = None

            if error:
                try:
                    self.object.statsd.incr(error)
                except Exception as e:
                    exception_name = e.__class__.__name__
                    context.log.error(
                        'failed to collect error log metrics due to %s' %
                        exception_name)
                    context.log.debug('additional info:', exc_info=True)

        context.log.debug('%s processed %s lines from %s' %
                          (self.object.definition_hash, count, self.filename))
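The ERROR_LOG_LEVELS.index(self.level) <= 3 check only works if the constant lists nginx's error_log severities in ascending order: debug, info, notice, warn, error, crit, alert, emerg. Its exact definition is not shown in this listing, but it is presumably something like the sketch below, so that indices 0 through 3 are exactly the levels at or below warn, where the warn-level "buffered" messages would actually appear and zero-initialising the counters is meaningful:

# Presumed shape of the constant (assumption, not taken from this listing):
# nginx error_log severities ordered from most to least verbose.
ERROR_LOG_LEVELS = ['debug', 'info', 'notice', 'warn', 'error', 'crit', 'alert', 'emerg']

# 'warn' sits at index 3, so index(self.level) <= 3 means the configured
# log level is verbose enough to contain the warn-level events counted here.
assert ERROR_LOG_LEVELS.index('warn') == 3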
Example #5
class NginxErrorLogsCollector(AbstractCollector):
    short_name = 'nginx_elog'

    zero_counters = (
        'nginx.http.request.buffered',
        'nginx.upstream.response.buffered',
        'nginx.upstream.request.failed',
        'nginx.upstream.response.failed',
    )

    def __init__(self,
                 filename=None,
                 level=None,
                 log_format=None,
                 tail=None,
                 **kwargs):
        super(NginxErrorLogsCollector, self).__init__(**kwargs)
        self.filename = filename
        self.level = level
        self.parser = NginxErrorLogParser()
        self.tail = tail if tail is not None else FileTail(filename)
        self.register(self.error_log_parsed)

    def collect(self):
        # If log_level is <= warn (e.g. debug, info, notice, warn)
        if ERROR_LOG_LEVELS.index(self.level) <= 3:
            self.init_counters()  # set all error counters to 0

        count = 0
        for line in self.tail:
            count += 1
            try:
                error = self.parser.parse(line)
            except:
                context.log.debug('could not parse line %s' % line,
                                  exc_info=True)
                error = None

            if error:
                super(NginxErrorLogsCollector, self).collect(error)

        tail_name = self.tail.name if isinstance(self.tail,
                                                 Pipeline) else 'list'
        context.log.debug('%s processed %s lines from %s' %
                          (self.object.definition_hash, count, tail_name))

    def error_log_parsed(self, error):
        self.object.statsd.incr(error)
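Unlike Example #4, this variant does not call statsd directly from collect(): __init__ registers error_log_parsed as a handler, and collect() hands each parsed counter name to super().collect(). The AbstractCollector internals are not shown here, but the pattern this implies is roughly the following sketch, with names and behaviour assumed purely for illustration:

class HandlerDispatchSketch(object):
    # Rough sketch of the register/dispatch behaviour Example #5 relies on;
    # an assumption about AbstractCollector, not its real code.
    def __init__(self):
        self.handlers = []

    def register(self, *handlers):
        # Remember callables to invoke for every collected value.
        self.handlers.extend(handlers)

    def collect(self, *args):
        # Fan each collected value out to all registered handlers,
        # e.g. error_log_parsed(error) -> statsd.incr(error).
        for handler in self.handlers:
            handler(*args)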
Example #6
class NginxErrorLogsCollector(AbstractCollector):

    short_name = 'nginx_elog'

    counters = (
        'nginx.http.request.buffered',
        'nginx.upstream.response.buffered',
        'nginx.upstream.request.failed',
        'nginx.upstream.response.failed',
    )

    def __init__(self, filename=None, level=None, log_format=None, tail=None, **kwargs):
        super(NginxErrorLogsCollector, self).__init__(**kwargs)
        self.filename = filename
        self.level = level
        self.parser = NginxErrorLogParser()
        self.tail = tail if tail is not None else FileTail(filename)

    def init_counters(self):
        for counter in self.counters:
            self.object.statsd.incr(counter, value=0)

    def collect(self):
        # If log_level is <= warn (e.g. debug, info, notice, warn)
        if ERROR_LOG_LEVELS.index(self.level) <= 3:
            self.init_counters()  # set all error counters to 0

        count = 0
        for line in self.tail:
            count += 1
            try:
                error = self.parser.parse(line)
            except:
                context.log.debug('could not parse line %s' % line, exc_info=True)
                error = None

            if error:
                try:
                    self.object.statsd.incr(error)
                except Exception as e:
                    exception_name = e.__class__.__name__
                    context.log.error('failed to collect error log metrics due to %s' % exception_name)
                    context.log.debug('additional info:', exc_info=True)

        context.log.debug('%s processed %s lines from %s' % (self.object.definition_hash, count, self.filename))
Example #7
    def test_none_found(self):
        line = '2015/07/15 05:56:30 [info] 28386#28386: *94160 client 10.196.158.41 closed keepalive connection'
        parser = NginxErrorLogParser()
        parsed = parser.parse(line)
        assert_that(parsed, equal_to(None))

    def __init__(self, filename=None, level=None, log_format=None, tail=None, **kwargs):
        super(NginxErrorLogsCollector, self).__init__(**kwargs)
        self.filename = filename
        self.level = level
        self.parser = NginxErrorLogParser()
        self.tail = tail if tail is not None else FileTail(filename)