Example #1
    def test_monitorstream_events_validation(self):
        """Entries lacking any recognized event field are filtered out."""
        log_data = [
            {"msg_title": "title", "timestamp": 1336999561},
            {"msg_text": "body", "timestamp": 1336999561},
            {"none of the above": "should get filtered out", "timestamp": 1336999561},
        ]

        expected_output = {
            "monitorstreamEvents": [
                {
                    "timestamp": 1336999561,
                    "msg_title": "title",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

                {
                    "timestamp": 1336999561,
                    "msg_text": "body",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

            ]
        }

        self._write_log([repr(d) for d in log_data])

        monitorstream = monitorstreams.init(
            self.logger,
            {'monitorstreams': '{0}:{1}:repr_event_parser'.format(self.log_file.name, __name__)})
        actual_output = monitorstream.check(self.config, move_end=False)
        self.assertEquals(expected_output, actual_output)
        self.assertEqual(expected_output, actual_output)
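
The repr_event_parser named in the spec string is defined in the test module itself (hence __name__). Below is a minimal sketch of such a parser, assuming the dogstream-style (logger, line) signature; it is not the project's actual parser. The event_type, aggregation_key, and event_object defaults in the expected output are presumably filled in by the monitorstream framework rather than by the parser:

    import ast

    KNOWN_FIELDS = frozenset(['msg_title', 'msg_text', 'timestamp'])

    def repr_event_parser(logger, line):
        # Each fixture line is repr(dict); literal_eval turns it back.
        parsed = ast.literal_eval(line)
        event = dict((k, v) for k, v in parsed.items() if k in KNOWN_FIELDS)
        if set(event) <= frozenset(['timestamp']):
            return None  # nothing event-worthy survived the field filter
        return event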
Example #2
    def test_monitorstream_events(self):
        """Formatted log lines become error/recovery alert events."""
        log_data = [
            '2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)',
            '2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)',
            '2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)',
            '2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)',
        ]
        expected_output = {
            "monitorstreamEvents": [
                {
                    "timestamp": 1336999561,
                    "alert_type": "error",
                    "host": "host0",
                    "msg_title": "host0 is down (broke its collarbone)",
                    "msg_text": "2012-05-14 12:46:01 [ERROR] - host0 is down (broke its collarbone)",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

                {
                    "timestamp": 1336999687,
                    "alert_type": "error",
                    "host": "host1",
                    "msg_title": "host1 is down (got a bloody nose)",
                    "msg_text": "2012-05-14 12:48:07 [ERROR] - host1 is down (got a bloody nose)",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

                {
                    "timestamp": 1336999923,
                    "alert_type": "success",
                    "host": "host0",
                    "msg_title": "host0 is up (collarbone healed)",
                    "msg_text": "2012-05-14 12:52:03 [RECOVERY] - host0 is up (collarbone healed)",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

                {
                    "timestamp": 1337000349,
                    "alert_type": "success",
                    "host": "host1",
                    "msg_title": "host1 is up (nose stopped bleeding)",
                    "msg_text": "2012-05-14 12:59:09 [RECOVERY] - host1 is up (nose stopped bleeding)",
                    "event_type": EventDefaults.EVENT_TYPE,
                    "aggregation_key": EventDefaults.EVENT_OBJECT,
                    "event_object": EventDefaults.EVENT_OBJECT,
                },

            ]
        }
        self._write_log(log_data)

        monitorstream = monitorstreams.init(
            self.logger,
            {'monitorstreams': '{0}:{1}:parse_events'.format(self.log_file.name, __name__)})
        actual_output = monitorstream.check(self.config, move_end=False)
        self.assertEqual(expected_output, actual_output)
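
A hedged sketch of what the parse_events helper referenced above could look like, not the original implementation. The expected timestamps match the log dates read as UTC, hence calendar.timegm rather than time.mktime:

    import calendar
    import re
    from datetime import datetime

    EVENT_RE = re.compile(
        r'(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) '
        r'\[(?P<level>\w+)\] - (?P<msg>(?P<host>\S+) .*)')

    def parse_events(logger, line):
        match = EVENT_RE.match(line)
        if match is None:
            return None
        when = datetime.strptime(match.group('date'), '%Y-%m-%d %H:%M:%S')
        return {
            'timestamp': int(calendar.timegm(when.timetuple())),
            # [ERROR] lines alert, [RECOVERY] lines resolve.
            'alert_type': 'error' if match.group('level') == 'ERROR' else 'success',
            'host': match.group('host'),
            'msg_title': match.group('msg'),
            'msg_text': line,
        }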
Example #3
    def setUp(self):
        TailTestCase.setUp(self)

        self.config = {
            'monitorstreams': self.log_file.name,
            'check_freq': 5,
        }
        log.info("Test config: %s" % self.config)
        self.monitorstream = monitorstreams.init(self.logger, self.config)
        self.maxDiff = None
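
Note that, unlike the other fixtures, this config names only the log file, with no ':module:function' parser suffix:

    # Assumption (dogstream-style convention, not verified here): with no
    # parser suffix, monitorstreams falls back to a built-in line parser
    # expecting "metric_name timestamp value [attributes]", e.g.
    #
    #   my.metric.name 1336999561 42 metric_type=gauge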
Example #4
    def test_monitorstream_new_plugin(self):
        """Ensure that class-based stateful plugins work"""
        log_data = [
            'test.metric.accumulator 1000000000 1 metric_type=counter',
            'test.metric.accumulator 1100000000 1 metric_type=counter'
        ]
        expected_output = {
            "monitorstream": [
                ('foo.bar:test.metric.accumulator', 1000000000, 1, self.counter),
                ('foo.bar:test.metric.accumulator', 1100000000, 2, self.counter)]
        }
        self._write_log(log_data)

        statemonitor = monitorstreams.init(
            self.logger,
            {'monitorstreams': '{0}:{1}:ParseClassPlugin:foo:bar'.format(self.log_file.name, __name__)})
        actual_output = statemonitor.check(self.config, move_end=False)
        self.assertEqual(expected_output, actual_output)
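
A speculative sketch of a class-based stateful plugin matching the ParseClassPlugin:foo:bar spec above (the real class is defined in the test module). The trailing foo:bar arguments are assumed to reach the constructor, and instance state is what lets the counter accumulate to 2 on the second line; the {'metric_type': 'counter'} attribute dict stands in for the test's self.counter:

    class ParseClassPlugin(object):
        def __init__(self, logger=None, user_args=(), **kwargs):
            self.logger = logger
            self.prefix = '.'.join(user_args)   # ('foo', 'bar') -> 'foo.bar'
            self.totals = {}                    # metric name -> running total

        def parse_line(self, line):
            name, timestamp, value, attrs = line.split(' ', 3)
            total = self.totals.get(name, 0) + int(value)
            self.totals[name] = total
            metric = '%s:%s' % (self.prefix, name)
            return (metric, int(timestamp), total, {'metric_type': 'counter'})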
Example #5
    def test_supervisord_parser(self):
        """supervisord log levels map onto the expected alert types."""
        from monitorstream import supervisord_log
        log_data = """2012-07-16 22:30:48,335 INFO spawned: 'monitor' with pid 20216
2012-07-14 03:02:47,325 INFO success: foo_bar entered RUNNING state, process has stayed up for > than 2 seconds (startsecs)
2012-07-17 02:53:04,600 CRIT Server 'inet_http_server' running without any HTTP authentication checking
2012-07-14 04:54:34,193 WARN received SIGTERM indicating exit request
"""
        event_type = supervisord_log.EVENT_TYPE

        expected_output = {
            "monitorstreamEvents": [
                {
                    "alert_type": "info", "event_type": event_type,
                    "aggregation_key": "monitor",
                    "event_object": "monitor",
                    "msg_title": "spawned: 'monitor' with pid 20216",
                    "timestamp": int(time.mktime(datetime(2012, 7, 16, 22, 30, 48).timetuple())),
                }, {
                    "alert_type": "success", "event_type": event_type,
                    "aggregation_key": "foo_bar",
                    "event_object": "foo_bar",
                    "msg_title": "success: foo_bar entered RUNNING state, "
                    "process has stayed up for > than 2 seconds (startsecs)",
                    "timestamp": int(time.mktime(datetime(2012, 7, 14, 3, 2, 47).timetuple())),
                }, {
                    "alert_type": "error", "event_type": event_type,
                    "aggregation_key": "inet_http_server",
                    "event_object": "inet_http_server",
                    "msg_title": "Server 'inet_http_server' running without any HTTP authentication checking",
                    "timestamp": int(time.mktime(datetime(2012, 7, 17, 2, 53, 4).timetuple())),
                }, {
                    "alert_type": "warning", "event_type": event_type,
                    "aggregation_key": "SIGTERM",
                    "event_object": "SIGTERM",
                    "msg_title": "received SIGTERM indicating exit request",
                    "timestamp": int(time.mktime(datetime(2012, 7, 14, 4, 54, 34).timetuple())),
                },
            ]
        }
        self._write_log(log_data.split("\n"))

        monitorstream = monitorstreams.init(
            self.logger,
            {'monitorstreams': '%s:monitorstream.supervisord_log:parse_supervisord' % self.log_file.name})
        actual_output = monitorstream.check(self.config, move_end=False)
        self.assertEqual(expected_output, actual_output)
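
The level-to-alert_type mapping the expected events imply, as an illustrative sketch inferred from the fixture rather than from the actual monitorstream.supervisord_log code:

    LEVEL_TO_ALERT = {'INFO': 'info', 'CRIT': 'error', 'WARN': 'warning'}

    def alert_type_for(level, message):
        # INFO lines reporting "success: ..." are promoted to success.
        if level == 'INFO' and message.startswith('success:'):
            return 'success'
        return LEVEL_TO_ALERT.get(level, 'info')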
Example #6
    def __init__(self, agentConfig, emitters, systemStats, hostname):
        self.ip = get_ip(agentConfig)
        self.emit_duration = None
        self.agentConfig = agentConfig
        self.hostname = hostname
        self.agentConfig['system_stats'] = systemStats
        self.os = get_os()
        self.plugins = None
        self.emitters = emitters
        self.check_timings = agentConfig.get('check_timings')
        self.push_times = {
            'host_metadata': {
                'start': time.time(),
                'interval': int(agentConfig.get('metadata_interval', 4 * 60 * 60))
            },
            'external_host_tags': {
                'start': time.time() - 3 * 60,
                'interval': int(agentConfig.get('external_host_tags', 5 * 60))
            },
            'agent_checks': {
                'start': time.time(),
                'interval': int(agentConfig.get('agent_checks_interval', 10 * 60))
            },
        }
        socket.setdefaulttimeout(15)
        self.run_count = 0
        self.continue_running = True
        self.hostname_metadata_cache = None
        self.initialized_checks_d = []
        self.init_failed_checks_d = {}

        self._unix_system_checks = {
            'io': u.IO(log),
            'load': u.Load(log),
            'memory': u.Memory(log),
            'processes': u.Processes(log),
            'cpu': u.Cpu(log),
            'system': u.System(log)
        }

        self._win32_system_checks = {
            'io': w32.IO(log),
            'proc': w32.Processes(log),
            'memory': w32.Memory(log),
            'network': w32.Network(log),
            'cpu': w32.Cpu(log),
            'system': w32.System(log)
        }

        self._ganglia = Ganglia(log)
        self._monitorstream = monitorstreams.init(log, self.agentConfig)
        self._ddforwarder = DdForwarder(log, self.agentConfig)

        self._agent_metrics = None

        self._metrics_checks = []

        for module_spec in [s.strip() for s in self.agentConfig.get('custom_checks', '').split(',')]:
            if len(module_spec) == 0:
                continue
            try:
                self._metrics_checks.append(modules.load(module_spec, 'Check')(log))
                log.info("Registered custom check %s" % module_spec)
                log.warning(
                    "Old-format custom checks are deprecated and will be removed in a future version; they should be moved to the checks.d interface.")
            except Exception:
                log.exception('Unable to load custom check module %s' % module_spec)
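
Illustrative only: the custom_checks loop above expects a comma-separated list of module specs, each resolving to a module that exposes a Check class taking the shared logger. A hypothetical config and the resulting calls:

    # custom_checks: checks.custom_foo, checks.custom_bar   (hypothetical names)
    #
    # For each spec, modules.load(spec, 'Check') returns the module's
    # Check class, which is instantiated as Check(log) and appended to
    # self._metrics_checks.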
Example #7
    def test_cassandra_parser(self):
        """Cassandra compaction log lines become monitorstream events."""
        from monitorstream import cassandra, common

        log_data = """ INFO [CompactionExecutor:1594] 2012-05-12 21:05:12,924 Saved test_data-Encodings-KeyCache (86400 items) in 85 ms
 INFO [CompactionExecutor:1595] 2012-05-12 21:05:15,144 Saved test_data-Metrics-KeyCache (86400 items) in 96 ms
 INFO [CompactionExecutor:1596] 2012-05-12 21:10:48,058 Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]
 INFO [CompactionExecutor:1596] 2012-05-12 21:10:54,851 Compacted to [/var/cassandra/a-hc-65-Data.db,].  102,079,134 to 101,546,397
 INFO [CompactionExecutor:1598] 2012-05-12 22:05:04,313 Saved test_data-ResourcesMetadata-KeyCache (1 items) in 10 ms
 INFO [CompactionExecutor:1599] 2012-05-12 22:05:14,813 Saved test_data-Encodings-KeyCache (86400 items) in 83 ms
 INFO [CompactionExecutor:1630] 2012-05-13 13:05:44,963 Saved test_data-Metrics-KeyCache (86400 items) in 77 ms
 INFO [CompactionExecutor:1631] 2012-05-13 13:15:01,923 Nothing to compact in data_log.  Use forceUserDefinedCompaction if you wish to force compaction of single sstables (e.g. for tombstone collection)
 INFO [CompactionExecutor:1632] 2012-05-13 13:15:01,927 Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]
 INFO [CompactionExecutor:1632] 2012-05-13 13:27:17,685 Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally
 INFO [CompactionExecutor:34] 2012-05-14 18:00:41,281 Saved test_data-Encodings-KeyCache (86400 items) in 78 ms
 INFO 13:27:17,685 Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally
"""
        alert_type = cassandra.ALERT_TYPES["INFO"]
        event_type = cassandra.EVENT_TYPE
        event_object = EventDefaults.EVENT_OBJECT

        expected_output = {
            "monitorstreamEvents": [
                {
                    "timestamp": cassandra.parse_date("2012-05-12 21:10:48,058"),
                    "msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]"[0:common.MAX_TITLE_LEN],
                    "msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6528-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6531-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6529-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6530-Data.db')]",
                    "alert_type": alert_type,
                    "auto_priority": 0,
                    "event_type": event_type,
                    "aggregation_key": event_object,
                    "event_object": event_object,
                },  {
                    "timestamp": cassandra.parse_date("2012-05-12 21:10:54,851"),
                    "msg_title": "Compacted to [/var/cassandra/a-hc-65-Data.db,].  102,079,134 to 101,546,397",
                    "alert_type": alert_type,
                    "auto_priority": 0,
                    "event_type": event_type,
                    "aggregation_key": event_object,
                    "event_object": event_object,
                },  {
                    "timestamp": cassandra.parse_date("2012-05-13 13:15:01,927"),
                    "msg_title": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]"[0:common.MAX_TITLE_LEN],
                    "msg_text": "Compacting [SSTableReader(path='/var/cassandra/data/test_data/series-hc-6527-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6522-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6532-Data.db'), SSTableReader(path='/var/cassandra/data/test_data/series-hc-6517-Data.db')]",
                    "alert_type": alert_type,
                    "event_type": event_type,
                    "auto_priority": 0,
                    "aggregation_key": event_object,
                    "event_object": event_object,
                },  {
                    "timestamp": cassandra.parse_date("2012-05-13 13:27:17,685"),
                    "msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
                    "alert_type": alert_type,
                    "event_type": event_type,
                    "auto_priority": 0,
                    "aggregation_key": event_object,
                    "event_object": event_object,
                },  {
                    "timestamp": cassandra.parse_date(datetime.utcnow().strftime("%Y-%m-%d") + " 13:27:17,685"),
                    "msg_title": "Compacting large row test_data/series:6c6f677c32 (782001077 bytes) incrementally",
                    "alert_type": alert_type,
                    "event_type": event_type,
                    "auto_priority": 0,
                    "aggregation_key": event_object,
                    "event_object": event_object,
                },
            ]
        }

        self._write_log(log_data.split("\n"))

        monitorstream = monitorstreams.init(
            self.logger,
            {'monitorstreams': '%s:monitorstream.cassandra:parse_cassandra' % self.log_file.name})
        actual_output = monitorstream.check(self.config, move_end=False)
        self.assertEqual(expected_output, actual_output)
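
A minimal sketch of what cassandra.parse_date might do, assuming (unverified) that it turns the log4j-style timestamp into epoch seconds; the last fixture line carries no date, which is why the expected event rebuilds one from datetime.utcnow() plus the bare time of day. Note also that msg_title is truncated to common.MAX_TITLE_LEN, with the full line kept in msg_text only when truncation applies:

    import time
    from datetime import datetime

    def parse_date(timestamp_string, fmt='%Y-%m-%d %H:%M:%S,%f'):
        # '2012-05-12 21:10:48,058' -> local-epoch seconds (ms dropped).
        return int(time.mktime(datetime.strptime(timestamp_string, fmt).timetuple()))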