Example 1
    def test_build_message(self):
        """Unit test supervisord build service check message."""
        process = {
            'now': 1414815513,
            'group': 'mysql',
            'description': 'pid 787, uptime 0:02:05',
            'pid': 787,
            'stderr_logfile': '/var/log/supervisor/mysql-stderr---supervisor-3ATI82.log',
            'stop': 0,
            'statename': 'RUNNING',
            'start': 1414815388,
            'state': 20,
            'stdout_logfile': '/var/log/mysql/mysql.log',
            'logfile': '/var/log/mysql/mysql.log',
            'exitstatus': 0,
            'spawnerr': '',
            'name': 'mysql'
        }

        expected_message = """Current time: 2014-11-01 04:18:33
Process name: mysql
Process group: mysql
Description: pid 787, uptime 0:02:05
Error log file: /var/log/supervisor/mysql-stderr---supervisor-3ATI82.log
Stdout log file: /var/log/mysql/mysql.log
Log file: /var/log/mysql/mysql.log
State: RUNNING
Start time: 2014-11-01 04:16:28
Stop time: \nExit Status: 0"""

        check, _ = get_check('supervisord', self.TEST_CASES[0]['yaml'])
        self.assertEquals(expected_message, check._build_message(process))
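
The expected message above is just the process dict rendered field by field, with the epoch timestamps (now, start, stop) converted to human-readable UTC strings and 0 (supervisord's "never") rendered as empty. A minimal sketch of a formatter that would satisfy this test, assuming UTC conversion (the check's real _build_message may differ in its details):

    from datetime import datetime

    def _build_message(process):
        # Render a supervisord process dict as a service check message
        # (sketch only; the field names are the ones used in the test above)
        def fmt_ts(epoch):
            # supervisord reports 0 for "never started/stopped"
            if not epoch:
                return ''
            return datetime.utcfromtimestamp(epoch).strftime('%Y-%m-%d %H:%M:%S')

        return '\n'.join([
            'Current time: %s' % fmt_ts(process['now']),
            'Process name: %s' % process['name'],
            'Process group: %s' % process['group'],
            'Description: %s' % process['description'],
            'Error log file: %s' % process['stderr_logfile'],
            'Stdout log file: %s' % process['stdout_logfile'],
            'Log file: %s' % process['logfile'],
            'State: %s' % process['statename'],
            'Start time: %s' % fmt_ts(process['start']),
            'Stop time: %s' % fmt_ts(process['stop']),
            'Exit Status: %s' % process['exitstatus'],
        ])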
Example 2
    def testNetwork(self):
        # FIXME: set cx_state to true, but that requires sysstat to be installed
        config = """
init_config:

instances:
    - collect_connection_state: false
      excluded_interfaces:
        - lo
        - lo0
"""
        check, instances = get_check('network', config)

        check.check(instances[0])
        check.get_metrics()

        metric_names = [m[0] for m in check.aggregator.metrics]

        assert 'system.net.bytes_rcvd' in metric_names
        assert 'system.net.bytes_sent' in metric_names
        if Platform.is_linux():
            assert 'system.net.tcp.retrans_segs' in metric_names
            assert 'system.net.tcp.in_segs' in metric_names
            assert 'system.net.tcp.out_segs' in metric_names
        elif Platform.is_bsd():
            assert 'system.net.tcp.retrans_packs' in metric_names
            assert 'system.net.tcp.sent_packs' in metric_names
            assert 'system.net.tcp.rcv_packs' in metric_names
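
Every example on this page obtains the check through the get_check helper from dd-agent's test utilities: it parses the YAML into init_config plus a list of instances, loads the matching check class from checks.d, and returns the instantiated check together with its instances. A rough sketch, assuming PyYAML, with load_check_class standing in for the real module-loading logic (illustrative only):

    import yaml

    def get_check(name, config_str):
        # Parse the agent-style YAML config into its two sections
        config = yaml.safe_load(config_str)
        init_config = config.get('init_config') or {}
        instances = config.get('instances', [])

        # load_check_class is a placeholder for the real helper, which
        # imports checks.d/<name>.py and picks out the AgentCheck subclass
        check_class = load_check_class(name)
        check = check_class(name, init_config, {})  # (name, init_config, agentConfig)
        return check, instances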
Example 3
    def test_check(self):
        """Integration test for supervisord check. Using a mocked supervisord."""
        for tc in self.TEST_CASES:
            check, instances = get_check('supervisord', tc['yaml'])
            self.assertTrue(check is not None, msg=check)
            self.assertEquals(tc['expected_instances'], instances)
            for instance in instances:
                name = instance['name']

                try:
                    # Run the check
                    check.check(instance)
                except Exception as e:
                    if 'error_message' in tc:  # expected error
                        self.assertEquals(str(e), tc['error_message'])
                    else:
                        self.assertTrue(False, msg=str(e))
                else:
                    # Assert that the check collected the right metrics
                    expected_metrics = tc['expected_metrics'][name]
                    self.assert_metrics(expected_metrics, check.get_metrics())

                    # Assert that the check generated the right service checks
                    expected_service_checks = tc['expected_service_checks'][name]
                    self.assert_service_checks(expected_service_checks,
                                               check.get_service_checks())
Example 4
    def test_check(self):
        check, instances = get_check('activemq_xml', self.config)
        check.requests = mock.Mock()

        def response_side_effect(*args, **kwargs):
            text = ''
            if '/admin/xml/topics.jsp' in args[0]:
                text = '<topics></topics>'
            elif '/admin/xml/queues.jsp' in args[0]:
                text = '<queues></queues>'
            elif '/admin/xml/subscribers.jsp' in args[0]:
                text = '<subscribers></subscribers>'
            # if text is still '' we will get an XML parsing error,
            # which is what we want when called with a URL we don't know
            return mock.Mock(text=text)

        check.requests.get.side_effect = response_side_effect
        check.check(instances[0])
        expected = {
            'url:http://localhost:8161': {
                'activemq.queue.count': (0, 'gauge'),
                'activemq.topic.count': (0, 'gauge'),
                'activemq.subscriber.count': (0, 'gauge'),
            }
        }
        self._assert_expected_metrics(expected, check.get_metrics())
Example 5
    def test_travis_supervisord(self):
        """Integration test for supervisord check. Using a supervisord on Travis."""

        # Load yaml config
        config_str = open(os.environ['VOLATILE_DIR'] + '/supervisor/supervisord.yaml', 'r').read()
        self.assertTrue(config_str is not None and len(config_str) > 0, msg=config_str)

        # init the check and get the instances
        check, instances = get_check('supervisord', config_str)
        self.assertTrue(check is not None, msg=check)
        self.assertEquals(len(instances), 1)

        # Supervisord should run 3 programs for 30, 60 and 90 seconds
        # respectively. The tests below will ensure that the process count
        # metric is reported correctly after (roughly) 10, 40, 70 and 100 seconds
        for i in range(4):
            try:
                # Run the check
                check.check(instances[0])
            except Exception as e:
                # Make sure that it ran successfully
                self.assertTrue(False, msg=str(e))
            else:
                up, down = 0, 0
                for name, timestamp, value, meta in check.get_metrics():
                    if name == 'supervisord.process.count':
                        if 'status:up' in meta['tags']:
                            up = value
                        elif 'status:down' in meta['tags']:
                            down = value
                self.assertEquals(up, 3 - i)
                self.assertEquals(down, i)
                sleep(10)
Example 6
    def testSqlServer(self):
        check, instances = get_check('sqlserver', CONFIG)
        check.check(instances[0])
        metrics = check.get_metrics()

        # Make sure the base metrics loaded
        base_metrics = [m[0] for m in check.METRICS]
        ret_metrics = [m[0] for m in metrics]
        for metric in base_metrics:
            assert metric in ret_metrics

        # Check our custom metrics
        assert 'sqlserver.clr.execution' in ret_metrics
        assert 'sqlserver.exec.in_progress' in ret_metrics
        assert 'sqlserver.db.commit_table_entries' in ret_metrics

        # Make sure the ALL custom metric is tagged
        tagged_metrics = [m for m in metrics
                          if m[0] == 'sqlserver.db.commit_table_entries']
        for metric in tagged_metrics:
            for tag in metric[3]['tags']:
                assert tag.startswith('db')

        # Service checks
        service_checks = check.get_service_checks()
        service_checks_count = len(service_checks)
        self.assertTrue(isinstance(service_checks, list))
        self.assertTrue(service_checks_count > 0)
        self.assertEquals(len([sc for sc in service_checks if sc['check'] == check.SERVICE_CHECK_NAME]), 1, service_checks)
        # Assert that all service checks have the proper tags: host and port
        self.assertEquals(len([sc for sc in service_checks if "host:127.0.0.1,1433" in sc['tags']]), service_checks_count, service_checks)
        self.assertEquals(len([sc for sc in service_checks if "db:master" in sc['tags']]), service_checks_count, service_checks)
Example 7
    def test_process_subscriber_data_normal(self):
        check, instances = get_check('activemq_xml', self.config)

        data = """
        <subscribers>
          <subscriber clientId="10"
                      subscriptionName="subscription1"
                      connectionId="10"
                      destinationName="Queue1"
                      selector="*"
                      active="yes" >
            <stats pendingQueueSize="5"
                   dispatchedQueueSize="15"
                   dispatchedCounter="15"
                   enqueueCounter="235"
                   dequeueCounter="175"/>
          </subscriber>
          <subscriber clientId="5"
                      subscriptionName="subscription2"
                      connectionId="15"
                      destinationName="Topic1"
                      selector="*"
                      active="no" >
            <stats pendingQueueSize="0"
                   dispatchedQueueSize="0"
                   dispatchedCounter="5"
                   enqueueCounter="12"
                   dequeueCounter="15"/>
          </subscriber>
        </subscribers>
        """
        check._process_subscriber_data(data, [], 300, [])
        expected = {
            'active:yes-clientId:10-connectionId:10-destinationName:Queue1-selector:*-subscriptionName:subscription1': {
                'activemq.subscriber.enqueue_counter': ('235', 'gauge'),
                'activemq.subscriber.dequeue_counter': ('175', 'gauge'),
                'activemq.subscriber.dispatched_counter': ('15', 'gauge'),
                'activemq.subscriber.dispatched_queue_size': ('15', 'gauge'),
                'activemq.subscriber.pending_queue_size': ('5', 'gauge'),
            },
            '': {
                'activemq.subscriber.count': (2, 'gauge'),
            },
            'active:no-clientId:5-connectionId:15-destinationName:Topic1-selector:*-subscriptionName:subscription2': {
                'activemq.subscriber.enqueue_counter': ('12', 'gauge'),
                'activemq.subscriber.dequeue_counter': ('15', 'gauge'),
                'activemq.subscriber.dispatched_counter': ('5', 'gauge'),
                'activemq.subscriber.dispatched_queue_size': ('0', 'gauge'),
                'activemq.subscriber.pending_queue_size': ('0', 'gauge'),
            },
        }

        self._assert_expected_metrics(expected, check.get_metrics())
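
The keys of these expected dicts are the metric's sorted tags joined with '-', with the empty key matching untagged metrics such as the count. An _assert_expected_metrics helper consistent with that convention might look like this sketch, assuming dd-agent's (name, timestamp, value, meta) metric tuples:

    def _assert_expected_metrics(self, expected, metrics):
        # Group the collected metrics by their sorted, dash-joined tag set
        actual = {}
        for name, timestamp, value, meta in metrics:
            key = '-'.join(sorted(meta.get('tags') or []))
            actual.setdefault(key, {})[name] = (value, meta.get('type'))

        # Every expected (value, type) pair must have been collected
        for key, series in expected.items():
            for name, value_and_type in series.items():
                self.assertEquals(value_and_type, actual[key][name])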
Example 8
    def test_windows_event_log(self):
        import win32evtlog
        check, instances = get_check('win32_event_log', CONFIG)

        # Run the check against all instances to set the last_ts
        for instance in instances:
            check.check(instance)

        # Run the checks again and make sure no new metrics are picked up
        for instance in instances:
            check.check(instance)
            assert len(check.get_metrics()) == 0

        # Generate some events for the log
        for msg, ev_type in self.LOG_EVENTS:
            self.write_event(msg, ev_type)
        self.write_event('do not pick me',
                         win32evtlog.EVENTLOG_INFORMATION_TYPE,
                         source_name='EVENTLOGTESTBAD')

        # Run the checks again for them to pick up the new events
        inst1, inst2 = instances
        check.check(inst1)
        ev1 = check.get_events()
        assert len(ev1) > 0
        assert len(ev1) == len([
            ev for ev in self.LOG_EVENTS
            if ev[1] == win32evtlog.EVENTLOG_WARNING_TYPE
        ])
        for ev in ev1:
            # Make sure we only picked up our source
            assert 'EVENTLOGTESTBAD' not in ev['msg_title']
            # Make sure the tags match up
            assert ev['tags'] == inst1['tags']
            # Check that the notifications are there.
            for notify in inst1['notify']:
                assert '@%s' % notify in ev['msg_text']

        check.check(inst2)
        ev2 = check.get_events()
        assert len(ev2) > 0
        assert len(ev2) == len([
            ev for ev in self.LOG_EVENTS
            if ev[1] in (win32evtlog.EVENTLOG_ERROR_TYPE,
                         win32evtlog.EVENTLOG_INFORMATION_TYPE)
        ])
        for ev in ev2:
            # Make sure we only picked up our source
            assert 'EVENTLOGTESTBAD' not in ev['msg_title']
            # Make sure the tags match up
            assert ev['tags'] == inst1['tags']
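
The write_event helper isn't shown in these snippets; on Windows it reduces to win32evtlogutil.ReportEvent against the event log. A plausible sketch (the default source name and the event ID are assumptions, chosen to be consistent with the EVENTLOGTESTBAD filtering above):

    import win32evtlogutil

    def write_event(msg, ev_type, source_name='EVENTLOGTEST'):
        # Write one entry to the Windows event log under the given source
        win32evtlogutil.ReportEvent(
            source_name,        # event source the check filters on
            1,                  # arbitrary event ID for test purposes
            eventType=ev_type,  # e.g. win32evtlog.EVENTLOG_WARNING_TYPE
            strings=[msg],      # becomes the event's message text
        )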
Example 9
    def test_process_topic_data_no_data(self):
        check, instances = get_check('activemq_xml', self.config)

        data = """
        <topics>
        </topics>
        """
        check._process_data(data, "topic", [], 300, [])
        expected = {
            '': {
                'activemq.topic.count': (0, 'gauge')
            },
        }

        self._assert_expected_metrics(expected, check.get_metrics())
Example 10
    def test_process_subscriber_data_no_data(self):
        check, instances = get_check('activemq_xml', self.config)

        data = """
        <subscribers>
        </subscribers>
        """
        check._process_subscriber_data(data, [], 300, [])
        expected = {
            '': {
                'activemq.subscriber.count': (0, 'gauge')
            },
        }

        self._assert_expected_metrics(expected, check.get_metrics())
Example 11
    def test_process_queue_data_normal(self):
        check, instances = get_check('activemq_xml', self.config)

        data = """
        <queues>
          <queue name="Queue1">
            <stats size="0"
                   consumerCount="6"
                   enqueueCount="64714"
                   dequeueCount="64714"/>
            <feed>
              <atom>queueBrowse/Queue1;jsessionid=sess_token?view=rss&amp;feedType=atom_1.0</atom>
              <rss>queueBrowse/Queue1;jsessionid=sess_token?view=rss&amp;feedType=rss_2.0</rss>
            </feed>
          </queue>
          <queue name="Queue2">
            <stats size="10"
                   consumerCount="3"
                   enqueueCount="1165"
                   dequeueCount="1165"/>
            <feed>
              <atom>queueBrowse/Queue2;jsessionid=sess_token?view=rss&amp;feedType=atom_1.0</atom>
              <rss>queueBrowse/Queue2;jsessionid=sess_token?view=rss&amp;feedType=rss_2.0</rss>
            </feed>
          </queue>
        </queues>
        """
        check._process_data(data, "queue", [], 300, [])
        expected = {
            'queue:Queue2': {
                'activemq.queue.size': ('10', 'gauge'),
                'activemq.queue.enqueue_count': ('1165', 'gauge'),
                'activemq.queue.dequeue_count': ('1165', 'gauge'),
                'activemq.queue.consumer_count': ('3', 'gauge')
            },
            '': {
                'activemq.queue.count': (2, 'gauge')
            },
            'queue:Queue1': {
                'activemq.queue.dequeue_count': ('64714', 'gauge'),
                'activemq.queue.consumer_count': ('6', 'gauge'),
                'activemq.queue.size': ('0', 'gauge'),
                'activemq.queue.enqueue_count': ('64714', 'gauge'),
            },
        }

        self._assert_expected_metrics(expected, check.get_metrics())
Example 12
    def test_fetch_data(self):
        # not too concerned with the response body, just that requests.get was called
        # with the correct arguments
        check, instances = get_check('activemq_xml', self.config)
        check.requests = mock.Mock()
        check._fetch_data('http://localhost:8171', '/admin/xml/queues.jsp', None, None)
        assert check.requests.get.call_count == 1
        assert check.requests.get.call_args == mock.call(
            'http://localhost:8171/admin/xml/queues.jsp', auth=None
        )

        check.requests.get.reset_mock()
        check._fetch_data('http://localhost:8171', '/admin/xml/queues.jsp', 'user', 'pass')
        assert check.requests.get.call_count == 1
        assert check.requests.get.call_args == mock.call(
            'http://localhost:8171/admin/xml/queues.jsp', auth=('user', 'pass')
        )
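
The two call assertions pin down _fetch_data's contract: concatenate the base URL with the path, and pass either None or a (user, password) tuple through to requests.get as auth. A matching sketch (not necessarily the check's actual code):

    def _fetch_data(self, base_url, path, username, password):
        # Build the auth tuple only when credentials were configured
        auth = None
        if username is not None and password is not None:
            auth = (username, password)
        # self.requests is the requests module, kept as an attribute so
        # the tests can swap in a mock (as they do above)
        return self.requests.get(base_url + path, auth=auth).text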
Example 13
    def test_checks(self):
        raise SkipTest("Skipped for now as it needs sudo")
        self.config = self.stripHeredoc(
            """init_config:

        instances:
            - directory: %s
              queues:
                  - bounce
                  - maildrop
                  - incoming
                  - active
                  - deferred
        """
            % (self.queue_root)
        )

        # stuff ~10k messages into random queues
        for _ in xrange(1, 10000):
            shuffle(self.queues)
            rand_queue = sample(self.queues, 1)[0]
            queue_file = binascii.b2a_hex(os.urandom(7))

            open(os.path.join(self.queue_root, rand_queue, queue_file), "w")

            # keep track of what we put in
            self.in_count[rand_queue][0] += 1

        check, instances = get_check("postfix", self.config)

        check.check(instances[0])
        out_count = check.get_metrics()

        # output what went in... per queue
        print
        for queue, count in self.in_count.iteritems():
            print "Test messges put into", queue, "= ", self.in_count[queue][0]

        # output postfix.py dd-agent plugin counts... per queue
        print
        for row in out_count:
            queue = row[3]["tags"][0].split(":")[1]
            self.assertEquals(int(row[2]), int(self.in_count[queue][0]))
            print "Test messages counted by dd-agent for", queue, "= ", row[2]
Example 14
    def test_process_topics_data_normal(self):
        check, instances = get_check('activemq_xml', self.config)

        data = """
        <topics>
          <topic name="Topic1">
            <stats size="5"
                   consumerCount="0"
                   enqueueCount="24"
                   dequeueCount="0"/>
          </topic>
          <topic name="Topic2">
            <stats size="1"
                   consumerCount="50"
                   enqueueCount="12"
                   dequeueCount="1200"/>
          </topic>
        </topics>
        """

        check._process_data(data, "topic", [], 300, [])
        expected = {
            'topic:Topic1': {
                'activemq.topic.size': ('5', 'gauge'),
                'activemq.topic.enqueue_count': ('24', 'gauge'),
                'activemq.topic.dequeue_count': ('0', 'gauge'),
                'activemq.topic.consumer_count': ('0', 'gauge')
            },
            '': {
                'activemq.topic.count': (2, 'gauge')
            },
            'topic:Topic2': {
                'activemq.topic.dequeue_count': ('1200', 'gauge'),
                'activemq.topic.consumer_count': ('50', 'gauge'),
                'activemq.topic.size': ('1', 'gauge'),
                'activemq.topic.enqueue_count': ('12', 'gauge'),
            },
        }

        self._assert_expected_metrics(expected, check.get_metrics())
Example 15
    def testChecks(self):
        check, instances = get_check('cacti', CONFIG)
        rrd_dir = os.path.join(self.tmp_dir, 'rrds')

        # Restore the RRDs from the XML dumps
        if not self._copy_rrds(self.rrd_dir):
            return

        # Do a check to establish the last timestamps
        check.check(instances[0])
        check.get_metrics()

        # Bump the last timestamps back 20 minutes so we have some actual data
        twenty_min = 20 * 60
        for k, v in check.last_ts.items():
            check.last_ts[k] = v - twenty_min

        # Do a first check
        check.check(instances[0])
        results1 = check.get_metrics()

        # Check again and make sure no new metrics are picked up
        # But we will still have the payload stats
        check.check(instances[0])
        results2 = check.get_metrics()
        last_ts1 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']

        # Check once more to make sure last_ts ignores None vals when calculating
        # where to start from
        check.check(instances[0])
        check.get_metrics()
        last_ts2 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']

        self.assertEquals(last_ts1, last_ts2)

        metrics = [r[0] for r in results2]

        # make sure diagnostic metrics are included
        assert 'cacti.metrics.count' in metrics
        assert 'cacti.rrd.count' in metrics
        assert 'cacti.hosts.count' in metrics

        metrics_count = [r for r in results2 if r[0] == 'cacti.metrics.count'][0][2]
        hosts_count = [r for r in results2 if r[0] == 'cacti.hosts.count'][0][2]
        rrd_count = [r for r in results2 if r[0] == 'cacti.rrd.count'][0][2]

        assert metrics_count == 0
        assert hosts_count == 1
        assert rrd_count == 3

        load1 = [m[2] for m in results1 if m[0] == 'system.load.1' and m[2]]

        # Make sure some load metrics were returned
        assert len(load1) > 0

        # Should not have any - not included in the whitelist
        current_users = [m[2] for m in results1 if m[0] == 'system.users.current' and m[2]]
        self.assertEquals(len(current_users), 0)

        disk_used = [m for m in results1 if m[0] == 'system.disk.used' and m[2]]
        assert len(disk_used) > 0

        # Make sure no None values are picked up
        none_metrics = [m[2] for m in results1 if m[2] is None]
        self.assertEquals(len(none_metrics), 0)
Example 16
    def _create_check(self):
        # Create the jenkins check
        self.check, instances = get_check('jenkins', self.config_yaml)
        self.instance = instances[0]