コード例 #1
0
def test_check_np_ok(aggregator):
    """Status NP_OK reports OK and must not leave a created_at marker file."""
    reboot_required.check(CONFIG_STATUS_NP_OK)
    aggregator.assert_service_check('system.reboot_required',
                                    status=reboot_required.OK)
    marker = join(gettempdir(), 'reboot-required.created_at.should_not_be_present')
    assert not isfile(marker)
コード例 #2
0
def test_check_missing_pid(aggregator):
    """A pid_file that does not exist must yield a CRITICAL process.up."""
    bad_instance = {'name': 'foo', 'pid_file': '/foo/bar/baz'}
    proc_check = ProcessCheck(common.CHECK_NAME, {}, {})
    proc_check.check(bad_instance)
    aggregator.assert_service_check(
        'process.up', count=1, status=proc_check.CRITICAL)
コード例 #3
0
def test_tags(aggregator, spin_up_powerdns):
    """Instance-level tags must appear on every metric and on the service check.

    Fixes: the v4 branch carried a copy-pasted "Assert metrics v3" comment,
    and the per-version assertion loops were near-duplicates.
    """
    version = _get_pdns_version()

    pdns_check = PowerDNSRecursorCheck(CHECK_NAME, {}, {})
    tags = ['foo:bar']

    # Select the config and the full expected metric set for this version.
    expected_metrics = []
    if version == 3:
        config = common.CONFIG.copy()
        expected_metrics = metrics.GAUGE_METRICS + metrics.RATE_METRICS
    elif version == 4:
        config = common.CONFIG_V4.copy()
        expected_metrics = (metrics.GAUGE_METRICS + metrics.GAUGE_METRICS_V4 +
                            metrics.RATE_METRICS + metrics.RATE_METRICS_V4)

    if version in (3, 4):
        config['tags'] = tags
        pdns_check.check(config)

        # Every metric must carry the instance tags exactly once.
        for metric in expected_metrics:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

    service_check_tags = common._config_sc_tags(common.CONFIG)
    aggregator.assert_service_check('powerdns.recursor.can_connect',
                                    status=PowerDNSRecursorCheck.OK,
                                    tags=service_check_tags + tags)

    aggregator.assert_all_metrics_covered()
コード例 #4
0
def test_check(aggregator, instance):
    """
    Run the Aqua check against canned API responses and verify all metrics.
    """
    check = AquaCheck('aqua', {}, {})
    check.validate_instance = MagicMock(return_value=None)
    check.get_aqua_token = MagicMock(return_value="test")

    # Map each mocked endpoint to its JSON fixture file.
    fixtures = {
        '/api/v1/dashboard': 'aqua_base_metrics.json',
        '/api/v1/hosts': 'aqua_hosts_metrics.json',
        '/api/v1/audit/access_totals?alert=-1&limit=100&time=hour&type=all':
            'aqua_audit_metrics.json',
        '/api/v1/scanqueue/summary': 'aqua_scan_queues_metrics.json',
    }

    def mock_perform(inst, url, token):
        # Unknown URLs fall through to None, as in the original chain.
        if url not in fixtures:
            return None
        with open(os.path.join(HERE, fixtures[url]), 'r') as f:
            return json.load(f)

    check._perform_query = MagicMock(side_effect=mock_perform)

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SERVICE_CHECK_NAME)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
コード例 #5
0
def test_service_check_ko(aggregator, instance):
    """Both connection-failure paths must emit one CRITICAL can-connect check."""
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))

    with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect'
                    ) as smart_connect:
        # Scenario 1: the connection attempt itself raises.
        smart_connect.side_effect = Exception()

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS,
        )

        aggregator.reset()

        # Scenario 2: connection succeeds but CurrentTime raises on the server.
        server = MagicMock()
        server.CurrentTime.side_effect = Exception()
        smart_connect.side_effect = None
        smart_connect.return_value = server

        with pytest.raises(ConnectionError):
            check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.CRITICAL,
            count=1,
            tags=SERVICE_CHECK_TAGS,
        )
コード例 #6
0
File: test_kong.py  Project: vsd6k/integrations-core
def test_connection_failure(aggregator, check):
    """An unreachable Kong must raise and emit a CRITICAL can-connect check."""
    with pytest.raises(Exception):
        check.check(BAD_CONFIG)
    expected_tags = ['kong_host:localhost', 'kong_port:1111']
    aggregator.assert_service_check(
        'kong.can_connect', status=Kong.CRITICAL, tags=expected_tags, count=1)

    # NOTE(review): confirm all_metrics_asserted() exists on this aggregator
    # stub; elsewhere in this file coverage is checked via
    # assert_all_metrics_covered().
    aggregator.all_metrics_asserted()
コード例 #7
0
def test_response_time(aggregator, check):
    """
    A reachable server must report OK and also emit a response-time metric
    when collect_response_time is enabled.
    """
    instance = deepcopy(common.INSTANCE)
    instance['collect_response_time'] = True
    instance['name'] = 'instance:response_time'
    check.check(instance)

    # The service check and the can_connect gauge share the same tag set.
    connect_tags = [
        'foo:bar', 'target_host:datadoghq.com', 'port:80',
        'instance:instance:response_time'
    ]
    aggregator.assert_service_check(
        'tcp.can_connect', status=check.OK, tags=connect_tags)
    aggregator.assert_metric(
        'network.tcp.can_connect', value=1, tags=connect_tags)

    # The response-time metric is tagged with the URL instead of host/port.
    response_tags = [
        'url:datadoghq.com:80', 'instance:instance:response_time', 'foo:bar'
    ]
    aggregator.assert_metric('network.tcp.response_time', tags=response_tags)
    aggregator.assert_all_metrics_covered()
コード例 #8
0
File: test_ceph.py  Project: zaquaz/integrations-core
def test_warn_health(_, aggregator):
    """A warning-state cluster still emits all metrics, with WARNING status."""
    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(copy.deepcopy(BASIC_CONFIG))

    for name in EXPECTED_METRICS:
        aggregator.assert_metric(name, count=1, tags=EXPECTED_TAGS)

    aggregator.assert_service_check(
        'ceph.overall_status', status=Ceph.WARNING, tags=EXPECTED_SERVICE_TAGS)
コード例 #9
0
def test_psutil_rw(aggregator, psutil_mocks):
    """
    With service_check_rw enabled, a mount with the 'ro' option must be
    flagged CRITICAL on disk.read_write.
    """
    instances = [{'service_check_rw': 'yes'}]
    disk_check = Disk('disk', None, {}, instances)
    disk_check.check(instances[0])

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
コード例 #10
0
def test_bad_api_key(aggregator, spin_up_powerdns):
    """A bad API key must raise, report CRITICAL, and produce no metrics."""
    pdns_check = PowerDNSRecursorCheck(CHECK_NAME, {}, {})
    with pytest.raises(Exception):
        pdns_check.check(common.BAD_API_KEY_CONFIG)

    sc_tags = common._config_sc_tags(common.BAD_API_KEY_CONFIG)
    aggregator.assert_service_check(
        'powerdns.recursor.can_connect',
        status=PowerDNSRecursorCheck.CRITICAL,
        tags=sc_tags,
    )
    assert len(aggregator._metrics) == 0
コード例 #11
0
def test_service_check_ko(instance, aggregator):
    """A status URL on a closed port must raise and report CRITICAL."""
    instance['lighttpd_status_url'] = 'http://localhost:1337'
    expected_tags = ['host:localhost', 'port:1337', 'instance:first']
    check = Lighttpd("lighttpd", {}, {})

    with pytest.raises(Exception):
        check.check(instance)

    aggregator.assert_service_check(
        check.SERVICE_CHECK_NAME, status=Lighttpd.CRITICAL, tags=expected_tags)
コード例 #12
0
def test_up(aggregator, check, instance):
    """A reachable host/port must yield an OK tcp.can_connect."""
    check.check(instance)
    up_tags = [
        "instance:UpService", "target_host:datadoghq.com", "port:80", "foo:bar"
    ]
    aggregator.assert_service_check(
        'tcp.can_connect', status=check.OK, tags=up_tags)
コード例 #13
0
def test_service_check_ok(aggregator, instance):
    """With a healthy mocked server, the can-connect check must report OK."""
    check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
    with mock.patch('datadog_checks.vsphere.vsphere.vmodl'), \
            mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect'
                       ) as smart_connect:
        smart_connect.return_value = get_mocked_server()
        check.check(instance)

        aggregator.assert_service_check(
            VSphereCheck.SERVICE_CHECK_NAME,
            status=VSphereCheck.OK,
            tags=SERVICE_CHECK_TAGS)
コード例 #14
0
def test_down(aggregator, check, instance_ko):
    """An unreachable host/port must yield a CRITICAL tcp.can_connect."""
    check.check(instance_ko)
    down_tags = [
        "instance:DownService", "target_host:127.0.0.1", "port:65530",
        "foo:bar"
    ]
    aggregator.assert_service_check(
        'tcp.can_connect', status=check.CRITICAL, tags=down_tags)
コード例 #15
0
def test_lighttpd(aggregator, instance, lighttpd):
    """A live lighttpd must report OK and emit every gauge exactly once."""
    expected_tags = ['host:{}'.format(HOST), 'port:9449', 'instance:first']
    check = Lighttpd("lighttpd", {}, {})
    check.check(instance)

    aggregator.assert_service_check(
        check.SERVICE_CHECK_NAME, status=Lighttpd.OK, tags=expected_tags)

    for gauge in CHECK_GAUGES:
        aggregator.assert_metric(gauge, tags=['instance:first'], count=1)
    aggregator.assert_all_metrics_covered()
コード例 #16
0
def test_up(aggregator, check):
    """A reachable service must report OK with can_connect == 1."""
    check.check(deepcopy(common.INSTANCE))
    up_tags = [
        "instance:UpService", "target_host:datadoghq.com", "port:80", "foo:bar"
    ]
    aggregator.assert_service_check(
        'tcp.can_connect', status=check.OK, tags=up_tags)
    aggregator.assert_metric(
        'network.tcp.can_connect', value=1, tags=up_tags)
コード例 #17
0
def test_check(aggregator, instance):
    """
    Run the Sortdb check against a canned stats payload and verify metrics.
    """
    check = SortdbCheck(CHECK_NAME, {}, {})
    fixture = os.path.join(HERE, 'sortdb_metrics.json')
    with open(fixture, 'r') as f:
        check._get_response_from_url = MagicMock(return_value=json.load(f))

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SORTDB_SERVICE_CHECK)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
コード例 #18
0
def test_down(aggregator, check):
    """An unreachable service must report CRITICAL with can_connect == 0."""
    check.check(deepcopy(common.INSTANCE_KO))
    down_tags = [
        "instance:DownService", "target_host:127.0.0.1", "port:65530",
        "foo:bar"
    ]
    aggregator.assert_service_check(
        'tcp.can_connect', status=check.CRITICAL, tags=down_tags)
    aggregator.assert_metric(
        'network.tcp.can_connect', value=0, tags=down_tags)
コード例 #19
0
File: test_kong.py  Project: vsd6k/integrations-core
def test_check(aggregator, check):
    """Run the Kong check for every config stub and verify metrics and tags."""
    for stub in CONFIG_STUBS:
        check.check(stub)
        stub_tags = stub['tags']

        for gauge in GAUGES:
            aggregator.assert_metric(gauge, tags=stub_tags, count=1)

        aggregator.assert_metric(
            'kong.table.count', len(DATABASES), tags=stub_tags, count=1)

        for db_name in DATABASES:
            aggregator.assert_metric(
                'kong.table.items',
                tags=stub_tags + ['table:{}'.format(db_name)],
                count=1)

        aggregator.assert_service_check(
            'kong.can_connect', status=Kong.OK,
            tags=['kong_host:localhost', 'kong_port:8001'] + stub_tags,
            count=1)

        # NOTE(review): confirm all_metrics_asserted() exists on this
        # aggregator stub; assert_all_metrics_covered() is the usual call.
        aggregator.all_metrics_asserted()
コード例 #20
0
def test_check_real_process_regex(aggregator):
    """Find this running python/pytest process via a regex search_string."""
    from datadog_checks.utils.platform import Platform

    instance = {
        'name': 'py',
        'search_string': ['.*python.*pytest'],
        'exact_match': False,
        'ignored_denied_access': True,
        'thresholds': {
            'warning': [1, 10],
            'critical': [1, 100]
        },
    }
    process = ProcessCheck(common.CHECK_NAME, {}, {})
    expected_tags = generate_expected_tags(instance)
    process.check(instance)
    for mname in common.PROCESS_METRIC:
        # Skip metrics that legitimately may be absent:
        #  - io.* when psutil's io_counters() is unavailable
        #  - mem.real when psutil's memory_info_ex() is unavailable
        #  - cpu.pct, which needs two runs to produce a value
        if not _PSUTIL_IO_COUNTERS and '.io' in mname:
            continue
        if not _PSUTIL_MEM_SHARED and 'mem.real' in mname:
            continue
        if mname == 'system.processes.cpu.pct':
            continue

        if Platform.is_windows():
            metric = common.UNIX_TO_WINDOWS_MAP.get(mname, mname)
        else:
            metric = mname
        aggregator.assert_metric(metric, at_least=1, tags=expected_tags)

    aggregator.assert_service_check(
        'process.up', count=1, tags=expected_tags + ['process:py'])

    # cpu.pct needs a prior sample to diff against, so run the check again.
    process.check(instance)
    aggregator.assert_metric(
        'system.processes.cpu.pct', count=1, tags=expected_tags)
    aggregator.assert_metric(
        'system.processes.cpu.normalized_pct', count=1, tags=expected_tags)
コード例 #21
0
def test_health_event(aggregator, spin_up_elastic):
    """
    Forcing the cluster yellow (more replicas than nodes can host) must
    produce a health event on ES < 2.0, or a cluster_health service check
    on newer versions.

    Fixes: the settings payload was missing its closing brace and was not
    valid JSON, so Elasticsearch would reject the request.
    """
    dummy_tags = ['elastique:recherche']
    config = {
        'url': URL,
        'username': USER,
        'password': PASSWORD,
        'tags': dummy_tags
    }

    elastic_check = ESCheck(CHECK_NAME, {}, {})
    # Should be yellow at first: request an impossible replica count.
    requests.put(URL + '/_settings',
                 data='{"index": {"number_of_replicas": 100}}')
    elastic_check.check(config)
    if get_es_version() < [2, 0, 0]:
        assert len(aggregator.events) == 1
        assert sorted(aggregator.events[0]['tags']) == sorted(
            set(['url:' + URL] + dummy_tags + CLUSTER_TAG))
    else:
        aggregator.assert_service_check('elasticsearch.cluster_health')
コード例 #22
0
    def test_valid_sc(self, aggregator):
        """Service checks submitted via AgentCheck must round-trip through
        the aggregator with matching status, tags, hostname and message."""
        check = AgentCheck()

        # Minimal submission: no tags, explicit empty message.
        check.service_check("testservicecheck",
                            AgentCheck.OK,
                            tags=None,
                            message="")
        aggregator.assert_service_check("testservicecheck",
                                        status=AgentCheck.OK)

        # Full submission: tags, hostname and a non-empty message all round-trip.
        check.service_check("testservicecheckwithhostname",
                            AgentCheck.OK,
                            tags=["foo", "bar"],
                            hostname="testhostname",
                            message="a message")
        aggregator.assert_service_check("testservicecheckwithhostname",
                                        status=AgentCheck.OK,
                                        tags=["foo", "bar"],
                                        hostname="testhostname",
                                        message="a message")

        # message=None must also be accepted without error.
        check.service_check("testservicecheckwithnonemessage",
                            AgentCheck.OK,
                            message=None)
        aggregator.assert_service_check(
            "testservicecheckwithnonemessage",
            status=AgentCheck.OK,
        )
コード例 #23
0
def test_ensure_auth_scope(aggregator):
    """ensure_auth_scope must create, cache, and then drop an instance scope."""
    instance = common.MOCK_CONFIG["instances"][0]
    instance['tags'] = ['optional:tag1']

    # No scope exists for the instance yet.
    with pytest.raises(KeyError):
        openstack_check.get_scope_for_instance(instance)

    with mock.patch(
        'datadog_checks.openstack_controller.openstack_controller.OpenStackProjectScope.request_auth_token',
        return_value=MOCK_HTTP_RESPONSE
    ):
        scope = openstack_check.ensure_auth_scope(instance)
        assert openstack_check.get_scope_for_instance(instance) == scope

        openstack_check._send_api_service_checks(scope, ['optional:tag1'])
        aggregator.assert_service_check(
            OpenStackControllerCheck.IDENTITY_API_SC,
            status=AgentCheck.OK,
            tags=['optional:tag1', 'keystone_server:http://10.0.2.15:5000'])

        # Compute/network URLs are nonexistant, so those come back CRITICAL.
        aggregator.assert_service_check(OpenStackControllerCheck.COMPUTE_API_SC, status=AgentCheck.CRITICAL)
        aggregator.assert_service_check(OpenStackControllerCheck.NETWORK_API_SC, status=AgentCheck.CRITICAL)

        openstack_check._current_scope = scope

    # Deleting the current scope must remove the instance mapping again.
    openstack_check.delete_current_scope()

    with pytest.raises(KeyError):
        openstack_check.get_scope_for_instance(instance)
コード例 #24
0
File: test_ceph.py  Project: zaquaz/integrations-core
def test_luminous_warn_health(_, aggregator):
    """Luminous: nearfull/full health checks map to WARNING/CRITICAL."""
    config = copy.deepcopy(BASIC_CONFIG)
    config["collect_service_check_for"] = ['OSD_NEARFULL', 'OSD_FULL']
    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(config)

    for sc_name, sc_status in (('ceph.overall_status', Ceph.CRITICAL),
                               ('ceph.osd_nearfull', Ceph.WARNING),
                               ('ceph.osd_full', Ceph.CRITICAL)):
        aggregator.assert_service_check(
            sc_name, status=sc_status, tags=EXPECTED_SERVICE_TAGS)
コード例 #25
0
File: test_ceph.py  Project: zaquaz/integrations-core
def test_luminous_ok_health(_, aggregator):
    """Luminous: with only OSD_NEARFULL collected, everything reports OK."""
    config = copy.deepcopy(BASIC_CONFIG)
    config["collect_service_check_for"] = ['OSD_NEARFULL']
    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(config)

    aggregator.assert_service_check('ceph.overall_status', status=Ceph.OK)
    aggregator.assert_service_check('ceph.osd_nearfull', status=Ceph.OK)
    # pool_app_not_enabled was not requested, so it must not be emitted.
    aggregator.assert_service_check('ceph.pool_app_not_enabled', count=0)
コード例 #26
0
def test_check(aggregator, spin_up_powerdns):
    """
    Run the recursor check for the detected pdns version and verify every
    expected metric is emitted once and can_connect reports OK.  An unknown
    version only asserts a CRITICAL can_connect.

    Fixes: the v3 and v4 branches were near-identical copy-pastes; they are
    merged with an early return for the unknown-version case.
    """
    service_check_tags = common._config_sc_tags(common.CONFIG)

    # get version and select the matching config and expected metric set.
    version = _get_pdns_version()
    pdns_check = PowerDNSRecursorCheck(CHECK_NAME, {}, {})

    if version == 3:
        config = common.CONFIG
        expected = metrics.GAUGE_METRICS + metrics.RATE_METRICS
    elif version == 4:
        config = common.CONFIG_V4
        expected = (metrics.GAUGE_METRICS + metrics.GAUGE_METRICS_V4 +
                    metrics.RATE_METRICS + metrics.RATE_METRICS_V4)
    else:
        print("powerdns_recursor unknown version.")
        aggregator.assert_service_check('powerdns.recursor.can_connect',
                                        status=PowerDNSRecursorCheck.CRITICAL,
                                        tags=service_check_tags)
        return

    pdns_check.check(config)

    # Assert metrics
    for metric in expected:
        aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric),
                                 tags=[],
                                 count=1)

    aggregator.assert_service_check('powerdns.recursor.can_connect',
                                    status=PowerDNSRecursorCheck.OK,
                                    tags=service_check_tags)
    assert aggregator.metrics_asserted_pct == 100.0
コード例 #27
0
def test_check_warning(aggregator):
    """Status WARNING must map straight through to the service check."""
    reboot_required.check(CONFIG_STATUS_WARNING)
    aggregator.assert_service_check(
        'system.reboot_required', status=reboot_required.WARNING)
コード例 #28
0
def test_check_ok(aggregator):
    """Status OK reports OK and writes a fresh created_at marker file."""
    reboot_required.check(CONFIG_STATUS_OK)
    aggregator.assert_service_check(
        'system.reboot_required', status=reboot_required.OK)
    marker = join(gettempdir(), 'reboot-required.created_at.freshly_minted')
    assert isfile(marker)
コード例 #29
0
def test_check_critical(aggregator):
    """Status CRITICAL must map straight through to the service check."""
    reboot_required.check(CONFIG_STATUS_CRITICAL)
    aggregator.assert_service_check(
        'system.reboot_required', status=reboot_required.CRITICAL)
コード例 #30
0
def test_relocated_procfs(aggregator):
    """Point the process check at a fake procfs tree (init_config.procfs_path)
    and verify the process listed there is found and reported up.

    NOTE(review): this test is Python-2-only as written -- it relies on
    dict.iteritems, contextlib.nested, __builtin__ and the reload builtin.
    """
    from datadog_checks.utils.platform import Platform
    import tempfile
    import shutil
    import uuid

    already_linux = Platform.is_linux()
    unique_process_name = str(uuid.uuid4())
    # Temporary directory that stands in for /proc.
    my_procfs = tempfile.mkdtemp()

    def _fake_procfs(arg, root=my_procfs):
        # Materialize a nested dict as a directory tree of files under root.
        for key, val in arg.iteritems():
            path = os.path.join(root, key)
            if isinstance(val, dict):
                os.mkdir(path)
                _fake_procfs(val, path)
            else:
                with open(path, "w") as f:
                    f.write(str(val))

    # Minimal /proc layout: one PID (1) plus the global 'stat' file.
    _fake_procfs({
        '1': {
            'status': ("Name:\t{}\nThreads:\t1\n").format(unique_process_name),
            'stat':
            ('1 ({}) S 0 1 1 ' + ' 0' * 46).format(unique_process_name),
            'cmdline': unique_process_name,
        },
        'stat': ("cpu  13034 0 18596 380856797 2013 2 2962 0 0 0\n"
                 "btime 1448632481\n"),
    })

    config = {
        'init_config': {
            'procfs_path': my_procfs
        },
        'instances': [{
            'name': 'moved_procfs',
            'search_string': [unique_process_name],
            'exact_match': False,
            'ignored_denied_access': True,
            'thresholds': {
                'warning': [1, 10],
                'critical': [1, 100]
            },
        }]
    }
    # e.g. "5.4.2" -> 542; used below to satisfy psutil's version check.
    version = int(psutil.__version__.replace(".", ""))
    process = ProcessCheck(common.CHECK_NAME, config['init_config'], {},
                           config['instances'])

    try:

        def import_mock(name,
                        i_globals={},
                        i_locals={},
                        fromlist=[],
                        level=-1,
                        orig_import=__import__):
            # _psutil_linux and _psutil_posix are the
            #  C bindings; use a mock for those
            if name in ('_psutil_linux', '_psutil_posix') or level >= 1 and\
               ('_psutil_linux' in fromlist or '_psutil_posix' in fromlist):
                m = MagicMock()
                # the import system will ask us for our own name
                m._psutil_linux = m
                m._psutil_posix = m
                # there's a version safety check in psutil/__init__.py;
                # this skips it
                m.version = version
                return m
            return orig_import(name, i_globals, i_locals, fromlist, level)

        # contextlib.nested is deprecated in favor of with MGR1, MGR2, ... etc
        # but we have too many mocks to fit on one line and apparently \ line
        # continuation is not flake8 compliant, even when semantically
        # required (as here). Patch is unlikely to throw errors that are
        # suppressed, so the main downside of contextlib is avoided.
        with contextlib.nested(
                patch('sys.platform', 'linux'),
                patch('socket.AF_PACKET', create=True),
                patch('__builtin__.__import__', side_effect=import_mock)):
            if not already_linux:
                # Reloading psutil fails on linux, but we only
                # need to do so if we didn't start out on a linux platform
                reload(psutil)
            assert Platform.is_linux()
            process.check(config["instances"][0])
    finally:
        # Always clean up the fake /proc and undo the psutil patching.
        shutil.rmtree(my_procfs)
        if not already_linux:
            # restore the original psutil that doesn't have our mocks
            reload(psutil)
        else:
            psutil.PROCFS_PATH = '/proc'

    expected_tags = generate_expected_tags(config['instances'][0])
    expected_tags += ['process:moved_procfs']
    aggregator.assert_service_check('process.up', count=1, tags=expected_tags)