def test_response_time(aggregator, check):
    """
    Test the response time from a server expected to be up
    """
    instance = deepcopy(common.INSTANCE)
    instance['collect_response_time'] = True
    instance['name'] = 'instance:response_time'
    check.check(instance)

    # service check
    expected_tags = [
        'foo:bar', 'target_host:datadoghq.com', 'port:80',
        'instance:instance:response_time'
    ]
    aggregator.assert_service_check('tcp.can_connect',
                                    status=check.OK,
                                    tags=expected_tags)
    aggregator.assert_metric('network.tcp.can_connect',
                             value=1,
                             tags=expected_tags)

    # response time metric
    expected_tags = [
        'url:datadoghq.com:80', 'instance:instance:response_time', 'foo:bar'
    ]
    aggregator.assert_metric('network.tcp.response_time', tags=expected_tags)
    aggregator.assert_all_metrics_covered()
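The test above mutates a deepcopy of a shared `common.INSTANCE` template. A minimal sketch of what that template might contain, with hypothetical field names and values inferred from the tags asserted above (the real definition lives in the integration's test `common` module):

# Hypothetical common.INSTANCE for the TCP check test above; values are
# inferred from the asserted tags, not copied from the actual fixture.
INSTANCE = {
    'host': 'datadoghq.com',  # surfaces as the 'target_host:datadoghq.com' tag
    'port': 80,               # surfaces as the 'port:80' tag
    'tags': ['foo:bar'],      # custom tag echoed on metrics and service checks
}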
Example #2
def test_tags(aggregator, spin_up_powerdns):
    version = _get_pdns_version()

    pdns_check = PowerDNSRecursorCheck(CHECK_NAME, {}, {})
    tags = ['foo:bar']
    if version == 3:
        config = common.CONFIG.copy()
        config['tags'] = ['foo:bar']
        pdns_check.check(config)

        # Assert metrics v3
        for metric in metrics.GAUGE_METRICS:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

        for metric in metrics.RATE_METRICS:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

    elif version == 4:
        config = common.CONFIG_V4.copy()
        config['tags'] = ['foo:bar']
        pdns_check.check(config)

        # Assert metrics v4
        for metric in metrics.GAUGE_METRICS + metrics.GAUGE_METRICS_V4:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

        for metric in metrics.RATE_METRICS + metrics.RATE_METRICS_V4:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

    service_check_tags = common._config_sc_tags(common.CONFIG)
    aggregator.assert_service_check('powerdns.recursor.can_connect',
                                    status=PowerDNSRecursorCheck.OK,
                                    tags=service_check_tags + tags)

    aggregator.assert_all_metrics_covered()
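The loops above rely on constants from a `metrics` helper module. A sketch of the shape it is assumed to have (names and entries are illustrative; the real lists are much longer):

# Hypothetical shape of the metrics module used above.
METRIC_FORMAT = 'powerdns.recursor.{}'   # prefix applied to each raw recursor stat
GAUGE_METRICS = ['cache-entries', 'concurrent-queries']
RATE_METRICS = ['all-outqueries', 'answers0-1']
GAUGE_METRICS_V4 = ['fd-usage']          # stats only exposed by the v4 recursor
RATE_METRICS_V4 = ['auth4-answers0-1']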
Example #3
def test_check(aggregator, instance):
    """
    Testing Aqua check.
    """
    check = AquaCheck('aqua', {}, {})
    check.validate_instance = MagicMock(return_value=None)
    check.get_aqua_token = MagicMock(return_value="test")

    def mock_perform(inst, url, token):
        if url == '/api/v1/dashboard':
            with open(os.path.join(HERE, 'aqua_base_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/hosts':
            with open(os.path.join(HERE, 'aqua_hosts_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/audit/access_totals?alert=-1&limit=100&time=hour&type=all':
            with open(os.path.join(HERE, 'aqua_audit_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/scanqueue/summary':
            with open(os.path.join(HERE, 'aqua_scan_queues_metrics.json'), 'r') as f:
                return json.load(f)
    check._perform_query = MagicMock(side_effect=mock_perform)

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SERVICE_CHECK_NAME)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
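The if/elif chain in `mock_perform` can also be expressed as a lookup table from endpoint to fixture file, which keeps the mock declarative; a sketch under the same assumptions (same fixture files, same `_perform_query` signature):

# Alternative side_effect for check._perform_query: map each mocked endpoint
# to its JSON fixture instead of branching explicitly.
FIXTURES = {
    '/api/v1/dashboard': 'aqua_base_metrics.json',
    '/api/v1/hosts': 'aqua_hosts_metrics.json',
    '/api/v1/audit/access_totals?alert=-1&limit=100&time=hour&type=all': 'aqua_audit_metrics.json',
    '/api/v1/scanqueue/summary': 'aqua_scan_queues_metrics.json',
}

def mock_perform(inst, url, token):
    with open(os.path.join(HERE, FIXTURES[url]), 'r') as f:
        return json.load(f)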
Example #4
def test_check(aggregator, check):

    check.tags = []
    check.set_paths()

    with open(os.path.join(FIXTURE_DIR, "entropy_avail")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_entropy_info()

    with open(os.path.join(FIXTURE_DIR, "inode-nr")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_inode_info()

    with open(os.path.join(FIXTURE_DIR, "proc-stat")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_stat_info()

    with open(os.path.join(FIXTURE_DIR, "process_stats")) as f:
        with patch(
            'datadog_checks.linux_proc_extras.linux_proc_extras.get_subprocess_output',
            return_value=(f.read(), "", 0)
        ):
            check.get_process_states()

    # Assert metrics
    for metric in PROC_COUNTS + INODE_GAUGES + ENTROPY_GAUGES + PROCESS_STATS_GAUGES:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
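The three mock_open blocks above repeat the same "patch open() with a fixture's contents" pattern; a small helper like the one below (hypothetical, not part of the original test) would factor it out:

# Hypothetical helper: patch the module-level open() with a fixture's contents
# for the duration of one check call. Mirrors the pattern used above.
from contextlib import contextmanager

@contextmanager
def fixture_open(fixture_name):
    with open(os.path.join(FIXTURE_DIR, fixture_name)) as f:
        m = mock_open(read_data=f.read())
    with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
        yield

# Usage inside the test body:
#     with fixture_open('entropy_avail'):
#         check.get_entropy_info()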
Example #5
def test_istio(aggregator, mesh_mixture_fixture):
    """
    Test the full check
    """
    check = Istio('istio', {}, {}, [MOCK_INSTANCE])
    check.check(MOCK_INSTANCE)

    for metric in MESH_METRICS + MIXER_METRICS:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
Example #6
    def test_check(self, aggregator, mock_get):
        """
        Testing kube_dns check.
        """

        check = KubeDNSCheck('kube_dns', {}, {}, [instance])
        check.check(instance)

        # run the check a second time so the count metrics are also submitted
        check.check(instance)
        for metric in self.METRICS + self.COUNT_METRICS:
            aggregator.assert_metric(metric)

        aggregator.assert_all_metrics_covered()
Example #7
def test_lighttpd(aggregator, instance, lighttpd):
    """
    """
    tags = ['host:{}'.format(HOST), 'port:9449', 'instance:first']
    check = Lighttpd("lighttpd", {}, {})
    check.check(instance)

    aggregator.assert_service_check(check.SERVICE_CHECK_NAME,
                                    status=Lighttpd.OK,
                                    tags=tags)

    for gauge in CHECK_GAUGES:
        aggregator.assert_metric(gauge, tags=['instance:first'], count=1)
    aggregator.assert_all_metrics_covered()
Example #8
def test_check(aggregator, instance):
    """
    Testing Sortdb check.
    """
    check = SortdbCheck(CHECK_NAME, {}, {})
    with open(os.path.join(HERE, 'sortdb_metrics.json'), 'r') as f:
        check._get_response_from_url = MagicMock(return_value=json.load(f))

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SORTDB_SERVICE_CHECK)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
Example #9
def test_check_userspace(aggregator, mock_userspace):
    """
    Testing Kube_proxy in userspace mode.
    """
    c = KubeProxyCheck(CHECK_NAME, None, {}, [instance])
    c.check(instance)
    aggregator.assert_metric(NAMESPACE + '.cpu.time')
    aggregator.assert_metric(NAMESPACE + '.mem.resident')
    aggregator.assert_metric(NAMESPACE + '.mem.virtual')
    aggregator.assert_metric(
        NAMESPACE + '.client.http.requests',
        tags=['method:GET', 'host:127.0.0.1:8080', 'code:200'])
    aggregator.assert_metric(
        NAMESPACE + '.client.http.requests',
        tags=['method:POST', 'host:127.0.0.1:8080', 'code:201'])
    aggregator.assert_all_metrics_covered()
Example #10
def test_check_iptables(aggregator, mock_iptables):
    """
    Testing Kube_proxy in iptables mode.
    """

    c = KubeProxyCheck(CHECK_NAME, None, {}, [instance])
    c.check(instance)
    aggregator.assert_metric(NAMESPACE + '.cpu.time')
    aggregator.assert_metric(NAMESPACE + '.mem.resident')
    aggregator.assert_metric(NAMESPACE + '.mem.virtual')
    aggregator.assert_metric(
        NAMESPACE + '.client.http.requests',
        tags=['method:GET', 'code:200', 'host:127.0.0.1:8080'])
    aggregator.assert_metric(
        NAMESPACE + '.client.http.requests',
        tags=['method:POST', 'code:201', 'host:127.0.0.1:8080'])
    aggregator.assert_metric(
        NAMESPACE + '.client.http.requests',
        tags=['method:GET', 'code:404', 'host:127.0.0.1:8080'])
    aggregator.assert_metric(NAMESPACE + '.sync_rules.latency.count')
    aggregator.assert_metric(NAMESPACE + '.sync_rules.latency.sum')
    aggregator.assert_all_metrics_covered()
Example #11
    def test_check(self, aggregator, mock_get):
        """
        Testing kube_dns check.
        """

        check = KubeDNSCheck('kube_dns', {}, {}, [instance])
        check.check(instance)

        # run the check a second time so the count metrics are also submitted
        check.check(instance)
        for metric in self.METRICS + self.COUNT_METRICS:
            aggregator.assert_metric(metric)
            aggregator.assert_metric_has_tag(metric, customtag)

        aggregator.assert_all_metrics_covered()

        # Make sure instance tags are not modified, see #3066
        aggregator.reset()
        check.check(instance)
        name = self.NAMESPACE + ".request_duration.seconds.sum"
        aggregator.assert_metric(name)
        aggregator.assert_metric(name, tags=['custom:tag', 'system:reverse'])
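The last two assertions guard against the check mutating the instance's tags between runs. An explicit way to state the same property (illustrative only; not the code referenced by #3066):

# Illustrative guard: after any number of runs, the instance dict itself must
# be untouched, i.e. per-metric labels must not leak into instance['tags'].
original_tags = list(instance.get('tags', []))
check.check(instance)
check.check(instance)
assert instance.get('tags', []) == original_tags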
Example #12
def test_check(aggregator, check):
    mock_conn = mock.MagicMock()
    mock_cursor = mock.MagicMock()

    mock_cursor.fetchall.return_value = MOCK_RRD_META
    mock_conn.cursor.return_value = mock_cursor

    mocks = [
        mock.patch('datadog_checks.cacti.cacti.rrdtool'),
        mock.patch('datadog_checks.cacti.cacti.pymysql.connect',
                   return_value=mock_conn),
        mock.patch('datadog_checks.cacti.Cacti._get_rrd_info',
                   return_value=MOCK_INFO),
        mock.patch('datadog_checks.cacti.Cacti._get_rrd_fetch',
                   return_value=MOCK_FETCH),
    ]

    for mock_func in mocks:
        mock_func.start()

    # Run the check twice to set the timestamps and capture metrics on the second run
    check.check(CACTI_CONFIG)
    check.check(CACTI_CONFIG)

    for mock_func in mocks:
        mock_func.stop()

    # The MySQL and RRD calls are mocked, so every value asserted below
    # (including cacti.rrd.count and cacti.hosts.count) comes from the mock
    # data rather than a live Cacti install.
    aggregator.assert_metric('cacti.metrics.count', value=10, tags=CUSTOM_TAGS)
    aggregator.assert_metric('system.mem.buffered.max',
                             value=2,
                             tags=CUSTOM_TAGS)
    aggregator.assert_metric('system.mem.buffered', value=2, tags=CUSTOM_TAGS)
    aggregator.assert_metric('cacti.rrd.count', value=5, tags=CUSTOM_TAGS)
    aggregator.assert_metric('cacti.hosts.count', value=1, tags=CUSTOM_TAGS)
    aggregator.assert_all_metrics_covered()
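The manual start()/stop() loops above leave the patches active if check.check() raises mid-test. An equivalent arrangement with contextlib.ExitStack (same patch targets, only the lifecycle changes) guarantees the mocks are unwound:

# Sketch: the patch lifecycle from the test above expressed with ExitStack, so
# every patch is stopped even if check.check() raises.
from contextlib import ExitStack

with ExitStack() as stack:
    stack.enter_context(mock.patch('datadog_checks.cacti.cacti.rrdtool'))
    stack.enter_context(mock.patch('datadog_checks.cacti.cacti.pymysql.connect', return_value=mock_conn))
    stack.enter_context(mock.patch('datadog_checks.cacti.Cacti._get_rrd_info', return_value=MOCK_INFO))
    stack.enter_context(mock.patch('datadog_checks.cacti.Cacti._get_rrd_fetch', return_value=MOCK_FETCH))

    # Run the check twice to set the timestamps and capture metrics on the second run
    check.check(CACTI_CONFIG)
    check.check(CACTI_CONFIG)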
Example #13
def test_check(mock_get_usage, mock_device_list, aggregator):
    """
    Testing Btrfs check.
    """
    with mock.patch.object(btrfs_check,
                           'get_unallocated_space',
                           return_value=None):
        btrfs_check.check({})

    aggregator.assert_metric('system.disk.btrfs.unallocated', count=0)

    aggregator.reset()
    with mock.patch.object(btrfs_check,
                           'get_unallocated_space',
                           return_value=0):
        btrfs_check.check({})

    aggregator.assert_metric('system.disk.btrfs.total', count=4)
    aggregator.assert_metric('system.disk.btrfs.used', count=4)
    aggregator.assert_metric('system.disk.btrfs.free', count=4)
    aggregator.assert_metric('system.disk.btrfs.usage', count=4)
    aggregator.assert_metric('system.disk.btrfs.unallocated', count=1)

    aggregator.assert_all_metrics_covered()