Example #1
def test_check_collect_children(mock_process, aggregator):
    instance = {'name': 'foo', 'pid': 1, 'collect_children': True}
    process = ProcessCheck(common.CHECK_NAME, {}, {})
    process.check(instance)
    aggregator.assert_metric('system.processes.number',
                             value=1,
                             tags=generate_expected_tags(instance))
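
Several of the process-check examples rely on a generate_expected_tags helper defined elsewhere in the test module. A minimal sketch of what such a helper might look like (hypothetical; the real helper may build more tags):

def generate_expected_tags(instance):
    # Hypothetical reconstruction: tag metrics by process name,
    # plus any tags supplied on the instance itself.
    return ['process_name:{}'.format(instance['name'])] + instance.get('tags', [])
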
Example #2
def test_prometheus_default_instance(aggregator, poll_mock):
    """
    Test the Prometheus check with a default instance.
    """

    c = PrometheusCheck(CHECK_NAME,
                        None, {}, [],
                        default_instances={
                            'prometheus': {
                                'prometheus_url':
                                'http://localhost:10249/metrics',
                                'namespace':
                                'prometheus',
                                'metrics': [{
                                    'metric1': 'renamed.metric1'
                                }, 'metric2']
                            }
                        },
                        default_namespace='prometheus')
    c.check({
        'prometheus_url': 'http://custom:1337/metrics',
    })
    aggregator.assert_metric(
        CHECK_NAME + '.renamed.metric1',
        tags=['node:host1', 'flavor:test', 'matched_label:foobar'],
        metric_type=aggregator.GAUGE)
    aggregator.assert_metric(
        CHECK_NAME + '.metric2',
        tags=['timestamp:123', 'node:host2', 'matched_label:foobar'],
        metric_type=aggregator.GAUGE)
    assert aggregator.metrics_asserted_pct == 100.0
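
The point of this test is that an instance carrying only a prometheus_url still inherits the namespace and metric mappings from default_instances. A toy illustration of that merge semantics (hypothetical helper, not the actual PrometheusCheck internals):

def resolve_instance(instance, default_instances, default_namespace):
    # Hypothetical: look up the defaults for the instance's namespace
    # and let explicit instance keys win over the defaults.
    defaults = default_instances.get(instance.get('namespace', default_namespace), {})
    merged = dict(defaults)
    merged.update(instance)
    return merged

merged = resolve_instance(
    {'prometheus_url': 'http://custom:1337/metrics'},
    {'prometheus': {'prometheus_url': 'http://localhost:10249/metrics',
                    'namespace': 'prometheus',
                    'metrics': [{'metric1': 'renamed.metric1'}, 'metric2']}},
    'prometheus')
assert merged['prometheus_url'] == 'http://custom:1337/metrics'
assert merged['metrics'] == [{'metric1': 'renamed.metric1'}, 'metric2']
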
Example #3
def test_check(aggregator, check):

    check.tags = []
    check.set_paths()

    with open(os.path.join(FIXTURE_DIR, "entropy_avail")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_entropy_info()

    with open(os.path.join(FIXTURE_DIR, "inode-nr")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_inode_info()

    with open(os.path.join(FIXTURE_DIR, "proc-stat")) as f:
        m = mock_open(read_data=f.read())
        with patch('datadog_checks.linux_proc_extras.linux_proc_extras.open', m):
            check.get_stat_info()

    with open(os.path.join(FIXTURE_DIR, "process_stats")) as f:
        with patch(
            'datadog_checks.linux_proc_extras.linux_proc_extras.get_subprocess_output',
            return_value=(f.read(), "", 0)
        ):
            check.get_process_states()

    # Assert metrics
    for metric in PROC_COUNTS + INODE_GAUGES + ENTROPY_GAUGES + PROCESS_STATS_GAUGES:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
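
Every block above uses the same stdlib recipe: read a fixture from disk, wrap it in mock_open, and patch the module-level open so the check reads the fixture instead of the real /proc file. The mechanism in isolation (fixture content illustrative):

from unittest.mock import mock_open, patch

FIXTURE = "3456\n"  # illustrative stand-in for an entropy_avail fixture

def read_entropy():
    # Stand-in for the check code that opens a /proc file.
    with open('/proc/sys/kernel/random/entropy_avail') as f:
        return f.read()

with patch('builtins.open', mock_open(read_data=FIXTURE)):
    assert read_entropy() == FIXTURE
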
Example #4
def test_pshard_metrics(aggregator, spin_up_elastic):
    """ Tests that the pshard related metrics are forwarded and that the
        document count for primary indexes is twice smaller as the global
        document count when "number_of_replicas" is set to 1 """
    elastic_latency = 10
    config = {
        'url': URL,
        'pshard_stats': True,
        'username': USER,
        'password': PASSWORD
    }

    requests.put(URL + '/_settings',
                 data='{"index": {"number_of_replicas": 1}}')
    requests.put(URL + '/testindex/testtype/2',
                 data='{"name": "Jane Doe", "age": 27}')
    requests.put(URL + '/testindex/testtype/1',
                 data='{"name": "John Doe", "age": 42}')

    time.sleep(elastic_latency)
    elastic_check = ESCheck(CHECK_NAME, {}, {})
    elastic_check.check(config)

    pshard_stats_metrics = dict(ESCheck.PRIMARY_SHARD_METRICS)
    if get_es_version() >= [1, 0, 0]:
        pshard_stats_metrics.update(ESCheck.PRIMARY_SHARD_METRICS_POST_1_0)

    for m_name, desc in pshard_stats_metrics.items():
        if desc[0] == "gauge":
            aggregator.assert_metric(m_name, count=1, tags=[])

    # Our pshard metrics are getting sent, let's check that they're accurate
    # Note: please make sure you don't install Maven on the CI for future
    # elastic search CI integrations. It would make the line below fail :/
    aggregator.assert_metric('elasticsearch.primaries.docs.count')
Example #5
def test_pod_expiration(monkeypatch, aggregator, tagger):
    check = KubeletCheck('kubelet', None, {}, [{}])
    check.pod_list_url = "dummyurl"

    # Fixtures contains four pods:
    #   - dd-agent-ntepl old but running
    #   - hello1-1550504220-ljnzx succeeded and old enough to expire
    #   - hello5-1550509440-rlgvf succeeded but not old enough
    #   - hello8-1550505780-kdnjx has one old container and a recent container, don't expire
    monkeypatch.setattr(check, 'perform_kubelet_query',
                        mock.Mock(return_value=MockStreamResponse('pods_expired.json')))
    monkeypatch.setattr(check, '_compute_pod_expiration_datetime', mock.Mock(
        return_value=parse_rfc3339("2019-02-18T16:00:06Z")
        ))

    attrs = {'is_excluded.return_value': False}
    check.pod_list_utils = mock.Mock(**attrs)

    pod_list = check.retrieve_pod_list()
    assert pod_list['expired_count'] == 1

    expected_names = ['dd-agent-ntepl', 'hello5-1550509440-rlgvf', 'hello8-1550505780-kdnjx']
    collected_names = [p['metadata']['name'] for p in pod_list['items']]
    assert collected_names == expected_names

    # Test .pods.expired gauge is submitted
    check._report_container_state_metrics(pod_list, ["custom:tag"])
    aggregator.assert_metric("kubernetes.pods.expired", value=1, tags=["custom:tag"])
Example #6
def test_response_time(aggregator, check):
    """
    Test the response time from a server expected to be up
    """
    instance = deepcopy(common.INSTANCE)
    instance['collect_response_time'] = True
    instance['name'] = 'instance:response_time'
    check.check(instance)

    # service check
    expected_tags = [
        'foo:bar', 'target_host:datadoghq.com', 'port:80',
        'instance:instance:response_time'
    ]
    aggregator.assert_service_check('tcp.can_connect',
                                    status=check.OK,
                                    tags=expected_tags)
    aggregator.assert_metric('network.tcp.can_connect',
                             value=1,
                             tags=expected_tags)

    # response time metric
    expected_tags = [
        'url:datadoghq.com:80', 'instance:instance:response_time', 'foo:bar'
    ]
    aggregator.assert_metric('network.tcp.response_time', tags=expected_tags)
    aggregator.assert_all_metrics_covered()
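
What these assertions ultimately measure is a plain socket connection attempt and how long it took. A rough stdlib sketch of the probe this test exercises (parameters illustrative, not the check's actual implementation):

import socket
import time

def can_connect(host='datadoghq.com', port=80, timeout=5):
    # Returns (up, response_time_seconds), roughly what the check reports
    # as network.tcp.can_connect and network.tcp.response_time.
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True, time.time() - start
    except (socket.timeout, OSError):
        return False, None
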
Example #7
def test_device_tagging(aggregator, psutil_mocks):
    instances = [{
        'use_mount': 'no',
        'device_tag_re': {
            "/dev/sda.*": "type:dev,tag:two"
        },
        'tags': ["optional:tags1"]
    }]
    c = Disk('disk', None, {}, instances)
    c.check(instances[0])

    # Assert metrics
    tags = [
        "type:dev", "tag:two", "device:{}".format(DEFAULT_DEVICE_NAME),
        "optional:tags1"
    ]
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=tags)

    for name, value in RATES_VALUES.items():
        aggregator.assert_metric(
            name,
            value=value,
            tags=['device:{}'.format(DEFAULT_DEVICE_NAME), "optional:tags1"])

    assert aggregator.metrics_asserted_pct == 100.0
Example #8
def test_metrics(client, check, aggregator, memcached):
    """
    Test all the available metrics: default, options and slabs
    """
    # we need to successfully retrieve a key to produce `get_hit_percent`
    client.set("foo", "bar")
    client.get("foo")

    instance = {
        'url': "{}".format(HOST),
        'port': PORT,
        'options': {
            'items': True,
            'slabs': True,
        }
    }
    check.check(instance)

    expected_tags = ["url:{}:11211".format(HOST)]
    for m in GAUGES + RATES + SLABS_AGGREGATES:
        aggregator.assert_metric(m, tags=expected_tags, count=1)

    expected_tags = ["url:{}:11211".format(HOST), "slab:1"]
    for m in ITEMS_GAUGES + ITEMS_RATES + SLABS_RATES + SLABS_GAUGES:
        aggregator.assert_metric(m, tags=expected_tags, count=1)

    assert aggregator.metrics_asserted_pct == 100.0
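
The client fixture is whatever memcached client the suite wires up; with pymemcache, for example, the set/get warm-up that produces get_hit_percent could be reproduced like this (assuming a memcached listening locally on 11211):

from pymemcache.client.base import Client

client = Client(('localhost', 11211))
client.set('foo', 'bar')
assert client.get('foo') == b'bar'  # pymemcache returns bytes
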
Example #9
def test_kubelet_check_prometheus(monkeypatch, aggregator):
    check = KubeletCheck('kubelet', None, {}, [{}])
    monkeypatch.setattr(check, 'retrieve_pod_list', mock.Mock(return_value=json.loads(mock_from_file('pods.json'))))
    monkeypatch.setattr(check, '_retrieve_node_spec', mock.Mock(return_value=NODE_SPEC))
    monkeypatch.setattr(check, '_perform_kubelet_check', mock.Mock(return_value=None))
    monkeypatch.setattr(check, 'process_cadvisor', mock.Mock(return_value=None))

    attrs = {
        'close.return_value': True,
        'iter_lines.return_value': mock_from_file('metrics.txt').split('\n')
    }
    mock_resp = mock.Mock(headers={'Content-Type': 'text/plain'}, **attrs)
    monkeypatch.setattr(check, 'poll', mock.Mock(return_value=mock_resp))

    check.check({})

    assert check.cadvisor_legacy_url is None
    check.retrieve_pod_list.assert_called_once()
    check._retrieve_node_spec.assert_called_once()
    check._perform_kubelet_check.assert_called_once()
    check.poll.assert_called_once()
    check.process_cadvisor.assert_not_called()

    # called twice so pct metrics are guaranteed to be there
    check.check({})
    for metric in EXPECTED_METRICS_COMMON:
        aggregator.assert_metric(metric)
    for metric in EXPECTED_METRICS_PROMETHEUS:
        aggregator.assert_metric(metric)
    assert aggregator.metrics_asserted_pct == 100.0
Example #10
def test_luminous_osd_full_metrics(_, aggregator):

    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(copy.deepcopy(BASIC_CONFIG))

    aggregator.assert_metric('ceph.num_full_osds', value=1)
    aggregator.assert_metric('ceph.num_near_full_osds', value=1)
Example #11
def _test_kubelet_check_prometheus(monkeypatch, aggregator, instance_tags):
    instance = {}
    if instance_tags:
        instance["tags"] = instance_tags

    check = mock_kubelet_check(monkeypatch, [instance])
    monkeypatch.setattr(check, 'process_cadvisor', mock.Mock(return_value=None))

    check.check(instance)

    assert check.cadvisor_legacy_url is None
    check.retrieve_pod_list.assert_called_once()
    check._retrieve_node_spec.assert_called_once()
    check._perform_kubelet_check.assert_called_once()
    check.cadvisor_scraper.poll.assert_called_once()
    check.kubelet_scraper.poll.assert_called_once()
    check.process_cadvisor.assert_not_called()

    # called twice so pct metrics are guaranteed to be there
    check.check(instance)
    for metric in EXPECTED_METRICS_COMMON:
        aggregator.assert_metric(metric)
        if instance_tags:
            for tag in instance_tags:
                aggregator.assert_metric_has_tag(metric, tag)
    for metric in EXPECTED_METRICS_PROMETHEUS:
        aggregator.assert_metric(metric)
        if instance_tags:
            for tag in instance_tags:
                aggregator.assert_metric_has_tag(metric, tag)
    assert aggregator.metrics_asserted_pct == 100.0
Example #12
def test_metrics(client, check, instance, aggregator, memcached):
    """
    Test all the available metrics: default, options and slabs
    """
    # we need to successfully retrieve a key to produce `get_hit_percent`
    for _ in range(100):
        assert client.set("foo", "bar") is True
        assert client.get("foo") == "bar"

    instance.update({'options': {
        'items': True,
        'slabs': True,
    }})
    check.check(instance)

    print(aggregator._metrics)

    expected_tags = ["url:{}:{}".format(HOST, PORT), 'foo:bar']
    for m in GAUGES + RATES + SLABS_AGGREGATES:
        aggregator.assert_metric(m, tags=expected_tags, count=1)

    expected_tags += ["slab:1"]
    for m in ITEMS_GAUGES + ITEMS_RATES + SLABS_RATES + SLABS_GAUGES:
        aggregator.assert_metric(m, tags=expected_tags, count=1)

    assert aggregator.metrics_asserted_pct == 100.0
Example #13
def test_check(aggregator, instance):
    """
    Testing Aqua check.
    """
    check = AquaCheck('aqua', {}, {})
    check.validate_instance = MagicMock(return_value=None)
    check.get_aqua_token = MagicMock(return_value="test")

    def mock_perform(inst, url, token):
        if url == '/api/v1/dashboard':
            with open(os.path.join(HERE, 'aqua_base_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/hosts':
            with open(os.path.join(HERE, 'aqua_hosts_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/audit/access_totals?alert=-1&limit=100&time=hour&type=all':
            with open(os.path.join(HERE, 'aqua_audit_metrics.json'), 'r') as f:
                return json.load(f)
        elif url == '/api/v1/scanqueue/summary':
            with open(os.path.join(HERE, 'aqua_scan_queues_metrics.json'), 'r') as f:
                return json.load(f)
    check._perform_query = MagicMock(side_effect=mock_perform)

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SERVICE_CHECK_NAME)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
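
The MagicMock(side_effect=...) pattern above turns the mock into a tiny URL router over canned fixtures. In isolation the mechanism is just:

from unittest.mock import MagicMock

fetch = MagicMock(side_effect=lambda url: {'/ping': 'pong'}.get(url))
assert fetch('/ping') == 'pong'
assert fetch('/unknown') is None  # unmatched URLs fall through to None, as in mock_perform
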
Example #14
def test_check(mock_process, aggregator):
    (minflt, cminflt, majflt, cmajflt) = [1, 2, 3, 4]

    def mock_get_pagefault_stats(pid):
        return [minflt, cminflt, majflt, cmajflt]

    process = ProcessCheck(common.CHECK_NAME, {}, {})
    config = common.get_config_stubs()
    for idx in range(len(config)):
        instance = config[idx]['instance']
        if 'search_string' not in instance.keys():
            process.check(instance)
        else:
            with patch('datadog_checks.process.ProcessCheck.find_pids',
                       return_value=mock_find_pid(instance['name'],
                                                  instance['search_string'])):
                process.check(instance)

        # These assertions are just here to satisfy the coverage report;
        # they don't really "test" anything.
        for sname in common.PAGEFAULT_STAT:
            aggregator.assert_metric('system.processes.mem.page_faults.' +
                                     sname,
                                     at_least=0,
                                     tags=generate_expected_tags(instance))
Example #15
def test_warn_health(_, aggregator):
    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(copy.deepcopy(BASIC_CONFIG))

    for metric in EXPECTED_METRICS:
        aggregator.assert_metric(metric, count=1, tags=EXPECTED_TAGS)

    aggregator.assert_service_check('ceph.overall_status', status=Ceph.WARNING, tags=EXPECTED_SERVICE_TAGS)
Example #16
def test_linkerd(aggregator, linkerd_fixture):
    """
    Test the full check
    """
    c = LinkerdCheck('linkerd', None, {}, [MOCK_INSTANCE])
    c.check(MOCK_INSTANCE)

    for metric in LINKERD_FIXTURE_VALUES:
        aggregator.assert_metric(metric, value=LINKERD_FIXTURE_VALUES[metric])
Example #17
def test_disk_check(aggregator):
    """
    Basic check to see if all metrics are there
    """
    c = Disk('disk', None, {}, [{'use_mount': 'no'}])
    c.check({'use_mount': 'no'})
    for name in DISK_GAUGES + INODE_GAUGES + DISK_RATES:
        aggregator.assert_metric(name, tags=[])

    assert aggregator.metrics_asserted_pct == 100.0
Example #18
def test_check_filter_user(mock_process, aggregator):
    instance = {'name': 'foo', 'pid': 1, 'user': '******'}
    process = ProcessCheck(common.CHECK_NAME, {}, {})
    with patch('datadog_checks.process.ProcessCheck._filter_by_user',
               return_value={1, 2}):
        process.check(instance)

    aggregator.assert_metric('system.processes.number',
                             value=2,
                             tags=generate_expected_tags(instance))
Example #19
def test_istio(aggregator, mesh_mixture_fixture):
    """
    Test the full check
    """
    check = Istio('istio', {}, {}, [MOCK_INSTANCE])
    check.check(MOCK_INSTANCE)

    for metric in MESH_METRICS + MIXER_METRICS:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
Example #20
def test_osd_status_metrics_non_osd_health(_, aggregator):
    """
    The `detail` key of `health detail` can contain info on the health of non-osd units:
    shouldn't make the check fail
    """

    ceph_check = Ceph(CHECK_NAME, {}, {})
    ceph_check.check(copy.deepcopy(BASIC_CONFIG))

    aggregator.assert_metric('ceph.num_full_osds', value=0, count=1, tags=EXPECTED_TAGS)
    aggregator.assert_metric('ceph.num_near_full_osds', value=0, count=1, tags=EXPECTED_TAGS)
Example #21
def test_istio(aggregator, mesh_mixture_fixture):
    """
    Test the full check
    """
    c = Istio('istio', None, {}, [MOCK_INSTANCE])
    c.check(MOCK_INSTANCE)

    metrics = MESH_METRICS + MIXER_METRICS
    for metric in metrics:
        aggregator.assert_metric(metric)

    assert aggregator.metrics_asserted_pct == 100.0
Example #22
def test_process_functions(aggregator, mesh_mixture_fixture):
    """
    Test the process functions, ensure that they process correctly
    """
    c = Istio('istio', None, {}, [MOCK_INSTANCE])
    c._process_istio_mesh(MOCK_INSTANCE)
    c._process_mixer(MOCK_INSTANCE)

    metrics = MESH_METRICS + MIXER_METRICS
    for metric in metrics:
        aggregator.assert_metric(metric)

    assert aggregator.metrics_asserted_pct == 100.0
Example #23
def test_up(aggregator, check):
    """
    Service expected to be up
    """
    check.check(deepcopy(common.INSTANCE))
    expected_tags = [
        "instance:UpService", "target_host:datadoghq.com", "port:80", "foo:bar"
    ]
    aggregator.assert_service_check('tcp.can_connect',
                                    status=check.OK,
                                    tags=expected_tags)
    aggregator.assert_metric('network.tcp.can_connect',
                             value=1,
                             tags=expected_tags)
Example #24
def test_prometheus_check(aggregator, poll_mock):
    """
    Testing prometheus check.
    """

    c = PrometheusCheck('prometheus', None, {}, [instance])
    c.check(instance)
    aggregator.assert_metric(
        CHECK_NAME + '.renamed.metric1',
        tags=['node:host1', 'flavor:test', 'matched_label:foobar'])
    aggregator.assert_metric(
        CHECK_NAME + '.metric2',
        tags=['timestamp:123', 'node:host2', 'matched_label:foobar'])
    assert aggregator.metrics_asserted_pct == 100.0
Example #25
def test_lighttpd(aggregator, instance, lighttpd):
    """
    """
    tags = ['host:{}'.format(HOST), 'port:9449', 'instance:first']
    check = Lighttpd("lighttpd", {}, {})
    check.check(instance)

    aggregator.assert_service_check(check.SERVICE_CHECK_NAME,
                                    status=Lighttpd.OK,
                                    tags=tags)

    for gauge in CHECK_GAUGES:
        aggregator.assert_metric(gauge, tags=['instance:first'], count=1)
    aggregator.assert_all_metrics_covered()
Example #26
def test_tags(aggregator, spin_up_powerdns):
    version = _get_pdns_version()

    pdns_check = PowerDNSRecursorCheck(CHECK_NAME, {}, {})
    tags = ['foo:bar']
    if version == 3:
        config = common.CONFIG.copy()
        config['tags'] = ['foo:bar']
        pdns_check.check(config)

        # Assert metrics v3
        for metric in metrics.GAUGE_METRICS:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

        for metric in metrics.RATE_METRICS:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

    elif version == 4:
        config = common.CONFIG_V4.copy()
        config['tags'] = ['foo:bar']
        pdns_check.check(config)

        # Assert metrics v4
        for metric in metrics.GAUGE_METRICS + metrics.GAUGE_METRICS_V4:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

        for metric in metrics.RATE_METRICS + metrics.RATE_METRICS_V4:
            aggregator.assert_metric(metrics.METRIC_FORMAT.format(metric), tags=tags, count=1)

    service_check_tags = common._config_sc_tags(common.CONFIG)
    aggregator.assert_service_check('powerdns.recursor.can_connect',
                                    status=PowerDNSRecursorCheck.OK,
                                    tags=service_check_tags + tags)

    aggregator.assert_all_metrics_covered()
Example #27
    def test_check(self, aggregator, mock_get):
        """
        Testing kube_dns check.
        """

        check = KubeDNSCheck('kube_dns', {}, {}, [instance])
        check.check(instance)

        # check that we then get the count metrics also
        check.check(instance)
        for metric in self.METRICS + self.COUNT_METRICS:
            aggregator.assert_metric(metric)

        aggregator.assert_all_metrics_covered()
Example #28
def test_file_metrics(aggregator):
    """
    File metric coverage
    """
    config_stubs = get_config_stubs(temp_dir, filegauges=True)

    for config in config_stubs:
        aggregator.reset()
        dir_check.check(config)
        dirtagname = config.get('dirtagname', "name")
        name = config.get('name', temp_dir)
        filetagname = config.get('filetagname', "filename")
        dir_tags = [dirtagname + ":%s" % name, 'optional:tag1']

        # File metrics
        for mname in FILE_METRICS:
            if config.get('pattern') != "file_*":
                # 2 '*.log' files in 'temp_dir'
                for i in range(1, 3):
                    file_tag = [
                        filetagname + ":%s" %
                        os.path.normpath(temp_dir + "/log_" + str(i) + ".log")
                    ]
                    aggregator.assert_metric(mname,
                                             tags=dir_tags + file_tag,
                                             count=1)

            if config.get('pattern') != "*.log":
                # Files in 'temp_dir'
                for i in range(0, 10):
                    file_tag = [
                        filetagname +
                        ":%s" % os.path.normpath(temp_dir + "/file_" + str(i))
                    ]
                    aggregator.assert_metric(mname,
                                             tags=dir_tags + file_tag,
                                             count=1)

            if not config.get('pattern'):
                # Files in 'temp_dir/subfolder'
                if config.get('recursive'):
                    for i in range(0, 5):
                        file_tag = [
                            filetagname +
                            ":%s" % os.path.normpath(temp_dir + "/subfolder" +
                                                     "/file_" + str(i))
                        ]
                        aggregator.assert_metric(mname,
                                                 tags=dir_tags + file_tag,
                                                 count=1)

        # Common metrics
        for mname in COMMON_METRICS:
            aggregator.assert_metric(mname, tags=dir_tags, count=1)

        # Raises when coverage < 100%
        assert aggregator.metrics_asserted_pct == 100.0
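
The assertions above imply a fixture tree of ten file_<i> files and two log_<i>.log files in temp_dir, plus five more files under temp_dir/subfolder. A sketch of building that layout for the test (names taken from the assertions; the suite's actual fixture setup may differ):

import os
import tempfile

temp_dir = tempfile.mkdtemp()
for i in range(10):
    open(os.path.join(temp_dir, 'file_{}'.format(i)), 'w').close()
for i in range(1, 3):
    open(os.path.join(temp_dir, 'log_{}.log'.format(i)), 'w').close()
os.mkdir(os.path.join(temp_dir, 'subfolder'))
for i in range(5):
    open(os.path.join(temp_dir, 'subfolder', 'file_{}'.format(i)), 'w').close()
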
Example #29
def test_check(aggregator, instance):
    """
    Testing Sortdb check.
    """
    check = SortdbCheck(CHECK_NAME, {}, {})
    with open(os.path.join(HERE, 'sortdb_metrics.json'), 'r') as f:
        check._get_response_from_url = MagicMock(return_value=json.load(f))

    check.check(instance)
    for metric, value in EXPECTED_VALUES:
        aggregator.assert_metric(metric, value=value)

    aggregator.assert_service_check(check.SORTDB_SERVICE_CHECK)
    # Raises when COVERAGE=true and coverage < 100%
    aggregator.assert_all_metrics_covered()
Example #30
def test_down(aggregator, check):
    """
    Service expected to be down
    """
    check.check(deepcopy(common.INSTANCE_KO))
    expected_tags = [
        "instance:DownService", "target_host:127.0.0.1", "port:65530",
        "foo:bar"
    ]
    aggregator.assert_service_check('tcp.can_connect',
                                    status=check.CRITICAL,
                                    tags=expected_tags)
    aggregator.assert_metric('network.tcp.can_connect',
                             value=0,
                             tags=expected_tags)