def test_service_check(aggregator, riak_server):
    """Run the riak_repl check once against the dockerized server and verify
    that every configured replication key is reported as a metric, with no
    extras left over."""
    repl_keys = [
        "riak_repl.server_bytes_sent", "riak_repl.server_bytes_recv",
        "riak_repl.server_connects", "riak_repl.server_connect_errors",
        "riak_repl.server_fullsyncs", "riak_repl.client_bytes_sent",
        "riak_repl.client_bytes_recv", "riak_repl.client_connects",
        "riak_repl.client_connect_errors", "riak_repl.client_redirect",
        "riak_repl.objects_dropped_no_clients",
        "riak_repl.objects_dropped_no_leader", "riak_repl.objects_sent",
        "riak_repl.objects_forwarded", "riak_repl.elections_elected",
        "riak_repl.elections_leader_changed", "riak_repl.rt_source_errors",
        "riak_repl.rt_sink_errors", "riak_repl.rt_dirty",
        "riak_repl.realtime_send_kbps", "riak_repl.realtime_recv_kbps",
        "riak_repl.fullsync_send_kbps", "riak_repl.fullsync_recv_kbps",
        "riak_repl.realtime_queue_stats.percent_bytes_used",
        "riak_repl.realtime_queue_stats.bytes",
        "riak_repl.realtime_queue_stats.max_bytes",
        "riak_repl.realtime_queue_stats.overload_drops",
        "riak_repl.fullsync_coordinator.riak_west_1.queued",
        "riak_repl.fullsync_coordinator.riak_west_1.in_progress",
        "riak_repl.fullsync_coordinator.riak_west_1.waiting_for_retry",
        "riak_repl.fullsync_coordinator.riak_west_1.starting",
        "riak_repl.fullsync_coordinator.riak_west_1.successful_exits",
        "riak_repl.fullsync_coordinator.riak_west_1.error_exits",
        "riak_repl.fullsync_coordinator.riak_west_1.retry_exits",
        "riak_repl.fullsync_coordinator.riak_west_1.soft_retry_exits",
        "riak_repl.fullsync_coordinator.riak_west_1.busy_nodes",
        "riak_repl.fullsync_coordinator.riak_west_1.fullsyncs_completed",
    ]
    init_config = {'keys': repl_keys}

    check = RiakReplCheck('riak_repl', init_config, {}, None)

    # Point the check at the riak-repl stats endpoint exposed by the docker env.
    instance = {
        'default_timeout': 5,
        'url': 'http://{}:8098/riak-repl/stats'.format(get_docker_hostname()),
    }
    check.check(instance)

    # Every configured key must have been emitted (untagged).
    for metric_name in repl_keys:
        aggregator.assert_metric(metric_name, tags=[])

    # Assert coverage for this check on this instance
    aggregator.assert_all_metrics_covered()
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import subprocess

import mock
import pytest
import requests

from datadog_checks.dev import WaitFor, docker_run
from datadog_checks.dev.utils import ON_WINDOWS
from datadog_checks.utils.common import get_docker_hostname

# Directory containing this test module; used to locate docker fixtures.
HERE = os.path.dirname(os.path.abspath(__file__))
# Compose/config files for the coredns docker environment.
CONFIG_FOLDER = os.path.join(HERE, 'docker', 'coredns')
# Hostname where the dockerized CoreDNS is reachable.
HOST = get_docker_hostname()
# "@<host>" form that dig uses to target a specific DNS server.
ATHOST = "@{}".format(HOST)
# NOTE(review): 9153 is the metrics port used below; DNS queries go to 54 (see DIG_ARGS).
PORT = '9153'
URL = "http://{}:{}/metrics".format(HOST, PORT)

# One lookup each for the forward and proxy plugins
DIG_ARGS = ["dig", "google.com", ATHOST, "example.com", ATHOST, "-p", "54"]


def init_coredns():
    """Prime the CoreDNS instance so its /metrics endpoint has data.

    Fetches the metrics URL, then (except on Windows) issues dig queries to
    generate metric samples, and finally fails if the HTTP fetch was not OK.
    """
    response = requests.get(URL)
    # Windows images don't ship dig, so skip query generation there.
    # create some metrics by using dig
    if not ON_WINDOWS:
        subprocess.check_call(DIG_ARGS)
    # Raise only after the dig calls, matching the original call order.
    response.raise_for_status()
# Beispiel #3 (scrape separator — the lines below come from a different file)
# 0
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)

import os

from datadog_checks.utils.common import get_docker_hostname

# Identifier of the check under test.
CHECK_NAME = 'activemq_xml'

# NOTE(review): HERE and URL re-bind names defined earlier in this file
# (coredns section) — these fragments appear to come from separate files.
HERE = os.path.dirname(os.path.abspath(__file__))

# ActiveMQ web console endpoint exposed by the docker environment.
URL = "http://{}:8161".format(get_docker_hostname())

# Instance config for the check; credentials are redacted placeholders.
CONFIG = {'url': URL, 'username': "******", 'password': "******"}

# Broker-wide gauge metrics expected from the check.
GENERAL_METRICS = ["activemq.subscriber.count", "activemq.topic.count", "activemq.queue.count"]

# Per-queue metrics expected from the check.
QUEUE_METRICS = [
    "activemq.queue.consumer_count",
    "activemq.queue.dequeue_count",
    "activemq.queue.enqueue_count",
    "activemq.queue.size",
]

SUBSCRIBER_METRICS = [
    "activemq.subscriber.pending_queue_size",
    "activemq.subscriber.dequeue_counter",
    "activemq.subscriber.enqueue_counter",
    "activemq.subscriber.dispatched_queue_size",
    "activemq.subscriber.dispatched_counter",