def test_check(instance, dd_run_check, aggregator, tag_condition, base_tags):
    check = ArangodbCheck('arangodb', {}, [instance])

    def mock_requests_get(url, *args, **kwargs):
        fixture = url.rsplit('/', 1)[-1]
        return MockResponse(
            file_path=os.path.join(os.path.dirname(__file__), 'fixtures', tag_condition, fixture)
        )

    with mock.patch('requests.get',
                    side_effect=mock_requests_get,
                    autospec=True):
        dd_run_check(check)

    aggregator.assert_service_check(
        'arangodb.openmetrics.health',
        ArangodbCheck.OK,
        count=1,
        tags=['endpoint:http://localhost:8529/_admin/metrics/v2'],
    )
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
    for metric in METRICS:
        aggregator.assert_metric(metric)
        for tag in base_tags:
            aggregator.assert_metric_has_tag(metric, tag)

    aggregator.assert_all_metrics_covered()
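Note: the snippets on this page are shown without their imports or shared fixtures (aggregator, dd_run_check, and dd_agent_check are pytest fixtures provided by the datadog_checks.dev pytest plugin). As a minimal sketch, the test above would typically rely on something like the following; the METRICS list and the tests/fixtures/<tag_condition>/ layout are assumptions about that integration's test package rather than part of the snippet.

import os

import mock

from datadog_checks.arangodb import ArangodbCheck
from datadog_checks.dev.http import MockResponse
from datadog_checks.dev.utils import get_metadata_metrics

from .common import METRICS  # assumed module holding the expected metric names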
Example #2
def test_submitted_metrics_count(aggregator):
    instance = common.generate_instance_config(common.SCALAR_OBJECTS)
    check = common.create_check(instance)
    check.check(instance)

    for metric in common.SCALAR_OBJECTS:
        metric_name = "snmp." + (metric.get('name') or metric.get('symbol'))
        aggregator.assert_metric(metric_name, tags=common.CHECK_TAGS, count=1)

    total_snmp_submitted_metrics = len(common.SCALAR_OBJECTS) + 1  # +1 for snmp.sysUpTimeInstance

    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_metric(
        'datadog.snmp.submitted_metrics',
        value=total_snmp_submitted_metrics,
        metric_type=aggregator.GAUGE,
        count=1,
        tags=common.CHECK_TAGS,
    )
    aggregator.assert_metric(
        'datadog.snmp.check_duration', metric_type=aggregator.GAUGE, count=1, tags=common.CHECK_TAGS
    )
    aggregator.assert_metric(
        'datadog.snmp.check_interval', metric_type=aggregator.MONOTONIC_COUNT, count=1, tags=common.CHECK_TAGS
    )
    common.assert_common_device_metrics(aggregator, tags=common.CHECK_TAGS)
    aggregator.all_metrics_asserted()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        check_submission_type=True,
        exclude=['snmp.snmpEngineTime', 'snmp.tcpInSegs', 'snmp.udpDatagrams'],
    )
Example #3
def test_check_docker(dd_agent_check, init_config, instance_e2e):
    # run both query jobs synchronously to ensure only a single run of each
    instance_e2e['query_activity'] = {'run_sync': True}
    instance_e2e['query_metrics'] = {'run_sync': True}
    aggregator = dd_agent_check(
        {
            'init_config': init_config,
            'instances': [instance_e2e]
        }, rate=True)

    aggregator.assert_metric_has_tag('sqlserver.db.commit_table_entries',
                                     'db:master')

    # ignore DBM debug metrics for the following tests as they're not currently part of the public set of product
    # metrics
    dbm_debug_metrics = [
        m for m in aggregator._metrics.keys() if m.startswith('dd.sqlserver.')
    ]
    for m in dbm_debug_metrics:
        del aggregator._metrics[m]

    for mname in EXPECTED_METRICS_DBM_ENABLED:
        aggregator.assert_metric(mname)

    for mname in UNEXPECTED_FCI_METRICS + UNEXPECTED_QUERY_EXECUTOR_AO_METRICS:
        aggregator.assert_metric(mname, count=0)

    aggregator.assert_service_check('sqlserver.can_connect',
                                    status=SQLServer.OK)
    aggregator.assert_all_metrics_covered()

    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             exclude=CUSTOM_METRICS)
Example #4
def test(aggregator, dd_default_hostname, dd_run_check,
         mock_performance_objects):
    mock_performance_objects(PERFORMANCE_OBJECTS)
    check = DotnetclrCheck('dotnetclr', {}, [{'host': dd_default_hostname}])
    check.hostname = dd_default_hostname
    dd_run_check(check)

    global_tags = ['server:{}'.format(dd_default_hostname)]
    aggregator.assert_service_check('dotnetclr.windows.perf.health',
                                    ServiceCheck.OK,
                                    count=1,
                                    tags=global_tags)

    for object_name, (instances, _) in PERFORMANCE_OBJECTS.items():
        config = METRICS_CONFIG[object_name]
        counters = config['counters'][0]

        for data in counters.values():
            if isinstance(data, str):
                metric = 'dotnetclr.{}.{}'.format(config['name'], data)
            else:
                metric = 'dotnetclr.{}'.format(data.get('metric_name'))

            for instance in instances:
                if instance is None:
                    tags = global_tags
                else:
                    tags = ['instance:{}'.format(instance)]
                    tags.extend(global_tags)

                aggregator.assert_metric(metric, 9000, count=1, tags=tags)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #5
def test_e2e(dd_agent_check):
    aggregator = dd_agent_check(CONFIG, rate=True)
    assert_check(aggregator, METRICS_TO_TEST)
    # Excluding gitlab.rack.http_requests_total because it is a distribution metric
    # (its sum and count metrics are in the metadata)
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(), exclude=["gitlab.rack.http_requests_total"])
Example #6
def test_e2e(dd_agent_check, instance_complex):
    aggregator = dd_agent_check(instance_complex)

    _assert_complex_config(aggregator)
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             exclude=['alice.age', 'bob.age'] +
                                             variables.STATEMENT_VARS)
Example #7
def test_check(aggregator, instance):
    # type: (AggregatorStub, Dict[str, Any]) -> None
    # `{check_class}` and `{check_name}` are scaffolding placeholders,
    # filled in when a new check is generated from this template.
    check = {check_class}('{check_name}', {{}}, [instance])
    check.check(instance)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
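Examples like the one above come from a check-scaffolding template, which is why the class and check names appear as format placeholders. Rendered for a hypothetical Foo integration (the foo names below are illustrative, not taken from the source), the test would read roughly:

from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.foo import FooCheck  # hypothetical package for a check named 'foo'


def test_check(aggregator, instance):
    check = FooCheck('foo', {}, [instance])
    check.check(instance)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())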
Example #8
def test_integration_mongos(instance_integration, aggregator, check):
    mongos_check = check(instance_integration)
    # seed member states recorded on a previous run so state handling is exercised
    mongos_check._last_states_by_server = {0: 1, 1: 2, 2: 2}

    with mock_pymongo("mongos"):
        mongos_check.check(None)

    _assert_metrics(
        aggregator,
        [
            'default', 'custom-queries', 'dbstats', 'indexes-stats',
            'collection', 'connection-pool', 'jumbo', 'sessions'
        ],
        ['sharding_cluster_role:mongos'],
    )

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_submission_type=True,
    )
    assert len(aggregator._events) == 0
Example #9
def test_minimal_config(aggregator, instance_basic):
    mysql_check = MySql(common.CHECK_NAME, {}, [instance_basic])
    mysql_check.check(instance_basic)

    # Test service check
    aggregator.assert_service_check('mysql.can_connect',
                                    status=MySql.OK,
                                    tags=tags.SC_TAGS_MIN,
                                    count=1)

    # Test metrics
    testable_metrics = variables.STATUS_VARS + variables.VARIABLES_VARS + variables.INNODB_VARS + variables.BINLOG_VARS

    for mname in testable_metrics:
        aggregator.assert_metric(mname, at_least=1)

    optional_metrics = (variables.COMPLEX_STATUS_VARS +
                        variables.COMPLEX_VARIABLES_VARS +
                        variables.COMPLEX_INNODB_VARS +
                        variables.SYSTEM_METRICS + variables.SYNTHETIC_VARS)

    _test_optional_metrics(aggregator, optional_metrics)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             check_submission_type=True)
Example #10
def test_check(cursor_factory, aggregator, instance, dd_run_check,
               expected_metrics):
    with cursor_factory():
        check = TeradataCheck(CHECK_NAME, {}, [instance])
        dd_run_check(check)
    for metric in expected_metrics:
        aggregator.assert_metric(
            metric['name'],
            metric['value'],
            sorted(metric['tags'] + ['td_env:dev']),
            count=1,
            metric_type=metric['type'],
        )
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             check_submission_type=True)
    aggregator.assert_service_check(
        SERVICE_CHECK_CONNECT,
        ServiceCheck.OK,
        tags=['teradata_server:tdserver', 'teradata_port:1025', 'td_env:dev'],
    )
    aggregator.assert_service_check(
        SERVICE_CHECK_QUERY,
        ServiceCheck.OK,
        tags=['teradata_server:tdserver', 'teradata_port:1025', 'td_env:dev'],
    )
Example #11
def test_check(dd_run_check, aggregator, instance):
    # type: (Callable[[AgentCheck, bool], None], AggregatorStub, Dict[str, Any]) -> None
    check = {check_class}('{check_name}', {{}}, [instance])
    dd_run_check(check)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #12
def test_integration_replicaset_secondary(instance_integration, aggregator,
                                          check):
    mongo_check = check(instance_integration)
    mongo_check.last_states_by_server = {0: 2, 1: 1, 2: 7, 3: 2}

    with mock_pymongo("replica-secondary"):
        mongo_check.check(None)

    replica_tags = ['replset_name:replset', 'replset_state:secondary']
    metrics_categories = [
        'default',
        'custom-queries',
        'oplog',
        'replset-secondary',
        'top',
        'dbstats-local',
        'fsynclock',
    ]
    _assert_metrics(aggregator, metrics_categories, replica_tags)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_metric_type=False,
    )
    assert len(aggregator._events) == 0
Example #13
def test_e2e(dd_agent_check):
    instance = {}
    aggregator = dd_agent_check(instance)
    metrics = [
        'resin.thread_pool.thread_active_count',
        'resin.thread_pool.thread_count',
        'resin.thread_pool.thread_idle_count',
        'resin.thread_pool.thread_max',
        'resin.thread_pool.thread_wait_count',
        'resin.connection_pool.connection_active_count',
        'resin.connection_pool.connection_count',
        'resin.connection_pool.connection_create_count',
        'resin.connection_pool.connection_idle_count',
        'resin.connection_pool.max_connections',
        'resin.connection_pool.max_create_connections',
        'resin.connection_pool.max_overflow_connections',
    ]
    for metric in metrics:
        aggregator.assert_metric(metric, at_least=0)

    # needed because of https://github.com/DataDog/integrations-core/pull/9501
    jvm_e2e_metrics_new = list(JVM_E2E_METRICS)
    jvm_e2e_metrics_new.remove('jvm.gc.cms.count')
    jvm_e2e_metrics_new.remove('jvm.gc.parnew.time')
    for metric in jvm_e2e_metrics_new:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             exclude=jvm_e2e_metrics_new)
Example #14
def test_check(aggregator, dd_run_check, check):
    c = check(DEFAULT_INSTANCE)
    dd_run_check(c)
    dd_run_check(c)

    for metric in PROMETHEUS_METRICS:
        formatted_metric = "envoy.{}".format(metric)
        if metric in FLAKY_METRICS:
            aggregator.assert_metric(formatted_metric, at_least=0)
            continue
        aggregator.assert_metric(formatted_metric)

        collected_metrics = aggregator.metrics(METRIC_PREFIX + metric)
        legacy_metric = METRICS.get(metric)
        if collected_metrics and legacy_metric and metric not in SKIP_TAG_ASSERTION:
            expected_tags = [t for t in legacy_metric.get('tags', []) if t]
            # every collected point that has tags must carry each expected tag
            # (substring match against the submitted tag strings)
            for tag_set in expected_tags:
                assert all(
                    all(any(tag in mt for mt in m.tags) for tag in tag_set)
                    for m in collected_metrics
                    if m.tags
                ), 'tags {} not found in {}'.format(expected_tags, formatted_metric)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #15
def test_mongo_replset(instance_shard, aggregator, check):
    mongo_check = check(instance_shard)
    mongo_check.check(None)

    replset_metrics = [
        'mongodb.replset.health',
        'mongodb.replset.replicationlag',
        'mongodb.replset.state',
        'mongodb.replset.votefraction',
        'mongodb.replset.votes',
    ]
    replset_common_tags = [
        "replset_name:shard01",
        "server:mongodb://localhost:27018/",
        "sharding_cluster_role:shardsvr",
    ]
    for metric in replset_metrics:
        aggregator.assert_metric(metric, tags=replset_common_tags + ['replset_state:primary'])
    aggregator.assert_metric(
        'mongodb.replset.optime_lag', tags=replset_common_tags + ['replset_state:primary', 'member:shard01a:27018']
    )
    aggregator.assert_metric(
        'mongodb.replset.optime_lag', tags=replset_common_tags + ['replset_state:secondary', 'member:shard01b:27018']
    )
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
Example #16
def test_check_with_filters(aggregator):
    # type: (AggregatorStub) -> None
    check = MarklogicCheck('marklogic', {}, [INSTANCE_FILTERS])

    check.check(INSTANCE_FILTERS)

    _assert_metrics(aggregator, COMMON_TAGS)

    # Resource filter only
    for metric in STORAGE_HOST_METRICS + RESOURCE_STORAGE_FOREST_METRICS:
        aggregator.assert_metric_has_tag(metric,
                                         'forest_name:Security',
                                         count=1)
    for metric in RESOURCE_STATUS_DATABASE_METRICS:
        aggregator.assert_metric_has_tag(metric,
                                         'database_name:Documents',
                                         count=1)
    for metric in [
            'marklogic.requests.query-count',
            'marklogic.requests.total-requests',
            'marklogic.requests.update-count',
    ]:
        aggregator.assert_metric(
            metric,
            tags=COMMON_TAGS + ['server_name:Admin', 'group_name:Default'],
            count=1,
        )

    aggregator.assert_all_metrics_covered()

    # Service checks
    _assert_service_checks(aggregator, COMMON_TAGS)

    aggregator.assert_no_duplicate_all()

    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #17
def test_e2e(dd_agent_check, instance):
    aggregator = dd_agent_check(instance)
    aggregator.assert_metric('network.tcp.can_connect', value=1, count=1)
    aggregator.assert_service_check('tcp.can_connect', status=TCPCheck.OK, count=1)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #18
def test_integration_replicaset_arbiter(instance_integration, aggregator,
                                        check, dd_run_check):
    for query in instance_integration['custom_queries']:
        query['run_on_secondary'] = True
    instance_integration['is_arbiter'] = True
    mongo_check = check(instance_integration)
    mongo_check.last_states_by_server = {0: 2, 1: 1, 2: 7, 3: 2}

    with mock_pymongo("replica-arbiter"):
        dd_run_check(mongo_check)

    replica_tags = ['replset_name:replset', 'replset_state:arbiter']
    metrics_categories = ['serverStatus', 'replset-arbiter']

    _assert_metrics(aggregator, metrics_categories, replica_tags)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_submission_type=True,
    )
    assert len(aggregator._events) == 0
Example #19
def test_standalone(instance_integration, aggregator, check):
    mongo_check = check(instance_integration)
    mongo_check.last_states_by_server = {0: 2, 1: 1, 2: 7, 3: 2}

    with mock_pymongo("standalone"):
        mongo_check.check(None)

    metrics_categories = [
        'default',
        'custom-queries',
        'top',
        'dbstats-local',
        'fsynclock',
        'dbstats',
        'indexes-stats',
        'collection',
    ]
    _assert_metrics(aggregator, metrics_categories)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_submission_type=True,
    )
    assert len(aggregator._events) == 0
Example #20
def test_check(aggregator, instance):
    # type: (AggregatorStub, Dict[str, Any]) -> None
    check = IbmICheck('ibm_i', {}, [instance])
    check.check(instance)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #21
def test_integration_replicaset_primary_in_shard(instance_integration, aggregator, check):
    mongo_check = check(instance_integration)
    mongo_check.last_states_by_server = {0: 2, 1: 1, 2: 7, 3: 2}

    with mock_pymongo("replica-primary-in-shard"):
        mongo_check.check(None)

    replica_tags = ['replset_name:mongo-mongodb-sharded-shard-0', 'replset_state:primary']
    metrics_categories = ['default', 'custom-queries', 'oplog', 'replset-primary', 'top', 'dbstats-local', 'fsynclock']
    _assert_metrics(aggregator, metrics_categories, replica_tags)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_metric_type=False,
    )
    assert len(aggregator._events) == 3
    aggregator.assert_event(
        "MongoDB mongo-mongodb-sharded-shard0-data-0.mongo-mongodb-sharded-headless.default.svc.cluster.local:27017 "
        "(_id: 0, mongodb://testUser2:*****@localhost:27017/test) just reported as Primary (PRIMARY) for "
        "mongo-mongodb-sharded-shard-0; it was SECONDARY before.",
        tags=[
            'action:mongo_replset_member_status_change',
            'member_status:PRIMARY',
            'previous_member_status:SECONDARY',
            'replset:mongo-mongodb-sharded-shard-0',
        ],
        count=1,
    )
    aggregator.assert_event(
        "MongoDB mongo-mongodb-sharded-shard0-arbiter-0.mongo-mongodb-sharded-headless.default.svc.cluster.local:27017 "
        "(_id: 1, mongodb://testUser2:*****@localhost:27017/test) just reported as Arbiter (ARBITER) for "
        "mongo-mongodb-sharded-shard-0; it was PRIMARY before.",
        tags=[
            'action:mongo_replset_member_status_change',
            'member_status:ARBITER',
            'previous_member_status:PRIMARY',
            'replset:mongo-mongodb-sharded-shard-0',
        ],
        count=1,
    )
    aggregator.assert_event(
        "MongoDB mongo-mongodb-sharded-shard0-data-1.mongo-mongodb-sharded-headless.default.svc.cluster.local:27017 "
        "(_id: 2, mongodb://testUser2:*****@localhost:27017/test) just reported as Secondary (SECONDARY) "
        "for mongo-mongodb-sharded-shard-0; it was ARBITER before.",
        tags=[
            'action:mongo_replset_member_status_change',
            'member_status:SECONDARY',
            'previous_member_status:ARBITER',
            'replset:mongo-mongodb-sharded-shard-0',
        ],
        count=1,
    )
Example #22
def test_integration(instance_custom_queries, aggregator, mock_pymongo, check):
    instance_custom_queries["additional_metrics"] = [
        "metrics.commands", "tcmalloc", "collection", "top"
    ]
    instance_custom_queries["collections"] = ["foo", "bar"]
    instance_custom_queries["collections_indexes_stats"] = True
    mongo_check = check(instance_custom_queries)
    # Set node as "secondary" initially to trigger an event
    mongo_check._last_state_by_server[mongo_check.clean_server_name] = 2

    mongo_check.check(instance_custom_queries)

    expected_metrics = []
    with open(os.path.join(HERE, "results", "metrics.json"), 'r') as f:
        expected_metrics = json.load(f)

    for metric in expected_metrics:
        aggregator.assert_metric(metric['name'],
                                 value=metric['value'],
                                 tags=metric['tags'],
                                 metric_type=metric['type'])

    aggregator.assert_all_metrics_covered()

    metadata_metrics = get_metadata_metrics()
    aggregator.assert_metrics_using_metadata(
        metadata_metrics,
        exclude=[
            'dd.custom.mongo.aggregate.total',
            'dd.custom.mongo.count',
            'dd.custom.mongo.query_a.amount',
            'dd.custom.mongo.query_a.el',
        ],
        check_metric_type=False,
    )

    # Additionally assert that all metrics in the metadata.csv were submitted in this check run:
    for metric in metadata_metrics:
        assert [
            x for x in expected_metrics if x['name'] == metric
        ], "Metric {} is in metadata.csv but was not submitted.".format(metric)

    assert len(aggregator._service_checks) == 1
    aggregator.assert_service_check(
        'mongodb.can_connect',
        AgentCheck.OK,
        tags=['db:test', 'host:localhost', 'port:27017'])

    assert len(aggregator._events) == 1
    aggregator.assert_event(
        "MongoDB stubbed.hostname (mongodb://testUser2:*****@localhost:27017/test) "
        "just reported as Primary (PRIMARY) for shard01; it was SECONDARY before.",
        tags=[
            'action:mongo_replset_member_status_change',
            'member_status:PRIMARY',
            'previous_member_status:SECONDARY',
            'replset:shard01',
        ],
    )
Example #23
def test_e2e(dd_agent_check):
    aggregator = dd_agent_check(CONFIG)

    for metric in METRICS:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #24
def test_e2e(dd_agent_check, instance_e2e):
    aggregator = dd_agent_check(instance_e2e)

    for metric in EXPECTED_METRICS:
        aggregator.assert_metric(metric, at_least=0)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #25
def test_check(aggregator, instance):
    check = MaprCheck('mapr', {}, [instance])
    check.check(instance)

    for m in METRICS_IN_FIXTURE:
        aggregator.assert_metric(m)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #26
def test_check(mock_client, get_expected_metrics, aggregator, unit_instance, dd_run_check):
    check = AviVantageCheck('avi_vantage', {}, [unit_instance])
    dd_run_check(check)
    aggregator.assert_service_check("avi_vantage.can_connect", AviVantageCheck.OK)
    for metric in get_expected_metrics():
        aggregator.assert_metric(metric['name'], metric['value'], metric['tags'], metric_type=metric['type'])
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #27
def test_agent_check(aggregator, agent_instance_use_openmetrics,
                     mock_agent_data, dd_run_check, check):
    c = check(agent_instance_use_openmetrics(True))
    dd_run_check(c)
    for m in AGENT_V2_METRICS:
        aggregator.assert_metric(m)
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #28
def test_check_ok(dd_agent_check):
    aggregator = dd_agent_check(rate=True)
    metrics = common.FORMATTED_EXTRA_METRICS

    for metric in metrics:
        aggregator.assert_metric(metric)

    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
Example #29
def test_complex_config(aggregator, instance_complex):
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[instance_complex])
    mysql_check.check(instance_complex)

    _assert_complex_config(aggregator)
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(), check_submission_type=True, exclude=['alice.age', 'bob.age'] + variables.STATEMENT_VARS
    )
Example #30
def test_check_ok_omv2(dd_agent_check, omv2_instance):
    aggregator = dd_agent_check(omv2_instance, rate=True)
    for metric in METRICS_V2:
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics(),
                                             check_submission_type=True)