def dd_environment(): """Start the cassandra cluster with required configuration.""" env = os.environ compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yaml') env['CONTAINER_PORT'] = common.PORT with docker_run(compose_file, service_name=common.CASSANDRA_CONTAINER_NAME, log_patterns=['Listening for thrift clients']): cassandra_seed = get_container_ip("{}".format( common.CASSANDRA_CONTAINER_NAME)) env['CASSANDRA_SEEDS'] = cassandra_seed with docker_run( compose_file, service_name=common.CASSANDRA_CONTAINER_NAME_2, log_patterns=[ 'All sessions completed', 'Starting listening for CQL clients' ], ): subprocess.check_call([ "docker", "exec", common.CASSANDRA_CONTAINER_NAME, "cqlsh", "-e", "CREATE KEYSPACE IF NOT EXISTS test \ WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':2}", ]) yield common.CONFIG_INSTANCE, E2E_METADATA
def generate_container_instance_config(metrics):
    """Build a check config whose single instance targets the SNMP container.

    *metrics* is forwarded to ``generate_instance_config`` unchanged.
    """
    template = copy.deepcopy(SNMP_CONF)
    template['ip_address'] = get_container_ip(SNMP_CONTAINER_NAME)
    instance = generate_instance_config(metrics, template=template)
    return {'init_config': {}, 'instances': [instance]}
def dd_environment(): """ Start the cassandra cluster with required configuration """ env = os.environ compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yaml') env['CONTAINER_PORT'] = common.PORT # We need to restrict permission on the password file # Create a temporary file so if we have to run tests more than once on a machine # the original file's perms aren't modified with TempDir() as tmpdir: jmx_pass_file = os.path.join(common.HERE, "compose", 'jmxremote.password') copy_path(jmx_pass_file, tmpdir) temp_jmx_file = os.path.join(tmpdir, 'jmxremote.password') env['JMX_PASS_FILE'] = temp_jmx_file os.chmod(temp_jmx_file, stat.S_IRWXU) with docker_run(compose_file, service_name=common.CASSANDRA_CONTAINER_NAME, log_patterns=['Listening for thrift clients']): cassandra_seed = get_container_ip("{}".format( common.CASSANDRA_CONTAINER_NAME)) env['CASSANDRA_SEEDS'] = cassandra_seed with docker_run(compose_file, service_name=common.CASSANDRA_CONTAINER_NAME_2, log_patterns=['All sessions completed']): subprocess.check_call([ "docker", "exec", common.CASSANDRA_CONTAINER_NAME, "cqlsh", "-e", "CREATE KEYSPACE IF NOT EXISTS test \ WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor':2}" ]) yield common.CONFIG_INSTANCE, 'local'
def create_datadog_conf_file(tmp_dir):
    """Write a datadog.yaml under *tmp_dir* enabling SNMP auto-discovery.

    Declares three discovery entries on the SNMP container's subnet: a v2c
    generic router (/29, core loader), a v2c APC UPS (/28), and a v3
    user-based entry (/27). Returns the path of the written file.
    """
    container_ip = get_container_ip(SNMP_CONTAINER_NAME)
    subnet = '.'.join(container_ip.split('.')[:3])
    # Settings shared by every discovery entry.
    probe_defaults = {'port': PORT, 'timeout': 1, 'retries': 2}

    generic_router = dict(probe_defaults)
    generic_router.update({
        'network': '{}.0/29'.format(subnet),
        'community': 'generic-router',
        'version': 2,
        'tags': ['tag1:val1', 'tag2:val2'],
        'loader': 'core',
    })

    apc_ups = dict(probe_defaults)
    apc_ups.update({
        'network': '{}.0/28'.format(subnet),
        'community': 'apc_ups',
        'version': 2,
    })

    snmp_v3 = dict(probe_defaults)
    snmp_v3.update({
        'network': '{}.0/27'.format(subnet),
        'version': 3,
        'user': '******',
        'authentication_key': 'doggiepass',
        'authentication_protocol': 'sha',
        'privacy_key': 'doggiePRIVkey',
        'privacy_protocol': 'des',
        'context_engine_id': 'my-engine-id',
        'context_name': 'my-context-name',
        'ignored_ip_addresses': {'{}.2'.format(subnet): True},
    })

    datadog_conf = {
        'snmp_listener': {
            'workers': 4,
            'discovery_interval': 10,
            'configs': [generic_router, apc_ups, snmp_v3],
        },
        'listeners': [{'name': 'snmp'}],
    }

    conf_path = os.path.join(tmp_dir, 'datadog.yaml')
    with open(conf_path, 'w') as fh:
        fh.write(yaml.dump(datadog_conf))
    return conf_path
def create_datadog_conf_file(tmp_dir):
    """Write a datadog.yaml under *tmp_dir* enabling SNMP auto-discovery.

    Declares three discovery entries on the SNMP container's subnet (v2c
    generic router via the core loader, v2c APC UPS via the python loader,
    and a v3 user-based entry) and returns the path of the written file.
    """
    container_ip = get_container_ip(SNMP_CONTAINER_NAME)
    prefix = ".".join(container_ip.split('.')[:3])
    datadog_conf = {
        # Set check_runners to -1 to avoid checks being run in background when running `agent check` for e2e testing
        # Setting check_runners to a negative number to disable check runners is a workaround,
        # Datadog Agent might not guarantee this behaviour in the future.
        'check_runners': -1,
        'snmp_listener': {
            'workers': 4,
            'discovery_interval': 10,
            'configs': [
                {
                    'network': '{}.0/29'.format(prefix),
                    'port': PORT,
                    'community': 'generic-router',
                    'version': 2,
                    'timeout': 1,
                    'retries': 2,
                    'tags': [
                        "tag1:val1",
                        "tag2:val2",
                    ],
                    'loader': 'core',
                },
                {
                    'network': '{}.0/28'.format(prefix),
                    'port': PORT,
                    'community': 'apc_ups',
                    'version': 2,
                    'timeout': 1,
                    'retries': 2,
                    'loader': 'python',
                },
                {
                    'network': '{}.0/27'.format(prefix),
                    'port': PORT,
                    'version': 3,
                    'timeout': 1,
                    'retries': 2,
                    'user': '******',
                    'authentication_key': 'doggiepass',
                    'authentication_protocol': 'sha',
                    'privacy_key': 'doggiePRIVkey',
                    'privacy_protocol': 'des',
                    'context_name': 'public',
                    'ignored_ip_addresses': {'{}.2'.format(prefix): True},
                    'loader': 'core',
                },
            ],
        },
        'listeners': [{'name': 'snmp'}],
    }
    datadog_conf_file = os.path.join(tmp_dir, 'datadog.yaml')
    # BUG FIX: the file was opened in binary mode ('wb'), but yaml.dump()
    # returns str on Python 3, so file.write() raised TypeError. Open in
    # text mode, matching the other variant of this helper in the suite.
    with open(datadog_conf_file, 'w') as file:
        file.write(yaml.dump(datadog_conf))
    return datadog_conf_file
def generate_container_profile_config(profile):
    """Build a one-instance check config selecting *profile* via its community string."""
    template = copy.deepcopy(SNMP_CONF)
    template['ip_address'] = get_container_ip(SNMP_CONTAINER_NAME)
    instance = generate_instance_config([], template=template)
    instance.update({
        'community_string': profile,
        'enforce_mib_constraints': False,
    })
    return {'init_config': {}, 'instances': [instance]}
def test_metric_tags(aggregator, instance):
    """Verify server-, connection- and route-level metrics carry the expected tag prefixes."""
    check = GnatsdCheck(CHECK_NAME, {}, {})
    check.check(instance)

    # Server-level tag.
    aggregator.assert_metric_has_tag_prefix('gnatsd.varz.connections', 'gnatsd-server_id', at_least=1)

    # Connection-level tags, all attached to the same per-connection metric.
    conn_metric = 'gnatsd.connz.connections.foo-sub.out_msgs'
    for prefix in ('gnatsd-cid', 'gnatsd-ip', 'gnatsd-name', 'gnatsd-lang', 'gnatsd-version'):
        aggregator.assert_metric_has_tag_prefix(conn_metric, prefix, at_least=1)

    # Route-level tags; the route metric name embeds the peer IP with dots replaced.
    route_ip = get_container_ip('docker_nats_serverA_1').replace('.', '_')
    route_metric = 'gnatsd.routez.routes.{}.in_msgs'.format(route_ip)
    for prefix in ('gnatsd-rid', 'gnatsd-remote_id', 'gnatsd-ip'):
        aggregator.assert_metric_has_tag_prefix(route_metric, prefix, at_least=1)
def generate_container_profile_config_with_ad(profile):
    """Build a config from an autodiscovery-style template (network_address,
    not ip_address), then pin the instance to *profile*."""
    resolved_host = socket.gethostbyname(get_container_ip(SNMP_CONTAINER_NAME))
    subnet = ipaddress.ip_network(u'{}/29'.format(resolved_host), strict=False).with_prefixlen
    template = {
        # Make sure the check handles bytes
        'network_address': to_native_string(subnet),
        'port': PORT,
        'community_string': 'apc_ups',
    }
    instance = generate_instance_config([], template=template)
    instance.update({
        'community_string': profile,
        'enforce_mib_constraints': False,
    })
    return {'init_config': {}, 'instances': [instance]}
def test_metrics(aggregator, instance):
    """Run the check once and verify every metric, its type, and full coverage."""
    check = GnatsdCheck(CHECK_NAME, {}, [instance])
    check.check(instance)

    aggregator.assert_service_check('gnatsd.can_connect', status=GnatsdCheck.OK, count=1)

    gauges = [
        'gnatsd.varz.connections',
        'gnatsd.varz.subscriptions',
        'gnatsd.varz.remotes',
        'gnatsd.varz.routes',
        'gnatsd.varz.mem',
        'gnatsd.connz.num_connections',
        'gnatsd.routez.num_routes',
    ]
    counts = [
        'gnatsd.varz.slow_consumers',
        'gnatsd.varz.in_msgs',
        'gnatsd.varz.out_msgs',
        'gnatsd.varz.in_bytes',
        'gnatsd.varz.out_bytes',
        'gnatsd.connz.total',
    ]

    # Per-connection metrics for both the named ('foo-sub') and unnamed clients.
    for conn in ('foo-sub', 'unnamed'):
        gauges.append('gnatsd.connz.connections.{}.pending_bytes'.format(conn))
        gauges.append('gnatsd.connz.connections.{}.subscriptions'.format(conn))
        counts.append('gnatsd.connz.connections.{}.in_msgs'.format(conn))
        counts.append('gnatsd.connz.connections.{}.in_bytes'.format(conn))
        counts.append('gnatsd.connz.connections.{}.out_bytes'.format(conn))

    # We sent 2 messages to this queue in the ci setup
    counts.append('gnatsd.connz.connections.foo-sub.out_msgs')

    # Route metrics embed the peer IP with dots replaced by underscores.
    route_ip = get_container_ip('docker_nats_serverA_1').replace('.', '_')
    route_base = 'gnatsd.routez.routes.{}'.format(route_ip)
    gauges.extend([route_base + '.subscriptions', route_base + '.pending_size'])
    counts.extend([
        route_base + '.in_msgs',
        route_base + '.out_msgs',
        route_base + '.in_bytes',
        route_base + '.out_bytes',
    ])

    for name in gauges:
        aggregator.assert_metric(name, metric_type=aggregator.GAUGE)
    for name in counts:
        aggregator.assert_metric(name, metric_type=aggregator.COUNT)

    # We sent 1 message to this queue in the ci setup
    aggregator.assert_metric('gnatsd.connz.connections.unnamed.out_msgs',
                             metric_type=aggregator.COUNT, value=1)

    aggregator.assert_all_metrics_covered()
def test_e2e_core_discovery(dd_agent_check):
    """Run 5 discovery checks with the core loader and verify profile tags and per-run metrics."""
    config = common.generate_container_profile_config_with_ad('apc_ups')
    config['init_config']['loader'] = 'core'
    aggregator = common.dd_agent_check_wrapper(dd_agent_check, config, rate=False, times=5)

    network = config['instances'][0]['network_address']
    ip_address = get_container_ip(SNMP_CONTAINER_NAME)

    # Tags contributed by the apc_ups profile.
    profile_tags = [
        'snmp_profile:apc_ups',
        'model:APC Smart-UPS 600',
        'firmware_version:2.0.3-test',
        'serial_num:test_serial',
        'ups_name:testIdentName',
        'device_vendor:apc',
        'device_namespace:default',
    ]
    # Tags contributed by autodiscovery.
    discovery_tags = [
        'autodiscovery_subnet:' + network,
        'snmp_device:' + ip_address,
    ]
    tags = profile_tags + discovery_tags

    # test that for a specific metric we are getting as many times as we are running the check
    # it might be off by 1 due to devices not being discovered yet at first check run
    aggregator.assert_metric('snmp.devices_monitored', metric_type=aggregator.GAUGE,
                             tags=tags + ['loader:core'], at_least=4, value=1)
    aggregator.assert_metric('snmp.upsAdvBatteryTemperature', metric_type=aggregator.GAUGE,
                             tags=tags, at_least=4)
def couchbase_container_ip():
    """Modular fixture that depends on couchbase being initialized.

    Returns the IP address of the couchbase test container.
    """
    couchbase_ip = get_container_ip(CB_CONTAINER_NAME)
    return couchbase_ip
def container_ip():
    """Return the IP address of the SNMP test container."""
    snmp_host = get_container_ip(SNMP_CONTAINER_NAME)
    return snmp_host
def test_e2e_meraki_cloud_controller(dd_agent_check):
    """Exercise the meraki-cloud-controller profile end to end with the core loader."""
    config = common.generate_container_instance_config([])
    config['init_config']['loader'] = 'core'
    config['instances'][0].update({
        'community_string': 'meraki-cloud-controller',
    })
    # run a rate check, will execute two check runs to evaluate rate metrics
    aggregator = common.dd_agent_check_wrapper(dd_agent_check, config, rate=True)

    common_tags = [
        'snmp_profile:meraki-cloud-controller',
        'snmp_host:dashboard.meraki.com',
        'device_vendor:meraki',
        'device_namespace:default',
        'snmp_device:' + get_container_ip(SNMP_CONTAINER_NAME),
    ]
    common.assert_common_metrics(aggregator, tags=common_tags, is_e2e=True, loader='core')

    # Device-level metrics: one submission per check run (count=2 over the rate pair).
    dev_tags = ['product:MR16-HW', 'network:L_NETWORK', 'mac_address:02:02:00:66:f5:7f'] + common_tags
    for metric in ('devStatus', 'devClientCount'):
        aggregator.assert_metric('snmp.' + metric, metric_type=aggregator.GAUGE,
                                 tags=dev_tags, count=2, device='Gymnasium')

    # Per-interface device metrics for the wifi0 row.
    wifi_tags = ['interface:wifi0', 'index:4', 'mac_address:02:02:00:66:f5:00'] + common_tags
    wifi_metrics = (
        'devInterfaceSentPkts',
        'devInterfaceRecvPkts',
        'devInterfaceSentBytes',
        'devInterfaceRecvBytes',
    )
    for metric in wifi_metrics:
        aggregator.assert_metric('snmp.' + metric, metric_type=aggregator.GAUGE,
                                 tags=wifi_tags, count=2)

    # IF-MIB
    eth_tags = ['interface:eth0'] + common_tags
    # (metric group, metric type, expected submission count) — rates and
    # bandwidth usage only materialize on the second run, hence count=1.
    groups = [
        (metrics.IF_COUNTS, aggregator.COUNT, 1),
        (metrics.IF_GAUGES, aggregator.GAUGE, 2),
        (metrics.IF_RATES, aggregator.GAUGE, 1),
        (metrics.IF_BANDWIDTH_USAGE, aggregator.GAUGE, 1),
    ]
    for group, metric_type, expected_count in groups:
        for metric in group:
            aggregator.assert_metric('snmp.' + metric, metric_type=metric_type,
                                     tags=eth_tags, count=expected_count)

    aggregator.assert_metric('snmp.sysUpTimeInstance', count=2, tags=common_tags)
    aggregator.assert_all_metrics_covered()