def check(self, instance):
    """Collect network metrics for one instance configuration.

    Reads the instance options, compiles the interface-exclusion regex if
    one is configured, then dispatches to the platform-specific collector.

    Raises:
        ConfigurationError: if 'excluded_interfaces' is not a list.
    """
    if instance is None:
        instance = {}

    self._excluded_ifaces = instance.get('excluded_interfaces', [])
    if not isinstance(self._excluded_ifaces, list):
        raise ConfigurationError(
            "Expected 'excluded_interfaces' to be a list, got '{}'".format(type(self._excluded_ifaces).__name__)
        )

    self._collect_cx_state = instance.get('collect_connection_state', False)
    self._collect_rate_metrics = instance.get('collect_rate_metrics', True)
    self._collect_count_metrics = instance.get('collect_count_metrics', False)

    # Decides whether connection states are split or combined, along with a
    # few other per-instance metric settings.
    self._setup_metrics(instance)

    self._exclude_iface_re = None
    pattern = instance.get('excluded_interface_re', None)
    if pattern:
        self.log.debug("Excluding network devices matching: %s", pattern)
        self._exclude_iface_re = re.compile(pattern)

    # Platform dispatch: predicates are evaluated in the same order as an
    # if/elif chain, and only the first match runs its collector.
    dispatch = (
        (Platform.is_linux, self._check_linux),
        (Platform.is_bsd, self._check_bsd),
        (Platform.is_solaris, self._check_solaris),
        (Platform.is_windows, self._check_psutil),
    )
    for is_platform, collect in dispatch:
        if is_platform():
            collect(instance)
            break
def check(self, instance):
    """Collect network metrics for one instance configuration.

    Reads the instance options, compiles the interface-exclusion regex if
    one is configured, then dispatches to the platform-specific collector.
    """
    if instance is None:
        instance = {}
    # NOTE(review): unlike the sibling implementation, the type of
    # 'excluded_interfaces' is not validated here — confirm whether a
    # non-list value should raise a configuration error.
    self._excluded_ifaces = instance.get('excluded_interfaces', [])
    self._collect_cx_state = instance.get('collect_connection_state', False)
    self._collect_rate_metrics = instance.get('collect_rate_metrics', True)
    self._collect_count_metrics = instance.get('collect_count_metrics', False)
    # This decides whether we should split or combine connection states,
    # along with a few other things
    self._setup_metrics(instance)
    self._exclude_iface_re = None
    exclude_re = instance.get('excluded_interface_re', None)
    if exclude_re:
        # Pass the value as a lazy %-style argument instead of eagerly
        # interpolating with `%`: the message is then only formatted when
        # debug logging is actually enabled.
        self.log.debug("Excluding network devices matching: %s", exclude_re)
        self._exclude_iface_re = re.compile(exclude_re)
    if Platform.is_linux():
        self._check_linux(instance)
    elif Platform.is_bsd():
        self._check_bsd(instance)
    elif Platform.is_solaris():
        self._check_solaris(instance)
    elif Platform.is_windows():
        self._check_psutil(instance)
def _assert_complex_config(aggregator):
    """Assert the service checks and metrics emitted by a complex-config run."""
    # Service checks
    aggregator.assert_service_check('mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS, count=1)
    aggregator.assert_service_check('mysql.replication.slave_running', status=MySql.OK, tags=tags.SC_TAGS, at_least=1)

    testable_metrics = (
        variables.STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.BINLOG_VARS
        + variables.SYSTEM_METRICS
        + variables.SCHEMA_VARS
        + variables.SYNTHETIC_VARS
    )
    if MYSQL_VERSION_PARSED >= parse_version('5.6') and environ.get('MYSQL_FLAVOR') != 'mariadb':
        testable_metrics.extend(variables.PERFORMANCE_VARS)

    for metric_name in testable_metrics:
        # user/kernel time metrics are currently only guaranteed on Linux,
        # and cpu_time is not guaranteed on Windows.
        if metric_name in ('mysql.performance.user_time', 'mysql.performance.kernel_time') and not Platform.is_linux():
            continue
        if metric_name == 'mysql.performance.cpu_time' and Platform.is_windows():
            continue
        if metric_name == 'mysql.performance.query_run_time.avg':
            for schema in ('testdb', 'mysql'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        elif metric_name == 'mysql.info.schema.size':
            for schema in ('testdb', 'information_schema', 'performance_schema'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        else:
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS, at_least=0)

    # TODO: assert the service metadata (version) once it is implemented.

    # Custom query metrics
    aggregator.assert_metric('alice.age', value=25)
    aggregator.assert_metric('bob.age', value=20)

    # Optional metrics
    optional_metrics = (
        variables.OPTIONAL_REPLICATION_METRICS
        + variables.OPTIONAL_INNODB_VARS
        + variables.OPTIONAL_STATUS_VARS
        + variables.OPTIONAL_STATUS_VARS_5_6_6
    )
    _test_optional_metrics(aggregator, optional_metrics, 1)

    # Raises when coverage < 100%
    aggregator.assert_all_metrics_covered()
def mock_server():
    """Spin up the mock server via docker compose and yield while it runs."""
    # Windows containers need their own compose file.
    compose_filename = 'docker-compose-windows.yaml' if Platform.is_windows() else 'docker-compose.yaml'
    compose_file = os.path.join(common.HERE, 'compose', 'mock_server', compose_filename)
    with docker_run(compose_file, env_vars={"MOCK_SERVER_PORT": str(common.MOCK_SERVER_PORT)}):
        yield
def test_check_real_process_regex(aggregator, dd_run_check):
    """Find the currently running python/pytest process via a regex search string."""
    from datadog_checks.base.utils.platform import Platform

    instance = {
        'name': 'py',
        'search_string': ['.*python.*pytest'],
        'exact_match': False,
        'ignored_denied_access': True,
        'thresholds': {
            'warning': [1, 10],
            'critical': [1, 100]
        },
    }
    process = ProcessCheck(common.CHECK_NAME, {}, [instance])
    expected_tags = generate_expected_tags(instance)

    dd_run_check(process)
    for unix_name in common.PROCESS_METRIC:
        # Some metrics are not expected on this run:
        # - io metrics when psutil's io_counters() is unavailable
        # - shared memory when psutil's memory_info_ex() is unavailable
        # - cpu.pct, which needs a second run before it is reported
        not_expected = (
            (not _PSUTIL_IO_COUNTERS and '.io' in unix_name)
            or (not _PSUTIL_MEM_SHARED and 'mem.real' in unix_name)
            or unix_name == 'system.processes.cpu.pct'
        )
        if not_expected:
            continue
        metric = common.UNIX_TO_WINDOWS_MAP.get(unix_name, unix_name) if Platform.is_windows() else unix_name
        aggregator.assert_metric(metric, at_least=1, tags=expected_tags)

    aggregator.assert_service_check('process.up', count=1, tags=expected_tags + ['process:py'])

    # cpu.pct requires another run to produce a value.
    dd_run_check(process)
    aggregator.assert_metric('system.processes.cpu.pct', count=1, tags=expected_tags)
    aggregator.assert_metric('system.processes.cpu.normalized_pct', count=1, tags=expected_tags)
def uds_path():
    """Yield the path of a Unix domain socket shared with the container."""
    if Platform.is_mac():
        # See: https://github.com/docker/for-mac/issues/483
        pytest.skip('Sharing Unix sockets is not supported by Docker for Mac.')
    if Platform.is_windows():
        pytest.skip('Nginx does not run on Windows.')

    with TempDir() as shared_dir:
        compose_file = os.path.join(HERE, 'compose', 'uds.yaml')
        socket_name = 'tmp.sock'
        socket_path = os.path.join(shared_dir, socket_name)
        env = {
            "UDS_HOST_DIRECTORY": shared_dir,
            'UDS_FILENAME': socket_name,
        }
        with docker_run(compose_file=compose_file, env_vars=env):
            yield socket_path
def test_complex_config_replica(aggregator, instance_complex):
    """Run the check against the replica port and assert the emitted data."""
    mysql_check = MySql(common.CHECK_NAME, {}, {})
    config = copy.deepcopy(instance_complex)
    config['port'] = common.SLAVE_PORT
    mysql_check.check(config)

    # Service checks
    aggregator.assert_service_check('mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS_REPLICA, count=1)
    # Travis MySQL not running replication - FIX in flavored test.
    aggregator.assert_service_check(
        'mysql.replication.slave_running', status=MySql.OK, tags=tags.SC_TAGS_REPLICA, at_least=1
    )

    testable_metrics = (
        variables.STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.BINLOG_VARS
        + variables.SYSTEM_METRICS
        + variables.SCHEMA_VARS
        + variables.SYNTHETIC_VARS
    )
    if MYSQL_VERSION_PARSED >= parse_version('5.6') and environ.get('MYSQL_FLAVOR') != 'mariadb':
        testable_metrics.extend(variables.PERFORMANCE_VARS)

    for metric_name in testable_metrics:
        # user/kernel time metrics are currently only guaranteed on Linux,
        # and cpu_time is not guaranteed on Windows.
        if metric_name in ('mysql.performance.user_time', 'mysql.performance.kernel_time') and not Platform.is_linux():
            continue
        if metric_name == 'mysql.performance.cpu_time' and Platform.is_windows():
            continue
        if metric_name == 'mysql.performance.query_run_time.avg':
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:testdb'], at_least=1)
        elif metric_name == 'mysql.info.schema.size':
            for schema in ('testdb', 'information_schema', 'performance_schema'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        else:
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS, at_least=0)

    # Custom query metrics
    aggregator.assert_metric('alice.age', value=25)
    aggregator.assert_metric('bob.age', value=20)

    # Optional metrics
    optional_metrics = (
        variables.OPTIONAL_REPLICATION_METRICS
        + variables.OPTIONAL_INNODB_VARS
        + variables.OPTIONAL_STATUS_VARS
        + variables.OPTIONAL_STATUS_VARS_5_6_6
    )
    _test_optional_metrics(aggregator, optional_metrics, 1)

    # Raises when coverage < 100%
    aggregator.assert_all_metrics_covered()
def test_complex_config_replica(aggregator, instance_complex):
    """Run the check against the replica port and assert the emitted data."""
    config = copy.deepcopy(instance_complex)
    config['port'] = common.SLAVE_PORT
    mysql_check = MySql(common.CHECK_NAME, {}, instances=[config])
    mysql_check.check(config)

    # Service checks
    aggregator.assert_service_check('mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS_REPLICA, count=1)
    # Travis MySQL not running replication - FIX in flavored test.
    aggregator.assert_service_check(
        'mysql.replication.slave_running',
        status=MySql.OK,
        tags=tags.SC_TAGS_REPLICA + ['replication_mode:replica'],
        at_least=1,
    )

    testable_metrics = (
        variables.STATUS_VARS
        + variables.COMPLEX_STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.COMPLEX_VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.COMPLEX_INNODB_VARS
        + variables.BINLOG_VARS
        + variables.SYSTEM_METRICS
        + variables.SCHEMA_VARS
        + variables.SYNTHETIC_VARS
        + variables.STATEMENT_VARS
    )
    if MYSQL_VERSION_PARSED >= parse_version('5.6') and environ.get('MYSQL_FLAVOR') != 'mariadb':
        testable_metrics.extend(variables.PERFORMANCE_VARS)

    for metric_name in testable_metrics:
        # user/kernel time metrics are currently only guaranteed on Linux,
        # and cpu_time is not guaranteed on Windows.
        if metric_name in ('mysql.performance.user_time', 'mysql.performance.kernel_time') and not Platform.is_linux():
            continue
        if metric_name == 'mysql.performance.cpu_time' and Platform.is_windows():
            continue
        if metric_name == 'mysql.performance.query_run_time.avg':
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:testdb'], at_least=1)
        elif metric_name == 'mysql.info.schema.size':
            for schema in ('testdb', 'information_schema', 'performance_schema'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        else:
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS, at_least=0)

    # Custom query metrics
    aggregator.assert_metric('alice.age', value=25)
    aggregator.assert_metric('bob.age', value=20)

    # Optional metrics. Note: this assertion passes even if some metrics are
    # not present; manual testing is required for optional metrics.
    optional_metrics = (
        variables.OPTIONAL_REPLICATION_METRICS
        + variables.OPTIONAL_INNODB_VARS
        + variables.OPTIONAL_STATUS_VARS
        + variables.OPTIONAL_STATUS_VARS_5_6_6
    )
    _test_optional_metrics(aggregator, optional_metrics)

    # Raises when coverage < 100%
    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(
        get_metadata_metrics(),
        check_submission_type=True,
        exclude=['alice.age', 'bob.age'] + variables.STATEMENT_VARS,
    )
# In order to collect connection state we need `ss` command included in `iproute2` package E2E_METADATA = { 'start_commands': ['apt-get update', 'apt-get install iproute2 -y'] } EXPECTED_METRICS = [ 'system.net.bytes_rcvd', 'system.net.bytes_sent', 'system.net.packets_in.count', 'system.net.packets_in.error', 'system.net.packets_out.count', 'system.net.packets_out.error', ] if Platform.is_linux() or Platform.is_windows(): EXPECTED_METRICS.extend([ 'system.net.packets_in.drop', 'system.net.packets_out.drop', ]) E2E_EXPECTED_METRICS = EXPECTED_METRICS + [ "system.net.tcp4.closing", "system.net.tcp4.established", "system.net.tcp4.listening", "system.net.tcp4.opening", "system.net.tcp4.time_wait", "system.net.tcp6.closing", "system.net.tcp6.established", "system.net.tcp6.listening", "system.net.tcp6.opening",
# (C) Datadog, Inc. 2018 # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os import sys from datadog_checks.base.utils.platform import Platform EMBEDDED_DIR = 'embedded' if Platform.is_windows(): EMBEDDED_DIR += str(sys.version_info[0]) def get_ca_certs_path(): """ Get a path to the trusted certificates of the system """ for f in _get_ca_certs_paths(): if os.path.exists(f): return f return None def _get_ca_certs_paths(): """ Get a list of possible paths containing certificates Check is installed via pip to: * Windows: embedded/lib/site-packages/datadog_checks/http_check * Linux: embedded/lib/python2.7/site-packages/datadog_checks/http_check
def _assert_complex_config(aggregator, hostname='stubbed.hostname'):
    """Assert the service checks and metrics emitted by a complex-config run.

    The expectations vary with the replication topology (classic vs group)
    and the MySQL version.
    """
    aggregator.assert_service_check(
        'mysql.can_connect', status=MySql.OK, tags=tags.SC_TAGS, hostname=hostname, count=1
    )
    if MYSQL_REPLICATION == 'classic':
        aggregator.assert_service_check(
            'mysql.replication.slave_running',
            status=MySql.OK,
            tags=tags.SC_TAGS + ['replication_mode:source'],
            hostname=hostname,
            at_least=1,
        )

    testable_metrics = (
        variables.STATUS_VARS
        + variables.COMPLEX_STATUS_VARS
        + variables.VARIABLES_VARS
        + variables.COMPLEX_VARIABLES_VARS
        + variables.INNODB_VARS
        + variables.COMPLEX_INNODB_VARS
        + variables.BINLOG_VARS
        + variables.SYSTEM_METRICS
        + variables.SCHEMA_VARS
        + variables.SYNTHETIC_VARS
        + variables.STATEMENT_VARS
        + variables.TABLE_VARS
    )
    if MYSQL_REPLICATION == 'group':
        testable_metrics.extend(variables.GROUP_REPLICATION_VARS)
        aggregator.assert_service_check(
            'mysql.replication.group.status',
            status=MySql.OK,
            tags=tags.SC_TAGS
            + ['channel_name:group_replication_applier', 'member_role:PRIMARY', 'member_state:ONLINE'],
            count=1,
        )
    if MYSQL_VERSION_PARSED >= parse_version('5.6'):
        testable_metrics.extend(variables.PERFORMANCE_VARS)

    for metric_name in testable_metrics:
        # user/kernel time metrics are currently only guaranteed on Linux,
        # and cpu_time is not guaranteed on Windows.
        if metric_name in ('mysql.performance.user_time', 'mysql.performance.kernel_time') and not Platform.is_linux():
            continue
        if metric_name == 'mysql.performance.cpu_time' and Platform.is_windows():
            continue
        if metric_name == 'mysql.performance.query_run_time.avg':
            for schema in ('testdb', 'mysql'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        elif metric_name == 'mysql.info.schema.size':
            for schema in ('testdb', 'information_schema', 'performance_schema'):
                aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS + ['schema:{}'.format(schema)], count=1)
        else:
            aggregator.assert_metric(metric_name, tags=tags.METRIC_TAGS, at_least=0)

    # TODO: assert the service metadata (version) once it is implemented.

    # Custom query metrics
    aggregator.assert_metric('alice.age', value=25)
    aggregator.assert_metric('bob.age', value=20)

    # Optional metrics. Note: this assertion passes even if some metrics are
    # not present; manual testing is required for optional metrics.
    optional_metrics = (
        variables.OPTIONAL_REPLICATION_METRICS
        + variables.OPTIONAL_INNODB_VARS
        + variables.OPTIONAL_STATUS_VARS
        + variables.OPTIONAL_STATUS_VARS_5_6_6
    )
    _test_optional_metrics(aggregator, optional_metrics)

    # Raises when coverage < 100%
    aggregator.assert_all_metrics_covered()