def test_device_tagging(aggregator, gauge_metrics, rate_metrics):
    """Metrics carry tags from `device_tag_re`, the instance tags, and device labels."""
    instance = {
        'use_mount': 'no',
        'device_tag_re': {'{}.*'.format(DEFAULT_DEVICE_NAME[:-1]): 'type:dev,tag:two'},
        'tags': ['optional:tags1'],
        'tag_by_label': False,
    }
    c = Disk('disk', None, {}, [instance])

    with mock.patch('datadog_checks.disk.disk.Disk._get_devices_label'):
        # _get_devices_label is only called on linux, so devices_label is manually filled
        # to make the test run on everything
        c.devices_label = {DEFAULT_DEVICE_NAME: 'label:mylab'}
        c.check(instance)

    # Assert metrics
    gauge_tags = [
        'type:dev',
        'tag:two',
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
        'label:mylab',
    ]
    rate_tags = [
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
        'label:mylab',
    ]

    for name, value in iteritems(gauge_metrics):
        aggregator.assert_metric(name, value=value, tags=gauge_tags)

    for name, value in iteritems(rate_metrics):
        aggregator.assert_metric(name, value=value, tags=rate_tags)

    aggregator.assert_all_metrics_covered()
def test_device_tagging(aggregator, gauge_metrics, rate_metrics):
    """Metrics are tagged according to `device_tag_re` plus the instance tags."""
    instance = {
        'use_mount': 'no',
        'device_tag_re': {'{}.*'.format(DEFAULT_DEVICE_NAME[:-1]): 'type:dev,tag:two'},
        'tags': ['optional:tags1'],
    }
    c = Disk('disk', None, {}, [instance])
    c.check(instance)

    # Assert metrics
    expected_gauge_tags = [
        'type:dev',
        'tag:two',
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
    ]
    expected_rate_tags = ['device:{}'.format(DEFAULT_DEVICE_NAME), 'optional:tags1']

    for name, value in iteritems(gauge_metrics):
        aggregator.assert_metric(name, value=value, tags=expected_gauge_tags)

    for name, value in iteritems(rate_metrics):
        aggregator.assert_metric(name, value=value, tags=expected_rate_tags)

    aggregator.assert_all_metrics_covered()
def test_no_psutil_freebsd(aggregator, gauge_metrics):
    """FreeBSD fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instance = {
        'use_mount': 'no',
        'excluded_filesystems': ['devfs'],
        'excluded_disk_re': 'zroot/.+',
        'tag_by_label': False,
    }
    c = Disk('disk', None, {}, [instance])
    # disable psutil
    c._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('freebsd-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        c.check(instance)

    for name, value in iteritems(gauge_metrics):
        aggregator.assert_metric(name, value=value, tags=['device:zroot'])

    aggregator.assert_all_metrics_covered()
def test_no_psutil_debian(aggregator, gauge_metrics):
    """Debian fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instance = {'use_mount': 'no', 'excluded_filesystems': ['tmpfs'], 'tag_by_label': False}
    c = Disk('disk', None, {}, [instance])
    # disable psutil
    c._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('debian-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        c.check(instance)

    for name, value in iteritems(gauge_metrics):
        aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])
        # backward compatibility with the old check
        aggregator.assert_metric(name, tags=['device:udev'])

    aggregator.assert_all_metrics_covered()
def test_no_psutil_centos(aggregator, gauge_metrics):
    """CentOS fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instance = {
        'use_mount': 'no',
        'excluded_filesystems': ['devfs', 'tmpfs'],
        'excluded_disks': ['/dev/sda1'],
        'tag_by_label': False,
    }
    c = Disk('disk', None, {}, [instance])
    # disable psutil
    c._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('centos-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        c.check(instance)

    for device in ('/dev/sda3', '10.1.5.223:/vil/cor'):
        device_tags = ['device:{}'.format(device)]
        for name in gauge_metrics:
            aggregator.assert_metric(name, tags=device_tags)

    aggregator.assert_all_metrics_covered()
def test_default(aggregator, gauge_metrics, rate_metrics):
    """
    Mock psutil and run the check
    """
    for tag_by in ('true', 'false'):
        instance = {'tag_by_filesystem': tag_by, 'tag_by_label': False}
        c = Disk('disk', {}, [instance])
        c.check(instance)

        if tag_by == 'true':
            expected_tags = [
                DEFAULT_FILE_SYSTEM,
                'filesystem:{}'.format(DEFAULT_FILE_SYSTEM),
                'device:{}'.format(DEFAULT_DEVICE_NAME),
                'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
            ]
        else:
            expected_tags = []

        for name, value in iteritems(gauge_metrics):
            aggregator.assert_metric(name, value=value, tags=expected_tags)

        rate_tags = [
            'device:{}'.format(DEFAULT_DEVICE_NAME),
            'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
        ]
        for name, value in iteritems(rate_metrics):
            aggregator.assert_metric(name, value=value, tags=rate_tags)

        aggregator.assert_all_metrics_covered()
def test_timeout_warning(aggregator, gauge_metrics, rate_metrics, count_metrics):
    """Test a warning is raised when there is a Timeout exception."""

    # Raise exception for "/faulty" mountpoint
    def faulty_timeout(fun):
        def wrapped(mountpoint):
            if mountpoint == "/faulty":
                raise TimeoutException
            return fun(mountpoint)

        return wrapped

    c = Disk('disk', {}, [{}])
    c.log = mock.MagicMock()

    usage = MockDiskMetrics()
    usage.total = 0

    partitions_patch = mock.patch(
        'psutil.disk_partitions', return_value=[MockPart(), MockPart(mountpoint="/faulty")]
    )
    usage_patch = mock.patch('psutil.disk_usage', return_value=usage, __name__='disk_usage')
    timeout_patch = mock.patch('datadog_checks.disk.disk.timeout', return_value=faulty_timeout)

    with partitions_patch, usage_patch, timeout_patch:
        c.check({})

    # Check that the warning is called once for the faulty disk
    c.log.warning.assert_called_once()

    for name in gauge_metrics:
        aggregator.assert_metric(name, count=0)

    for name in chain(rate_metrics, count_metrics):
        aggregator.assert_metric_has_tag(name, 'device:{}'.format(DEFAULT_DEVICE_NAME))
        aggregator.assert_metric_has_tag(name, 'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME))

    aggregator.assert_all_metrics_covered()
def test_device_tagging(aggregator, psutil_mocks):
    """Metrics are tagged according to `device_tag_re` plus the instance tags."""
    instances = [
        {
            'use_mount': 'no',
            'device_tag_re': {"/dev/sda.*": "type:dev,tag:two"},
            'tags': ["optional:tags1"],
        }
    ]
    c = Disk('disk', None, {}, instances)
    c.check(instances[0])

    # Assert metrics
    tags = ["type:dev", "tag:two", "device:{}".format(DEFAULT_DEVICE_NAME), "optional:tags1"]

    # Use .items() instead of the Python 2-only .iteritems() so the test also
    # runs on Python 3 (identical behavior on Python 2).
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=tags)

    for name, value in RATES_VALUES.items():
        aggregator.assert_metric(
            name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME), "optional:tags1"]
        )

    assert aggregator.metrics_asserted_pct == 100.0
def test_use_mount(aggregator, instance_basic_mount, gauge_metrics, rate_metrics):
    """
    Same as above, using mount to tag
    """
    c = Disk('disk', {}, [instance_basic_mount])
    c.check(instance_basic_mount)

    gauge_tags = [
        'device:{}'.format(DEFAULT_MOUNT_POINT),
        'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
    ]
    rate_tags = [
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
    ]

    for name, value in iteritems(gauge_metrics):
        aggregator.assert_metric(name, value=value, tags=gauge_tags)

    for name, value in iteritems(rate_metrics):
        aggregator.assert_metric(name, value=value, tags=rate_tags)

    aggregator.assert_all_metrics_covered()
def test_psutil_rw(aggregator, psutil_mocks):
    """
    Check for 'ro' option in the mounts
    """
    instances = [{'service_check_rw': 'yes'}]
    check = Disk('disk', None, {}, instances)
    check.check(instances[0])

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
def test_rw(aggregator):
    """
    Check for 'ro' option in the mounts
    """
    instance = {'service_check_rw': 'yes', 'tag_by_label': False}
    check = Disk('disk', {}, [instance])
    check.check(instance)

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
def test_psutil_rw(aggregator):
    """
    Check for 'ro' option in the mounts
    """
    instance = {'service_check_rw': 'yes'}
    check = Disk('disk', None, {}, [instance])
    check.check(instance)

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
def test_disk_check(aggregator):
    """
    Basic check to see if all metrics are there
    """
    instance = {'use_mount': 'no'}
    c = Disk('disk', None, {}, [instance])
    c.check(instance)

    for name in DISK_GAUGES + INODE_GAUGES + DISK_RATES:
        aggregator.assert_metric(name, tags=[])

    assert aggregator.metrics_asserted_pct == 100.0
def test_labels_from_blkid_cache_file(aggregator, instance_blkid_cache_file, gauge_metrics, rate_metrics):
    """
    Verify that the disk labels are set when the blkid_cache_file option is set
    """
    c = Disk('disk', {}, [instance_blkid_cache_file])
    c.check(instance_blkid_cache_file)

    expected_tags = ['device:/dev/sda1', 'label:MYLABEL']
    for name in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(name, tags=expected_tags)
def test_blkid_cache_file_contains_no_labels(
        aggregator, instance_blkid_cache_file_no_label, gauge_metrics, rate_metrics):
    """
    Verify that the disk labels are ignored if the cache file doesn't contain any
    """
    c = Disk('disk', {}, [instance_blkid_cache_file_no_label])
    c.check(instance_blkid_cache_file_no_label)

    for name in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(name, tags=['device:/dev/sda1'])
def test_check(aggregator, instance_basic_volume, gauge_metrics, rate_metrics):
    """
    Basic check to see if all metrics are there
    """
    check = Disk('disk', {}, [instance_basic_volume])
    check.check(instance_basic_volume)

    for metric_name in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(metric_name)

    aggregator.assert_all_metrics_covered()
def test_include_all_devices(aggregator, gauge_metrics, rate_metrics):
    """`include_all_devices` controls the `all` flag passed to psutil.disk_partitions."""
    # By default, we include all devices
    c = Disk('disk', {}, [{}])
    with mock.patch('psutil.disk_partitions', return_value=[]) as partitions_mock:
        c.check({})
    partitions_mock.assert_called_with(all=True)

    instance = {'include_all_devices': False}
    c = Disk('disk', {}, [instance])
    with mock.patch('psutil.disk_partitions', return_value=[]) as partitions_mock:
        c.check({})
    partitions_mock.assert_called_with(all=False)
def test_use_mount(aggregator, psutil_mocks):
    """
    Same as above, using mount to tag
    """
    instances = [{'use_mount': 'yes'}]
    c = Disk('disk', None, {}, instances)
    c.check(instances[0])

    # Use .items() instead of the Python 2-only .iteritems() so the test also
    # runs on Python 3 (identical behavior on Python 2).
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:/'])

    for name, value in RATES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])

    assert aggregator.metrics_asserted_pct == 100.0
def test_no_psutil_freebsd(aggregator):
    """FreeBSD fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instances = [{'use_mount': 'no', 'excluded_filesystems': ['devfs'], 'excluded_disk_re': 'zroot/.+'}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('freebsd-df-Tk'),
        __name__="get_subprocess_output",
    )
    # Use context managers so the patches are always undone: the original
    # called p1.start()/p2.start() without ever stopping them, leaking the
    # mocks into every subsequent test.
    with statvfs_patch, df_patch:
        c.check(instances[0])

    # .items() instead of the Python 2-only .iteritems() so the test runs on Python 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:zroot'])

    assert aggregator.metrics_asserted_pct == 100.0
def test_no_psutil_centos(aggregator):
    """CentOS fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instances = [{'use_mount': 'no', 'excluded_filesystems': ['devfs', 'tmpfs'], 'excluded_disks': ['/dev/sda1']}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('centos-df-Tk'),
        __name__="get_subprocess_output",
    )
    # Use context managers so the patches are always undone: the original
    # called p1.start()/p2.start() without ever stopping them, leaking the
    # mocks into every subsequent test.
    with statvfs_patch, df_patch:
        c.check(instances[0])

    for device in ['/dev/sda3', '10.1.5.223:/vil/cor']:
        # Only the keys are needed, so iterate the dict directly instead of
        # the Python 2-only .iteritems() with a discarded value.
        for name in GAUGES_VALUES:
            aggregator.assert_metric(name, tags=['device:{}'.format(device)])

    assert aggregator.metrics_asserted_pct == 100.0
def test_no_psutil_debian(aggregator):
    """Debian fallback: metrics come from parsing `df -Tk` when psutil is disabled."""
    instances = [{'use_mount': 'no', 'excluded_filesystems': ['tmpfs']}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    statvfs_patch = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('debian-df-Tk'),
        __name__="get_subprocess_output",
    )
    # Use context managers so the patches are always undone: the original
    # called p1.start()/p2.start() without ever stopping them, leaking the
    # mocks into every subsequent test.
    with statvfs_patch, df_patch:
        c.check(instances[0])

    # .items() instead of the Python 2-only .iteritems() so the test runs on Python 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])
        # backward compatibility with the old check
        aggregator.assert_metric(name, tags=['device:udev'])

    assert aggregator.metrics_asserted_pct == 100.0
def test_psutil(aggregator, psutil_mocks):
    """
    Mock psutil and run the check
    """
    for tag_by in ['yes', 'no']:
        instances = [{'tag_by_filesystem': tag_by}]
        c = Disk('disk', None, {}, instances)
        c.check(instances[0])

        tags = ['ext4', 'filesystem:ext4', 'device:{}'.format(DEFAULT_DEVICE_NAME)] if tag_by == 'yes' else []

        # .items() instead of the Python 2-only .iteritems() so the test runs on Python 3.
        for name, value in GAUGES_VALUES.items():
            aggregator.assert_metric(name, value=value, tags=tags)

        for name, value in RATES_VALUES.items():
            aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])

        assert aggregator.metrics_asserted_pct == 100.0
def test_min_disk_size(aggregator, gauge_metrics, rate_metrics):
    """Disks smaller than `min_disk_size` report no gauges, but rates are still emitted."""
    instance = {'min_disk_size': 0.001}
    c = Disk('disk', {}, [instance])

    usage = MockDiskMetrics()
    usage.total = 0
    with mock.patch('psutil.disk_usage', return_value=usage, __name__='disk_usage'):
        c.check(instance)

    for name in gauge_metrics:
        aggregator.assert_metric(name, count=0)

    device_tag = 'device:{}'.format(DEFAULT_DEVICE_NAME)
    for name in rate_metrics:
        aggregator.assert_metric_has_tag(name, device_tag)

    aggregator.assert_all_metrics_covered()
def test_timeout_config(aggregator):
    """Test timeout configuration value is used on every timeout on the check."""
    # Arbitrary value
    TIMEOUT_VALUE = 42
    instance = {'timeout': TIMEOUT_VALUE}
    c = Disk('disk', {}, [instance])

    # Mock timeout version: a pass-through decorator that applies no timeout.
    def no_timeout(fun):
        # Forward the positional arguments unchanged. The original returned
        # `lambda *args: fun(args)`, which called the wrapped function with a
        # single tuple argument instead of the real arguments.
        return lambda *args: fun(*args)

    with mock.patch('psutil.disk_partitions', return_value=[MockPart()]), mock.patch(
        'datadog_checks.disk.disk.timeout', return_value=no_timeout
    ) as mock_timeout:
        c.check(instance)

    # The configured value must be passed to every timeout() call.
    mock_timeout.assert_called_with(TIMEOUT_VALUE)
def test_disk_basic(disk_io_counters, disk_usage, disk_partitions):
    """Run the check twice and validate the flushed gauges and rates."""
    from datadog_checks.disk import Disk  # delayed import for good patching

    disk_partitions.return_value = MOCK_PARTITIONS

    aggregator = MetricsAggregator(
        HOSTNAME,
        interval=1.0,
        histogram_aggregates=None,
        histogram_percentiles=None,
    )

    total_gauges, expected_gauges = generate_expected_gauges()
    total_rates, expected_rates = generate_expected_rates()

    c = Disk("disk", {}, {}, aggregator)

    # First run: rates need a previous sample, so only gauges are flushed.
    c.check({})
    # we remove the datadog.agent.running metric
    metrics = c.aggregator.flush()[:-1]
    assert len(metrics) == total_gauges

    time.sleep(1)

    # Second run: rates can now be computed as well.
    c.check({})
    # we remove the datadog.agent.running metric
    metrics = c.aggregator.flush()[:-1]
    assert len(metrics) == (total_gauges + total_rates)

    for metric in metrics:
        assert metric['metric'] in expected_gauges or metric['metric'] in expected_rates
        assert len(metric['points']) == 1
        assert metric['host'] == HOSTNAME
        assert metric['type'] == GAUGE
        assert is_metric_expected(expected_gauges, metric) or is_metric_expected(expected_rates, metric)