Example #1
0
def test_device_tagging(aggregator, gauge_metrics, rate_metrics):
    """Metrics carry the extra tags produced by a matching device_tag_re pattern."""
    instance = {
        'use_mount': 'no',
        'device_tag_re': {'{}.*'.format(DEFAULT_DEVICE_NAME[:-1]): 'type:dev,tag:two'},
        'tags': ['optional:tags1'],
    }
    check = Disk('disk', None, {}, [instance])
    check.check(instance)

    # Gauges get the regex-derived tags plus the device and instance tags.
    gauge_tags = [
        'type:dev',
        'tag:two',
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
    ]
    for metric, expected in iteritems(gauge_metrics):
        aggregator.assert_metric(metric, value=expected, tags=gauge_tags)

    rate_tags = ['device:{}'.format(DEFAULT_DEVICE_NAME), 'optional:tags1']
    for metric, expected in iteritems(rate_metrics):
        aggregator.assert_metric(metric, value=expected, tags=rate_tags)

    aggregator.assert_all_metrics_covered()
Example #2
0
def test_device_tagging(aggregator, psutil_mocks):
    """Metrics carry the extra tags produced by a matching device_tag_re pattern."""
    instances = [{
        'use_mount': 'no',
        'device_tag_re': {
            "/dev/sda.*": "type:dev,tag:two"
        },
        'tags': ["optional:tags1"]
    }]
    c = Disk('disk', None, {}, instances)
    c.check(instances[0])

    # Assert metrics
    tags = [
        "type:dev", "tag:two", "device:{}".format(DEFAULT_DEVICE_NAME),
        "optional:tags1"
    ]
    # dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=tags)

    for name, value in RATES_VALUES.items():
        aggregator.assert_metric(
            name,
            value=value,
            tags=['device:{}'.format(DEFAULT_DEVICE_NAME), "optional:tags1"])

    assert aggregator.metrics_asserted_pct == 100.0
Example #3
0
def test_no_psutil_debian(aggregator, gauge_metrics):
    """With psutil disabled, metrics come from parsing Debian `df -Tk` output."""
    instance = {
        'use_mount': 'no',
        'excluded_filesystems': ['tmpfs'],
        'tag_by_label': False
    }
    check = Disk('disk', None, {}, [instance])
    # Force the fallback (non-psutil) code path.
    check._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs',
                               return_value=MockInodesMetrics(),
                               __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('debian-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        check.check(instance)

    device_tags = ['device:{}'.format(DEFAULT_DEVICE_NAME)]
    for metric, expected in iteritems(gauge_metrics):
        aggregator.assert_metric(metric, value=expected, tags=device_tags)
        # backward compatibility with the old check
        aggregator.assert_metric(metric, tags=['device:udev'])

    aggregator.assert_all_metrics_covered()
Example #4
0
def test_device_tagging(aggregator, gauge_metrics, rate_metrics):
    """device_tag_re tags and device labels are both applied to metrics."""
    instance = {
        'use_mount': 'no',
        'device_tag_re': {'{}.*'.format(DEFAULT_DEVICE_NAME[:-1]): 'type:dev,tag:two'},
        'tags': ['optional:tags1'],
        'tag_by_label': False,
    }
    check = Disk('disk', None, {}, [instance])

    with mock.patch('datadog_checks.disk.disk.Disk._get_devices_label'):
        # _get_devices_label is only called on linux, so devices_label is manually filled
        # to make the test run on everything
        check.devices_label = {DEFAULT_DEVICE_NAME: 'label:mylab'}
        check.check(instance)

    gauge_tags = [
        'type:dev',
        'tag:two',
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
        'label:mylab',
    ]
    for metric, expected in iteritems(gauge_metrics):
        aggregator.assert_metric(metric, value=expected, tags=gauge_tags)

    rate_tags = [
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'optional:tags1',
        'label:mylab',
    ]
    for metric, expected in iteritems(rate_metrics):
        aggregator.assert_metric(metric, value=expected, tags=rate_tags)

    aggregator.assert_all_metrics_covered()
Example #5
0
def test_no_psutil_freebsd(aggregator, gauge_metrics):
    """With psutil disabled, metrics come from parsing FreeBSD `df -Tk` output."""
    instance = {
        'use_mount': 'no',
        'excluded_filesystems': ['devfs'],
        'excluded_disk_re': 'zroot/.+',
        'tag_by_label': False,
    }
    check = Disk('disk', None, {}, [instance])
    # Force the fallback (non-psutil) code path.
    check._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs',
                               return_value=MockInodesMetrics(),
                               __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('freebsd-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        check.check(instance)

    for metric, expected in iteritems(gauge_metrics):
        aggregator.assert_metric(metric, value=expected, tags=['device:zroot'])

    aggregator.assert_all_metrics_covered()
Example #6
0
def test_no_psutil_centos(aggregator, gauge_metrics):
    """With psutil disabled, metrics come from parsing CentOS `df -Tk` output."""
    instance = {
        'use_mount': 'no',
        'excluded_filesystems': ['devfs', 'tmpfs'],
        'excluded_disks': ['/dev/sda1'],
        'tag_by_label': False,
    }
    check = Disk('disk', None, {}, [instance])
    # Force the fallback (non-psutil) code path.
    check._psutil = lambda: False

    statvfs_patch = mock.patch('os.statvfs',
                               return_value=MockInodesMetrics(),
                               __name__='statvfs')
    df_patch = mock.patch(
        'datadog_checks.disk.disk.get_subprocess_output',
        return_value=mock_df_output('centos-df-Tk'),
        __name__='get_subprocess_output',
    )

    with statvfs_patch, df_patch:
        check.check(instance)

    for device in ('/dev/sda3', '10.1.5.223:/vil/cor'):
        for metric in gauge_metrics:
            aggregator.assert_metric(metric, tags=['device:{}'.format(device)])

    aggregator.assert_all_metrics_covered()
Example #7
0
def test_default(aggregator, gauge_metrics, rate_metrics):
    """
    Mock psutil and run the check
    """
    for tag_by_filesystem in ('true', 'false'):
        instance = {'tag_by_filesystem': tag_by_filesystem, 'tag_by_label': False}
        check = Disk('disk', {}, [instance])
        check.check(instance)

        gauge_tags = []
        if tag_by_filesystem == 'true':
            gauge_tags = [
                DEFAULT_FILE_SYSTEM,
                'filesystem:{}'.format(DEFAULT_FILE_SYSTEM),
                'device:{}'.format(DEFAULT_DEVICE_NAME),
                'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
            ]

        for metric, expected in iteritems(gauge_metrics):
            aggregator.assert_metric(metric, value=expected, tags=gauge_tags)

        # Rates are always tagged by device, regardless of tag_by_filesystem.
        rate_tags = [
            'device:{}'.format(DEFAULT_DEVICE_NAME),
            'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
        ]
        for metric, expected in iteritems(rate_metrics):
            aggregator.assert_metric(metric, value=expected, tags=rate_tags)

    aggregator.assert_all_metrics_covered()
Example #8
0
def test_timeout_warning(aggregator, gauge_metrics, rate_metrics, count_metrics):
    """Test a warning is raised when there is a Timeout exception."""

    def faulty_timeout(fun):
        # Wrap `fun` so that only the "/faulty" mountpoint raises.
        def wrapped(mountpoint):
            if mountpoint == "/faulty":
                raise TimeoutException
            return fun(mountpoint)

        return wrapped

    check = Disk('disk', {}, [{}])
    check.log = mock.MagicMock()
    disk_metrics = MockDiskMetrics()
    disk_metrics.total = 0

    partitions = [MockPart(), MockPart(mountpoint="/faulty")]
    with mock.patch('psutil.disk_partitions', return_value=partitions), mock.patch(
        'psutil.disk_usage', return_value=disk_metrics, __name__='disk_usage'
    ), mock.patch('datadog_checks.disk.disk.timeout', return_value=faulty_timeout):
        check.check({})

    # Check that the warning is called once for the faulty disk
    check.log.warning.assert_called_once()

    for metric in gauge_metrics:
        aggregator.assert_metric(metric, count=0)

    for metric in chain(rate_metrics, count_metrics):
        aggregator.assert_metric_has_tag(metric, 'device:{}'.format(DEFAULT_DEVICE_NAME))
        aggregator.assert_metric_has_tag(metric, 'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME))

    aggregator.assert_all_metrics_covered()
Example #9
0
def test_use_mount(aggregator, instance_basic_mount, gauge_metrics,
                   rate_metrics):
    """
    Same as above, using mount to tag
    """
    check = Disk('disk', {}, [instance_basic_mount])
    check.check(instance_basic_mount)

    # Gauges are tagged by mount point when use_mount is enabled.
    mount_tags = [
        'device:{}'.format(DEFAULT_MOUNT_POINT),
        'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
    ]
    for metric, expected in iteritems(gauge_metrics):
        aggregator.assert_metric(metric, value=expected, tags=mount_tags)

    # Rates keep the raw device name.
    device_tags = [
        'device:{}'.format(DEFAULT_DEVICE_NAME),
        'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME),
    ]
    for metric, expected in iteritems(rate_metrics):
        aggregator.assert_metric(metric, value=expected, tags=device_tags)

    aggregator.assert_all_metrics_covered()
Example #10
0
def test_legacy_option():
    """
    Ensure check option overrides datadog.conf
    """
    check = Disk('disk', None, {'use_mount': 'yes'}, [{}])
    assert check._use_mount is True

    # The instance-level setting takes precedence over the init config.
    check = Disk('disk', None, {'use_mount': 'yes'}, [{'use_mount': 'no'}])
    assert check._use_mount is False
Example #11
0
def test_legacy_option(instance_basic_mount, instance_basic_volume):
    """
    Ensure check option overrides datadog.conf
    """
    check = Disk('disk', None, instance_basic_mount, [{}])
    assert check._use_mount is True

    # The instance-level setting takes precedence over the init config.
    check = Disk('disk', None, instance_basic_mount, [instance_basic_volume])
    assert check._use_mount is False
Example #12
0
def test_psutil_rw(aggregator):
    """
    Check for 'ro' option in the mounts
    """
    rw_instance = {'service_check_rw': 'yes'}
    check = Disk('disk', None, {}, [rw_instance])
    check.check(rw_instance)

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
Example #13
0
def test_psutil_rw(aggregator, psutil_mocks):
    """
    Check for 'ro' option in the mounts
    """
    rw_instances = [{'service_check_rw': 'yes'}]
    check = Disk('disk', None, {}, rw_instances)
    check.check(rw_instances[0])

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
Example #14
0
def test_rw(aggregator):
    """
    Check for 'ro' option in the mounts
    """
    rw_instance = {'service_check_rw': 'yes', 'tag_by_label': False}
    check = Disk('disk', {}, [rw_instance])
    check.check(rw_instance)

    aggregator.assert_service_check('disk.read_write', status=Disk.CRITICAL)
Example #15
0
def test_get_devices_label():
    """_get_devices_label maps a device path to its blkid label tag."""
    check = Disk('disk', None, {}, [{}])

    blkid_patch = mock.patch(
        "datadog_checks.disk.disk.get_subprocess_output",
        return_value=mock_blkid_output(),
        __name__='get_subprocess_output',
    )
    with blkid_patch:
        labels = check._get_devices_label()

    assert labels.get("/dev/mapper/vagrant--vg-root") == "label:DATA"
Example #16
0
def test_labels_from_blkid_cache_file(aggregator, instance_blkid_cache_file,
                                      gauge_metrics, rate_metrics):
    """
    Verify that the disk labels are set when the blkid_cache_file option is set
    """
    check = Disk('disk', {}, [instance_blkid_cache_file])
    check.check(instance_blkid_cache_file)

    expected_tags = ['device:/dev/sda1', 'label:MYLABEL']
    for metric in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(metric, tags=expected_tags)
Example #17
0
def test_disk_check(aggregator):
    """
    Basic check to see if all metrics are there
    """
    check = Disk('disk', None, {}, [{'use_mount': 'no'}])
    check.check({'use_mount': 'no'})

    for metric in DISK_GAUGES + INODE_GAUGES + DISK_RATES:
        aggregator.assert_metric(metric, tags=[])

    assert aggregator.metrics_asserted_pct == 100.0
Example #18
0
def test_blkid_cache_file_contains_no_labels(
        aggregator, instance_blkid_cache_file_no_label, gauge_metrics,
        rate_metrics):
    """
    Verify that the disk labels are ignored if the cache file doesn't contain any
    """
    check = Disk('disk', {}, [instance_blkid_cache_file_no_label])
    check.check(instance_blkid_cache_file_no_label)

    for metric in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(metric, tags=['device:/dev/sda1'])
Example #19
0
def test_check(aggregator, instance_basic_volume, gauge_metrics, rate_metrics):
    """
    Basic check to see if all metrics are there
    """
    check = Disk('disk', {}, [instance_basic_volume])
    check.check(instance_basic_volume)

    for metric in chain(gauge_metrics, rate_metrics):
        aggregator.assert_metric(metric)

    aggregator.assert_all_metrics_covered()
def test_device_exclusion_logic_no_name():
    """
    Same as above but with default configuration values and device='' to expose a bug in #2359
    """
    instances = [{'use_mount': 'yes', 'excluded_mountpoint_re': '^/run$', 'all_partitions': 'yes'}]
    check = Disk('disk', None, {}, instances)

    # Exact mountpoint match is excluded; a sub-path of it is not.
    assert check._exclude_disk_psutil(MockPart(device='', mountpoint='/run')) is True
    assert check._exclude_disk_psutil(MockPart(device='', mountpoint='/run/shm')) is False
Example #21
0
def test_include_all_devices(aggregator, gauge_metrics, rate_metrics,
                             dd_run_check):
    """psutil.disk_partitions gets all=True unless include_all_devices is disabled."""
    check = Disk('disk', {}, [{}])
    with mock.patch('psutil.disk_partitions', return_value=[]) as partitions_mock:
        dd_run_check(check)
        # By default, we include all devices
        partitions_mock.assert_called_with(all=True)

    check = Disk('disk', {}, [{'include_all_devices': False}])
    with mock.patch('psutil.disk_partitions', return_value=[]) as partitions_mock:
        dd_run_check(check)
        partitions_mock.assert_called_with(all=False)
def test_use_mount(aggregator, psutil_mocks):
    """
    Same as above, using mount to tag
    """
    instances = [{'use_mount': 'yes'}]
    c = Disk('disk', None, {}, instances)
    c.check(instances[0])

    # dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:/'])

    for name, value in RATES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])

    assert aggregator.metrics_asserted_pct == 100.0
def test_no_psutil_freebsd(aggregator):
    """With psutil disabled, metrics come from parsing FreeBSD `df -Tk` output."""
    p1 = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    p2 = mock.patch('datadog_checks.disk.disk.get_subprocess_output',
                    return_value=mock_df_output('freebsd-df-Tk'), __name__="get_subprocess_output")

    instances = [{'use_mount': 'no', 'excluded_filesystems': ['devfs'], 'excluded_disk_re': 'zroot/.+'}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    # Context managers guarantee the patches are undone even on failure;
    # the original start() calls were never stopped and leaked into other tests.
    with p1, p2:
        c.check(instances[0])

    # dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:zroot'])
    assert aggregator.metrics_asserted_pct == 100.0
Example #24
0
def test_min_disk_size(aggregator, gauge_metrics, rate_metrics, count_metrics,
                       dd_run_check):
    """Disks below min_disk_size emit no gauges; rates and counts still flow."""
    instance = {'min_disk_size': 0.001}
    check = Disk('disk', {}, [instance])

    usage = MockDiskMetrics()
    usage.total = 0
    with mock.patch('psutil.disk_usage', return_value=usage,
                    __name__='disk_usage'):
        dd_run_check(check)

    for metric in gauge_metrics:
        aggregator.assert_metric(metric, count=0)

    # Rates and counts share identical tag expectations.
    for metric in chain(rate_metrics, count_metrics):
        aggregator.assert_metric_has_tag(
            metric, 'device:{}'.format(DEFAULT_DEVICE_NAME))
        aggregator.assert_metric_has_tag(
            metric, 'device_name:{}'.format(DEFAULT_DEVICE_BASE_NAME))

    aggregator.assert_all_metrics_covered()
Example #25
0
def test_ignore_empty_regex():
    """
    Ignore empty regex as they match all strings
    (and so exclude all disks from the check)
    """
    disk_check = Disk('disk', None, {'device_blacklist_re': ''}, [{}])
    # The empty pattern must be replaced by one that matches nothing but ''.
    assert disk_check._excluded_disk_re == re.compile('^$')
Example #26
0
def test_bad_config():
    """
    Check creation will fail if more than one `instance` is passed to the
    constructor
    """
    too_many_instances = [{}, {}]
    with pytest.raises(Exception):
        Disk('disk', {}, too_many_instances)
def test_no_psutil_centos(aggregator):
    """With psutil disabled, metrics come from parsing CentOS `df -Tk` output."""
    p1 = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    p2 = mock.patch('datadog_checks.disk.disk.get_subprocess_output',
                    return_value=mock_df_output('centos-df-Tk'), __name__="get_subprocess_output")

    instances = [{'use_mount': 'no', 'excluded_filesystems': ['devfs', 'tmpfs'], 'excluded_disks': ['/dev/sda1']}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    # Context managers guarantee the patches are undone even on failure;
    # the original start() calls were never stopped and leaked into other tests.
    with p1, p2:
        c.check(instances[0])

    for device in ['/dev/sda3', '10.1.5.223:/vil/cor']:
        # dict.iteritems() is Python 2 only; iterating the dict yields keys on 2 and 3.
        for name in GAUGES_VALUES:
            aggregator.assert_metric(name, tags=['device:{}'.format(device)])
    assert aggregator.metrics_asserted_pct == 100.0
def test_no_psutil_debian(aggregator):
    """With psutil disabled, metrics come from parsing Debian `df -Tk` output."""
    p1 = mock.patch('os.statvfs', return_value=MockInodesMetrics(), __name__="statvfs")
    p2 = mock.patch('datadog_checks.disk.disk.get_subprocess_output',
                    return_value=mock_df_output('debian-df-Tk'), __name__="get_subprocess_output")

    instances = [{'use_mount': 'no', 'excluded_filesystems': ['tmpfs']}]
    c = Disk('disk', None, {}, instances)
    c._psutil = lambda: False  # disable psutil

    # Context managers guarantee the patches are undone even on failure;
    # the original start() calls were never stopped and leaked into other tests.
    with p1, p2:
        c.check(instances[0])

    # dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
    for name, value in GAUGES_VALUES.items():
        aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])
        # backward compatibility with the old check
        aggregator.assert_metric(name, tags=['device:udev'])
    assert aggregator.metrics_asserted_pct == 100.0
Example #29
0
def test_get_devices_label_from_lsblk():
    """
    Test lsblk output parsing.
    """
    check = Disk('disk', {}, [{}])

    lsblk_patch = mock.patch(
        "datadog_checks.disk.disk.get_subprocess_output",
        return_value=mock_lsblk_output(),
        __name__='get_subprocess_output',
    )
    with lsblk_patch:
        labels = check._get_devices_label_from_lsblk()

    # Labels keep their surrounding whitespace exactly as lsblk reports them.
    expected = {
        "/dev/sda1": ["label:MYLABEL", "device_label:MYLABEL"],
        "/dev/sda15": ["label: WITH SPACES ", "device_label: WITH SPACES "],
    }
    assert labels == expected
def test_psutil(aggregator, psutil_mocks):
    """
    Mock psutil and run the check
    """
    for tag_by in ['yes', 'no']:
        instances = [{'tag_by_filesystem': tag_by}]
        c = Disk('disk', None, {}, instances)
        c.check(instances[0])

        tags = ['ext4', 'filesystem:ext4', 'device:{}'.format(DEFAULT_DEVICE_NAME)] if tag_by == 'yes' else []

        # dict.iteritems() is Python 2 only; .items() works on both 2 and 3.
        for name, value in GAUGES_VALUES.items():
            aggregator.assert_metric(name, value=value, tags=tags)

        for name, value in RATES_VALUES.items():
            aggregator.assert_metric(name, value=value, tags=['device:{}'.format(DEFAULT_DEVICE_NAME)])

    assert aggregator.metrics_asserted_pct == 100.0