def test_ad_cache(aggregator, dd_run_check):
    """AccessDenied pids are cached so later runs do not raise again."""
    instance = {
        'name': 'python',
        'search_string': ['python'],
        'ignore_denied_access': 'false',
    }
    process = ProcessCheck(common.CHECK_NAME, {}, [instance])

    def _always_denied(obj):
        raise psutil.AccessDenied()

    # First run: every name() lookup is denied, so the check run errors out.
    with patch.object(psutil.Process, 'name', _always_denied):
        with pytest.raises(Exception):
            dd_run_check(process)

    assert len(process.ad_cache) > 0

    # Second run must succeed because the denied pids are now cached.
    dd_run_check(process)
    # The AD cache should still be considered fresh.
    assert process.should_refresh_ad_cache('python') is False

    # Clear the cache timestamps and confirm a clean run still works.
    process.last_ad_cache_ts = {}
    process.last_pid_cache_ts = {}
    dd_run_check(process)
# Example #2
def test_check_collect_children(mock_process, aggregator):
    """With collect_children enabled, the mocked process still counts as one."""
    instance = {'name': 'foo', 'pid': 1, 'collect_children': True}
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    check.check(instance)
    expected_tags = generate_expected_tags(instance)
    aggregator.assert_metric('system.processes.number', value=1, tags=expected_tags)
# Example #3
def test_check_missing_pid(aggregator):
    """A nonexistent pid_file must yield a CRITICAL 'process.up' service check."""
    instance = {'name': 'foo', 'pid_file': '/foo/bar/baz'}
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    check.check(instance)
    aggregator.assert_service_check('process.up', count=1, status=check.CRITICAL)
# Example #4
def test_check_filter_user(mock_process, aggregator):
    """Filtering by user keeps exactly the pids returned by _filter_by_user."""
    instance = {'name': 'foo', 'pid': 1, 'user': '******'}
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    target = 'datadog_checks.process.ProcessCheck._filter_by_user'
    # Two pids survive the user filter, so the metric value is 2.
    with patch(target, return_value={1, 2}):
        check.check(instance)

    aggregator.assert_metric(
        'system.processes.number', value=2, tags=generate_expected_tags(instance)
    )
def test_check_missing_process(aggregator, caplog):
    """A search string matching nothing gives CRITICAL plus a debug log line."""
    caplog.set_level(logging.DEBUG)
    instance = {
        'name': 'foo',
        'search_string': ['fooprocess', '/usr/bin/foo'],
        'exact_match': False,
    }
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    check.check(instance)
    aggregator.assert_service_check('process.up', count=1, status=check.CRITICAL)
    expected_log = "Unable to find process named ['fooprocess', '/usr/bin/foo'] among processes"
    assert expected_log in caplog.text
# Example #6
def test_psutil_wrapper_accessors(aggregator):
    """psutil_wrapper returns only the requested accessors that actually exist."""
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    meminfo = check.psutil_wrapper(get_psutil_proc(), 'memory_info', ['rss', 'vms', 'foo'], False)
    for attr in ('rss', 'vms'):
        assert attr in meminfo
    # 'foo' is not a real memory_info field, so it must be absent.
    assert 'foo' not in meminfo
def test_process_service_check(aggregator):
    """_process_service_check maps a process count against thresholds to a status."""
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    inf = float('inf')

    # (tag name, process count, thresholds, expected status)
    cases = [
        ('warning', 3, {'warning': [4, 6], 'critical': [2, 10]}, check.WARNING),
        ('no_top_ok', 3, {'warning': [2, inf], 'critical': [2, inf]}, check.OK),
        ('no_top_critical', 0, {'warning': [2, inf], 'critical': [2, inf]}, check.CRITICAL),
    ]

    for name, count, thresholds, _ in cases:
        check._process_service_check(name, count, thresholds, [])

    for name, _, _, expected_status in cases:
        aggregator.assert_service_check(
            'process.up', count=1, tags=['process:' + name], status=expected_status
        )
def test_check_collect_children(mock_process, reset_process_list_cache,
                                aggregator, dd_run_check):
    """collect_children=True still reports the single mocked process.

    NOTE(review): this redefines a function of the same name earlier in the
    file (the scraper concatenated separate examples); the earlier one is
    shadowed at import time.
    """
    instance = {'name': 'foo', 'pid': 1, 'collect_children': True}
    check = ProcessCheck(common.CHECK_NAME, {}, [instance])
    dd_run_check(check)
    aggregator.assert_metric(
        'system.processes.number', value=1, tags=generate_expected_tags(instance)
    )
def test_process_list_cache(aggregator, dd_run_check):
    """The process list cache must be shared across ProcessCheck instances."""
    instances = [
        {'name': 'python', 'search_string': ['python']},
        {'name': 'python', 'search_string': ['python']},
    ]
    check1 = ProcessCheck(common.CHECK_NAME, {}, [instances[0]])
    check2 = ProcessCheck(common.CHECK_NAME, {}, [instances[1]])

    # Stub out the reset so the cache survives between the two runs.
    with patch('datadog_checks.process.cache.ProcessListCache.reset'):
        dd_run_check(check1)
        dd_run_check(check2)

    # Should always succeed
    assert check1.process_list_cache.elements[0].name() == "Process 1"
    # Fails with 'assert "Process 2" == "Process 1"' if process list cache is not shared
    assert check2.process_list_cache.elements[0].name() == "Process 1"
# Example #10
def test_check(mock_process, aggregator):
    """Run every stubbed instance through the check and touch page-fault metrics."""
    minflt, cminflt, majflt, cmajflt = 1, 2, 3, 4

    def mock_get_pagefault_stats(pid):
        return [minflt, cminflt, majflt, cmajflt]

    check = ProcessCheck(common.CHECK_NAME, {}, {})
    for stub in common.get_config_stubs():
        instance = stub['instance']
        if 'search_string' in instance:
            pids = mock_find_pid(instance['name'], instance['search_string'])
            with patch('datadog_checks.process.ProcessCheck.find_pids', return_value=pids):
                check.check(instance)
        else:
            check.check(instance)

        # Coverage-only assertions: at_least=0 can never fail, they just
        # exercise the metric names.
        expected_tags = generate_expected_tags(instance)
        for sname in common.PAGEFAULT_STAT:
            aggregator.assert_metric(
                'system.processes.mem.page_faults.' + sname, at_least=0, tags=expected_tags
            )
def test_process_list_cache(aggregator):
    """The process list cache is shared between two ProcessCheck objects.

    NOTE(review): uses the older 4-argument ProcessCheck constructor, unlike
    sibling examples in this file.
    """
    instances = [
        {'name': 'python', 'search_string': ['python']},
        {'name': 'python', 'search_string': ['python']},
    ]
    check1 = ProcessCheck(common.CHECK_NAME, {}, {}, [instances[0]])
    check2 = ProcessCheck(common.CHECK_NAME, {}, {}, [instances[1]])

    check1.check(instances[0])
    check2.check(instances[1])

    # Should always succeed
    assert check1.process_list_cache.elements[0].name() == "Process 1"
    # Fails with 'assert "Process 2" == "Process 1"' if process list cache is not shared
    assert check2.process_list_cache.elements[0].name() == "Process 1"
def test_check_real_process_regex(aggregator, dd_run_check):
    """Find this running python/pytest process using a regex search string."""
    from datadog_checks.base.utils.platform import Platform

    instance = {
        'name': 'py',
        'search_string': ['.*python.*pytest'],
        'exact_match': False,
        # NOTE(review): the option elsewhere in this file is spelled
        # 'ignore_denied_access' — this key looks misspelled; confirm
        # (harmless here if the check's default is already True).
        'ignored_denied_access': True,
        'thresholds': {'warning': [1, 10], 'critical': [1, 100]},
    }
    check = ProcessCheck(common.CHECK_NAME, {}, [instance])
    expected_tags = generate_expected_tags(instance)
    dd_run_check(check)

    for mname in common.PROCESS_METRIC:
        # Metrics legitimately absent on the first run:
        #  - io.* when io_counters() is unavailable
        #  - mem.real when memory_info_ex() is unavailable
        #  - cpu.pct, which needs two consecutive runs
        skip = (
            (not _PSUTIL_IO_COUNTERS and '.io' in mname)
            or (not _PSUTIL_MEM_SHARED and 'mem.real' in mname)
            or mname == 'system.processes.cpu.pct'
        )
        if skip:
            continue

        if Platform.is_windows():
            metric = common.UNIX_TO_WINDOWS_MAP.get(mname, mname)
        else:
            metric = mname
        aggregator.assert_metric(metric, at_least=1, tags=expected_tags)

    aggregator.assert_service_check('process.up', count=1, tags=expected_tags + ['process:py'])

    # cpu.pct needs a previous sample, so run the check a second time.
    dd_run_check(check)
    aggregator.assert_metric('system.processes.cpu.pct', count=1, tags=expected_tags)
    aggregator.assert_metric('system.processes.cpu.normalized_pct', count=1, tags=expected_tags)
def test_check(mock_process, reset_process_list_cache, aggregator,
               dd_run_check):
    """Run each stubbed instance via dd_run_check; touch page-fault metrics."""
    for stub in common.get_config_stubs():
        instance = stub['instance']
        check = ProcessCheck(common.CHECK_NAME, {}, [instance])
        if 'search_string' in instance:
            pids = mock_find_pid(instance['name'], instance['search_string'])
            with patch('datadog_checks.process.ProcessCheck.find_pids',
                       return_value=pids):
                dd_run_check(check)
        else:
            dd_run_check(check)

        # Coverage-only assertions: at_least=0 can never fail, they just
        # exercise the metric names.
        expected_tags = generate_expected_tags(instance)
        for sname in common.PAGEFAULT_STAT:
            aggregator.assert_metric(
                'system.processes.mem.page_faults.' + sname,
                at_least=0,
                tags=expected_tags,
            )
# Example #14
def check():
    """Return a ProcessCheck built with an empty configuration."""
    empty_config = {}
    return ProcessCheck(common.CHECK_NAME, empty_config, {})
# Example #15
def test_psutil_wrapper_simple_fail(aggregator):
    """Asking psutil_wrapper for an accessor that does not exist returns None."""
    check = ProcessCheck(common.CHECK_NAME, {}, {})
    result = check.psutil_wrapper(get_psutil_proc(), 'blah', None, False)
    assert result is None
# Example #16
def test_relocated_procfs(aggregator):
    """The check works when init_config['procfs_path'] points at a fake /proc.

    Builds a minimal procfs tree in a temp dir with a single process (pid 1)
    whose name is a fresh UUID, then runs the check searching for that name
    and expects a passing 'process.up' service check.
    """
    import tempfile
    import shutil
    import uuid

    # A UUID name guarantees no real process on the host can match.
    unique_process_name = str(uuid.uuid4())
    my_procfs = tempfile.mkdtemp()

    def _fake_procfs(arg, root=my_procfs):
        # Recursively materialize the dict: nested dicts become directories,
        # everything else is written out as a file's contents.
        for key, val in iteritems(arg):
            path = os.path.join(root, key)
            if isinstance(val, dict):
                os.mkdir(path)
                _fake_procfs(val, path)
            else:
                with open(path, "w") as f:
                    f.write(str(val))

    # Minimal files psutil needs to enumerate and inspect pid 1, plus a
    # top-level 'stat' for system-wide CPU/boot-time data.
    _fake_procfs(
        {
            '1': {
                'status': ("Name:\t{}\nThreads:\t1\n").format(unique_process_name),
                'stat': ('1 ({}) S 0 1 1 ' + ' 0' * 46).format(unique_process_name),
                'statm': '10970 3014 2404 77 0 2242 0',
                'cmdline': unique_process_name,
                'fd': {},
                'io': (
                    'rchar: 397865373\n'
                    'wchar: 32186\n'
                    'syscr: 2695852\n'
                    'syscw: 202\n'
                    'read_bytes: 1208320\n'
                    'write_bytes: 0\n'
                    'cancelled_write_bytes: 0\n'
                ),
            },
            'stat': ("cpu  13034 0 18596 380856797 2013 2 2962 0 0 0\n" "btime 1448632481\n"),
        }
    )

    config = {
        'init_config': {'procfs_path': my_procfs},
        'instances': [
            {
                'name': 'moved_procfs',
                'search_string': [unique_process_name],
                'exact_match': False,
                # NOTE(review): the option elsewhere in this file is spelled
                # 'ignore_denied_access' — this key looks misspelled; confirm.
                'ignored_denied_access': True,
                'thresholds': {'warning': [1, 10], 'critical': [1, 100]},
            }
        ],
    }
    process = ProcessCheck(common.CHECK_NAME, config['init_config'], {}, config['instances'])

    try:
        # Force a Linux-looking environment so psutil reads the fake procfs
        # even on non-Linux CI hosts; the C-binding modules are mocked out.
        with patch('socket.AF_PACKET', create=True), patch('sys.platform', 'linux'), patch(
            'psutil._psutil_linux', create=True
        ), patch('psutil._psutil_posix', create=True):
            process.check(config["instances"][0])
    finally:
        # Always remove the temp tree and restore psutil's default procfs path.
        shutil.rmtree(my_procfs)
        psutil.PROCFS_PATH = '/proc'

    expected_tags = generate_expected_tags(config['instances'][0])
    expected_tags += ['process:moved_procfs']
    aggregator.assert_service_check('process.up', count=1, tags=expected_tags)
def test_psutil_wrapper_simple(aggregator):
    """psutil_wrapper can read a real accessor such as name()."""
    check = ProcessCheck(common.CHECK_NAME, {}, [{}])
    result = check.psutil_wrapper(get_psutil_proc(), 'name')
    assert result is not None
# Example #18
def check():
    """Return a ProcessCheck configured with the shared test instance."""
    instances = [common.INSTANCE]
    return ProcessCheck(common.CHECK_NAME, {}, instances)
# Example #19
def test_relocated_procfs(aggregator):
    """The check works against a fake /proc tree on any starting platform.

    Python 2-only legacy variant: it relies on ``contextlib.nested``,
    ``__builtin__``, the ``reload`` builtin and ``dict.iteritems``, none of
    which exist under Python 3. It fakes a Linux environment by mocking
    psutil's C bindings at import time and reloading psutil if the host is
    not already Linux.
    """
    from datadog_checks.utils.platform import Platform
    import tempfile
    import shutil
    import uuid

    already_linux = Platform.is_linux()
    # A UUID name guarantees no real process on the host can match.
    unique_process_name = str(uuid.uuid4())
    my_procfs = tempfile.mkdtemp()

    def _fake_procfs(arg, root=my_procfs):
        # Recursively materialize the dict: nested dicts become directories,
        # everything else is written out as a file's contents.
        # (iteritems is Python 2 only.)
        for key, val in arg.iteritems():
            path = os.path.join(root, key)
            if isinstance(val, dict):
                os.mkdir(path)
                _fake_procfs(val, path)
            else:
                with open(path, "w") as f:
                    f.write(str(val))

    # Minimal files psutil needs for pid 1, plus the system-wide 'stat'.
    _fake_procfs({
        '1': {
            'status': ("Name:\t{}\nThreads:\t1\n").format(unique_process_name),
            'stat':
            ('1 ({}) S 0 1 1 ' + ' 0' * 46).format(unique_process_name),
            'cmdline': unique_process_name,
        },
        'stat': ("cpu  13034 0 18596 380856797 2013 2 2962 0 0 0\n"
                 "btime 1448632481\n"),
    })

    config = {
        'init_config': {
            'procfs_path': my_procfs
        },
        'instances': [{
            'name': 'moved_procfs',
            'search_string': [unique_process_name],
            'exact_match': False,
            # NOTE(review): the option elsewhere in this file is spelled
            # 'ignore_denied_access' — this key looks misspelled; confirm.
            'ignored_denied_access': True,
            'thresholds': {
                'warning': [1, 10],
                'critical': [1, 100]
            },
        }]
    }
    # e.g. "5.9.0" -> 590; fed to the mocked binding's version check below.
    version = int(psutil.__version__.replace(".", ""))
    process = ProcessCheck(common.CHECK_NAME, config['init_config'], {},
                           config['instances'])

    try:

        def import_mock(name,
                        i_globals={},
                        i_locals={},
                        fromlist=[],
                        level=-1,
                        orig_import=__import__):
            # _psutil_linux and _psutil_posix are the
            #  C bindings; use a mock for those
            if name in ('_psutil_linux', '_psutil_posix') or level >= 1 and\
               ('_psutil_linux' in fromlist or '_psutil_posix' in fromlist):
                m = MagicMock()
                # the import system will ask us for our own name
                m._psutil_linux = m
                m._psutil_posix = m
                # there's a version safety check in psutil/__init__.py;
                # this skips it
                m.version = version
                return m
            return orig_import(name, i_globals, i_locals, fromlist, level)

        # contextlib.nested is deprecated in favor of with MGR1, MGR2, ... etc
        # but we have too many mocks to fit on one line and apparently \ line
        # continuation is not flake8 compliant, even when semantically
        # required (as here). Patch is unlikely to throw errors that are
        # suppressed, so the main downside of contextlib is avoided.
        with contextlib.nested(
                patch('sys.platform', 'linux'),
                patch('socket.AF_PACKET', create=True),
                patch('__builtin__.__import__', side_effect=import_mock)):
            if not already_linux:
                # Reloading psutil fails on linux, but we only
                # need to do so if we didn't start out on a linux platform
                reload(psutil)
            assert Platform.is_linux()
            process.check(config["instances"][0])
    finally:
        shutil.rmtree(my_procfs)
        if not already_linux:
            # restore the original psutil that doesn't have our mocks
            reload(psutil)
        else:
            psutil.PROCFS_PATH = '/proc'

    expected_tags = generate_expected_tags(config['instances'][0])
    expected_tags += ['process:moved_procfs']
    aggregator.assert_service_check('process.up', count=1, tags=expected_tags)