def test_only_max_dbs_are_scanned(aggregator, gauges):
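    """Only max_dbs_per_check databases should be scanned on each check run."""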
    configs = []
    for node in [common.NODE1, common.NODE2, common.NODE3]:
        config = deepcopy(node)
        config["max_dbs_per_check"] = 1
        configs.append(config)

    for config in configs:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    for db in ['kennel', '_replicator']:
        expected_tags = ["db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags, count=0)

    for db in ['_global_changes', '_users']:
        expected_tags = ["db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags, count=1)
def test_config(test_case, extra_config, expected_http_kwargs):
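    """HTTP options from the instance config should be forwarded to the requests call."""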
    instance = deepcopy(common.BASIC_CONFIG)
    instance.update(extra_config)
    check = CouchDb(common.CHECK_NAME, {}, {}, instances=[instance])

    with mock.patch('datadog_checks.base.utils.http.requests') as r:
        r.get.return_value = mock.MagicMock(status_code=200, content='{}')

        check.check(instance)

        http_kwargs = dict(auth=mock.ANY,
                           cert=mock.ANY,
                           headers=mock.ANY,
                           proxies=mock.ANY,
                           timeout=mock.ANY,
                           verify=mock.ANY)
        http_kwargs.update(expected_http_kwargs)

        r.get.assert_called_with('http://localhost:5984/_all_dbs/',
                                 **http_kwargs)
Example #3
def test_db_blacklisting(aggregator, gauges):
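    """Databases listed in db_blacklist should not produce per-database gauges."""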
    configs = []

    for node in [common.NODE1, common.NODE2, common.NODE3]:
        config = deepcopy(node)
        config['db_blacklist'] = ['kennel']
        configs.append(config)

    for config in configs:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    for _ in configs:
        for db in ['_users', '_global_changes', '_replicator']:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags)

        for gauge in gauges["by_db_gauges"]:
            expected_tags = ["db:kennel"]
            aggregator.assert_metric(gauge, tags=expected_tags, count=0)
def test_collect_metadata_instance(aggregator, datadog_agent, instance):
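    """The check should submit CouchDB version metadata for the agent to collect."""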
    check = CouchDb(common.CHECK_NAME, {}, [instance])
    check.check_id = common.CHECK_ID
    check.check(instance)
    version = common.COUCH_RAW_VERSION

    # CouchDB2 version is formatted differently for the datadog hosted image
    if common.COUCH_MAJOR_VERSION == 2:
        version = COUCHDB2_VERSIONS[common.COUCH_RAW_VERSION]

    major, minor, patch = version.split('.')
    version_metadata = {
        'version.raw': version,
        'version.scheme': 'semver',
        'version.major': major,
        'version.minor': minor,
        'version.patch': patch,
    }

    datadog_agent.assert_metadata(common.CHECK_ID, version_metadata)
    datadog_agent.assert_metadata_count(5)
Example #5
def test_db_exclusion(aggregator, gauges, param_name):
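    """Databases listed under the exclusion parameter should produce no per-database gauges."""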
    configs = []

    for node in [common.NODE1, common.NODE2, common.NODE3]:
        config = deepcopy(node)
        config[param_name] = ['db0']
        configs.append(config)

    for config in configs:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    for _ in configs:
        for db in ['db1']:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags)

        for db in ['db0']:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags, count=0)
Example #6
def test_check_without_names(aggregator, gauges):
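    """Without a configured node name, the check should discover and report every node in the cluster."""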
    config = deepcopy(common.NODE1)
    config.pop('name')
    check = CouchDb(common.CHECK_NAME, {}, [config])
    check.check(config)

    configs = [common.NODE1, common.NODE2, common.NODE3]

    for config in configs:
        expected_tags = ["instance:{}".format(config["name"])]
        for gauge in gauges["cluster_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

        for gauge in gauges["erlang_gauges"]:
            aggregator.assert_metric(gauge)

        for gauge in gauges["replication_tasks_gauges"]:
            aggregator.assert_metric(gauge)

    for db, dd in {"kennel": "dummy"}.items():
        expected_tags = ["design_document:{}".format(dd), "language:javascript", "db:{}".format(db)]
        for gauge in gauges["by_dd_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for db in ["kennel"]:
        expected_tags = ["db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    expected_tags = ["instance:{}".format(config["server"])]
    # The version service check is tagged with the server URL when the config has no name
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    for node in [common.NODE2, common.NODE3]:
        expected_tags = ["instance:{}".format(node["name"])]
        # One for the server stats, the version is already loaded
        aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    aggregator.assert_all_metrics_covered()
Example #7
def test_db_whitelisting(aggregator, gauges):
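    """Only databases listed in db_whitelist should produce per-database gauges."""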
    configs = []

    for n in [common.NODE1, common.NODE2, common.NODE3]:
        node = deepcopy(n)
        node['db_whitelist'] = ['db0']
        configs.append(node)

    for config in configs:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    for _ in configs:
        for db in ['db0']:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags)

        for db in ['db1']:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags, count=0)
def test_indexing_metrics(aggregator, gauges, active_tasks):
    """
    Testing metrics coming from a running indexer would be extremely flaky,
    so we mock the HTTP layer instead.
    """
    from datadog_checks.couch import couch

    def _get(url, tags, run_check=False):
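        # Serve the canned active-tasks payload for the _active_tasks endpoint
        # and an empty document for everything else.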
        if '_active_tasks' in url:
            return active_tasks
        return {}

    # run the check on all instances
    for config in [common.NODE1, common.NODE2, common.NODE3]:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.checker = couch.CouchDB2(check)
        check.get = _get
        check.check(config)

    for node in [common.NODE1, common.NODE2, common.NODE3]:
        expected_tags = ['database:kennel', 'design_document:dummy', 'instance:{}'.format(node['name'])]
        for gauge in gauges["indexing_tasks_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)
Example #9
def check():
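    """Return a check instance matching the CouchDB major version under test."""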
    if common.COUCH_MAJOR_VERSION == 1:
        return CouchDb(common.CHECK_NAME, {}, instances=[common.BASIC_CONFIG])
    else:
        return CouchDb(common.CHECK_NAME, {},
                       instances=[common.BASIC_CONFIG_V2])
Example #10
def test_check(aggregator, gauges):
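    """Run the check against every instance and verify the expected metrics."""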
    for config in deepcopy(INSTANCES):
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)
    _assert_check(aggregator, gauges)
Example #11
def test_view_compaction_metrics(aggregator, gauges):
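    """Generate write load until view compaction kicks in, then assert its progress gauges appear."""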
    class LoadGenerator(threading.Thread):
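        """Background writer that creates, updates, and queries documents to keep view compaction busy."""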
        STOP = 0
        RUN = 1

        def __init__(self, server, auth):
            self._server = server
            self._auth = auth
            self._status = self.RUN
            threading.Thread.__init__(self)

        def run(self):
            docs = []
            count = 0
            while self._status == self.RUN:
                count += 1
                if count % 5 == 0:
                    self.compact_views()
                theid = ''.join(
                    random.choice(string.ascii_uppercase + string.digits)
                    for _ in range(10))
                docs.append(self.post_doc(theid))
                docs = [self.update_doc(doc) for doc in docs]
                self.generate_views()

        def generate_views(self):
            url = '{}/kennel/_design/dummy/_view/all'.format(self._server)
            try:
                r = requests.get(url, auth=self._auth, timeout=1)
                r.raise_for_status()
            except requests.exceptions.Timeout:
                pass
            url = '{}/kennel/_design/dummy/_view/by_data'.format(self._server)
            try:
                r = requests.get(url, auth=self._auth, timeout=1)
                r.raise_for_status()
            except requests.exceptions.Timeout:
                pass

        def update_doc(self, doc):
            body = {
                'data': str(random.randint(0, 1000000000)),
                '_rev': doc['rev']
            }

            url = '{}/kennel/{}'.format(self._server, doc['id'])
            r = requests.put(url,
                             auth=self._auth,
                             headers={'Content-Type': 'application/json'},
                             json=body)
            r.raise_for_status()
            return r.json()

        def post_doc(self, doc_id):
            body = {"_id": doc_id, "data": str(time.time())}
            url = '{}/kennel'.format(self._server)
            r = requests.post(url,
                              auth=self._auth,
                              headers={'Content-Type': 'application/json'},
                              json=body)
            r.raise_for_status()
            return r.json()

        def compact_views(self):
            url = '{}/kennel/_compact/dummy'.format(self._server)
            r = requests.post(url,
                              auth=self._auth,
                              headers={'Content-Type': 'application/json'})
            r.raise_for_status()

        def stop(self):
            self._status = self.STOP

    threads = []
    for _ in range(40):
        t = LoadGenerator(common.NODE1['server'],
                          (common.NODE1['user'], common.NODE1['password']))
        t.start()
        threads.append(t)

    tries = 0
    try:
        metric_found = False
        while not metric_found and tries < 40:
            tries += 1

            try:
                for config in [common.NODE1, common.NODE2, common.NODE3]:
                    check = CouchDb(common.CHECK_NAME, {}, [config])
                    check.check(config)
            except Exception:
                time.sleep(1)
                continue

            for m_name in aggregator._metrics:
                if re.search(r'view_compaction\.progress',
                             str(m_name)) is not None:
                    metric_found = True
                    for gauge in gauges["view_compaction_tasks_gauges"]:
                        aggregator.assert_metric(gauge)
                    break
    finally:
        for t in threads:
            t.stop()

        for t in threads:
            t.join()

    if not metric_found:
        raise AssertionError('Could not find the view_compaction happening')
Example #12
def test_only_max_nodes_are_scanned(aggregator, gauges):
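    """With max_nodes_per_check set to 2, only the first two nodes should be scanned."""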
    config = deepcopy(common.NODE1)
    config.pop("name")
    config['max_nodes_per_check'] = 2

    check = CouchDb(common.CHECK_NAME, {}, [config])
    check.check(config)

    for gauge in gauges["erlang_gauges"]:
        aggregator.assert_metric(gauge)

    for gauge in gauges["replication_tasks_gauges"]:
        aggregator.assert_metric(gauge)

    for config in [common.NODE1, common.NODE2]:
        expected_tags = ["instance:{}".format(config["name"])]
        for gauge in gauges["cluster_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for db in ["_users", "_global_changes", "_replicator"]:
        expected_tags = ["db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for db, dd in {"_replicator": "_replicator", "_users": "_auth"}.items():
        expected_tags = [
            "design_document:{}".format(dd), "language:javascript",
            "db:{}".format(db)
        ]
        for gauge in gauges["by_dd_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    expected_tags = ["instance:{}".format(config["server"])]
    # One for the version as we don't have any names to begin with
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME,
                                    status=CouchDb.OK,
                                    tags=expected_tags,
                                    count=1)

    for node in [common.NODE1, common.NODE2]:
        expected_tags = ["instance:{}".format(node["name"])]
        # One for the server stats, the version is already loaded
        aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME,
                                        status=CouchDb.OK,
                                        tags=expected_tags,
                                        count=1)

    expected_tags = ["instance:{}".format(common.NODE3["name"])]
    for gauge in gauges["cluster_gauges"]:
        aggregator.assert_metric(gauge, tags=expected_tags, count=0)

    for db in ['_users', '_global_changes', '_replicator', 'kennel']:
        expected_tags = [expected_tags[0], "db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags, count=0)

    expected_tags = ["instance:{}".format(common.NODE3["name"])]
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME,
                                    status=CouchDb.OK,
                                    tags=expected_tags,
                                    count=0)

    aggregator.assert_all_metrics_covered()
Example #13
def check():
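    """Return a bare check instance with no instance configuration."""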
    return CouchDb(common.CHECK_NAME, {}, {})