def test_replication_metrics(aggregator, gauges):
    """Run the check against every cluster node and verify that all
    replication-task gauges were submitted."""
    nodes = (common.NODE1, common.NODE2, common.NODE3)
    for node_config in nodes:
        couch_check = CouchDb(common.CHECK_NAME, {}, [node_config])
        couch_check.check(node_config)

    for metric_name in gauges["replication_tasks_gauges"]:
        aggregator.assert_metric(metric_name)
def test_only_max_nodes_are_scanned(aggregator, gauges):
    """With ``max_nodes_per_check`` set to 2, only the first two discovered
    nodes may report metrics/service checks; NODE3 must produce nothing.

    The instance's ``name`` is popped so the check discovers cluster members
    on its own instead of being pinned to one node.
    """
    config = deepcopy(common.NODE1)
    config.pop("name")
    config['max_nodes_per_check'] = 2
    check = CouchDb(common.CHECK_NAME, {}, [config])
    check.check(config)

    for gauge in gauges["erlang_gauges"]:
        aggregator.assert_metric(gauge)

    # The two nodes within the cap are fully reported (cluster, per-db and
    # per-design-document gauges).
    for config in [common.NODE1, common.NODE2]:
        expected_tags = ["instance:{}".format(config["name"])]
        for gauge in gauges["cluster_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)
        for db in ["_users", "_global_changes", "_replicator"]:
            expected_tags = ["db:{}".format(db)]
            for gauge in gauges["by_db_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags)
        for db, dd in {"_replicator": "_replicator", "_users": "_auth"}.items():
            expected_tags = [
                "design_document:{}".format(dd),
                "language:javascript",
                "db:{}".format(db)
            ]
            for gauge in gauges["by_dd_gauges"]:
                aggregator.assert_metric(gauge, tags=expected_tags)

    # NOTE(review): `config` here is the last value of the loop above
    # (common.NODE2), not the unnamed instance built at the top — presumably
    # intended; confirm which server this service check should be tagged with.
    expected_tags = ["instance:{}".format(config["server"])]
    # One for the version as we don't have any names to begin with
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    for node in [common.NODE1, common.NODE2]:
        expected_tags = ["instance:{}".format(node["name"])]
        # One for the server stats, the version is already loaded
        aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    # NODE3 falls outside the cap: no metrics and no service checks for it.
    expected_tags = ["instance:{}".format(common.NODE3["name"])]
    for gauge in gauges["cluster_gauges"]:
        aggregator.assert_metric(gauge, tags=expected_tags, count=0)

    for db in ['_users', '_global_changes', '_replicator', 'kennel']:
        expected_tags = [expected_tags[0], "db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags, count=0)

    expected_tags = ["instance:{}".format(common.NODE3["name"])]
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=0)
    aggregator.assert_all_metrics_covered()
def test_replication_metrics(aggregator, check, gauges, instance):
    """Create a real continuous replication job, wait for it to appear in
    ``_active_tasks``, then verify the replication-task gauges."""
    # Register a continuous replication via the _replicator database.
    url = "{}/_replicator".format(common.NODE1['server'])
    replication_body = {
        '_id': 'my_replication_id',
        'source': 'http://*****:*****@127.0.0.1:5984/kennel',
        'target': 'http://*****:*****@127.0.0.1:5984/kennel_replica',
        'create_target': True,
        'continuous': True,
    }
    r = requests.post(
        url,
        auth=(common.NODE1['user'], common.NODE1['password']),
        headers={'Content-Type': 'application/json'},
        json=replication_body,
    )
    r.raise_for_status()

    # Poll _active_tasks (up to ~20s) until the replication task shows up.
    count = 0
    attempts = 0
    url = "{}/_active_tasks".format(common.NODE1['server'])
    while count != 1 and attempts < 20:
        attempts += 1
        time.sleep(1)
        r = requests.get(url, auth=(common.NODE1['user'], common.NODE1['password']))
        r.raise_for_status()
        count = len(r.json())

    # NOTE(review): this rebinds — and therefore ignores — the `check`
    # fixture argument; confirm the fixture parameter is still needed.
    check = CouchDb(common.CHECK_NAME, {}, {}, instances=[instance])
    for config in [common.NODE1, common.NODE2, common.NODE3]:
        check.check(config)

    for gauge in gauges["replication_tasks_gauges"]:
        aggregator.assert_metric(gauge)
def check():
    """Fixture: build a CouchDb check for the major version in COUCH_VERSION.

    Only the leading character of the version string is inspected
    ('1' or '2'); anything else yields None.
    """
    major = os.environ["COUCH_VERSION"][0]
    if major == '1':
        return CouchDb(common.CHECK_NAME, {}, {}, instances=[common.BASIC_CONFIG])
    if major == '2':
        return CouchDb(common.CHECK_NAME, {}, {}, instances=[common.BASIC_CONFIG_V2])
def test_check(aggregator, gauges):
    """
    Testing Couchdb2 check.

    Runs the check once per node, then verifies cluster, erlang, per-db and
    per-design-document gauges plus the OK service checks.
    """
    configs = [
        deepcopy(common.NODE1),
        deepcopy(common.NODE2),
        deepcopy(common.NODE3)
    ]
    for config in configs:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    # Each node reports cluster gauges tagged with its instance name.
    for config in configs:
        expected_tags = ["instance:{}".format(config["name"])]
        for gauge in gauges["cluster_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for gauge in gauges["erlang_gauges"]:
        aggregator.assert_metric(gauge)

    # Design-document gauges carry the dd name, its language and the db.
    for db, dd in {
        "kennel": "dummy",
        "_replicator": "_replicator",
        "_users": "_auth"
    }.items():
        for gauge in gauges["by_dd_gauges"]:
            expected_tags = [
                "design_document:{}".format(dd),
                "language:javascript",
                "db:{}".format(db)
            ]
            aggregator.assert_metric(gauge, tags=expected_tags)

    for db in ["_users", "_global_changes", "_replicator", "kennel"]:
        for gauge in gauges["by_db_gauges"]:
            expected_tags = ["db:{}".format(db)]
            aggregator.assert_metric(gauge, tags=expected_tags)

    expected_tags = ["instance:{}".format(common.NODE1["name"])]
    # One for the version, one for the server stats
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=2)

    for node in [common.NODE2, common.NODE3]:
        expected_tags = ["instance:{}".format(node["name"])]
        # One for the server stats, the version is already loaded
        # NOTE(review): count=2 here despite the comment mentioning one
        # service check — confirm the intended expectation.
        aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=2)

    aggregator.assert_all_metrics_covered()
def test_compaction_metrics(aggregator, gauges):
    """Generate write churn, trigger database compactions, then verify that
    the compaction-task gauges are submitted."""
    # Seed one document in the kennel db.
    url = "{}/kennel".format(common.NODE1['server'])
    body = {
        '_id': 'fsdr2345fgwert249i9fg9drgsf4SDFGWE',
        'data': str(time.time())
    }
    r = requests.post(
        url,
        auth=(common.NODE1['user'], common.NODE1['password']),
        headers={'Content-Type': 'application/json'},
        json=body,
    )
    r.raise_for_status()

    # Update the same doc 100 times (growing its revision history) and post
    # a fresh doc each round so compaction has work to do.
    update_url = '{}/{}'.format(url, body['_id'])
    for _ in range(100):
        rev = r.json()['rev']
        body['data'] = str(time.time())
        body['_rev'] = rev
        r = requests.put(
            update_url,
            auth=(common.NODE1['user'], common.NODE1['password']),
            headers={'Content-Type': 'application/json'},
            json=body,
        )
        r.raise_for_status()
        r2 = requests.post(
            url,
            auth=(common.NODE1['user'], common.NODE1['password']),
            headers={'Content-Type': 'application/json'},
            json={"_id": str(time.time())},
        )
        r2.raise_for_status()

    # Kick off compaction on both the kennel and _global_changes databases.
    url = '{}/kennel/_compact'.format(common.NODE1['server'])
    r = requests.post(url, auth=(common.NODE1['user'], common.NODE1['password']),
                      headers={'Content-Type': 'application/json'})
    r.raise_for_status()

    url = '{}/_global_changes/_compact'.format(common.NODE1['server'])
    r = requests.post(url, auth=(common.NODE1['user'], common.NODE1['password']),
                      headers={'Content-Type': 'application/json'})
    r.raise_for_status()

    # Run the check on every node while the compactions are (hopefully)
    # still in flight.
    for config in [common.NODE1, common.NODE2, common.NODE3]:
        check = CouchDb(common.CHECK_NAME, {}, [config])
        check.check(config)

    for gauge in gauges["compaction_tasks_gauges"]:
        aggregator.assert_metric(gauge)
def test_config_tags(aggregator, gauges):
    """Custom instance tags must propagate to metrics and service checks."""
    custom_tag = "test_tag:test"
    tagged_config = deepcopy(common.NODE1)
    tagged_config['tags'] = [custom_tag]

    couch_check = CouchDb(common.CHECK_NAME, {}, [tagged_config])
    couch_check.check(tagged_config)

    for metric_name in gauges["erlang_gauges"]:
        aggregator.assert_metric_has_tag(metric_name, custom_tag)
    for metric_name in gauges["by_db_gauges"]:
        aggregator.assert_metric_has_tag(metric_name, custom_tag)

    service_check_tags = ["instance:{0}".format(tagged_config["name"]), custom_tag]
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, tags=service_check_tags)
def test_check_without_names(aggregator, gauges):
    """Running with an unnamed instance should make the check discover the
    cluster's nodes and report metrics for all of them."""
    config = deepcopy(common.NODE1)
    config.pop('name')
    check = CouchDb(common.CHECK_NAME, {}, [config])
    check.check(config)

    configs = [common.NODE1, common.NODE2, common.NODE3]
    # Every discovered node reports cluster gauges tagged with its name.
    for config in configs:
        expected_tags = ["instance:{}".format(config["name"])]
        for gauge in gauges["cluster_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for gauge in gauges["erlang_gauges"]:
        aggregator.assert_metric(gauge)
    for gauge in gauges["replication_tasks_gauges"]:
        aggregator.assert_metric(gauge)

    for db, dd in {"kennel": "dummy"}.items():
        expected_tags = [
            "design_document:{}".format(dd),
            "language:javascript",
            "db:{}".format(db)
        ]
        for gauge in gauges["by_dd_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    for db in ["kennel"]:
        expected_tags = ["db:{}".format(db)]
        for gauge in gauges["by_db_gauges"]:
            aggregator.assert_metric(gauge, tags=expected_tags)

    # NOTE(review): `config` here is the last loop value (common.NODE3) —
    # confirm this is the server the version service check should target.
    expected_tags = ["instance:{}".format(config["server"])]
    # One for the version, one for the server stats
    aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    for node in [common.NODE2, common.NODE3]:
        expected_tags = ["instance:{}".format(node["name"])]
        # One for the server stats, the version is already loaded
        aggregator.assert_service_check(CouchDb.SERVICE_CHECK_NAME, status=CouchDb.OK, tags=expected_tags, count=1)

    aggregator.assert_all_metrics_covered()
def test_collect_metadata_instance(aggregator, datadog_agent, instance):
    """The check must submit semver-style version metadata for the instance."""
    couch_check = CouchDb(common.CHECK_NAME, {}, [instance])
    couch_check.check_id = common.CHECK_ID
    couch_check.check(instance)

    raw_version = common.COUCH_RAW_VERSION
    major, minor, patch = raw_version.split('.')
    expected_metadata = {
        'version.raw': raw_version,
        'version.scheme': 'semver',
        'version.major': major,
        'version.minor': minor,
        'version.patch': patch,
    }
    datadog_agent.assert_metadata(common.CHECK_ID, expected_metadata)
    datadog_agent.assert_metadata_count(5)
def test_compaction_metrics(aggregator, gauges, active_tasks):
    """
    Database compaction tasks are super quick to run on small amounts of data,
    leading to the task sometimes being complete before the check queries for
    active tasks. This can lead to flaky results, so let's mock.
    """
    from datadog_checks.couch import couch

    couch_check = CouchDb(common.CHECK_NAME, {}, [common.NODE1])
    couch_check.checker = couch.CouchDB2(couch_check)
    # Always return the canned task list, regardless of server state.
    couch_check.checker._get_active_tasks = lambda server, name, tags: active_tasks
    couch_check.check(common.NODE1)

    for metric_name in gauges["compaction_tasks_gauges"]:
        aggregator.assert_metric(metric_name)
def test_only_max_dbs_are_scanned(aggregator, gauges, number_db):
    """Exactly `number_db` distinct db:* tags may appear when
    max_dbs_per_check caps the scan."""
    instance_config = deepcopy(common.NODE1)
    instance_config["max_dbs_per_check"] = number_db

    couch_check = CouchDb(common.CHECK_NAME, {}, [instance_config])
    couch_check.check(instance_config)

    # Gather every distinct db:* tag across all submitted metrics.
    seen_db_tags = {
        tag
        for metric_list in aggregator._metrics.values()
        for metric in metric_list
        for tag in metric.tags
        if tag.startswith('db:')
    }
    assert len(seen_db_tags) == number_db
def test_only_max_nodes_are_scanned(aggregator, gauges, number_nodes):
    """Exactly `number_nodes` distinct instance:* tags may appear when
    max_nodes_per_check caps node discovery."""
    instance_config = deepcopy(common.NODE1)
    instance_config.pop("name")
    instance_config['max_nodes_per_check'] = number_nodes

    couch_check = CouchDb(common.CHECK_NAME, {}, [instance_config])
    couch_check.check(instance_config)

    # Gather every distinct instance:* tag across all submitted metrics.
    seen_instance_tags = {
        tag
        for metric_list in aggregator._metrics.values()
        for metric in metric_list
        for tag in metric.tags
        if tag.startswith('instance:')
    }
    assert len(seen_instance_tags) == number_nodes
def test_only_max_dbs_are_scanned(aggregator, gauges):
    """With max_dbs_per_check=1 per node, only a subset of databases may emit
    per-db gauges."""
    capped_configs = []
    for node in (common.NODE1, common.NODE2, common.NODE3):
        node_config = deepcopy(node)
        node_config["max_dbs_per_check"] = 1
        capped_configs.append(node_config)

    for node_config in capped_configs:
        CouchDb(common.CHECK_NAME, {}, [node_config]).check(node_config)

    # These dbs fall outside the capped scan — no per-db gauges expected.
    for db_name in ('kennel', '_replicator'):
        for metric_name in gauges["by_db_gauges"]:
            aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)], count=0)

    # These dbs are the ones the capped scan picks up — exactly once each.
    for db_name in ('_global_changes', '_users'):
        for metric_name in gauges["by_db_gauges"]:
            aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)], count=1)
def test_config(test_case, extra_config, expected_http_kwargs):
    """HTTP options from the instance config must be forwarded to
    requests.get by the base HTTP wrapper."""
    instance = deepcopy(common.BASIC_CONFIG)
    instance.update(extra_config)
    couch_check = CouchDb(common.CHECK_NAME, {}, instances=[instance])

    with mock.patch('datadog_checks.base.utils.http.requests') as mocked_requests:
        mocked_requests.get.return_value = mock.MagicMock(status_code=200, content='{}')
        couch_check.check(instance)

        # Any option not explicitly asserted on is accepted via mock.ANY.
        expected_kwargs = {
            'auth': mock.ANY,
            'cert': mock.ANY,
            'headers': mock.ANY,
            'proxies': mock.ANY,
            'timeout': mock.ANY,
            'verify': mock.ANY,
        }
        expected_kwargs.update(expected_http_kwargs)
        mocked_requests.get.assert_called_with(
            'http://{}:5984/_all_dbs/'.format(common.HOST), **expected_kwargs)
def test_collect_metadata_instance(aggregator, datadog_agent, instance):
    """Version metadata must be submitted, normalizing the raw version for
    CouchDB 2 images first."""
    couch_check = CouchDb(common.CHECK_NAME, {}, [instance])
    couch_check.check_id = common.CHECK_ID
    couch_check.check(instance)

    raw_version = common.COUCH_RAW_VERSION
    if common.COUCH_MAJOR_VERSION == 2:
        # CouchDB2 version is formatted differently for the datadog hosted image
        raw_version = COUCHDB2_VERSIONS[common.COUCH_RAW_VERSION]

    major, minor, patch = raw_version.split('.')
    datadog_agent.assert_metadata(
        common.CHECK_ID,
        {
            'version.raw': raw_version,
            'version.scheme': 'semver',
            'version.major': major,
            'version.minor': minor,
            'version.patch': patch,
        },
    )
    datadog_agent.assert_metadata_count(5)
def test_db_blacklisting(aggregator, gauges):
    """Databases listed in db_blacklist must never produce per-db gauges."""
    blacklisted_configs = []
    for node in (common.NODE1, common.NODE2, common.NODE3):
        node_config = deepcopy(node)
        node_config['db_blacklist'] = ['kennel']
        blacklisted_configs.append(node_config)

    for node_config in blacklisted_configs:
        CouchDb(common.CHECK_NAME, {}, [node_config]).check(node_config)

    # Assertions repeated once per configured node, as in the original test.
    for _ in blacklisted_configs:
        for db_name in ('_users', '_global_changes', '_replicator'):
            for metric_name in gauges["by_db_gauges"]:
                aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)])
        for metric_name in gauges["by_db_gauges"]:
            aggregator.assert_metric(metric_name, tags=["db:kennel"], count=0)
def test_indexing_metrics(aggregator, gauges, active_tasks):
    """
    Testing metrics coming from a running indexer would be extremely flaky,
    let's use mock.
    """
    from datadog_checks.couch import couch

    def fake_get(url, tags, run_check=False):
        # Serve the canned active-task payload; every other endpoint is empty.
        return active_tasks if '_active_tasks' in url else {}

    # run the check on all instances
    for node_config in (common.NODE1, common.NODE2, common.NODE3):
        couch_check = CouchDb(common.CHECK_NAME, {}, [node_config])
        couch_check.checker = couch.CouchDB2(couch_check)
        couch_check.get = fake_get
        couch_check.check(node_config)

    for node in (common.NODE1, common.NODE2, common.NODE3):
        task_tags = [
            'database:kennel',
            'design_document:dummy',
            'instance:{}'.format(node['name']),
        ]
        for metric_name in gauges["indexing_tasks_gauges"]:
            aggregator.assert_metric(metric_name, tags=task_tags)
def test_db_exclusion(aggregator, gauges, param_name):
    """A database listed under the exclusion parameter (`param_name`) must
    produce no per-db gauges, while other dbs still report."""
    excluding_configs = []
    for node in (common.NODE1, common.NODE2, common.NODE3):
        node_config = deepcopy(node)
        node_config[param_name] = ['db0']
        excluding_configs.append(node_config)

    for node_config in excluding_configs:
        CouchDb(common.CHECK_NAME, {}, [node_config]).check(node_config)

    # Assertions repeated once per configured node, as in the original test.
    for _ in excluding_configs:
        for db_name in ('db1',):
            for metric_name in gauges["by_db_gauges"]:
                aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)])
        for db_name in ('db0',):
            for metric_name in gauges["by_db_gauges"]:
                aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)], count=0)
def test_db_whitelisting(aggregator, gauges):
    """Only whitelisted databases may produce per-db gauges."""
    whitelisted_configs = []
    for node in (common.NODE1, common.NODE2, common.NODE3):
        node_config = deepcopy(node)
        node_config['db_whitelist'] = ['db0']
        whitelisted_configs.append(node_config)

    for node_config in whitelisted_configs:
        CouchDb(common.CHECK_NAME, {}, [node_config]).check(node_config)

    # Assertions repeated once per configured node, as in the original test.
    for _ in whitelisted_configs:
        for db_name in ('db0',):
            for metric_name in gauges["by_db_gauges"]:
                aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)])
        for db_name in ('db1',):
            for metric_name in gauges["by_db_gauges"]:
                aggregator.assert_metric(metric_name, tags=["db:{}".format(db_name)], count=0)
def check():
    """Fixture: a CouchDb check configured for the detected CouchDB major
    version."""
    if common.COUCH_MAJOR_VERSION == 1:
        basic_config = common.BASIC_CONFIG
    else:
        basic_config = common.BASIC_CONFIG_V2
    return CouchDb(common.CHECK_NAME, {}, instances=[basic_config])
def test_view_compaction_metrics(aggregator, gauges):
    """Hammer the kennel db with writes/view reads from background threads
    until a view-compaction task is observed, then verify its gauges.

    Bug fix: the final failure condition used to be ``if tries >= 20``,
    which (a) raised even when the metric WAS found, whenever finding it
    took 20 or more of the 40 allowed tries, and (b) disagreed with the
    loop's own 40-try budget. The test now fails only when the metric was
    never found.
    """

    class LoadGenerator(threading.Thread):
        STOP = 0
        RUN = 1

        def __init__(self, server, auth):
            self._server = server
            self._auth = auth
            self._status = self.RUN
            threading.Thread.__init__(self)

        def run(self):
            docs = []
            count = 0
            while self._status == self.RUN:
                count += 1
                # Compact the views every 5th round so a compaction task is
                # likely to be in flight when the check runs.
                if count % 5 == 0:
                    self.compact_views()
                theid = ''.join(
                    random.choice(string.ascii_uppercase + string.digits)
                    for _ in range(10))
                docs.append(self.post_doc(theid))
                docs = list(map(lambda x: self.update_doc(x), docs))
                self.generate_views()

        def generate_views(self):
            # Query both views; timeouts are expected under load and ignored.
            url = '{}/kennel/_design/dummy/_view/all'.format(self._server)
            try:
                r = requests.get(url, auth=self._auth, timeout=1)
                r.raise_for_status()
            except requests.exceptions.Timeout:
                pass
            url = '{}/kennel/_design/dummy/_view/by_data'.format(self._server)
            try:
                r = requests.get(url, auth=self._auth, timeout=1)
                r.raise_for_status()
            except requests.exceptions.Timeout:
                pass

        def update_doc(self, doc):
            body = {
                'data': str(random.randint(0, 1000000000)),
                '_rev': doc['rev']
            }
            url = '{}/kennel/{}'.format(self._server, doc['id'])
            r = requests.put(url, auth=self._auth,
                             headers={'Content-Type': 'application/json'},
                             json=body)
            r.raise_for_status()
            return r.json()

        def post_doc(self, doc_id):
            body = {"_id": doc_id, "data": str(time.time())}
            url = '{}/kennel'.format(self._server)
            r = requests.post(url, auth=self._auth,
                              headers={'Content-Type': 'application/json'},
                              json=body)
            r.raise_for_status()
            return r.json()

        def compact_views(self):
            url = '{}/kennel/_compact/dummy'.format(self._server)
            r = requests.post(url, auth=self._auth,
                              headers={'Content-Type': 'application/json'})
            r.raise_for_status()

        def stop(self):
            self._status = self.STOP

    threads = []
    for _ in range(40):
        t = LoadGenerator(common.NODE1['server'],
                          (common.NODE1['user'], common.NODE1['password']))
        t.start()
        threads.append(t)

    tries = 0
    metric_found = False
    try:
        while not metric_found and tries < 40:
            tries += 1
            try:
                for config in [common.NODE1, common.NODE2, common.NODE3]:
                    check = CouchDb(common.CHECK_NAME, {}, [config])
                    check.check(config)
            except Exception:
                # The cluster may be briefly unresponsive under load;
                # back off and retry.
                time.sleep(1)
                continue
            for m_name in aggregator._metrics:
                if re.search(r'view_compaction\.progress', str(m_name)) is not None:
                    metric_found = True
                    for gauge in gauges["view_compaction_tasks_gauges"]:
                        aggregator.assert_metric(gauge)
                    break
    finally:
        # Always stop and reap the load generators, even on failure.
        for t in threads:
            t.stop()
        for t in threads:
            t.join()

    if not metric_found:
        raise AssertionError('Could not find the view_compaction happening')
def check():
    """Fixture: a bare CouchDb check with empty init config and agent config."""
    return CouchDb(common.CHECK_NAME, {}, {})
def test_check(aggregator, gauges):
    """Run the check once per configured instance, then delegate the metric
    assertions to the shared helper."""
    for instance_config in deepcopy(INSTANCES):
        CouchDb(common.CHECK_NAME, {}, [instance_config]).check(instance_config)
    _assert_check(aggregator, gauges)