def test_not_kubeflow_model():
    """Charm must enter BlockedStatus when deployed outside a model named `kubeflow`.

    Remove when this bug is resolved:
    https://github.com/kubeflow/kubeflow/issues/6136
    """
    harness = Harness(Operator)
    harness.begin_with_initial_hooks()

    expected = BlockedStatus(
        "kubeflow-dashboard must be deployed to model named `kubeflow`:"
        " https://git.io/J6d35"
    )
    assert harness.charm.model.unit.status == expected
class TestCharm(unittest.TestCase):
    """Smoke test: the charm harness starts cleanly as leader."""

    def setUp(self):
        harness = Harness(CassandraStressOperatorCharm)
        self.addCleanup(harness.cleanup)
        harness.set_leader(True)
        harness.begin_with_initial_hooks()
        self.harness = harness

    def test_pass(self):
        # Setup completing without error is the whole test.
        pass
def test_mrrm(role):
    """Exercise the provider/requirer handshake from either side of the relation.

    Depending on ``role``, the charm under test plays one end and a mock plays
    the other; the availability/readiness state machine must advance the same
    way in both configurations.
    """
    if role == "provides":
        harness = Harness(ProviderCharm, meta=ProviderCharm.META)
        harness.begin_with_initial_hooks()
        provider, requirer = harness.charm.rel, MockRequirer(harness)
        local, remote = provider, requirer
    elif role == "requires":
        harness = Harness(RequirerCharm, meta=RequirerCharm.META)
        harness.begin_with_initial_hooks()
        provider, requirer = MockProvider(harness), harness.charm.rel
        local, remote = requirer, provider

    # Before relating, neither side sees the other at all.
    for endpoint in (provider, requirer):
        assert not endpoint.is_available()
        assert not endpoint.is_ready()

    relation = remote.relate()
    # mock remote is always leader, so their versions will be sent to the local charm
    assert local.is_available()
    assert not local.is_ready()
    # local charm under test is not leader yet, so its will not be sent to the remote
    assert not remote.is_available()
    assert not remote.is_ready()

    harness.set_leader(True)
    assert provider.is_available()
    assert not provider.is_ready()
    assert requirer.is_available()
    assert not requirer.is_ready()

    # Requirer publishes a request: the provider becomes ready.
    requirer.wrap(relation, {requirer.unit: {"request": "foo"}})
    assert provider.is_available()
    assert provider.is_ready()
    assert requirer.is_available()
    assert not requirer.is_ready()
    payload = provider.unwrap(relation)
    assert payload[requirer.unit] == {"request": "foo"}

    # Provider answers: now both ends are ready.
    provider.wrap(relation, {provider.app: {"response": "bar"}})
    assert provider.is_available()
    assert provider.is_ready()
    assert requirer.is_available()
    assert requirer.is_ready()
    payload = requirer.unwrap(relation)
    assert payload[provider.app] == {"response": "bar"}
class TestRemoteWriteConsumer(unittest.TestCase):
    """Tests for the remote-write consumer side of the relation."""

    def setUp(self):
        self.harness = Harness(RemoteWriteConsumerCharm, meta=METADATA)
        # Fix: use a context manager so the resource file descriptor is closed;
        # the previous `open(...).read()` leaked the handle.
        with open("./promql-transform", "rb") as transform:
            self.harness.add_resource("promql-transform-amd64", transform.read())
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(True)
        self.harness.begin_with_initial_hooks()

    def test_address_is_set(self):
        """A remote-write URL published by the provider shows up in `endpoints`."""
        rel_id = self.harness.add_relation(RELATION_NAME, "provider")
        self.harness.add_relation_unit(rel_id, "provider/0")
        self.harness.update_relation_data(
            rel_id,
            "provider/0",
            {"remote_write": json.dumps({"url": "http://1.1.1.1:9090/api/v1/write"})},
        )
        assert list(self.harness.charm.remote_write_consumer.endpoints) == [
            {"url": "http://1.1.1.1:9090/api/v1/write"}
        ]

    @patch.object(RemoteWriteConsumerCharm, "_handle_endpoints_changed")
    def test_config_is_set(self, mock_handle_endpoints_changed):
        """Updating provider data fires the endpoints-changed handler with the right relation."""
        rel_id = self.harness.add_relation(RELATION_NAME, "provider")
        self.harness.add_relation_unit(rel_id, "provider/0")
        self.harness.update_relation_data(
            rel_id,
            "provider/0",
            {"remote_write": json.dumps({"url": "http://1.1.1.1:9090/api/v1/write"})},
        )
        mock_handle_endpoints_changed.assert_called()
        event = mock_handle_endpoints_changed.call_args.args[0]
        self.assertEqual(rel_id, event.relation_id)
        assert list(self.harness.charm.remote_write_consumer.endpoints) == [
            {"url": "http://1.1.1.1:9090/api/v1/write"}
        ]

    def test_no_remote_write_endpoint_provided(self):
        """Empty provider data yields no endpoints."""
        rel_id = self.harness.add_relation(RELATION_NAME, "provider")
        self.harness.add_relation_unit(rel_id, "provider/0")
        self.harness.update_relation_data(rel_id, "provider/0", {})
        assert list(self.harness.charm.remote_write_consumer.endpoints) == []
class TestRemoteWriteProvider(unittest.TestCase):
    """Tests for the Prometheus remote-write provider side of the relation."""

    @patch_network_get(private_address="1.1.1.1")
    def setUp(self, *unused):
        self.harness = Harness(PrometheusCharm)
        self.harness.set_model_info("lma", "123456")
        self.addCleanup(self.harness.cleanup)

    @patch.object(KubernetesServicePatch, "_service_object", new=lambda *args: None)
    @patch.object(Prometheus, "reload_configuration", new=lambda _: True)
    @patch_network_get(private_address="1.1.1.1")
    def test_port_is_set(self, *unused):
        """The unit publishes its remote-write URL and goes active."""
        self.harness.begin_with_initial_hooks()
        rel_id = self.harness.add_relation(RELATION_NAME, "consumer")
        self.harness.add_relation_unit(rel_id, "consumer/0")

        unit_name = self.harness.charm.unit.name
        expected_data = {
            "remote_write": json.dumps({"url": "http://1.1.1.1:9090/api/v1/write"})
        }
        self.assertEqual(
            self.harness.get_relation_data(rel_id, unit_name), expected_data
        )
        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)

    @patch.object(KubernetesServicePatch, "_service_object", new=lambda *args: None)
    @patch.object(Prometheus, "reload_configuration", new=lambda _: True)
    @patch_network_get(private_address="1.1.1.1")
    def test_alert_rules(self, *unused):
        """Alert rules sent by the consumer are surfaced by the provider."""
        self.harness.begin_with_initial_hooks()
        rel_id = self.harness.add_relation(RELATION_NAME, "consumer")
        self.harness.update_relation_data(
            rel_id, "consumer", {"alert_rules": json.dumps(ALERT_RULES)}
        )
        self.harness.add_relation_unit(rel_id, "consumer/0")

        alerts_by_topology = self.harness.charm.remote_write_provider.alerts()
        # drop the topology identifier
        alerts = list(alerts_by_topology.values())[0]
        self.assertEqual(len(alerts), 1)
        self.assertDictEqual(alerts, ALERT_RULES)
class TestManilaNetappCharm(TestCase):
    """Unit tests for the Manila NetApp backend charm."""

    # Minimal charm config required for custom_status_check() to succeed.
    REQUIRED_CHARM_CONFIG_BY_DEFAULT = {
        'management-address': '10.0.0.1',
        'admin-password': '******',
        'vserver-name': 'svm0',
    }

    def setUp(self):
        self.harness = Harness(charm.ManilaNetappCharm)
        self.addCleanup(self.harness.cleanup)

    def test_custom_status_check_default_config(self):
        # With no config set the charm must block and list the missing options.
        self.harness.disable_hooks()
        self.harness.begin()
        self.assertFalse(self.harness.charm.custom_status_check())
        expected_status = BlockedStatus('Missing configs: {}'.format(
            list(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT.keys())))
        self.assertEqual(self.harness.charm.unit.status, expected_status)

    def test_custom_status_check_valid_config(self):
        # With the required config present the status check passes.
        self.harness.update_config(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT)
        self.harness.disable_hooks()
        self.harness.begin()
        self.assertTrue(self.harness.charm.custom_status_check())

    @mock.patch.object(charm.ops_openstack.core.OSBaseCharm, 'install_pkgs')
    @mock.patch.object(charm.interface_manila_plugin.ManilaPluginProvides,
                       'send_backend_config')
    @mock.patch('charmhelpers.contrib.openstack.templating.get_loader')
    @mock.patch('charmhelpers.core.templating.render')
    def test_send_config_dhss_disabled(self, _render, _get_loader,
                                       _send_backend_config, _install_pkgs):
        # DHSS disabled (default): the backend config is rendered and sent
        # as soon as the manila-plugin relation exists.
        _render.return_value = 'test-rendered-manila-backend-config'
        _get_loader.return_value = 'test-loader'
        self.harness.update_config(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT)
        rel_id = self.harness.add_relation('manila-plugin', 'manila')
        self.harness.add_relation_unit(rel_id, 'manila/0')
        self.harness.begin_with_initial_hooks()
        self.assertTrue(self.harness.charm.state.is_started)
        _render.assert_called_once_with(source='manila.conf',
                                        template_loader='test-loader',
                                        target=None,
                                        context=self.harness.charm.adapters)
        _get_loader.assert_called_once_with('templates/', 'default')
        _send_backend_config.assert_called_once_with(
            'netapp-ontap', 'test-rendered-manila-backend-config')
        _install_pkgs.assert_called_once_with()
        self.assertEqual(self.harness.charm.unit.status,
                         ActiveStatus('Unit is ready'))

    @mock.patch.object(charm.ops_openstack.core.OSBaseCharm, 'install_pkgs')
    @mock.patch.object(charm.interface_manila_plugin.ManilaPluginProvides,
                       'send_backend_config')
    @mock.patch('charmhelpers.contrib.openstack.templating.get_loader')
    @mock.patch('charmhelpers.core.templating.render')
    def test_send_config_dhss_enabled(self, _render, _get_loader,
                                      _send_backend_config, _install_pkgs):
        # DHSS enabled: the charm waits for authentication data on the
        # manila-plugin relation before rendering and sending config.
        _render.return_value = 'test-rendered-manila-backend-config'
        _get_loader.return_value = 'test-loader'
        config = copy.deepcopy(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT)
        config['driver-handles-share-servers'] = True
        config['root-volume-aggregate-name'] = 'test_cluster_01_VM_DISK_1'
        self.harness.update_config(config)
        self.harness.begin_with_initial_hooks()

        # Validate workflow with incomplete relation data
        self.assertFalse(self.harness.charm.state.is_started)
        _render.assert_not_called()
        _get_loader.assert_not_called()
        _send_backend_config.assert_not_called()
        _install_pkgs.assert_called_once_with()
        self.assertEqual(self.harness.charm.unit.status, MaintenanceStatus(''))

        # Validate workflow with complete relation data
        rel_id = self.harness.add_relation('manila-plugin', 'manila')
        self.harness.add_relation_unit(rel_id, 'manila/0')
        self.harness.update_relation_data(rel_id, 'manila/0', {
            '_authentication_data': json.dumps({'data': 'test-manila-auth-data'})
        })
        self.assertTrue(self.harness.charm.state.is_started)
        _render.assert_called_once_with(source='manila.conf',
                                        template_loader='test-loader',
                                        target=None,
                                        context=self.harness.charm.adapters)
        _get_loader.assert_called_once_with('templates/', 'default')
        _send_backend_config.assert_called_once_with(
            'netapp-ontap', 'test-rendered-manila-backend-config')
        self.assertEqual(self.harness.charm.unit.status,
                         ActiveStatus('Unit is ready'))
class TestReloadAlertRules(unittest.TestCase):
    """Feature: Provider charm can manually invoke reloading of alerts.

    Background: In use cases such as cos-configuration-k8s-operator, the last
    hook can fire before the alert files show up on disk. In that case relation
    data would remain empty of alerts. To circumvent that, a public method for
    reloading alert rules is offered.
    """

    # relation data representation for the case of "no alerts"
    NO_ALERTS = json.dumps({})

    # use a short-form free-standing alert, for brevity
    ALERT = yaml.safe_dump({"alert": "free_standing", "expr": "avg(some_vector[5m]) > 5"})

    def setUp(self):
        # Sandbox directory whose "alerts" subfolder the provider watches.
        self.sandbox = TempFolderSandbox()
        alert_rules_path = os.path.join(self.sandbox.root, "alerts")
        self.alert_rules_path = alert_rules_path

        class ConsumerCharm(CharmBase):
            # NOTE(review): YAML indentation reconstructed from the flattened
            # source; confirm it matches the charm's metadata schema.
            metadata_yaml = textwrap.dedent(
                """
                provides:
                  metrics-endpoint:
                    interface: prometheus_scrape
                """
            )

            def __init__(self, *args, **kwargs):
                # NOTE(review): kwargs are accepted but not forwarded to super().
                super().__init__(*args)
                # Closure over the sandbox path computed in setUp above.
                self.rules_provider = PrometheusRulesProvider(self, dir_path=alert_rules_path)

        self.harness = Harness(ConsumerCharm, meta=ConsumerCharm.metadata_yaml)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin_with_initial_hooks()
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("metrics-endpoint", "prom")
        self.harness.add_relation_unit(rel_id, "prom/0")

    def test_reload_when_dir_is_still_empty_changes_nothing(self):
        """Scenario: The reload method is called when the alerts dir is still empty."""
        # GIVEN relation data contains no alerts
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)

        # WHEN no rule files are present
        # AND the reload method is called
        self.harness.charm.rules_provider._reinitialize_alert_rules()

        # THEN relation data is unchanged
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)

    def test_reload_after_dir_is_populated_updates_relation_data(self):
        """Scenario: The reload method is called after some alert files are added."""
        # GIVEN relation data contains no alerts
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)

        # WHEN some rule files are added to the alerts dir
        self.sandbox.put_file(os.path.join(self.alert_rules_path, "alert.rule"), self.ALERT)

        # AND the reload method is called
        self.harness.charm.rules_provider._reinitialize_alert_rules()

        # THEN relation data is updated
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )

    def test_reload_after_dir_is_emptied_updates_relation_data(self):
        """Scenario: The reload method is called after all the loaded alert files are removed."""
        # GIVEN alert files are present and relation data contains respective alerts
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.rules_provider._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )

        # WHEN all rule files are deleted from the alerts dir
        self.sandbox.remove(alert_filename)

        # AND the reload method is called
        self.harness.charm.rules_provider._reinitialize_alert_rules()

        # THEN relation data is empty again
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)

    def test_reload_after_dir_itself_removed_updates_relation_data(self):
        """Scenario: The reload method is called after the alerts dir doesn't exist anymore."""
        # GIVEN alert files are present and relation data contains respective alerts
        alert_filename = os.path.join(self.alert_rules_path, "alert.rule")
        self.sandbox.put_file(alert_filename, self.ALERT)
        self.harness.charm.rules_provider._reinitialize_alert_rules()
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertNotEqual(
            relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS
        )

        # WHEN the alerts dir itself is deleted
        self.sandbox.remove(alert_filename)
        self.sandbox.rmdir(self.alert_rules_path)

        # AND the reload method is called
        self.harness.charm.rules_provider._reinitialize_alert_rules()

        # THEN relation data is empty again
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        self.assertEqual(relation.data[self.harness.charm.app].get("alert_rules"), self.NO_ALERTS)

    def test_only_files_with_rule_or_rules_suffixes_are_loaded(self):
        """Scenario: User has both short-form rules (*.rule) and long-form rules (*.rules)."""
        # GIVEN various tricky combinations of files present
        filenames = ["alert.rule", "alert.rules", "alert.ruless", "alertrule", "alertrules"]
        for filename in filenames:
            alert_filename = os.path.join(self.alert_rules_path, filename)
            rule_file = yaml.safe_dump({"alert": filename, "expr": "avg(some_vector[5m]) > 5"})
            self.sandbox.put_file(alert_filename, rule_file)

        # AND the reload method is called
        self.harness.charm.rules_provider._reinitialize_alert_rules()

        # THEN only the *.rule and *.rules files are loaded
        relation = self.harness.charm.model.get_relation("metrics-endpoint")
        alert_rules = json.loads(relation.data[self.harness.charm.app].get("alert_rules"))
        alert_names = [groups["rules"][0]["alert"] for groups in alert_rules["groups"]]
        self.assertEqual(set(alert_names), {"alert.rule", "alert.rules"})
class TestEndpointAggregator(unittest.TestCase):
    """Tests for the endpoint-aggregator charm.

    Verifies that scrape targets and alert rules received over their own
    relations are forwarded over the prometheus relation with Juju topology
    labels injected. Job/group names embed a truncated (7-char) model-uuid
    prefix, while labels carry the full uuid — both asserted below.
    """

    def setUp(self):
        self.harness = Harness(EndpointAggregatorCharm, meta=AGGREGATOR_META)
        # Fixed model name/uuid so the expected topology labels are stable.
        self.harness.set_model_info(name="testmodel", uuid="1234567890")
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(True)
        self.harness.begin_with_initial_hooks()

    def test_adding_prometheus_then_target_forwards_a_labeled_scrape_job(self):
        # Prometheus relation first, then the scrape target.
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        target_rel_id = self.harness.add_relation(SCRAPE_TARGET_RELATION, "target-app")
        self.harness.add_relation_unit(target_rel_id, "target-app/0")
        hostname = "scrape_target_0"
        port = "1234"
        self.harness.update_relation_data(
            target_rel_id,
            "target-app/0",
            {
                "hostname": f"{hostname}",
                "port": f"{port}",
            },
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        expected_jobs = [{
            "job_name": "juju_testmodel_1234567_target-app_prometheus_scrape",
            "static_configs": [{
                "targets": ["scrape_target_0:1234"],
                "labels": {
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567890",
                    "juju_application": "target-app",
                    "juju_unit": "target-app/0",
                    "host": "scrape_target_0",
                },
            }],
            "relabel_configs": [RELABEL_INSTANCE_CONFIG],
        }]
        self.assertListEqual(scrape_jobs, expected_jobs)

    def test_adding_prometheus_then_target_forwards_a_labeled_alert_rule(self):
        # Prometheus relation first, then the rules application.
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        alert_rules_rel_id = self.harness.add_relation(ALERT_RULES_RELATION, "rules-app")
        self.harness.add_relation_unit(alert_rules_rel_id, "rules-app/0")
        self.harness.update_relation_data(alert_rules_rel_id, "rules-app/0",
                                          {"groups": ALERT_RULE_1})
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 1)
        group = groups[0]
        expected_group = {
            "name": "juju_testmodel_1234567_rules-app_alert_rules",
            "rules": [{
                "alert": "CPU_Usage",
                "expr": 'cpu_usage_idle{is_container!="True", group="promoagents-juju"} < 10',
                "for": "5m",
                "labels": {
                    "override_group_by": "host",
                    "severity": "page",
                    "cloud": "juju",
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567",
                    "juju_application": "rules-app",
                    "juju_unit": "rules-app/0",
                },
                "annotations": {
                    "description": "Host {{ $labels.host }} has had < 10% idle cpu for the last 5m\n",
                    "summary": "Host {{ $labels.host }} CPU free is less than 10%",
                },
            }],
        }
        self.assertDictEqual(group, expected_group)

    def test_adding_target_then_prometheus_forwards_a_labeled_scrape_job(self):
        # Reverse order: scrape target first, prometheus second.
        target_rel_id = self.harness.add_relation(SCRAPE_TARGET_RELATION, "target-app")
        self.harness.add_relation_unit(target_rel_id, "target-app/0")
        hostname = "scrape_target_0"
        port = "1234"
        self.harness.update_relation_data(
            target_rel_id,
            "target-app/0",
            {
                "hostname": f"{hostname}",
                "port": f"{port}",
            },
        )
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        expected_jobs = [{
            "job_name": "juju_testmodel_1234567_target-app_prometheus_scrape",
            "static_configs": [{
                "targets": ["scrape_target_0:1234"],
                "labels": {
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567890",
                    "juju_application": "target-app",
                    "juju_unit": "target-app/0",
                    "host": "scrape_target_0",
                },
            }],
            "relabel_configs": [RELABEL_INSTANCE_CONFIG],
        }]
        self.assertListEqual(scrape_jobs, expected_jobs)

    def test_adding_target_then_prometheus_forwards_a_labeled_alert_rule(self):
        # NOTE(review): despite the name, this adds the prometheus relation
        # first — identical setup to the "prometheus then target" variant
        # above. Confirm the intended ordering against upstream.
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        alert_rules_rel_id = self.harness.add_relation(ALERT_RULES_RELATION, "rules-app")
        self.harness.add_relation_unit(alert_rules_rel_id, "rules-app/0")
        self.harness.update_relation_data(alert_rules_rel_id, "rules-app/0",
                                          {"groups": ALERT_RULE_1})
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 1)
        group = groups[0]
        expected_group = {
            "name": "juju_testmodel_1234567_rules-app_alert_rules",
            "rules": [{
                "alert": "CPU_Usage",
                "expr": 'cpu_usage_idle{is_container!="True", group="promoagents-juju"} < 10',
                "for": "5m",
                "labels": {
                    "override_group_by": "host",
                    "severity": "page",
                    "cloud": "juju",
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567",
                    "juju_application": "rules-app",
                    "juju_unit": "rules-app/0",
                },
                "annotations": {
                    "description": "Host {{ $labels.host }} has had < 10% idle cpu for the last 5m\n",
                    "summary": "Host {{ $labels.host }} CPU free is less than 10%",
                },
            }],
        }
        self.assertDictEqual(group, expected_group)

    def test_scrape_jobs_from_multiple_target_applications_are_forwarded(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        target_rel_id_1 = self.harness.add_relation(SCRAPE_TARGET_RELATION, "target-app-1")
        self.harness.add_relation_unit(target_rel_id_1, "target-app-1/0")
        self.harness.update_relation_data(
            target_rel_id_1,
            "target-app-1/0",
            {
                "hostname": "scrape_target_0",
                "port": "1234",
            },
        )
        target_rel_id_2 = self.harness.add_relation(SCRAPE_TARGET_RELATION, "target-app-2")
        self.harness.add_relation_unit(target_rel_id_2, "target-app-2/0")
        self.harness.update_relation_data(
            target_rel_id_2,
            "target-app-2/0",
            {
                "hostname": "scrape_target_1",
                "port": "5678",
            },
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        self.assertEqual(len(scrape_jobs), 2)
        expected_jobs = [
            {
                "job_name": "juju_testmodel_1234567_target-app-1_prometheus_scrape",
                "static_configs": [{
                    "targets": ["scrape_target_0:1234"],
                    "labels": {
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567890",
                        "juju_application": "target-app-1",
                        "juju_unit": "target-app-1/0",
                        "host": "scrape_target_0",
                    },
                }],
                "relabel_configs": [RELABEL_INSTANCE_CONFIG],
            },
            {
                "job_name": "juju_testmodel_1234567_target-app-2_prometheus_scrape",
                "static_configs": [{
                    "targets": ["scrape_target_1:5678"],
                    "labels": {
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567890",
                        "juju_application": "target-app-2",
                        "juju_unit": "target-app-2/0",
                        "host": "scrape_target_1",
                    },
                }],
                "relabel_configs": [RELABEL_INSTANCE_CONFIG],
            },
        ]
        self.assertListEqual(scrape_jobs, expected_jobs)

    def test_alert_rules_from_multiple_target_applications_are_forwarded(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        alert_rules_rel_id_1 = self.harness.add_relation(
            ALERT_RULES_RELATION, "rules-app-1")
        self.harness.add_relation_unit(alert_rules_rel_id_1, "rules-app-1/0")
        self.harness.update_relation_data(
            alert_rules_rel_id_1,
            "rules-app-1/0",
            {"groups": ALERT_RULE_1},
        )
        alert_rules_rel_id_2 = self.harness.add_relation(
            ALERT_RULES_RELATION, "rules-app-2")
        self.harness.add_relation_unit(alert_rules_rel_id_2, "rules-app-2/0")
        self.harness.update_relation_data(
            alert_rules_rel_id_2,
            "rules-app-2/0",
            {"groups": ALERT_RULE_2},
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 2)
        expected_groups = [
            {
                "name": "juju_testmodel_1234567_rules-app-1_alert_rules",
                "rules": [{
                    "alert": "CPU_Usage",
                    "expr": 'cpu_usage_idle{is_container!="True", group="promoagents-juju"} < 10',
                    "for": "5m",
                    "labels": {
                        "override_group_by": "host",
                        "severity": "page",
                        "cloud": "juju",
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567",
                        "juju_application": "rules-app-1",
                        "juju_unit": "rules-app-1/0",
                    },
                    "annotations": {
                        "description": "Host {{ $labels.host }} has had < 10% idle cpu for the last 5m\n",
                        "summary": "Host {{ $labels.host }} CPU free is less than 10%",
                    },
                }],
            },
            {
                "name": "juju_testmodel_1234567_rules-app-2_alert_rules",
                "rules": [{
                    "alert": "DiskFull",
                    "expr": 'disk_free{is_container!="True", fstype!~".*tmpfs|squashfs|overlay"} <1024',
                    "for": "5m",
                    "labels": {
                        "override_group_by": "host",
                        "severity": "page",
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567",
                        "juju_application": "rules-app-2",
                        "juju_unit": "rules-app-2/0",
                    },
                    "annotations": {
                        "description": "Host {{ $labels.host}} {{ $labels.path }} is full\nsummary: Host {{ $labels.host }} {{ $labels.path}} is full\n"
                    },
                }],
            },
        ]
        self.assertListEqual(groups, expected_groups)

    def test_scrape_job_removal_differentiates_between_applications(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        target_rel_id_1 = self.harness.add_relation("prometheus-target", "target-app-1")
        self.harness.add_relation_unit(target_rel_id_1, "target-app-1/0")
        self.harness.update_relation_data(
            target_rel_id_1,
            "target-app-1/0",
            {
                "hostname": "scrape_target_0",
                "port": "1234",
            },
        )
        target_rel_id_2 = self.harness.add_relation("prometheus-target", "target-app-2")
        self.harness.add_relation_unit(target_rel_id_2, "target-app-2/0")
        self.harness.update_relation_data(
            target_rel_id_2,
            "target-app-2/0",
            {
                "hostname": "scrape_target_1",
                "port": "5678",
            },
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        self.assertEqual(len(scrape_jobs), 2)
        self.harness.remove_relation_unit(target_rel_id_2, "target-app-2/0")
        # NOTE(review): prometheus_rel_data is re-read after the removal without
        # calling get_relation_data again — assumes the harness returns a live
        # view of relation data; confirm against ops.testing behavior.
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        self.assertEqual(len(scrape_jobs), 1)
        expected_jobs = [{
            "job_name": "juju_testmodel_1234567_target-app-1_prometheus_scrape",
            "static_configs": [{
                "targets": ["scrape_target_0:1234"],
                "labels": {
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567890",
                    "juju_application": "target-app-1",
                    "juju_unit": "target-app-1/0",
                    "host": "scrape_target_0",
                },
            }],
            "relabel_configs": [RELABEL_INSTANCE_CONFIG],
        }]
        self.assertListEqual(scrape_jobs, expected_jobs)

    def test_alert_rules_removal_differentiates_between_applications(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        alert_rules_rel_id_1 = self.harness.add_relation(
            "prometheus-rules", "rules-app-1")
        self.harness.add_relation_unit(alert_rules_rel_id_1, "rules-app-1/0")
        self.harness.update_relation_data(
            alert_rules_rel_id_1,
            "rules-app-1/0",
            {"groups": ALERT_RULE_1},
        )
        alert_rules_rel_id_2 = self.harness.add_relation(
            "prometheus-rules", "rules-app-2")
        self.harness.add_relation_unit(alert_rules_rel_id_2, "rules-app-2/0")
        self.harness.update_relation_data(
            alert_rules_rel_id_2,
            "rules-app-2/0",
            {"groups": ALERT_RULE_2},
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 2)
        self.harness.remove_relation_unit(alert_rules_rel_id_2, "rules-app-2/0")
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 1)
        expected_groups = [
            {
                "name": "juju_testmodel_1234567_rules-app-1_alert_rules",
                "rules": [{
                    "alert": "CPU_Usage",
                    "expr": 'cpu_usage_idle{is_container!="True", group="promoagents-juju"} < 10',
                    "for": "5m",
                    "labels": {
                        "override_group_by": "host",
                        "severity": "page",
                        "cloud": "juju",
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567",
                        "juju_application": "rules-app-1",
                        "juju_unit": "rules-app-1/0",
                    },
                    "annotations": {
                        "description": "Host {{ $labels.host }} has had < 10% idle cpu for the last 5m\n",
                        "summary": "Host {{ $labels.host }} CPU free is less than 10%",
                    },
                }],
            },
        ]
        self.assertListEqual(groups, expected_groups)

    def test_removing_scrape_jobs_differentiates_between_units(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        target_rel_id = self.harness.add_relation("prometheus-target", "target-app")
        self.harness.add_relation_unit(target_rel_id, "target-app/0")
        self.harness.update_relation_data(
            target_rel_id,
            "target-app/0",
            {
                "hostname": "scrape_target_0",
                "port": "1234",
            },
        )
        self.harness.add_relation_unit(target_rel_id, "target-app/1")
        self.harness.update_relation_data(
            target_rel_id,
            "target-app/1",
            {
                "hostname": "scrape_target_1",
                "port": "5678",
            },
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        # Two units of the same app share one job with two static configs.
        self.assertEqual(len(scrape_jobs), 1)
        self.assertEqual(len(scrape_jobs[0].get("static_configs")), 2)
        self.harness.remove_relation_unit(target_rel_id, "target-app/1")
        scrape_jobs = json.loads(prometheus_rel_data.get("scrape_jobs", "[]"))
        self.assertEqual(len(scrape_jobs), 1)
        self.assertEqual(len(scrape_jobs[0].get("static_configs")), 1)
        expected_jobs = [{
            "job_name": "juju_testmodel_1234567_target-app_prometheus_scrape",
            "static_configs": [{
                "targets": ["scrape_target_0:1234"],
                "labels": {
                    "juju_model": "testmodel",
                    "juju_model_uuid": "1234567890",
                    "juju_application": "target-app",
                    "juju_unit": "target-app/0",
                    "host": "scrape_target_0",
                },
            }],
            "relabel_configs": [RELABEL_INSTANCE_CONFIG],
        }]
        self.assertListEqual(scrape_jobs, expected_jobs)

    def test_removing_alert_rules_differentiates_between_units(self):
        prometheus_rel_id = self.harness.add_relation(PROMETHEUS_RELATION, "prometheus")
        self.harness.add_relation_unit(prometheus_rel_id, "prometheus/0")
        alert_rules_rel_id = self.harness.add_relation("prometheus-rules", "rules-app")
        self.harness.add_relation_unit(alert_rules_rel_id, "rules-app/0")
        self.harness.update_relation_data(
            alert_rules_rel_id,
            "rules-app/0",
            {"groups": ALERT_RULE_1},
        )
        self.harness.add_relation_unit(alert_rules_rel_id, "rules-app/1")
        self.harness.update_relation_data(
            alert_rules_rel_id,
            "rules-app/1",
            {"groups": ALERT_RULE_2},
        )
        prometheus_rel_data = self.harness.get_relation_data(
            prometheus_rel_id, self.harness.model.app.name)
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        # NOTE(review): group count is 1 even with two units' rules present —
        # presumably per-application grouping; confirm intent.
        self.assertEqual(len(groups), 1)
        self.harness.remove_relation_unit(alert_rules_rel_id, "rules-app/1")
        alert_rules = json.loads(prometheus_rel_data.get("alert_rules", "{}"))
        groups = alert_rules.get("groups", [])
        self.assertEqual(len(groups), 1)
        expected_groups = [
            {
                "name": "juju_testmodel_1234567_rules-app_alert_rules",
                "rules": [{
                    "alert": "CPU_Usage",
                    "expr": 'cpu_usage_idle{is_container!="True", group="promoagents-juju"} < 10',
                    "for": "5m",
                    "labels": {
                        "override_group_by": "host",
                        "severity": "page",
                        "cloud": "juju",
                        "juju_model": "testmodel",
                        "juju_model_uuid": "1234567",
                        "juju_application": "rules-app",
                        "juju_unit": "rules-app/0",
                    },
                    "annotations": {
                        "description": "Host {{ $labels.host }} has had < 10% idle cpu for the last 5m\n",
                        "summary": "Host {{ $labels.host }} CPU free is less than 10%",
                    },
                }],
            },
        ]
        self.assertListEqual(groups, expected_groups)
class TestCharm(unittest.TestCase):
    """Unit tests for the Prometheus charm's pebble plan and config handling."""

    @patch("charm.KubernetesServicePatch", lambda x, y: None)
    @patch_network_get(private_address="1.1.1.1")
    def setUp(self, *unused):
        self.harness = Harness(PrometheusCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin_with_initial_hooks()

    @patch_network_get(private_address="1.1.1.1")
    def test_grafana_is_provided_port_and_source(self, *unused):
        # The unit advertises its host:port to grafana over the relation.
        rel_id = self.harness.add_relation("grafana-source", "grafana")
        self.harness.add_relation_unit(rel_id, "grafana/0")
        grafana_host = self.harness.get_relation_data(
            rel_id, self.harness.model.unit.name)["grafana_source_host"]
        self.assertEqual(grafana_host, "{}:{}".format("1.1.1.1", "9090"))

    def test_default_cli_log_level_is_info(self):
        plan = self.harness.get_container_pebble_plan("prometheus")
        self.assertEqual(cli_arg(plan, "--log.level"), "info")

    def test_invalid_log_level_defaults_to_debug(self):
        # An unrecognized level is logged as an error and falls back to debug.
        bad_log_config = {"log_level": "bad-level"}
        with self.assertLogs(level="ERROR") as logger:
            self.harness.update_config(bad_log_config)
            expected_logs = [
                "ERROR:root:Invalid loglevel: bad-level given, "
                "debug/info/warn/error/fatal allowed. "
                "defaulting to DEBUG loglevel."
            ]
            self.assertEqual(sorted(logger.output), expected_logs)
        plan = self.harness.get_container_pebble_plan("prometheus")
        self.assertEqual(cli_arg(plan, "--log.level"), "debug")

    def test_valid_log_level_is_accepted(self):
        valid_log_config = {"log_level": "warn"}
        self.harness.update_config(valid_log_config)
        plan = self.harness.get_container_pebble_plan("prometheus")
        self.assertEqual(cli_arg(plan, "--log.level"), "warn")

    @patch_network_get(private_address="1.1.1.1")
    def test_ingress_relation_not_set(self):
        # Without ingress, the external URL is the unit's own address.
        self.harness.set_leader(True)
        plan = self.harness.get_container_pebble_plan("prometheus")
        self.assertEqual(cli_arg(plan, "--web.external-url"), "http://1.1.1.1:9090")

    @patch_network_get(private_address="1.1.1.1")
    def test_ingress_relation_set(self):
        self.harness.set_leader(True)
        rel_id = self.harness.add_relation("ingress", "traefik-ingress")
        self.harness.add_relation_unit(rel_id, "traefik-ingress/0")
        plan = self.harness.get_container_pebble_plan("prometheus")
        self.assertEqual(cli_arg(plan, "--web.external-url"), "http://1.1.1.1:9090")

    @patch_network_get(private_address="1.1.1.1")
    def test_web_external_url_has_precedence_over_ingress_relation(self):
        self.harness.set_leader(True)
        # NOTE(review): the configured URL and this test's assertions were
        # redacted in the source ("*****" placeholder swallowed everything up
        # to the next decorator). Restore the original body from history.
        self.harness.update_config({"web_external_url": "http://*****:*****"})

    @patch_network_get(private_address="1.1.1.1")
    def test_web_external_url_set(self):
        self.harness.set_leader(True)
        # NOTE(review): redacted as above — assertions unrecoverable here.
        self.harness.update_config({"web_external_url": "http://*****:*****"})

    @patch("prometheus_server.Prometheus.reload_configuration")
    def test_configuration_reload(self, trigger_configuration_reload):
        # Both pebble-ready and config-changed must trigger a config reload.
        self.harness.container_pebble_ready("prometheus")
        trigger_configuration_reload.assert_called()
        self.harness.update_config({"log_level": "INFO"})
        trigger_configuration_reload.assert_called()
class TestCharm(unittest.TestCase):
    """Unit tests for the Cassandra charm, driven through ops.testing.Harness."""

    @patch.object(CassandraOperatorCharm, "_goal_units", new=lambda x: 1)
    @patch.object(CassandraOperatorCharm, "_bind_address", new=lambda x: "1.1.1.1")
    @patch.object(ops.model.Container, "pull", new=fake_pull)
    @patch.object(ops.model.Container, "push", new=fake_push)
    def setUp(self):
        # The decorators above are method-scoped: they are active only while
        # setUp itself runs, i.e. during the initial hook emission below.
        self.harness = Harness(CassandraOperatorCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin_with_initial_hooks()
        self.harness.set_leader(True)

    def tearDown(self):
        # Reset the module-level fake filesystem backing fake_pull/fake_push
        # so state cannot leak between tests.
        global FILES
        FILES = {}

    def test_relation_is_set(self):
        """The database relation exposes the CQL port and the pod FQDN."""
        rel_id = self.harness.add_relation("database", "otherapp")
        self.assertIsInstance(rel_id, int)
        self.harness.add_relation_unit(rel_id, "otherapp/0")
        self.harness.update_relation_data(rel_id, "otherapp", {})
        self.assertEqual(
            self.harness.get_relation_data(rel_id, self.harness.model.app.name)["port"],
            "9042",
        )
        # "None" is the harness's model name placeholder in the FQDN.
        self.assertEqual(
            self.harness.get_relation_data(rel_id, self.harness.model.app.name)["address"],
            "cassandra-k8s-0.cassandra-k8s-endpoints.None.svc.cluster.local",
        )

    def test_root_password_is_set(self):
        """A root password is generated on demand, not stored up front."""
        rel = self.harness.charm.model.get_relation("cassandra-peers")
        self.assertEqual(rel.data[self.harness.charm.app].get("root_password", None), None)
        self.assertEqual(bool(self.harness.charm.cassandra.root_password(None)), True)

    def test_config_file_is_set(self):
        """Pebble-ready pushes a cassandra.yaml matching SAMPLE_CONFIG."""
        self.harness.container_pebble_ready("cassandra")
        sample_content = yaml.safe_load(SAMPLE_CONFIG)
        content_str = (
            self.harness.charm.unit.get_container("cassandra")
            .pull("/etc/cassandra/cassandra.yaml")
            .read()
        )
        content = yaml.safe_load(content_str)
        assert content == sample_content

    @patch("ops.testing._TestingModelBackend.network_get")
    @patch("ops.testing._TestingPebbleClient.list_files")
    def test_prometheus_data_set(self, mock_list_files, mock_net_get):
        """The monitoring relation advertises the metrics scrape target.

        FIX: @patch decorators inject mocks bottom-up, so the decorator
        closest to the def (list_files) supplies the FIRST mock argument.
        The parameter names were previously swapped, which meant
        fake_network was assigned to the list_files mock while network_get
        silently returned a bare MagicMock.
        """
        bind_address = "1.1.1.1"
        fake_network = {
            "bind-addresses": [
                {
                    "interface-name": "eth0",
                    "addresses": [{"hostname": "cassandra-tester-0", "value": bind_address}],
                }
            ]
        }
        mock_net_get.return_value = fake_network
        rel_id = self.harness.add_relation("monitoring", "otherapp")
        self.assertIsInstance(rel_id, int)
        self.harness.add_relation_unit(rel_id, "otherapp/0")
        self.harness.update_relation_data(rel_id, "otherapp", {})
        # scrape_jobs is JSON-encoded in the relation databag.
        self.assertEqual(
            json.loads(
                self.harness.get_relation_data(rel_id, self.harness.model.app.name)["scrape_jobs"]
            )[0]["static_configs"][0]["targets"],
            ["*:9500"],
        )

    @patch("ops.testing._TestingModelBackend.network_get")
    @patch("ops.testing._TestingPebbleClient.list_files")
    def test_heap_size_default(self, mock_list_files, mock_net_get):
        """Default heap config yields -Xms6G -Xmx6G and an active unit.

        Mock parameter names corrected to match bottom-up decorator order
        (the mocks are unused here, but the old names were misleading).
        """
        cassandra_environment = self._start_cassandra_and_get_pebble_service().environment
        self.assertEqual(cassandra_environment["JVM_OPTS"], "-Xms6G -Xmx6G")
        self.assertEqual(self.harness.model.unit.status, ops.model.ActiveStatus())

    @patch("ops.testing._TestingModelBackend.network_get")
    @patch("ops.testing._TestingPebbleClient.list_files")
    def test_heap_size_config_success(self, mock_list_files, mock_net_get):
        """A valid heap_size is reflected verbatim in JVM_OPTS."""
        self.harness.update_config({"heap_size": "1g"})
        cassandra_environment = self._start_cassandra_and_get_pebble_service().environment
        self.assertEqual(cassandra_environment["JVM_OPTS"], "-Xms1g -Xmx1g")
        self.assertEqual(self.harness.model.unit.status, ops.model.ActiveStatus())

    @patch("ops.testing._TestingModelBackend.network_get")
    @patch("ops.testing._TestingPebbleClient.list_files")
    def test_heap_size_config_invalid(self, mock_list_files, mock_net_get):
        """A malformed heap_size blocks the unit with a descriptive status."""
        self.harness.update_config({"heap_size": "0.5g"})
        self.assertEqual(
            self.harness.model.unit.status,
            ops.model.BlockedStatus("Invalid Cassandra heap size setting: '0.5g'"),
        )

    def _start_cassandra_and_get_pebble_service(self):
        """Emit pebble-ready for cassandra and return its pebble service."""
        container = self.harness.model.unit.get_container("cassandra")
        self.harness.charm.on.cassandra_pebble_ready.emit(container)
        pebble_plan = self.harness.get_container_pebble_plan("cassandra")
        return pebble_plan.services["cassandra"]
def test_install(self, mock_check, mock_check_output):
    """Initial hooks run the expected host commands.

    The two mock arguments are injected by @patch decorators that sit above
    this chunk of the file; by unittest.mock convention the decorator closest
    to the def supplies ``mock_check`` — confirm against the full file.
    """
    harness = Harness(Microk8STestCharm)
    self.addCleanup(harness.cleanup)
    harness.begin_with_initial_hooks()
    # NOTE(review): the literal arguments below appear redacted in the
    # source ('******' masking) — verify the expected call signatures
    # against upstream before relying on these assertions.
    mock_check.assert_called_with('juju models', user='******')
    mock_check_output.assert_called_with('snap list')