def on_install(self, event):
    """Handle the install hook: run install tasks and record completion."""
    unit = self.unit
    unit.status = MaintenanceStatus("Installing charm software")
    # Perform install tasks
    unit.status = MaintenanceStatus("Install complete")
    logging.info("Install of software complete")
    # Remember that install finished so later hooks can rely on it.
    self.state.installed = True
def test__it_blocks_until_pod_is_ready(self, mock_pod_spec, mock_juju_pod_spec, mock_time, mock_k8s_mod, mock_build_juju_unit_status_func):
    """config-changed keeps setting unit status until the pod is ready."""
    # Arrange: a framework-adapter mock and a progression of pod states.
    fw_adapter = create_autospec(
        framework.FrameworkAdapter, spec_set=True).return_value
    unit_states = [
        MaintenanceStatus(str(uuid4())),
        MaintenanceStatus(str(uuid4())),
        ActiveStatus(str(uuid4())),
    ]
    mock_build_juju_unit_status_func.side_effect = unit_states
    event = create_autospec(EventBase, spec_set=True).return_value

    # Act
    charm.on_config_changed_handler(event, fw_adapter)

    # Assert: one status update per simulated pod state, in order.
    assert fw_adapter.set_unit_status.call_count == len(unit_states)
    assert fw_adapter.set_unit_status.call_args_list == [
        call(state) for state in unit_states
    ]
def configure_pod(self, _=None) -> NoReturn:
    """Assemble the pod spec and apply it, if possible."""
    # Followers never touch the pod spec; they simply report ready.
    if not self.unit.is_leader():
        self.unit.status = ActiveStatus("ready")
        return

    self.unit.status = MaintenanceStatus("Assembling pod spec")

    # Resolve the OCI image resource first; block if it is unusable.
    try:
        self.unit.status = MaintenanceStatus("Fetching image information")
        image_info = self.image.fetch()
    except OCIImageResourceError:
        self.unit.status = BlockedStatus("Error fetching image information")
        return

    # Build the spec; invalid config data blocks the unit with the reason.
    try:
        pod_spec = make_pod_spec(
            image_info,
            self.config,
            self.model.name,
            self.model.app.name,
        )
    except ValueError as err:
        logger.exception(f"Config data validation error: {err}")
        self.unit.status = BlockedStatus(str(err))
        return

    # Apply only when the spec actually changed, then cache it.
    if self.state.pod_spec != pod_spec:
        self.model.pod.set_spec(pod_spec)
        self.state.pod_spec = pod_spec

    self.unit.status = ActiveStatus("ready")
def _on_install(self, event):
    """Install dependencies, fetch the runner binary and start runners."""
    self.unit.status = MaintenanceStatus("Installing packages")
    RunnerManager.install_deps()

    runner_manager = self._get_runner_manager()
    if not runner_manager:
        # Without a token and an org/repo path there is nothing to manage.
        self.unit.status = BlockedStatus(
            "Missing token or org/repo path config")
        return

    self.unit.status = MaintenanceStatus("Installing runner binary")
    try:
        self._stored.runner_bin_url = (
            runner_manager.get_latest_runner_bin_url())
        runner_manager.update_runner_bin(self._stored.runner_bin_url)
    except Exception as err:  # surface any failure as a blocked status
        logger.exception("Failed to update runner binary")
        self.unit.status = BlockedStatus(
            f"Failed to update runner binary: {err}")
        return

    self.unit.status = MaintenanceStatus("Starting runners")
    try:
        self._reconcile_runners(runner_manager)
    except RunnerError as err:
        logger.exception("Failed to start runners")
        self.unit.status = BlockedStatus(f"Failed to start runners: {err}")
    else:
        self.unit.status = ActiveStatus()
def on_pebble_ready(self, event: WorkloadEvent) -> None:
    """Run the pebble ready hook.

    Args:
        event: the event object
    """
    # Wait until the deployment has scaled to the goal unit count.
    if self._num_units() != self._goal_units():
        self.unit.status = MaintenanceStatus("Waiting for units")
        event.defer()
        return

    # NOTE: explicit `is False` — any non-False return (e.g. None) proceeds.
    if self._set_config_file(event) is False:
        self.unit.status = MaintenanceStatus("Waiting for IP address")
        event.defer()
        return

    # Restart the workload only when the pebble layer actually changed.
    if self._set_layer():
        self._container.restart("cassandra")

    if self.unit.is_leader():
        if not self.cassandra.root_password(event):
            self.unit.status = MaintenanceStatus("Waiting for Database")
            event.defer()
            return
        if self.model.relations["monitoring"]:
            self._setup_monitoring()
        self.provider.update_address("database", self._hostname())

    self.unit.status = ActiveStatus()
def on_install(self, event):
    """Install the PostgreSQL package and mark the charm as installed."""
    unit = self.unit
    unit.status = MaintenanceStatus('Installing charm software')
    apt.install('postgresql')
    unit.status = MaintenanceStatus('Install complete')
    logging.info('Install of software complete')
    # Later hooks check this flag before configuring the service.
    self.state.installed = True
def handle(self, event):
    """Validate resources, then build and apply the pod spec.

    Blocks on a missing/invalid image resource, delegates to the leader
    for non-leader units, and goes active only once the pod is ready.
    """
    # Iterate name/resource pairs directly instead of looping over
    # .keys() and re-indexing the dict on every iteration; build the
    # message once instead of formatting it twice.
    for name, resource in self._resources.items():
        if not resource.fetch(self._framework.resources):
            message = 'Missing or invalid image resource: {}'.format(name)
            self._framework.unit_status_set(BlockedStatus(message))
            logger.info(message)
            return

    if not self._framework.unit_is_leader:
        self._framework.unit_status_set(WaitingStatus('Waiting for leader'))
        logger.info('Delegating pod configuration to the leader')
        return

    spec = self._builder.build_spec()
    self._framework.unit_status_set(MaintenanceStatus('Configuring container'))
    self._framework.pod_spec_set(spec)

    if self._pod.is_ready:
        self._framework.unit_status_set(ActiveStatus('ready'))
        logger.info('Pod is ready')
        return

    self._framework.unit_status_set(MaintenanceStatus('Pod is not ready'))
    logger.info('Pod is not ready')
def _on_remove(self, event):
    """Remove artifacts created by the K8s API."""
    self.unit.status = MaintenanceStatus(
        "Removing supplementary Kubernetes objects")
    utils.remove_k8s_objects(self._stored.namespace)
    self.unit.status = MaintenanceStatus("Removing pod")
    # Reset stored flags so a future install starts from a clean slate.
    self._stored.started = False
    self._stored.k8s_objects_created = False
def on_start(self, event):
    """Handle start state."""
    unit = self.unit
    # do things on start, like install packages
    unit.status = MaintenanceStatus("Installing charm software")
    # perform installation and common configuration bootstrap tasks,
    # then mark the charm as started for later hooks
    unit.status = MaintenanceStatus(
        "Software installed, performing configuration")
    self.state._started = True
def remove(self, event):
    """Remove charm."""
    unit = self.model.unit
    unit.status = MaintenanceStatus("Calculating manifests")
    self.ensure_state()
    # Compute the manifest first so removal targets exactly what was applied.
    manifest = self.get_manifest()
    unit.status = MaintenanceStatus("Removing manifests")
    self.remove_manifest(manifest)
def build_juju_unit_status(pod_status):
    """Map a k8s pod status onto the corresponding Juju unit status.

    Returns MaintenanceStatus while the pod is appearing/starting/getting
    ready and ActiveStatus once it is running and ready. A pod status that
    matches no branch previously left ``unit_status`` unbound and raised
    UnboundLocalError; it now yields a descriptive BlockedStatus instead.
    """
    if pod_status.is_unknown:
        unit_status = MaintenanceStatus("Waiting for pod to appear")
    elif not pod_status.is_running:
        unit_status = MaintenanceStatus("Pod is starting")
    elif pod_status.is_running and not pod_status.is_ready:
        unit_status = MaintenanceStatus("Pod is getting ready")
    elif pod_status.is_running and pod_status.is_ready:
        unit_status = ActiveStatus()
    else:
        # Defensive: avoid "variable referenced before assignment" for
        # unexpected pod states.
        unit_status = BlockedStatus(
            "Error: Unexpected pod_status received: {0}".format(
                pod_status.raw_status))
    return unit_status
def _on_install(self, _):
    """Install OS dependencies, clone the ML app and install its requirements.

    Fix: every ``subprocess.run`` call now passes ``check=True`` so a
    failed apt/git/pip step raises CalledProcessError instead of being
    silently ignored and then reporting "ML app installed".
    """
    self.unit.status = MaintenanceStatus("Installing dependencies")
    subprocess.run(["apt", "update"], check=True)
    subprocess.run(
        ["apt", "install", "-y", "git", "python3-pip"], check=True)

    self.unit.status = MaintenanceStatus("Installing ML app")
    repo_url = "https://github.com/daviddvs/ml_nfv_ec.git"
    workdir = os.path.join(os.path.expanduser('~'), "ml_nfv_ec")
    subprocess.run(["git", "clone", repo_url, workdir], check=True)

    # The monitoring component lives on the 'devel' branch of mon/.
    mon_dir = os.path.join(workdir, "mon")
    subprocess.run(["git", "checkout", "devel"], cwd=mon_dir, check=True)
    subprocess.run(
        ["pip3", "install", "-r", "requirements.txt"],
        cwd=mon_dir, check=True)

    self.unit.status = ActiveStatus("ML app installed")
def on_stop(self, event):
    """Stop the managed prometheus service and close its port."""
    # If configuration never completed, retry this event later instead
    # of stopping a service that was never set up.
    if not self.state.configured:
        logging.warning(
            "Start called before configuration complete, deferring event: {}"
            .format(event.handle))
        self._defer_once(event)
        return

    self.unit.status = MaintenanceStatus("Stopping charm software")
    subprocess.check_call(["systemctl", "stop", "aa-prometheus.service"])
    ports.close_port("8000")
    self.unit.status = MaintenanceStatus("Unit stopped")
    self.state.started = False
    logging.info("Stopped")
def build_juju_unit_status(pod_status):
    """Translate a k8s pod status into a Juju unit status, with debug logs.

    Fixes: the "pod is not running" branch previously logged
    "k8s pod status is running" (the opposite of the truth), and a pod
    status matching no branch left ``unit_status`` unbound, raising
    UnboundLocalError on return.
    """
    if pod_status.is_unknown:
        log.debug("k8s pod status is unknown")
        unit_status = MaintenanceStatus("Waiting for pod to appear")
    elif not pod_status.is_running:
        # BUG FIX: this branch means the pod is NOT running; the old
        # message asserted the opposite.
        log.debug("k8s pod status is not running")
        unit_status = MaintenanceStatus("Pod is starting")
    elif pod_status.is_running and not pod_status.is_ready:
        log.debug("k8s pod status is running but not ready")
        unit_status = MaintenanceStatus("Pod is getting ready")
    elif pod_status.is_running and pod_status.is_ready:
        log.debug("k8s pod status is running and ready")
        unit_status = ActiveStatus()
    else:
        # Defensive: unexpected states must not leave unit_status unbound.
        log.debug("k8s pod status is unexpected")
        unit_status = BlockedStatus(
            "Error: Unexpected pod_status received: {0}".format(
                pod_status.raw_status))
    return unit_status
def test_relation_changed_with_node_and_unit_mismatch(self, mock_es_nodes):
    """A node/unit count mismatch sets the 'waiting to join' status."""
    self.harness.set_leader(True)
    self.harness.update_config(MINIMAL_CONFIG.copy())

    expected_num_es_nodes = 2
    mock_es_nodes.return_value = expected_num_es_nodes

    # Deliberately join a different number of units than es_nodes.
    expected_num_units = 3
    rel_id = self.harness.add_relation('elasticsearch', 'elasticsearch')
    rel = self.harness.model.get_relation('elasticsearch')
    for unit_index in range(1, expected_num_units):
        self.harness.add_relation_unit(
            rel_id, 'elasticsearch-operator/{}'.format(unit_index))

    # Sanity-check the mismatch is visible to the charm.
    self.assertEqual(expected_num_es_nodes, self.harness.charm.num_es_nodes)
    self.assertEqual(expected_num_units, self.harness.charm.num_hosts)

    # The relation-changed handler should report the mismatch.
    self.harness.charm.on.elasticsearch_relation_changed.emit(rel)
    self.assertEqual(
        MaintenanceStatus('Waiting for nodes to join ES cluster'),
        self.harness.charm.unit.status,
    )
def on_start(self, event):
    """Called when the charm is being installed"""
    # The peers relation must be joined before keys can be shared;
    # otherwise retry this event later.
    if not self.peers.is_joined:
        event.defer()
        return
    unit = self.model.unit
    if not SSHProxy.has_ssh_key():
        unit.status = MaintenanceStatus("Generating SSH keys...")
        # NOTE(review): pubkey/privkey are assigned but never used below —
        # likely leftovers; confirm before removing.
        pubkey = None
        privkey = None
        if self.is_leader:
            if self.peers.is_cluster_initialized:
                # Keys already exist in the peer relation: reuse them
                # rather than generating a divergent pair.
                SSHProxy.write_ssh_keys(
                    public=self.peers.ssh_public_key,
                    private=self.peers.ssh_private_key,
                )
            else:
                # First leader start: create the key pair and announce it
                # so peers can pick it up.
                SSHProxy.generate_ssh_key()
                self.on.ssh_keys_initialized.emit(
                    SSHProxy.get_ssh_public_key(),
                    SSHProxy.get_ssh_private_key())
            unit.status = ActiveStatus()
        else:
            # Non-leaders wait until the leader has published the keys.
            unit.status = WaitingStatus(
                "Waiting for leader to populate the keys")
def _on_config_changed(self, event) -> None:
    """Patch the StatefulSet and (re)configure the dashboard workloads."""
    # Without sufficient K8s privileges we cannot proceed; retry later.
    if not self._k8s_auth():
        event.defer()
        return

    # The default StatefulSet needs extra volume mounts; ensure the patch
    # is applied on each invocation.
    if not self._statefulset_patched:
        self._patch_stateful_set()
        self.unit.status = MaintenanceStatus("waiting for changes to apply")

    try:
        # Configure and start the Metrics Scraper and the Dashboard.
        self._config_scraper()
        self._config_dashboard()
    except ConnectionError:
        logger.info("pebble socket not available, deferring config-changed")
        event.defer()
        return

    self.unit.status = ActiveStatus()
def on_db_relation_changed(self, event):
    """Handle an example db relation's change event."""
    relation_data = event.relation.data[event.unit]
    self.password = relation_data.get("password")
    self.unit.status = MaintenanceStatus("Configuring database")
    # Only flag the database as configured once MySQL reports ready.
    if self.mysql.is_ready:
        event.log("Database relation complete")
        self.state._db_configured = True
def test_on_install(self, _call, _render, _setup, _install):
    """install hook sets status, installs packages, and renders config."""
    self.harness.charm.on.install.emit()

    expected_status = MaintenanceStatus("installing pip and virtualenv")
    self.assertEqual(self.harness.charm.unit.status, expected_status)
    _install.assert_called_with(["python3-pip", "python3-virtualenv"])
    _setup.assert_called_once()
    _render.assert_called_once()
def _on_config_changed(self, event):
    """
    Any configuration change trigger a complete reconfigure of the php
    and apache and also a restart of apache.

    :param event:
    :return:
    """
    self.unit.status = MaintenanceStatus("Begin config apache2.")
    template_dir = Path(self.charm_dir / 'templates')
    utils.config_apache2(template_dir, 'nextcloud.conf.j2')
    self._stored.apache_configured = True
    self.unit.status = MaintenanceStatus("apache2 config complete.")
    self._config_php()
    # Apply everything with a full apache restart, then refresh status.
    sp.check_call(['systemctl', 'restart', 'apache2.service'])
    self._on_update_status(event)
def refresh(self, event):
    """Refresh action handler — currently stubbed out (see FIXMEs).

    NOTE(review): the early ``return`` below makes everything after it
    unreachable; the remainder is the intended refresh flow kept as
    work-in-progress.
    """
    logger.info("Refresh! is leader %s", self.unit.is_leader)
    logger.info("Refresh! is leader() %s", self.unit.is_leader())
    event.set_results({
        #FIXME
        'message': "Backup ended ok.",
        "time": 1361.77241278412983472378946237868,
    })
    return
    #FIXME
    # ---- unreachable below this point: intended implementation ----
    self.unit.status = MaintenanceStatus("Refreshing blog")
    logger.info("Updating blog files")
    os.chdir(BLOGDIR)
    run(['git', 'pull'])
    logger.info("Rebuilding the blog")
    self._build_blog()
    #FIXME: log, set the active status and answer with the git hash
    git_hash = "FIXME"
    msg = "Blog refreshed to HEAD {}".format(git_hash)
    logger.info(msg)
    self.unit.status = ActiveStatus(msg)
    fail = False  #FIXME
    if fail:
        event.fail("Device error: no space left")
    else:
        event.set_results({"HEAD hash": git_hash})
def configure_pod(self, _=None) -> NoReturn:
    """Assemble the pod spec and apply it, if possible.

    Every failure mode is translated into a BlockedStatus that names
    the cause; unexpected exceptions are caught last so the unit never
    crashes silently.
    """
    try:
        if self.unit.is_leader():
            # Only the leader builds and applies the pod spec.
            self.unit.status = MaintenanceStatus("Assembling pod spec")
            image_info = self.image.fetch()
            build_kwargs = self._get_build_pod_spec_kwargs()
            pod_spec = self.build_pod_spec(image_info, **build_kwargs)
            self._debug_if_needed(pod_spec)
            self._set_pod_spec(pod_spec)
        self.unit.status = ActiveStatus("ready")
    except OCIImageResourceError:
        self.unit.status = BlockedStatus("Error fetching image information")
    except ValidationError as err:
        logger.error(f"Config data validation error: {err}")
        logger.debug(traceback.format_exc())
        self.unit.status = BlockedStatus(str(err))
    except RelationsMissing as err:
        logger.error(f"Relation missing error: {err.message}")
        logger.debug(traceback.format_exc())
        self.unit.status = BlockedStatus(err.message)
    except ModelError as err:
        self.unit.status = BlockedStatus(str(err))
    except Exception as err:
        # Catch-all: unexpected failures still surface via unit status.
        error_message = f"Unknown exception: {err}"
        logger.error(error_message)
        logger.debug(traceback.format_exc())
        self.unit.status = BlockedStatus(error_message)
def configure_pod(self):
    """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`."""
    # Validate high-availability (or single-node) configuration first.
    self._check_high_availability()

    # With peers but no DB relation the charm is blocked; do not touch
    # the pod spec until that is resolved.
    if self.unit.status == BlockedStatus('Need database relation for HA.'):
        log.error('Application is in a blocked state. '
                  'Please resolve before pod spec can be set.')
        return

    # Followers have nothing to do beyond reporting active.
    if not self.unit.is_leader():
        self.unit.status = ActiveStatus()
        return

    # general pod spec component updates
    self.unit.status = MaintenanceStatus('Building pod spec.')
    pod_spec = self._build_pod_spec()
    if not pod_spec:
        return
    self._update_pod_data_source_config_file(pod_spec)
    self._update_pod_config_ini_file(pod_spec)

    # set the pod spec with Juju
    self.model.pod.set_spec(pod_spec)
    self.unit.status = ActiveStatus()
def test_send_config_dhss_enabled(self, _render, _get_loader, _send_backend_config, _install_pkgs):
    """DHSS-enabled config renders/sends only once relation data arrives."""
    _render.return_value = 'test-rendered-manila-backend-config'
    _get_loader.return_value = 'test-loader'

    config = copy.deepcopy(self.REQUIRED_CHARM_CONFIG_BY_DEFAULT)
    config.update({
        'driver-handles-share-servers': True,
        'root-volume-aggregate-name': 'test_cluster_01_VM_DISK_1',
    })
    self.harness.update_config(config)
    self.harness.begin_with_initial_hooks()

    # With incomplete relation data nothing is rendered or sent yet.
    self.assertFalse(self.harness.charm.state.is_started)
    _render.assert_not_called()
    _get_loader.assert_not_called()
    _send_backend_config.assert_not_called()
    _install_pkgs.assert_called_once_with()
    self.assertEqual(self.harness.charm.unit.status, MaintenanceStatus(''))

    # Completing the manila-plugin relation triggers the full workflow.
    rel_id = self.harness.add_relation('manila-plugin', 'manila')
    self.harness.add_relation_unit(rel_id, 'manila/0')
    auth_data = json.dumps({'data': 'test-manila-auth-data'})
    self.harness.update_relation_data(
        rel_id, 'manila/0', {'_authentication_data': auth_data})

    self.assertTrue(self.harness.charm.state.is_started)
    _render.assert_called_once_with(source='manila.conf',
                                    template_loader='test-loader',
                                    target=None,
                                    context=self.harness.charm.adapters)
    _get_loader.assert_called_once_with('templates/', 'default')
    _send_backend_config.assert_called_once_with(
        'netapp-ontap', 'test-rendered-manila-backend-config')
    self.assertEqual(self.harness.charm.unit.status,
                     ActiveStatus('Unit is ready'))
def on_config_changed(self, event):
    """Rebuild the pod spec on config change and apply it if it differs."""
    logging.info('CONFIG CHANGED')
    new_pod_spec = self.makePodSpec()
    # NOTE(review): self.state.podSpec is read but not updated here;
    # presumably another handler persists it — verify against callers.
    if self.state.podSpec != new_pod_spec:
        self.model.unit.status = MaintenanceStatus('Configuring pod')
        self.model.pod.set_spec(new_pod_spec)
    self.model.unit.status = ActiveStatus('ready')
def build_juju_unit_status(pod_status):
    """Translate a k8s pod status into the matching Juju unit status."""
    if pod_status.is_unknown:
        return MaintenanceStatus("Waiting for pod to appear")
    if not pod_status.is_running:
        return MaintenanceStatus("Pod is starting")
    if pod_status.is_running and not pod_status.is_ready:
        return MaintenanceStatus("Pod is getting ready")
    if pod_status.is_running and pod_status.is_ready:
        return ActiveStatus()
    # Covering a "variable referenced before assignment" linter error
    return BlockedStatus(
        "Error: Unexpected pod_status received: {0}".format(
            pod_status.raw_status))
def configure_pod(self, _=None) -> NoReturn:
    """Assemble the pod spec and apply it, if possible."""
    unit = self.unit
    # Followers never touch the pod spec; they simply report ready.
    if not unit.is_leader():
        unit.status = ActiveStatus("ready")
        return

    unit.status = MaintenanceStatus("Assembling pod spec")

    # Image is taken straight from config here (no OCI resource fetch).
    image_info = self.config["image"]

    try:
        pod_spec = make_pod_spec(
            image_info,
            self.model.config,
            self.model.app.name,
        )
    except ValueError as exc:
        logger.exception("Config data validation error")
        unit.status = BlockedStatus(str(exc))
        return

    # Apply only when the spec actually changed, then cache it.
    if self.state.pod_spec != pod_spec:
        self.model.pod.set_spec(pod_spec)
        self.state.pod_spec = pod_spec

    unit.status = ActiveStatus("ready")
def _on_config_changed(self, _):
    """Reapply the pod spec after a configuration change."""
    unit = self.framework.model.unit
    # Clear the flag first so a failure mid-way leaves us unconfigured.
    self._stored.configured = False
    unit.status = MaintenanceStatus("Configuring pod")
    logger.info('Reapplying the updated pod spec')
    self.set_pod_spec()
    unit.status = ActiveStatus("Ready")
    self._stored.configured = True
def on_install(self, event):
    """Install the Munge and SlurmDBD snaps from attached resources."""
    unit = self.framework.model.unit
    resources = self.framework.model.resources

    # Install the Munge snap (fetch the resource before updating status).
    munge_snap = resources.fetch("munge-snap")
    unit.status = MaintenanceStatus("Installing Munge snap.")
    snap_local_install(snap_path=munge_snap)

    # Install SlurmDBD snap.
    slurmdbd_snap = resources.fetch("slurmdbd-snap")
    unit.status = MaintenanceStatus("Installing SlurmDBD snap.")
    snap_local_install(snap_path=slurmdbd_snap)

    unit.status = ActiveStatus()
    self.state.slurmdbd_snap_installed = True
def update_status(self):
    """Assess the unit's state and set the appropriate Juju status."""
    logging.info("Updating status")

    if self.state.series_upgrade:
        self.unit.status = BlockedStatus(
            'Ready for do-release-upgrade and reboot. '
            'Set complete when finished.')
        return

    if self.state.is_paused:
        self.unit.status = MaintenanceStatus(
            "Paused. Use 'resume' action to resume normal service.")
        return

    # Collect every required relation that has not been established yet.
    missing_relations = [
        relation for relation in self.REQUIRED_RELATIONS
        if not self.model.get_relation(relation)
    ]
    if missing_relations:
        self.unit.status = BlockedStatus('Missing relations: {}'.format(
            ', '.join(missing_relations)))
        return

    try:
        # Custom checks return True if the checked passed else False.
        # If the check failed the custom check will have set the status.
        if not self.custom_status_check():
            return
    except NotImplementedError:
        pass

    if self.state.is_started:
        self.unit.status = ActiveStatus('Unit is ready')
    else:
        self.unit.status = WaitingStatus('Charm configuration in progress')
    logging.info("Status updated")