def _check(self, *, with_dispatch=False, dispatch_path=''):
    """Helper for below tests."""
    class MyCharm(CharmBase):
        def __init__(self, framework):
            super().__init__(framework)

    fake_environ = {
        'JUJU_UNIT_NAME': 'test_main/0',
        'JUJU_MODEL_NAME': 'mymodel',
    }
    if dispatch_path:
        fake_environ['JUJU_DISPATCH_PATH'] = dispatch_path
        fake_environ['JUJU_VERSION'] = '2.8.0'
    else:
        fake_environ['JUJU_VERSION'] = '2.7.0'

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)
        fake_metadata = tmpdir / 'metadata.yaml'
        with fake_metadata.open('wb') as fh:
            fh.write(b'name: test')
        if with_dispatch:
            dispatch = tmpdir / 'dispatch'
            with dispatch.open('wt', encoding='utf8') as fh:
                fh.write('')
                os.fchmod(fh.fileno(), 0o755)

        with patch.dict(os.environ, fake_environ):
            with patch('ops.main._emit_charm_event') as mock_charm_event:
                with patch('ops.main._get_charm_dir') as mock_charmdir:
                    mock_charmdir.return_value = tmpdir
                    main(MyCharm)

    self.assertEqual(mock_charm_event.call_count, 1)
    return mock_charm_event.call_args[0][1]
def _check(self, charm_class):
    """Helper for below tests."""
    fake_environ = {
        'JUJU_UNIT_NAME': 'test_main/0',
        'JUJU_MODEL_NAME': 'mymodel',
        'JUJU_VERSION': '2.7.0',
    }
    with patch.dict(os.environ, fake_environ):
        with patch('ops.main._emit_charm_event'):
            with patch('ops.main._get_charm_dir') as mock_charmdir:
                with tempfile.TemporaryDirectory() as tmpdirname:
                    tmpdirname = Path(tmpdirname)
                    fake_metadata = tmpdirname / 'metadata.yaml'
                    with fake_metadata.open('wb') as fh:
                        fh.write(b'name: test')
                    mock_charmdir.return_value = tmpdirname

                    with warnings.catch_warnings(record=True) as warnings_cm:
                        main(charm_class)

    return warnings_cm
logger = logging.getLogger()


class FooCharm(CharmBase):
    bar = Bar

    def __init__(self, *args):
        super().__init__(*args)
        self._bar = self.bar(self, 'bar')
        self.db = MySQLClient(self, 'db')
        self.framework.observe(
            self.db.on.database_available, self._on_database_available
        )
        self.framework.observe(
            self._bar.on.config_changed, self._on_config_changed
        )

    def _on_database_available(self, event):
        self._bar.foo()
        logger.info(event.db_info.user)
        self.unit.status = ActiveStatus("db related")

    def _on_config_changed(self, event):
        logger.info("start application")
        self.unit.status = ActiveStatus("application started")


if __name__ == "__main__":
    main(FooCharm)
        wd = wd + "/backend"
        subprocess.run(["git", "checkout", "devel"], cwd=wd)
        subprocess.run(["pip3", "install", "-r", "requirements.txt"], cwd=wd)
        self.unit.status = ActiveStatus("ML app installed")

    def _on_start(self, _):
        self.unit.status = MaintenanceStatus("Starting ML app")
        wd = os.path.expanduser('~') + "/ml_nfv_ec/backend"
        subprocess.Popen([
            "python3", "model.py",
            "--classifier", "--regressor", "--clustering",
            "-i", "5"
        ], cwd=wd)
        time.sleep(2)  # wait until it is running
        self.unit.status = ActiveStatus("ML app started")

    def get_server_ipaddr(self, event):
        self.unit.status = MaintenanceStatus("Reading Server IP")
        ip = event.relation.data[event.unit].get("ip")
        wd = os.path.expanduser('~') + "/ml_nfv_ec/backend"
        if ip is not None:
            subprocess.run(
                ["python3", "model.py", "--addhost", str(ip) + ",root,root"],
                cwd=wd)
            self.unit.status = ActiveStatus(f"Added server {ip}")


if __name__ == "__main__":
    main(ModelerCharm)
"""Return self._stored.default_partition.""" return self._stored.default_partition def is_slurm_installed(self): """Return true/false based on whether or not slurm is installed.""" return self._stored.slurm_installed def set_slurmctld_available(self, slurmctld_available): """Set slurmctld_available.""" self._stored.slurmctld_available = slurmctld_available def set_slurmdbd_available(self, slurmdbd_available): """Set slurmdbd_available.""" self._stored.slurmdbd_available = slurmdbd_available def set_default_partition(self, partition_name): """Set self._stored.default_partition.""" self._stored.default_partition = partition_name def set_slurmd_available(self, slurmd_available): """Set slurmd_available.""" self._stored.slurmd_available = slurmd_available def set_slurmrestd_available(self, slurmrestd_available): """Set slurmrestd_available.""" self._stored.slurmrestd_available = slurmrestd_available if __name__ == "__main__": main(SlurmConfiguratorCharm)
    return ImageMeta(**resource_dict)


def _get_unit_status_setter_func(charm):
    def set_unit_status(status):
        charm.unit.status = status
    return set_unit_status


# MODELS
ImageMeta = namedtuple('ImageMeta', [
    'registrypath',
    'username',
    'password',
])


class ResourceError(ModelError):
    def __init__(self, resource_name, message):
        super().__init__(resource_name)
        # Interpolate the actual values, not the literal strings
        # 'resource_name' and 'message'.
        msg = "{}: {}".format(resource_name, message)
        self.status = BlockedStatus(msg)


if __name__ == "__main__":
    main(DoperatorCharm)
    def on_run_action(self, event):
        """Run an arbitrary command on the remote host."""
        cmd = event.params["command"]
        proxy = self.get_ssh_proxy()
        stdout, stderr = proxy.run(cmd)
        event.set_results({"output": stdout})
        if len(stderr):
            event.fail(stderr)

    def on_verify_ssh_credentials_action(self, event):
        """Verify the SSH credentials for this unit."""
        proxy = self.get_ssh_proxy()
        verified = proxy.verify_credentials()
        if verified:
            print("Verified!")
            event.set_results({"verified": True})
        else:
            print("Verification failed!")
            event.set_results({"verified": False})


if __name__ == "__main__":
    main(SimpleCharm)
        peers = list(peers.values())
        for p in peers:
            p['thruk_id'] = hashlib.md5(
                p['nagios_context'].encode('utf-8')).hexdigest()
        return peers

    @contextlib.contextmanager
    def restart_if_changed(self, container, *filenames):
        pre_hashes = [file_hash(container, f) for f in filenames]
        yield
        post_hashes = [file_hash(container, f) for f in filenames]
        try:
            service = container.get_service(THRUK_SERVICE)
        except ModelError:
            # NOTE: Most likely the PebbleReadyEvent didn't fire yet,
            # so there's no service to restart.
            return
        if any(pre != post for pre, post in zip(pre_hashes, post_hashes)) \
                and service.is_running():
            self.unit.status = MaintenanceStatus(f'Restarting {THRUK_SERVICE}')
            container.stop(THRUK_SERVICE)
            container.start(THRUK_SERVICE)
            self.unit.status = ActiveStatus()


if __name__ == "__main__":
    main(ThrukMasterCharm)
"INSERT INTO app_data (", "username, password, endpoints, version, read_only_endpoints)", "VALUES (%s, %s, %s, %s, %s)", )), (username, password, endpoints, version, read_only_endpoints), ) def _read_test_data(self, cursor, user) -> List[Tuple]: """Reads test data from the database.""" cursor.execute(f"SELECT * FROM app_data where username = '******'") return cursor.fetchall() @property def _peers(self): """Retrieve the peer relation (`ops.model.Relation`).""" return self.model.get_relation(PEER) def _get_legacy_mysql_credentials(self, event: ActionEvent) -> None: """Retrieve legacy mariadb credentials.""" local_app_data = self._peers.data[self.app] event.set_results({ "username": local_app_data["mysql_user"], "password": local_app_data["mysql_password"], "host": local_app_data["mysql_host"], "database": local_app_data["mysql_database"], }) if __name__ == "__main__": main(ApplicationCharm)
"name": "grafana-config-ini", "mountPath": "/etc/grafana", "files": { "grafana.ini": config_content }, }], "config": {}, # used to store hashes of config file text }] } return spec def _build_grafana_ini(self): config_text = textwrap.dedent(""" [server] http_port = {0} [security] admin_user = {1} admin_password = {2} """.format( self.model.config["grafana_port"], self.model.config["admin_username"], self.model.config["admin_password"], )) return config_text if __name__ == "__main__": main(TrainingCharm)
"""Check for prereqs before writing config/restart of slurmdbd.""" if not self._check_status(): event.defer() return slurmdbd_host_port_addr = { 'slurmdbd_hostname': socket.gethostname().split(".")[0], 'slurmdbd_port': "6819", } slurmdbd_config = { 'munge_key': self._stored.munge_key, **slurmdbd_host_port_addr, **self.model.config, **self._stored.db_info, } self._slurm_manager.render_config_and_restart(slurmdbd_config) self._slurmdbd.set_slurmdbd_available_on_unit_relation_data() self.unit.status = ActiveStatus("Slurmdbd Available") def set_munge_key(self, munge_key): """Set the munge key in the stored state.""" self._stored.munge_key = munge_key def set_db_info(self, db_info): """Set the db_info in the stored state.""" self._stored.db_info = db_info if __name__ == "__main__": main(SlurmdbdCharm)
        ports = self._make_pod_ports()
        env_config = self._make_pod_envconfig()
        command = self._make_pod_command()
        volume_config = self._make_pod_volume_config()
        ingress_resources = self._make_pod_ingress_resources()
        secrets = self._make_pod_secrets()

        pod_spec = {
            "version": 3,
            "containers": [
                {
                    "name": self.framework.model.app.name,
                    "imageDetails": image_details,
                    "ports": ports,
                    "envConfig": env_config,
                    "command": command,
                    "volumeConfig": volume_config,
                }
            ],
            "kubernetesResources": {
                "ingressResources": ingress_resources or [],
                "secrets": secrets,
            },
        }

        self.model.pod.set_spec(pod_spec)
        self.unit.status = ActiveStatus()


if __name__ == "__main__":
    main(TransmissionCharm)
                    'capabilities': {
                        'add': ['NET_ADMIN', 'NET_RAW', 'SYS_ADMIN'],
                        'drop': ['ALL'],
                    },
                },
                # fields do not exist in pod_spec
                # 'TerminationGracePeriodSeconds': 2,
            },
        }],
        'kubernetesResources': {
            'secrets': [{
                'name': 'memberlist',
                'type': 'Opaque',
                'data': {
                    'secretkey': b64encode(
                        secret.encode('utf-8')).decode('utf-8'),
                },
            }]
        },
        'service': {
            'annotations': {
                'prometheus.io/port': '7472',
                'prometheus.io/scrape': 'true',
            }
        },
    },
)


if __name__ == "__main__":
    main(MetallbSpeakerCharm)
            root_password=self.model.config['root_password'])

    def makePodSpec(self):
        logging.info('MAKING POD SPEC')
        if self.model.config['ha-mode']:
            with open("templates/spec_template_ha.yaml") as spec_file:
                podSpecTemplate = spec_file.read()
            dockerImage = self.model.config['ha-image']
        else:
            with open("templates/spec_template.yaml") as spec_file:
                podSpecTemplate = spec_file.read()
            dockerImage = self.model.config['image']
        data = {
            "name": self.model.app.name,
            "docker_image": dockerImage,
            "mysql_port": int(self.model.config['mysql_port']),
            "root_password": self.model.config['root_password'],
            "application_name": self.meta.name,
            "user": self.model.config['user'],
            "password": self.model.config['password'],
            "database": self.model.config['database'],
        }
        podSpec = podSpecTemplate % data
        # Use safe_load: yaml.load without an explicit Loader is unsafe
        # and deprecated since PyYAML 5.1.
        podSpec = yaml.safe_load(podSpec)
        return podSpec


if __name__ == "__main__":
    main(MariaDbCharm)
            container_config = {}
        else:
            container_config = \
                yaml.safe_load(self.framework.model.config["container_config"])
        if not isinstance(container_config, dict):
            self.framework.model.unit.status = \
                BlockedStatus("container_config is not a YAML mapping")
            return None
        return container_config


def log(message, level=None):
    """Write a message to the juju log."""
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, str):
        message = repr(message)
    # https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/binfmts.h
    # MAX_ARG_STRLEN = PAGE_SIZE * 32 = 4096 * 32
    MAX_ARG_STRLEN = 131072
    command += [message[:MAX_ARG_STRLEN]]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    subprocess.call(command)


if __name__ == '__main__':
    main(MSSQLCharm)
                            },
                            'initialDelaySeconds': 5,
                            'periodSeconds': 30,
                        },
                    },
                },
            ],
            'serviceAccount': {
                'roles': [{
                    'global': True,
                    'rules': [
                        {
                            'apiGroups': ["*"],
                            'resources': ["*"],
                            'verbs': ["*"],
                        },
                        {
                            'nonResourceURLs': ["*"],
                            'verbs': ["*"],
                        },
                    ],
                }],
            },
        })
        self.model.unit.status = ActiveStatus()


if __name__ == "__main__":
    main(RancherCharm)
                        'versions': [{
                            'name': 'v1',
                            'served': True,
                            'storage': True
                        }],
                        'validation': {
                            'openAPIV3Schema': {
                                'type': 'object',
                                'properties': {
                                    'spec': {
                                        'type': 'object',
                                        'properties': {
                                            'config': {
                                                'type': 'string'
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }]
            }
        }
        self.model.pod.set_spec(pod_spec)
        self.model.unit.status = ActiveStatus()


if __name__ == '__main__':
    main(MultusCharm)
        self._apply_spec(new_spec)
        unit.status = ActiveStatus()

    def _on_fortune_action(self, event):
        fail = event.params["fail"]
        if fail:
            event.fail(fail)
        else:
            event.set_results({
                "fortune": "A bug in the code is worth two in the documentation."
            })

    def on_upgrade_charm(self, event):
        """Upgrade the charm."""
        # raise NotImplementedError("TODO")
        unit = self.model.unit
        # Mark the unit as under Maintenance.
        unit.status = MaintenanceStatus("Upgrading charm")
        self.on_start(event)
        # When maintenance is done, return to an Active state
        unit.status = ActiveStatus()


if __name__ == "__main__":
    main(CharmK8SSparkCharm)
    def on_mon_relation_departed(self, event):
        assert event.app is not None, (
            'application name cannot be None for a relation-departed event')
        self._state['on_mon_relation_departed'].append(type(event))
        self._state['observed_event_types'].append(type(event))
        self._state['mon_relation_departed_data'] = event.snapshot()
        self._write_state()

    def on_ha_relation_broken(self, event):
        assert event.app is None, (
            'relation-broken events cannot have a reference to a remote application')
        assert event.unit is None, (
            'relation broken events cannot have a reference to a remote unit')
        self._state['on_ha_relation_broken'].append(type(event))
        self._state['observed_event_types'].append(type(event))
        self._state['ha_relation_broken_data'] = event.snapshot()
        self._write_state()

    def on_start_action(self, event):
        assert event.handle.kind == 'start_action', (
            'event action name cannot be different from the one being handled')
        self._state['on_start_action'].append(type(event))
        self._state['observed_event_types'].append(type(event))
        self._write_state()

    def on_foo_bar_action(self, event):
        assert event.handle.kind == 'foo_bar_action', (
            'event action name cannot be different from the one being handled')
        self._state['on_foo_bar_action'].append(type(event))
        self._state['observed_event_types'].append(type(event))
        self._write_state()


if __name__ == '__main__':
    main(Charm)
        pass

    def _on_db_info_available(self, event):
        """Store the db_info in the StoredState for later use."""
        db_info = {
            'user': event.db_info.user,
            'password': event.db_info.password,
            'host': event.db_info.host,
            'port': event.db_info.port,
            'database': event.db_info.database,
        }
        self._stored.db_info = db_info
        self.slurm_ops.on.configure_slurm.emit()
        self.unit.status = ActiveStatus("db info available")

    def _on_configure_slurm(self, event):
        """Render the slurmdbd.yaml and set the snap.mode."""
        hostname = socket.gethostname().split(".")[0]
        self.slurm_ops.render_slurm_config(
            f"{os.getcwd()}/src/slurmdbd.yaml.tmpl",
            "/var/snap/slurm/common/etc/slurm-configurator/slurmdbd.yaml",
            context={**{"hostname": hostname}, **self._stored.db_info},
        )
        self.unit.status = ActiveStatus("rendered to snap_common")


if __name__ == "__main__":
    main(SlurmDBDCharm)
            event.params['name'],
            event.params['hcl'].format(backend=event.params['backend']))

    def _new_app_role_action(self, event):
        approle_name = event.params['name']
        policy_name = event.params['policy']
        cidr = event.params['cidr']
        self.client.create_role(
            approle_name,
            token_ttl='60s',
            token_max_ttl='60s',
            policies=[policy_name],
            bind_secret_id='true',
            bound_cidr_list=cidr)

    def _get_token_action(self, event):
        name = event.params['name']
        cidr = event.params['cidr']
        response = self.client.write(
            'auth/approle/role/{}/secret-id'.format(name),
            wrap_ttl='1h',
            cidr_list=cidr)
        event.set_results({"token": response['wrap_info']['token']})

    def _get_root_token_action(self, event):
        event.set_results({"token": self.peers.root_token})


if __name__ == "__main__":
    # use_juju_for_storage is used to workaround
    main(VaultCharm)
import sys
sys.path.append('lib')

import yaml

from ops.charm import CharmBase
from ops.main import main


class CDKCats(CharmBase):

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.start, self.on_start)

    def on_start(self, event):
        # Deploy Container
        with open("cdk-cats-manifest.yaml", 'r') as stream:
            try:
                response = yaml.safe_load(stream)
                self._apply_spec(response)
            except yaml.YAMLError as exc:
                pass


if __name__ == "__main__":
    main(CDKCats)
        Redis, for instances using the redis Python library.
        """
        if not self.unit.is_leader():
            logger.debug("Relation changes ignored by non-leader")
            return

        event.relation.data[self.unit]['hostname'] = str(
            self.bind_address(event))
        event.relation.data[self.unit]['port'] = str(DEFAULT_PORT)
        # The reactive Redis charm exposes also 'password'. When tackling
        # https://github.com/canonical/redis-operator/issues/7 add 'password'
        # field so that it matches the exposed interface information from it.
        # event.relation.data[self.unit]['password'] = ''

    def bind_address(self, event):
        relation = self.model.get_relation(event.relation.name,
                                           event.relation.id)
        if address := self.model.get_binding(relation).network.bind_address:
            return address
        return self.app.name

    def set_ready_status(self):
        logger.debug('Pod is ready.')
        self.unit.status = ActiveStatus()
        self.app.status = ActiveStatus()


if __name__ == "__main__":
    main(RedisCharm)
            '-pubkey -noout').split())
        with open('/etc/ceph/iscsi-gateway-pub.key', 'w') as f:
            f.write(cert_out.decode('UTF-8'))
        self.state.enable_tls = True
        self.on_pools_available(event)

    def custom_status_check(self):
        if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
            self.unit.status = ops.model.BlockedStatus(
                '{} is an invalid unit count'.format(self.peers.unit_count))
            return False
        return True


@ops_openstack.charm_class
class CephISCSIGatewayCharmJewel(CephISCSIGatewayCharmBase):
    state = StoredState()
    release = 'jewel'


@ops_openstack.charm_class
class CephISCSIGatewayCharmOcto(CephISCSIGatewayCharmBase):
    state = StoredState()
    release = 'octopus'


if __name__ == '__main__':
    main(ops_openstack.get_charm_class_for_release())
"prefix": "/katib/", "service": self.model.app.name, "port": self.model.config["port"], } ) def _check_leader(self): if not self.unit.is_leader(): # We can't do anything useful when not the leader, so do nothing. raise CheckFailed("Waiting for leadership", WaitingStatus) def _get_interfaces(self): try: interfaces = get_interfaces(self) except NoVersionsListed as err: raise CheckFailed(err, WaitingStatus) except NoCompatibleVersions as err: raise CheckFailed(err, BlockedStatus) return interfaces def _check_image_details(self): try: image_details = self.image.fetch() except OCIImageResourceError as e: raise CheckFailed(f"{e.status.message}", e.status_type) return image_details if __name__ == "__main__": main(Operator)
f"Waiting for {relation_name} relation data", WaitingStatus) # Unpack data (we care only about the first element) data_dict = list(unpacked_relation_data.values())[0] # Catch if empty data dict is received (JSONSchema ValidationError above does not raise # when this happens) # Remove once addressed in: # https://github.com/canonical/serialized-data-interface/issues/28 if len(data_dict) == 0: raise CheckFailedError( f"Found incomplete/incorrect relation data for {relation_name}.", BlockedStatus, ) return data_dict class CheckFailedError(Exception): """Raise this exception if one of the checks in main fails.""" def __init__(self, msg, status_type=None): super().__init__() self.msg = msg self.status_type = status_type self.status = status_type(msg) if __name__ == "__main__": main(KfpUiOperator)
            return

        self.unit.status = MaintenanceStatus("Starting charm software")
        # Start software
        self.unit.status = ActiveStatus("Unit is ready")
        self.state.started = True
        logging.info("Started")

    def _defer_once(self, event):
        """Defer the given event, but only once."""
        notice_count = 0
        handle = str(event.handle)
        for event_path, _, _ in self.framework._storage.notices(None):
            if event_path.startswith(handle.split('[')[0]):
                notice_count += 1
                logging.debug("Found event: {} x {}".format(
                    event_path, notice_count))

        if notice_count > 1:
            logging.debug("Not deferring {} notice count of {}".format(
                handle, notice_count))
        else:
            logging.debug("Deferring {} notice count of {}".format(
                handle, notice_count))
            event.defer()


if __name__ == "__main__":
    main(BusyboxCharm)
        unit = self.model.unit
        # Install your software and its dependencies
        unit.status = ActiveStatus()

    def on_upgrade_charm(self, event):
        """Upgrade the charm."""
        unit = self.model.unit
        # Mark the unit as under Maintenance.
        unit.status = MaintenanceStatus("Upgrading charm")
        self.on_install(event)
        # When maintenance is done, return to an Active state
        unit.status = ActiveStatus()

    def on_touch_action(self, event):
        """Touch a file."""
        try:
            filename = event.params["filename"]
            stdout = subprocess.check_output(
                "touch {}".format(filename), shell=True)
            event.set_results({"output": stdout})
        except subprocess.CalledProcessError as ex:
            # event.fail expects a string message, not the exception object.
            event.fail(str(ex))


if __name__ == "__main__":
    main(SkeletonCharm)
        self.unit.status = ActiveStatus()

    def _on_daemon_started(self, event):
        if not self.cluster.is_joined and not self.is_single_node:
            self.unit.status = WaitingStatus('Waiting for peer units to join.')
            event.defer()
            return

        if self.cluster.is_cluster_initialized:
            # Skip this event when some other unit has already initialized a cluster.
            self.unit.status = ActiveStatus()
            return
        elif not self.unit.is_leader():
            self.unit.status = WaitingStatus(
                'Waiting for the leader unit to initialize a cluster.')
            event.defer()
            return

        self.unit.status = MaintenanceStatus('Initializing the cluster.')
        # Initialize the cluster if we're a leader in a multi-node deployment;
        # otherwise it has already been initialized by running start-single-node.
        if not self.is_single_node and self.model.unit.is_leader():
            self.instance_manager.init_db()
        self.unit.status = ActiveStatus()

    def _on_config_changed(self, event):
        self.instance_manager.reconfigure()


if __name__ == '__main__':
    main(CockroachDbCharm)
        # check for valid high availability (or single node) configuration
        self._check_high_availability()

        # in the case where we have peers but no DB connection,
        # don't set the pod spec until it is resolved
        if self.unit.status == BlockedStatus('Need database relation for HA.'):
            log.error('Application is in a blocked state. '
                      'Please resolve before pod spec can be set.')
            return

        if not self.unit.is_leader():
            self.unit.status = ActiveStatus()
            return

        # general pod spec component updates
        self.unit.status = MaintenanceStatus('Building pod spec.')
        pod_spec = self._build_pod_spec()
        if not pod_spec:
            return
        self._update_pod_data_source_config_file(pod_spec)
        self._update_pod_config_ini_file(pod_spec)

        # set the pod spec with Juju
        self.model.pod.set_spec(pod_spec)
        self.unit.status = ActiveStatus()


if __name__ == '__main__':
    main(GrafanaK8s)