def install_cherrypy_helloworld():
    """Install the cherrypy helloworld service."""
    # Install dependencies for our helloworld service
    for pkg in ['CherryPy', 'jinja2']:
        pip_install(pkg)

    # When we first run, generate the systemd service file
    with open('{}/templates/helloworld.service.j2'.format(charm_dir())) as f:
        t = Template(f.read())

    # Render the new configuration
    rendered = t.render(
        charm_dir=charm_dir(),
    )
    status_set('maintenance', 'Creating helloworld service...')
    service_file = "/etc/systemd/system/{}.service".format(charm_name())
    with open(service_file, "w") as svc:
        svc.write(rendered)

    # Render the initial configuration
    render_config()

    status_set('maintenance', 'Starting helloworld service...')
    service_start(charm_name())

    # Make sure the port is open
    update_http_port()

    status_set('active', 'Ready!')
    set_flag('cherrypy-helloworld.installed')
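# A minimal sketch of what templates/helloworld.service.j2 might contain;
# everything below is an assumption for illustration, not taken from the
# original charm. The only variable the charm passes to t.render() above
# is charm_dir.
HELLOWORLD_SERVICE_TEMPLATE = """\
[Unit]
Description=CherryPy helloworld service

[Service]
ExecStart=/usr/bin/python3 {{ charm_dir }}/scripts/helloworld.py
Restart=on-failure

[Install]
WantedBy=multi-user.target
"""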
def ha_relation_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    sstpsswd = config('sst-password')
    resources = {'res_mysql_monitor': 'ocf:percona:mysql_monitor'}
    resource_params = {'res_mysql_monitor':
                       RES_MONITOR_PARAMS % {'sstpass': sstpsswd}}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}
    else:
        vip_iface = (get_iface_for_address(cluster_config['vip']) or
                     config('vip_iface'))
        vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
                    config('vip_cidr'))

        if config('prefer-ipv6'):
            res_mysql_vip = 'ocf:heartbeat:IPv6addr'
            vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)
        else:
            res_mysql_vip = 'ocf:heartbeat:IPaddr2'
            vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                         (cluster_config['vip'], vip_cidr, vip_iface)

        resources['res_mysql_vip'] = res_mysql_vip
        resource_params['res_mysql_vip'] = vip_params
        group_name = 'grp_percona_cluster'
        groups = {group_name: 'res_mysql_vip'}

    clones = {'cl_mysql_monitor': 'res_mysql_monitor meta interleave=true'}

    colocations = {'colo_percona_cluster': 'inf: {} cl_mysql_monitor'
                                           ''.format(group_name)}

    locations = {'loc_percona_cluster': '{} rule inf: writable eq 1'
                                        ''.format(group_name)}

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     groups=groups,
                     clones=clones,
                     colocations=colocations,
                     locations=locations)
def _finalize():
    if _statuses['_initialized']:
        # If we haven't been initialized, we can't truly be finalized.
        # This makes things more efficient if an action sets a status
        # but subsequently starts the reactive bus.
        _statuses['_finalized'] = True
    charm_name = hookenv.charm_name()
    charm_dir = Path(hookenv.charm_dir())
    with charm_dir.joinpath('layer.yaml').open() as fp:
        includes = yaml.safe_load(fp.read()).get('includes', [])
    layer_order = includes + [charm_name]

    for workload_state in WorkloadState:
        if workload_state not in _statuses:
            continue
        if not _statuses[workload_state]:
            continue

        def _get_key(record):
            layer_name, message = record
            if layer_name in layer_order:
                return layer_order.index(layer_name)
            else:
                return 0

        sorted_statuses = sorted(_statuses[workload_state], key=_get_key)
        layer_name, message = sorted_statuses[-1]
        _status_set_immediate(workload_state, message)
        break
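# A sketch (an assumption, inferred from the usage above) of the
# module-level _statuses registry that _finalize() walks: two bookkeeping
# flags plus, per WorkloadState, a list of (layer_name, message) records.
_statuses = {
    '_initialized': False,
    '_finalized': False,
    # e.g. WorkloadState.MAINTENANCE: [('apt', 'installing packages')],
}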
def install_custom_ca():
    """
    Installs a configured CA cert into the system-wide location.
    """
    ca_cert = hookenv.config().get('custom-registry-ca')
    if ca_cert:
        try:
            # decode to bytes, as that's what install_ca_cert wants
            _ca = b64decode(ca_cert)
        except Exception:
            status.blocked(
                'Invalid base64 value for custom-registry-ca config')
            return
        else:
            host.install_ca_cert(_ca, name='juju-custom-registry')
            charm = hookenv.charm_name()
            hookenv.log(
                'Custom registry CA has been installed for {}'.format(charm))

            # manage appropriate charm flags to recycle the runtime daemon
            if charm == 'docker':
                clear_flag('docker.available')
                set_flag('docker.restart')
            elif charm == 'containerd':
                set_flag('containerd.restart')
            else:
                hookenv.log('Unknown runtime: {}. '
                            'Cannot request a service restart.'.format(charm))
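# Illustrative only: the custom-registry-ca config value is the
# base64-encoded PEM certificate, which install_custom_ca() decodes back
# to bytes. The file path below is hypothetical.
from base64 import b64encode

with open('ca.crt', 'rb') as f:
    config_value = b64encode(f.read()).decode('ascii')
# then e.g.: juju config containerd custom-registry-ca="<config_value>"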
def restart_service():
    """Restart the helloworld service."""
    status_set('maintenance', 'Restarting helloworld service...')
    service_restart(charm_name())
    status_set('active', 'Ready!')
    clear_flag('helloworld.restart')
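# A minimal sketch of how the restart gets requested elsewhere in the
# charm; the triggering handler below is an assumption based on the
# clear_flag('helloworld.restart') call above.
from charms.reactive import set_flag


def request_restart():
    # Raising this flag lets the reactive bus dispatch restart_service(),
    # which clears it again once the restart has completed.
    set_flag('helloworld.restart')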
def test_charm_name(self):
    open_ = mock_open()
    open_.return_value = io.BytesIO(CHARM_METADATA)
    with patch('charmhelpers.core.hookenv.open', open_, create=True):
        with patch.dict('os.environ', {'CHARM_DIR': '/var/empty'}):
            charm_name = hookenv.charm_name()
    self.assertEqual("testmock", charm_name)
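# A plausible definition of the CHARM_METADATA fixture used above; the
# exact contents are an assumption, but the metadata must carry the 'name'
# key that hookenv.charm_name() reads from metadata.yaml.
CHARM_METADATA = b"""name: testmock
summary: A mock charm for testing
description: A mock charm for testing
"""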
def make_pod_spec(base_url):
    with open('reactive/spec_template.yaml') as spec_file:
        pod_spec_template = spec_file.read()

    image_info = layer.docker_resource.get_info('consumer_image')

    data = {
        'name': hookenv.charm_name(),
        'docker_image_path': image_info.registry_path,
        'docker_image_username': image_info.username,
        'docker_image_password': image_info.password,
        'base_url': base_url,
    }
    return pod_spec_template.format(**data)
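# A sketch of what reactive/spec_template.yaml could look like; the exact
# pod spec layout is an assumption, but it must expose the str.format()
# placeholders that make_pod_spec() fills in.
SPEC_TEMPLATE = """\
containers:
  - name: {name}
    imageDetails:
      imagePath: {docker_image_path}
      username: {docker_image_username}
      password: {docker_image_password}
    config:
      BASE_URL: {base_url}
"""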
def record_packages(layer_name, names, charm_name=None):
    """
    Record the list of packages installed by the current unit's layer.
    """
    if charm_name is None:
        charm_name = hookenv.charm_name()
    if not os.path.isdir('/var/lib/storpool'):
        os.mkdir('/var/lib/storpool', mode=0o700)
    with open(charm_install_list_file(), mode='at'):
        # Just making sure the file exists so we can open it as r+t.
        pass
    with open(charm_install_list_file(), mode='r+t') as listf:
        fcntl.lockf(listf, fcntl.LOCK_EX)

        # OK, we're ready to go now
        contents = listf.read()
        if len(contents) > 0:
            data = json.loads(contents)
        else:
            data = {'charms': {}}
        if charm_name not in data['charms']:
            data['charms'][charm_name] = {'layers': {}}
        layers = data['charms'][charm_name]['layers']
        if layer_name not in layers:
            layers[layer_name] = {'packages': []}
        layer = layers[layer_name]

        pset = set(layer['packages'])
        cset = set.union(pset, set(names))
        layer['packages'] = list(sorted(cset))

        # Hm, any packages that no longer need to be uninstalled?
        if 'packages' not in data:
            data['packages'] = {'remove': []}
        data['packages']['remove'] = \
            list(sorted(set(data['packages']['remove']).difference(cset)))

        # Right, so let's write it back
        listf.seek(0)
        print(json.dumps(data), file=listf)
        listf.truncate()
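# charm_install_list_file() is used above but not shown; a minimal sketch
# under the assumption that the record lives in the /var/lib/storpool
# directory created above (the filename itself is hypothetical).
def charm_install_list_file():
    """Return the path of the JSON file recording installed packages."""
    return '/var/lib/storpool/install-packages.json'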
def generate_external_ids(external_id_value=None):
    """Generate external-ids dictionary that can be used to mark OVS bridges
    and ports as managed by the charm.

    :param external_id_value: Value of the external-ids entry.
                              Note: 'managed' will be used if not specified.
    :type external_id_value: Optional[str]
    :returns: Dict with a single external-ids entry.
        {
            'external-ids': {
                charm-``charm_name``: ``external_id_value``
            }
        }
    :rtype: Dict[str, Dict[str, str]]
    """
    external_id_key = "charm-{}".format(charm_name())
    external_id_value = ('managed' if external_id_value is None
                         else external_id_value)
    return {'external-ids': {external_id_key: external_id_value}}
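# Example output for a charm named 'ovn-chassis' (the charm name is
# illustrative):
#
# >>> generate_external_ids()
# {'external-ids': {'charm-ovn-chassis': 'managed'}}
# >>> generate_external_ids('br-provider')
# {'external-ids': {'charm-ovn-chassis': 'br-provider'}}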
def update_dns_ha_resource_params(resources, resource_params,
                                  relation_id=None,
                                  crm_ocf='ocf:maas:dns'):
    """Configure DNS-HA resources based on provided configuration and
    update resource dictionaries for the HA relation.

    @param resources: Pointer to dictionary of resources.
                      Usually instantiated in ha_joined().
    @param resource_params: Pointer to dictionary of resource parameters.
                            Usually instantiated in ha_joined()
    @param relation_id: Relation ID of the ha relation
    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                    DNS HA
    """
    _relation_data = {'resources': {}, 'resource_params': {}}
    update_hacluster_dns_ha(charm_name(),
                            _relation_data,
                            crm_ocf)

    resources.update(_relation_data['resources'])
    resource_params.update(_relation_data['resource_params'])
    relation_set(relation_id=relation_id, groups=_relation_data['groups'])
def request_keystone_auth(keystone):
    """Request Keystone credentials, using the charm name as the username."""
    keystone.request_credentials(username=charm_name())
def ha_relation_joined(relation_id=None):
    cluster_config = get_hacluster_config()
    sstpsswd = sst_password()
    resources = {'res_percona': 'ocf:heartbeat:galera'}
    resource_params = {}
    vip_iface = (get_iface_for_address(cluster_config['vip']) or
                 config('vip_iface'))
    vip_cidr = (get_netmask_for_address(cluster_config['vip']) or
                config('vip_cidr'))

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
        group_name = 'grp_{}_hostnames'.format(charm_name())
        groups = {group_name: 'res_{}_access_hostname'.format(charm_name())}

    if config('prefer-ipv6'):
        res_mysql_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'params ipv6addr="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)
    else:
        res_mysql_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                     (cluster_config['vip'], vip_cidr, vip_iface)

    hostname_list = get_cluster_hostnames()
    percona_params = \
        ' params ' \
        ' wsrep_cluster_address="gcomm://' + ",".join(hostname_list) + '"' \
        ' config="' + resolve_cnf_file() + '"' \
        ' datadir="/var/lib/percona-xtradb-cluster"' \
        ' socket="/var/run/mysqld/mysqld.sock" ' \
        ' pid="/var/run/mysqld/mysqld.pid"' \
        ' check_user=sstuser check_passwd="' + sstpsswd + '"' \
        ' binary="/usr/bin/mysqld_safe"' \
        ' op monitor timeout=120 interval=20 depth=0' \
        ' op monitor role=Master timeout=120 interval=10 depth=0' \
        ' op monitor role=Slave timeout=120 interval=30 depth=0'

    percona_ms = {'ms_percona': 'res_percona meta notify=true '
                                'interleave=true master-max=3 '
                                'ordered=true target-role=Started'}

    resource_params['res_percona'] = percona_params

    if not config('dns-ha'):
        # Only manage the VIP resource and group when DNS HA is not in
        # use; otherwise this would clobber the hostname group set above
        # (mirroring the if/else split in the monitor-based variant).
        resources['res_mysql_vip'] = res_mysql_vip
        resource_params['res_mysql_vip'] = vip_params
        group_name = 'grp_percona_cluster'
        groups = {group_name: 'res_mysql_vip'}

    colocations = {'colo_percona_cluster':
                   '+inf: {} ms_percona:Master'.format(group_name)}

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     corosync_bindiface=cluster_config['ha-bindiface'],
                     corosync_mcastport=cluster_config['ha-mcastport'],
                     resources=resources,
                     resource_params=resource_params,
                     ms=percona_ms,
                     groups=groups,
                     colocations=colocations)
def update_dns_ha_resource_params(resources, resource_params,
                                  relation_id=None,
                                  crm_ocf='ocf:maas:dns'):
    """Check for os-*-hostname settings and update resource dictionaries
    for the HA relation.

    @param resources: Pointer to dictionary of resources.
                      Usually instantiated in ha_joined().
    @param resource_params: Pointer to dictionary of resource parameters.
                            Usually instantiated in ha_joined()
    @param relation_id: Relation ID of the ha relation
    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
                    DNS HA
    """
    # Validate the charm environment for DNS HA
    assert_charm_supports_dns_ha()

    settings = ['os-admin-hostname', 'os-internal-hostname',
                'os-public-hostname', 'os-access-hostname']

    # Check which DNS settings are set and update dictionaries
    hostname_group = []
    for setting in settings:
        hostname = config(setting)
        if hostname is None:
            log('DNS HA: Hostname setting {} is None. Ignoring.'
                ''.format(setting), DEBUG)
            continue
        m = re.search('os-(.+?)-hostname', setting)
        if m:
            networkspace = m.group(1)
        else:
            msg = ('Unexpected DNS hostname setting: {}. '
                   'Cannot determine network space name'
                   ''.format(setting))
            status_set('blocked', msg)
            raise DNSHAException(msg)

        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
        if hostname_key in hostname_group:
            log('DNS HA: Resource {}: {} already exists in '
                'hostname group - skipping'.format(hostname_key, hostname),
                DEBUG)
            continue

        hostname_group.append(hostname_key)
        resources[hostname_key] = crm_ocf
        resource_params[hostname_key] = (
            'params fqdn="{}" ip_address="{}" '
            ''.format(hostname,
                      resolve_address(endpoint_type=networkspace,
                                      override=False)))

    if len(hostname_group) >= 1:
        log('DNS HA: Hostname group is set with {} as members. '
            'Informing the ha relation'.format(' '.join(hostname_group)),
            DEBUG)
        relation_set(relation_id=relation_id, groups={
            'grp_{}_hostnames'.format(charm_name()):
                ' '.join(hostname_group)})
    else:
        msg = 'DNS HA: Hostname group has no members.'
        status_set('blocked', msg)
        raise DNSHAException(msg)
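# Illustrative only: for a charm named 'keystone' with
# os-public-hostname=ks.example.com, the loop above would add entries such
# as (the resolved IP address here is hypothetical):
#
#   resources['res_keystone_public_hostname'] = 'ocf:maas:dns'
#   resource_params['res_keystone_public_hostname'] = (
#       'params fqdn="ks.example.com" ip_address="10.5.0.10" ')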
def __call__(self):
    sysctl_dict = config('sysctl')
    if sysctl_dict:
        sysctl_create(sysctl_dict,
                      '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
    return {'sysctl': sysctl_dict}
def send_instance_info(self):
    self._to_publish['charm'] = hookenv.charm_name()
    self._to_publish['vm-id'] = self.vm_id
    self._to_publish['vm-name'] = self.vm_name
    self._to_publish['res-group'] = self.resource_group
    self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
def send_instance_info(self):
    self._to_publish['charm'] = hookenv.charm_name()
    self._to_publish['instance'] = self.instance
    self._to_publish['zone'] = self.zone
    self._to_publish['model-uuid'] = os.environ['JUJU_MODEL_UUID']
def unrecord_packages(layer_name, charm_name=None):
    """
    Remove the packages installed by the specified unit's layer from the
    record.  Uninstall those of them that are not wanted by any other
    unit's layer.
    """
    if charm_name is None:
        charm_name = hookenv.charm_name()
    try:
        with open(charm_install_list_file(), mode='r+t') as listf:
            fcntl.lockf(listf, fcntl.LOCK_EX)

            # ...and it must contain valid JSON?
            data = json.loads(listf.read())

            packages = set()
            has_layer = False
            has_charm = charm_name in data['charms']
            changed = False
            if has_charm:
                layers = data['charms'][charm_name]['layers']
                has_layer = layer_name in layers
                if has_layer:
                    layer = layers[layer_name]
                    packages = set(layer['packages'])
                    del layers[layer_name]
                    changed = True
                    if not layers:
                        del data['charms'][charm_name]

            # Right, so let's write it back if needed
            if changed:
                listf.seek(0)
                print(json.dumps(data), file=listf)
                listf.truncate()
                changed = False

            if 'packages' not in data:
                data['packages'] = {'remove': []}
            try_remove = set(data['packages']['remove']).union(packages)
            for cdata in data['charms'].values():
                for layer in cdata['layers'].values():
                    try_remove = try_remove.difference(set(layer['packages']))
            if try_remove != set(data['packages']['remove']):
                changed = True

            removed = set()
            while True:
                removed_now = set()

                # Sigh... don't we just love special cases...
                pkgs = set(['libwww-perl', 'liblwp-protocol-https-perl'])
                if pkgs.issubset(try_remove):
                    if subprocess.call(['dpkg', '-r', '--dry-run', '--'] +
                                       list(pkgs),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE) == 0:
                        subprocess.call(['dpkg', '--purge', '--'] +
                                        list(pkgs))
                        removed_now = removed_now.union(pkgs)
                        changed = True

                # Now go for them all
                for pkg in try_remove:
                    if subprocess.call(['dpkg', '-r', '--dry-run', '--', pkg],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE) != 0:
                        continue
                    subprocess.call(['dpkg', '--purge', '--', pkg])
                    removed_now.add(pkg)
                    changed = True

                if removed_now:
                    removed = removed.union(removed_now)
                    try_remove = try_remove.difference(removed_now)
                else:
                    break

            data['packages']['remove'] = \
                list(sorted(try_remove.difference(removed)))

            # Let's write it back again if needed
            if changed:
                listf.seek(0)
                print(json.dumps(data), file=listf)
                listf.truncate()
    except FileNotFoundError:
        pass
def module_name():
    """
    Use the charm name as a base for the module name passed to txn-install.
    """
    return 'charm-' + hookenv.charm_name()
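# Example: for a charm named 'storpool-block' (illustrative), module_name()
# returns 'charm-storpool-block'; txn-install records this unit's file
# changes under that module name.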