def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.

    Called from charm_status.
    """
    try:
        relations = hookenv.goal_state().get("relations", {})
    except NotImplementedError:
        # Older controllers have no goal-state support.
        relations = {}

    if "kube-control" not in relations:
        hookenv.status_set(
            "blocked",
            "Relate {}:kube-control kubernetes-control-plane:kube-control".format(
                hookenv.service_name()
            ),
        )
        return True
    if not is_flag_set("kube-control.connected"):
        # Relation is planned but not joined yet: just wait.
        hookenv.status_set(
            "waiting", "Waiting for kubernetes-control-plane to become ready"
        )
        return True
    return False
def blocked():
    """Report why the charm cannot start and clear the started flag.

    Uses goal state (when the controller supports it) to distinguish
    "redis relation added but not yet ready" (waiting) from "redis
    relation missing entirely" (blocked). Clearing the started flag
    ensures setup re-runs once redis becomes available.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older Juju controllers don't implement goal-state; treat the
        # relation as missing rather than crashing the hook.
        goal_state = {}
    if 'redis' in goal_state.get('relations', {}):
        layer.status.waiting('waiting for redis')
    else:
        layer.status.blocked('missing relation to redis')
    clear_flag('charm.kubeflow-seldon-cluster-manager.started')
def get_sandbox_image():
    '''Return the container image location for the sandbox_image.

    Set an appropriate sandbox image based on known registries.
    Precedence should be:
    - related docker-registry
    - default charmed k8s registry (if related to kubernetes)
    - upstream

    :return: str container image location
    '''
    canonical_registry = 'rocks.canonical.com:443/cdk'
    upstream_registry = 'k8s.gcr.io'

    docker_registry = unitdata.kv().get('registry', None)
    if docker_registry:
        # A related docker-registry always wins.
        sandbox_registry = docker_registry['url']
    else:
        try:
            relations = hookenv.goal_state().get(
                'relations', {}).get('containerd', {})
        except NotImplementedError:
            # No goal-state on this controller: enumerate the current
            # containerd relations directly instead.
            relations = [hookenv.remote_service_name(rid)
                         for rid in hookenv.relation_ids('containerd')]
        k8s_apps = ('kubernetes-master', 'kubernetes-worker')
        if any(app in relations for app in k8s_apps):
            sandbox_registry = canonical_registry
        else:
            sandbox_registry = upstream_registry
    return '{}/pause-{}:3.1'.format(sandbox_registry, host.arch())
def _cluster_size(self):
    """Get cluster size.

    Return the number of units in goal state.

    :returns: Cluster size
    :rtype: Int
    """
    units = hookenv.goal_state()["units"]
    return len(units)
def wait_for_db():
    """Set a waiting/blocked status depending on the db relation state."""
    try:
        relations = hookenv.goal_state().get('relations', {})
    except NotImplementedError:
        # Controller predates goal-state; assume the relation is missing.
        relations = {}

    if 'db' in relations:
        layer.status.waiting('Waiting for Postgres')
    else:
        layer.status.blocked('Missing postgresql:db-admin relation')
def waiting_for_certificate_relation():
    """Set a waiting status describing the certificates relation state."""
    try:
        relations = hookenv.goal_state().get('relations', {})
    except NotImplementedError:
        # No goal-state on this controller.
        relations = {}

    if 'certificates' in relations:
        message = 'Waiting for certificate relation to become ready'
    else:
        message = 'Requires certificate relation established'
    hookenv.status_set('waiting', message)
def config_available(self):
    """Ensures all config from the CNI plugin is available."""
    goal_state = hookenv.goal_state()
    entries = goal_state.get("relations", {}).get(self.endpoint_name, "")
    # Goal state lists both applications and units; unit names contain
    # a "/", so keep only the application entries.
    related_apps = [entry for entry in entries if "/" not in entry]
    if not related_apps:
        return False
    configs = self.get_configs()
    for app in related_apps:
        config = configs.get(app, {})
        if "cidr" not in config or "cni-conf-file" not in config:
            return False
    return True
def expected_units(self) -> List[str]:
    # Goal state looks like this:
    #
    # relations: {}
    # units:
    #   postgresql/0:
    #     since: '2020-08-31 11:05:32Z'
    #     status: active
    #   postgresql/1:
    #     since: '2020-08-31 11:05:54Z'
    #     status: maintenance
    def unit_number(unit_name):
        # Order by the numeric suffix, not lexicographically.
        return int(unit_name.rsplit("/", 1)[-1])

    unit_names = hookenv.goal_state().get("units", {})
    return sorted(unit_names, key=unit_number)
def install(self):
    """Install packages related to this charm based on
    contents of self.packages attribute, after first
    configuring the installation source.
    """
    # Use goal-state to determine if we are expecting multiple units
    # and, if so, mask the manila-share service so that it only ever
    # gets run by pacemaker.
    _goal_state = goal_state()
    # Unit entries contain a '/'; exclude ourselves to find peers.
    peers = [key for key in _goal_state['units']
             if '/' in key and key != local_unit()]
    # Idiomatic truthiness test instead of len(list(generator)) > 0.
    if peers:
        service_pause('manila-share')
    super().install()
def get_internal_api_endpoints(relation=None):
    """
    Determine the best API endpoints for an internal client to connect to.

    If a relation is given, it will try to take that into account.

    May return an empty list if an endpoint is expected but not yet
    available.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older Juju controllers have no goal-state support.
        goal_state = {}
    goal_state.setdefault("relations", {})

    # Config takes precedence.
    endpoints_from_config = get_endpoints_from_config()
    if endpoints_from_config:
        return endpoints_from_config

    # If the internal LB relation is attached, use that or nothing. If it's
    # not attached but the external LB relation is, use that or nothing.
    for lb_type in ("internal", "external"):
        lb_endpoint = "loadbalancer-" + lb_type
        request_name = "api-server-" + lb_type
        api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT
        if lb_endpoint in goal_state["relations"]:
            lb_provider = endpoint_from_name(lb_endpoint)
            lb_response = lb_provider.get_response(request_name)
            if not lb_response or lb_response.error:
                # LB relation is expected but not ready: report "nothing
                # available yet" rather than falling through to a fallback.
                return []
            return [(lb_response.address, api_port)]

    # Support the older loadbalancer relation (public-address interface).
    if "loadbalancer" in goal_state["relations"]:
        loadbalancer = endpoint_from_name("loadbalancer")
        lb_addresses = loadbalancer.get_addresses_ports()
        # NOTE(review): the loop variable "host" shadows the charmhelpers
        # host module used elsewhere in this file; here it is a dict per
        # load-balancer entry.
        return [(host.get("public-address"), host.get("port")) for host in lb_addresses]

    # No LBs of any kind, so fall back to ingress-address.
    if not relation:
        kube_control = endpoint_from_name("kube-control")
        if not kube_control.relations:
            return []
        relation = kube_control.relations[0]
    ingress_address = hookenv.ingress_address(relation.relation_id, hookenv.local_unit())
    return [(ingress_address, STANDARD_API_PORT)]
def has_rbd_mirrors():
    """Determine if we have or will have ``rbd-mirror`` charms related.

    :returns: True or False
    :rtype: bool
    """
    try:
        # NOTE(fnordahl): This optimization will not be useful until we get a
        # resolution on LP: #1818245
        raise NotImplementedError
        gs = goal_state()
        return 'rbd-mirror' in gs.get('relations', {})
    except NotImplementedError:
        for relid in relation_ids('rbd-mirror'):
            if related_units(relid):
                return True
        # Previously fell off the end and returned None, contradicting
        # the documented bool return; be explicit.
        return False
def config_available(self):
    ''' Ensures all config from the CNI plugin is available. '''
    relations = hookenv.goal_state().get('relations', {})
    entries = relations.get(self.endpoint_name, '')
    # Unit names contain a '/', application names don't; we only want apps.
    related_apps = [name for name in entries if '/' not in name]
    if not related_apps:
        return False
    configs = self.get_configs()
    required_keys = ('cidr', 'cni-conf-file')
    return all(
        all(key in configs.get(app, {}) for key in required_keys)
        for app in related_apps
    )
def has_rbd_mirrors():
    """Determine if we have or will have ``rbd-mirror`` charms related.

    :returns: True or False
    :rtype: bool
    """
    try:
        # NOTE(fnordahl): This optimization will not be useful until we get a
        # resolution on LP: #1818245
        raise NotImplementedError
        gs = goal_state()
        return 'rbd-mirror' in gs.get('relations', {})
    except NotImplementedError:
        for relid in relation_ids('rbd-mirror'):
            if related_units(relid):
                return True
        # Previously fell off the end and returned None, contradicting
        # the documented bool return; be explicit.
        return False
def api_server_broken():
    """Tear down aws-iam resources once the relation is truly gone."""
    try:
        relations = hookenv.goal_state().get('relations', {})
    except NotImplementedError:
        relations = {}

    # just a blip if the goal state still has it.
    if 'aws-iam' in relations:
        return

    # forget all the things. The service IP will change
    # if we lose our cluster, which will domino into everything
    if is_flag_set('leadership.is_leader'):
        leader_set({'cert': None, 'key': None, 'service_ip': None})
    _remove_service()
    _remove_certificate()
    _remove_deployment()
    _remove_webhook()
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    try:
        relations = hookenv.goal_state().get('relations', {})
    except NotImplementedError:
        # Controller predates goal-state support.
        relations = {}

    if 'kube-control' in relations:
        hookenv.status_set(
            'waiting', 'Waiting for kubernetes-master to become ready')
    else:
        message = 'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name())
        hookenv.status_set('blocked', message)
def get_external_api_endpoints():
    """
    Determine the best API endpoints for an external client to connect to.

    May return an empty list if an endpoint is expected but not yet
    available.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older Juju controllers have no goal-state support.
        goal_state = {}
    goal_state.setdefault("relations", {})

    # Config takes precedence.
    endpoints_from_config = get_endpoints_from_config()
    if endpoints_from_config:
        return endpoints_from_config

    # If the external LB relation is attached, use that or nothing. If it's
    # not attached but the internal LB relation is, use that or nothing.
    for lb_type in ("external", "internal"):
        lb_endpoint = "loadbalancer-" + lb_type
        lb_name = "api-server-" + lb_type
        api_port = EXTERNAL_API_PORT if lb_type == "external" else STANDARD_API_PORT
        if lb_endpoint in goal_state["relations"]:
            lb_provider = endpoint_from_name(lb_endpoint)
            lb_response = lb_provider.get_response(lb_name)
            if not lb_response or lb_response.error:
                # LB relation is expected but not ready: report "nothing
                # available yet" rather than falling through to a fallback.
                return []
            return [(lb_response.address, api_port)]

    # Support the older loadbalancer relation (public-address interface).
    if "loadbalancer" in goal_state["relations"]:
        loadbalancer = endpoint_from_name("loadbalancer")
        lb_addresses = loadbalancer.get_addresses_ports()
        # NOTE(review): the loop variable "host" shadows the charmhelpers
        # host module used elsewhere in this file; here it is a dict per
        # load-balancer entry.
        return [(host.get("public-address"), host.get("port")) for host in lb_addresses]

    # No LBs of any kind, so fall back to public-address.
    return [(hookenv.unit_public_ip(), STANDARD_API_PORT)]
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # No goal-state on this controller.
        goal_state = {}

    if 'kube-control' in goal_state.get('relations', {}):
        # Relation is planned; it just hasn't converged yet.
        hookenv.status_set(
            'waiting', 'Waiting for kubernetes-master to become ready')
    else:
        hookenv.status_set(
            'blocked',
            'Relate {}:kube-control kubernetes-master:kube-control'.format(
                hookenv.service_name()))
def on_config_changed(self, event: ops.charm.ConfigChangedEvent):
    """Check that we're leader, and if so, set up the pod."""
    if not self.model.unit.is_leader():
        # Only the leader can set_spec(); everyone else just reports OK.
        log.info("Spec changes ignored by non-leader")
        self.model.unit.status = ops.model.ActiveStatus()
        return

    goal_state = hookenv.goal_state()
    log.info("Goal state <<EOM\n{}\nEOM".format(yaml.dump(goal_state)))

    spec = self.make_pod_spec()
    resources = self.make_pod_resources()

    msg = "Configuring pod"
    log.info(msg)
    self.model.unit.status = ops.model.MaintenanceStatus(msg)

    self.model.pod.set_spec(spec, {"kubernetesResources": resources})

    msg = "Pod configured"
    log.info(msg)
    self.model.unit.status = ops.model.ActiveStatus(msg)