def haproxy_generate_config(
    self,
    daemon_spec: CephadmDaemonDeploySpec,
) -> Tuple[Dict[str, Any], List[str]]:
    spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
    assert spec.backend_service
    daemons = self.mgr.cache.get_daemons_by_service(spec.backend_service)
    deps = [d.name() for d in daemons]

    # generate password?
    pw_key = f'{spec.service_name()}/monitor_password'
    password = self.mgr.get_store(pw_key)
    if password is None:
        if not spec.monitor_password:
            password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
            self.mgr.set_store(pw_key, password)
    else:
        if spec.monitor_password:
            self.mgr.set_store(pw_key, None)
    if spec.monitor_password:
        password = spec.monitor_password

    haproxy_conf = self.mgr.template.render(
        'services/ingress/haproxy.cfg.j2',
        {
            'spec': spec,
            'servers': [{
                'name': d.name(),
                'ip': d.ip or resolve_ip(str(d.hostname)),
                'port': d.ports[0],
            } for d in daemons if d.ports],
            'user': spec.monitor_user or 'admin',
            'password': password,
            'ip': daemon_spec.ip or '*',
            'frontend_port': daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port,
            'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port,
        })
    config_files = {
        'files': {
            "haproxy.cfg": haproxy_conf,
        }
    }
    if spec.ssl_cert:
        ssl_cert = spec.ssl_cert
        if isinstance(ssl_cert, list):
            ssl_cert = '\n'.join(ssl_cert)
        config_files['files']['haproxy.pem'] = ssl_cert
    return config_files, sorted(deps)
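
# A minimal, standalone sketch (not part of the module) of the monitor
# password handling above: the password is generated once, persisted in the
# mgr key/value store under <service_name>/monitor_password, and reused on
# later reconfigs unless the spec carries an explicit monitor_password (in
# which case the stored copy is dropped). The dict below stands in for
# self.mgr.get_store()/set_store(); all names here are hypothetical.
import random
import string
from typing import Dict, Optional


def example_monitor_password(store: Dict[str, str], spec_password: Optional[str]) -> str:
    pw_key = 'ingress.example/monitor_password'
    password = store.get(pw_key)
    if password is None:
        if not spec_password:
            # first deployment with no explicit password: generate and persist one
            password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
            store[pw_key] = password
    else:
        if spec_password:
            # an explicit password is now configured: forget the generated one
            store.pop(pw_key, None)
    if spec_password:
        password = spec_password
    return password
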
def _show_nfs_cluster_info(self, cluster_id: str) -> Dict[str, Any]:
    self._set_cluster_id(cluster_id)
    completion = self.mgr.list_daemons(daemon_type='nfs')
    orchestrator.raise_if_exception(completion)
    backends: List[Dict[str, Union[str, int]]] = []
    # completion.result is a list of DaemonDescription objects
    for cluster in completion.result:
        if self.cluster_id == cluster.service_id():
            try:
                if cluster.ip:
                    ip = cluster.ip
                else:
                    c = self.mgr.get_hosts()
                    orchestrator.raise_if_exception(c)
                    hosts = [h for h in c.result if h.hostname == cluster.hostname]
                    if hosts:
                        ip = resolve_ip(hosts[0].addr)
                    else:
                        # fall back to resolving the daemon's hostname directly
                        ip = resolve_ip(cluster.hostname)
                backends.append({
                    "hostname": cluster.hostname,
                    "ip": ip,
                    "port": cluster.ports[0]
                })
            except orchestrator.OrchestratorError:
                continue

    r: Dict[str, Any] = {
        'virtual_ip': None,
        'backend': backends,
    }
    sc = self.mgr.describe_service(service_type='ingress')
    orchestrator.raise_if_exception(sc)
    for i in sc.result:
        spec = cast(IngressSpec, i.spec)
        if spec.backend_service == f'nfs.{cluster_id}':
            r['virtual_ip'] = i.virtual_ip.split('/')[0]
            if i.ports:
                r['port'] = i.ports[0]
                if len(i.ports) > 1:
                    r['monitor_port'] = i.ports[1]
    return r
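
# For illustration only (hypothetical hostnames, addresses, and ports): with a
# single backend NFS daemon and an ingress service in front of
# nfs.<cluster_id>, the structure returned above looks roughly like this.
# 'port' and 'monitor_port' appear only when the ingress service reports ports.
example_cluster_info = {
    'virtual_ip': '10.0.0.10',
    'backend': [
        {'hostname': 'host1', 'ip': '10.0.0.21', 'port': 12049},
    ],
    'port': 2049,
    'monitor_port': 9049,
}
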
def haproxy_generate_config(
    self,
    daemon_spec: CephadmDaemonDeploySpec,
) -> Tuple[Dict[str, Any], List[str]]:
    spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
    assert spec.backend_service
    if spec.backend_service not in self.mgr.spec_store:
        raise RuntimeError(
            f'{spec.service_name()} backend service {spec.backend_service} does not exist')
    backend_spec = self.mgr.spec_store[spec.backend_service].spec
    daemons = self.mgr.cache.get_daemons_by_service(spec.backend_service)
    deps = [d.name() for d in daemons]

    # generate password?
    pw_key = f'{spec.service_name()}/monitor_password'
    password = self.mgr.get_store(pw_key)
    if password is None:
        if not spec.monitor_password:
            password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
            self.mgr.set_store(pw_key, password)
    else:
        if spec.monitor_password:
            self.mgr.set_store(pw_key, None)
    if spec.monitor_password:
        password = spec.monitor_password

    if backend_spec.service_type == 'nfs':
        mode = 'tcp'
        by_rank = {d.rank: d for d in daemons if d.rank is not None}
        servers = []

        # try to establish how many ranks we *should* have
        num_ranks = backend_spec.placement.count
        if not num_ranks:
            num_ranks = 1 + max(by_rank.keys())

        for rank in range(num_ranks):
            if rank in by_rank:
                d = by_rank[rank]
                assert d.ports
                servers.append({
                    'name': f"{spec.backend_service}.{rank}",
                    'ip': d.ip or resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
                    'port': d.ports[0],
                })
            else:
                # offline/missing server; leave rank in place
                servers.append({
                    'name': f"{spec.backend_service}.{rank}",
                    'ip': '0.0.0.0',
                    'port': 0,
                })
    else:
        mode = 'http'
        servers = [{
            'name': d.name(),
            'ip': d.ip or resolve_ip(self.mgr.inventory.get_addr(str(d.hostname))),
            'port': d.ports[0],
        } for d in daemons if d.ports]

    haproxy_conf = self.mgr.template.render(
        'services/ingress/haproxy.cfg.j2',
        {
            'spec': spec,
            'mode': mode,
            'servers': servers,
            'user': spec.monitor_user or 'admin',
            'password': password,
            'ip': str(spec.virtual_ip).split('/')[0] if spec.virtual_ip else (daemon_spec.ip or '*'),
            'frontend_port': daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port,
            'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port,
        })
    config_files = {
        'files': {
            "haproxy.cfg": haproxy_conf,
        }
    }
    if spec.ssl_cert:
        ssl_cert = spec.ssl_cert
        if isinstance(ssl_cert, list):
            ssl_cert = '\n'.join(ssl_cert)
        config_files['files']['haproxy.pem'] = ssl_cert
    return config_files, sorted(deps)
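
# A minimal sketch (not part of the module) of the rank back-filling used for
# an 'nfs' backend above: every rank up to the expected count gets a server
# entry, and ranks with no running daemon are kept in place as 0.0.0.0:0 so
# the haproxy server slots stay stable across redeployments. Plain dicts stand
# in for DaemonDescription; the service name and addresses are hypothetical.
from typing import Any, Dict, List


def example_fill_ranks(by_rank: Dict[int, Dict[str, Any]], num_ranks: int,
                       backend_service: str = 'nfs.foo') -> List[Dict[str, Any]]:
    servers = []
    for rank in range(num_ranks):
        if rank in by_rank:
            d = by_rank[rank]
            servers.append({'name': f'{backend_service}.{rank}',
                            'ip': d['ip'], 'port': d['port']})
        else:
            # offline/missing server; leave rank in place
            servers.append({'name': f'{backend_service}.{rank}',
                            'ip': '0.0.0.0', 'port': 0})
    return servers


# example_fill_ranks({0: {'ip': '10.0.0.21', 'port': 12049}}, 2) yields entries
# for nfs.foo.0 at 10.0.0.21:12049 and nfs.foo.1 at 0.0.0.0:0.
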
def keepalived_generate_config(
    self,
    daemon_spec: CephadmDaemonDeploySpec,
) -> Tuple[Dict[str, Any], List[str]]:
    spec = cast(IngressSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
    assert spec.backend_service

    # generate password?
    pw_key = f'{spec.service_name()}/keepalived_password'
    password = self.mgr.get_store(pw_key)
    if password is None:
        if not spec.keepalived_password:
            password = ''.join(random.choice(string.ascii_lowercase) for _ in range(20))
            self.mgr.set_store(pw_key, password)
    else:
        if spec.keepalived_password:
            self.mgr.set_store(pw_key, None)
    if spec.keepalived_password:
        password = spec.keepalived_password

    daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
    if not daemons:
        raise OrchestratorError(
            f'Failed to generate keepalived.conf: No daemons deployed for {spec.service_name()}')

    deps = sorted([d.name() for d in daemons if d.daemon_type == 'haproxy'])

    host = daemon_spec.host
    hosts = sorted(list(set([host] + [str(d.hostname) for d in daemons])))

    # find the interface whose subnet contains the virtual IP
    bare_ip = str(spec.virtual_ip).split('/')[0]
    interface = None
    for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
        if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet):
            interface = list(ifaces.keys())[0]
            logger.info(
                f'{bare_ip} is in {subnet} on {host} interface {interface}')
            break
    # otherwise, try to find an interface by matching spec.virtual_interface_networks
    if not interface and spec.virtual_interface_networks:
        for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
            if subnet in spec.virtual_interface_networks:
                interface = list(ifaces.keys())[0]
                logger.info(
                    f'{spec.virtual_ip} will be configured on {host} interface '
                    f'{interface} (which has guiding subnet {subnet})')
                break
    if not interface:
        raise OrchestratorError(
            f"Unable to identify interface for {spec.virtual_ip} on {host}")

    # script to monitor health
    script = '/usr/bin/false'
    for d in daemons:
        if d.hostname == host:
            if d.daemon_type == 'haproxy':
                assert d.ports
                port = d.ports[1]   # monitoring port
                script = f'/usr/bin/curl {build_url(scheme="http", host=d.ip or "localhost", port=port)}/health'
    assert script

    # set state: the first host in the placement is MASTER, all others are BACKUP
    state = 'BACKUP'
    if hosts[0] == host:
        state = 'MASTER'

    # drop the host this daemon is being deployed on from the hosts list used
    # for other_ips in the conf file, and convert the remaining hosts to IPs
    if host in hosts:
        hosts.remove(host)
    other_ips = [resolve_ip(self.mgr.inventory.get_addr(h)) for h in hosts]

    keepalived_conf = self.mgr.template.render(
        'services/ingress/keepalived.conf.j2',
        {
            'spec': spec,
            'script': script,
            'password': password,
            'interface': interface,
            'state': state,
            'other_ips': other_ips,
            'host_ip': resolve_ip(self.mgr.inventory.get_addr(host)),
        })

    config_file = {
        'files': {
            "keepalived.conf": keepalived_conf,
        }
    }
    return config_file, deps
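
# A minimal, standalone sketch (not part of the module) of the interface
# selection done above: the bare virtual IP is tested against every subnet
# known for the host, and the first interface on a matching subnet wins.
# 'networks' mirrors the shape of self.mgr.cache.networks[host]; the sample
# data in the comment below is hypothetical.
import ipaddress
from typing import Dict, List, Optional


def example_pick_interface(virtual_ip: str,
                           networks: Dict[str, Dict[str, List[str]]]) -> Optional[str]:
    bare_ip = virtual_ip.split('/')[0]
    for subnet, ifaces in networks.items():
        if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet):
            return list(ifaces.keys())[0]
    return None


# example_pick_interface('10.0.0.10/24', {'10.0.0.0/24': {'eth0': ['10.0.0.2']}})
# returns 'eth0'.
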