def get_auth_entity(self, daemon_id: str, host: str = "") -> AuthEntity:
    """
    Map the daemon id to a cephx keyring entity name
    """
    # despite this mapping entity names to daemons, self.TYPE within
    # the CephService class refers to service types, not daemon types
    client_suffixed = {'rgw', 'rbd-mirror', 'cephfs-mirror', 'nfs', 'iscsi', 'ingress'}
    host_keyed = {'crash', 'agent'}

    if self.TYPE in client_suffixed:
        return AuthEntity(f'client.{self.TYPE}.{daemon_id}')
    if self.TYPE in host_keyed:
        # these entities are keyed on host rather than daemon id
        if not host:
            raise OrchestratorError(
                f'Host not provided to generate <{self.TYPE}> auth entity name')
        return AuthEntity(f'client.{self.TYPE}.{host}')
    if self.TYPE == 'mon':
        # all monitors share the single 'mon.' entity
        return AuthEntity('mon.')
    if self.TYPE in {'mgr', 'osd', 'mds'}:
        return AuthEntity(f'{self.TYPE}.{daemon_id}')
    raise OrchestratorError("unknown daemon type")
async def _check_execute_command(
    self,
    host: str,
    cmd: List[str],
    stdin: Optional[str] = None,
    addr: Optional[str] = None,
) -> str:
    """Run ``cmd`` on ``host`` and return stdout, raising on nonzero exit.

    :raises OrchestratorError: when the command exits with a nonzero code.
    """
    stdout, stderr, rc = await self._execute_command(host, cmd, stdin, addr)
    if rc == 0:
        return stdout
    failure = f'Command {cmd} failed. {stderr}'
    logger.debug(failure)
    raise OrchestratorError(failure)
def _check_safe_to_destroy(self, mon_id: str) -> None:
    """Raise unless removing ``mon_id`` would leave the monitors with quorum.

    :raises OrchestratorError: if quorum status can't be parsed or the
        removal would break quorum.
    """
    ret, out, err = self.mgr.check_mon_command({
        'prefix': 'quorum_status',
    })
    try:
        status = json.loads(out)
    except Exception:
        raise OrchestratorError('failed to parse quorum status')

    mons = [m['name'] for m in status['monmap']['mons']]
    if mon_id not in mons:
        logger.info('Safe to remove mon.%s: not in monmap (%s)' % (
            mon_id, mons))
        return

    new_mons = [m for m in mons if m != mon_id]
    new_quorum = [m for m in status['quorum_names'] if m != mon_id]
    # quorum survives only if a strict majority of the remaining mons
    # would still be in it
    if len(new_quorum) > len(new_mons) / 2:
        logger.info('Safe to remove mon.%s: new quorum should be %s (from %s)' %
                    (mon_id, new_quorum, new_mons))
        return
    raise OrchestratorError(
        'Removing %s would break mon quorum (new quorum %s, new mons %s)'
        % (mon_id, new_quorum, new_mons))
def name_to_auth_entity(name) -> str:
    """
    Map from daemon names to ceph entity names (as seen in config)
    """
    daemon_type = name.split('.', 1)[0]
    # these daemon types live under the 'client.' entity prefix
    if daemon_type in {'rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi'}:
        return 'client.' + name
    if daemon_type == 'mon':
        # all monitors share the single 'mon.' entity
        return 'mon.'
    if daemon_type in {'osd', 'mds', 'mgr', 'client'}:
        return name
    raise OrchestratorError("unknown auth entity name")
def create(self, name, host, network):
    """
    Create a new monitor on the given host.
    """
    # get mon. key
    ret, keyring, err = self.mgr.check_mon_command({
        'prefix': 'auth get',
        'entity': 'mon.',
    })

    extra_config = '[mon.%s]\n' % name
    if network:
        # infer whether this is a CIDR network, addrvec, or plain IP
        if '/' in network:
            opt = 'public network'
        elif network.startswith('[v') and network.endswith(']'):
            opt = 'public addrv'
        elif ':' not in network:
            opt = 'public addr'
        else:
            raise OrchestratorError('Must specify a CIDR network, ceph addrvec, or plain IP: \'%s\'' % network)
        extra_config += '%s = %s\n' % (opt, network)
    else:
        # try to get the public_network from the config
        ret, network, err = self.mgr.check_mon_command({
            'prefix': 'config get',
            'who': 'mon',
            'key': 'public_network',
        })
        network = network.strip()  # type: ignore
        if not network:
            raise OrchestratorError('Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP')
        if '/' not in network:
            raise OrchestratorError('public_network is set but does not look like a CIDR network: \'%s\'' % network)
        extra_config += 'public network = %s\n' % network

    return self.mgr._create_daemon('mon', name, host,
                                   keyring=keyring,
                                   extra_config={'config': extra_config})
def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
    """Populate keyring and config on an agent daemon spec before deploy.

    :raises OrchestratorError: if the cephadm endpoint does not exist yet.
    """
    assert self.TYPE == daemon_spec.daemon_type
    daemon_id = daemon_spec.daemon_id
    host = daemon_spec.host

    # the agent reports back to the cephadm HTTP endpoint, so that must exist first
    if not self.mgr.cherrypy_thread:
        raise OrchestratorError('Cannot deploy agent before creating cephadm endpoint')

    entity = self.get_auth_entity(daemon_id, host=host)
    keyring = self.get_keyring_with_caps(entity, [])
    daemon_spec.keyring = keyring
    self.mgr.cache.agent_keys[host] = keyring

    daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
    return daemon_spec
def get_cluster_health(mgr: 'CephadmOrchestrator') -> str:
    """Return the cluster health status string (e.g. 'HEALTH_OK').

    :raises OrchestratorError: if the health output is not valid JSON.
    """
    # check cluster health
    ret, out, err = mgr.check_mon_command({
        'prefix': 'health',
        'format': 'json',
    })
    try:
        health = json.loads(out)
    except ValueError:
        msg = 'Failed to parse health status: Cannot decode JSON'
        logger.exception('%s: \'%s\'' % (msg, out))
        raise OrchestratorError('failed to parse health status')

    return health['status']
def get_pg_count(self, osd_id: str) -> int:
    """
    Queries for PG count of an OSD
    """
    self.mgr.log.debug("Querying for drain status")
    ret, out, err = self.mgr.mon_command({
        'prefix': 'osd drain status',
    })
    if ret != 0:
        self.mgr.log.error(f"Calling osd drain status failed with {err}")
        raise OrchestratorError("Could not query `osd drain status`")

    # the report is a list of per-OSD dicts; -1 means "not found"
    for entry in json.loads(out):
        if str(entry.get('osd_id', '')) == str(osd_id):
            return int(entry.get('pgs', -1))
    return -1
def name_to_auth_entity(daemon_type: str,
                        daemon_id: str,
                        host: str = "") -> str:
    """
    Map from daemon type/id (and host, for host-keyed daemons) to ceph
    entity names (as seen in config).

    :raises OrchestratorError: for an unknown daemon type, or when a
        'crash' entity is requested without a host.
    """
    # NOTE: the old '# type' comments were malformed ('# type Optional[str]'
    # is not a valid type comment); replaced with real annotations.
    if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'iscsi']:
        return 'client.' + daemon_type + "." + daemon_id
    elif daemon_type == 'crash':
        # crash entities are keyed by host rather than daemon id
        if host == "":
            raise OrchestratorError(
                "Host not provided to generate <crash> auth entity name")
        return 'client.' + daemon_type + "." + host
    elif daemon_type == 'mon':
        # all monitors share the single 'mon.' entity
        return 'mon.'
    elif daemon_type in ['mgr', 'osd', 'mds', 'client']:
        # the separate, identical 'mgr' branch was merged in here
        return daemon_type + "." + daemon_id
    else:
        raise OrchestratorError("unknown auth entity name")
def name_to_auth_entity(
    daemon_type: str,
    daemon_id: str,
    host: str = "",
) -> AuthEntity:
    """
    Map from daemon names/host to ceph entity names (as seen in config)
    """
    if daemon_type in ('rgw', 'rbd-mirror', 'nfs', 'iscsi'):
        return AuthEntity(f'client.{daemon_type}.{daemon_id}')
    if daemon_type == 'crash':
        # crash entities are keyed by host rather than daemon id
        if not host:
            raise OrchestratorError(
                "Host not provided to generate <crash> auth entity name")
        return AuthEntity(f'client.{daemon_type}.{host}')
    if daemon_type == 'mon':
        # all monitors share the single 'mon.' entity
        return AuthEntity('mon.')
    if daemon_type in ('mgr', 'osd', 'mds', 'client'):
        return AuthEntity(f'{daemon_type}.{daemon_id}')
    raise OrchestratorError("unknown auth entity name")
def fail_over(self) -> None:
    """Fail over from the active mgr to a standby.

    :raises OrchestratorError: if the mgr map reports no standby daemon.
    """
    # Daemon names are '<type>.<id>' (e.g. 'mgr.x'); the '.' separator was
    # missing here, attributing events to a nonexistent daemon like 'mgrx'.
    if not self.mgr_map_has_standby():
        raise OrchestratorError('Need standby mgr daemon', event_kind_subject=(
            'daemon', f'mgr.{self.mgr.get_mgr_id()}'))

    self.mgr.events.for_daemon(f'mgr.{self.mgr.get_mgr_id()}',
                               'INFO', 'Failing over to other MGR')
    logger.info('Failing over to other MGR')

    # fail over
    ret, out, err = self.mgr.check_mon_command({
        'prefix': 'mgr fail',
        'who': self.mgr.get_mgr_id(),
    })
def test_daemon_add_fail(self, _run_cephadm, cephadm_module):
    # A failed daemon deploy must roll back the cephx key that was created
    # for it: after add_mgr raises, 'auth rm' should have been issued for
    # the new mgr entity.
    _run_cephadm.return_value = '{}', '', 0
    with with_host(cephadm_module, 'test'):
        spec = ServiceSpec(service_type='mgr',
                           placement=PlacementSpec(
                               hosts=[HostPlacementSpec('test', '', 'x')],
                               count=1))
        # make the actual deployment step blow up
        _run_cephadm.side_effect = OrchestratorError('fail')
        with pytest.raises(OrchestratorError):
            wait(cephadm_module, cephadm_module.add_mgr(spec))
        # the partially-created daemon's keyring must have been cleaned up
        cephadm_module.assert_issued_mon_command({
            'prefix': 'auth rm',
            'entity': 'mgr.x',
        })
def get_or_create_keyring(self, entity: Optional[str] = None) -> str:
    """Fetch (creating if necessary) the keyring for ``entity``.

    Defaults to this service's own keyring entity when none is given.

    :raises OrchestratorError: if the mon command fails.
    """
    entity = entity or self.get_keyring_entity()

    logger.info('Create keyring: %s' % entity)
    ret, keyring, err = self.mgr.mon_command({
        'prefix': 'auth get-or-create',
        'entity': entity,
    })
    if ret != 0:
        raise OrchestratorError(
            'Unable to create keyring %s: %s %s' % (entity, ret, err))
    return keyring
def upgrade_start(self, image, version) -> str:
    """Kick off (or resume) an upgrade to the given image or ceph version.

    Exactly one of ``image`` or ``version`` should be provided; a version
    is translated to ``<container_image_base>:vX.Y.Z``.

    :returns: a human-readable status message.
    :raises OrchestratorError: on invalid arguments, unsupported mode or
        version, or a conflicting upgrade already in progress.
    """
    if self.mgr.mode != 'root':
        raise OrchestratorError('upgrade is not supported in %s mode' % (
            self.mgr.mode))
    if version:
        try:
            (major, minor, patch) = version.split('.')
            assert int(minor) >= 0
            assert int(patch) >= 0
        # was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; only parsing/validation failures belong here
        except (ValueError, AssertionError):
            raise OrchestratorError(
                'version must be in the form X.Y.Z (e.g., 15.2.3)')
        if int(major) < 15 or (int(major) == 15 and int(minor) < 2):
            raise OrchestratorError(
                'cephadm only supports octopus (15.2.0) or later')
        target_name = self.mgr.container_image_base + ':v' + version
    elif image:
        target_name = image
    else:
        raise OrchestratorError('must specify either image or version')

    if self.upgrade_state:
        if self.upgrade_state.target_name != target_name:
            raise OrchestratorError(
                'Upgrade to %s (not %s) already in progress' %
                (self.upgrade_state.target_name, target_name))
        if self.upgrade_state.paused:
            self.upgrade_state.paused = False
            self._save_upgrade_state()
            return 'Resumed upgrade to %s' % self.upgrade_state.target_name
        return 'Upgrade to %s in progress' % self.upgrade_state.target_name

    self.upgrade_state = UpgradeState(
        target_name=target_name,
        progress_id=str(uuid.uuid4())
    )
    self._update_upgrade_progress(0.0)
    self._save_upgrade_state()
    self._clear_upgrade_health_checks()
    self.mgr.event.set()
    return 'Initiating upgrade to %s' % (target_name)
def create_realm() -> None:
    """Create (and set as default) the RGW realm named in the spec."""
    cmd = [
        'radosgw-admin',
        '--key=%s' % keyring,
        '--user', 'rgw.%s' % rgw_id,
        'realm', 'create',
        '--rgw-realm=%s' % spec.rgw_realm,
        '--default',
    ]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        raise OrchestratorError(
            'failed to create RGW realm "%s": %r' % (spec.rgw_realm, result.stderr))
    self.mgr.log.info('created realm: %s' % spec.rgw_realm)
def create_zone() -> None:
    """Create (as master and default) the RGW zone named in the spec."""
    cmd = [
        'radosgw-admin',
        '--key=%s' % keyring,
        '--user', 'rgw.%s' % rgw_id,
        'zone', 'create',
        '--rgw-zonegroup=default',
        '--rgw-zone=%s' % spec.rgw_zone,
        '--master', '--default',
    ]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        raise OrchestratorError(
            'failed to create RGW zone "%s": %r' % (spec.rgw_zone, result.stderr))
    self.mgr.log.info('created zone: %s' % spec.rgw_zone)
def get_zones() -> List[str]:
    """Return the RGW zone names known to radosgw-admin (empty on no output).

    :raises OrchestratorError: if the command output is not valid JSON.
    """
    cmd = [
        'radosgw-admin',
        '--key=%s' % keyring,
        '--user', 'rgw.%s' % rgw_id,
        'zone', 'list',
        '--format=json',
    ]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if not result.stdout:
        return []
    try:
        return json.loads(result.stdout).get('zones', [])
    except Exception:
        raise OrchestratorError('failed to parse zone info')
def remove_mds(self, name):
    """Schedule removal of all mds daemons named ``name`` or ``name.*``.

    :raises OrchestratorError: if no matching daemon exists.
    """
    daemons = self._get_services('mds')
    results = []
    prefix = name + '.'
    for d in daemons:
        if d.service_instance != name and not d.service_instance.startswith(prefix):
            continue
        daemon_name = '%s.%s' % (d.service_type, d.service_instance)
        results.append(
            self._worker_pool.apply_async(
                self._remove_daemon,
                (daemon_name, d.nodename)))
    if not results:
        raise OrchestratorError('Unable to find mds.%s[-*] daemon(s)' % name)
    return SSHWriteCompletion(results)
async def _write_remote_file(
    self,
    host: str,
    path: str,
    content: bytes,
    mode: Optional[int] = None,
    uid: Optional[int] = None,
    gid: Optional[int] = None,
    addr: Optional[str] = None,
) -> None:
    """Write ``content`` to ``path`` on ``host`` over ssh.

    The data is staged at ``/tmp<path>.new``, chowned/chmodded as requested,
    then moved into place so readers never observe a partially written file.

    :raises OrchestratorError: if any remote step fails.
    """
    try:
        dirname = os.path.dirname(path)
        # ensure both the destination directory and the /tmp staging
        # directory exist on the remote host
        await self._check_execute_command(host, ['mkdir', '-p', dirname], addr=addr)
        await self._check_execute_command(
            host, ['mkdir', '-p', '/tmp' + dirname], addr=addr)
        tmp_path = '/tmp' + path + '.new'
        await self._check_execute_command(host, ['touch', tmp_path], addr=addr)
        if self.mgr.ssh_user != 'root':
            assert self.mgr.ssh_user
            # make the staging file writable by the non-root ssh user so
            # the scp below can overwrite it
            await self._check_execute_command(
                host, ['chown', '-R', self.mgr.ssh_user, tmp_path], addr=addr)
            await self._check_execute_command(
                host, ['chmod', str(644), tmp_path], addr=addr)
        with NamedTemporaryFile(prefix='cephadm-write-remote-file-') as f:
            os.fchmod(f.fileno(), 0o600)
            f.write(content)
            f.flush()
            conn = await self._remote_connection(host, addr)
            await asyncssh.scp(f.name, (conn, tmp_path))
        if uid is not None and gid is not None and mode is not None:
            # shlex quote takes str or byte object, not int
            await self._check_execute_command(
                host, ['chown', '-R', str(uid) + ':' + str(gid), tmp_path],
                addr=addr)
            await self._check_execute_command(
                host, ['chmod', oct(mode)[2:], tmp_path], addr=addr)
        # move the fully-written staging file into its final location
        await self._check_execute_command(host, ['mv', tmp_path, path], addr=addr)
    except Exception as e:
        msg = f"Unable to write {host}:{path}: {e}"
        logger.exception(msg)
        raise OrchestratorError(msg)
async def _remote_connection(
    self,
    host: str,
    addr: Optional[str] = None,
) -> "SSHClientConnection":
    """Return a (cached) ssh connection to ``host``.

    Opens and caches a new connection in ``self.cons`` when none exists;
    a successful connect also clears the host from the offline set.

    :raises OrchestratorError: if no address can be determined for host.
    """
    if not self.cons.get(host):
        # resolve the address from the inventory when not given explicitly
        if not addr and host in self.mgr.inventory:
            addr = self.mgr.inventory.get_addr(host)
        if not addr:
            raise OrchestratorError("host address is empty")
        assert self.mgr.ssh_user
        n = self.mgr.ssh_user + '@' + addr
        logger.debug(
            "Opening connection to {} with ssh options '{}'".format(
                n, self.mgr._ssh_options))
        asyncssh.set_log_level('DEBUG')
        asyncssh.set_debug_level(3)
        with self.redirect_log(host, addr):
            try:
                # keepalives so dead hosts are detected rather than hanging
                ssh_options = asyncssh.SSHClientConnectionOptions(
                    keepalive_interval=7, keepalive_count_max=3)
                conn = await asyncssh.connect(
                    addr,
                    username=self.mgr.ssh_user,
                    client_keys=[self.mgr.tkey.name],
                    known_hosts=None,
                    config=[self.mgr.ssh_config_fname],
                    preferred_auth=['publickey'],
                    options=ssh_options)
            # NOTE(review): these handlers only re-raise; connection errors
            # propagate to the caller unchanged (the redirect_log context
            # still sees them)
            except OSError:
                raise
            except asyncssh.Error:
                raise
            except Exception:
                raise
        self.cons[host] = conn
        self.mgr.offline_hosts_remove(host)
    return self.cons[host]
def generate_config(
    self, daemon_spec: CephadmDaemonDeploySpec
) -> Tuple[Dict[str, Any], List[str]]:
    """Build the agent daemon's config payload and its dependency list.

    :raises OrchestratorError: until the cephadm endpoint has its certs
        and listening port ready.
    """
    try:
        assert self.mgr.cherrypy_thread
        assert self.mgr.cherrypy_thread.ssl_certs.get_root_cert()
        assert self.mgr.cherrypy_thread.server_port
    except Exception:
        raise OrchestratorError(
            'Cannot deploy agent daemons until cephadm endpoint has finished generating certs')

    endpoint = self.mgr.cherrypy_thread
    cfg = {
        'target_ip': self.mgr.get_mgr_ip(),
        'target_port': endpoint.server_port,
        'refresh_period': self.mgr.agent_refresh_rate,
        'listener_port': self.mgr.agent_starting_port,
        'host': daemon_spec.host,
        'device_enhanced_scan': str(self.mgr.get_module_option('device_enhanced_scan')),
    }

    listener_cert, listener_key = endpoint.ssl_certs.generate_cert(
        self.mgr.inventory.get_addr(daemon_spec.host))
    config = {
        'agent.json': json.dumps(cfg),
        'keyring': daemon_spec.keyring,
        'root_cert.pem': endpoint.ssl_certs.get_root_cert(),
        'listener.crt': listener_cert,
        'listener.key': listener_key,
    }

    # deps: anything that should trigger a redeploy when it changes
    deps = sorted([
        str(self.mgr.get_mgr_ip()),
        str(endpoint.server_port),
        endpoint.ssl_certs.get_root_cert(),
        str(self.mgr.get_module_option('device_enhanced_scan')),
    ])
    return config, deps
def post_remove(self, daemon: DaemonDescription) -> None:
    """
    Called after the daemon is removed.

    Cleans up what the iscsi gateway left behind: its entry in the
    dashboard's gateway list (when the dashboard module is enabled) and
    any iscsi cert/key entries it stored under config-key.
    """
    logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}')

    # check whether the dashboard module is enabled before trying to
    # remove the gateway from its config
    ret, out, err = self.mgr.check_mon_command({
        'prefix': 'mgr module ls',
        'format': 'json',
    })
    try:
        j = json.loads(out)
    except ValueError:
        msg = 'Failed to parse mgr module ls: Cannot decode JSON'
        logger.exception('%s: \'%s\'' % (msg, out))
        raise OrchestratorError('failed to parse mgr module ls')

    if 'dashboard' in j['enabled_modules']:
        # remove config for dashboard iscsi gateways
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'dashboard iscsi-gateway-rm',
            'name': daemon.hostname,
        })
        logger.info(
            f'{daemon.hostname} removed from iscsi gateways dashboard config'
        )

    # needed to know if we have ssl stuff for iscsi in ceph config
    iscsi_config_dict = {}
    ret, iscsi_config, err = self.mgr.check_mon_command({
        'prefix': 'config-key dump',
        'key': 'iscsi',
    })
    if iscsi_config:
        iscsi_config_dict = json.loads(iscsi_config)

    # remove iscsi cert and key from ceph config
    for iscsi_key, value in iscsi_config_dict.items():
        if f'iscsi/client.{daemon.name()}/' in iscsi_key:
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key rm',
                'key': iscsi_key,
            })
            logger.info(f'{iscsi_key} removed from ceph config')
def run(self) -> None:
    """Thread body for the cephadm HTTP endpoint.

    Loads previously stored root TLS credentials (generating fresh ones if
    missing or corrupt), starts the cherrypy engine, persists the
    credentials, then blocks until the shutdown event fires and stops the
    engine. Any failure is logged rather than raised.
    """
    try:
        try:
            old_creds = self.mgr.get_store('cephadm_endpoint_credentials')
            if not old_creds:
                raise OrchestratorError('No old credentials for cephadm endpoint found')
            old_creds_dict = json.loads(old_creds)
            old_key = old_creds_dict['key']
            old_cert = old_creds_dict['cert']
            self.ssl_certs.load_root_credentials(old_cert, old_key)
        except (OrchestratorError, json.decoder.JSONDecodeError, KeyError, ValueError):
            # missing or unusable stored credentials -> start fresh
            self.ssl_certs.generate_root_cert()

        cert, key = self.ssl_certs.generate_cert()

        # the NamedTemporaryFile objects are kept on self so the files
        # survive for as long as cherrypy needs them
        self.key_tmp = tempfile.NamedTemporaryFile()
        self.key_tmp.write(key.encode('utf-8'))
        self.key_tmp.flush()  # pkey_tmp must not be gc'ed
        key_fname = self.key_tmp.name

        self.cert_tmp = tempfile.NamedTemporaryFile()
        self.cert_tmp.write(cert.encode('utf-8'))
        self.cert_tmp.flush()  # cert_tmp must not be gc'ed
        cert_fname = self.cert_tmp.name

        verify_tls_files(cert_fname, key_fname)

        self.configure_cherrypy()
        self.mgr.log.debug('Starting cherrypy engine...')
        self.start_engine()
        self.mgr.log.debug('Cherrypy engine started.')

        # persist the (possibly newly generated) root credentials so the
        # next start can reuse them
        cephadm_endpoint_creds = {
            'cert': self.ssl_certs.get_root_cert(),
            'key': self.ssl_certs.get_root_key()
        }
        self.mgr.set_store('cephadm_endpoint_credentials', json.dumps(cephadm_endpoint_creds))
        self.mgr._kick_serve_loop()

        # wait for the shutdown event
        self.cherrypy_shutdown_event.wait()
        self.cherrypy_shutdown_event.clear()
        cherrypy.engine.stop()
        self.mgr.log.debug('Cherrypy engine stopped.')
    except Exception as e:
        self.mgr.log.error(f'Failed to run cephadm cherrypy endpoint: {e}')
def generate_config(
    self, daemon_spec: CephadmDaemonDeploySpec
) -> Tuple[Dict[str, Any], List[str]]:
    """Return the cephadm-exporter config payload and an empty dep list.

    :raises OrchestratorError: when a stored config fails validation.
    """
    assert self.TYPE == daemon_spec.daemon_type
    deps: List[str] = []

    cfg = CephadmExporterConfig(self.mgr)
    cfg.load_from_store()

    if not cfg.ready:
        # no usable stored config: fall back to generated defaults
        logger.info("Using default configuration for cephadm-exporter")
        self.mgr._set_exporter_defaults()
        cfg.load_from_store()
    else:
        rc, reason = cfg.validate_config()
        if rc:
            raise OrchestratorError(reason)

    return {"crt": cfg.crt, "key": cfg.key, "token": cfg.token}, deps
def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
    """Validate (or default) the exporter config and assign the daemon port.

    :raises OrchestratorError: when a stored config fails validation.
    """
    assert self.TYPE == daemon_spec.daemon_type

    cfg = CephadmExporterConfig(self.mgr)
    cfg.load_from_store()

    if not cfg.ready:
        logger.info(
            "Incomplete/Missing configuration, applying defaults")
        self.mgr._set_exporter_defaults()
        cfg.load_from_store()
    else:
        rc, reason = cfg.validate_config()
        if rc:
            raise OrchestratorError(reason)

    if not daemon_spec.ports:
        daemon_spec.ports = [int(cfg.port)]

    return daemon_spec
def update_keyring_caps(self, entity: Optional[str] = None) -> None:
    """Refresh keyring caps for ``entity`` (default: this service's own).

    Grants 'mon allow r' plus rw on the service pool, optionally scoped
    to the configured namespace.

    :raises OrchestratorError: if the mon command fails.
    """
    entity = entity or self.get_keyring_entity()

    osd_caps = 'allow rw pool=%s' % (self.spec.pool)
    if self.spec.namespace:
        osd_caps = '%s namespace=%s' % (osd_caps, self.spec.namespace)

    logger.info('Updating keyring caps: %s' % entity)
    ret, out, err = self.mgr.mon_command({
        'prefix': 'auth caps',
        'entity': entity,
        'caps': ['mon', 'allow r', 'osd', osd_caps],
    })
    if ret != 0:
        raise OrchestratorError(
            'Unable to update keyring caps %s: %s %s' % (entity, ret, err))
def config(self, spec: RGWSpec) -> None:  # type: ignore
    """Push rgw realm/zone and frontend-cert settings into the config store."""
    assert self.TYPE == spec.service_type

    # set rgw_realm and rgw_zone, if present
    section = f"{utils.name_to_config_section('rgw')}.{spec.service_id}"
    for option, value in (('rgw_realm', spec.rgw_realm),
                          ('rgw_zone', spec.rgw_zone)):
        if value:
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config set',
                'who': section,
                'name': option,
                'value': value,
            })

    if spec.rgw_frontend_ssl_certificate:
        if isinstance(spec.rgw_frontend_ssl_certificate, list):
            cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
        elif isinstance(spec.rgw_frontend_ssl_certificate, str):
            cert_data = spec.rgw_frontend_ssl_certificate
        else:
            raise OrchestratorError(
                'Invalid rgw_frontend_ssl_certificate: %s' % spec.rgw_frontend_ssl_certificate)
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config-key set',
            'key': f'rgw/cert/{spec.service_name()}',
            'val': cert_data,
        })

    # TODO: fail, if we don't have a spec
    logger.info('Saving service %s spec with placement %s' % (
        spec.service_name(), spec.placement.pretty_str()))
    self.mgr.spec_store.save(spec)
    self.mgr.trigger_connect_dashboard_rgw()
def _get_container_image_info(self, image_name: str) -> ContainerInspectInfo:
    """Pull ``image_name`` on one host and report its id/version/digests.

    :raises OrchestratorError: when the inventory contains no hosts.
    """
    # pick a random host...
    host = next(iter(self.mgr.inventory.keys()), None)
    if not host:
        raise OrchestratorError('no hosts defined')

    # authenticate against the registry first when this host needs it
    if self.mgr.cache.host_needs_registry_login(host) and self.mgr.registry_url:
        self._registry_login(host, self.mgr.registry_url,
                             self.mgr.registry_username, self.mgr.registry_password)

    j = self._run_cephadm_json(host, '', 'pull', [], image=image_name, no_fsid=True)
    r = ContainerInspectInfo(
        j['image_id'],
        j.get('ceph_version'),
        j.get('repo_digests')
    )
    self.log.debug(f'image {image_name} -> {r}')
    return r
async def _execute_command(self,
                           host: str,
                           cmd: List[str],
                           stdin: Optional[str] = None,
                           addr: Optional[str] = None,
                           ) -> Tuple[str, str, int]:
    """Run ``cmd`` on ``host`` over ssh and return (stdout, stderr, rc).

    The command is always wrapped in sudo, since the configured ssh user
    may not be root.

    :raises OrchestratorError: when the host is unreachable.
    """
    conn = await self._remote_connection(host, addr)
    # use a separate name instead of rebinding the List[str] parameter to
    # a str, which violated the annotation
    cmd_str = "sudo " + " ".join(quote(x) for x in cmd)
    logger.debug(f'Running command: {cmd_str}')
    try:
        r = await conn.run(cmd_str, input=stdin)
    # handle these Exceptions otherwise you might get a weird error like
    # TypeError: __init__() missing 1 required positional argument: 'reason'
    # (due to the asyncssh error interacting with raise_if_exception).
    # Exception already covers ChannelOpenError, so listing it separately
    # in the tuple was redundant.
    except Exception as e:
        # SSH connection closed or broken, will create new connection next call
        logger.debug(f'Connection to {host} failed. {str(e)}')
        await self._reset_con(host)
        self.mgr.offline_hosts.add(host)
        raise OrchestratorError(f'Unable to reach remote host {host}. {str(e)}')
    out = r.stdout.rstrip('\n')
    err = r.stderr.rstrip('\n')
    return out, err, r.returncode
async def _execute_command(
    self,
    host: str,
    cmd: List[str],
    stdin: Optional[str] = None,
    addr: Optional[str] = None,
) -> Tuple[str, str, int]:
    """Run ``cmd`` on ``host`` over ssh and return (stdout, stderr, rc).

    Non-root ssh users get the command wrapped in sudo.
    NOTE(review): the 'sudo true' probe runs even when ssh_user is root —
    confirm that is intended on hosts without sudo installed.

    :raises OrchestratorError: when the host is unreachable or the ssh
        output cannot be decoded.
    """
    conn = await self._remote_connection(host, addr)
    sudo_prefix = "sudo " if self.mgr.ssh_user != 'root' else ""
    # use a separate name instead of rebinding the List[str] parameter to
    # a str, which violated the annotation
    cmd_str = sudo_prefix + " ".join(quote(x) for x in cmd)
    logger.debug(f'Running command: {cmd_str}')
    try:
        r = await conn.run('sudo true', check=True, timeout=5)
        r = await conn.run(cmd_str, input=stdin)
    # handle these Exceptions otherwise you might get a weird error like
    # TypeError: __init__() missing 1 required positional argument: 'reason'
    # (due to the asyncssh error interacting with raise_if_exception).
    # Exception already covers ChannelOpenError and ProcessError, so the
    # explicit tuple was redundant.
    except Exception as e:
        # SSH connection closed or broken, will create new connection next call
        logger.debug(f'Connection to {host} failed. {str(e)}')
        await self._reset_con(host)
        self.mgr.offline_hosts.add(host)
        raise OrchestratorError(
            f'Unable to reach remote host {host}. {str(e)}')

    def _rstrip(v: Union[bytes, str, None]) -> str:
        # asyncssh may hand back str, bytes, or None depending on the channel
        if not v:
            return ''
        if isinstance(v, str):
            return v.rstrip('\n')
        if isinstance(v, bytes):
            return v.decode().rstrip('\n')
        raise OrchestratorError(
            f'Unable to parse ssh output with type {type(v)} from remote host {host}'
        )

    out = _rstrip(r.stdout)
    err = _rstrip(r.stderr)
    rc = r.returncode if r.returncode else 0
    return out, err, rc