def _add_host(self, host, addr=None, labels=None):
    """Register *host* (optionally with address and labels) with the orchestrator.

    Returns a HandleCommandResult whose stdout is the completion's result string.
    """
    spec = orchestrator.HostSpec(hostname=host, addr=addr, labels=labels)
    completion = self.add_host(spec)
    # Block until the orchestrator finishes, then surface any captured error.
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _host_label_rm(self, hostname, label):
    """Remove *label* from *hostname* and report the orchestrator's result."""
    done = self.remove_host_label(hostname, label)
    self._orchestrator_wait([done])
    raise_if_exception(done)
    return HandleCommandResult(stdout=done.result_str())
def _service_action(self, action, service_name):
    """Run *action* (e.g. start/stop/restart) on the service *service_name*."""
    c = self.service_action(action, service_name)
    self._orchestrator_wait([c])
    raise_if_exception(c)
    return HandleCommandResult(stdout=c.result_str())
def _upgrade_stop(self):
    """Abort any in-progress cluster upgrade and report the result."""
    stop = self.upgrade_stop()
    self._orchestrator_wait([stop])
    raise_if_exception(stop)
    return HandleCommandResult(stdout=stop.result_str())
def _add_host(self, hostname: str, addr: Optional[str] = None,
              labels: Optional[List[str]] = None):
    """Add a host by name, with an optional management address and label list."""
    # Build the spec inline; the completion carries the orchestrator's reply.
    c = self.add_host(HostSpec(hostname=hostname, addr=addr, labels=labels))
    self._orchestrator_wait([c])
    raise_if_exception(c)
    return HandleCommandResult(stdout=c.result_str())
def _rgw_rm(self, name):
    """Remove the RGW service identified by *name*."""
    removal = self.remove_rgw(name)
    self._orchestrator_wait([removal])
    orchestrator.raise_if_exception(removal)
    return HandleCommandResult(stdout=removal.result_str())
def _nfs_rm(self, svc_id):
    """Remove the NFS service instance *svc_id*."""
    removal = self.remove_nfs(svc_id)
    self._orchestrator_wait([removal])
    orchestrator.raise_if_exception(removal)
    return HandleCommandResult(stdout=removal.result_str())
def _rm_stateless_svc(self, svc_type, svc_id):
    """Remove the stateless service instance *svc_id* of type *svc_type*.

    Returns an empty HandleCommandResult on success.
    """
    completion = self.remove_stateless_service(svc_type, svc_id)
    self._orchestrator_wait([completion])
    # Previously any failure recorded on the completion was silently
    # discarded and success was reported; surface it like the other
    # command handlers in this file do.
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult()
def _apply_services(self, inbuf):
    """Apply service specs supplied via stdin (-i <file>)."""
    applied = self.apply_service_config(inbuf)
    self._orchestrator_wait([applied])
    raise_if_exception(applied)
    return HandleCommandResult(stdout=applied.result_str())
def _apply_osd(self,
               all_available_devices: bool = False,
               preview: bool = False,
               service_name: Optional[str] = None,
               unmanaged: Optional[bool] = None,
               format: Optional[str] = 'plain',
               inbuf: Optional[str] = None) -> HandleCommandResult:
    """Apply DriveGroupSpecs to create OSDs"""
    usage = """
Usage:
  ceph orch apply osd -i <json_file/yaml_file>
  ceph orch apply osd --all-available-devices
  ceph orch apply osd --service-name <service_name> --preview
  ceph orch apply osd --service-name <service_name> --unmanaged=True|False
"""

    def print_preview(prev, format):
        # Render a drivegroup preview either via to_format (json/yaml)
        # or as a plain-text table.
        if format != 'plain':
            return to_format(prev, format)
        else:
            table = PrettyTable(['NAME', 'HOST', 'DATA', 'DB', 'WAL'],
                                border=False)
            table.align = 'l'
            table.left_padding_width = 0
            table.right_padding_width = 1
            for data in prev:
                dg_name = data.get('drivegroup')
                hostname = data.get('host')
                for osd in data.get('data', {}).get('osds', []):
                    db_path = '-'
                    wal_path = '-'
                    block_db = osd.get('block.db', {}).get('path')
                    block_wal = osd.get('block.wal', {}).get('path')
                    block_data = osd.get('data', {}).get('path', '')
                    if not block_data:
                        continue
                    if block_db:
                        # NOTE(review): this renders the vg's device list, not
                        # block_db itself — looks intentional but verify.
                        db_path = data.get('data', {}).get('vg', {}).get('devices', [])
                    if block_wal:
                        wal_path = data.get('data', {}).get('wal_vg', {}).get('devices', [])
                    table.add_row(
                        (dg_name, hostname, block_data, db_path, wal_path))
            out = table.get_string()
            if not out:
                out = "No pending deployments."
            return out

    if (inbuf or all_available_devices) and service_name:
        # mutually exclusive
        return HandleCommandResult(-errno.EINVAL, stderr=usage)
    if preview and not (service_name or all_available_devices or inbuf):
        # get all stored drivegroups and print
        prev = self.preview_drivegroups()
        return HandleCommandResult(stdout=print_preview(prev, format))
    if service_name and preview:
        # get specified drivegroup and print
        prev = self.preview_drivegroups(service_name)
        return HandleCommandResult(stdout=print_preview(prev, format))
    if service_name and unmanaged is not None:
        return self.set_unmanaged_flag(service_name, unmanaged)
    if not inbuf and not all_available_devices:
        return HandleCommandResult(-errno.EINVAL, stderr=usage)
    if inbuf:
        if all_available_devices:
            raise OrchestratorError(
                '--all-available-devices cannot be combined with an osd spec'
            )
        try:
            # safe_load_all: inbuf is operator-supplied; plain yaml.load_all
            # is unsafe and deprecated without an explicit Loader.
            drivegroups = yaml.safe_load_all(inbuf)
            dg_specs = [DriveGroupSpec.from_json(dg) for dg in drivegroups]
        except (ValueError, yaml.YAMLError) as e:
            # yaml.YAMLError was previously not caught, so malformed YAML
            # escaped this handler instead of producing the usage message.
            msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)
    else:
        # --all-available-devices: one catch-all drivegroup over every host.
        dg_specs = [
            DriveGroupSpec(
                service_id='all-available-devices',
                placement=PlacementSpec(host_pattern='*'),
                data_devices=DeviceSelection(all=True),
            )
        ]
    if not preview:
        completion = self.apply_drivegroups(dg_specs)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
    ret = self.preview_drivegroups(dg_specs=dg_specs)
    return HandleCommandResult(stdout=print_preview(ret, format))
def _add_stateless_svc(self, svc_type, spec):
    """Create a stateless service of *svc_type* from *spec*.

    Returns an empty HandleCommandResult on success.
    """
    completion = self.add_stateless_service(svc_type, spec)
    self._orchestrator_wait([completion])
    # Previously errors captured by the completion were silently dropped;
    # raise them as the sibling handlers do.
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult()
def handle_fs_status(self, cmd):
    """Render `fs status`: per-filesystem MDS ranks, standby-replay daemons,
    pools, standbys, and MDS versions, in plain text or JSON.

    :param cmd: command dict; honours 'format' ('plain'/'json'/'json-pretty')
                and an optional 'fs' filesystem-name filter.
    """
    output = ""
    json_output = defaultdict(list)
    output_format = cmd.get('format', 'plain')
    fs_filter = cmd.get('fs', None)

    mds_versions = defaultdict(list)

    fsmap = self.get("fs_map")
    for filesystem in fsmap['filesystems']:
        if fs_filter and filesystem['mdsmap']['fs_name'] != fs_filter:
            continue

        rank_table = PrettyTable(
            ("RANK", "STATE", "MDS", "ACTIVITY", "DNS", "INOS", "DIRS", "CAPS"),
            border=False,
        )
        rank_table.left_padding_width = 0
        rank_table.right_padding_width = 2

        mdsmap = filesystem['mdsmap']

        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = self.get_latest("mds", info['name'], "mds_mem.dn")
                inos = self.get_latest("mds", info['name'], "mds_mem.ino")
                dirs = self.get_latest("mds", info['name'], "mds_mem.dir")
                caps = self.get_latest("mds", info['name'], "mds_mem.cap")

                if rank == 0:
                    client_count = self.get_latest(
                        "mds", info['name'], "mds_sessions.session_count")
                elif client_count == 0:
                    # In case rank 0 was down, look at another rank's
                    # sessionmap to get an indication of clients.
                    client_count = self.get_latest(
                        "mds", info['name'], "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"
                # Green for a healthy active daemon, yellow otherwise.
                if state == "active" and not laggy:
                    c_state = mgr_util.colorize(state, mgr_util.GREEN)
                else:
                    c_state = mgr_util.colorize(state, mgr_util.YELLOW)

                # Populate based on context of state, e.g. client ops for an
                # active daemon, replay progress, reconnect progress
                activity = ""

                if state == "active":
                    rate = self.get_rate(
                        "mds", info['name'], "mds_server.handle_client_request")
                    if output_format not in ('json', 'json-pretty'):
                        activity = "Reqs: " + mgr_util.format_dimless(rate, 5) + "/s"

                metadata = self.get_metadata('mds', info['name'])
                mds_versions[metadata.get('ceph_version', "unknown")].append(info['name'])

                if output_format in ('json', 'json-pretty'):
                    json_output['mdsmap'].append({
                        'rank': rank,
                        'name': info['name'],
                        'state': state,
                        'rate': rate if state == "active" else "0",
                        'dns': dns,
                        'inos': inos,
                        'dirs': dirs,
                        'caps': caps
                    })
                else:
                    rank_table.add_row([
                        mgr_util.bold(rank.__str__()), c_state, info['name'],
                        activity,
                        mgr_util.format_dimless(dns, 5),
                        mgr_util.format_dimless(inos, 5),
                        mgr_util.format_dimless(dirs, 5),
                        mgr_util.format_dimless(caps, 5)
                    ])
            else:
                if output_format in ('json', 'json-pretty'):
                    json_output['mdsmap'].append({
                        'rank': rank,
                        'state': "failed"
                    })
                else:
                    rank_table.add_row(
                        [rank, "failed", "", "", "", "", "", ""])

        # Find the standby replays
        for gid_str, daemon_info in six.iteritems(mdsmap['info']):
            if daemon_info['state'] != "up:standby-replay":
                continue

            inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
            dns = self.get_latest("mds", daemon_info['name'], "mds_mem.dn")
            dirs = self.get_latest("mds", daemon_info['name'], "mds_mem.dir")
            caps = self.get_latest("mds", daemon_info['name'], "mds_mem.cap")

            events = self.get_rate("mds", daemon_info['name'], "mds_log.replayed")
            if output_format not in ('json', 'json-pretty'):
                activity = "Evts: " + mgr_util.format_dimless(events, 5) + "/s"

            metadata = self.get_metadata('mds', daemon_info['name'])
            mds_versions[metadata.get('ceph_version', "unknown")].append(
                daemon_info['name'])

            if output_format in ('json', 'json-pretty'):
                # NOTE(review): 'rank' here carries over from the previous
                # loop; the plain-text branch uses daemon_info['rank'] —
                # confirm which is intended.
                json_output['mdsmap'].append({
                    'rank': rank,
                    'name': daemon_info['name'],
                    'state': 'standby-replay',
                    'events': events,
                    # Fixed: these were hardcoded to 5; report the counters
                    # actually fetched above, matching the plain-text table.
                    'dns': dns,
                    'inos': inos,
                    'dirs': dirs,
                    'caps': caps
                })
            else:
                rank_table.add_row([
                    "{0}-s".format(daemon_info['rank']), "standby-replay",
                    daemon_info['name'], activity,
                    mgr_util.format_dimless(dns, 5),
                    mgr_util.format_dimless(inos, 5),
                    mgr_util.format_dimless(dirs, 5),
                    mgr_util.format_dimless(caps, 5)
                ])

        df = self.get("df")
        pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
        osdmap = self.get("osd_map")
        pools = dict([(p['pool'], p) for p in osdmap['pools']])
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = PrettyTable(["POOL", "TYPE", "USED", "AVAIL"],
                                  border=False)
        pools_table.left_padding_width = 0
        pools_table.right_padding_width = 2
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            if output_format in ('json', 'json-pretty'):
                json_output['pools'].append({
                    'id': pool_id,
                    'name': pools[pool_id]['pool_name'],
                    'type': pool_type,
                    'used': stats['bytes_used'],
                    'avail': stats['max_avail']
                })
            else:
                pools_table.add_row([
                    pools[pool_id]['pool_name'], pool_type,
                    mgr_util.format_bytes(stats['bytes_used'], 5),
                    mgr_util.format_bytes(stats['max_avail'], 5)
                ])

        if output_format in ('json', 'json-pretty'):
            json_output['clients'].append({
                'fs': mdsmap['fs_name'],
                'clients': client_count,
            })
        else:
            output += "{0} - {1} clients\n".format(
                mdsmap['fs_name'], client_count)
            output += "=" * len(mdsmap['fs_name']) + "\n"
            output += rank_table.get_string()
            output += "\n" + pools_table.get_string() + "\n"

    if not output and not json_output and fs_filter is not None:
        return errno.EINVAL, "", "Invalid filesystem: " + fs_filter

    standby_table = PrettyTable(["STANDBY MDS"], border=False)
    standby_table.left_padding_width = 0
    standby_table.right_padding_width = 2
    for standby in fsmap['standbys']:
        metadata = self.get_metadata('mds', standby['name'])
        mds_versions[metadata.get('ceph_version', "unknown")].append(standby['name'])

        if output_format in ('json', 'json-pretty'):
            json_output['mdsmap'].append({
                'name': standby['name'],
                'state': "standby"
            })
        else:
            standby_table.add_row([standby['name']])

    if output_format not in ('json', 'json-pretty'):
        output += "\n" + standby_table.get_string() + "\n"

    if len(mds_versions) == 1:
        if output_format in ('json', 'json-pretty'):
            json_output['mds_version'] = list(mds_versions)[0]
        else:
            output += "MDS version: {0}".format(list(mds_versions)[0])
    else:
        version_table = PrettyTable(["VERSION", "DAEMONS"], border=False)
        version_table.left_padding_width = 0
        version_table.right_padding_width = 2
        for version, daemons in six.iteritems(mds_versions):
            if output_format in ('json', 'json-pretty'):
                json_output['mds_version'].append({
                    'version': version,
                    'daemons': daemons
                })
            else:
                version_table.add_row([version, ", ".join(daemons)])
        if output_format not in ('json', 'json-pretty'):
            output += version_table.get_string() + "\n"

    if output_format == "json":
        return HandleCommandResult(
            stdout=json.dumps(json_output, sort_keys=True))
    elif output_format == "json-pretty":
        return HandleCommandResult(stdout=json.dumps(
            json_output, sort_keys=True, indent=4, separators=(',', ': ')))
    else:
        return HandleCommandResult(stdout=output)
def _service_action(self, action, svc_type, svc_name):
    """Run *action* against the *svc_type* service named *svc_name*."""
    c = self.service_action(action, svc_type, service_name=svc_name)
    self._orchestrator_wait([c])
    orchestrator.raise_if_exception(c)
    return HandleCommandResult()
def _rm_stateless_svc(self, svc_type, svc_id):
    """Remove a stateless service instance and echo the raw completion result."""
    c = self.remove_stateless_service(svc_type, svc_id)
    self._orchestrator_wait([c])
    orchestrator.raise_if_exception(c)
    return HandleCommandResult(stdout=str(c.result))
def _remove_host(self, host):
    """Deregister *host* from the orchestrator."""
    removal = self.remove_host(host)
    self._orchestrator_wait([removal])
    orchestrator.raise_if_exception(removal)
    return HandleCommandResult(stdout=removal.result_str())
def _get_service_specs(self):
    """Return the stored service specs, rendered via the completion's result string."""
    c = self.list_specs()
    self._orchestrator_wait([c])
    raise_if_exception(c)
    return HandleCommandResult(stdout=c.result_str())
def _get_hosts(self):
    """List the names of all hosts known to the orchestrator, one per line."""
    completion = self.get_hosts()
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    # Generator expression instead of map/lambda; same output.
    names = "\n".join(node.name for node in completion.result)
    return HandleCommandResult(stdout=names)
def do_prune_health(self, hours: int):
    '''
    Remove health history older than <hours> hours
    '''
    # Delegate to the pruning helper; no output on success.
    self._health_prune_history(hours)
    return HandleCommandResult()
def _nfs_update(self, svc_id, num):
    """Scale the NFS service *svc_id* to *num* daemons.

    Returns the completion's result string as stdout.
    """
    spec = orchestrator.NFSServiceSpec(svc_id, count=num)
    completion = self.update_nfs(spec)
    self._orchestrator_wait([completion])
    # Previously a failed completion still had result_str() rendered as
    # success output; raise the captured error like the sibling handlers.
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _get_service_specs(self, service_name=None):
    """Dump stored service specs (optionally filtered by *service_name*) as YAML."""
    c = self.list_specs(service_name=service_name)
    self._orchestrator_wait([c])
    raise_if_exception(c)
    return HandleCommandResult(stdout=yaml.safe_dump_all(c.result))
def _service_instance_action(self, action, svc_type, svc_id):
    """Run *action* on the single *svc_type* instance identified by *svc_id*."""
    c = self.service_action(action, svc_type, service_id=svc_id)
    self._orchestrator_wait([c])
    orchestrator.raise_if_exception(c)
    return HandleCommandResult(stdout=c.result_str())
def _resume(self):
    """Resume previously paused orchestrator background activity."""
    self.resume()
    return HandleCommandResult()
def _device_ls(self):
    """Dump the tracked ident/fault device sets as pretty-printed JSON."""
    payload = {
        'ident': list(self.ident),
        'fault': list(self.fault),
    }
    return HandleCommandResult(
        stdout=json.dumps(payload, indent=4, sort_keys=True))
def _upgrade_start(self, image=None, ceph_version=None):
    """Begin a cluster upgrade to *image* and/or *ceph_version*."""
    start = self.upgrade_start(image, ceph_version)
    self._orchestrator_wait([start])
    raise_if_exception(start)
    return HandleCommandResult(stdout=start.result_str())
def _update_set_addr(self, hostname, addr):
    """Set the management address of *hostname* to *addr*."""
    c = self.update_host_addr(hostname, addr)
    self._orchestrator_wait([c])
    raise_if_exception(c)
    return HandleCommandResult(stdout=c.result_str())
def _host_label_add(self, host, label):
    """Attach *label* to *host* and report the orchestrator's result."""
    done = self.add_host_label(host, label)
    self._orchestrator_wait([done])
    orchestrator.raise_if_exception(done)
    return HandleCommandResult(stdout=done.result_str())
def _list_services(self, host=None, service_type=None, service_name=None,
                   export=False, format='plain', refresh=False):
    """List known services, either as a plain-text table or via to_format.

    :param host: accepted but not used in this body — presumably consumed by
                 the CLI layer; TODO confirm.
    :param service_type: optional filter passed to describe_service.
    :param service_name: optional filter passed to describe_service.
    :param export: when True, emit the raw specs (and default to yaml).
    :param format: 'plain' or a to_format-supported format name.
    :param refresh: ask the orchestrator to refresh its view first.
    """
    # Exporting plain text makes no sense; fall back to yaml.
    if export and format == 'plain':
        format = 'yaml'

    completion = self.describe_service(service_type, service_name,
                                       refresh=refresh)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    services: List[ServiceDescription] = completion.result

    def ukn(s):
        # Placeholder for missing values in the plain-text table.
        return '<unknown>' if s is None else s

    # Sort the list for display
    # NOTE(review): this (and add_row below) call s.spec.service_name()
    # unconditionally even though the loop guards against `not s.spec` —
    # looks like spec is assumed always present; confirm.
    services.sort(key=lambda s: (ukn(s.spec.service_name())))

    if len(services) == 0:
        return HandleCommandResult(stdout="No services reported")
    elif format != 'plain':
        if export:
            # Export the specs themselves, not the full descriptions.
            data = [s.spec.to_json() for s in services]
        else:
            data = [s.to_json() for s in services]
        return HandleCommandResult(stdout=to_format(data, format))
    else:
        # Single timestamp so all age columns are relative to the same "now".
        now = datetime.datetime.utcnow()
        table = PrettyTable(
            ['NAME', 'RUNNING', 'REFRESHED', 'AGE',
             'PLACEMENT',
             'IMAGE NAME', 'IMAGE ID',
             ],
            border=False)
        table.align['NAME'] = 'l'
        table.align['RUNNING'] = 'r'
        table.align['REFRESHED'] = 'l'
        table.align['AGE'] = 'l'
        table.align['IMAGE NAME'] = 'l'
        table.align['IMAGE ID'] = 'l'
        table.align['PLACEMENT'] = 'l'
        table.left_padding_width = 0
        table.right_padding_width = 2
        for s in services:
            # Summarise the placement column from the spec's state.
            if not s.spec:
                pl = '<no spec>'
            elif s.spec.unmanaged:
                pl = '<unmanaged>'
            else:
                pl = s.spec.placement.pretty_str()
            table.add_row((
                s.spec.service_name(),
                '%d/%d' % (s.running, s.size),
                nice_delta(now, s.last_refresh, ' ago'),
                nice_delta(now, s.created),
                pl,
                ukn(s.container_image_name),
                # Image IDs are truncated to 12 chars, docker-style.
                ukn(s.container_image_id)[0:12],
            ))
        return HandleCommandResult(stdout=table.get_string())
def _cancel(self):
    """Cancel all outstanding completions.

    ProgressReferences can get stuck; cancelling unsticks them.
    """
    self.cancel_completions()
    return HandleCommandResult()
def _pause(self):
    """Pause orchestrator background activity."""
    self.pause()
    return HandleCommandResult()
def _service_instance_action(self, action, svc_type, svc_id):
    """Run *action* on the single *svc_type* instance identified by *svc_id*.

    Returns an empty HandleCommandResult on success.
    """
    completion = self.service_action(action, svc_type, service_id=svc_id)
    self._orchestrator_wait([completion])
    # Previously a failed completion was silently reported as success;
    # raise captured errors as the sibling handler does.
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult()