def predicate(item):
    # type: (V1Pod) -> bool
    """Decide whether a pod matches the requested service filters.

    Uses the closed-over ``service_type``, ``service_id`` and ``nodename``
    filters; any filter that is None is skipped.
    """
    labels = item.metadata.labels

    # Filter on service type via the rook-ceph "app" label.
    if service_type is not None and \
            labels['app'] != "rook-ceph-{0}".format(service_type):
        return False

    # Filter on service id via the type-specific pod label.
    if service_id is not None:
        label_key = {
            "mds": "rook_file_system",
            "osd": "ceph-osd-id",
            "mon": "mon",
            "mgr": "mgr",
            "ceph_nfs": "ceph_nfs",
            "rgw": "ceph_rgw",
        }.get(service_type)
        if label_key is None:
            raise orchestrator.OrchestratorValidationError(
                '{} not supported'.format(service_type))
        if labels[label_key] != service_id:
            return False

    # Filter on the node the pod is scheduled on.
    if nodename is not None and item.spec.node_name != nodename:
        return False

    return True
def run(all_hosts):
    # type: (List[orchestrator.HostSpec]) -> None
    """Validate the drive group and ensure its placement matches a host."""
    drive_group.validate()
    hostnames = [h.hostname for h in all_hosts]
    matched = drive_group.placement.filter_matching_hosts(
        lambda label=None, as_hostspec=None: hostnames)
    if not matched:
        raise orchestrator.OrchestratorValidationError(
            'failed to match')
def run(all_hosts):
    # type: (List[orchestrator.HostSpec]) -> None
    """Validate the drive group; if it has a host pattern, check it matches."""
    drive_group.validate()
    if not drive_group.placement.host_pattern:
        return
    hostnames = [h.hostname for h in all_hosts]
    if not drive_group.placement.pattern_matches_hosts(hostnames):
        raise orchestrator.OrchestratorValidationError(
            'failed to match')
def _create_osds(self, drive_group):
    # type: (DriveGroupSpec) -> str
    """Validate *drive_group* against the known hosts; return an empty status."""
    drive_group.validate()
    hosts = raise_if_exception(self.get_hosts())
    matching = drive_group.placement.filter_matching_hostspecs(hosts)
    if not matching:
        raise orchestrator.OrchestratorValidationError('failed to match')
    return ''
def describe_service(self, service_type=None, service_id=None, node_name=None,
                     refresh=False):
    """Return service descriptions, optionally filtered by type/id/node.

    :raises orchestrator.OrchestratorValidationError: on an unknown
        service_type (None means "all types" and is accepted).
    """
    valid_types = ("mds", "osd", "mgr", "mon", "rgw", "nfs", None)
    if service_type not in valid_types:
        raise orchestrator.OrchestratorValidationError(
            service_type + " unsupported")
    return self._get_services(service_type,
                              service_id=service_id,
                              node_name=node_name,
                              refresh=refresh)
def describe_service(self, service_type=None, service_id=None, node_name=None,
                     refresh=False):
    """Describe running services by inspecting rook-managed pods.

    :param service_type: one of mds/osd/mgr/mon/nfs, or None for all types
    :param service_id: optional id filter (e.g. osd id, filesystem name)
    :param node_name: optional node filter
    :param refresh: accepted for interface compatibility; unused here
    :raises orchestrator.OrchestratorValidationError: on unknown service_type
    :return: list of orchestrator.ServiceDescription
    """
    if service_type not in ("mds", "osd", "mgr", "mon", "nfs", None):
        raise orchestrator.OrchestratorValidationError(service_type + " unsupported")

    pods = self.rook_cluster.describe_pods(service_type, service_id, node_name)

    result = []
    for p in pods:
        sd = orchestrator.ServiceDescription()
        sd.nodename = p['nodename']
        sd.container_id = p['name']
        # Pods are labelled app=rook-ceph-<type>; strip the prefix.
        sd.service_type = p['labels']['app'].replace('rook-ceph-', '')
        # Map the kubernetes pod phase onto the orchestrator status enum.
        status = {
            'Pending': -1,
            'Running': 1,
            'Succeeded': 0,
            'Failed': -1,
            'Unknown': -1,
        }[p['phase']]
        sd.status = status
        sd.status_desc = p['phase']

        if sd.service_type == "osd":
            sd.service_instance = "%s" % p['labels']["ceph-osd-id"]
        elif sd.service_type == "mds":
            sd.service = p['labels']['rook_file_system']
            pfx = "{0}-".format(sd.service)
            sd.service_instance = p['labels']['ceph_daemon_id'].replace(
                pfx, '', 1)
        elif sd.service_type == "mon":
            sd.service_instance = p['labels']["mon"]
        elif sd.service_type == "mgr":
            sd.service_instance = p['labels']["mgr"]
        elif sd.service_type == "nfs":
            sd.service = p['labels']['ceph_nfs']
            sd.service_instance = p['labels']['instance']
            sd.rados_config_location = self.rook_cluster.get_nfs_conf_url(
                sd.service, sd.service_instance)
        elif sd.service_type == "rgw":
            # Fix: rgw pods carry the 'ceph_rgw' label (the pod-matching
            # predicate in this module maps rgw -> 'ceph_rgw'), not 'rgw'.
            sd.service = p['labels']['ceph_rgw']
            sd.service_instance = p['labels']['ceph_daemon_id']
        else:
            # Unknown type -- skip it
            continue

        result.append(sd)

    return result
def describe_service(self, service_type=None, service_id=None, node_name=None,
                     refresh=False):
    """Build ServiceDescriptions by running `ceph-daemon ls` on every host.

    :param service_type: restrict to one of mds/osd/mgr/mon/nfs (None = all)
    :param service_id: restrict to a single daemon instance (None = all)
    :param node_name: NOTE(review): accepted but never used below -- confirm
        whether node filtering was intended here
    :param refresh: accepted for interface compatibility; unused here
    :raises orchestrator.OrchestratorValidationError: on unknown service_type
    :return: orchestrator.TrivialReadCompletion wrapping the list of
        orchestrator.ServiceDescription
    """
    if service_type not in ("mds", "osd", "mgr", "mon", "nfs", None):
        raise orchestrator.OrchestratorValidationError(service_type + " unsupported")
    # Query each known host for its daemon inventory; no_fsid=True lists
    # daemons regardless of which cluster they belong to (filtered below).
    daemons = {}
    for host, _ in self._get_hosts():
        self.log.info("refresh stale daemons for '{}'".format(host))
        out, code = self._run_ceph_daemon(host, 'mon', 'ls', [], no_fsid=True)
        daemons[host] = json.loads(''.join(out))
    result = []
    for host, ls in daemons.items():
        for d in ls:
            # Only report daemons deployed by ceph-daemon itself.
            if not d['style'].startswith('ceph-daemon'):
                self.log.debug('ignoring non-ceph-daemon on %s: %s' % (host, d))
                continue
            # Skip daemons belonging to a different ceph cluster.
            if d['fsid'] != self._cluster_fsid:
                self.log.debug('ignoring foreign daemon on %s: %s' % (host, d))
                continue
            self.log.debug('including %s' % d)
            sd = orchestrator.ServiceDescription()
            # Daemon names look like "<type>.<instance>", e.g. "osd.3".
            sd.service_type = d['name'].split('.')[0]
            if service_type and service_type != sd.service_type:
                continue
            if '.' in d['name']:
                sd.service_instance = d['name'].split('.')[1]
            else:
                sd.service_instance = host  # e.g., crash
            if service_id and service_id != sd.service_instance:
                continue
            sd.nodename = host
            sd.container_id = d['container_id']
            sd.version = d['version']
            sd.status_desc = d['state']
            # Map the ceph-daemon state string onto the orchestrator
            # status enum (1 running, 0 inactive, -1 failed/unknown).
            sd.status = {
                'running': 1,
                'inactive': 0,
                'error': -1,
                'unknown': -1,
            }[d['state']]
            result.append(sd)
    return orchestrator.TrivialReadCompletion(result)
def run(all_hosts):
    # type: (List[orchestrator.HostSpec]) -> None
    """Validate the drive group and check its placement matches some host."""
    drive_group.validate()

    def _lookup(label=None, as_hostspec=False):
        # Hand back full HostSpecs or bare hostnames, as requested.
        return all_hosts if as_hostspec else [h.hostname for h in all_hosts]

    if not drive_group.placement.filter_matching_hosts(_lookup):
        raise orchestrator.OrchestratorValidationError('failed to match')
def add_host(self, host):
    """Test stub: certain magic host names raise canned exceptions.

    Lets callers exercise each error-handling path; any other host name
    is accepted (it must simply be a string).

    :param host: hostname to "add"
    :raises orchestrator.OrchestratorValidationError: host == 'raise_no_support'
    :raises ZeroDivisionError: host == 'raise_bug'
    :raises NotImplementedError: host == 'raise_not_implemented'
    :raises orchestrator.NoOrchestrator: host == 'raise_no_orchestrator'
    :raises ImportError: host == 'raise_import_error'
    """
    if host == 'raise_no_support':
        raise orchestrator.OrchestratorValidationError(
            "MON count must be either 1, 3 or 5")
    if host == 'raise_bug':
        raise ZeroDivisionError()
    if host == 'raise_not_implemented':
        raise NotImplementedError()
    if host == 'raise_no_orchestrator':
        raise orchestrator.NoOrchestrator()
    if host == 'raise_import_error':
        raise ImportError("test_orchestrator not enabled")
    # six.string_types is (str,) on Python 3; use the builtin directly,
    # consistent with the HostSpec-based variant of this method.
    assert isinstance(host, str)
def _update_mons(self, num=None, hosts=None, label=None):
    """Handle the CLI request to update mon count/placement.

    :param num: desired number of mons
    :param hosts: explicit host placements (list); None means none given
    :param label: host label to place mons on
    :raises orchestrator.OrchestratorValidationError: if no placement info
    :return: HandleCommandResult with the completion's result string
    """
    # Fix: avoid the shared mutable default argument ([]).
    hosts = [] if hosts is None else hosts
    if not num and not hosts and not label:
        # Improve Error message. Point to parse_host_spec examples
        raise orchestrator.OrchestratorValidationError(
            "Mons need a placement spec. (num, host, network, name(opt))")
    placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts)
    placement.validate()
    spec = orchestrator.ServiceSpec(placement=placement)
    completion = self.update_mons(spec)
    # Block until the backend has processed the request, then surface
    # any stored exception to the caller.
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def add_host(self, spec):
    # type: (orchestrator.HostSpec) -> None
    """Test stub: magic hostnames in *spec* raise canned exceptions.

    Any other hostname is accepted, provided it is a string.
    """
    host = spec.hostname
    if host == 'raise_no_support':
        raise orchestrator.OrchestratorValidationError(
            "MON count must be either 1, 3 or 5")
    elif host == 'raise_bug':
        raise ZeroDivisionError()
    elif host == 'raise_not_implemented':
        raise NotImplementedError()
    elif host == 'raise_no_orchestrator':
        raise orchestrator.NoOrchestrator()
    elif host == 'raise_import_error':
        raise ImportError("test_orchestrator not enabled")
    assert isinstance(host, str)
def _update_mons(self, num=None, hosts=None, label=None):
    """Handle the CLI request to update mon placement.

    :param num: desired number of mons (currently requires hosts/label too)
    :param hosts: explicit host placements (list); None means none given
    :param label: host label to place mons on
    :raises orchestrator.OrchestratorValidationError: if neither hosts
        nor label is given
    :return: HandleCommandResult with the completion's result string
    """
    # Fix: avoid the shared mutable default argument ([]).
    hosts = [] if hosts is None else hosts
    placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts)
    if not hosts and not label:
        # Improve Error message. Point to parse_host_spec examples
        raise orchestrator.OrchestratorValidationError(
            "Mons need a host spec. (host, network, name(opt))")
    # TODO: Scaling without a HostSpec doesn't work right now.
    # we need network autodetection for that.
    # placement = orchestrator.PlacementSpec(count=num)
    placement.validate()
    spec = orchestrator.StatefulServiceSpec(placement=placement)
    completion = self.update_mons(spec)
    # Block until the backend has processed the request, then surface
    # any stored exception to the caller.
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())