def _service_add(self, cmd):
    """Handle the generic 'service add' command for osd/mds/rgw.

    :param cmd: command dict with 'svc_type' and a type-specific 'svc_arg'
                (osd: "<node>:<device>"; mds: fs name; rgw: store name).
    :return: HandleCommandResult; -EINVAL for a malformed OSD device spec.
    :raises NotImplementedError: for any unrecognized service type.
    """
    svc_type = cmd['svc_type']
    if svc_type == "osd":
        device_spec = cmd['svc_arg']
        try:
            node_name, block_device = device_spec.split(":")
        except ValueError:
            # A spec without exactly one ':' makes the two-target unpack
            # raise ValueError.  The original code caught TypeError, which
            # str.split/unpacking never raise here, so malformed specs
            # crashed instead of producing this error message.
            return HandleCommandResult(
                -errno.EINVAL,
                stderr="Invalid device spec, should be <node>:<device>")

        spec = orchestrator.OsdCreationSpec()
        spec.node = node_name
        spec.format = "bluestore"
        spec.drive_group = orchestrator.DriveGroupSpec([block_device])

        completion = self._oremote("create_osds", spec)
        self._wait([completion])
        return HandleCommandResult()
    elif svc_type in ("mds", "rgw"):
        # Both are stateless services created the same way; only the
        # meaning of svc_arg differs (filesystem name vs. store name).
        spec = orchestrator.StatelessServiceSpec()
        spec.name = cmd['svc_arg']
        completion = self._oremote("add_stateless_service", svc_type, spec)
        self._wait([completion])
        return HandleCommandResult()
    else:
        raise NotImplementedError(svc_type)
def _nfs_update(self, svc_id, num):
    """Scale the NFS service *svc_id* to *num* instances and wait."""
    service_spec = orchestrator.StatelessServiceSpec()
    service_spec.name = svc_id
    service_spec.count = num

    completion = self.update_stateless_service("nfs", service_spec)
    self._orchestrator_wait([completion])
    return HandleCommandResult()
def _nfs_add(self, svc_arg, pool, namespace=None):
    """Create an NFS service backed by *pool* (optionally within *namespace*)."""
    extended = {"pool": pool}
    if namespace is not None:
        extended["namespace"] = namespace

    nfs_spec = orchestrator.StatelessServiceSpec()
    nfs_spec.name = svc_arg
    nfs_spec.extended = extended
    return self._add_stateless_svc("nfs", nfs_spec)
def _rbd_mirror_add(self, num=None, hosts=None):
    """Deploy rbd-mirror daemon(s) with the given count/host placement."""
    placement = orchestrator.PlacementSpec(hosts=hosts, count=num)
    mirror_spec = orchestrator.StatelessServiceSpec(None, placement=placement)

    completion = self.add_rbd_mirror(mirror_spec)
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _cmd_fs_volume_create(self, inbuf, cmd):
    """Create a CephFS volume: its pools, the filesystem, and MDS daemons.

    :param inbuf: unused command input buffer.
    :param cmd: command dict with 'name' (volume id) and optional 'size'.
    :return: (retcode, stdout, stderr) mon-command style tuple; the first
             failing mon command's result is returned unchanged.
    """
    vol_id = cmd['name']
    # TODO: validate name against any rules for pool/fs names
    # (...are there any?)
    size = cmd.get('size', None)  # NOTE(review): read but never used — see quota TODO below

    base_name = self._pool_base_name(vol_id)
    mdp_name, dp_name = self._pool_names(base_name)

    def create_pool(pool_name):
        # Both pools start with the same small pg count; growing them is
        # left to later tuning.  Factored out of the two original
        # copy-pasted mon_command calls.
        return self.mon_command({
            'prefix': 'osd pool create',
            'pool': pool_name,
            'pg_num': 8
        })

    for pool_name in (mdp_name, dp_name):
        r, outb, outs = create_pool(pool_name)
        if r != 0:
            return r, outb, outs

    # Create a filesystem
    # ====================
    r, outb, outs = self.mon_command({
        'prefix': 'fs new',
        'fs_name': vol_id,
        'metadata': mdp_name,
        'data': dp_name
    })
    if r != 0:
        self.log.error("Filesystem creation error: {0} {1} {2}".format(
            r, outb, outs))
        return r, outb, outs

    # TODO: apply quotas to the filesystem root

    # Create an MDS cluster
    # =====================
    spec = orchestrator.StatelessServiceSpec()
    spec.name = vol_id
    try:
        completion = self.add_stateless_service("mds", spec)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        # No orchestrator (or it refused): the volume itself is still
        # usable, so report success with a caveat rather than failing.
        return 0, "", "Volume created successfully (no MDS daemons created)"
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        self.log.exception("Failed to create MDS daemons")
        return -errno.EINVAL, "", str(e)

    return 0, "", ""
def _mds_update(self, fs_name, num, hosts=None):
    """Adjust the MDS daemon count/placement for filesystem *fs_name*."""
    # A falsy count (None or 0) is treated as 1, matching the original
    # 'num or 1' behavior.
    mds_spec = orchestrator.StatelessServiceSpec(
        fs_name,
        placement=orchestrator.PlacementSpec(nodes=hosts),
        count=num if num else 1)

    completion = self.update_mds(mds_spec)
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _nfs_add(self, cmd):
    """Create an NFS cluster from a command dict.

    :param cmd: dict with 'svc_arg' (cluster name), 'pool', and an
                optional 'namespace'.
    :return: result of the shared stateless-service add path.
    """
    cluster_name = cmd['svc_arg']
    pool = cmd['pool']
    ns = cmd.get('namespace', None)

    spec = orchestrator.StatelessServiceSpec()
    spec.name = cluster_name
    spec.extended = {"pool": pool}
    # Identity check: '!= None' goes through __ne__ and is non-idiomatic
    # (PEP 8 mandates 'is not None' for None comparisons).
    if ns is not None:
        spec.extended["namespace"] = ns
    return self._add_stateless_svc("nfs", spec)
def _mds_update(self, fs_name, num=None, label=None, hosts=None):
    """Update MDS placement for *fs_name* by label, count, and/or hosts.

    :param fs_name: name of the filesystem whose MDS service to update.
    :param num: desired daemon count, or None.
    :param label: placement label, or None.
    :param hosts: explicit host list; defaults to an empty list.
    :return: HandleCommandResult with the completion's result string.
    """
    # 'hosts=None' replaces the original mutable default 'hosts=[]'
    # (shared across calls — a classic Python pitfall); normalize to a
    # fresh empty list here so PlacementSpec sees the same value.
    placement = orchestrator.PlacementSpec(
        label=label, count=num,
        hosts=hosts if hosts is not None else [])
    placement.validate()

    spec = orchestrator.StatelessServiceSpec(fs_name, placement=placement)
    completion = self.update_mds(spec)
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def create_mds(self, fs_name):
    """Ask the orchestrator to start MDS daemons for *fs_name*.

    Returns a (retcode, stdout, stderr) mon-command style tuple.
    """
    mds_spec = orchestrator.StatelessServiceSpec(fs_name)
    try:
        completion = self.mgr.add_mds(mds_spec)
        self.mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        return 0, "", "Volume created successfully (no MDS daemons created)"
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to create MDS daemons")
        return -errno.EINVAL, "", str(e)
    else:
        return 0, "", ""
def _rgw_add(self, svc_arg=None, inbuf=None):
    """Create an RGW service from a JSON spec (-i file) or a zone name.

    :param svc_arg: zone name, used when no JSON input is supplied.
    :param inbuf: JSON text describing an RGWSpec.
    :return: result of the shared stateless-service add path, or -EINVAL
             with usage text on bad/missing input.
    """
    usage = """
Usage:
  ceph orchestrator rgw add -i <json_file>
  ceph orchestrator rgw add <zone_name>
"""
    if inbuf:
        try:
            rgw_spec = orchestrator.RGWSpec.from_json(json.loads(inbuf))
        except ValueError as e:
            msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)
    elif svc_arg:
        rgw_spec = orchestrator.RGWSpec()
        # NOTE(review): 'zone_name' is assigned here but 'rgw_zone' is read
        # below — confirm against RGWSpec whether these are the same
        # attribute or this branch leaves rgw_zone unset.
        rgw_spec.zone_name = svc_arg
    else:
        # Original fell through here with rgw_spec unbound, crashing with
        # UnboundLocalError; reject the call explicitly instead.
        return HandleCommandResult(-errno.EINVAL, stderr=usage)

    spec = orchestrator.StatelessServiceSpec()
    spec.service_spec = rgw_spec
    spec.name = rgw_spec.rgw_zone
    return self._add_stateless_svc("rgw", spec)
def _rgw_add(self, svc_arg):
    """Create an RGW service named *svc_arg*."""
    rgw_spec = orchestrator.StatelessServiceSpec()
    rgw_spec.name = svc_arg
    return self._add_stateless_svc("rgw", rgw_spec)
def _mds_add(self, cmd):
    """Create an MDS service named by cmd['svc_arg']."""
    mds_spec = orchestrator.StatelessServiceSpec()
    mds_spec.name = cmd['svc_arg']
    return self._add_stateless_svc("mds", mds_spec)
def _mds_add(self, svc_arg):
    """Add MDS daemon(s) for *svc_arg* and wait for the orchestrator."""
    mds_spec = orchestrator.StatelessServiceSpec(svc_arg)

    completion = self.add_mds(mds_spec)
    self._orchestrator_wait([completion])
    orchestrator.raise_if_exception(completion)
    return HandleCommandResult(stdout=completion.result_str())
def _cmd_fs_volume_rm(self, inbuf, cmd): vol_name = cmd['vol_name'] # Tear down MDS daemons # ===================== spec = orchestrator.StatelessServiceSpec() spec.name = vol_name try: completion = self._oremote("rm_stateless_service", "mds", spec) self._wait([completion]) except ImportError: self.log.warning("No orchestrator, not tearing down MDS daemons") except Exception as e: # Don't let detailed orchestrator exceptions (python backtraces) # bubble out to the user self.log.exception("Failed to tear down MDS daemons") return -errno.EINVAL, "", str(e) if self._volume_exists(vol_name): # In case orchestrator didn't tear down MDS daemons cleanly, or # there was no orchestrator, we force the daemons down. r, out, err = self.mon_command({ 'prefix': 'fs set', 'fs_name': vol_name, 'var': 'cluster_down', 'val': 'true' }) if r != 0: return r, out, err for mds_name in self._volume_get_mds_daemon_names(vol_name): r, out, err = self.mon_command({ 'prefix': 'mds fail', 'role_or_gid': mds_name }) if r != 0: return r, out, err # Delete CephFS filesystem # ========================= r, out, err = self.mon_command({ 'prefix': 'fs rm', 'fs_name': vol_name, 'yes_i_really_mean_it': True, }) if r != 0: return r, out, err else: self.log.warning( "Filesystem already gone for volume '{0}'".format(vol_name)) # Delete pools # ============ base_name = self._pool_base_name(vol_name) mdp_name, dp_name = self._pool_names(base_name) r, out, err = self.mon_command({ 'prefix': 'osd pool rm', 'pool': mdp_name, 'pool2': mdp_name, 'yes_i_really_really_mean_it': True, }) if r != 0: return r, out, err r, out, err = self.mon_command({ 'prefix': 'osd pool rm', 'pool': dp_name, 'pool2': dp_name, 'yes_i_really_really_mean_it': True, }) if r != 0: return r, out, err return 0, "", ""