Example #1
    def _list_services(self, host=None, svc_type=None, svc_id=None, format='plain'):
        # XXX this is kind of confusing for people because in the orchestrator
        # context the service ID for MDS is the filesystem ID, not the daemon ID

        completion = self.describe_service(svc_type, svc_id, host)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        services = completion.result

        # Sort the list for display
        services.sort(key=lambda s: (s.service_type, s.nodename, s.service_instance))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format == 'json':
            data = [s.to_json() for s in services]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            lines = []
            for s in services:
                if s.service is None:
                    service_id = s.service_instance
                else:
                    service_id = "{0}.{1}".format(s.service, s.service_instance)

                lines.append("{0} {1} {2} {3} {4} {5}".format(
                    s.service_type,
                    service_id,
                    s.nodename,
                    s.container_id,
                    s.version,
                    s.rados_config_location))

            return HandleCommandResult(stdout="\n".join(lines))
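Nearly every handler in this collection follows the same completion idiom: submit the call, block in `_orchestrator_wait`, re-raise any stored exception via `orchestrator.raise_if_exception`, and only then read `completion.result`. A minimal sketch of that idiom as a standalone helper (the helper name `_wait_and_unwrap` is illustrative, not part of the orchestrator API):

    def _wait_and_unwrap(module, completion):
        # Block until the orchestrator completion has finished,
        # surface any stored exception, then unwrap the result.
        module._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return completion.result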
Example #2
    def _update_mons(self, num, hosts=None):
        hosts = hosts if hosts is not None else []

        if num <= 0:
            return HandleCommandResult(-errno.EINVAL,
                    stderr="Invalid number of mons: require {} > 0".format(num))

        def split_host(host):
            """Split host into host and network parts"""
            # TODO: stricter validation
            parts = host.split(":")
            if len(parts) == 1:
                return (parts[0], None)
            elif len(parts) == 2:
                return (parts[0], parts[1])
            else:
                raise RuntimeError("Invalid host specification: "
                        "'{}'".format(host))

        if hosts:
            try:
                hosts = list(map(split_host, hosts))
            except Exception as e:
                msg = "Failed to parse host list: '{}': {}".format(hosts, e)
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.update_mons(num, hosts)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=str(completion.result))
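For reference, `split_host` returns a `(host, network)` tuple; called on hypothetical inputs it would behave like this:

    # Illustrative calls only -- split_host is a helper nested inside _update_mons above.
    split_host("mon1")              # -> ("mon1", None)
    split_host("mon1:10.0.0.0/24")  # -> ("mon1", "10.0.0.0/24")
    split_host("a:b:c")             # raises RuntimeError: Invalid host specification: 'a:b:c'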
Example #3
 def _osd_rm(self, svc_id):
     # type: (List[str]) -> HandleCommandResult
     """
     Remove OSDs
     :param svc_id: IDs of the OSDs to remove
     """
     completion = self.remove_osds(svc_id)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=str(completion.result))
Example #4
    def reload_service(self, service_type, service_ids):
        if not isinstance(service_ids, list):
            service_ids = [service_ids]

        completion_list = [self.service_action('reload', service_type,
                                               service_name, service_id)
                           for service_name, service_id in service_ids]
        self._orchestrator_wait(completion_list)
        for c in completion_list:
            raise_if_exception(c)
Example #5
    def _update_mgrs(self, num, hosts=None):
        hosts = hosts if hosts is not None else []

        if num <= 0:
            return HandleCommandResult(-errno.EINVAL,
                    stderr="Invalid number of mgrs: require {} > 0".format(num))

        completion = self.update_mgrs(num, hosts)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=str(completion.result))
Example #6
    def _create_osd(self, svc_arg=None, inbuf=None):
        # type: (Optional[str], Optional[str]) -> HandleCommandResult
        """Create one or more OSDs"""

        usage = """
Usage:
  ceph orchestrator osd create -i <json_file>
  ceph orchestrator osd create host:device1,device2,...
"""

        if inbuf:
            try:
                drive_group = orchestrator.DriveGroupSpec.from_json(json.loads(inbuf))
            except ValueError as e:
                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

        elif svc_arg:
            try:
                node_name, block_device = svc_arg.split(":")
                block_devices = block_device.split(',')
            except (TypeError, KeyError, ValueError):
                msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

            devs = orchestrator.DeviceSelection(paths=block_devices)
            drive_group = orchestrator.DriveGroupSpec(node_name, data_devices=devs)
        else:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        # TODO: Remove this and make the orchestrator composable
        #   Like a future or so.
        host_completion = self.get_hosts()
        self._orchestrator_wait([host_completion])
        orchestrator.raise_if_exception(host_completion)
        all_hosts = [h.name for h in host_completion.result]

        try:
            drive_group.validate(all_hosts)
        except orchestrator.DriveGroupValidationError as e:
            return HandleCommandResult(-errno.EINVAL, stderr=str(e))

        completion = self.create_osds(drive_group, all_hosts)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        self.log.warning(str(completion.result))
        return HandleCommandResult(stdout=str(completion.result))
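The `host:device1,device2,...` form is parsed with two plain `split` calls; for a hypothetical argument the intermediate values look like this:

    # Illustrative only, assuming svc_arg = "node1:/dev/sdb,/dev/sdc"
    node_name, block_device = "node1:/dev/sdb,/dev/sdc".split(":")
    # node_name == "node1", block_device == "/dev/sdb,/dev/sdc"
    block_devices = block_device.split(',')
    # block_devices == ["/dev/sdb", "/dev/sdc"]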
Example #7
    def _update_mons(self, num, hosts=None):
        hosts = hosts if hosts is not None else []

        if num <= 0:
            return HandleCommandResult(-errno.EINVAL,
                                       stderr="Invalid number of mons: require {} > 0".format(num))

        if hosts:
            try:
                hosts = [orchestrator.parse_host_specs(host_spec)
                         for host_spec in hosts]
            except Exception as e:
                msg = "Failed to parse host list: '{}': {}".format(hosts, e)
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.update_mons(num, hosts)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
Example #8
 def _get_hosts(self, format='plain'):
     completion = self.get_hosts()
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     if format == 'json':
         hosts = [dict(host=node.name, labels=node.labels)
                  for node in completion.result]
         output = json.dumps(hosts)
     else:
         table = PrettyTable(
             ['HOST', 'LABELS'],
             border=False)
         table.align = 'l'
         table.left_padding_width = 0
         table.right_padding_width = 1
         for node in completion.result:
             table.add_row((node.name, ' '.join(node.labels)))
         output = table.get_string()
     return HandleCommandResult(stdout=output)
Example #9
 def _nfs_add(self,
              svc_arg,
              pool,
              namespace=None,
              num=None,
              label=None,
              hosts=[]):
     spec = orchestrator.NFSServiceSpec(
         svc_arg,
         pool=pool,
         namespace=namespace,
         placement=orchestrator.PlacementSpec(label=label,
                                              hosts=hosts,
                                              count=num),
     )
     spec.validate_add()
     completion = self.add_nfs(spec)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #10
    def _update_mons(self, num=None, hosts=[], label=None):

        placement = orchestrator.PlacementSpec(label=label,
                                               count=num,
                                               hosts=hosts)
        if not hosts and not label:
            # Improve Error message. Point to parse_host_spec examples
            raise orchestrator.OrchestratorValidationError(
                "Mons need a host spec. (host, network, name(opt))")
            # TODO: Scaling without a HostSpec doesn't work right now.
            # we need network autodetection for that.
            # placement = orchestrator.PlacementSpec(count=num)
        placement.validate()

        spec = orchestrator.StatefulServiceSpec(placement=placement)

        completion = self.update_mons(spec)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
Example #11
def wait(m, c):
    # type: (CephadmOrchestrator, Completion) -> Any
    m.process([c])

    try:
        import pydevd  # if in debugger
        while True:    # don't timeout
            if c.is_finished:
                raise_if_exception(c)
                return c.result
            time.sleep(0.1)
    except ImportError:  # not in debugger
        for i in range(30):
            if i % 10 == 0:
                m.process([c])
            if c.is_finished:
                raise_if_exception(c)
                return c.result
            time.sleep(0.1)
    assert False, "timeout " + str(c._state)
Example #12
    def _list_devices(self, host=None, format='plain', refresh=False):
        # type: (Optional[List[str]], str, bool) -> HandleCommandResult
        """
        Provide information about storage devices present in cluster hosts

        Note: this does not have to be completely synchronous. Slightly out of
        date hardware inventory is fine as long as hardware ultimately appears
        in the output of this command.
        """
        nf = orchestrator.InventoryFilter(nodes=host) if host else None

        completion = self.get_inventory(node_filter=nf, refresh=refresh)

        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

        if format == 'json':
            data = [n.to_json() for n in completion.result]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            out = []

            for host in completion.result: # type: orchestrator.InventoryNode
                out.append('Host {}:'.format(host.name))
                table = PrettyTable(
                    ['Path', 'Type', 'Size', 'Available', 'Ceph Device ID', 'Reject Reasons'],
                    border=False)
                table._align['Path'] = 'l'
                for d in host.devices.devices:  # type: Device
                    table.add_row(
                        (
                            d.path,
                            d.human_readable_type,
                            format_bytes(d.sys_api.get('size', 0), 5, colored=False),
                            d.available,
                            d.device_id,
                            ', '.join(d.rejected_reasons)
                        )
                    )
                out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))
Example #13
 def list_daemons(self,
                  service_name,
                  daemon_type=None,
                  daemon_id=None,
                  host=None,
                  refresh=True):
     completion = self.mgr.list_daemons(service_name,
                                        daemon_type,
                                        daemon_id=daemon_id,
                                        host=host,
                                        refresh=refresh)
     return orchestrator.raise_if_exception(completion)
Example #14
    def self_test(self):
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)

        e = self.remote('selftest', 'remote_from_orchestrator_cli_self_test',
                        "ZeroDivisionError")
        try:
            orchestrator.raise_if_exception(e)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello', 'world')

        e = self.remote('selftest', 'remote_from_orchestrator_cli_self_test',
                        "OrchestratorError")
        try:
            orchestrator.raise_if_exception(e)
            assert False
        except orchestrator.OrchestratorError as e:
            assert e.args == ('hello', 'world')
Example #15
    def _rgw_add(self, realm_name, zone_name, num=1, hosts=None, inbuf=None):
        usage = """
Usage:
  ceph orchestrator rgw add -i <json_file>
  ceph orchestrator rgw add <realm_name> <zone_name>
        """
        if inbuf:
            try:
                rgw_spec = orchestrator.RGWSpec.from_json(json.loads(inbuf))
            except ValueError as e:
                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)
        else:
            rgw_spec = orchestrator.RGWSpec(rgw_realm=realm_name,
                                            rgw_zone=zone_name,
                                            placement=orchestrator.PlacementSpec(
                                                hosts=hosts, count=num))

        completion = self.add_rgw(rgw_spec)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
Example #16
 def _call_orch_apply_nfs(self, placement, virtual_ip=None):
     if virtual_ip:
         # nfs + ingress
         # run NFS on non-standard port
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=self.cluster_id,
             pool=self.pool_name,
             namespace=self.pool_ns,
             placement=PlacementSpec.from_string(placement),
             # use non-default port so we don't conflict with ingress
             port=12049)
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
         ispec = IngressSpec(
             service_type='ingress',
             service_id='nfs.' + self.cluster_id,
             backend_service='nfs.' + self.cluster_id,
             frontend_port=2049,  # default nfs port
             monitor_port=9049,
             virtual_ip=virtual_ip)
         completion = self.mgr.apply_ingress(ispec)
         orchestrator.raise_if_exception(completion)
     else:
         # standalone nfs
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=self.cluster_id,
             pool=self.pool_name,
             namespace=self.pool_ns,
             placement=PlacementSpec.from_string(placement))
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
Example #17
    def _list_services(self,
                       host=None,
                       svc_type=None,
                       svc_id=None,
                       format='plain',
                       refresh=False):
        # XXX this is kind of confusing for people because in the orchestrator
        # context the service ID for MDS is the filesystem ID, not the daemon ID

        completion = self.describe_service(svc_type,
                                           svc_id,
                                           host,
                                           refresh=refresh)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        services = completion.result

        # Sort the list for display
        services.sort(
            key=lambda s: (s.service_type, s.nodename, s.service_instance))

        if len(services) == 0:
            return HandleCommandResult(stdout="No services reported")
        elif format == 'json':
            data = [s.to_json() for s in services]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            lines = []
            for s in services:
                if s.service is None:
                    service_id = s.service_instance
                else:
                    service_id = "{0}.{1}".format(s.service,
                                                  s.service_instance)

                lines.append("{0} {1} {2} {3} {4} {5}".format(
                    s.service_type, service_id, s.nodename, s.container_id,
                    s.version, s.rados_config_location))

            return HandleCommandResult(stdout="\n".join(lines))
Example #18
    def _rgw_add(self, zone_name=None, inbuf=None):
        usage = """
Usage:
  ceph orchestrator rgw add -i <json_file>
  ceph orchestrator rgw add <zone_name>
        """

        if inbuf:
            try:
                rgw_spec = orchestrator.RGWSpec.from_json(json.loads(inbuf))
            except ValueError as e:
                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)
        elif zone_name:
            rgw_spec = orchestrator.RGWSpec(rgw_zone=zone_name)
        else:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        completion = self.add_rgw(rgw_spec)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
Example #19
    def self_test(self):
        old_orch = self._select_orchestrator()
        self._set_backend('')
        assert self._select_orchestrator() is None
        self._set_backend(old_orch)

        e1 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "ZeroDivisionError")
        try:
            orchestrator.raise_if_exception(e1)
            assert False
        except ZeroDivisionError as e:
            assert e.args == ('hello', 'world')

        e2 = self.remote('selftest', 'remote_from_orchestrator_cli_self_test', "OrchestratorError")
        try:
            orchestrator.raise_if_exception(e2)
            assert False
        except orchestrator.OrchestratorError as e:
            assert e.args == ('hello', 'world')

        c = orchestrator.TrivialReadCompletion(result=True)
        assert c.has_result
Example #20
    def _list_devices(self, host=None, format='plain', refresh=False):
        # type: (Optional[List[str]], str, bool) -> HandleCommandResult
        """
        Provide information about storage devices present in cluster hosts

        Note: this does not have to be completely synchronous. Slightly out of
        date hardware inventory is fine as long as hardware ultimately appears
        in the output of this command.
        """
        nf = orchestrator.InventoryFilter(nodes=host) if host else None

        completion = self.get_inventory(node_filter=nf, refresh=refresh)

        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

        if format == 'json':
            data = [n.to_json() for n in completion.result]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            out = []

            table = PrettyTable([
                'HOST', 'PATH', 'TYPE', 'SIZE', 'DEVICE', 'AVAIL',
                'REJECT REASONS'
            ],
                                border=False)
            table.align = 'l'
            table._align['SIZE'] = 'r'
            table.left_padding_width = 0
            table.right_padding_width = 1
            for host_ in completion.result:  # type: orchestrator.InventoryNode
                for d in host_.devices.devices:  # type: Device
                    table.add_row((host_.name, d.path, d.human_readable_type,
                                   format_bytes(d.sys_api.get('size', 0),
                                                5), d.device_id, d.available,
                                   ', '.join(d.rejected_reasons)))
            out.append(table.get_string())
            return HandleCommandResult(stdout='\n'.join(out))
Example #21
    def verify_and_manage_mds_instance(self, fs_map: dict,
                                       fs_name: str) -> None:
        assert fs_map is not None

        try:
            svc = self.get_service(fs_name)
            if not svc:
                self.log.info(f"fs {fs_name}: no service defined; skipping")
                return
            if not svc.spec.placement.count:
                self.log.info(
                    f"fs {fs_name}: service does not specify a count; skipping"
                )
                return

            standbys_required = self.get_required_standby_count(
                fs_map, fs_name)
            max_mds = self.get_required_max_mds(fs_map, fs_name)
            want = max_mds + standbys_required

            self.log.info(f"fs {fs_name}: "
                          f"max_mds={max_mds} "
                          f"standbys_required={standbys_required}, "
                          f"count={svc.spec.placement.count}")

            if want == svc.spec.placement.count:
                return

            self.log.info(
                f"fs {fs_name}: adjusting daemon count from {svc.spec.placement.count} to {want}"
            )
            newspec = self.update_daemon_count(svc.spec, fs_name, want)
            completion = self.apply_mds(newspec)
            orchestrator.raise_if_exception(completion)
        except orchestrator.OrchestratorError as e:
            self.log.exception(
                f"fs {fs_name}: exception while updating service: {e}")
            pass
Example #22
def delete_volume(mgr, volname, metadata_pool, data_pools):
    """
    delete the given volume (tear down MDS, remove filesystem, remove pools)
    """
    # Tear down MDS daemons
    try:
        completion = mgr.remove_service('mds.' + volname)
        mgr._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
    except (ImportError, orchestrator.OrchestratorError):
        log.warning("OrchestratorError, not tearing down MDS daemons")
    except Exception as e:
        # Don't let detailed orchestrator exceptions (python backtraces)
        # bubble out to the user
        log.exception("Failed to tear down MDS daemons")
        return -errno.EINVAL, "", str(e)

    # In case orchestrator didn't tear down MDS daemons cleanly, or
    # there was no orchestrator, we force the daemons down.
    if volume_exists(mgr, volname):
        r, outb, outs = remove_filesystem(mgr, volname)
        if r != 0:
            return r, outb, outs
    else:
        err = "Filesystem not found for volume '{0}'".format(volname)
        log.warning(err)
        return -errno.ENOENT, "", err
    r, outb, outs = remove_pool(mgr, metadata_pool)
    if r != 0:
        return r, outb, outs

    for data_pool in data_pools:
        r, outb, outs = remove_pool(mgr, data_pool)
        if r != 0:
            return r, outb, outs
    result_str = "metadata pool: {0} data pool: {1} removed".format(
        metadata_pool, str(data_pools))
    return r, result_str, ""
Example #23
    def delete_volume(self, volname, confirm):
        """
        delete the given volume (tear down MDS, remove filesystem)
        """
        if self.stopping.isSet():
            return -errno.ESHUTDOWN, "", "shutdown in progress"

        self.purge_queue.cancel_purge_job(volname)
        self.connection_pool.del_fs_handle(volname, wait=True)
        # Tear down MDS daemons
        try:
            completion = self.mgr.remove_mds(volname)
            self.mgr._orchestrator_wait([completion])
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            log.warning("OrchestratorError, not tearing down MDS daemons")
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            log.exception("Failed to tear down MDS daemons")
            return -errno.EINVAL, "", str(e)

        # In case orchestrator didn't tear down MDS daemons cleanly, or
        # there was no orchestrator, we force the daemons down.
        if self.volume_exists(volname):
            r, outb, outs = self.remove_filesystem(volname, confirm)
            if r != 0:
                return r, outb, outs
        else:
            err = "Filesystem not found for volume '{0}'".format(volname)
            log.warning(err)
            return -errno.ENOENT, "", err
        metadata_pool, data_pool = self.gen_pool_names(volname)
        r, outb, outs = self.remove_pool(metadata_pool)
        if r != 0:
            return r, outb, outs
        return self.remove_pool(data_pool)
Example #24
    def _list_devices(self, host=None, format='plain', refresh=False):
        # type: (Optional[List[str]], str, bool) -> HandleCommandResult
        """
        Provide information about storage devices present in cluster hosts

        Note: this does not have to be completely synchronous. Slightly out of
        date hardware inventory is fine as long as hardware ultimately appears
        in the output of this command.
        """
        nf = orchestrator.InventoryFilter(nodes=host) if host else None

        completion = self.get_inventory(node_filter=nf, refresh=refresh)

        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

        if format == 'json':
            data = [n.to_json() for n in completion.result]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            # Return a human readable version
            result = ""

            for inventory_node in completion.result:
                result += "Host {0}:\n".format(inventory_node.name)

                if inventory_node.devices:
                    result += inventory_node.devices[0].pretty_print(
                        only_header=True)
                else:
                    result += "No storage devices found"

                for d in inventory_node.devices:
                    result += d.pretty_print()
                result += "\n"

            return HandleCommandResult(stdout=result)
Example #25
    def _list_devices(self, host=None, format='plain', refresh=False):
        # type: (Optional[List[str]], str, bool) -> HandleCommandResult
        """
        Provide information about storage devices present in cluster hosts

        Note: this does not have to be completely synchronous. Slightly out of
        date hardware inventory is fine as long as hardware ultimately appears
        in the output of this command.
        """
        nf = orchestrator.InventoryFilter(nodes=host) if host else None

        completion = self.get_inventory(node_filter=nf, refresh=refresh)

        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)

        if format == 'json':
            data = [n.to_json() for n in completion.result]
            return HandleCommandResult(stdout=json.dumps(data))
        else:
            # Return a human readable version
            result = ""

            for inventory_node in completion.result:
                result += "Host {0}:\n".format(inventory_node.name)

                if inventory_node.devices:
                    result += inventory_node.devices[0].pretty_print(only_header=True)
                else:
                    result += "No storage devices found"

                for d in inventory_node.devices:
                    result += d.pretty_print()
                result += "\n"

            return HandleCommandResult(stdout=result)
Example #26
    def _apply_drivegroups(self, ls: List[DriveGroupSpec]) -> List[str]:
        all_hosts = raise_if_exception(self.get_hosts())
        result_list: List[str] = []
        for drive_group in ls:
            matching_hosts = drive_group.placement.filter_matching_hosts(
                lambda label=None, as_hostspec=None: all_hosts)

            if not self.rook_cluster.node_exists(matching_hosts[0]):
                raise RuntimeError("Node '{0}' is not in the Kubernetes "
                                   "cluster".format(matching_hosts))

            # Validate whether cluster CRD can accept individual OSD
            # creations (i.e. not useAllDevices)
            if not self.rook_cluster.can_create_osd():
                raise RuntimeError("Rook cluster configuration does not "
                                   "support OSD creation.")
            result_list.append(
                self.rook_cluster.add_osds(drive_group, matching_hosts))
        return result_list
Example #27
    def _show_nfs_cluster_info(self, cluster_id: str) -> Dict[str, Any]:
        self._set_cluster_id(cluster_id)
        completion = self.mgr.list_daemons(daemon_type='nfs')
        orchestrator.raise_if_exception(completion)
        backends: List[Dict[str, Union[str, int]]] = []
        # Here completion.result is a list of DaemonDescription objects
        for cluster in completion.result:
            if self.cluster_id == cluster.service_id():
                try:
                    if cluster.ip:
                        ip = cluster.ip
                    else:
                        c = self.mgr.get_hosts()
                        orchestrator.raise_if_exception(c)
                        hosts = [
                            h for h in c.result
                            if h.hostname == cluster.hostname
                        ]
                        if hosts:
                            ip = resolve_ip(hosts[0].addr)
                        else:
                            # sigh
                            ip = resolve_ip(cluster.hostname)
                    backends.append({
                        "hostname": cluster.hostname,
                        "ip": ip,
                        "port": cluster.ports[0]
                    })
                except orchestrator.OrchestratorError:
                    continue

        r: Dict[str, Any] = {
            'virtual_ip': None,
            'backend': backends,
        }
        sc = self.mgr.describe_service(service_type='ingress')
        orchestrator.raise_if_exception(sc)
        for i in sc.result:
            spec = cast(IngressSpec, i.spec)
            if spec.backend_service == f'nfs.{cluster_id}':
                r['virtual_ip'] = i.virtual_ip.split('/')[0]
                if i.ports:
                    r['port'] = i.ports[0]
                    if len(i.ports) > 1:
                        r['monitor_port'] = i.ports[1]
        return r
Example #28
    def create_osds(self, drive_group):
        # type: (DriveGroupSpec) -> str
        """ Creates OSDs from a drive group specification.

        $: ceph orch osd create -i <dg.file>

        The drivegroup file must only contain one spec at a time.
        """

        targets = []  # type: List[str]
        if drive_group.data_devices and drive_group.data_devices.paths:
            targets += [d.path for d in drive_group.data_devices.paths]
        if drive_group.data_directories:
            targets += drive_group.data_directories

        all_hosts = raise_if_exception(self.get_hosts())

        matching_hosts = drive_group.placement.filter_matching_hosts(
            lambda label=None, as_hostspec=None: all_hosts)

        assert len(matching_hosts) == 1

        if not self.rook_cluster.node_exists(matching_hosts[0]):
            raise RuntimeError("Node '{0}' is not in the Kubernetes "
                               "cluster".format(matching_hosts))

        # Validate whether cluster CRD can accept individual OSD
        # creations (i.e. not useAllDevices)
        if not self.rook_cluster.can_create_osd():
            raise RuntimeError("Rook cluster configuration does not "
                               "support OSD creation.")

        return self.rook_cluster.add_osds(drive_group, matching_hosts)

        # TODO: this was the code to update the progress reference:
Example #29
 def _call_orch_apply_nfs(
     self,
     cluster_id: str,
     placement: Optional[str],
     virtual_ip: Optional[str] = None,
     port: Optional[int] = None,
 ) -> None:
     if not port:
         port = 2049  # default nfs port
     if virtual_ip:
         # nfs + ingress
         # run NFS on non-standard port
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             # use non-default port so we don't conflict with ingress
             port=10000 + port)  # semi-arbitrary, fix me someday
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
         ispec = IngressSpec(
             service_type='ingress',
             service_id='nfs.' + cluster_id,
             backend_service='nfs.' + cluster_id,
             frontend_port=port,
             monitor_port=7000 + port,  # semi-arbitrary, fix me someday
             virtual_ip=virtual_ip)
         completion = self.mgr.apply_ingress(ispec)
         orchestrator.raise_if_exception(completion)
     else:
         # standalone nfs
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             port=port)
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
     log.debug(
         "Successfully deployed nfs daemons with cluster id %s and placement %s",
         cluster_id, placement)
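The port layout in the ingress branch follows directly from the arithmetic above: the ingress front end listens on the requested NFS port, while the backing NFS daemons and the monitor endpoint are offset from it. For the default port this works out as sketched below (values derived from the code above):

    port = 2049                  # default nfs port
    frontend_port = port         # 2049: what clients connect to
    backend_port = 10000 + port  # 12049: NFS daemons behind the ingress
    monitor_port = 7000 + port   # 9049: ingress monitor port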
Example #30
 def _nfs_rm(self, svc_id):
     completion = self.remove_nfs(svc_id)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #31
 def _rgw_rm(self, name):
     completion = self.remove_rgw(name)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #32
    def _cmd_fs_volume_rm(self, inbuf, cmd):
        vol_name = cmd['vol_name']

        # Tear down MDS daemons
        # =====================
        try:
            completion = self.remove_stateless_service("mds", vol_name)
            self._orchestrator_wait([completion])
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            self.log.warning("OrchestratorError, not tearing down MDS daemons")
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            self.log.exception("Failed to tear down MDS daemons")
            return -errno.EINVAL, "", str(e)

        if self._volume_exists(vol_name):
            # In case orchestrator didn't tear down MDS daemons cleanly, or
            # there was no orchestrator, we force the daemons down.
            r, out, err = self.mon_command({
                'prefix': 'fs set',
                'fs_name': vol_name,
                'var': 'cluster_down',
                'val': 'true'
            })
            if r != 0:
                return r, out, err

            for mds_name in self._volume_get_mds_daemon_names(vol_name):
                r, out, err = self.mon_command({
                    'prefix': 'mds fail',
                    'role_or_gid': mds_name})
                if r != 0:
                    return r, out, err

            # Delete CephFS filesystem
            # =========================
            r, out, err = self.mon_command({
                'prefix': 'fs rm',
                'fs_name': vol_name,
                'yes_i_really_mean_it': True,
            })
            if r != 0:
                return r, out, err
        else:
            self.log.warning("Filesystem already gone for volume '{0}'".format(
                vol_name
            ))

        # Delete pools
        # ============
        base_name = self._pool_base_name(vol_name)
        mdp_name, dp_name = self._pool_names(base_name)

        r, out, err = self.mon_command({
            'prefix': 'osd pool rm',
            'pool': mdp_name,
            'pool2': mdp_name,
            'yes_i_really_really_mean_it': True,
        })
        if r != 0:
            return r, out, err

        r, out, err = self.mon_command({
            'prefix': 'osd pool rm',
            'pool': dp_name,
            'pool2': dp_name,
            'yes_i_really_really_mean_it': True,
        })
        if r != 0:
            return r, out, err

        return 0, "", ""
Example #33
 def _get_hosts(self):
     completion = self.get_hosts()
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     result = "\n".join(map(lambda node: node.name, completion.result))
     return HandleCommandResult(stdout=result)
Example #34
 def _remove_host(self, host):
     completion = self.remove_host(host)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #35
    def _cmd_fs_volume_create(self, inbuf, cmd):
        vol_id = cmd['name']
        # TODO: validate name against any rules for pool/fs names
        # (...are there any?)

        size = cmd.get('size', None)

        base_name = self._pool_base_name(vol_id)
        mdp_name, dp_name = self._pool_names(base_name)

        r, outb, outs = self.mon_command({
            'prefix': 'osd pool create',
            'pool': mdp_name,
            'pg_num': 16,
            'pg_num_min': 16,
        })
        if r != 0:
            return r, outb, outs

        # count fs metadata omap at 4x usual rate
        r, outb, outs = self.mon_command({
            'prefix': 'osd pool set',
            'pool': mdp_name,
            'var': "pg_autoscale_bias",
            'val': "4.0",
        })
        if r != 0:
            return r, outb, outs

        r, outb, outs = self.mon_command({
            'prefix': 'osd pool create',
            'pool': dp_name,
            'pg_num': 8
        })
        if r != 0:
            return r, outb, outs

        # Create a filesystem
        # ====================
        r, outb, outs = self.mon_command({
            'prefix': 'fs new',
            'fs_name': vol_id,
            'metadata': mdp_name,
            'data': dp_name
        })

        if r != 0:
            self.log.error("Filesystem creation error: {0} {1} {2}".format(
                r, outb, outs
            ))
            return r, outb, outs

        # TODO: apply quotas to the filesystem root

        # Create an MDS cluster
        # =====================
        spec = orchestrator.StatelessServiceSpec()
        spec.name = vol_id
        try:
            completion = self.add_stateless_service("mds", spec)
            self._orchestrator_wait([completion])
            orchestrator.raise_if_exception(completion)
        except (ImportError, orchestrator.OrchestratorError):
            return 0, "", "Volume created successfully (no MDS daemons created)"
        except Exception as e:
            # Don't let detailed orchestrator exceptions (python backtraces)
            # bubble out to the user
            self.log.exception("Failed to create MDS daemons")
            return -errno.EINVAL, "", str(e)

        return 0, "", ""
Example #36
 def _service_instance_action(self, action, svc_type, svc_id):
     completion = self.service_action(action, svc_type, service_id=svc_id)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult()
Example #37
 def inner(self, *args, **kwargs):
     completion = method(self, *args, **kwargs)
     raise_if_exception(completion)
     return completion.result
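The `inner` function shown above is only the body of what is presumably a decorator that unwraps orchestrator completions. A hedged sketch of the surrounding decorator (the name `completion_to_result` and the use of `functools.wraps` are assumptions, not taken from the source):

    import functools

    def completion_to_result(method):
        # Hypothetical outer decorator around the `inner` shown above.
        @functools.wraps(method)
        def inner(self, *args, **kwargs):
            completion = method(self, *args, **kwargs)
            raise_if_exception(completion)
            return completion.result
        return inner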
Example #38
def wait(m, c):
    # type: (CephadmOrchestrator, OrchResult) -> Any
    return raise_if_exception(c)
Example #39
def test_raise():
    c = ReadCompletion()
    c.exception = ZeroDivisionError()
    with pytest.raises(ZeroDivisionError):
        raise_if_exception(c)
Example #40
 def _rm_stateless_svc(self, svc_type, svc_id):
     completion = self.remove_stateless_service(svc_type, svc_id)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult()
Example #41
 def _get_hosts(self):
     completion = self.get_hosts()
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     result = "\n".join(map(lambda node: node.name, completion.result))
     return HandleCommandResult(stdout=result)
Example #42
 def _remove_host(self, host):
     completion = self.remove_host(host)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=str(completion.result))
Example #43
 def _service_instance_action(self, action, svc_type, svc_id):
     completion = self.service_action(action, svc_type, service_id=svc_id)
     self._orchestrator_wait([completion])
     orchestrator.raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #44
def test_raise():
    c = Completion()
    c._exception = ZeroDivisionError()
    with pytest.raises(ZeroDivisionError):
        raise_if_exception(c)
Example #45
 def list_service_info(self, service_type):
     # type: (str) -> list
     completion = self.describe_service(service_type, None, None)
     self._orchestrator_wait([completion])
     raise_if_exception(completion)
     return completion.result