Example 1
    def zap_device(self, host: str, path: str) -> OrchResult[str]:
        try:
            self.rook_cluster.create_zap_job(host, path)
        except Exception as e:
            logging.error(e)
            # Report the failure inside the OrchResult instead of raising.
            return OrchResult(
                None,
                Exception("Unable to zap device: " +
                          str(e.with_traceback(None))))
        return OrchResult(f'{path} on {host} zapped')
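The failure path above stores the exception inside the OrchResult rather than raising it, so the caller decides when to surface the error. A minimal sketch of the consuming side, assuming Ceph's mgr orchestrator module is importable; raise_if_exception is the helper seen in Example 6, while backend and zap_or_raise are hypothetical names:

    from orchestrator import OrchResult, raise_if_exception

    def zap_or_raise(backend, host: str, path: str) -> str:
        # zap_device() never raises; errors travel inside the OrchResult.
        completion: OrchResult[str] = backend.zap_device(host, path)
        # raise_if_exception() re-raises a stored exception, otherwise
        # returns the wrapped result string.
        return raise_if_exception(completion)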
Example 2
    def remove_osds(self,
                    osd_ids: List[str],
                    replace: bool = False,
                    force: bool = False) -> OrchResult[str]:
        assert self._rook_cluster is not None
        res = self._rook_cluster.remove_osds(osd_ids, replace, force,
                                             self.mon_command)
        return OrchResult(res)
Example 3
    def test_scale_up(self, _apply_mds, _describe_service, _list_daemons, _get,
                      mds_autoscaler_module: MDSAutoscaler):
        daemons = OrchResult(result=[
            DaemonDescription(hostname='myhost',
                              daemon_type='mds',
                              daemon_id='fs_name.myhost.a'),
            DaemonDescription(hostname='myhost',
                              daemon_type='mds',
                              daemon_id='fs_name.myhost.b'),
        ])
        _list_daemons.return_value = daemons

        services = OrchResult(result=[
            ServiceDescription(spec=ServiceSpec(service_type='mds',
                                                service_id='fs_name',
                                                placement=PlacementSpec(
                                                    count=2)))
        ])
        _describe_service.return_value = services

        apply = OrchResult(result='')
        _apply_mds.return_value = apply

        _get.return_value = {
            'filesystems': [{
                'mdsmap': {
                    'fs_name': 'fs_name',
                    'in': [{
                        'name': 'mds.fs_name.myhost.a',
                    }],
                    'standby_count_wanted': 2,
                    'max_mds': 1
                }
            }],
            'standbys': [{
                'name': 'mds.fs_name.myhost.b',
            }],
        }
        mds_autoscaler_module.notify('fs_map', None)

        # One active daemon (max_mds=1) plus standby_count_wanted=2 means
        # three MDS daemons are needed, so the autoscaler should bump the
        # placement count from 2 to 3.
        _apply_mds.assert_called_with(
            ServiceSpec(service_type='mds',
                        service_id='fs_name',
                        placement=PlacementSpec(count=3)))
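The mock arguments above are injected by a @mock.patch decorator stack that the excerpt omits; with stacked patches, the bottom-most decorator supplies the first mock argument. A hedged sketch of what it plausibly looks like (the patch targets are assumptions, not taken from the excerpt):

    @mock.patch('mds_autoscaler.module.MDSAutoscaler.get')
    @mock.patch('mds_autoscaler.module.MDSAutoscaler.list_daemons')
    @mock.patch('mds_autoscaler.module.MDSAutoscaler.describe_service')
    @mock.patch('mds_autoscaler.module.MDSAutoscaler.apply_mds')
    def test_scale_up(self, _apply_mds, _describe_service, _list_daemons,
                      _get, mds_autoscaler_module: MDSAutoscaler):
        ...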
Example 4
    def remove_osds(self,
                    osd_ids: List[str],
                    replace: bool = False,
                    force: bool = False,
                    zap: bool = False) -> OrchResult[str]:
        assert self._rook_cluster is not None
        if zap:
            raise RuntimeError(
                "Rook does not support zapping devices during OSD removal.")
        res = self._rook_cluster.remove_osds(osd_ids, replace, force,
                                             self.mon_command)
        return OrchResult(res)
Example 5
def test_orch_device_ls():
    devices = Devices([Device("/dev/vdb", available=True)])
    ilist = OrchResult([
        InventoryHost("ceph-node-1", devices=devices),
        InventoryHost("ceph-node-2", devices=devices),
        InventoryHost("ceph-node-10", devices=devices),
    ])

    with mock.patch("orchestrator.OrchestratorCli.get_inventory", return_value=ilist):
        # Ensure natural sorting on hostnames (ceph-node-1, ceph-node-2, ceph-node-10)
        cmd = {
            'prefix': 'orch device ls'
        }
        m = OrchestratorCli('orchestrator', 0, 0)
        r = m._handle_command(None, cmd)
        out = 'HOST          PATH      TYPE     DEVICE ID   SIZE  AVAILABLE  REFRESHED  REJECT REASONS  \n'\
              'ceph-node-1   /dev/vdb  unknown  None          0   Yes        0s ago                     \n'\
              'ceph-node-2   /dev/vdb  unknown  None          0   Yes        0s ago                     \n'\
              'ceph-node-10  /dev/vdb  unknown  None          0   Yes        0s ago                     '
        assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
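The expected output asserts natural (human) sorting, so ceph-node-10 lands after ceph-node-2 instead of right after ceph-node-1. A minimal sketch of a key function that produces that order, purely for illustration (the CLI's own sorting helper may differ):

    import re

    def natural_key(hostname: str):
        # Split into digit and non-digit runs so numeric chunks compare
        # as integers: 'ceph-node-10' -> ['ceph-node-', 10, ''].
        return [int(part) if part.isdigit() else part
                for part in re.split(r'(\d+)', hostname)]

    hosts = ['ceph-node-10', 'ceph-node-1', 'ceph-node-2']
    assert sorted(hosts, key=natural_key) == \
        ['ceph-node-1', 'ceph-node-2', 'ceph-node-10']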
Example 6
    def apply_drivegroups(
            self, specs: List[DriveGroupSpec]) -> OrchResult[List[str]]:
        result_list = []
        all_hosts = raise_if_exception(self.get_hosts())
        for drive_group in specs:
            matching_hosts = drive_group.placement.filter_matching_hosts(
                lambda label=None, as_hostspec=None: all_hosts)

            if not self.rook_cluster.node_exists(matching_hosts[0]):
                raise RuntimeError("Node '{0}' is not in the Kubernetes "
                                   "cluster".format(matching_hosts[0]))

            # Validate whether cluster CRD can accept individual OSD
            # creations (i.e. not useAllDevices)
            if not self.rook_cluster.can_create_osd():
                raise RuntimeError("Rook cluster configuration does not "
                                   "support OSD creation.")
            result_list.append(
                self.rook_cluster.add_osds(drive_group, matching_hosts))
        return OrchResult(result_list)
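For context, a minimal sketch of a spec one might pass to apply_drivegroups, assuming the ceph.deployment APIs used elsewhere in the orchestrator (the service_id and device path are illustrative):

    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
    from ceph.deployment.service_spec import PlacementSpec

    # Targets one named device on every matching host; per-device
    # selections like this are what the can_create_osd() check guards.
    spec = DriveGroupSpec(
        service_id='example_osds',  # illustrative name
        placement=PlacementSpec(host_pattern='*'),
        data_devices=DeviceSelection(paths=['/dev/vdb']),
    )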
Example 7
    def _mock_orchestrator(self, enable: bool) -> Iterator:
        self.io_mock = MagicMock()
        self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
        self.io_mock.read = self._ioctl_read_mock
        self.io_mock.stat = self._ioctl_stat_mock
        self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
        self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
        self.io_mock.remove_object.side_effect = self._ioctx_remove_mock

        # mock nfs services
        orch_nfs_services = [
            ServiceDescription(spec=NFSServiceSpec(service_id=self.cluster_id))
        ] if enable else []

        def mock_exec(cls, args):
            u = {
                "user_id": "abc",
                "display_name": "foo",
                "email": "",
                "suspended": 0,
                "max_buckets": 1000,
                "subusers": [],
                "keys": [{
                    "user": "******",
                    "access_key": "the_access_key",
                    "secret_key": "the_secret_key"
                }],
                "swift_keys": [],
                "caps": [],
                "op_mask": "read, write, delete",
                "default_placement": "",
                "default_storage_class": "",
                "placement_tags": [],
                "bucket_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "user_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "temp_url_keys": [],
                "type": "rgw",
                "mfa_ids": []
            }
            if args[2] == 'list':
                return 0, json.dumps([u]), ''
            return 0, json.dumps(u), ''

        with mock.patch('nfs.module.Module.describe_service') as describe_service, \
                mock.patch('nfs.module.Module.rados') as rados, \
                mock.patch('nfs.export.available_clusters',
                           return_value=[self.cluster_id]), \
                mock.patch('nfs.export.restart_nfs_service'), \
                mock.patch('nfs.export.ExportMgr._exec', mock_exec), \
                mock.patch('nfs.export.check_fs', return_value=True), \
                mock.patch('nfs.export_utils.check_fs', return_value=True), \
                mock.patch('nfs.export.ExportMgr._create_user_key',
                           return_value='thekeyforclientabc'):

            rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
            rados.open_ioctx.return_value.__exit__ = mock.Mock(
                return_value=None)

            describe_service.return_value = OrchResult(orch_nfs_services)

            self._reset_temp_store()

            yield
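A hedged sketch of how a test body might use this fixture, assuming _mock_orchestrator is wrapped with contextlib.contextmanager (the generator shape and the trailing yield suggest it); the test function itself is hypothetical:

    import nfs.module
    from orchestrator import raise_if_exception

    def test_nfs_service_visible(self):
        with self._mock_orchestrator(enable=True):
            # describe_service is a MagicMock inside the block, so the
            # call returns the OrchResult installed as its return_value.
            completion = nfs.module.Module.describe_service()
            services = raise_if_exception(completion)
            assert services[0].spec.service_id == self.cluster_id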
Example 8
    def apply_drivegroups(
            self, specs: List[DriveGroupSpec]) -> OrchResult[List[str]]:
        for drive_group in specs:
            self._drive_group_map[str(drive_group.service_id)] = drive_group
        self._save_drive_groups()
        return OrchResult(self._apply_drivegroups(specs))
Example 9
    def apply_rbd_mirror(self, spec: ServiceSpec) -> OrchResult[str]:
        try:
            self.rook_cluster.rbd_mirror(spec)
            return OrchResult("Success")
        except Exception as e:
            return OrchResult(None, e)
Example 10
    def mock_list_daemons(cls, *args, **kwargs):
        if kwargs['daemon_type'] == 'nfs':
            return OrchResult(orch_nfs_daemons)
        return OrchResult([])
Example 11
    def mock_describe_service(cls, *args, **kwargs):
        if kwargs['service_type'] == 'nfs':
            return OrchResult(orch_nfs_services)
        return OrchResult([])
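Both helpers close over orch_nfs_daemons and orch_nfs_services defined in their enclosing fixture. A minimal sketch of how they might be installed, assuming the nfs.module.Module patch target seen in Example 7 (the list_daemons target is likewise an assumption):

    with mock.patch('nfs.module.Module.describe_service',
                    mock_describe_service), \
            mock.patch('nfs.module.Module.list_daemons',
                       mock_list_daemons):
        # NFS lookups now return the canned OrchResults, while any other
        # service or daemon type resolves to an empty OrchResult([]).
        ...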
Example 12
def test_handle_command():
    cmd = {
        'prefix': 'orch daemon add',
        'daemon_type': 'mon',
        'placement': 'smithi044:[v2:172.21.15.44:3301,v1:172.21.15.44:6790]=c',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    assert r == HandleCommandResult(
        retval=-2, stdout='', stderr='No orchestrator configured (try `ceph orch set backend`)')


r = OrchResult([ServiceDescription(spec=ServiceSpec(service_type='osd'), running=123)])


@mock.patch("orchestrator.OrchestratorCli.describe_service", return_value=r)
def test_orch_ls(_describe_service):
    cmd = {
        'prefix': 'orch ls',
    }
    m = OrchestratorCli('orchestrator', 0, 0)
    r = m._handle_command(None, cmd)
    out = 'NAME  PORTS  RUNNING  REFRESHED  AGE  PLACEMENT  \n' \
          'osd              123  -          -               '
    assert r == HandleCommandResult(retval=0, stdout=out, stderr='')
Example 13
    def _mock_orchestrator(self, enable: bool) -> Iterator:

        self.io_mock = MagicMock()
        self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
        self.io_mock.read = self._ioctl_read_mock
        self.io_mock.stat = self._ioctl_stat_mock
        self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
        self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
        self.io_mock.remove_object.side_effect = self._ioctx_remove_mock

        # mock nfs services
        cluster_info = self.clusters['foo']
        orch_nfs_services = [
            ServiceDescription(
                spec=NFSServiceSpec(service_id='foo',
                                    pool=cluster_info['pool'],
                                    namespace=cluster_info['namespace']))
        ] if enable else []
        """
        # mock nfs daemons
        def _get_nfs_instances(service_name=None):
            if not enable:
                return []
            instances = {
                'nfs.foo': [
                    DaemonDescription(daemon_id='foo.host_a', status=1),
                    DaemonDescription(daemon_id='foo.host_b', status=1)
                ],
                'nfs.bar': [
                    DaemonDescription(daemon_id='bar.host_c', status=1)
                ]
            }
            if service_name is not None:
                return instances[service_name]
            result = []
            for _, daemons in instances.items():
                result.extend(daemons)
            return result
        """
        def mock_exec(cls, args):
            u = {
                "user_id": "abc",
                "display_name": "foo",
                "email": "",
                "suspended": 0,
                "max_buckets": 1000,
                "subusers": [],
                "keys": [{
                    "user": "******",
                    "access_key": "the_access_key",
                    "secret_key": "the_secret_key"
                }],
                "swift_keys": [],
                "caps": [],
                "op_mask": "read, write, delete",
                "default_placement": "",
                "default_storage_class": "",
                "placement_tags": [],
                "bucket_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "user_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "temp_url_keys": [],
                "type": "rgw",
                "mfa_ids": []
            }
            if args[2] == 'list':
                return 0, json.dumps([u]), ''
            return 0, json.dumps(u), ''

        with mock.patch('nfs.module.Module.describe_service') as describe_service, \
                mock.patch('nfs.module.Module.rados') as rados, \
                mock.patch('nfs.export.available_clusters',
                           return_value=self.clusters.keys()), \
                mock.patch('nfs.export.restart_nfs_service'), \
                mock.patch('nfs.export.ExportMgr._exec', mock_exec), \
                mock.patch('nfs.export.check_fs', return_value=True), \
                mock.patch('nfs.export_utils.check_fs', return_value=True), \
                mock.patch('nfs.export.ExportMgr._create_user_key',
                           return_value='thekeyforclientabc'):

            rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
            rados.open_ioctx.return_value.__exit__ = mock.Mock(
                return_value=None)

            describe_service.return_value = OrchResult(orch_nfs_services)

            self._reset_temp_store()

            yield