Example #1
 def test_apply_iscsi_save(self, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         ps = PlacementSpec(hosts=['test'], count=1)
         spec = IscsiServiceSpec('name', pool='pool', placement=ps)
         c = cephadm_module.apply_iscsi(spec)
         assert wait(cephadm_module, c) == 'Scheduled iscsi update...'
         assert [
             d.spec for d in wait(cephadm_module,
                                  cephadm_module.describe_service())
         ] == [spec]
Example #2
 def test_driveselection_to_ceph_volume(self, cephadm_module, devices,
                                        preview, exp_command):
     with self._with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                             data_devices=DeviceSelection(paths=devices))
         ds = DriveSelection(dg,
                             Devices([Device(path) for path in devices]))
         out = cephadm_module.driveselection_to_ceph_volume(dg, ds, preview)
         assert out in exp_command
Example #3
 def test_nfs(self, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         ps = PlacementSpec(hosts=['test'], count=1)
         spec = NFSServiceSpec('name',
                               pool='pool',
                               namespace='namespace',
                               placement=ps)
         c = cephadm_module.add_nfs(spec)
         [out] = wait(cephadm_module, c)
         match_glob(out, "Deployed nfs.name.* on host 'test'")
Example #4
def test_drive_selection():
    devs = DeviceSelection(paths=['/dev/sda'])
    spec = DriveGroupSpec(PlacementSpec('node_name'),
                          service_id='foobar',
                          data_devices=devs)
    assert all([isinstance(x, Device) for x in spec.data_devices.paths])
    assert spec.data_devices.paths[0].path == '/dev/sda'

    with pytest.raises(DriveGroupValidationError, match='exclusive'):
        DeviceSelection(paths=['/dev/sda'], rotational=False)
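
The 'exclusive' failure above comes from DeviceSelection's validation: explicit
paths cannot be combined with filter arguments. A minimal sketch of the two
valid forms, assuming the same imports as the test:

    DeviceSelection(paths=['/dev/sda'])         # explicit paths only
    DeviceSelection(rotational=False, limit=2)  # filters only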
Example #5
 def test_preview_drivegroups_str(self, _run_c_v_command, _ds_to_cv, _prepare_dg, _find_store, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
         _find_store.return_value = [dg]
         _prepare_dg.return_value = [('host1', 'ds_dummy')]
         _run_c_v_command.return_value = ("{}", '', 0)
         cephadm_module.preview_drivegroups(drive_group_name='foo')
         _find_store.assert_called_once_with(service_name='foo')
         _prepare_dg.assert_called_once_with(dg)
         _run_c_v_command.assert_called_once()
Example #6
 def _mds_add(self, fs_name, placement=None):
     spec = ServiceSpec(
         'mds',
         fs_name,
         placement=PlacementSpec.from_string(placement),
     )
     completion = self.add_mds(spec)
     self._orchestrator_wait([completion])
     raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #7
def test_ceph_volume_command_1():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          data_devices=DeviceSelection(rotational=True),
                          db_devices=DeviceSelection(rotational=False))
    inventory = _mk_inventory(
        _mk_device(rotational=True) * 2 + _mk_device(rotational=False) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(spec, sel, []).run()
    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                   '--db-devices /dev/sdc /dev/sdd --yes --no-systemd')
Example #8
def test_ceph_volume_command_7():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True),
                          osd_id_claims={'host1': ['0', '1']})
    spec.validate()
    inventory = _mk_inventory(_mk_device(rotational=True) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, ['0', '1']).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --osd-ids 0 1 --yes --no-systemd'
Example #9
File: module.py  Project: jbwyatt4/ceph
 def _apply_rgw(self, zone_name, realm_name, placement=None):
     spec = RGWSpec(
         rgw_realm=realm_name,
         rgw_zone=zone_name,
         placement=PlacementSpec.from_string(placement),
     )
     completion = self.apply_rgw(spec)
     self._orchestrator_wait([completion])
     raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #10
 def test_apply_rgw_save(self, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         ps = PlacementSpec(hosts=['test'], count=1)
         spec = ServiceSpec('rgw', 'r.z', placement=ps)
         c = cephadm_module.apply_rgw(spec)
         assert wait(cephadm_module, c) == 'Scheduled rgw update...'
         assert [
             d.spec for d in wait(cephadm_module,
                                  cephadm_module.describe_service())
         ] == [spec]
Example #11
File: module.py  Project: jbwyatt4/ceph
    def _apply_mon(self, placement=None):
        placement = PlacementSpec.from_string(placement)
        placement.validate()

        spec = ServiceSpec('mon', placement=placement)

        completion = self.apply_mon(spec)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
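
Several of these handlers parse the CLI placement argument with
PlacementSpec.from_string. A minimal sketch of what that parsing yields,
assuming ceph's python-common package is importable (the host names here are
made up):

    from ceph.deployment.service_spec import PlacementSpec

    # "<count> <host> ..." is one accepted form; 'label:foo' and '*' are others.
    ps = PlacementSpec.from_string('3 host1 host2 host3')
    ps.validate()
    assert ps.count == 3
    assert [h.hostname for h in ps.hosts] == ['host1', 'host2', 'host3']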
Example #12
 def test_apply_nfs_save(self, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         ps = PlacementSpec(hosts=['test'], count=1)
         spec = NFSServiceSpec('name',
                               pool='pool',
                               namespace='namespace',
                               placement=ps)
         c = cephadm_module.apply_nfs(spec)
         assert wait(cephadm_module, c) == 'Scheduled nfs update...'
         assert wait(cephadm_module, cephadm_module.list_specs()) == [spec]
Example #13
def test_ceph_volume_command_9():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True),
                          data_allocate_fraction=0.8)
    spec.validate()
    inventory = _mk_inventory(_mk_device() * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, []).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --data-allocate-fraction 0.8 --yes --no-systemd'
Example #14
class TestDriveSelection(object):

    testdata = [
        (DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                        service_id='foobar',
                        data_devices=DeviceSelection(all=True)),
         _mk_inventory(_mk_device() * 5),
         ['/dev/sda', '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'], []),
        (DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                        service_id='foobar',
                        data_devices=DeviceSelection(all=True, limit=3),
                        db_devices=DeviceSelection(all=True)),
         _mk_inventory(_mk_device() * 5), ['/dev/sda', '/dev/sdb', '/dev/sdc'],
         ['/dev/sdd', '/dev/sde']),
        (DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                        service_id='foobar',
                        data_devices=DeviceSelection(rotational=True),
                        db_devices=DeviceSelection(rotational=False)),
         _mk_inventory(
             _mk_device(rotational=False) + _mk_device(rotational=True)),
         ['/dev/sdb'], ['/dev/sda']),
        (DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                        service_id='foobar',
                        data_devices=DeviceSelection(rotational=True),
                        db_devices=DeviceSelection(rotational=False)),
         _mk_inventory(
             _mk_device(rotational=True) * 2 + _mk_device(rotational=False)),
         ['/dev/sda', '/dev/sdb'], ['/dev/sdc']),
        (DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                        service_id='foobar',
                        data_devices=DeviceSelection(rotational=True),
                        db_devices=DeviceSelection(rotational=False)),
         _mk_inventory(_mk_device(rotational=True) * 2),
         ['/dev/sda', '/dev/sdb'], []),
    ]

    @pytest.mark.parametrize("spec,inventory,expected_data,expected_db",
                             testdata)
    def test_disk_selection(self, spec, inventory, expected_data, expected_db):
        sel = drive_selection.DriveSelection(spec, inventory)
        assert [d.path for d in sel.data_devices()] == expected_data
        assert [d.path for d in sel.db_devices()] == expected_db
Example #15
File: module.py  Project: zlsyl/ceph
 def _apply_nfs(self, svc_id, pool, namespace=None, placement=None, unmanaged=False):
     spec = NFSServiceSpec(
         svc_id,
         pool=pool,
         namespace=namespace,
         placement=PlacementSpec.from_string(placement),
         unmanaged=unmanaged,
     )
     completion = self.apply_nfs(spec)
     self._orchestrator_wait([completion])
     return HandleCommandResult(stdout=completion.result_str())
Example #16
 def _apply_mds(self, fs_name, placement=None, unmanaged=False):
     placement = PlacementSpec.from_string(placement)
     placement.validate()
     spec = ServiceSpec('mds',
                        fs_name,
                        placement=placement,
                        unmanaged=unmanaged)
     completion = self.apply_mds(spec)
     self._orchestrator_wait([completion])
     raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #17
def test_ceph_volume_command_5():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(rotational=True),
                          objectstore='filestore')
    with pytest.raises(DriveGroupValidationError):
        spec.validate()
    inventory = _mk_inventory(_mk_device(rotational=True) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, []).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'
Example #18
def test_ceph_volume_command_0():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True))
    spec.validate()
    inventory = _mk_inventory(_mk_device() * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmds = translate.to_ceph_volume(sel, []).run()
    assert all(
        cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'
        for cmd in cmds), f'Unexpected command list: {cmds}'
Example #19
 def _call_orch_apply_nfs(
     self,
     cluster_id: str,
     placement: Optional[str],
     virtual_ip: Optional[str] = None,
     port: Optional[int] = None,
 ) -> None:
     if not port:
         port = 2049  # default nfs port
     if virtual_ip:
         # nfs + ingress
         # run NFS on non-standard port
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             # use non-default port so we don't conflict with ingress
             port=10000 + port)  # semi-arbitrary, fix me someday
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
         ispec = IngressSpec(
             service_type='ingress',
             service_id='nfs.' + cluster_id,
             backend_service='nfs.' + cluster_id,
             frontend_port=port,
             monitor_port=7000 + port,  # semi-arbitrary, fix me someday
             virtual_ip=virtual_ip)
         completion = self.mgr.apply_ingress(ispec)
         orchestrator.raise_if_exception(completion)
     else:
         # standalone nfs
         spec = NFSServiceSpec(
             service_type='nfs',
             service_id=cluster_id,
             placement=PlacementSpec.from_string(placement),
             port=port)
         completion = self.mgr.apply_nfs(spec)
         orchestrator.raise_if_exception(completion)
     log.debug(
         "Successfully deployed nfs daemons with cluster id %s and placement %s",
         cluster_id, placement)
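
For instance, with the default port of 2049 and a virtual IP, the arithmetic
above yields the following (illustrative numbers only):

    port = 2049                  # default NFS port
    backend_port = 10000 + port  # 12049: NFS runs off-port so ingress can own 2049
    monitor_port = 7000 + port   # 9049: ingress monitoring port
    assert (backend_port, monitor_port) == (12049, 9049)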
Example #20
 def _nfs_add(self, svc_arg, pool, namespace=None, placement=None):
     spec = NFSServiceSpec(
         svc_arg,
         pool=pool,
         namespace=namespace,
         placement=PlacementSpec.from_string(placement),
     )
     spec.validate_add()
     completion = self.add_nfs(spec)
     self._orchestrator_wait([completion])
     raise_if_exception(completion)
     return HandleCommandResult(stdout=completion.result_str())
Example #21
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module,
                        c) == f'Scheduled {spec.service_name()} update...'
            assert [
                d.spec for d in wait(cephadm_module,
                                     cephadm_module.describe_service())
            ] == [spec]

            assert_rm_service(cephadm_module, spec.service_name())
Example #22
def mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count):

    if spec_section == 'hosts':
        mk_spec = lambda: ServiceSpec('mon',
                                      placement=PlacementSpec(
                                          hosts=explicit,
                                          count=count,
                                      ))
        mk_hosts = lambda _: hosts
    elif spec_section == 'label':
        mk_spec = lambda: ServiceSpec('mon',
                                      placement=PlacementSpec(
                                          label='mylabel',
                                          count=count,
                                      ))
        mk_hosts = lambda l: [e for e in explicit
                              if e in hosts] if l == 'mylabel' else hosts
    elif spec_section == 'host_pattern':
        pattern = {
            'e': 'notfound',
            '1': '1',
            '12': '[1-2]',
            '123': '*',
        }[explicit_key]
        mk_spec = lambda: ServiceSpec('mon',
                                      placement=PlacementSpec(
                                          host_pattern=pattern,
                                          count=count,
                                      ))
        mk_hosts = lambda _: hosts
    else:
        assert False

    def _get_hosts_wrapper(label=None, as_hostspec=False):
        hosts = mk_hosts(label)
        if as_hostspec:
            return list(map(HostSpec, hosts))
        return hosts

    return mk_spec, _get_hosts_wrapper
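
A hypothetical invocation of this helper (the host names and keys here are
made up for illustration):

    mk_spec, get_hosts = mk_spec_and_host('hosts', ['1', '2', '3'], '12',
                                          ['1', '2'], count=2)
    spec = mk_spec()
    assert spec.placement.count == 2
    assert get_hosts() == ['1', '2', '3']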
Example #23
def test_service_ls(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            cephadm_module._apply_all_services()
            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()

            # migration must not advance yet: it still needs daemon info from all hosts.
            assert cephadm_module.migration_current == 0

            # Sorry for this hack, but I need to make sure Migration thinks
            # we have already updated all daemons.
            cephadm_module.cache.last_daemon_update['host1'] = datetime.now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime.now()

            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current == 1

            out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
            assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]



#            assert_rm_service(cephadm_module, 'rgw.r.z')
#            assert_rm_daemon(cephadm_module, 'mds.name', 'test')
Example #24
def test_ceph_volume_command_6():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          data_devices=DeviceSelection(rotational=False),
                          journal_devices=DeviceSelection(rotational=True),
                          journal_size='500M',
                          objectstore='filestore')
    inventory = _mk_inventory(
        _mk_device(rotational=True) * 2 + _mk_device(rotational=False) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(spec, sel, []).run()
    assert cmd == ('lvm batch --no-auto /dev/sdc /dev/sdd '
                   '--journal-size 500M --journal-devices /dev/sda /dev/sdb '
                   '--filestore --yes --no-systemd')
Example #25
def mk_spec_and_host(spec_section, hosts, explicit_key, explicit, count):

    if spec_section == 'hosts':
        mk_spec = lambda: ServiceSpec(
            'mgr',
            placement=PlacementSpec(  # noqa: E731
                hosts=explicit,
                count=count,
            ))
    elif spec_section == 'label':
        mk_spec = lambda: ServiceSpec(
            'mgr',
            placement=PlacementSpec(  # noqa: E731
                label='mylabel',
                count=count,
            ))
    elif spec_section == 'host_pattern':
        pattern = {
            'e': 'notfound',
            '1': '1',
            '12': '[1-2]',
            '123': '*',
        }[explicit_key]
        mk_spec = lambda: ServiceSpec(
            'mgr',
            placement=PlacementSpec(  # noqa: E731
                host_pattern=pattern,
                count=count,
            ))
    else:
        assert False

    hosts = [
        HostSpec(h, labels=['mylabel']) if h in explicit else HostSpec(h)
        for h in hosts
    ]

    return mk_spec, hosts
Example #26
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never explicitly created the service, but add_iscsi
            # created it implicitly, so we now need to remove it. This is in
            # contrast to the other services, which are not created implicitly.
            assert_rm_service(cephadm_module, 'iscsi.name')
Example #27
 def test_daemon_add_fail(self, _run_cephadm, cephadm_module):
     _run_cephadm.return_value = '{}', '', 0
     with with_host(cephadm_module, 'test'):
         spec = ServiceSpec(service_type='mgr',
                            placement=PlacementSpec(
                                hosts=[HostPlacementSpec('test', '', 'x')],
                                count=1))
         _run_cephadm.side_effect = OrchestratorError('fail')
         with pytest.raises(OrchestratorError):
             wait(cephadm_module, cephadm_module.add_mgr(spec))
         cephadm_module.assert_issued_mon_command({
             'prefix': 'auth rm',
             'entity': 'mgr.x',
         })
Example #28
    @classmethod
    def _from_json_impl(cls, json_drive_group):
        # type: (dict) -> DriveGroupSpec
        """
        Initialize 'Drive group' structure

        :param json_drive_group: A dict (parsed from JSON) with a Drive Group
               specification
        """
        args: Dict[str, Any] = {}
        # legacy json (pre Octopus)
        if 'host_pattern' in json_drive_group and 'placement' not in json_drive_group:
            json_drive_group['placement'] = {
                'host_pattern': json_drive_group['host_pattern']
            }
            del json_drive_group['host_pattern']

        try:
            args['placement'] = PlacementSpec.from_json(
                json_drive_group.pop('placement'))
        except KeyError:
            args['placement'] = PlacementSpec()

        args['service_type'] = json_drive_group.pop('service_type', 'osd')

        # service_id was not required in early octopus.
        args['service_id'] = json_drive_group.pop('service_id', '')

        # spec: was not mandatory in octopus
        if 'spec' in json_drive_group:
            args.update(
                cls._drive_group_spec_from_json(json_drive_group.pop('spec')))
        else:
            args.update(cls._drive_group_spec_from_json(json_drive_group))

        args['unmanaged'] = json_drive_group.pop('unmanaged', False)

        return cls(**args)
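
A usage sketch for the parser above, assuming ceph.deployment.drive_group is
importable; the flat, pre-Octopus style layout below should parse the same way
as one nested under a 'spec' key:

    import yaml
    from ceph.deployment.drive_group import DriveGroupSpec

    dg = DriveGroupSpec.from_json(yaml.safe_load("""
    service_type: osd
    service_id: foobar
    placement:
      host_pattern: '*'
    data_devices:
      all: true
    """))
    assert dg.service_id == 'foobar'
    assert dg.data_devices.all is True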
Example #29
    def _daemon_add_misc(self,
                         daemon_type: Optional[str] = None,
                         placement: Optional[str] = None,
                         inbuf: Optional[str] = None) -> HandleCommandResult:
        usage = f"""Usage:
    ceph orch daemon add -i <json_file>
    ceph orch daemon add {daemon_type or '<daemon_type>'} <placement>"""
        if inbuf:
            if daemon_type or placement:
                raise OrchestratorValidationError(usage)
            spec = ServiceSpec.from_json(yaml.safe_load(inbuf))
        else:
            spec = PlacementSpec.from_string(placement)
            assert daemon_type
            spec = ServiceSpec(daemon_type, placement=spec)

        daemon_type = spec.service_type

        if daemon_type == 'mon':
            completion = self.add_mon(spec)
        elif daemon_type == 'mgr':
            completion = self.add_mgr(spec)
        elif daemon_type == 'rbd-mirror':
            completion = self.add_rbd_mirror(spec)
        elif daemon_type == 'crash':
            completion = self.add_crash(spec)
        elif daemon_type == 'alertmanager':
            completion = self.add_alertmanager(spec)
        elif daemon_type == 'grafana':
            completion = self.add_grafana(spec)
        elif daemon_type == 'node-exporter':
            completion = self.add_node_exporter(spec)
        elif daemon_type == 'prometheus':
            completion = self.add_prometheus(spec)
        elif daemon_type == 'mds':
            completion = self.add_mds(spec)
        elif daemon_type == 'rgw':
            completion = self.add_rgw(spec)
        elif daemon_type == 'nfs':
            completion = self.add_nfs(spec)
        elif daemon_type == 'iscsi':
            completion = self.add_iscsi(spec)
        else:
            raise OrchestratorValidationError(
                f'unknown daemon type `{daemon_type}`')

        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
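
The if/elif ladder above is essentially a lookup table. An equivalent
table-driven dispatch might look like this (a sketch; it assumes the same
add_* methods exist on the orchestrator object):

    def _add_completion(orch, daemon_type, spec):
        # 'rbd-mirror' and 'node-exporter' map to add_rbd_mirror / add_node_exporter.
        supported = ('mon', 'mgr', 'rbd-mirror', 'crash', 'alertmanager',
                     'grafana', 'node-exporter', 'prometheus', 'mds', 'rgw',
                     'nfs', 'iscsi')
        if daemon_type not in supported:
            raise OrchestratorValidationError(
                f'unknown daemon type `{daemon_type}`')
        return getattr(orch, 'add_' + daemon_type.replace('-', '_'))(spec)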
Example #30
def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon',
            json.dumps(
                {
                    'spec': {
                        'service_type': 'mon',
                        'placement': {
                            'count': 5,
                        }
                    },
                    'created': datetime_to_str(datetime_now()),
                },
                sort_keys=True),
        )
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps(
                {
                    'spec': {
                        'service_type': 'mon',
                        'service_id': 'wrong',
                        'placement': {
                            'hosts': ['host1']
                        }
                    },
                    'created': datetime_to_str(datetime_now()),
                },
                sort_keys=True),
        )

        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.all_specs) == 2
        assert cephadm_module.spec_store.all_specs['mon.wrong'].service_name(
        ) == 'mon'
        assert cephadm_module.spec_store.all_specs['mon'].service_name(
        ) == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.all_specs) == 1
        assert cephadm_module.spec_store.all_specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(count=5))