Example #1
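RgwService.config() from cephadm's service handling: it sets rgw_realm and rgw_zone for the service via mon "config set" commands, stores an optional frontend SSL certificate under the rgw/cert/<service_name> config-key, saves the spec, and triggers the dashboard RGW connection.
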
    def config(self, spec: RGWSpec) -> None:  # type: ignore
        assert self.TYPE == spec.service_type

        # set rgw_realm and rgw_zone, if present
        if spec.rgw_realm:
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config set',
                'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
                'name': 'rgw_realm',
                'value': spec.rgw_realm,
            })
        if spec.rgw_zone:
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config set',
                'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
                'name': 'rgw_zone',
                'value': spec.rgw_zone,
            })

        if spec.rgw_frontend_ssl_certificate:
            if isinstance(spec.rgw_frontend_ssl_certificate, list):
                cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
            elif isinstance(spec.rgw_frontend_ssl_certificate, str):
                cert_data = spec.rgw_frontend_ssl_certificate
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_certificate: %s'
                    % spec.rgw_frontend_ssl_certificate)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.service_name()}',
                'val': cert_data,
            })

        # TODO: fail if we don't have a spec
        logger.info('Saving service %s spec with placement %s' % (
            spec.service_name(), spec.placement.pretty_str()))
        self.mgr.spec_store.save(spec)
        self.mgr.trigger_connect_dashboard_rgw()
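
The sketch below is not part of the Ceph source; it only illustrates, under the assumption that RGWSpec is importable from ceph.deployment.service_spec and that service_name() returns '<type>.<service_id>' (consistent with the ('rgw', 's_id', 'rgw.s_id') case in Example #4), what kind of spec drives the realm, zone, and certificate branches above.

from ceph.deployment.service_spec import RGWSpec  # assumed import path

# Hypothetical spec for illustration only.
spec = RGWSpec(
    service_id='myrealm.myzone',
    rgw_realm='myrealm',
    rgw_zone='myzone',
    rgw_frontend_ssl_certificate='-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----',
)

# config() above would issue 'config set' for rgw_realm and rgw_zone and store
# the certificate under this config-key:
cert_key = f'rgw/cert/{spec.service_name()}'  # expected: 'rgw/cert/rgw.myrealm.myzone'
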
Example #2
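A cephadm orchestrator unit test: it deploys an RGW daemon via with_daemon(), checks that redeploy/start/stop/restart actions are scheduled with the expected messages, then fakes a monmap change so that CephadmServe._check_daemons() performs a redeploy.
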
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), CephadmOrchestrator.add_rgw, 'test') as daemon_id:

                c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
                assert wait(cephadm_module,
                            c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                    assert wait(cephadm_module,
                                c) == F"Scheduled to {what} rgw.{daemon_id} on host 'test'"

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime_to_str(datetime_now()),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()
Example #3
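The tail of a parametrized round-trip test asserting that DaemonDescription.from_json(...).to_json() preserves real Octopus-era JSON, followed by the start of a (spec, dd, valid) parametrization that matches RGW daemons against RGWSpecs.
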
]""")
)
def test_dd_octopus(dd_json):
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.
    assert dd_json == DaemonDescription.from_json(dd_json).to_json()


@pytest.mark.parametrize("spec,dd,valid",
[
    # https://tracker.ceph.com/issues/44934
    (
        RGWSpec(
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
            subcluster='1',
        ),
        DaemonDescription(
            daemon_type='rgw',
            daemon_id="default-rgw-realm.eu-central-1.1.ceph-001.ytywjo",
            hostname="ceph-001",
        ),
        True
    ),
    (
        # no subcluster
        RGWSpec(
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
        ),
Example #4
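The tail of the parametrization for test_spec_hash_eq(), which compares ServiceSpec, DriveGroupSpec, and RGWSpec instances for equality, followed by (service_type, service_id, service_name) cases showing how service names are formed.
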
         True),
        (ServiceSpec(service_type='mon'),
         ServiceSpec(service_type='mon', service_id='foo'), True),
        # Add service_type='mgr'
        (ServiceSpec(service_type='osd'), ServiceSpec(
            service_type='osd', ), True),
        (ServiceSpec(service_type='osd'), DriveGroupSpec(), True),
        (ServiceSpec(service_type='osd'),
         ServiceSpec(
             service_type='osd',
             service_id='foo',
         ), False),
        (ServiceSpec(
            service_type='rgw',
            service_id='foo',
        ), RGWSpec(service_id='foo'), True),
    ])
def test_spec_hash_eq(spec1: ServiceSpec, spec2: ServiceSpec, eq: bool):

    assert (spec1 == spec2) is eq


@pytest.mark.parametrize("s_type,s_id,s_name", [
    ('mgr', 's_id', 'mgr'),
    ('mon', 's_id', 'mon'),
    ('mds', 's_id', 'mds.s_id'),
    ('rgw', 's_id', 'rgw.s_id'),
    ('nfs', 's_id', 'nfs.s_id'),
    ('iscsi', 's_id', 'iscsi.s_id'),
    ('osd', 's_id', 'osd.s_id'),
])
Example #5
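An earlier variant of RgwService.config(): it additionally creates the realm/zonegroup/zone, sets rgw_frontends, and stores the SSL certificate and key under per-realm/zone config-keys instead of a single per-service entry.
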
    def config(self, spec: RGWSpec, rgw_id: str) -> None:  # type: ignore
        assert self.TYPE == spec.service_type

        # create realm, zonegroup, and zone if needed
        self.create_realm_zonegroup_zone(spec, rgw_id)

        # ensure rgw_realm and rgw_zone is set for these daemons
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_zone',
            'value': spec.rgw_zone,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.rgw_realm}",
            'name': 'rgw_realm',
            'value': spec.rgw_realm,
        })
        ret, out, err = self.mgr.check_mon_command({
            'prefix': 'config set',
            'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
            'name': 'rgw_frontends',
            'value': spec.rgw_frontends_config_value(),
        })

        if spec.rgw_frontend_ssl_certificate:
            if isinstance(spec.rgw_frontend_ssl_certificate, list):
                cert_data = '\n'.join(spec.rgw_frontend_ssl_certificate)
            elif isinstance(spec.rgw_frontend_ssl_certificate, str):
                cert_data = spec.rgw_frontend_ssl_certificate
            else:
                raise OrchestratorError(
                    'Invalid rgw_frontend_ssl_certificate: %s' %
                    spec.rgw_frontend_ssl_certificate)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt',
                'val': cert_data,
            })

        if spec.rgw_frontend_ssl_key:
            if isinstance(spec.rgw_frontend_ssl_key, list):
                key_data = '\n'.join(spec.rgw_frontend_ssl_key)
            elif isinstance(spec.rgw_frontend_ssl_key, str):
                key_data = spec.rgw_frontend_ssl_key
            else:
                raise OrchestratorError('Invalid rgw_frontend_ssl_key: %s' %
                                        spec.rgw_frontend_ssl_key)
            ret, out, err = self.mgr.check_mon_command({
                'prefix': 'config-key set',
                'key': f'rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key',
                'val': key_data,
            })

        # TODO: fail if we don't have a spec
        logger.info('Saving service %s spec with placement %s' %
                    (spec.service_name(), spec.placement.pretty_str()))
        self.mgr.spec_store.save(spec)
Example #6
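A large TestCephadm suite covering host management, daemon and service listing, daemon actions and failure events, OSD creation and removal, RGW/NFS/iSCSI deployment, blink-device-light commands, /etc/ceph/ceph.conf management, registry login, and upgrade image handling.
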
class TestCephadm(object):
    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [DaemonDescription(daemon_type='mon', daemon_id='a')]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module.get_store('inventory')) == \
                {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            with with_daemon(cephadm_module, ServiceSpec('mds', 'name'),
                             CephadmOrchestrator.add_mds, 'test'):

                c = cephadm_module.list_daemons()

                def remove_id_events(dd):
                    out = dd.to_json()
                    del out['daemon_id']
                    del out['events']
                    return out

                assert [
                    remove_id_events(dd) for dd in wait(cephadm_module, c)
                ] == [{
                    'daemon_type': 'mds',
                    'hostname': 'test',
                    'status': 1,
                    'status_desc': 'starting',
                    'is_active': False
                }]

                with with_service(cephadm_module, ServiceSpec('rgw', 'r.z'),
                                  CephadmOrchestrator.apply_rgw, 'test'):

                    c = cephadm_module.describe_service()
                    out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
                    expected = [{
                        'placement': {
                            'hosts': ['test']
                        },
                        'service_id': 'name',
                        'service_name': 'mds.name',
                        'service_type': 'mds',
                        'status': {
                            'running': 1,
                            'size': 0
                        },
                        'unmanaged': True
                    }, {
                        'placement': {
                            'count': 1,
                            'hosts': ["test"]
                        },
                        'spec': {
                            'rgw_realm': 'r',
                            'rgw_zone': 'z',
                        },
                        'service_id': 'r.z',
                        'service_name': 'rgw.r.z',
                        'service_type': 'rgw',
                        'status': {
                            'created': mock.ANY,
                            'running': 1,
                            'size': 1
                        },
                    }]
                    for o in out:
                        if 'events' in o:
                            del o['events']  # delete it, as it contains a timestamp
                    assert out == expected

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.foobar',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            assert wait(cephadm_module, c)[0].name() == 'rgw.myrgw.foobar'

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module,
                             RGWSpec(service_id='myrgw.foobar'),
                             CephadmOrchestrator.add_rgw, 'test') as daemon_id:

                c = cephadm_module.daemon_action('redeploy',
                                                 'rgw.' + daemon_id)
                assert wait(
                    cephadm_module, c
                ) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

                for what in ('start', 'stop', 'restart'):
                    c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                    assert wait(
                        cephadm_module, c
                    ) == F"Scheduled to {what} rgw.{daemon_id} on host 'test'"

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module._store['_ceph_get/mon_map'] = {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                }
                cephadm_module.notify('mon_map', None)

                CephadmServe(cephadm_module)._check_daemons()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module,
                             RGWSpec(service_id='myrgw.foobar'),
                             CephadmOrchestrator.add_rgw, 'test') as daemon_id:
                with mock.patch('ceph_module.BaseMgrModule._ceph_send_command'
                                ) as _ceph_send_command:

                    _ceph_send_command.side_effect = Exception("myerror")

                    # Make sure _check_daemons does a redeploy due to monmap change:
                    cephadm_module.mock_store_set(
                        '_ceph_get', 'mon_map', {
                            'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                            'fsid': 'foobar',
                        })
                    cephadm_module.notify('mon_map', None)

                    CephadmServe(cephadm_module)._check_daemons()

                    evs = [
                        e.message
                        for e in cephadm_module.events.get_for_daemon(
                            f'rgw.{daemon_id}')
                    ]

                    assert 'myerror' in ''.join(evs)

    @pytest.mark.parametrize(
        "action", ['start', 'stop', 'restart', 'reconfig', 'redeploy'])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_daemon_check(self, cephadm_module: CephadmOrchestrator, action):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module,
                              ServiceSpec(service_type='grafana'),
                              CephadmOrchestrator.apply_grafana,
                              'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._schedule_daemon_action(daemon_name, action)

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) == action

                CephadmServe(cephadm_module)._check_daemons()

                assert cephadm_module.cache.get_scheduled_daemon_action(
                    'test', daemon_name) is None

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_daemon_check_extra_config(self, _run_cephadm,
                                       cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)

        with with_host(cephadm_module, 'test'):

            # Also testing deploying mons without explicit network placement
            cephadm_module.check_mon_command({
                'prefix': 'config set',
                'who': 'mon',
                'name': 'public_network',
                'value': '127.0.0.0/8'
            })

            cephadm_module.cache.update_host_devices_networks(
                'test', [], {
                    "127.0.0.0/8": ["127.0.0.1"],
                })

            with with_service(cephadm_module, ServiceSpec(service_type='mon'),
                              CephadmOrchestrator.apply_mon,
                              'test') as d_names:
                [daemon_name] = d_names

                cephadm_module._set_extra_ceph_conf('[mon]\nk=v')

                CephadmServe(cephadm_module)._check_daemons()

                _run_cephadm.assert_called_with(
                    'test',
                    'mon.test',
                    'deploy',
                    ['--name', 'mon.test', '--reconfig', '--config-json', '-'],
                    stdin='{"config": "\\n\\n[mon]\\nk=v\\n", "keyring": ""}',
                    image='')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module,
                              ServiceSpec(service_type='grafana'),
                              CephadmOrchestrator.apply_grafana, 'test'):

                # Make sure _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set(
                    '_ceph_get', 'mon_map', {
                        'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                        'fsid': 'foobar',
                    })
                cephadm_module.notify('mon_map', None)
                cephadm_module.mock_store_set('_ceph_get', 'mgr_map',
                                              {'modules': ['dashboard']})

                with mock.patch(
                        "cephadm.module.CephadmOrchestrator.mon_command"
                ) as _mon_cmd:
                    CephadmServe(cephadm_module)._check_daemons()
                    _mon_cmd.assert_any_call({
                        'prefix': 'dashboard set-grafana-api-url',
                        'value': 'https://*****:*****',  # value masked in the source snippet
                    })

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(
                    OrchestratorError,
                    match=
                    "Must set public_network config option or specify a CIDR network,"
            ):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = CephadmServe(cephadm_module)._apply_service(
                ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [{
                "id": -1,
                "name": "default",
                "type": "root",
                "type_id": 11,
                "children": [-3]
            }, {
                "id": -3,
                "name": "host1",
                "type": "host",
                "type_id": 1,
                "pool_weights": {},
                "children": [0]
            }, {
                "id": 0,
                "device_class": "hdd",
                "name": "osd.0",
                "type": "osd",
                "type_id": 0,
                "crush_weight": 0.0243988037109375,
                "depth": 2,
                "pool_weights": {},
                "exists": 1,
                "status": "destroyed",
                "reweight": 1,
                "primary_affinity": 1
            }],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.osd_service.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm,
                            cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(service_id='foo',
                                  placement=PlacementSpec(host_pattern='*', ),
                                  data_devices=DeviceSelection(all=True))

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
            ])

            cephadm_module.cache.update_host_devices_networks(
                'test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert CephadmServe(cephadm_module)._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test',
                'osd',
                'ceph-volume', [
                    '--config-json', '-', '--', 'lvm', 'batch', '--no-auto',
                    '/dev/sdb', '--yes', '--no-systemd'
                ],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'],
                error_ok=True,
                stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with(
                'test', 'osd', 'ceph-volume',
                ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {
                'service_type': 'osd',
                'placement': {
                    'host_pattern': 'test'
                },
                'service_id': 'foo',
                'data_devices': {
                    'all': True
                }
            }
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk, prepare is used due to the hack that is in place.
            (['/dev/sda'
              ], False, "lvm batch --no-auto /dev/sda --yes --no-systemd"),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"
             ),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'], True,
             "lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"
             ),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"
             ),
        ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices,
                                           preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec',
                                placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg,
                                Devices([Device(path) for path in devices]))
            preview = preview
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(
                ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='osd.0',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    @mock.patch("cephadm.services.osd.OSD.exists", True)
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count",
                lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            cephadm_module.to_remove_osds.enqueue(
                OSD(osd_id=0,
                    replace=False,
                    force=False,
                    hostname='test',
                    fullname='osd.0',
                    process_started_at=datetime.datetime.utcnow(),
                    remove_util=cephadm_module.rm_util))
            cephadm_module.rm_util.process_removal_queue()
            assert cephadm_module.to_remove_osds == OSDQueue()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out,
                           "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = CephadmServe(cephadm_module)._apply_service(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.myhost.myid',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            CephadmServe(cephadm_module)._refresh_host_daemons('test')
            c = cephadm_module.list_daemons()
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
        (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
        (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.add_rgw),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with with_host(cephadm_module, 'test'):
            with with_daemon(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(service_id='name',
                                  pool='pool',
                                  namespace='namespace',
                                  placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this service
            # automatically.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(service_id='name',
                                    pool='pool',
                                    api_user='******',
                                    api_password='******',
                                    placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack. We never created the service, but we now need to remove it.
            # This is in contrast to the other services, which don't create this service
            # automatically.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @pytest.mark.parametrize("on_bool", [True, False])
    @pytest.mark.parametrize("fault_ident", ['fault', 'ident'])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light(self, _run_cephadm, on_bool, fault_ident,
                                cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light(fault_ident, on_bool,
                                                  [('test', '', 'dev')])
            on_off = 'on' if on_bool else 'off'
            assert wait(cephadm_module,
                        c) == [f'Set {fault_ident} light for test: {on_off}']
            _run_cephadm.assert_called_with(
                'test',
                'osd',
                'shell', [
                    '--', 'lsmcli', f'local-disk-{fault_ident}-led-{on_off}',
                    '--path', 'dev'
                ],
                error_ok=True)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light_custom(self, _run_cephadm, cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            cephadm_module.set_store('blink_device_light_cmd', 'echo hello')
            c = cephadm_module.blink_device_light('ident', True,
                                                  [('test', '', '/dev/sda')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']
            _run_cephadm.assert_called_with('test',
                                            'osd',
                                            'shell', ['--', 'echo', 'hello'],
                                            error_ok=True)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_blink_device_light_custom_per_host(self, _run_cephadm,
                                                cephadm_module):
        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'mgr0'):
            cephadm_module.set_store(
                'mgr0/blink_device_light_cmd',
                'xyz --foo --{{ ident_fault }}={{\'on\' if on else \'off\'}} \'{{ path or dev }}\''
            )
            c = cephadm_module.blink_device_light(
                'fault', True,
                [('mgr0', 'SanDisk_X400_M.2_2280_512GB_162924424784', '')])
            assert wait(cephadm_module, c) == [
                'Set fault light for mgr0:SanDisk_X400_M.2_2280_512GB_162924424784 on'
            ]
            _run_cephadm.assert_called_with(
                'mgr0',
                'osd',
                'shell', [
                    '--', 'xyz', '--foo', '--fault=on',
                    'SanDisk_X400_M.2_2280_512GB_162924424784'
                ],
                error_ok=True)

    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
        (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
        (ServiceSpec('node-exporter'),
         CephadmOrchestrator.apply_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
        (ServiceSpec('mds',
                     service_id='fsname'), CephadmOrchestrator.apply_mds),
        (ServiceSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=[
                HostPlacementSpec(hostname='test', name='fsname', network='')
            ])), CephadmOrchestrator.apply_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone',
                 placement=PlacementSpec(hosts=[
                     HostPlacementSpec(
                         hostname='test', name='realm.zone.a', network='')
                 ])), CephadmOrchestrator.apply_rgw),
        (NFSServiceSpec(service_id='name', pool='pool',
                        namespace='namespace'), CephadmOrchestrator.apply_nfs),
        (IscsiServiceSpec(
            service_id='name',
            pool='pool',
            api_user='******',
            api_password='******'), CephadmOrchestrator.apply_iscsi),
        (CustomContainerSpec(service_id='hello-world',
                             image='docker.io/library/hello-world:latest',
                             uid=65534,
                             gid=65534,
                             dirs=['foo/bar'],
                             files={'foo/bar/xyz.conf': 'aaa\nbbb'},
                             bind_mounts=[[
                                 'type=bind', 'source=lib/modules',
                                 'destination=/lib/modules', 'ro=true'
                             ]],
                             volume_mounts={'foo/bar': '/foo/bar:Z'},
                             args=['--no-healthcheck'],
                             envs=['SECRET=password'],
                             ports=[8080, 8443
                                    ]), CephadmOrchestrator.apply_container),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch(
        "cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone",
        lambda _, __, ___: None)
    def test_apply_save(self, spec: ServiceSpec, meth,
                        cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            with with_service(cephadm_module, spec, meth, 'test'):
                pass

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop,
                               cephadm_module: CephadmOrchestrator):
        spec = ServiceSpec('mds',
                           service_id='fsname',
                           placement=PlacementSpec(hosts=['host1', 'host2']))
        with with_host(cephadm_module,
                       'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            CephadmServe(cephadm_module)._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]])

            assert_rm_daemon(cephadm_module, spec.service_name(),
                             'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert "Host 'test' not found" in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert CephadmServe(cephadm_module)._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the use of the connection
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks the has_connection
            and will recreate the connection.
            """
            fuse = False

            @staticmethod
            def has_connection():
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            @staticmethod
            def exit():
                pass

        def _check(conn, *args, **kargs):
            if conn.fuse:
                raise Exception("boom: connection is dead")
            else:
                conn.fuse = True
            return '{}', None, 0

        with mock.patch("remoto.Connection",
                        side_effect=[Connection(),
                                     Connection(),
                                     Connection()]):
            with mock.patch("remoto.process.check", _check):
                with with_host(cephadm_module, 'test', refresh_hosts=False):
                    code, out, err = cephadm_module.check_host('test')
                    # First should succeed.
                    assert err is None

                    # On second it should attempt to reuse the connection, where the
                    # connection is "down" so will recreate the connection. The old
                    # code will blow up here triggering the BOOM!
                    code, out, err = cephadm_module.check_host('test')
                    assert err is None

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_etc_ceph(self, _check, _get_connection, cephadm_module):
        _get_connection.return_value = mock.Mock(), mock.Mock()
        _check.return_value = '{}', '', 0

        assert cephadm_module.manage_etc_ceph_ceph_conf is False

        with with_host(cephadm_module, 'test'):
            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf(
                'test')

        with with_host(cephadm_module, 'test'):
            cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
            cephadm_module.config_notify()
            assert cephadm_module.manage_etc_ceph_ceph_conf == True

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'],
                                      stdin=b'')

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf(
                'test')

            # set extra config and expect that we deploy another ceph.conf
            cephadm_module._set_extra_ceph_conf('[mon]\nk=v')
            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'],
                                      stdin=b'\n\n[mon]\nk=v\n')

            # reload
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()

            assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf(
                'test')

            # Make sure _check_daemons does a redeploy due to monmap change:
            cephadm_module.mock_store_set(
                '_ceph_get', 'mon_map', {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                })
            cephadm_module.notify('mon_map', mock.MagicMock())
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf(
                'test')
            cephadm_module.cache.last_etc_ceph_ceph_conf = {}
            cephadm_module.cache.load()
            assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf(
                'test')

    def test_etc_ceph_init(self):
        with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m:
            assert m.manage_etc_ceph_ceph_conf is True

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_registry_login(self, _run_cephadm,
                            cephadm_module: CephadmOrchestrator):
        def check_registry_credentials(url, username, password):
            assert cephadm_module.get_module_option('registry_url') == url
            assert cephadm_module.get_module_option(
                'registry_username') == username
            assert cephadm_module.get_module_option(
                'registry_password') == password

        _run_cephadm.return_value = '{}', '', 0
        with with_host(cephadm_module, 'test'):
            # test successful login with valid args
            code, out, err = cephadm_module.registry_login(
                'test-url', 'test-user', 'test-password')
            assert out == 'registry login scheduled'
            assert err == ''
            check_registry_credentials('test-url', 'test-user',
                                       'test-password')

            # test bad login attempt with invalid args
            code, out, err = cephadm_module.registry_login('bad-args')
            assert err == (
                "Invalid arguments. Please provide arguments <url> <username> <password> "
                "or -i <login credentials json file>")
            check_registry_credentials('test-url', 'test-user',
                                       'test-password')

            # test bad login using invalid json file
            code, out, err = cephadm_module.registry_login(
                None, None, None, '{"bad-json": "bad-json"}')
            assert err == (
                "json provided for custom registry login did not include all necessary fields. "
                "Please setup json file as\n"
                "{\n"
                " \"url\": \"REGISTRY_URL\",\n"
                " \"username\": \"REGISTRY_USERNAME\",\n"
                " \"password\": \"REGISTRY_PASSWORD\"\n"
                "}\n")
            check_registry_credentials('test-url', 'test-user',
                                       'test-password')

            # test  good login using valid json file
            good_json = ("{\"url\": \"" + "json-url" + "\", \"username\": \"" +
                         "json-user" + "\", "
                         " \"password\": \"" + "json-pass" + "\"}")
            code, out, err = cephadm_module.registry_login(
                None, None, None, good_json)
            assert out == 'registry login scheduled'
            assert err == ''
            check_registry_credentials('json-url', 'json-user', 'json-pass')

            # test bad login where args are valid but login command fails
            _run_cephadm.return_value = '{}', 'error', 1
            code, out, err = cephadm_module.registry_login(
                'fail-url', 'fail-user', 'fail-password')
            assert err == 'Host test failed to login to fail-url as fail-user with given password'
            check_registry_credentials('json-url', 'json-user', 'json-pass')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps({
                        'image_id': 'image_id',
                        'repo_digest': 'image@repo_digest',
                    })))
    @pytest.mark.parametrize("use_repo_digest", [False, True])
    def test_upgrade_run(self, use_repo_digest,
                         cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test', refresh_hosts=False):
            cephadm_module.set_container_image('global', 'image')
            if use_repo_digest:
                cephadm_module.use_repo_digest = True

                CephadmServe(cephadm_module).convert_tags_to_repo_digest()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'image@repo_digest'
            else:
                assert image == 'image'
Example #7
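An older revision of the TestCephadm suite, using the earlier daemon_action(action, type, id) signature and direct _store inventory access, and returning slightly different listing output than Example #6.
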
class TestCephadm(object):
    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [DaemonDescription(daemon_type='mon', daemon_id='a')]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                   {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name',
                                                   placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [{
                'daemon_type': 'mds',
                'hostname': 'test',
                'status': 1,
                'status_desc': 'starting'
            }]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [dict(o.to_json()) for o in wait(cephadm_module, c)]
            expected = [{
                'placement': {
                    'hosts': [{
                        'hostname': 'test',
                        'name': '',
                        'network': ''
                    }]
                },
                'service_id': 'name',
                'service_name': 'mds.name',
                'service_type': 'mds',
                'status': {
                    'running': 1,
                    'size': 0
                },
                'unmanaged': True
            }, {
                'placement': {
                    'count': 1,
                    'hosts': [{
                        'hostname': 'test',
                        'name': '',
                        'network': ''
                    }]
                },
                'spec': {
                    'rgw_realm': 'r',
                    'rgw_zone': 'z',
                },
                'service_id': 'r.z',
                'service_name': 'rgw.r.z',
                'service_type': 'rgw',
                'status': {
                    'running': 0,
                    'size': 1
                }
            }]
            assert out == expected
            assert [
                ServiceDescription.from_json(o).to_json() for o in expected
            ] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.foobar',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
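            # list_daemons(refresh=True) fills the daemon cache from the mocked
            # _run_cephadm output above: a single running rgw.myrgw.foobar on
            # host 'test', which is what the daemon_action calls below target.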
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module,
                        c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [
                    what + " rgw.myrgw.foobar from host 'test'"
                ]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
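            # 'test:0.0.0.0=a' parses as hostname=test, network=0.0.0.0, name=a,
            # hence the daemon deployed below is reported as mon.a.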
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(
                    OrchestratorError,
                    match=
                    "Must set public_network config option or specify a CIDR network,"
            ):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [{
                "id": -1,
                "name": "default",
                "type": "root",
                "type_id": 11,
                "children": [-3]
            }, {
                "id": -3,
                "name": "host1",
                "type": "host",
                "type_id": 1,
                "pool_weights": {},
                "children": [0]
            }, {
                "id": 0,
                "device_class": "hdd",
                "name": "osd.0",
                "type": "osd",
                "type_id": 0,
                "crush_weight": 0.0243988037109375,
                "depth": 2,
                "pool_weights": {},
                "exists": 1,
                "status": "destroyed",
                "reweight": 1,
                "primary_affinity": 1
            }],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.osd_service.find_destroyed_osds()
        assert out == {'host1': ['0']}
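        # The expected mapping mirrors the osd tree above: host1's only child is
        # osd.0, whose status is "destroyed", so find_destroyed_osds() reports
        # {'host1': ['0']} (osd ids are returned as strings).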

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.osd_service.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
    def test_apply_osd_save(self, _run_cephadm,
                            cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(service_id='foo',
                                  placement=PlacementSpec(host_pattern='*', ),
                                  data_devices=DeviceSelection(all=True))

            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device('/dev/sdb', available=True),
            ])

            cephadm_module.cache.update_host_devices_networks(
                'test', inventory.devices, {})
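            # Seeding the cache with one available /dev/sdb means the next apply
            # pass creates a single OSD on 'test'; that is what the ceph-volume
            # 'lvm prepare ... --data /dev/sdb' assertion below checks.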

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test',
                'osd',
                'ceph-volume', [
                    '--config-json', '-', '--', 'lvm', 'prepare',
                    '--bluestore', '--data', '/dev/sdb', '--no-systemd'
                ],
                env_vars=[],
                error_ok=True,
                stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with(
                'test', 'osd', 'ceph-volume',
                ['--', 'lvm', 'list', '--format', 'json'])

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with with_host(cephadm_module, 'test'):
            json_spec = {
                'service_type': 'osd',
                'placement': {
                    'host_pattern': 'test'
                },
                'service_id': 'foo',
                'data_devices': {
                    'all': True
                }
            }
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.osd_service.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk, prepare is used due to the hack that is in place.
            (['/dev/sda'], False,
             "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"
             ),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'
              ], True, "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"
             ),
        ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices,
                                           preview, exp_command):
        with with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec',
                                placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg,
                                Devices([Device(path) for path in devices]))
            out = cephadm_module.osd_service.driveselection_to_ceph_volume(
                ds, [], preview)
            assert out in exp_command

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='osd.0',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    @mock.patch("cephadm.services.osd.RemoveUtil.get_pg_count",
                lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0',
                                        datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with with_host(cephadm_module, 'host1'):
            with with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out,
                           "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.myhost.myid',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_remove_daemon(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
        (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
        (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.add_rgw),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec(service_id='name',
                                  pool='pool',
                                  namespace='namespace',
                                  placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: we never explicitly created this service, but adding the daemon
            # created one implicitly, so we need to remove it here. This is in contrast
            # to the other services, which don't create a service automatically.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec(service_id='name',
                                    pool='pool',
                                    api_user='******',
                                    api_password='******',
                                    placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never explicitly created this service, but adding the daemon
            # created one implicitly, so we need to remove it here. This is in contrast
            # to the other services, which don't create a service automatically.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True,
                                                  [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
        (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
        (ServiceSpec('node-exporter'),
         CephadmOrchestrator.apply_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
        (ServiceSpec('mds',
                     service_id='fsname'), CephadmOrchestrator.apply_mds),
        (ServiceSpec(
            'mds',
            service_id='fsname',
            placement=PlacementSpec(hosts=[
                HostPlacementSpec(hostname='test', name='fsname', network='')
            ])), CephadmOrchestrator.apply_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone',
                 placement=PlacementSpec(hosts=[
                     HostPlacementSpec(
                         hostname='test', name='realm.zone.a', network='')
                 ])), CephadmOrchestrator.apply_rgw),
        (NFSServiceSpec(service_id='name', pool='pool',
                        namespace='namespace'), CephadmOrchestrator.apply_nfs),
        (IscsiServiceSpec(
            service_id='name',
            pool='pool',
            api_user='******',
            api_password='******'), CephadmOrchestrator.apply_iscsi),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth,
                        cephadm_module: CephadmOrchestrator):
        with with_host(cephadm_module, 'test'):
            if not spec.placement:
                spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module,
                        c) == f'Scheduled {spec.service_name()} update...'
            assert [
                d.spec for d in wait(cephadm_module,
                                     cephadm_module.describe_service())
            ] == [spec]

            cephadm_module._apply_all_services()

            dds = wait(cephadm_module, cephadm_module.list_daemons())
            for dd in dds:
                assert dd.service_name() == spec.service_name()

            assert_rm_service(cephadm_module, spec.service_name())

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.services.cephadmservice.CephadmService.ok_to_stop")
    def test_daemon_ok_to_stop(self, ok_to_stop,
                               cephadm_module: CephadmOrchestrator):
        spec = ServiceSpec('mds',
                           service_id='fsname',
                           placement=PlacementSpec(hosts=['host1', 'host2']))
        with with_host(cephadm_module,
                       'host1'), with_host(cephadm_module, 'host2'):
            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            [daemon] = cephadm_module.cache.daemons['host1'].keys()

            spec.placement.set_hosts(['host2'])
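            # Shrinking the placement from host1+host2 to host2 alone forces the
            # scheduler to remove the mds on host1; ok_to_stop (mocked above) is
            # consulted for that daemon, as asserted further down.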

            ok_to_stop.side_effect = False

            c = cephadm_module.apply_mds(spec)
            out = wait(cephadm_module, c)
            match_glob(out, "Scheduled mds.fsname update...")
            cephadm_module._apply_all_services()

            ok_to_stop.assert_called_with([daemon[4:]])

            assert_rm_daemon(cephadm_module, spec.service_name(),
                             'host1')  # verifies ok-to-stop
            assert_rm_daemon(cephadm_module, spec.service_name(), 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()

    def test_stale_connections(self, cephadm_module):
        class Connection(object):
            """
            A mocked connection class that only allows the connection to be used
            once. If you attempt to use it again via a _check, it'll explode (go
            boom!).

            The old code triggers the boom. The new code checks has_connection()
            and recreates the connection instead.
            """
            fuse = False

            @staticmethod
            def has_connection():
                return False

            def import_module(self, *args, **kargs):
                return mock.Mock()

            @staticmethod
            def exit():
                pass

        def _check(conn, *args, **kargs):
            if conn.fuse:
                raise Exception("boom: connection is dead")
            else:
                conn.fuse = True
            return '{}', None, 0

        with mock.patch("remoto.Connection",
                        side_effect=[Connection(),
                                     Connection(),
                                     Connection()]):
            with mock.patch("remoto.process.check", _check):
                with with_host(cephadm_module, 'test'):
                    code, out, err = cephadm_module.check_host('test')
                    # First should succeed.
                    assert err is None

                    # On the second call it attempts to reuse the connection; since the
                    # connection reports itself as "down", it is recreated instead. The
                    # old code would blow up here, triggering the BOOM!
                    code, out, err = cephadm_module.check_host('test')
                    assert err is None
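
The stale-connection handling exercised above can be summarised outside the test: reuse a cached remoto connection only while it still reports itself alive, otherwise open a fresh one. A minimal sketch of that pattern (the get_connection helper and the module-level cache are hypothetical, not the actual cephadm implementation):

import remoto

_connection_cache = {}  # hostname -> remoto.Connection

def get_connection(host):
    conn = _connection_cache.get(host)
    if conn is not None and conn.has_connection():
        return conn                        # still alive: reuse it
    conn = remoto.Connection(host)         # stale or missing: recreate
    _connection_cache[host] = conn
    return conn
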
Example #8
0
                             ),
                             (
                                     ServiceSpec(
                                         service_type='osd'
                                     ),
                                     ServiceSpec(
                                         service_type='osd',
                                         service_id='foo',
                                     ),
                                     False
                             ),
                             (
                                     ServiceSpec(
                                         service_type='rgw'
                                     ),
                                     RGWSpec(),
                                     True
                             ),
                         ])
def test_spec_hash_eq(spec1: ServiceSpec,
                      spec2: ServiceSpec,
                      eq: bool):

    assert (spec1 == spec2) is eq

@pytest.mark.parametrize(
    "s_type,s_id,s_name",
    [
        ('mgr', 's_id', 'mgr'),
        ('mon', 's_id', 'mon'),
        ('mds', 's_id', 'mds.s_id'),
Example #9
0
                             (
                                     ServiceSpec(
                                         service_type='osd'
                                     ),
                                     ServiceSpec(
                                         service_type='osd',
                                         service_id='foo',
                                     ),
                                     False
                             ),
                             (
                                     ServiceSpec(
                                         service_type='rgw',
                                         service_id='foo',
                                     ),
                                     RGWSpec(service_id='foo'),
                                     True
                             ),
                         ])
def test_spec_hash_eq(spec1: ServiceSpec,
                      spec2: ServiceSpec,
                      eq: bool):

    assert (spec1 == spec2) is eq

@pytest.mark.parametrize(
    "s_type,s_id,s_name",
    [
        ('mgr', 's_id', 'mgr'),
        ('mon', 's_id', 'mon'),
        ('mds', 's_id', 'mds.s_id'),
Example #10
0
File: module.py Project: varshar16/ceph
    def describe_service(
            self,
            service_type: Optional[str] = None,
            service_name: Optional[str] = None,
            refresh: bool = False) -> List[orchestrator.ServiceDescription]:
        now = datetime_now()

        # CephCluster
        cl = self.rook_cluster.rook_api_get("cephclusters/{0}".format(
            self.rook_cluster.rook_env.cluster_name))
        self.log.debug('CephCluster %s' % cl)
        image_name = cl['spec'].get('cephVersion', {}).get('image', None)
        num_nodes = len(self.rook_cluster.get_node_names())

        spec = {}
        if service_type == 'mon' or service_type is None:
            spec['mon'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'mon',
                    placement=PlacementSpec(count=cl['spec'].get(
                        'mon', {}).get('count', 1), ),
                ),
                size=cl['spec'].get('mon', {}).get('count', 1),
                container_image_name=image_name,
                last_refresh=now,
            )
        if service_type == 'mgr' or service_type is None:
            spec['mgr'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'mgr',
                    placement=PlacementSpec.from_string('count:1'),
                ),
                size=1,
                container_image_name=image_name,
                last_refresh=now,
            )
        if not cl['spec'].get('crashCollector', {}).get('disable', False):
            spec['crash'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'crash',
                    placement=PlacementSpec.from_string('*'),
                ),
                size=num_nodes,
                container_image_name=image_name,
                last_refresh=now,
            )

        if service_type == 'mds' or service_type is None:
            # CephFilesystems
            all_fs = self.rook_cluster.rook_api_get("cephfilesystems/")
            self.log.debug('CephFilesystems %s' % all_fs)
            for fs in all_fs.get('items', []):
                svc = 'mds.' + fs['metadata']['name']
                if svc in spec:
                    continue
                # FIXME: we are conflating active (+ standby) with count
                active = fs['spec'].get('metadataServer',
                                        {}).get('activeCount', 1)
                total_mds = active
                if fs['spec'].get('metadataServer',
                                  {}).get('activeStandby', False):
                    total_mds = active * 2
                spec[svc] = orchestrator.ServiceDescription(
                    spec=ServiceSpec(
                        service_type='mds',
                        service_id=fs['metadata']['name'],
                        placement=PlacementSpec(count=active),
                    ),
                    size=total_mds,
                    container_image_name=image_name,
                    last_refresh=now,
                )

        if service_type == 'rgw' or service_type is None:
            # CephObjectstores
            all_zones = self.rook_cluster.rook_api_get("cephobjectstores/")
            self.log.debug('CephObjectstores %s' % all_zones)
            for zone in all_zones.get('items', []):
                rgw_realm = zone['metadata']['name']
                rgw_zone = rgw_realm
                svc = 'rgw.' + rgw_realm + '.' + rgw_zone
                if svc in spec:
                    continue
                active = zone['spec']['gateway']['instances']
                if 'securePort' in zone['spec']['gateway']:
                    ssl = True
                    port = zone['spec']['gateway']['securePort']
                else:
                    ssl = False
                    port = zone['spec']['gateway']['port'] or 80
                spec[svc] = orchestrator.ServiceDescription(
                    spec=RGWSpec(
                        service_id=rgw_realm + '.' + rgw_zone,
                        rgw_realm=rgw_realm,
                        rgw_zone=rgw_zone,
                        ssl=ssl,
                        rgw_frontend_port=port,
                        placement=PlacementSpec(count=active),
                    ),
                    size=active,
                    container_image_name=image_name,
                    last_refresh=now,
                )

        if service_type == 'nfs' or service_type is None:
            # CephNFSes
            all_nfs = self.rook_cluster.rook_api_get("cephnfses/")
            self.log.warning('CephNFS %s' % all_nfs)
            for nfs in all_nfs.get('items', []):
                nfs_name = nfs['metadata']['name']
                svc = 'nfs.' + nfs_name
                if svc in spec:
                    continue
                active = nfs['spec'].get('server', {}).get('active')
                spec[svc] = orchestrator.ServiceDescription(
                    spec=NFSServiceSpec(
                        service_id=nfs_name,
                        placement=PlacementSpec(count=active),
                    ),
                    size=active,
                    last_refresh=now,
                )

        for dd in self._list_daemons():
            if dd.service_name() not in spec:
                continue
            service = spec[dd.service_name()]
            service.running += 1
            if not service.container_image_id:
                service.container_image_id = dd.container_image_id
            if not service.container_image_name:
                service.container_image_name = dd.container_image_name
            if service.last_refresh is None or not dd.last_refresh or dd.last_refresh < service.last_refresh:
                service.last_refresh = dd.last_refresh
            if service.created is None or dd.created is None or dd.created < service.created:
                service.created = dd.created

        return [v for k, v in spec.items()]
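
A rough sketch of how a caller might consume the ServiceDescription list returned above; the module variable and the print format are hypothetical, but the fields used (spec, size, running, container_image_name) are the ones populated by the code above:

for sd in module.describe_service():
    print(f"{sd.spec.service_name():<24} "
          f"{sd.running}/{sd.size} running  "
          f"image={sd.container_image_name}")
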
Example #11
0
    def test_ingress_config(self, _run_cephadm,
                            cephadm_module: CephadmOrchestrator):
        _run_cephadm.side_effect = async_side_effect(('{}', '', 0))

        with with_host(cephadm_module, 'test'):
            cephadm_module.cache.update_host_networks(
                'test', {'1.2.3.0/24': {
                    'if0': ['1.2.3.4/32']
                }})

            # the ingress backend
            s = RGWSpec(service_id="foo",
                        placement=PlacementSpec(count=1),
                        rgw_frontend_type='beast')

            ispec = IngressSpec(service_type='ingress',
                                service_id='test',
                                backend_service='rgw.foo',
                                frontend_port=8089,
                                monitor_port=8999,
                                monitor_user='******',
                                monitor_password='******',
                                keepalived_password='******',
                                virtual_interface_networks=['1.2.3.0/24'],
                                virtual_ip="1.2.3.4/32")
            with with_service(cephadm_module,
                              s) as _, with_service(cephadm_module,
                                                    ispec) as _:
                # generate the keepalived conf based on the specified spec
                keepalived_generated_conf = cephadm_module.cephadm_services[
                    'ingress'].keepalived_generate_config(
                        CephadmDaemonDeploySpec(
                            host='test',
                            daemon_id='ingress',
                            service_name=ispec.service_name()))

                keepalived_expected_conf = {
                    'files': {
                        'keepalived.conf':
                        '# This file is generated by cephadm.\n'
                        'vrrp_script check_backend {\n    '
                        'script "/usr/bin/curl http://localhost:8999/health"\n    '
                        'weight -20\n    '
                        'interval 2\n    '
                        'rise 2\n    '
                        'fall 2\n}\n\n'
                        'vrrp_instance VI_0 {\n  '
                        'state MASTER\n  '
                        'priority 100\n  '
                        'interface if0\n  '
                        'virtual_router_id 51\n  '
                        'advert_int 1\n  '
                        'authentication {\n      '
                        'auth_type PASS\n      '
                        'auth_pass 12345\n  '
                        '}\n  '
                        'unicast_src_ip 1::4\n  '
                        'unicast_peer {\n  '
                        '}\n  '
                        'virtual_ipaddress {\n    '
                        '1.2.3.4/32 dev if0\n  '
                        '}\n  '
                        'track_script {\n      '
                        'check_backend\n  }\n'
                        '}'
                    }
                }

                # check keepalived config
                assert keepalived_generated_conf[0] == keepalived_expected_conf

                # generate the haproxy conf based on the specified spec
                haproxy_generated_conf = cephadm_module.cephadm_services[
                    'ingress'].haproxy_generate_config(
                        CephadmDaemonDeploySpec(
                            host='test',
                            daemon_id='ingress',
                            service_name=ispec.service_name()))

                haproxy_expected_conf = {
                    'files': {
                        'haproxy.cfg':
                        '# This file is generated by cephadm.'
                        '\nglobal\n    log         '
                        '127.0.0.1 local2\n    '
                        'chroot      /var/lib/haproxy\n    '
                        'pidfile     /var/lib/haproxy/haproxy.pid\n    '
                        'maxconn     8000\n    '
                        'daemon\n    '
                        'stats socket /var/lib/haproxy/stats\n'
                        '\ndefaults\n    '
                        'mode                    http\n    '
                        'log                     global\n    '
                        'option                  httplog\n    '
                        'option                  dontlognull\n    '
                        'option http-server-close\n    '
                        'option forwardfor       except 127.0.0.0/8\n    '
                        'option                  redispatch\n    '
                        'retries                 3\n    '
                        'timeout queue           20s\n    '
                        'timeout connect         5s\n    '
                        'timeout http-request    1s\n    '
                        'timeout http-keep-alive 5s\n    '
                        'timeout client          1s\n    '
                        'timeout server          1s\n    '
                        'timeout check           5s\n    '
                        'maxconn                 8000\n'
                        '\nfrontend stats\n    '
                        'mode http\n    '
                        'bind 1.2.3.4:8999\n    '
                        'bind localhost:8999\n    '
                        'stats enable\n    '
                        'stats uri /stats\n    '
                        'stats refresh 10s\n    '
                        'stats auth admin:12345\n    '
                        'http-request use-service prometheus-exporter if { path /metrics }\n    '
                        'monitor-uri /health\n'
                        '\nfrontend frontend\n    '
                        'bind 1.2.3.4:8089\n    '
                        'default_backend backend\n\n'
                        'backend backend\n    '
                        'option forwardfor\n    '
                        'balance static-rr\n    '
                        'option httpchk HEAD / HTTP/1.0\n    '
                        'server ' + haproxy_generated_conf[1][0] +
                        ' 1::4:80 check weight 100\n'
                    }
                }

                assert haproxy_generated_conf[0] == haproxy_expected_conf
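
The expected haproxy configuration above is derived directly from the IngressSpec fields: the frontend bind line, for example, combines virtual_ip (with the prefix length stripped) and frontend_port. A toy illustration of that mapping; the frontend_bind helper is hypothetical, cephadm renders the real file from templates:

def frontend_bind(virtual_ip, frontend_port):
    addr = virtual_ip.split('/')[0]        # '1.2.3.4/32' -> '1.2.3.4'
    return f"bind {addr}:{frontend_port}"

assert frontend_bind("1.2.3.4/32", 8089) == "bind 1.2.3.4:8089"
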
Example #12
0
    def describe_service(
            self,
            service_type: Optional[str] = None,
            service_name: Optional[str] = None,
            refresh: bool = False) -> List[orchestrator.ServiceDescription]:
        now = datetime_now()

        # CephCluster
        cl = self.rook_cluster.rook_api_get("cephclusters/{0}".format(
            self.rook_cluster.rook_env.cluster_name))
        self.log.debug('CephCluster %s' % cl)
        image_name = cl['spec'].get('cephVersion', {}).get('image', None)
        num_nodes = len(self.rook_cluster.get_node_names())

        spec = {}
        if service_type == 'mon' or service_type is None:
            spec['mon'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'mon',
                    placement=PlacementSpec(count=cl['spec'].get(
                        'mon', {}).get('count', 1), ),
                ),
                size=cl['spec'].get('mon', {}).get('count', 1),
                container_image_name=image_name,
                last_refresh=now,
            )
        if service_type == 'mgr' or service_type is None:
            spec['mgr'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'mgr',
                    placement=PlacementSpec.from_string('count:1'),
                ),
                size=1,
                container_image_name=image_name,
                last_refresh=now,
            )

        if (service_type == 'crash'
                or service_type is None and not cl['spec'].get(
                    'crashCollector', {}).get('disable', False)):
            spec['crash'] = orchestrator.ServiceDescription(
                spec=ServiceSpec(
                    'crash',
                    placement=PlacementSpec.from_string('*'),
                ),
                size=num_nodes,
                container_image_name=image_name,
                last_refresh=now,
            )

        if service_type == 'mds' or service_type is None:
            # CephFilesystems
            all_fs = self.rook_cluster.get_resource("cephfilesystems")
            for fs in all_fs:
                svc = 'mds.' + fs['metadata']['name']
                if svc in spec:
                    continue
                # FIXME: we are conflating active (+ standby) with count
                active = fs['spec'].get('metadataServer',
                                        {}).get('activeCount', 1)
                total_mds = active
                if fs['spec'].get('metadataServer',
                                  {}).get('activeStandby', False):
                    total_mds = active * 2
                spec[svc] = orchestrator.ServiceDescription(
                    spec=ServiceSpec(
                        service_type='mds',
                        service_id=fs['metadata']['name'],
                        placement=PlacementSpec(count=active),
                    ),
                    size=total_mds,
                    container_image_name=image_name,
                    last_refresh=now,
                )

        if service_type == 'rgw' or service_type is None:
            # CephObjectstores
            all_zones = self.rook_cluster.get_resource("cephobjectstores")
            for zone in all_zones:
                svc = 'rgw.' + zone['metadata']['name']
                if svc in spec:
                    continue
                active = zone['spec']['gateway']['instances']
                if 'securePort' in zone['spec']['gateway']:
                    ssl = True
                    port = zone['spec']['gateway']['securePort']
                else:
                    ssl = False
                    port = zone['spec']['gateway']['port'] or 80
                rgw_zone = zone['spec'].get('zone', {}).get('name') or None
                spec[svc] = orchestrator.ServiceDescription(
                    spec=RGWSpec(
                        service_id=zone['metadata']['name'],
                        rgw_zone=rgw_zone,
                        ssl=ssl,
                        rgw_frontend_port=port,
                        placement=PlacementSpec(count=active),
                    ),
                    size=active,
                    container_image_name=image_name,
                    last_refresh=now,
                )

        if service_type == 'nfs' or service_type is None:
            # CephNFSes
            all_nfs = self.rook_cluster.get_resource("cephnfses")
            nfs_pods = self.rook_cluster.describe_pods('nfs', None, None)
            for nfs in all_nfs:
                if nfs['spec']['rados']['pool'] != NFS_POOL_NAME:
                    continue
                nfs_name = nfs['metadata']['name']
                svc = 'nfs.' + nfs_name
                if svc in spec:
                    continue
                active = nfs['spec'].get('server', {}).get('active')
                creation_timestamp = datetime.datetime.strptime(
                    nfs['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
                spec[svc] = orchestrator.ServiceDescription(
                    spec=NFSServiceSpec(
                        service_id=nfs_name,
                        placement=PlacementSpec(count=active),
                    ),
                    size=active,
                    last_refresh=now,
                    running=len([
                        1 for pod in nfs_pods
                        if pod['labels']['ceph_nfs'] == nfs_name
                    ]),
                    created=creation_timestamp.astimezone(
                        tz=datetime.timezone.utc))
        if service_type == 'osd' or service_type is None:
            # OSDs
            # FIXME: map running OSDs back to their respective services...

            # the catch-all unmanaged
            all_osds = self.rook_cluster.get_osds()
            svc = 'osd'
            spec[svc] = orchestrator.ServiceDescription(
                spec=DriveGroupSpec(
                    unmanaged=True,
                    service_type='osd',
                ),
                size=len(all_osds),
                last_refresh=now,
                running=sum(osd.status.phase == 'Running' for osd in all_osds))

            # drivegroups
            for name, dg in self._drive_group_map.items():
                spec[f'osd.{name}'] = orchestrator.ServiceDescription(
                    spec=dg,
                    last_refresh=now,
                    size=0,
                    running=0,
                )

        if service_type == 'rbd-mirror' or service_type is None:
            # rbd-mirrors
            all_mirrors = self.rook_cluster.get_resource("cephrbdmirrors")
            for mirror in all_mirrors:
                logging.warn(mirror)
                mirror_name = mirror['metadata']['name']
                svc = 'rbd-mirror.' + mirror_name
                if svc in spec:
                    continue
                spec[svc] = orchestrator.ServiceDescription(
                    spec=ServiceSpec(
                        service_id=mirror_name,
                        service_type="rbd-mirror",
                        placement=PlacementSpec(count=1),
                    ),
                    size=1,
                    last_refresh=now,
                )

        for dd in self._list_daemons():
            if dd.service_name() not in spec:
                continue
            service = spec[dd.service_name()]
            service.running += 1
            if not service.container_image_id:
                service.container_image_id = dd.container_image_id
            if not service.container_image_name:
                service.container_image_name = dd.container_image_name
            if service.last_refresh is None or not dd.last_refresh or dd.last_refresh < service.last_refresh:
                service.last_refresh = dd.last_refresh
            if service.created is None or dd.created is None or dd.created < service.created:
                service.created = dd.created

        return [v for k, v in spec.items()]
Example #13
0
                  'last_configured']:
            if k in j:
                j[k] = j[k].rstrip('Z')
        return j

    assert dd_json == convert_to_old_style_json(
        DaemonDescription.from_json(dd_json).to_json())


@pytest.mark.parametrize("spec,dd,valid",
[   # noqa: E128
    # https://tracker.ceph.com/issues/44934
    (
        RGWSpec(
            service_id="foo",
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
        ),
        DaemonDescription(
            daemon_type='rgw',
            daemon_id="foo.ceph-001.ytywjo",
            hostname="ceph-001",
        ),
        True
    ),
    (
        # no realm
        RGWSpec(
            service_id="foo.bar",
            rgw_zone="eu-central-1",
        ),
Example #14
0
def test_rgw_service_name(spec: RGWSpec, dd: DaemonDescription, valid):
    if valid:
        assert spec.service_name() == dd.service_name()
    else:
        with pytest.raises(OrchestratorError):
            dd.service_name()
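
Together with the parametrized cases from Example #13, a valid pairing means the service name computed from the spec and the one derived from the daemon id agree. A quick illustration, assuming the same ceph.deployment / orchestrator imports the tests on this page already use:

from ceph.deployment.service_spec import RGWSpec
from orchestrator import DaemonDescription

spec = RGWSpec(service_id="foo",
               rgw_realm="default-rgw-realm",
               rgw_zone="eu-central-1")
dd = DaemonDescription(daemon_type='rgw',
                       daemon_id="foo.ceph-001.ytywjo",
                       hostname="ceph-001")

assert spec.service_name() == "rgw.foo"
assert dd.service_name() == spec.service_name()
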
Example #15
0
    "spec1, spec2, eq",
    [
        (ServiceSpec(service_type='mon'), ServiceSpec(service_type='mon'),
         True),
        (ServiceSpec(service_type='mon'),
         ServiceSpec(service_type='mon', service_id='foo'), True),
        # Add service_type='mgr'
        (ServiceSpec(service_type='osd'), ServiceSpec(
            service_type='osd', ), True),
        (ServiceSpec(service_type='osd'), DriveGroupSpec(), True),
        (ServiceSpec(service_type='osd'),
         ServiceSpec(
             service_type='osd',
             service_id='foo',
         ), False),
        (ServiceSpec(service_type='rgw'), RGWSpec(), True),
    ])
def test_spec_hash_eq(spec1: ServiceSpec, spec2: ServiceSpec, eq: bool):

    assert (spec1 == spec2) is eq
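
The cases above boil down to this: a bare ServiceSpec('rgw') compares equal to a bare RGWSpec() (and likewise osd vs DriveGroupSpec), while two osd specs that differ only in service_id do not. A small illustration using those same cases, with the usual ceph.deployment imports:

from ceph.deployment.service_spec import ServiceSpec, RGWSpec
from ceph.deployment.drive_group import DriveGroupSpec

assert ServiceSpec(service_type='rgw') == RGWSpec()
assert ServiceSpec(service_type='osd') == DriveGroupSpec()
assert ServiceSpec(service_type='osd') != ServiceSpec(service_type='osd',
                                                      service_id='foo')
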


@pytest.mark.parametrize("s_type,s_id,s_name", [
    ('mgr', 's_id', 'mgr'),
    ('mon', 's_id', 'mon'),
    ('mds', 's_id', 'mds.s_id'),
    ('rgw', 's_id', 'rgw.s_id'),
    ('nfs', 's_id', 'nfs.s_id'),
    ('iscsi', 's_id', 'iscsi.s_id'),
    ('osd', 's_id', 'osd.s_id'),
])
Example #16
0
class TestCephadm(object):
    @contextmanager
    def _with_host(self, m, name):
        # type: (CephadmOrchestrator, str) -> None
        wait(m, m.add_host(HostSpec(hostname=name)))
        yield
        wait(m, m.remove_host(name))

    def test_get_unique_name(self, cephadm_module):
        # type: (CephadmOrchestrator) -> None
        existing = [DaemonDescription(daemon_type='mon', daemon_id='a')]
        new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
        match_glob(new_mon, 'myhost')
        new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
        match_glob(new_mgr, 'myhost.*')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_host(self, cephadm_module):
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []
        with self._with_host(cephadm_module, 'test'):
            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]

            # Be careful with backward compatibility when changing things here:
            assert json.loads(cephadm_module._store['inventory']) == \
                   {"test": {"hostname": "test", "addr": "test", "labels": [], "status": ""}}

            with self._with_host(cephadm_module, 'second'):
                assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                    HostSpec('test', 'test'),
                    HostSpec('second', 'second')
                ]

            assert wait(cephadm_module, cephadm_module.get_hosts()) == [
                HostSpec('test', 'test')
            ]
        assert wait(cephadm_module, cephadm_module.get_hosts()) == []

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_service_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            assert wait(cephadm_module, c) == []

            ps = PlacementSpec(hosts=['test'], count=1)
            c = cephadm_module.add_mds(ServiceSpec('mds', 'name',
                                                   placement=ps))
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed mds.name.* on host 'test'")

            c = cephadm_module.list_daemons()

            def remove_id(dd):
                out = dd.to_json()
                del out['daemon_id']
                return out

            assert [remove_id(dd) for dd in wait(cephadm_module, c)] == [{
                'daemon_type':
                'mds',
                'hostname':
                'test',
                'status':
                1,
                'status_desc':
                'starting'
            }]

            ps = PlacementSpec(hosts=['test'], count=1)
            spec = ServiceSpec('rgw', 'r.z', placement=ps)
            c = cephadm_module.apply_rgw(spec)
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            c = cephadm_module.describe_service()
            out = [o.to_json() for o in wait(cephadm_module, c)]
            expected = [{
                'placement': {
                    'hosts': [{
                        'hostname': 'test',
                        'name': '',
                        'network': ''
                    }]
                },
                'service_id': 'name',
                'service_name': 'mds.name',
                'service_type': 'mds',
                'status': {
                    'running': 1,
                    'size': 0
                },
                'unmanaged': True
            }, {
                'placement': {
                    'count': 1,
                    'hosts': [{
                        'hostname': 'test',
                        'name': '',
                        'network': ''
                    }]
                },
                'rgw_realm': 'r',
                'rgw_zone': 'z',
                'service_id': 'r.z',
                'service_name': 'rgw.r.z',
                'service_type': 'rgw',
                'status': {
                    'running': 0,
                    'size': 1
                }
            }]
            assert out == expected
            assert [
                ServiceDescription.from_json(o).to_json() for o in expected
            ] == expected

            assert_rm_service(cephadm_module, 'rgw.r.z')
            assert_rm_daemon(cephadm_module, 'mds.name', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_device_ls(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.get_inventory()
            assert wait(cephadm_module, c) == [InventoryHost('test')]

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.foobar',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_daemon_action(self, cephadm_module):
        cephadm_module.service_cache_timeout = 10
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module,
                        c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
                assert wait(cephadm_module, c) == [
                    what + " rgw.myrgw.foobar from host 'test'"
                ]

            assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mon_add(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
            assert wait(cephadm_module, c) == ["Deployed mon.a on host 'test'"]

            with pytest.raises(
                    OrchestratorError,
                    match=
                    "Must set public_network config option or specify a CIDR network,"
            ):
                ps = PlacementSpec(hosts=['test'], count=1)
                c = cephadm_module.add_mon(ServiceSpec('mon', placement=ps))
                wait(cephadm_module, c)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('[]'))
    def test_mgr_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test:0.0.0.0=a'], count=1)
            r = cephadm_module._apply_service(ServiceSpec('mgr', placement=ps))
            assert r

            assert_rm_daemon(cephadm_module, 'mgr.a', 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds(self, _mon_cmd, cephadm_module):
        dict_out = {
            "nodes": [{
                "id": -1,
                "name": "default",
                "type": "root",
                "type_id": 11,
                "children": [-3]
            }, {
                "id": -3,
                "name": "host1",
                "type": "host",
                "type_id": 1,
                "pool_weights": {},
                "children": [0]
            }, {
                "id": 0,
                "device_class": "hdd",
                "name": "osd.0",
                "type": "osd",
                "type_id": 0,
                "crush_weight": 0.0243988037109375,
                "depth": 2,
                "pool_weights": {},
                "exists": 1,
                "status": "destroyed",
                "reweight": 1,
                "primary_affinity": 1
            }],
            "stray": []
        }
        json_out = json.dumps(dict_out)
        _mon_cmd.return_value = (0, json_out, '')
        out = cephadm_module.find_destroyed_osds()
        assert out == {'host1': ['0']}

    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command")
    def test_find_destroyed_osds_cmd_failure(self, _mon_cmd, cephadm_module):
        _mon_cmd.return_value = (1, "", "fail_msg")
        with pytest.raises(OrchestratorError):
            out = cephadm_module.find_destroyed_osds()

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {
                'service_type': 'osd',
                'host_pattern': 'test',
                'service_id': 'foo',
                'data_devices': {
                    'all': True
                }
            }
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.SpecStore.save")
    def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            json_spec = {
                'service_type': 'osd',
                'placement': {
                    'host_pattern': 'test'
                },
                'service_id': 'foo',
                'data_devices': {
                    'all': True
                }
            }
            spec = ServiceSpec.from_json(json_spec)
            assert isinstance(spec, DriveGroupSpec)
            c = cephadm_module.apply_drivegroups([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
            _save_spec.assert_called_with(spec)

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_create_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            c = cephadm_module.create_osds(dg)
            out = wait(cephadm_module, c)
            assert out == "Created no osd(s) on host test; already created?"

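    # prepare_drivegroup() expands the drive group into (hostname, DriveSelection)
    # pairs; with a single matching host we expect exactly one pair for 'test'.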
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_prepare_drivegroup(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            out = cephadm_module.prepare_drivegroup(dg)
            assert len(out) == 1
            f1 = out[0]
            assert f1[0] == 'test'
            assert isinstance(f1[1], DriveSelection)

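    # Each case maps a device list plus the preview flag to the expected
    # ceph-volume command line produced by driveselection_to_ceph_volume().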
    @pytest.mark.parametrize(
        "devices, preview, exp_command",
        [
            # no preview and only one disk: prepare is used due to the hack that is in place.
            (['/dev/sda'], False,
             "lvm prepare --bluestore --data /dev/sda --no-systemd"),
            # no preview and multiple disks, uses batch
            (['/dev/sda', '/dev/sdb'], False,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"
             ),
            # preview and only one disk needs to use batch again to generate the preview
            (['/dev/sda'], True,
             "lvm batch --no-auto /dev/sda --report --format json"),
            # preview and multiple disks work the same
            (['/dev/sda', '/dev/sdb'], True,
             "CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"
             ),
        ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_driveselection_to_ceph_volume(self, cephadm_module, devices,
                                           preview, exp_command):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(service_id='test.spec',
                                placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=devices))
            ds = DriveSelection(dg,
                                Devices([Device(path) for path in devices]))
            out = cephadm_module.driveselection_to_ceph_volume(
                dg, ds, [], preview)
            assert out in exp_command

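    # Previewing by name should look up the stored spec via SpecStore.find
    # (called once with service_name='foo') and run ceph-volume exactly once.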
    @mock.patch("cephadm.module.SpecStore.find")
    @mock.patch("cephadm.module.CephadmOrchestrator.prepare_drivegroup")
    @mock.patch(
        "cephadm.module.CephadmOrchestrator.driveselection_to_ceph_volume")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_ceph_volume_command")
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_preview_drivegroups_str(self, _run_c_v_command, _ds_to_cv,
                                     _prepare_dg, _find_store, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                                data_devices=DeviceSelection(paths=['']))
            _find_store.return_value = [dg]
            _prepare_dg.return_value = [('host1', 'ds_dummy')]
            _run_c_v_command.return_value = ("{}", '', 0)
            cephadm_module.preview_drivegroups(drive_group_name='foo')
            _find_store.assert_called_once_with(service_name='foo')
            _prepare_dg.assert_called_once_with(dg)
            _run_c_v_command.assert_called_once()

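    # Remove osd.0 from the host, then push an OSDRemoval through rm_util;
    # with the PG count mocked to 0, the background pass should leave the
    # removal queue empty and remove_osds_status() should report nothing.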
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='osd.0',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    @mock.patch("cephadm.osd.RemoveUtil.get_pg_count", lambda _, __: 0)
    def test_remove_osds(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)

            c = cephadm_module.remove_daemons(['osd.0'])
            out = wait(cephadm_module, c)
            assert out == ["Removed osd.0 from host 'test'"]

            osd_removal_op = OSDRemoval(0, False, False, 'test', 'osd.0',
                                        datetime.datetime.utcnow(), -1)
            cephadm_module.rm_util.queue_osds_for_removal({osd_removal_op})
            cephadm_module.rm_util._remove_osds_bg()
            assert cephadm_module.rm_util.to_remove_osds == set()

            c = cephadm_module.remove_osds_status()
            out = wait(cephadm_module, c)
            assert out == set()

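    # Deploy a single rgw daemon on host1, then widen the placement to both
    # hosts and re-apply the service via _apply_service.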
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_rgw_update(self, cephadm_module):
        with self._with_host(cephadm_module, 'host1'):
            with self._with_host(cephadm_module, 'host2'):
                ps = PlacementSpec(hosts=['host1'], count=1)
                c = cephadm_module.add_rgw(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                [out] = wait(cephadm_module, c)
                match_glob(out,
                           "Deployed rgw.realm.zone1.host1.* on host 'host1'")

                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
                r = cephadm_module._apply_service(
                    RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
                assert r

                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
                assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm(
                    json.dumps([
                        dict(
                            name='rgw.myrgw.myhost.myid',
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            version='version',
                            state='running',
                        )
                    ])))
    def test_remove_daemon(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.list_daemons(refresh=True)
            wait(cephadm_module, c)
            c = cephadm_module.remove_daemons(['rgw.myrgw.myhost.myid'])
            out = wait(cephadm_module, c)
            assert out == ["Removed rgw.myrgw.myhost.myid from host 'test'"]

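    # Every add_* method should deploy exactly one daemon of the matching
    # service name on the 'test' host.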
    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('crash'), CephadmOrchestrator.add_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.add_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.add_grafana),
        (ServiceSpec('node-exporter'), CephadmOrchestrator.add_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
        (ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.add_rgw),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)

            c = meth(cephadm_module, spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, f"Deployed {spec.service_name()}.* on host 'test'")

            assert_rm_daemon(cephadm_module, spec.service_name(), 'test')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_nfs(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = NFSServiceSpec('name',
                                  pool='pool',
                                  namespace='namespace',
                                  placement=ps)
            c = cephadm_module.add_nfs(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed nfs.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'nfs.name.test', 'test')

            # Hack: we never created the service explicitly, but it was created
            # automatically when the daemon was added, so we now need to remove it.
            # This is in contrast to the other services, which are not created
            # automatically.
            assert_rm_service(cephadm_module, 'nfs.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    @mock.patch("cephadm.module.CephadmOrchestrator.rados", mock.MagicMock())
    def test_iscsi(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            ps = PlacementSpec(hosts=['test'], count=1)
            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
            c = cephadm_module.add_iscsi(spec)
            [out] = wait(cephadm_module, c)
            match_glob(out, "Deployed iscsi.name.* on host 'test'")

            assert_rm_daemon(cephadm_module, 'iscsi.name.test', 'test')

            # Hack: we never created the service explicitly, but it was created
            # automatically when the daemon was added, so we now need to remove it.
            # This is in contrast to the other services, which are not created
            # automatically.
            assert_rm_service(cephadm_module, 'iscsi.name')

    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_blink_device_light(self, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            c = cephadm_module.blink_device_light('ident', True,
                                                  [('test', '', '')])
            assert wait(cephadm_module, c) == ['Set ident light for test: on']

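    # Every apply_* method should schedule an update and persist the spec, so
    # that describe_service() reports it back unchanged.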
    @pytest.mark.parametrize("spec, meth", [
        (ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr),
        (ServiceSpec('crash'), CephadmOrchestrator.apply_crash),
        (ServiceSpec('prometheus'), CephadmOrchestrator.apply_prometheus),
        (ServiceSpec('grafana'), CephadmOrchestrator.apply_grafana),
        (ServiceSpec('node-exporter'),
         CephadmOrchestrator.apply_node_exporter),
        (ServiceSpec('alertmanager'), CephadmOrchestrator.apply_alertmanager),
        (ServiceSpec('rbd-mirror'), CephadmOrchestrator.apply_rbd_mirror),
        (ServiceSpec('mds',
                     service_id='fsname'), CephadmOrchestrator.apply_mds),
        (RGWSpec(rgw_realm='realm',
                 rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
        (NFSServiceSpec('name', pool='pool',
                        namespace='namespace'), CephadmOrchestrator.apply_nfs),
        (IscsiServiceSpec('name',
                          pool='pool'), CephadmOrchestrator.apply_iscsi),
    ])
    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                _run_cephadm('{}'))
    def test_apply_save(self, spec: ServiceSpec, meth, cephadm_module):
        with self._with_host(cephadm_module, 'test'):
            spec.placement = PlacementSpec(hosts=['test'], count=1)
            c = meth(cephadm_module, spec)
            assert wait(cephadm_module,
                        c) == f'Scheduled {spec.service_name()} update...'
            assert [
                d.spec for d in wait(cephadm_module,
                                     cephadm_module.describe_service())
            ] == [spec]

            assert_rm_service(cephadm_module, spec.service_name())

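    # Simulate a connection failure: check_host() should report the error and
    # the host should be listed as Offline until the connection succeeds again.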
    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
    @mock.patch("remoto.process.check")
    def test_offline(self, _check, _get_connection, cephadm_module):
        _check.return_value = '{}', '', 0
        _get_connection.return_value = mock.Mock(), mock.Mock()
        with self._with_host(cephadm_module, 'test'):
            _get_connection.side_effect = HostNotFound
            code, out, err = cephadm_module.check_host('test')
            assert out == ''
            assert 'Failed to connect to test (test)' in err

            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test', status='Offline').to_json()

            _get_connection.side_effect = None
            assert cephadm_module._check_host('test') is None
            out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json()
            assert out == HostSpec('test', 'test').to_json()