def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """Applying an all-devices OSD drivegroup must schedule the service and
    then drive a ``ceph-volume lvm prepare`` call for the one available
    device on the host.
    """
    _run_cephadm.return_value = ('{}', '', 0)
    with with_host(cephadm_module, 'test'):
        spec = DriveGroupSpec(
            service_id='foo',
            placement=PlacementSpec(host_pattern='*', ),
            data_devices=DeviceSelection(all=True)
        )
        c = cephadm_module.apply([spec])
        assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

        # Publish one available device so the drivegroup matches something.
        inventory = Devices([
            Device('/dev/sdb', available=True),
        ])
        cephadm_module.cache.update_host_devices_networks(
            'test', inventory.devices, {})

        _run_cephadm.return_value = (['{}'], '', 0)

        # Fix (E712): compare against the False singleton with identity,
        # not equality.
        assert cephadm_module._apply_all_services() is False

        # The drivegroup must have resulted in a prepare call for /dev/sdb
        # carrying the OSDSPEC affinity of this service.
        _run_cephadm.assert_any_call(
            'test', 'osd', 'ceph-volume',
            ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore',
             '--data', '/dev/sdb', '--no-systemd'],
            env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
            stdin='{"config": "", "keyring": ""}')
        # The last call refreshes the LVM state.
        _run_cephadm.assert_called_with(
            'test', 'osd', 'ceph-volume',
            ['--', 'lvm', 'list', '--format', 'json'])
def test_daemon_check_post(self, cephadm_module: CephadmOrchestrator):
    """A monmap change must make _check_daemons redeploy grafana and
    re-publish the dashboard's grafana API URL via a mon command.
    """
    with with_host(cephadm_module, 'test'), \
            with_service(cephadm_module, ServiceSpec(service_type='grafana'),
                         CephadmOrchestrator.apply_grafana, 'test'):
        # Make sure, _check_daemons does a redeploy due to monmap change:
        cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
            'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
            'fsid': 'foobar',
        })
        cephadm_module.notify('mon_map', None)

        # The dashboard module must be reported as enabled for the
        # set-grafana-api-url call to be issued.
        cephadm_module.mock_store_set('_ceph_get', 'mgr_map', {'modules': ['dashboard']})

        with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as mon_cmd:
            cephadm_module._check_daemons()
            mon_cmd.assert_any_call(
                {'prefix': 'dashboard set-grafana-api-url', 'value': 'https://test:3000'})
def test_etc_ceph(self, _check, _get_connection, cephadm_module: CephadmOrchestrator):
    """Enabling ``manage_etc_ceph_ceph_conf`` must push /etc/ceph/ceph.conf
    to the host, after which a monmap change marks the host dirty again.
    """
    _get_connection.return_value = mock.Mock(), mock.Mock()
    _check.return_value = '{}', '', 0
    with with_host(cephadm_module, 'test'):
        # Option is off by default: no conf distribution needed.
        assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
    with with_host(cephadm_module, 'test'):
        cephadm_module.set_module_option('manage_etc_ceph_ceph_conf', True)
        cephadm_module.config_notify()
        # Fix (E712): compare against the True singleton with identity,
        # not equality.
        assert cephadm_module.manage_etc_ceph_ceph_conf is True

        cephadm_module._refresh_hosts_and_daemons()
        # The conf is written onto the host through dd over the connection.
        _check.assert_called_with(ANY, ['dd', 'of=/etc/ceph/ceph.conf'], stdin=b'')
        assert not cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')

        # A monmap change invalidates the distributed ceph.conf.
        cephadm_module.notify('mon_map', mock.MagicMock())
        assert cephadm_module.cache.host_needs_new_etc_ceph_ceph_conf('test')
def test_rgw_update(self, frontend, ssl, expected, cephadm_module: CephadmOrchestrator):
    """An RGW bound to an IPv6 network must render the parametrized
    ``rgw_frontends`` config value for its daemon.
    """
    with with_host(cephadm_module, 'host1'):
        ipv6_networks = {
            'fd00:fd00:fd00:3000::/64': {'if0': ['fd00:fd00:fd00:3000::1']}
        }
        cephadm_module.cache.update_host_networks('host1', ipv6_networks)

        rgw_spec = RGWSpec(
            service_id="foo",
            networks=['fd00:fd00:fd00:3000::/64'],
            ssl=ssl,
            rgw_frontend_type=frontend,
        )
        with with_service(cephadm_module, rgw_spec) as daemons:
            _, frontends_value, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': f'client.{daemons[0]}',
                'key': 'rgw_frontends',
            })
            assert frontends_value == expected
def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
    """Exercise redeploy/start/stop/restart on a deployed RGW daemon and
    verify _check_daemons copes with a monmap change afterwards.
    """
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            completion = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
            assert wait(cephadm_module, completion) == \
                f"Deployed rgw.{daemon_id} on host 'test'"

            for action in ('start', 'stop', 'restart'):
                completion = cephadm_module.daemon_action(action, 'rgw.' + daemon_id)
                assert wait(cephadm_module, completion) == \
                    action + f" rgw.{daemon_id} from host 'test'"

            # Make sure, _check_daemons does a redeploy due to monmap change:
            cephadm_module._store['_ceph_get/mon_map'] = {
                'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                'fsid': 'foobar',
            }
            cephadm_module.notify('mon_map', None)
            cephadm_module._check_daemons()
def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
    """Run redeploy/start/stop/restart against the pre-seeded rgw daemon,
    check _check_daemons handles a monmap change, then remove the daemon.
    """
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        c = cephadm_module.list_daemons(refresh=True)
        wait(cephadm_module, c)
        # Exactly one daemon is expected on the freshly refreshed host.
        assert len(c.result) == 1

        c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
        assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

        for what in ('start', 'stop', 'restart'):
            c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

        # Make sure, _check_daemons does a redeploy due to monmap change:
        cephadm_module._store['_ceph_get/mon_map'] = {
            'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
            'fsid': 'foobar',
        }
        cephadm_module.notify('mon_map', None)
        cephadm_module._check_daemons()

        # Tear the daemon down again so the host is left clean.
        assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')
def test_daemon_action_fail(self, cephadm_module: CephadmOrchestrator):
    """A failing mon command during the monmap-triggered daemon check must
    be recorded as a daemon event instead of propagating.
    """
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            with mock.patch('ceph_module.BaseMgrModule._ceph_send_command') as _ceph_send_command:
                # Every mon command issued from here on fails.
                _ceph_send_command.side_effect = Exception("myerror")

                # Make sure, _check_daemons does a redeploy due to monmap change:
                cephadm_module.mock_store_set('_ceph_get', 'mon_map', {
                    'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                    'fsid': 'foobar',
                })
                cephadm_module.notify('mon_map', None)
                cephadm_module._check_daemons()

                # The injected failure must surface in the daemon's events.
                evs = [e.message for e in cephadm_module.events.get_for_daemon(f'rgw.{daemon_id}')]

                assert 'myerror' in ''.join(evs)
def test_upgrade_run(self, use_repo_digest, cephadm_module: CephadmOrchestrator):
    """convert_tags_to_repo_digest rewrites the global container_image to
    its repo digest only when use_repo_digest is enabled.
    """
    with with_host(cephadm_module, 'test', refresh_hosts=False):
        cephadm_module.set_container_image('global', 'image')

        if use_repo_digest:
            cephadm_module.use_repo_digest = True
            cephadm_module.convert_tags_to_repo_digest()

        _, image, _ = cephadm_module.check_mon_command({
            'prefix': 'config get',
            'who': 'global',
            'key': 'container_image',
        })
        expected = 'image@repo_digest' if use_repo_digest else 'image'
        assert image == expected
def test_daemon_action(self, _ceph_get, cephadm_module: CephadmOrchestrator):
    """Daemon actions against the pre-seeded rgw daemon, with the mon map
    supplied through the patched ``_ceph_get``.
    """
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        c = cephadm_module.list_daemons(refresh=True)
        wait(cephadm_module, c)

        c = cephadm_module.daemon_action('redeploy', 'rgw', 'myrgw.foobar')
        assert wait(cephadm_module, c) == ["Deployed rgw.myrgw.foobar on host 'test'"]

        for what in ('start', 'stop', 'restart'):
            c = cephadm_module.daemon_action(what, 'rgw', 'myrgw.foobar')
            assert wait(cephadm_module, c) == [what + " rgw.myrgw.foobar from host 'test'"]

        # A freshly modified monmap makes _check_daemons reconsider daemons.
        now = datetime.datetime.utcnow().strftime(CEPH_DATEFMT)
        _ceph_get.return_value = {'modified': now}
        cephadm_module._check_daemons()

        # Tear the daemon down again so the host is left clean.
        assert_rm_daemon(cephadm_module, 'rgw.myrgw.foobar', 'test')
def test_daemon_action(self, cephadm_module: CephadmOrchestrator):
    """Scheduled daemon actions return 'Scheduled to …' messages; a monmap
    change is then handled by CephadmServe._check_daemons.
    """
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        with with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'),
                         CephadmOrchestrator.add_rgw, 'test') as daemon_id:
            c = cephadm_module.daemon_action('redeploy', 'rgw.' + daemon_id)
            assert wait(cephadm_module,
                        c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"

            for what in ('start', 'stop', 'restart'):
                c = cephadm_module.daemon_action(what, 'rgw.' + daemon_id)
                # Consistency fix: lowercase f-string prefix like the rest
                # of the file (was F"…"); the produced string is identical.
                assert wait(cephadm_module,
                            c) == f"Scheduled to {what} rgw.{daemon_id} on host 'test'"

            # Make sure, _check_daemons does a redeploy due to monmap change:
            cephadm_module._store['_ceph_get/mon_map'] = {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            }
            cephadm_module.notify('mon_map', None)
            CephadmServe(cephadm_module)._check_daemons()
def test_list_daemons(self, cephadm_module: CephadmOrchestrator):
    """list_daemons must report the rgw daemon discovered on the host."""
    cephadm_module.service_cache_timeout = 10
    with with_host(cephadm_module, 'test'):
        CephadmServe(cephadm_module)._refresh_host_daemons('test')
        completion = cephadm_module.list_daemons()
        daemons = wait(cephadm_module, completion)
        assert daemons[0].name() == 'rgw.myrgw.foobar'
def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """Deploying grafana (with prometheus present and a stored TLS cert and
    key) must hand cephadm the fully rendered config files over stdin.

    NOTE(review): the exact whitespace layout inside the dedent() blocks was
    reconstructed from a collapsed source line — confirm against the deployed
    file templates if this assertion starts failing on whitespace only.
    """
    _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
    with with_host(cephadm_module, 'test'):
        # Pre-seed the TLS material grafana.ini will reference.
        cephadm_module.set_store('grafana_crt', 'c')
        cephadm_module.set_store('grafana_key', 'k')
        with with_service(cephadm_module, MonitoringSpec('prometheus')) as _, \
                with_service(cephadm_module, GrafanaSpec('grafana')) as _:
            files = {
                'grafana.ini': dedent("""
                    # This file is generated by cephadm.
                    [users]
                    default_theme = light
                    [auth.anonymous]
                    enabled = true
                    org_name = 'Main Org.'
                    org_role = 'Viewer'
                    [server]
                    domain = 'bootstrap.storage.lab'
                    protocol = https
                    cert_file = /etc/grafana/certs/cert_file
                    cert_key = /etc/grafana/certs/cert_key
                    http_port = 3000
                    http_addr = 
                    [security]
                    disable_initial_admin_creation = true
                    cookie_secure = true
                    cookie_samesite = none
                    allow_embedding = true""").lstrip(),  # noqa: W291
                'provisioning/datasources/ceph-dashboard.yml': dedent("""
                    # This file is generated by cephadm.
                    deleteDatasources:
                      - name: 'Dashboard1'
                        orgId: 1

                    datasources:
                      - name: 'Dashboard1'
                        type: 'prometheus'
                        access: 'proxy'
                        orgId: 1
                        url: 'http://[1::4]:9095'
                        basicAuth: false
                        isDefault: true
                        editable: false
                    """).lstrip(),
                'certs/cert_file': dedent("""
                    # generated by cephadm
                    c""").lstrip(),
                'certs/cert_key': dedent("""
                    # generated by cephadm
                    k""").lstrip(),
            }
            # The deploy call must carry every rendered file via stdin.
            _run_cephadm.assert_called_with(
                'test', 'grafana.test', 'deploy', [
                    '--name', 'grafana.test',
                    '--meta-json', '{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null}',
                    '--config-json', '-',
                    '--tcp-ports', '3000'],
                stdin=json.dumps({"files": files}),
                image='')