def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'nfs',
                    'service_id': 'ganesha-foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'pool': 'mypool',
                        'namespace': 'foons',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()

        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
        assert ls == [['ganesha-foo', 'mypool', 'foons']]

        cephadm_module.migration.migrate(True)
        assert cephadm_module.migration_current == 2

        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 3
@contextmanager  # from contextlib; the tests use this helper via `with with_host(...)`
def with_host(m: CephadmOrchestrator, name: str, refresh_hosts: bool = True):
    wait(m, m.add_host(HostSpec(hostname=name)))
    if refresh_hosts:
        CephadmServe(m)._refresh_hosts_and_daemons()
    yield
    wait(m, m.remove_host(name))
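# `wait()` is used throughout these helpers but is not defined in this excerpt.
# A minimal sketch, assuming the fixtures resolve orchestrator completions
# synchronously (the real helper may differ between releases):
def wait(m: CephadmOrchestrator, c):
    # Unwrap an orchestrator completion/result, re-raising any stored exception.
    from orchestrator import raise_if_exception
    return raise_if_exception(c)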
@contextmanager
def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec, meth=None,
                 host: str = '', status_running=False) -> Iterator[List[str]]:
    if spec.placement.is_empty() and host:
        spec.placement = PlacementSpec(hosts=[host], count=1)
    if meth is not None:
        c = meth(cephadm_module, spec)
        assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'
    else:
        c = cephadm_module.apply([spec])
        assert wait(cephadm_module, c) == [f'Scheduled {spec.service_name()} update...']

    specs = [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())]
    assert spec in specs

    CephadmServe(cephadm_module)._apply_all_services()

    if status_running:
        make_daemons_running(cephadm_module, spec.service_name())

    dds = wait(cephadm_module, cephadm_module.list_daemons())
    own_dds = [dd for dd in dds if dd.service_name() == spec.service_name()]
    if host and spec.service_type != 'osd':
        assert own_dds

    yield [dd.name() for dd in own_dds]

    assert_rm_service(cephadm_module, spec.service_name())
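# A hypothetical usage sketch (not from the source): with_host and with_service
# compose as nested context managers, and with_service yields the names of the
# daemons it deployed, so a test body can assert against them directly:
def test_mgr_deploys_one_daemon(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_service(cephadm_module, ServiceSpec('mgr'), host='host1') as daemon_names:
            assert len(daemon_names) == 1  # one mgr daemon placed on host1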
def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mon.wrong',
            json.dumps({
                'spec': {
                    'service_type': 'mon',
                    'service_id': 'wrong',
                    'placement': {
                        'hosts': ['host1']
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )
        cephadm_module.spec_store.load()

        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'

        cephadm_module.migration_current = 1
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == 2

        assert len(cephadm_module.spec_store.specs) == 1
        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
            service_type='mon',
            unmanaged=True,
            placement=PlacementSpec(hosts=['host1'])
        )
@pytest.fixture()  # assumed: the tests above receive `cephadm_module` as a pytest fixture argument
def cephadm_module():
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.module.CephadmOrchestrator._configure_logging", lambda *args: None),\
            mock.patch("cephadm.module.CephadmOrchestrator.remote"),\
            mock.patch("cephadm.module.CephadmOrchestrator.set_store", set_store), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_store", get_store),\
            mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]')), \
            mock.patch("cephadm.module.HostCache.save_host"), \
            mock.patch("cephadm.module.HostCache.rm_host"), \
            mock.patch("cephadm.module.CephadmOrchestrator.send_command"), \
            mock.patch("cephadm.module.CephadmOrchestrator.mon_command", mon_command), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_store_prefix", get_store_prefix):

        CephadmOrchestrator._register_commands('')
        CephadmOrchestrator._register_options('')
        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        m._root_logger = mock.MagicMock()
        m._store = {
            'ssh_config': '',
            'ssh_identity_key': '',
            'ssh_identity_pub': '',
            'inventory': {},
            'upgrade_state': None,
        }
        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m
@contextmanager
def with_host(m: CephadmOrchestrator, name: str, addr: str = '1::4', refresh_hosts: bool = True):
    with mock.patch("cephadm.utils.resolve_ip", return_value=addr):
        wait(m, m.add_host(HostSpec(hostname=name)))
        if refresh_hosts:
            CephadmServe(m)._refresh_hosts_and_daemons()
            receive_agent_metadata(m, name)
        yield
        wait(m, m.remove_host(name))
def test_upgrade_start(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        assert wait(cephadm_module, cephadm_module.upgrade_start(
            'image_id', None)) == 'Initiating upgrade to image_id'

        assert wait(cephadm_module,
                    cephadm_module.upgrade_status()).target_image == 'image_id'

        assert wait(cephadm_module,
                    cephadm_module.upgrade_pause()) == 'Paused upgrade to image_id'

        assert wait(cephadm_module,
                    cephadm_module.upgrade_resume()) == 'Resumed upgrade to image_id'

        assert wait(cephadm_module,
                    cephadm_module.upgrade_stop()) == 'Stopped upgrade to image_id'
def assert_rm_service(cephadm: CephadmOrchestrator, srv_name):
    mon_or_mgr = cephadm.spec_store[srv_name].spec.service_type in ('mon', 'mgr')
    if mon_or_mgr:
        assert 'Unable' in wait(cephadm, cephadm.remove_service(srv_name))
        return
    assert wait(cephadm, cephadm.remove_service(srv_name)) == f'Removed service {srv_name}'
    assert cephadm.spec_store[srv_name].deleted is not None
    CephadmServe(cephadm)._check_daemons()
    CephadmServe(cephadm)._apply_all_services()
    assert cephadm.spec_store[srv_name].deleted
    unmanaged = cephadm.spec_store[srv_name].spec.unmanaged
    CephadmServe(cephadm)._purge_deleted_services()
    if not unmanaged:  # daemons of unmanaged services are not deleted, so the spec lingers
        assert srv_name not in cephadm.spec_store, f'{cephadm.spec_store[srv_name]!r}'
@pytest.mark.parametrize("use_repo_digest", [False, True])  # assumed parametrization; the test takes use_repo_digest as a parameter
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        cephadm_module.set_container_image('global', 'from_image')
        if use_repo_digest:
            cephadm_module.use_repo_digest = True
        with with_service(cephadm_module, ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr, 'test'):
            assert wait(cephadm_module, cephadm_module.upgrade_start(
                'to_image', None)) == 'Initiating upgrade to to_image'

            assert wait(cephadm_module,
                        cephadm_module.upgrade_status()).target_image == 'to_image'

            def _versions_mock(cmd):
                return json.dumps({
                    'mgr': {
                        'myversion': 1
                    }
                })

            cephadm_module._mon_command_mock_versions = _versions_mock

            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm",
                            _run_cephadm(json.dumps({
                                'image_id': 'image_id',
                                'repo_digest': 'to_image@repo_digest',
                            }))):
                cephadm_module.upgrade._do_upgrade()

            assert cephadm_module.upgrade_status is not None

            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
                json.dumps([
                    dict(
                        name=list(cephadm_module.cache.daemons['test'].keys())[0],
                        style='cephadm',
                        fsid='fsid',
                        container_id='container_id',
                        container_image_id='image_id',
                        version='version',
                        state='running',
                    )
                ])
            )):
                cephadm_module._refresh_hosts_and_daemons()

            cephadm_module.upgrade._do_upgrade()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            if use_repo_digest:
                assert image == 'to_image@repo_digest'
            else:
                assert image == 'to_image'
@contextmanager  # assumed: the function yields the module and is meant for `with` blocks
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            store['_ceph_get/mon_map'] = {
                'modified': datetime.datetime.utcnow().strftime(CEPH_DATEFMT),
                'fsid': 'foobar',
            }
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m
@contextmanager
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m
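# A hypothetical usage sketch for with_cephadm_module (not from the source):
# module options and KV-store keys can be staged before __init__ runs, which
# is how tests exercise legacy on-disk state such as 'upgrade_state':
def test_preseeded_store():
    with with_cephadm_module(store={'upgrade_state': 'null'}) as m:
        # the module comes up with the pre-seeded value already in its store
        assert m.get_store('upgrade_state') == 'null'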
@contextmanager
def with_service(cephadm_module: CephadmOrchestrator, spec: ServiceSpec,
                 meth, host: str) -> Iterator[List[str]]:
    if spec.placement.is_empty():
        spec.placement = PlacementSpec(hosts=[host], count=1)
    c = meth(cephadm_module, spec)
    assert wait(cephadm_module, c) == f'Scheduled {spec.service_name()} update...'

    specs = [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())]
    assert spec in specs

    cephadm_module._apply_all_services()

    dds = wait(cephadm_module, cephadm_module.list_daemons())
    own_dds = [dd for dd in dds if dd.service_name() == spec.service_name()]
    assert own_dds

    yield [dd.name() for dd in own_dds]

    assert_rm_service(cephadm_module, spec.service_name())
def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'mds',
            json.dumps({
                'spec': {
                    'service_type': 'mds',
                    'placement': {
                        'hosts': ['host1']
                    }
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )

        cephadm_module.spec_store.load()

        # there is nothing to migrate, as the spec is gone now.
        assert len(cephadm_module.spec_store.all_specs) == 0
@pytest.fixture()
def cephadm_module():
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.module.CephadmOrchestrator._configure_logging", lambda *args: None),\
            mock.patch("cephadm.module.CephadmOrchestrator.set_store", set_store),\
            mock.patch("cephadm.module.CephadmOrchestrator.get_store", get_store),\
            mock.patch("cephadm.module.CephadmOrchestrator.get_store_prefix", get_store_prefix):

        CephadmOrchestrator._register_commands('')
        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        m._root_logger = mock.MagicMock()
        m._store = {
            'ssh_config': '',
            'ssh_identity_key': '',
            'ssh_identity_pub': '',
            'inventory': {},
        }
        m.__init__('cephadm', 0, 0)
        yield m
def test_migrate_admin_client_keyring(cephadm_module: CephadmOrchestrator):
    assert 'client.admin' not in cephadm_module.keys.keys

    cephadm_module.migration_current = 3
    cephadm_module.migration.migrate()
    assert cephadm_module.migration_current == LAST_MIGRATION

    assert cephadm_module.keys.keys['client.admin'].placement.label == '_admin'
def test_not_enough_mgrs(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=1)),
                          CephadmOrchestrator.apply_mgr, ''):
            with pytest.raises(OrchestratorError):
                wait(cephadm_module, cephadm_module.upgrade_start('image_id', None))
def test_upgrade_start(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        with with_host(cephadm_module, 'test2'):
            with with_service(cephadm_module, ServiceSpec('mgr', placement=PlacementSpec(count=2))):
                assert wait(cephadm_module, cephadm_module.upgrade_start(
                    'image_id', None)) == 'Initiating upgrade to docker.io/image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_status()).target_image == 'docker.io/image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_pause()) == 'Paused upgrade to docker.io/image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_resume()) == 'Resumed upgrade to docker.io/image_id'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_stop()) == 'Stopped upgrade to docker.io/image_id'
@pytest.fixture()
def cephadm_module():
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.module.CephadmOrchestrator.remote"),\
            mock.patch("cephadm.module.CephadmOrchestrator.send_command"), \
            mock.patch("cephadm.module.CephadmOrchestrator.mon_command", mon_command):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        yield m
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            cephadm_module._apply_all_services()
            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            # Sorry for this hack, but we need to make sure Migration thinks
            # all daemons have already been updated.
            cephadm_module.cache.last_daemon_update['host1'] = datetime.now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime.now()

            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current == 2

            out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
            assert out == [
                PlacementSpec(count=2, hosts=[
                    HostPlacementSpec(hostname='host1', network='', name=''),
                    HostPlacementSpec(hostname='host2', network='', name='')
                ])
            ]
@contextmanager
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch('cephadm.module.CephadmOrchestrator.get_module_option_ex', get_module_option_ex),\
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \
            mock.patch('cephadm.agent.CherryPyThread.run'), \
            mock.patch('cephadm.offline_watcher.OfflineHostWatcher.run'), \
            mock.patch('cephadm.tuned_profiles.TunedProfileUtils._remove_stray_tuned_profiles'):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        if '_ceph_get/mgr_map' not in store:
            m.mock_store_set('_ceph_get', 'mgr_map', {
                'services': {
                    'dashboard': 'http://[::1]:8080',
                    'prometheus': 'http://[::1]:8081'
                },
                'modules': ['dashboard', 'prometheus'],
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        m.event_loop = MockEventLoopThread()
        m.tkey = NamedTemporaryFile(prefix='test-cephadm-identity-')
        yield m
def receive_agent_metadata(m: CephadmOrchestrator, host: str,
                           ops: Optional[List[str]] = None) -> None:
    to_update: Dict[str, Callable[[str, Any], None]] = {
        'ls': m._process_ls_output,
        'gather-facts': m.cache.update_host_facts,
        'list-networks': m.cache.update_host_networks,
    }
    if ops:
        for op in ops:
            out = m.wait_async(
                CephadmServe(m)._run_cephadm_json(host, cephadmNoImage, op, []))
            to_update[op](host, out)
    m.cache.last_daemon_update[host] = datetime_now()
    m.cache.last_facts_update[host] = datetime_now()
    m.cache.last_network_update[host] = datetime_now()
    m.cache.metadata_up_to_date[host] = True
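# test_migrate_scheduler below calls receive_agent_metadata_all_hosts(), which
# is not part of this excerpt. A plausible sketch, assuming it simply fans
# receive_agent_metadata() out over every host in the cache:
def receive_agent_metadata_all_hosts(m: CephadmOrchestrator) -> None:
    for host in m.cache.get_hosts():
        receive_agent_metadata(m, host)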
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1', refresh_hosts=False):
        with with_host(cephadm_module, 'host2', refresh_hosts=False):

            # emulate the old scheduler:
            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='*', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # with pytest.raises(OrchestratorError, match="cephadm migration still ongoing. Please wait, until the migration is complete."):
            CephadmServe(cephadm_module)._apply_all_services()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            # assert we need all daemons.
            assert cephadm_module.migration_current == 0

            CephadmServe(cephadm_module)._refresh_hosts_and_daemons()
            receive_agent_metadata_all_hosts(cephadm_module)
            cephadm_module.migration.migrate()

            CephadmServe(cephadm_module)._apply_all_services()
            out = {o.hostname for o in wait(cephadm_module, cephadm_module.list_daemons())}
            assert out == {'host1', 'host2'}

            c = cephadm_module.apply_rgw(
                ServiceSpec('rgw', 'r.z', placement=PlacementSpec(host_pattern='host1', count=2))
            )
            assert wait(cephadm_module, c) == 'Scheduled rgw.r.z update...'

            # Sorry for this hack, but we need to make sure Migration thinks
            # all daemons have already been updated.
            cephadm_module.cache.last_daemon_update['host1'] = datetime_now()
            cephadm_module.cache.last_daemon_update['host2'] = datetime_now()

            cephadm_module.migration_current = 0
            cephadm_module.migration.migrate()
            assert cephadm_module.migration_current >= 2

            out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
            assert out == [
                PlacementSpec(count=2, hosts=[
                    HostPlacementSpec(hostname='host1', network='', name=''),
                    HostPlacementSpec(hostname='host2', network='', name='')
                ])
            ]
@contextmanager
def with_cephadm_module(module_options=None, store=None):
    """
    :param module_options: Set opts as if they were set before module.__init__ is called
    :param store: Set the store before module.__init__ is called
    """
    with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
            mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
            mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
            mock.patch("cephadm.module.CephadmOrchestrator.remote"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._request_agent_acks"), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._apply_agent", return_value=False), \
            mock.patch("cephadm.agent.CephadmAgentHelpers._agent_down", return_value=False), \
            mock.patch('cephadm.agent.CherryPyThread.run'):

        m = CephadmOrchestrator.__new__(CephadmOrchestrator)
        if module_options is not None:
            for k, v in module_options.items():
                m._ceph_set_module_option('cephadm', k, v)
        if store is None:
            store = {}
        if '_ceph_get/mon_map' not in store:
            m.mock_store_set('_ceph_get', 'mon_map', {
                'modified': datetime_to_str(datetime_now()),
                'fsid': 'foobar',
            })
        for k, v in store.items():
            m._ceph_set_store(k, v)

        m.__init__('cephadm', 0, 0)
        m._cluster_fsid = "fsid"
        m.event_loop = MockEventLoopThread()
        m.tkey = NamedTemporaryFile(prefix='test-cephadm-identity-')
        yield m
def test_upgrade_run(cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'test'):
        cephadm_module.check_mon_command({
            'prefix': 'config set',
            'name': 'container_image',
            'value': 'from_image',
            'who': 'global',
        })
        with with_service(cephadm_module, ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr, 'test'):
            assert wait(cephadm_module, cephadm_module.upgrade_start(
                'to_image', None)) == 'Initiating upgrade to to_image'

            assert wait(cephadm_module,
                        cephadm_module.upgrade_status()).target_image == 'to_image'

            def _versions_mock(cmd):
                return json.dumps({'mgr': {'myversion': 1}})

            cephadm_module._mon_command_mock_versions = _versions_mock

            cephadm_module.upgrade._do_upgrade()

            _, image, _ = cephadm_module.check_mon_command({
                'prefix': 'config get',
                'who': 'global',
                'key': 'container_image',
            })
            assert image == 'to_image'
@pytest.mark.parametrize("use_repo_digest", [False, True])  # assumed parametrization; the test takes use_repo_digest as a parameter
def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
    with with_host(cephadm_module, 'host1'):
        with with_host(cephadm_module, 'host2'):
            cephadm_module.set_container_image('global', 'from_image')
            cephadm_module.use_repo_digest = use_repo_digest
            with with_service(cephadm_module,
                              ServiceSpec('mgr', placement=PlacementSpec(host_pattern='*', count=2)),
                              CephadmOrchestrator.apply_mgr, '', status_running=True),\
                mock.patch("cephadm.module.CephadmOrchestrator.lookup_release_name",
                           return_value='foo'),\
                mock.patch("cephadm.module.CephadmOrchestrator.version",
                           new_callable=mock.PropertyMock) as version_mock,\
                mock.patch("cephadm.module.CephadmOrchestrator.get",
                           return_value={
                               # capture fields in both mon and osd maps
                               "require_osd_release": "pacific",
                               "min_mon_release": 16,
                           }):

                version_mock.return_value = 'ceph version 18.2.1 (somehash)'
                assert wait(
                    cephadm_module,
                    cephadm_module.upgrade_start('to_image', None)
                ) == 'Initiating upgrade to docker.io/to_image'

                assert wait(cephadm_module,
                            cephadm_module.upgrade_status()).target_image == 'docker.io/to_image'

                def _versions_mock(cmd):
                    return json.dumps({'mgr': {'ceph version 1.2.3 (asdf) blah': 1}})

                cephadm_module._mon_command_mock_versions = _versions_mock

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm",
                                _run_cephadm(json.dumps({
                                    'image_id': 'image_id',
                                    'repo_digests': ['to_image@repo_digest'],
                                    'ceph_version': 'ceph version 18.2.3 (hash)',
                                }))):
                    cephadm_module.upgrade._do_upgrade()

                assert cephadm_module.upgrade_status is not None

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
                    json.dumps([
                        dict(
                            name=list(cephadm_module.cache.daemons['host1'].keys())[0],
                            style='cephadm',
                            fsid='fsid',
                            container_id='container_id',
                            container_image_id='image_id',
                            container_image_digests=['to_image@repo_digest'],
                            deployed_by=['to_image@repo_digest'],
                            version='version',
                            state='running',
                        )
                    ])
                )):
                    receive_agent_metadata(cephadm_module, 'host1', ['ls'])
                    receive_agent_metadata(cephadm_module, 'host2', ['ls'])

                with mock.patch("cephadm.serve.CephadmServe._run_cephadm",
                                _run_cephadm(json.dumps({
                                    'image_id': 'image_id',
                                    'repo_digests': ['to_image@repo_digest'],
                                    'ceph_version': 'ceph version 18.2.3 (hash)',
                                }))):
                    cephadm_module.upgrade._do_upgrade()

                _, image, _ = cephadm_module.check_mon_command({
                    'prefix': 'config get',
                    'who': 'global',
                    'key': 'container_image',
                })
                if use_repo_digest:
                    assert image == 'to_image@repo_digest'
                else:
                    assert image == 'docker.io/to_image'
def test_upgrade_state_null(cephadm_module: CephadmOrchestrator):
    # This test validates https://tracker.ceph.com/issues/47580
    cephadm_module.set_store('upgrade_state', 'null')
    CephadmUpgrade(cephadm_module)
    assert CephadmUpgrade(cephadm_module).upgrade_state is None
@contextmanager
def with_host(m: CephadmOrchestrator, name: str):
    wait(m, m.add_host(HostSpec(hostname=name)))
    yield
    wait(m, m.remove_host(name))
def test_can_run(self, cephadm_module: CephadmOrchestrator):
    # defined inside a test class (note the `self` parameter)
    assert cephadm_module.can_run() == (
        False, "loading asyncssh library:No module named 'asyncssh'")