Example 1
 def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
     super().post_remove(daemon, is_failed_deploy=is_failed_deploy)
     self.mgr.check_mon_command({
         'prefix': 'config rm',
         'who': utils.name_to_config_section(daemon.name()),
         'name': 'rgw_frontends',
     })
Example 2
 def to_daemon_description(self, status: DaemonDescriptionStatus,
                           status_desc: str) -> DaemonDescription:
     return DaemonDescription(daemon_type=self.daemon_type,
                              daemon_id=self.daemon_id,
                              hostname=self.host,
                              status=status,
                              status_desc=status_desc)
Example 3
 def test_get_unique_name(self, cephadm_module):
     # type: (CephadmOrchestrator) -> None
     existing = [DaemonDescription(daemon_type='mon', daemon_id='a')]
     new_mon = cephadm_module.get_unique_name('mon', 'myhost', existing)
     match_glob(new_mon, 'myhost')
     new_mgr = cephadm_module.get_unique_name('mgr', 'myhost', existing)
     match_glob(new_mgr, 'myhost.*')
Example 4
 def get_active_daemon(
         self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     # TODO: if there are multiple daemons, who is the active one?
     if daemon_descrs:
         return daemon_descrs[0]
     # if empty list provided, return empty Daemon Desc
     return DaemonDescription()
Example 5
 def get_active_daemon(
         self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     # Use the least-created one as the active daemon
     if daemon_descrs:
         return daemon_descrs[-1]
     # if empty list provided, return empty Daemon Desc
     return DaemonDescription()
Example 6
 def get_active_daemon(
         self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     for daemon in daemon_descrs:
         if self.mgr.daemon_is_self(daemon.daemon_type, daemon.daemon_id):
             return daemon
     # if no active mgr found, return empty Daemon Desc
     return DaemonDescription()
Example 7
    def test_scale_up(self, _apply_mds, _describe_service, _list_daemons, _get,
                      mds_autoscaler_module: MDSAutoscaler):
        daemons = Completion(value=[
            DaemonDescription(hostname='myhost',
                              daemon_type='mds',
                              daemon_id='fs_name.myhost.a'),
            DaemonDescription(hostname='myhost',
                              daemon_type='mds',
                              daemon_id='fs_name.myhost.b'),
        ])
        daemons.finalize()
        _list_daemons.return_value = daemons

        services = Completion(value=[
            ServiceDescription(spec=ServiceSpec(service_type='mds',
                                                service_id='fs_name',
                                                placement=PlacementSpec(
                                                    count=2)))
        ])
        services.finalize()
        _describe_service.return_value = services

        apply = Completion(value='')
        apply.finalize()
        _apply_mds.return_value = apply

        _get.return_value = {
            'filesystems': [{
                'mdsmap': {
                    'fs_name': 'fs_name',
                    'in': [{
                        'name': 'mds.fs_name.myhost.a',
                    }],
                    'standby_count_wanted': 2,
                }
            }],
            'standbys': [{
                'name': 'mds.fs_name.myhost.b',
            }],
        }
        mds_autoscaler_module.notify('fs_map', None)

        _apply_mds.assert_called_with(
            ServiceSpec(service_type='mds',
                        service_id='fs_name',
                        placement=PlacementSpec(count=3)))
Example 8
 def _get_nfs_instances(service_name=None):
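     # 'enable' comes from the enclosing test scope (not shown here); when it
     # is False the mock reports no NFS daemons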
     if not enable:
         return []
     instances = {
         'nfs.foo': [
             DaemonDescription(daemon_id='foo.host_a', status=1),
             DaemonDescription(daemon_id='foo.host_b', status=1)
         ],
         'nfs.bar':
         [DaemonDescription(daemon_id='bar.host_c', status=1)]
     }
     if service_name is not None:
         return instances[service_name]
     result = []
     for _, daemons in instances.items():
         result.extend(daemons)
     return result
Example 9
 def get_active_daemon(
         self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     active_mgr_str = self.mgr.get('mgr_map')['active_name']
     for daemon in daemon_descrs:
         if daemon.daemon_id == active_mgr_str:
             return daemon
     # if no active mgr found, return empty Daemon Desc
     return DaemonDescription()
Example 10
def test_daemon_description():
    json_data = {
        'hostname': 'test',
        'daemon_type': 'mon',
        'daemon_id': 'a',
        'status': -1,
    }
    _test_resource(json_data, DaemonDescription, {'abc': False})

    dd = DaemonDescription.from_json(json_data)
    assert dd.status.value == DaemonDescriptionStatus.error.value
Example 11
 def from_daemon_description(dd: DaemonDescription) -> 'CephadmDaemonDeploySpec':
     assert dd.hostname
     assert dd.daemon_id
     assert dd.daemon_type
     return CephadmDaemonDeploySpec(
         host=dd.hostname,
         daemon_id=dd.daemon_id,
         daemon_type=dd.daemon_type,
         service_name=dd.service_name(),
         ip=dd.ip,
         ports=dd.ports,
     )
Example 12
def test_scheduler_daemons(host_key, hosts, explicit_key, explicit, count,
                           daemons_key, daemons, spec_section_key,
                           spec_section):
    mk_spec, hosts = mk_spec_and_host(spec_section, hosts, explicit_key,
                                      explicit, count)
    dds = [DaemonDescription('mon', d, d) for d in daemons]
    run_scheduler_test(results=test_scheduler_daemons_results,
                       mk_spec=mk_spec,
                       hosts=hosts,
                       get_daemons_func=lambda _: dds,
                       key_elems=(host_key, explicit_key, count, daemons_key,
                                  spec_section_key))
Example 13
 def to_daemon_description(self, status: DaemonDescriptionStatus, status_desc: str) -> DaemonDescription:
     return DaemonDescription(
         daemon_type=self.daemon_type,
         daemon_id=self.daemon_id,
         service_name=self.service_name,
         hostname=self.host,
         status=status,
         status_desc=status_desc,
         ip=self.ip,
         ports=self.ports,
         rank=self.rank,
         rank_generation=self.rank_generation,
     )
Example 14
 def to_dds(ts: List[Tuple[str, str, str]],
            upgraded: bool) -> List[DaemonDescription]:
     dds = []
     digest = 'new_image@repo_digest' if upgraded else 'old_image@repo_digest'
     for t in ts:
         dds.append(
             DaemonDescription(
                 daemon_type=t[0],
                 hostname=t[1],
                 daemon_id=t[2],
                 container_image_digests=[digest],
                 deployed_by=[digest],
             ))
     return dds
Example 15
 def get_active_daemon(self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     active_mds_strs = list()
     for fs in self.mgr.get('fs_map')['filesystems']:
         mds_map = fs['mdsmap']
         if mds_map is not None:
             for mds_id, mds_status in mds_map['info'].items():
                 if mds_status['state'] == 'up:active':
                     active_mds_strs.append(mds_status['name'])
     if len(active_mds_strs) != 0:
         for daemon in daemon_descrs:
             if daemon.daemon_id in active_mds_strs:
                 return daemon
     # if no mds found, return empty Daemon Desc
     return DaemonDescription()
Example 16
def test_enough_mds_for_ok_to_stop(get, get_daemons_by_service,
                                   cephadm_module: CephadmOrchestrator):
    get.side_effect = [{
        'filesystems': [{
            'mdsmap': {
                'fs_name': 'test',
                'max_mds': 1
            }
        }]
    }]
    get_daemons_by_service.side_effect = [[DaemonDescription()]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds',
                          daemon_id='test.host1.gfknd',
                          service_name='mds.test'))

    get.side_effect = [{
        'filesystems': [{
            'mdsmap': {
                'fs_name': 'myfs.test',
                'max_mds': 2
            }
        }]
    }]
    get_daemons_by_service.side_effect = [[
        DaemonDescription(), DaemonDescription()
    ]]
    assert not cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds',
                          daemon_id='myfs.test.host1.gfknd',
                          service_name='mds.myfs.test'))

    get.side_effect = [{
        'filesystems': [{
            'mdsmap': {
                'fs_name': 'myfs.test',
                'max_mds': 1
            }
        }]
    }]
    get_daemons_by_service.side_effect = [[
        DaemonDescription(), DaemonDescription()
    ]]
    assert cephadm_module.upgrade._enough_mds_for_ok_to_stop(
        DaemonDescription(daemon_type='mds',
                          daemon_id='myfs.test.host1.gfknd',
                          service_name='mds.myfs.test'))
Example 17
 def from_daemon_description(dd: DaemonDescription) -> 'CephadmDaemonDeploySpec':
     assert dd.hostname
     assert dd.daemon_id
     assert dd.daemon_type
     return CephadmDaemonDeploySpec(
         host=dd.hostname,
         daemon_id=dd.daemon_id,
         daemon_type=dd.daemon_type,
         service_name=dd.service_name(),
         ip=dd.ip,
         ports=dd.ports,
         rank=dd.rank,
         rank_generation=dd.rank_generation,
         extra_container_args=dd.extra_container_args,
     )
Example 18
    def _enough_mds_for_ok_to_stop(self, mds_daemon: DaemonDescription) -> bool:

        # find fs this mds daemon belongs to
        fsmap = self.mgr.get("fs_map")
        for fs in fsmap.get('filesystems', []):
            mdsmap = fs["mdsmap"]
            fs_name = mdsmap["fs_name"]

            assert mds_daemon.daemon_id
            if fs_name != mds_daemon.service_name().split('.', 1)[1]:
                # wrong fs for this mds daemon
                continue

            # get number of mds daemons for this fs
            mds_count = len(
                [daemon for daemon in self.mgr.cache.get_daemons_by_service(mds_daemon.service_name())])

            # standby mds daemons for this fs?
            if mdsmap["max_mds"] < mds_count:
                return True
            return False

        return True  # if mds has no fs it should pass ok-to-stop
Example 19
def test_dd_octopus(dd_json):
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.

    # Convert datetime properties to old style.
    # 2020-04-03T07:29:16.926292Z -> 2020-04-03T07:29:16.926292
    def convert_to_old_style_json(j):
        for k in ['last_refresh', 'created', 'started', 'last_deployed',
                  'last_configured']:
            if k in j:
                j[k] = j[k].rstrip('Z')
        return j

    assert dd_json == convert_to_old_style_json(
        DaemonDescription.from_json(dd_json).to_json())
Example 20
    def generate_config(self, daemon_spec: CephadmDaemonSpec) -> Tuple[Dict[str, Any], List[str]]:
        assert self.TYPE == daemon_spec.daemon_type

        daemon_type = daemon_spec.daemon_type
        daemon_id = daemon_spec.daemon_id
        host = daemon_spec.host

        deps: List[str] = []

        # find the matching NFSServiceSpec
        # TODO: find the spec and pass via _create_daemon instead ??
        dd = DaemonDescription()
        dd.daemon_type = daemon_type
        dd.daemon_id = daemon_id
        dd.hostname = host

        service_name = dd.service_name()
        specs = self.mgr.spec_store.find(service_name)

        if not specs:
            raise OrchestratorError('Cannot find service spec %s' % (service_name))
        elif len(specs) > 1:
            raise OrchestratorError('Found multiple service specs for %s' % (service_name))
        else:
            # cast to keep mypy happy
            spec = cast(NFSServiceSpec, specs[0])

        nfs = NFSGanesha(self.mgr, daemon_id, spec)

        # create the keyring
        entity = nfs.get_keyring_entity()
        keyring = nfs.get_or_create_keyring(entity=entity)

        # update the caps after get-or-create, the keyring might already exist!
        nfs.update_keyring_caps(entity=entity)

        # create the rados config object
        nfs.create_rados_config_obj()

        # generate the cephadm config
        cephadm_config = nfs.get_cephadm_config()
        cephadm_config.update(
            self.mgr._get_config_and_keyring(
                daemon_type, daemon_id,
                keyring=keyring,
                host=host
            )
        )

        return cephadm_config, deps
Example 21
 def test_get_unique_name(self, cephadm_module):
     existing = [
         DaemonDescription(daemon_type='mon', daemon_id='a')
     ]
     new_mon = cephadm_module.get_unique_name('myhost', existing, 'mon')
     match_glob(new_mon, 'mon.myhost.*')
Example 22
    expected: List[str]


@pytest.mark.parametrize(
    "service_type,placement,hosts,daemons,expected",
    [
        # just hosts
        NodeAssignmentTest(
            'mon',
            PlacementSpec(hosts=[
                'smithi060:[v2:172.21.15.60:3301,v1:172.21.15.60:6790]=c'
            ]), ['smithi060'], [], ['smithi060']),
        # all_hosts
        NodeAssignmentTest('mon', PlacementSpec(host_pattern='*'),
                           'host1 host2 host3'.split(), [
                               DaemonDescription('mon', 'a', 'host1'),
                               DaemonDescription('mon', 'b', 'host2'),
                           ], ['host1', 'host2', 'host3']),
        # count that is bigger than the number of hosts. Truncate to len(hosts).
        # RGWs should not be co-located with each other.
        NodeAssignmentTest('rgw', PlacementSpec(count=4),
                           'host1 host2 host3'.split(), [],
                           ['host1', 'host2', 'host3']),
        # count + partial host list
        NodeAssignmentTest('mon', PlacementSpec(count=3, hosts=['host3']),
                           'host1 host2 host3'.split(), [
                               DaemonDescription('mon', 'a', 'host1'),
                               DaemonDescription('mon', 'b', 'host2'),
                           ], ['host3']),
        # count 1 + partial host list
        NodeAssignmentTest('mon', PlacementSpec(count=1, hosts=['host3']),
Example 23
        ),
        (
            DaemonPlacement(
                daemon_type='mgr', hostname='host1', ports=[80, 90]),
            2,
            DaemonPlacement(
                daemon_type='mgr', hostname='host1', ports=[82, 92]),
        ),
    ])
def test_daemon_placement_renumber(dp, n, result):
    assert dp.renumber_ports(n) == result


@pytest.mark.parametrize('dp,dd,result', [
    (DaemonPlacement(daemon_type='mgr', hostname='host1'),
     DaemonDescription('mgr', 'a', 'host1'), True),
    (DaemonPlacement(daemon_type='mgr', hostname='host1',
                     name='a'), DaemonDescription('mgr', 'a', 'host1'), True),
    (DaemonPlacement(daemon_type='mon', hostname='host1',
                     name='a'), DaemonDescription('mgr', 'a', 'host1'), False),
    (DaemonPlacement(daemon_type='mgr', hostname='host1',
                     name='a'), DaemonDescription('mgr', 'b', 'host1'), False),
])
def test_daemon_placement_match(dp, dd, result):
    assert dp.matches_daemon(dd) == result


@pytest.mark.parametrize(
    "spec_section_key,spec_section",
    [  # noqa: E128
        ('h', 'hosts'),
Example 24
    expected: List[str]


@pytest.mark.parametrize(
    "service_type,placement,hosts,daemons,expected",
    [
        # just hosts
        NodeAssignmentTest(
            'mon',
            PlacementSpec(hosts=[
                'smithi060:[v2:172.21.15.60:3301,v1:172.21.15.60:6790]=c'
            ]), ['smithi060'], [], ['smithi060']),
        # all_hosts
        NodeAssignmentTest('mon', PlacementSpec(all_hosts=True),
                           'host1 host2 host3'.split(), [
                               DaemonDescription('mon', 'a', 'host1'),
                               DaemonDescription('mon', 'b', 'host2'),
                           ], ['host1', 'host2', 'host3']),
        # count + partial host list
        NodeAssignmentTest('mon', PlacementSpec(count=3, hosts=['host3']),
                           'host1 host2 host3'.split(), [
                               DaemonDescription('mon', 'a', 'host1'),
                               DaemonDescription('mon', 'b', 'host2'),
                           ], ['host1', 'host2', 'host3']),
        # count + partial host list + existing
        NodeAssignmentTest('mon', PlacementSpec(count=2, hosts=['host3']),
                           'host1 host2 host3'.split(), [
                               DaemonDescription('mon', 'a', 'host1'),
                           ], ['host1', 'host3']),
        # label only
        NodeAssignmentTest('mon', PlacementSpec(label='foo'),
Example 25
    # Please do not modify those JSON values.
    assert dd_json == DaemonDescription.from_json(dd_json).to_json()


@pytest.mark.parametrize("spec,dd,valid",
[
    # https://tracker.ceph.com/issues/44934
    (
        RGWSpec(
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
            subcluster='1',
        ),
        DaemonDescription(
            daemon_type='rgw',
            daemon_id="default-rgw-realm.eu-central-1.1.ceph-001.ytywjo",
            hostname="ceph-001",
        ),
        True
    ),
    (
        # no subcluster
        RGWSpec(
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
        ),
        DaemonDescription(
            daemon_type='rgw',
            daemon_id="default-rgw-realm.eu-central-1.ceph-001.ytywjo",
            hostname="ceph-001",
        ),
Example 26
def test_spec_octopus():
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.
    specs_text = """[
{
  "placement": {
    "count": 1
  },
  "service_type": "alertmanager"
},
{
  "placement": {
    "host_pattern": "*"
  },
  "service_type": "crash"
},
{
  "placement": {
    "count": 1
  },
  "service_type": "grafana"
},
{
  "placement": {
    "count": 2
  },
  "service_type": "mgr"
},
{
  "placement": {
    "count": 5
  },
  "service_type": "mon"
},
{
  "placement": {
    "host_pattern": "*"
  },
  "service_type": "node-exporter"
},
{
  "placement": {
    "count": 1
  },
  "service_type": "prometheus"
},
{
  "placement": {
    "hosts": [
      {
        "hostname": "ceph-001",
        "network": "",
        "name": ""
      }
    ]
  },
  "service_type": "rgw",
  "service_id": "default-rgw-realm.eu-central-1.1",
  "rgw_realm": "default-rgw-realm",
  "rgw_zone": "eu-central-1",
  "subcluster": "1"
}
]
"""
    dds_text = """[
    {
        "hostname": "ceph-001",
        "container_id": "d94d7969094d",
        "container_image_id": "0881eb8f169f5556a292b4e2c01d683172b12830a62a9225a98a8e206bb734f0",
        "container_image_name": "docker.io/prom/alertmanager:latest",
        "daemon_id": "ceph-001",
        "daemon_type": "alertmanager",
        "version": "0.20.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725856",
        "created": "2020-04-02T19:23:08.829543",
        "started": "2020-04-03T07:29:16.932838" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "c4b036202241",
        "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1",
        "container_image_name": "docker.io/ceph/ceph:v15",
        "daemon_id": "ceph-001",
        "daemon_type": "crash",
        "version": "15.2.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725903",
        "created": "2020-04-02T19:23:11.390694",
        "started": "2020-04-03T07:29:16.910897" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "5b7b94b48f31",
        "container_image_id": "87a51ecf0b1c9a7b187b21c1b071425dafea0d765a96d5bc371c791169b3d7f4",
        "container_image_name": "docker.io/ceph/ceph-grafana:latest",
        "daemon_id": "ceph-001",
        "daemon_type": "grafana",
        "version": "6.6.2",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725950",
        "created": "2020-04-02T19:23:52.025088",
        "started": "2020-04-03T07:29:16.847972" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "9ca007280456",
        "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1",
        "container_image_name": "docker.io/ceph/ceph:v15",
        "daemon_id": "ceph-001.gkjwqp",
        "daemon_type": "mgr",
        "version": "15.2.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725807",
        "created": "2020-04-02T19:22:18.648584",
        "started": "2020-04-03T07:29:16.856153" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "3d1ba9a2b697",
        "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1",
        "container_image_name": "docker.io/ceph/ceph:v15",
        "daemon_id": "ceph-001",
        "daemon_type": "mon",
        "version": "15.2.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725715",
        "created": "2020-04-02T19:22:13.863300",
        "started": "2020-04-03T07:29:17.206024" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "36d026c68ba1",
        "container_image_id": "e5a616e4b9cf68dfcad7782b78e118be4310022e874d52da85c55923fb615f87",
        "container_image_name": "docker.io/prom/node-exporter:latest",
        "daemon_id": "ceph-001",
        "daemon_type": "node-exporter",
        "version": "0.18.1",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.725996",
        "created": "2020-04-02T19:23:53.880197",
        "started": "2020-04-03T07:29:16.880044" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "faf76193cbfe",
        "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1",
        "container_image_name": "docker.io/ceph/ceph:v15",
        "daemon_id": "0",
        "daemon_type": "osd",
        "version": "15.2.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.726088",
        "created": "2020-04-02T20:35:02.991435",
        "started": "2020-04-03T07:29:19.373956" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "f82505bae0f1",
        "container_image_id": "204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1",
        "container_image_name": "docker.io/ceph/ceph:v15",
        "daemon_id": "1",
        "daemon_type": "osd",
        "version": "15.2.0",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.726134",
        "created": "2020-04-02T20:35:17.142272",
        "started": "2020-04-03T07:29:19.374002" 
    },
    {
        "hostname": "ceph-001",
        "container_id": "2708d84cd484",
        "container_image_id": "358a0d2395fe711bb8258e8fb4b2d7865c0a9a6463969bcd1452ee8869ea6653",
        "container_image_name": "docker.io/prom/prometheus:latest",
        "daemon_id": "ceph-001",
        "daemon_type": "prometheus",
        "version": "2.17.1",
        "status": 1,
        "status_desc": "running",
        "last_refresh": "2020-04-03T15:31:48.726042",
        "created": "2020-04-02T19:24:10.281163",
        "started": "2020-04-03T07:29:16.926292" 
    },
    {
        "hostname": "ceph-001",
        "daemon_id": "default-rgw-realm.eu-central-1.1.ceph-001.ytywjo",
        "daemon_type": "rgw",
        "status": 1,
        "status_desc": "starting" 
    }
]"""
    specs_json = json.loads(specs_text)
    dds_json = json.loads(dds_text)
    specs = [ServiceSpec.from_json(j) for j in specs_json]
    dds = [DaemonDescription.from_json(j) for j in dds_json]

    # just some verification that we can still read old octopus specs
    def remove_service_name(j):
        if 'service_name' in j:
            j_c = j.copy()
            del j_c['service_name']
            return j_c
        return j
    assert specs_json == [remove_service_name(s.to_json()) for s in specs]
    assert dds_json == [d.to_json() for d in dds]
Example 27
 def get_active_daemon(
         self, daemon_descrs: List[DaemonDescription]) -> DaemonDescription:
     # if this is called for a service type where it hasn't explicitly been
     # defined, return empty Daemon Desc
     return DaemonDescription()
Example 28
    assert dd_json == convert_to_old_style_json(
        DaemonDescription.from_json(dd_json).to_json())


@pytest.mark.parametrize(
    "spec,dd,valid",
    [  # noqa: E128
        # https://tracker.ceph.com/issues/44934
        (RGWSpec(
            rgw_realm="default-rgw-realm",
            rgw_zone="eu-central-1",
            subcluster='1',
        ),
         DaemonDescription(
             daemon_type='rgw',
             daemon_id="default-rgw-realm.eu-central-1.1.ceph-001.ytywjo",
             hostname="ceph-001",
         ), True),
        (
            # no subcluster
            RGWSpec(
                rgw_realm="default-rgw-realm",
                rgw_zone="eu-central-1",
            ),
            DaemonDescription(
                daemon_type='rgw',
                daemon_id="default-rgw-realm.eu-central-1.ceph-001.ytywjo",
                hostname="ceph-001",
            ),
            True),
        (
Example 29
def test_dd_octopus(dd_json):
    # https://tracker.ceph.com/issues/44934
    # Those are real user data from early octopus.
    # Please do not modify those JSON values.
    assert dd_json == DaemonDescription.from_json(dd_json).to_json()
Example 30
    def _mock_orchestrator(self, enable: bool) -> Iterator:
        self.io_mock = MagicMock()
        self.io_mock.set_namespace.side_effect = self._ioctx_set_namespace_mock
        self.io_mock.read = self._ioctl_read_mock
        self.io_mock.stat = self._ioctl_stat_mock
        self.io_mock.list_objects.side_effect = self._ioctx_list_objects_mock
        self.io_mock.write_full.side_effect = self._ioctx_write_full_mock
        self.io_mock.remove_object.side_effect = self._ioctx_remove_mock

        # mock nfs services
        orch_nfs_services = [
            ServiceDescription(spec=NFSServiceSpec(service_id=self.cluster_id))
        ] if enable else []

        orch_nfs_daemons = [
            DaemonDescription('nfs', 'foo.mydaemon', 'myhostname')
        ] if enable else []

        def mock_exec(cls, args):
            u = {
                "user_id":
                "abc",
                "display_name":
                "foo",
                "email":
                "",
                "suspended":
                0,
                "max_buckets":
                1000,
                "subusers": [],
                "keys": [{
                    "user": "******",
                    "access_key": "the_access_key",
                    "secret_key": "the_secret_key"
                }],
                "swift_keys": [],
                "caps": [],
                "op_mask":
                "read, write, delete",
                "default_placement":
                "",
                "default_storage_class":
                "",
                "placement_tags": [],
                "bucket_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "user_quota": {
                    "enabled": False,
                    "check_on_raw": False,
                    "max_size": -1,
                    "max_size_kb": 0,
                    "max_objects": -1
                },
                "temp_url_keys": [],
                "type":
                "rgw",
                "mfa_ids": []
            }
            if args[2] == 'list':
                return 0, json.dumps([u]), ''
            return 0, json.dumps(u), ''

        def mock_describe_service(cls, *args, **kwargs):
            if kwargs['service_type'] == 'nfs':
                return OrchResult(orch_nfs_services)
            return OrchResult([])

        def mock_list_daemons(cls, *args, **kwargs):
            if kwargs['daemon_type'] == 'nfs':
                return OrchResult(orch_nfs_daemons)
            return OrchResult([])

        with mock.patch('nfs.module.Module.describe_service', mock_describe_service) as describe_service, \
             mock.patch('nfs.module.Module.list_daemons', mock_list_daemons) as list_daemons, \
                mock.patch('nfs.module.Module.rados') as rados, \
                mock.patch('nfs.export.available_clusters',
                           return_value=[self.cluster_id]), \
                mock.patch('nfs.export.restart_nfs_service'), \
                mock.patch('nfs.cluster.restart_nfs_service'), \
                mock.patch.object(MgrModule, 'tool_exec', mock_exec), \
                mock.patch('nfs.export.check_fs', return_value=True), \
                mock.patch('nfs.export_utils.check_fs', return_value=True), \
                mock.patch('nfs.export.ExportMgr._create_user_key',
                           return_value='thekeyforclientabc'):

            rados.open_ioctx.return_value.__enter__.return_value = self.io_mock
            rados.open_ioctx.return_value.__exit__ = mock.Mock(
                return_value=None)

            self._reset_temp_store()

            yield