Example No. 1
    def _create_osd(self, svc_arg=None, inbuf=None):
        # type: (Optional[str], Optional[str]) -> HandleCommandResult
        """Create one or more OSDs"""

        usage = """
Usage:
  ceph orchestrator osd create -i <json_file>
  ceph orchestrator osd create host:device1,device2,...
"""

        if inbuf:
            try:
                drive_group = DriveGroupSpec.from_json(json.loads(inbuf))
            except ValueError as e:
                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

        elif svc_arg:
            try:
                node_name, block_device = svc_arg.split(":")
                block_devices = block_device.split(',')
            except (TypeError, KeyError, ValueError):
                msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)

            devs = DeviceSelection(paths=block_devices)
            drive_group = DriveGroupSpec(node_name, data_devices=devs)
        else:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)

        completion = self.create_osds(drive_group)
        self._orchestrator_wait([completion])
        orchestrator.raise_if_exception(completion)
        self.log.warning(str(completion.result))
        return HandleCommandResult(stdout=completion.result_str())
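
For the -i <json_file> form shown in the usage text, the JSON document is parsed with DriveGroupSpec.from_json(). Below is a minimal sketch of such an input; the field names are taken from the drive group examples further down this page, while the import path and the exact accepted schema are assumptions that may differ between Ceph releases.

import json

from ceph.deployment.drive_group import DriveGroupSpec  # import path assumed

# Hypothetical drive group document: pick every rotational disk on every host.
example_json = '''
{
    "service_type": "osd",
    "service_id": "all_hdd",
    "placement": {"host_pattern": "*"},
    "data_devices": {"rotational": true}
}
'''
drive_group = DriveGroupSpec.from_json(json.loads(example_json))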
Example No. 2
def test_ceph_volume_command_4():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(size='200GB:350GB',
                                                       rotational=True),
                          db_devices=DeviceSelection(size='200GB:350GB',
                                                     rotational=False),
                          wal_devices=DeviceSelection(size='10G'),
                          block_db_size='500M',
                          block_wal_size='500M',
                          osds_per_device=3,
                          encrypted=True)
    spec.validate()
    inventory = _mk_inventory(
        _mk_device(rotational=True, size="300.00 GB") * 2 +
        _mk_device(rotational=False, size="300.00 GB") * 2 +
        _mk_device(size="10.0 GB", rotational=False) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmds = translate.to_ceph_volume(sel, []).run()
    assert all(cmd == (
        'lvm batch --no-auto /dev/sda /dev/sdb '
        '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
        '--block-wal-size 500M --block-db-size 500M --dmcrypt '
        '--osds-per-device 3 --yes --no-systemd')
               for cmd in cmds), f'Unexpected ceph-volume commands: {cmds}'
Example No. 3
def test_ceph_volume_command_0():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True))
    spec.validate()
    inventory = _mk_inventory(_mk_device() * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, []).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'
Example No. 4
def test_ceph_volume_command_7():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True),
                          osd_id_claims={'host1': ['0', '1']})
    spec.validate()
    inventory = _mk_inventory(_mk_device(rotational=True) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, ['0', '1']).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --osd-ids 0 1 --yes --no-systemd'
Example No. 5
def test_ceph_volume_command_5():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(rotational=True),
                          objectstore='filestore')
    with pytest.raises(DriveGroupValidationError):
        spec.validate()
    inventory = _mk_inventory(_mk_device(rotational=True) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(sel, []).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'
Example No. 6
def test_ceph_volume_command_1():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(rotational=True),
                          db_devices=DeviceSelection(rotational=False))
    spec.validate()
    inventory = _mk_inventory(
        _mk_device(rotational=True) * 2 + _mk_device(rotational=False) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmds = translate.to_ceph_volume(sel, []).run()
    assert all(cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
                       '--db-devices /dev/sdc /dev/sdd --yes --no-systemd')
               for cmd in cmds), f'Unexpected ceph-volume commands: {cmds}'
Example No. 7
def test_ceph_volume_command_9():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(all=True),
                          data_allocate_fraction=0.8)
    spec.validate()
    inventory = _mk_inventory(_mk_device() * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmds = translate.to_ceph_volume(sel, []).run()
    assert all(
        cmd ==
        'lvm batch --no-auto /dev/sda /dev/sdb --data-allocate-fraction 0.8 --yes --no-systemd'
        for cmd in cmds), f'Unexpected ceph-volume commands: {cmds}'
Example No. 8
def test_DriveGroup(test_input):
    dg = [DriveGroupSpec.from_json(inp) for inp in test_input][0]
    assert dg.placement.filter_matching_hostspecs([HostSpec('hostname')
                                                   ]) == ['hostname']
    assert dg.service_id == 'testing_drivegroup'
    assert all([isinstance(x, Device) for x in dg.data_devices.paths])
    assert dg.data_devices.paths[0].path == '/dev/sda'
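
The test_input fixture is not shown on this page. The following is a hedged reconstruction of one entry, based only on the assertions above (the real fixture may contain additional fields):

test_input = [{
    'service_type': 'osd',
    'service_id': 'testing_drivegroup',
    'placement': {'host_pattern': 'hostname'},
    'data_devices': {'paths': ['/dev/sda']},
}]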
Example No. 9
 def test_create_osds(self, _send_command, _get_connection, _save_host,
                      _rm_host, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec('test',
                             data_devices=DeviceSelection(paths=['']))
         c = cephadm_module.create_osds([dg])
         assert wait(cephadm_module, c) == ["Created osd(s) on host 'test'"]
Example No. 10
 def test_driveselection_to_ceph_volume(self, cephadm_module, devices, preview, exp_command):
     with with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=devices))
         ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
         preview = preview
         out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
         assert out in exp_command
Example No. 11
    def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
        _run_cephadm.return_value = ('{}', '', 0)
        with with_host(cephadm_module, 'test'):

            spec = DriveGroupSpec(
                service_id='foo',
                placement=PlacementSpec(
                    host_pattern='*',
                ),
                data_devices=DeviceSelection(
                    all=True
                )
            )

            c = cephadm_module.apply([spec])
            assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']

            inventory = Devices([
                Device(
                    '/dev/sdb',
                    available=True
                ),
            ])

            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

            _run_cephadm.return_value = (['{}'], '', 0)

            assert cephadm_module._apply_all_services() == False

            _run_cephadm.assert_any_call(
                'test', 'osd', 'ceph-volume',
                ['--config-json', '-', '--', 'lvm', 'prepare', '--bluestore', '--data', '/dev/sdb', '--no-systemd'],
                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
            _run_cephadm.assert_called_with('test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
Example No. 12
    def _apply_osd(self, all_available_devices=False, inbuf=None):
        # type: (bool, Optional[str]) -> HandleCommandResult
        """Apply DriveGroupSpecs to create OSDs"""
        usage = """
Usage:
  ceph orch apply osd -i <json_file/yaml_file>
  ceph orch apply osd --use-all-devices
"""
        if not inbuf and not all_available_devices:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)
        if inbuf:
            if all_available_devices:
                raise OrchestratorError(
                    '--all-available-devices cannot be combined with an osd spec'
                )
            try:
                drivegroups = yaml.safe_load_all(inbuf)
                dg_specs = [ServiceSpec.from_json(dg) for dg in drivegroups]
            except ValueError as e:
                msg = 'Failed to read JSON/YAML input: {}'.format(
                    str(e)) + usage
                return HandleCommandResult(-errno.EINVAL, stderr=msg)
        else:
            dg_specs = [
                DriveGroupSpec(
                    service_id='all-available-devices',
                    placement=PlacementSpec(host_pattern='*'),
                    data_devices=DeviceSelection(all=True),
                )
            ]

        completion = self.apply_drivegroups(dg_specs)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
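
For the -i <json_file/yaml_file> form, here is a sketch of a YAML document that the yaml.safe_load_all / ServiceSpec.from_json path above could consume; the field names mirror the DriveGroupSpec examples on this page, and the exact schema accepted by a given Ceph release is an assumption here.

import yaml

from ceph.deployment.service_spec import ServiceSpec  # import path assumed

osd_spec_yaml = '''
service_type: osd
service_id: all_hdd
placement:
  host_pattern: '*'
data_devices:
  rotational: true
'''
dg_specs = [ServiceSpec.from_json(doc) for doc in yaml.safe_load_all(osd_spec_yaml)]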
Example No. 13
    def test_osd_create_with_drive_groups(self, instance):
        # without orchestrator service
        fake_client = mock.Mock()
        instance.return_value = fake_client

        # Valid DriveGroup
        data = self._get_drive_group_data()

        # Without orchestrator service
        fake_client.available.return_value = False
        self._task_post('/api/osd', data)
        self.assertStatus(503)

        # With orchestrator service
        fake_client.available.return_value = True
        fake_client.get_missing_features.return_value = []
        self._task_post('/api/osd', data)
        self.assertStatus(201)
        dg_specs = [
            DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                           service_id='all_hdd',
                           service_type='osd',
                           data_devices=DeviceSelection(rotational=True))
        ]
        fake_client.osds.create.assert_called_with(dg_specs)
Example No. 14
    def _daemon_add_osd(self, svc_arg=None):
        # type: (Optional[str]) -> HandleCommandResult
        """Create one or more OSDs"""

        usage = """
Usage:
  ceph orch daemon add osd host:device1,device2,...
"""
        if not svc_arg:
            return HandleCommandResult(-errno.EINVAL, stderr=usage)
        try:
            host_name, block_device = svc_arg.split(":")
            block_devices = block_device.split(',')
            devs = DeviceSelection(paths=block_devices)
            drive_group = DriveGroupSpec(
                placement=PlacementSpec(host_pattern=host_name),
                data_devices=devs)
        except (TypeError, KeyError, ValueError):
            msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
            return HandleCommandResult(-errno.EINVAL, stderr=msg)

        completion = self.create_osds(drive_group)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        return HandleCommandResult(stdout=completion.result_str())
Example No. 15
    def test_osd_create_with_drive_groups(self, instance):
        # without orchestrator service
        fake_client = mock.Mock()
        instance.return_value = fake_client

        # Valid DriveGroup
        data = {
            'method': 'drive_group',
            'data': {
                'service_type': 'osd',
                'service_id': 'all_hdd',
                'data_devices': {
                    'rotational': True
                },
                'host_pattern': '*'
            },
            'tracking_id': 'all_hdd, b_ssd'
        }

        # Without orchestrator service
        fake_client.available.return_value = False
        self._task_post('/api/osd', data)
        self.assertStatus(503)

        # With orchestrator service
        fake_client.available.return_value = True
        self._task_post('/api/osd', data)
        self.assertStatus(201)
        fake_client.osds.create.assert_called_with(
            DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                           service_id='all_hdd',
                           service_type='osd',
                           data_devices=DeviceSelection(rotational=True)))
Example No. 16
File: osd.py  Project: zli091/ceph
    def prepare_drivegroup(self, drive_group: DriveGroupSpec) -> List[Tuple[str, DriveSelection]]:
        # 1) use fn_filter to determine matching_hosts
        matching_hosts = drive_group.placement.filter_matching_hostspecs(
            self.mgr.inventory.all_specs())
        # 2) Map the inventory to the InventoryHost object
        host_ds_map = []

        # set osd_id_claims

        def _find_inv_for_host(hostname: str, inventory_dict: dict) -> List[Device]:
            # This is stupid and needs to be loaded with the host
            for _host, _inventory in inventory_dict.items():
                if _host == hostname:
                    return _inventory
            raise OrchestratorError("No inventory found for host: {}".format(hostname))

        # 3) iterate over matching_host and call DriveSelection
        logger.debug(f"Checking matching hosts -> {matching_hosts}")
        for host in matching_hosts:
            inventory_for_host = _find_inv_for_host(host, self.mgr.cache.devices)
            logger.debug(f"Found inventory for host {inventory_for_host}")

            # List of Daemons on that host
            dd_for_spec = self.mgr.cache.get_daemons_by_service(drive_group.service_name())
            dd_for_spec_and_host = [dd for dd in dd_for_spec if dd.hostname == host]

            drive_selection = DriveSelection(drive_group, inventory_for_host,
                                             existing_daemons=len(dd_for_spec_and_host))
            logger.debug(f"Found drive selection {drive_selection}")
            host_ds_map.append((host, drive_selection))
        return host_ds_map
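
A minimal usage sketch of the return value (the osd_service and drive_group names are hypothetical; the translation call mirrors the create() method quoted in a later example on this page):

# prepare_drivegroup() pairs each matching hostname with its computed DriveSelection.
for host, selection in osd_service.prepare_drivegroup(drive_group):
    cmd = osd_service.driveselection_to_ceph_volume(
        drive_group, selection, drive_group.osd_id_claims.get(host, []))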
Example No. 17
 def test_create_osds(self, cephadm_module):
     with with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
                             data_devices=DeviceSelection(paths=['']))
         c = cephadm_module.create_osds(dg)
         out = wait(cephadm_module, c)
         assert out == "Created no osd(s) on host test; already created?"
Example No. 18
def test_drivegroup_pattern():
    dg = DriveGroupSpec(PlacementSpec(host_pattern='node[1-3]'),
                        service_id='foobar',
                        data_devices=DeviceSelection(all=True))
    assert dg.placement.filter_matching_hostspecs([
        HostSpec('node{}'.format(i)) for i in range(10)
    ]) == ['node1', 'node2', 'node3']
Example No. 19
def test_drive_selection():
    devs = DeviceSelection(paths=['/dev/sda'])
    spec = DriveGroupSpec('node_name', data_devices=devs)
    assert spec.data_devices.paths == ['/dev/sda']

    with pytest.raises(DriveGroupValidationError, match='exclusive'):
        DeviceSelection(paths=['/dev/sda'], rotates=False)
Example No. 20
 def create(self, drive_group: DriveGroupSpec) -> str:
     logger.debug(f"Processing DriveGroup {drive_group}")
     ret = []
     drive_group.osd_id_claims = self.find_destroyed_osds()
     logger.info(
         f"Found osd claims for drivegroup {drive_group.service_id} -> {drive_group.osd_id_claims}"
     )
     for host, drive_selection in self.prepare_drivegroup(drive_group):
         logger.info('Applying %s on host %s...' %
                     (drive_group.service_id, host))
         cmd = self.driveselection_to_ceph_volume(
             drive_group, drive_selection,
             drive_group.osd_id_claims.get(host, []))
         if not cmd:
             logger.debug("No data_devices, skipping DriveGroup: {}".format(
                 drive_group.service_id))
             continue
         # env_vars = [f"CEPH_VOLUME_OSDSPEC_AFFINITY={drive_group.service_id}"]
         # disable this until https://github.com/ceph/ceph/pull/34835 is merged
         env_vars: List[str] = []
         ret_msg = self.create_single_host(
             host,
             cmd,
             replace_osd_ids=drive_group.osd_id_claims.get(host, []),
             env_vars=env_vars)
         ret.append(ret_msg)
     return ", ".join(ret)
Example No. 21
 def _create_with_drive_groups(self, drive_group):
     """Create OSDs with DriveGroups."""
     orch = OrchClient.instance()
     try:
         orch.osds.create(DriveGroupSpec.from_json(drive_group))
     except (ValueError, TypeError, DriveGroupValidationError) as e:
         raise DashboardException(e, component='osd')
Example No. 22
    def test_osd_create_with_drive_groups(self, instance):
        # without orchestrator service
        fake_client = mock.Mock()
        instance.return_value = fake_client

        # Valid DriveGroups
        data = {
            'method': 'drive_groups',
            'data': {
                'all_hdd': {
                    'host_pattern': '*',
                    'data_devices': {
                        'rotational': True
                    }
                },
                'b_ssd': {
                    'host_pattern': 'b',
                    'data_devices': {
                        'rotational': False
                    }
                }
            },
            'tracking_id': 'all_hdd, b_ssd'
        }

        # Without orchestrator service
        fake_client.available.return_value = False
        self._task_post('/api/osd', data)
        self.assertStatus(503)

        # With orchestrator service
        fake_client.available.return_value = True
        self._task_post('/api/osd', data)
        self.assertStatus(201)
        fake_client.osds.create.assert_called_with([
            DriveGroupSpec(host_pattern='*',
                           name='all_hdd',
                           data_devices=DeviceSelection(rotational=True)),
            DriveGroupSpec(host_pattern='b',
                           name='b_ssd',
                           data_devices=DeviceSelection(rotational=False))
        ])

        # Invalid DriveGroups
        data['data']['b'] = {'host_pattern1': 'aa'}
        self._task_post('/api/osd', data)
        self.assertStatus(400)
Example No. 23
 def test_create_osds(self, cephadm_module):
     with self._with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec('test',
                             data_devices=DeviceSelection(paths=['']))
         c = cephadm_module.create_osds([dg])
         assert wait(cephadm_module, c) == [
             "Created no osd(s) on host test; already created?"
         ]
Example No. 24
def test_raw_ceph_volume_command_1():
    spec = DriveGroupSpec(
        placement=PlacementSpec(host_pattern='*'),
        service_id='foobar',
        data_devices=DeviceSelection(rotational=True),
        db_devices=DeviceSelection(rotational=False),
        method='raw',
    )
    spec.validate()
    inventory = _mk_inventory(
        _mk_device(rotational=True) +  # data
        _mk_device(rotational=True) +  # data
        _mk_device(rotational=False)  # db
    )
    sel = drive_selection.DriveSelection(spec, inventory)
    with pytest.raises(ValueError):
        cmds = translate.to_ceph_volume(sel, []).run()
Example No. 25
def test_drivegroup_pattern():
    dg = DriveGroupSpec(PlacementSpec(host_pattern='node[1-3]'),
                        data_devices=DeviceSelection(all=True))
    assert dg.placement.filter_matching_hosts(
        lambda label=None, as_hostspec=None:
        ['node{}'.format(i) for i in range(10)]) == [
            'node1', 'node2', 'node3'
        ]
Example No. 26
 def test_prepare_drivegroup(self, cephadm_module):
     with with_host(cephadm_module, 'test'):
         dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'), data_devices=DeviceSelection(paths=['']))
         out = cephadm_module.osd_service.prepare_drivegroup(dg)
         assert len(out) == 1
         f1 = out[0]
         assert f1[0] == 'test'
         assert isinstance(f1[1], DriveSelection)
Example No. 27
def test_ceph_volume_command_5():
    spec = DriveGroupSpec(host_pattern='*',
                          data_devices=DeviceSelection(rotational=True),
                          objectstore='filestore')
    inventory = _mk_inventory(_mk_device(rotational=True) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmd = translate.to_ceph_volume(spec, sel).run()
    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'
Example No. 28
def test_drive_selection():
    devs = DeviceSelection(paths=['/dev/sda'])
    spec = DriveGroupSpec('node_name', data_devices=devs)
    assert all([isinstance(x, Device) for x in spec.data_devices.paths])
    assert spec.data_devices.paths[0].path == '/dev/sda'

    with pytest.raises(DriveGroupValidationError, match='exclusive'):
        DeviceSelection(paths=['/dev/sda'], rotational=False)
Example No. 29
 def get_dg_spec(self, dg):
     dg_spec = DriveGroupSpec._from_json_impl(dg)
     dg_spec.validate()
     i = Inventory([])
     i.main()
     inventory = i.get_report()
     devices = [Device.from_json(dev) for dev in inventory]
     selection = DriveSelection(dg_spec, devices)
     return to_ceph_volume(selection)
Example No. 30
def test_ceph_volume_command_6():
    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                          service_id='foobar',
                          data_devices=DeviceSelection(rotational=False),
                          journal_devices=DeviceSelection(rotational=True),
                          journal_size='500M',
                          objectstore='filestore')
    with pytest.raises(DriveGroupValidationError):
        spec.validate()
    inventory = _mk_inventory(
        _mk_device(rotational=True) * 2 + _mk_device(rotational=False) * 2)
    sel = drive_selection.DriveSelection(spec, inventory)
    cmds = translate.to_ceph_volume(sel, []).run()
    assert all(
        cmd == ('lvm batch --no-auto /dev/sdc /dev/sdd '
                '--journal-size 500M --journal-devices /dev/sda /dev/sdb '
                '--filestore --yes --no-systemd')
        for cmd in cmds), f'Unexpected ceph-volume commands: {cmds}'