def test_refuse_tiny_journals(self, conf_ceph_stub):
    """
    A configured journal below the minimum size must make
    ``prepare.get_journal_size()`` raise RuntimeError, and the error
    message must report both the constraint and the detected size.
    """
    conf_ceph_stub(dedent("""
    [global]
    fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
    [osd]
    osd journal size = 1024
    """))
    with pytest.raises(RuntimeError) as error:
        prepare.get_journal_size()
    # assert against the exception itself: str(error) is the repr of the
    # ExceptionInfo wrapper, whose contents differ between pytest versions,
    # while str(error.value) is always the exception message
    assert 'journal sizes must be larger' in str(error.value)
    assert 'detected: 1024.00 MB' in str(error.value)
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    osd_vgs = []

    # create the vgs first, one per device (since this is colocating, it
    # picks the 'data' path)
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        osd_vgs.append(vg)

    # the LVM-formatted journal size comes from configuration and is the
    # same for every lv, so read it once instead of on every iteration
    journal_size = prepare.get_journal_size()

    # create the lvs from the vgs captured in the beginning
    for vg in osd_vgs:
        journal_lv = lvm.create_lv('osd-journal', vg.name, size=journal_size)
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', vg.name)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def compute(self):
    """
    Go through the rules needed to properly size the lvs, return
    a dictionary with the result
    """
    # choose whichever is the one group we have to compute against
    devices = self.hdds or self.ssds

    osds = self.computed['osds']
    vgs = self.computed['vgs']
    # the journal size comes from configuration and is identical for every
    # device, so read it once instead of re-reading it per iteration
    journal_size = prepare.get_journal_size(lv_format=False)
    for device in devices:
        device_size = disk.Size(b=device['size'])
        data_size = device_size - journal_size
        data_percentage = data_size * 100 / device_size
        # colocated data+journal means two parts carved out of one device
        vgs.append({'devices': [device['path']], 'parts': 2})
        osd = {'data': {}, 'journal': {}}
        osd['data']['path'] = device['path']
        osd['data']['size'] = data_size.b
        osd['data']['percentage'] = int(data_percentage)
        osd['data']['human_readable_size'] = str(data_size)
        osd['journal']['path'] = device['path']
        osd['journal']['size'] = journal_size.b
        osd['journal']['percentage'] = int(100 - data_percentage)
        osd['journal']['human_readable_size'] = str(journal_size)
        osds.append(osd)
def test_undefined_size_fallbacks_unformatted(self, conf_ceph_stub):
    """
    When the configuration has no 'osd journal size' entry, the helper
    falls back to its default, returned as a Size object (5 GB) because
    lv_format is False.
    """
    minimal_conf = dedent("""
    [global]
    fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
    """)
    conf_ceph_stub(minimal_conf)
    size = prepare.get_journal_size(lv_format=False)
    assert size.gb.as_int() == 5
def get_journal_size(args):
    """
    Helper for Filestore strategies, to prefer the --journal-size value
    from the CLI over anything that might be in a ceph configuration
    file (if any).
    """
    if not args.journal_size:
        # nothing given on the CLI, defer to the configuration (or default)
        return prepare.get_journal_size(lv_format=False)
    return disk.Size(mb=args.journal_size)
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
    # map each data device path to its vg; filled in lazily below
    data_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_journal_dev_paths,
                                   name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_journal_dev_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
        )

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def test_defined_size_formatted(self, conf_ceph_stub):
    """
    A configured 'osd journal size' of 10240 (MB) must come back as the
    LVM-formatted string '10G' when no lv_format argument is passed.
    """
    conf = dedent("""
    [global]
    fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
    [osd]
    osd journal size = 10240
    """)
    conf_ceph_stub(conf)
    assert prepare.get_journal_size() == '10G'
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    # map each data device path to its vg; filled in lazily below
    data_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
        )

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def __init__(self, devices, args):
    self.args = args
    self.osds_per_device = args.osds_per_device
    self.devices = devices
    # sys_api reports 'rotational' as the string '1' (spinner) or '0' (ssd)
    self.hdds = [device for device in devices
                 if device.sys_api['rotational'] == '1']
    self.ssds = [device for device in devices
                 if device.sys_api['rotational'] == '0']
    # compute() reads self.computed['vgs'] and appends to it, so the key
    # must be 'vgs' with a list value ({'vg': None} would raise KeyError)
    self.computed = {'osds': [], 'vgs': []}
    self.blank_ssds = []
    # one journal is needed per OSD placed on a spinning device
    self.journals_needed = len(self.hdds) * self.osds_per_device
    self.journal_size = prepare.get_journal_size(lv_format=False)
    self.system_vgs = lvm.VolumeGroups()
    self.validate()
    self.compute()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    ssd_paths = [d.abspath for d in self.blank_ssds]

    if not self.common_vg:
        # no common vg is found, create one with all the blank SSDs
        journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
    elif ssd_paths:
        # a vg exists that can be extended (common_vg is truthy here)
        journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
    else:
        # one common vg with nothing else to extend can be used directly
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    for osd in self.computed['osds']:
        data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', data_vg.name)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = [
            '--filestore',
            '--data', '%s/%s' % (data_vg.name, data_lv.name),
            '--journal', '%s/%s' % (journal_vg.name, journal_lv.name),
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def validate(self):
    """
    Ensure that the minimum requirements for this type of scenario is
    met, raise an error if the provided devices would not work
    """
    # validate minimum size for all devices
    validators.minimum_device_size(self.devices,
                                   osds_per_device=self.osds_per_device)

    # validate collocation
    self.journal_size = prepare.get_journal_size(lv_format=False)
    # collocated journals go on whichever single device group was detected;
    # same selection rule as compute() uses, which removes the duplicated
    # if/else around the identical validator call
    device_group = self.hdds or self.ssds
    validators.minimum_device_collocated_size(
        device_group, self.journal_size,
        osds_per_device=self.osds_per_device
    )

    # make sure that data devices do not have any LVs
    validators.no_lvm_membership(self.hdds)