def execute(self): """ Create vgs/lvs from the incoming set of devices, assign their roles (block, block.db, block.wal, etc..) and offload the OSD creation to ``lvm create`` """ # create the single vg for all block.db lv's first vg_info = self.computed['vgs'][0] vg = lvm.create_vg(vg_info['devices']) # now produce all the block.db lvs needed from that single vg db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db') # create the data lvs, and create the OSD with the matching block.db lvs from before for osd in self.computed['osds']: vg = lvm.create_vg(osd['data']['path']) data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name) db_lv = db_lvs.pop() command = [ '--bluestore', '--data', "%s/%s" % (data_lv.vg_name, data_lv.name), '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name) ] if self.args.dmcrypt: command.append('--dmcrypt') if self.args.no_systemd: command.append('--no-systemd') if self.args.crush_device_class: command.extend( ['--crush-device-class', self.args.crush_device_class]) Create(command).main()
def prepare_device(self, arg, device_type, cluster_fsid, osd_fsid):
    """
    Check if ``arg`` is a device or partition to create an LV out of it
    with a distinct volume group name, assigning LV tags on it and
    ultimately, returning the logical volume object.  Failing to detect
    a device or partition will result in error.

    :param arg: The value of ``--data`` when parsing args
    :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
    :param cluster_fsid: The cluster fsid/uuid
    :param osd_fsid: The OSD fsid/uuid
    :raises RuntimeError: when ``arg`` is neither a partition nor a device
    """
    # guard clause: anything that is not a device/partition cannot be used
    if not (disk.is_partition(arg) or disk.is_device(arg)):
        error = [
            'Cannot use device (%s).' % arg,
            'A vg/lv path or an existing device is needed']
        raise RuntimeError(' '.join(error))

    # we must create a vg, and then a single lv
    vg_name = "ceph-%s" % cluster_fsid
    if api.get_vg(vg_name=vg_name):
        # means we already have a group for this, make a different one
        # XXX this could end up being annoying for an operator, maybe?
        vg_name = "ceph-%s" % str(uuid.uuid4())

    api.create_vg(vg_name, arg)
    lv_name = "osd-%s-%s" % (device_type, osd_fsid)
    return api.create_lv(
        lv_name,
        vg_name,  # the volume group
        tags={'ceph.type': device_type})
    # NOTE: the original trailing ``raise RuntimeError('no data logical
    # volume found with: %s' % arg)`` was unreachable (both branches above
    # return or raise) and has been removed
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # hoisted: the import is loop-invariant; it previously re-ran on every
    # iteration of the per-OSD loop below
    from uuid import uuid4

    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'],
                            name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db
    # lvs from before
    for osd in self.computed['osds']:
        # one dedicated vg per data device; rebinding ``vg`` is intentional
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        # pop() pairs each data lv with an arbitrary block.db lv; raises
        # IndexError if fewer db lvs were created than there are OSDs
        db_lv = db_lvs.pop()
        # FIXME: no support for dmcrypt, crush class, etc...
        Create([
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]).main()
def prepare_device(self, arg, device_type, cluster_fsid, osd_fsid):
    """
    Check if ``arg`` is a device or partition to create an LV out of it
    with a distinct volume group name, assigning LV tags on it and
    ultimately, returning the logical volume object.  Failing to detect
    a device or partition will result in error.

    :param arg: The value of ``--data`` when parsing args
    :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
    :param cluster_fsid: The cluster fsid/uuid
    :param osd_fsid: The OSD fsid/uuid
    :raises RuntimeError: when ``arg`` is neither a partition nor a device
    """
    # guard clause: anything that is not a device/partition cannot be used
    if not (disk.is_partition(arg) or disk.is_device(arg)):
        error = [
            'Cannot use device (%s).' % arg,
            'A vg/lv path or an existing device is needed'
        ]
        raise RuntimeError(' '.join(error))

    # we must create a vg, and then a single lv
    vg_name = "ceph-%s" % cluster_fsid
    if api.get_vg(vg_name=vg_name):
        # means we already have a group for this, make a different one
        # XXX this could end up being annoying for an operator, maybe?
        vg_name = "ceph-%s" % str(uuid.uuid4())

    api.create_vg(vg_name, arg)
    lv_name = "osd-%s-%s" % (device_type, osd_fsid)
    return api.create_lv(
        lv_name,
        vg_name,  # the volume group
        tags={'ceph.type': device_type})
    # NOTE: the original trailing ``raise RuntimeError('no data logical
    # volume found with: %s' % arg)`` was unreachable (both branches above
    # return or raise) and has been removed
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # hoisted: the import is loop-invariant; it previously re-ran on every
    # iteration of the per-OSD loop below
    from uuid import uuid4

    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'],
                            name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db
    # lvs from before
    for osd in self.computed['osds']:
        # one dedicated vg per data device; rebinding ``vg`` is intentional
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        # pop() pairs each data lv with an arbitrary block.db lv; raises
        # IndexError if fewer db lvs were created than there are OSDs
        db_lv = db_lvs.pop()
        # FIXME: no support for dmcrypt, crush class, etc...
        Create([
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]).main()
def execute(self): """ Create vgs/lvs from the incoming set of devices, assign their roles (block, block.db, block.wal, etc..) and offload the OSD creation to ``lvm create`` """ # create the single vg for all block.db lv's first vg_info = self.computed['vgs'][0] vg = lvm.create_vg(vg_info['devices']) # now produce all the block.db lvs needed from that single vg db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db') # create the data lvs, and create the OSD with the matching block.db lvs from before for osd in self.computed['osds']: vg = lvm.create_vg(osd['data']['path']) data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name) db_lv = db_lvs.pop() command = [ '--bluestore', '--data', "%s/%s" % (data_lv.vg_name, data_lv.name), '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name) ] if self.args.dmcrypt: command.append('--dmcrypt') if self.args.no_systemd: command.append('--no-systemd') if self.args.crush_device_class: command.extend(['--crush-device-class', self.args.crush_device_class]) Create(command).main()
def test_devices_list(self, monkeypatch, fake_run):
    # every device in the list should be appended to the vgcreate call
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    expected_call = [
        'vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
    api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
    assert fake_run.calls[0]['args'][0] == expected_call
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_journal_dev_paths,
                                   name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_journal_dev_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        # parts=1 means the data lv consumes the whole vg
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
        )

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        # re-use an id when the strategy computed one for this osd
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        # the requested byte size is converted to whole GBs, then to extents
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
        )

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # map every distinct data device path to its (not yet created) vg
    osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # first pass: create one vg per distinct device path
    for osd in self.computed['osds']:
        device_path = osd['data']['path']
        if not osd_vgs.get(device_path):
            new_vg = lvm.create_vg(device_path)
            osd_vgs[device_path] = {
                'vg': new_vg,
                'parts': osd['data']['parts']
            }

    # second pass: carve the data lvs out of each vg and create the OSDs
    for entry in osd_vgs.values():
        group_name = entry['vg'].name
        created_lvs = lvm.create_lvs(
            entry['vg'], parts=entry['parts'], name_prefix='osd-data')
        for logical_volume in created_lvs:
            # FIXME: no support for dmcrypt, crush class, etc...
            Create([
                '--bluestore', '--data',
                "%s/%s" % (group_name, logical_volume.name),
            ]).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # map every distinct data device path to its (not yet created) vg
    osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # first pass: create one vg per distinct device path
    for osd in self.computed['osds']:
        device_path = osd['data']['path']
        if not osd_vgs.get(device_path):
            new_vg = lvm.create_vg(device_path)
            osd_vgs[device_path] = {'vg': new_vg, 'parts': osd['data']['parts']}

    # second pass: carve the data lvs out of each vg and create the OSDs
    for entry in osd_vgs.values():
        group_name = entry['vg'].name
        created_lvs = lvm.create_lvs(
            entry['vg'], parts=entry['parts'], name_prefix='osd-data')
        for logical_volume in created_lvs:
            # FIXME: no support for dmcrypt, crush class, etc...
            Create([
                '--bluestore', '--data',
                "%s/%s" % (group_name, logical_volume.name),
            ]).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    # one vg per device; collocated, so each vg holds both data and journal
    osd_vgs = [lvm.create_vg(osd['data']['path'])
               for osd in self.computed['osds']]

    for vg in osd_vgs:
        # called again per vg, getting us the LVM formatted string
        journal_size = prepare.get_journal_size()
        journal_lv = lvm.create_lv('osd-journal', vg.name, size=journal_size)
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', vg.name)

        command = [
            '--filestore',
            '--data', '%s/%s' % (vg.name, data_lv.name),
            '--journal', '%s/%s' % (vg.name, journal_lv.name),
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # map every distinct data device path to its (not yet created) vg
    osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # first pass: create one vg per distinct device path
    for osd in self.computed['osds']:
        device_path = osd['data']['path']
        if not osd_vgs.get(device_path):
            new_vg = lvm.create_vg(device_path)
            osd_vgs[device_path] = {'vg': new_vg, 'parts': osd['data']['parts']}

    # second pass: carve the data lvs out of each vg and create the OSDs
    for entry in osd_vgs.values():
        group_name = entry['vg'].name
        created_lvs = lvm.create_lvs(
            entry['vg'], parts=entry['parts'], name_prefix='osd-data')
        for logical_volume in created_lvs:
            command = [
                '--bluestore',
                '--data', '%s/%s' % (group_name, logical_volume.name),
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(
                    ['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # map every distinct data device path to its (not yet created) vg
    osd_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # first pass: create one vg per distinct device path
    for osd in self.computed['osds']:
        device_path = osd['data']['path']
        if not osd_vgs.get(device_path):
            new_vg = lvm.create_vg(device_path)
            osd_vgs[device_path] = {'vg': new_vg, 'parts': osd['data']['parts']}

    # second pass: carve the data lvs out of each vg and create the OSDs
    for entry in osd_vgs.values():
        group_name = entry['vg'].name
        created_lvs = lvm.create_lvs(
            entry['vg'], parts=entry['parts'], name_prefix='osd-data')
        for logical_volume in created_lvs:
            command = [
                '--bluestore',
                '--data', '%s/%s' % (group_name, logical_volume.name),
            ]
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(
                    ['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    # data device path -> vg mapping, filled in by the first loop below
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
            device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        # the requested byte size is converted to whole GBs, then to extents
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(
            size=self.journal_size.gb.as_int())['extents']
        data_uuid = system.generate_uuid()
        data_lv = lvm.create_lv(
            'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
        journal_uuid = system.generate_uuid()
        journal_lv = lvm.create_lv(
            'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])
        # re-use an id when the strategy computed one for this osd
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    ssd_paths = [d.abspath for d in self.blank_ssds]

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    for osd in self.computed['osds']:
        # one dedicated vg per data device
        data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', data_vg.name)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    # data device path -> vg mapping, filled in by the first loop below
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
            device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        # the requested byte size is converted to whole GBs, then to extents
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
        data_lv = lvm.create_lv(
            'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True
        )
        journal_lv = lvm.create_lv(
            'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True
        )

        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        # re-use an id when the strategy computed one for this osd
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def test_specific_name(self, monkeypatch, fake_run):
    # an explicit name is used verbatim as the vg name
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    api.create_vg('/dev/sda', name='master')
    vgcreate_args = fake_run.calls[0]['args'][0]
    assert '/dev/sda' in vgcreate_args
    assert vgcreate_args[-2] == 'master'
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # since we are falling back to a block_db_size that might be "as large
    # as possible" we can't fully rely on LV format coming from the helper
    # function that looks up this value
    block_db_size = "%sG" % self.block_db_size.gb.as_int()

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
        )
        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # since we are falling back to a block_db_size that might be "as large
    # as possible" we can't fully rely on LV format coming from the helper
    # function that looks up this value
    block_db_size = "%sG" % self.block_db_size.gb.as_int()

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
        )
        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    if self.use_large_block_db:
        # make the block.db lvs as large as possible
        vg_free_count = str_to_int(db_vg.vg_free_count)
        db_lv_extents = int(vg_free_count / self.dbs_needed)
    else:
        db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        # parts=1 means the data lv consumes the whole vg
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
        )
        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def test_no_name(self, monkeypatch, fake_run):
    # without an explicit name the vg gets an auto-generated 'ceph-' prefix
    monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
    api.create_vg('/dev/sda')
    vgcreate_args = fake_run.calls[0]['args'][0]
    assert '/dev/sda' in vgcreate_args
    assert vgcreate_args[-2].startswith('ceph-')
def test_name_prefix(self, monkeypatch, fake_run):
    # a name_prefix produces a '<prefix>-' auto-generated vg name
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    api.create_vg('/dev/sda', name_prefix='master')
    vgcreate_args = fake_run.calls[0]['args'][0]
    assert '/dev/sda' in vgcreate_args
    assert vgcreate_args[-2].startswith('master-')
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # data device path -> vg mapping, filled in by the first loop below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    if self.data_devs and self.db_or_journal_devs:
        blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
        elif self.common_vg and blank_db_dev_paths:
            # if a common vg exists then extend it with any blank ssds
            db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
        else:
            # one common vg with nothing else to extend can be used directly,
            # either this is one device with one vg, or multiple devices with the
            # same vg
            db_vg = self.common_vg

        if self.use_large_block_db:
            # make the block.db lvs as large as possible
            vg_free_count = str_to_int(db_vg.vg_free_count)
            db_lv_extents = int(vg_free_count / self.dbs_needed)
        else:
            db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

    if self.data_devs and self.wal_devs:
        blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

        # mirror of the block.db vg handling above, for block.wal devices
        if not self.common_wal_vg:
            wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                   name_prefix='ceph-block-wals')
        elif self.common_wal_vg and blank_wal_dev_paths:
            wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
        else:
            wal_vg = self.common_wal_vg

        if self.use_large_block_wal:
            # make the block.db lvs as large as possible
            vg_free_count = str_to_int(wal_vg.vg_free_count)
            wal_lv_extents = int(vg_free_count / self.wals_needed)
        else:
            wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        # parts=1 means the data lv consumes the whole vg
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
        )
        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
        ]
        # NOTE(review): db_vg/wal_vg are only bound when the corresponding
        # branch above ran; 'block.db'/'block.wal' keys presumably only
        # appear in ``osd`` in those same cases — verify against the planner
        if 'block.db' in osd:
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
            )
            command.extend([
                '--block.db',
                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
        if 'block.wal' in osd:
            wal_lv = lvm.create_lv(
                'osd-block-wal', wal_vg.name, extents=wal_lv_extents, uuid_name=True
            )
            command.extend(
                ['--block.wal', '{}/{}'.format(wal_lv.vg_name, wal_lv.name)])
        # optional passthrough flags from the parsed CLI args
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        # re-use an id when the strategy computed one for this osd
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def test_devices_list(self, monkeypatch, fake_run):
    # every device in the list should be appended to the vgcreate call
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    expected_call = [
        'vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
    api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
    assert fake_run.calls[0]['args'][0] == expected_call
def prepare(self, args):
    """
    Prepare a logical volume for OSD use, creating a vg/lv out of a raw
    device or partition when needed, setting the LVM tags the OSD relies
    on before calling ``prepare_filestore`` or ``prepare_bluestore``.

    :param args: parsed CLI args; ``args.filestore`` / ``args.bluestore``
        select the objectstore backend
    :raises RuntimeError: when ``--journal`` is missing for filestore, or
        when the data argument is not an lv, device, or partition
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(
        osd_fsid, json.dumps(secrets))
    if args.filestore:
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        data_lv = self.get_lv(args.data)
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        }

        journal_device, journal_uuid, tags = self.setup_device(
            'journal', args.journal, tags)

        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
    elif args.bluestore:
        block_lv = self.get_lv(args.data)
        if not block_lv:
            # a raw device/partition was given instead of an lv
            if disk.is_partition(args.data) or disk.is_device(args.data):
                # we must create a vg, and then a single lv
                vg_name = "ceph-%s" % cluster_fsid
                if api.get_vg(vg_name=vg_name):
                    # means we already have a group for this, make a different one
                    # XXX this could end up being annoying for an operator, maybe?
                    vg_name = "ceph-%s" % str(uuid.uuid4())
                api.create_vg(vg_name, args.data)
                block_name = "osd-block-%s" % osd_fsid
                block_lv = api.create_lv(
                    block_name,
                    vg_name,  # the volume group
                    tags={'ceph.type': 'block'})
            else:
                error = [
                    'Cannot use device (%s) for bluestore. ' % args.data,
                    'A vg/lv path or an existing device is needed']
                raise RuntimeError(' '.join(error))

        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.block_device': block_lv.lv_path,
            'ceph.block_uuid': block_lv.lv_uuid,
        }

        wal_device, wal_uuid, tags = self.setup_device(
            'wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device(
            'db', args.block_db, tags)

        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
def test_specific_name(self, monkeypatch, fake_run):
    # an explicit name is used verbatim as the vg name
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    api.create_vg('/dev/sda', name='master')
    vgcreate_args = fake_run.calls[0]['args'][0]
    assert '/dev/sda' in vgcreate_args
    assert vgcreate_args[-2] == 'master'
def test_name_prefix(self, monkeypatch, fake_run):
    # a name_prefix produces a '<prefix>-' auto-generated vg name
    monkeypatch.setattr(api, 'get_vg', lambda **kw: True)
    api.create_vg('/dev/sda', name_prefix='master')
    vgcreate_args = fake_run.calls[0]['args'][0]
    assert '/dev/sda' in vgcreate_args
    assert vgcreate_args[-2].startswith('master-')