def test_calls_to_set_type_tag(self, monkeypatch, capture):
    monkeypatch.setattr(process, 'run', capture)
    monkeypatch.setattr(process, 'call', capture)
    monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
    api.create_lv('foo', 'foo_group', size=5, type='data')
    ceph_tag = ['sudo', 'lvchange', '--addtag', 'ceph.type=data', '/path']
    assert capture.calls[1]['args'][0] == ceph_tag
def test_uses_size(self, monkeypatch, capture):
    monkeypatch.setattr(process, 'run', capture)
    monkeypatch.setattr(process, 'call', capture)
    monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
    api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'})
    expected = ['lvcreate', '--yes', '-L', '5G', '-n', 'foo', 'foo_group']
    assert capture.calls[0]['args'][0] == expected
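# The tests above rely on a ``capture`` fixture that intercepts everything
# ceph-volume would shell out through ``process.run``/``process.call``. A
# minimal sketch of such a fixture, assuming this shape (names and layout are
# assumptions, not necessarily the suite's actual conftest):
import pytest


class Capture(object):

    def __init__(self):
        # one dict per intercepted call, so tests can assert on the exact
        # command list via capture.calls[N]['args'][0]
        self.calls = []

    def __call__(self, *args, **kwargs):
        self.calls.append({'args': args, 'kwargs': kwargs})


@pytest.fixture
def capture():
    return Capture()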
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    osd_vgs = []

    # create the vgs first, one per device (since this is colocating, it
    # picks the 'data' path)
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        osd_vgs.append(vg)

    # create the lvs from the vgs captured in the beginning
    for vg in osd_vgs:
        # this is called again, getting us the LVM formatted string
        journal_size = prepare.get_journal_size()
        journal_lv = lvm.create_lv('osd-journal', vg.name, size=journal_size)
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', vg.name)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def test_calls_to_set_data_tag(self, monkeypatch, capture):
    monkeypatch.setattr(process, 'run', capture)
    monkeypatch.setattr(process, 'call', capture)
    monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
    api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'})
    data_tag = ['lvchange', '--addtag', 'ceph.data_device=/path', '/path']
    assert capture.calls[2]['args'][0] == data_tag
def test_uses_uuid(self, monkeypatch, capture):
    monkeypatch.setattr(process, 'run', capture)
    monkeypatch.setattr(process, 'call', capture)
    monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
    api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'},
                  uuid_name=True)
    result = capture.calls[0]['args'][0][5]
    assert result.startswith('foo-')
    # the 'foo-' prefix (4 chars) plus a 36-character UUID string
    assert len(result) == 40
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_journal_dev_paths,
                                   name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_journal_dev_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])

        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def prepare_device(self, arg, device_type, cluster_fsid, osd_fsid):
    """
    Check if ``arg`` is a device or partition to create an LV out of it
    with a distinct volume group name, assigning LV tags on it and
    ultimately, returning the logical volume object.  Failing to detect
    a device or partition will result in error.

    :param arg: The value of ``--data`` when parsing args
    :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
    :param cluster_fsid: The cluster fsid/uuid
    :param osd_fsid: The OSD fsid/uuid
    """
    if disk.is_partition(arg) or disk.is_device(arg):
        # we must create a vg, and then a single lv
        vg_name = "ceph-%s" % cluster_fsid
        if api.get_vg(vg_name=vg_name):
            # means we already have a group for this, make a different one
            # XXX this could end up being annoying for an operator, maybe?
            vg_name = "ceph-%s" % str(uuid.uuid4())
        api.create_vg(vg_name, arg)
        lv_name = "osd-%s-%s" % (device_type, osd_fsid)
        return api.create_lv(
            lv_name,
            vg_name,  # the volume group
            tags={'ceph.type': device_type})
    else:
        error = [
            'Cannot use device (%s).' % arg,
            'A vg/lv path or an existing device is needed'
        ]
        raise RuntimeError(' '.join(error))

    # unreachable: both branches above either return or raise
    raise RuntimeError('no data logical volume found with: %s' % arg)
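# A hypothetical invocation of prepare_device from the bluestore path, shown
# only to illustrate the argument roles (a sketch, not the caller's actual
# code; values are assumptions):
block_lv = self.prepare_device(
    '/dev/sdb',       # raw device passed via --data
    'block',          # bluestore device_type
    cluster_fsid,     # from [global] fsid in ceph.conf
    osd_fsid,         # generated or reused OSD uuid
)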
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db lvs from before
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        db_lv = db_lvs.pop()

        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def prepare_data_device(self, device_type, osd_uuid):
    """
    Check if ``self.args.data`` is a device or partition, to create an LV
    out of it with a distinct volume group name, assigning LV tags on it
    and ultimately, returning the logical volume object.  Failing to
    detect a device or partition will result in error.

    :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
    :param osd_uuid: The OSD uuid
    """
    device = self.args.data
    if disk.is_partition(device) or disk.is_device(device):
        # we must create a vg, and then a single lv
        lv_name_prefix = "osd-{}".format(device_type)
        kwargs = {
            'device': device,
            'tags': {'ceph.type': device_type},
        }
        logger.debug('data device size: {}'.format(self.args.data_size))
        if self.args.data_size != 0:
            kwargs['size'] = disk.Size.parse(self.args.data_size)
        return api.create_lv(lv_name_prefix, osd_uuid, **kwargs)
    else:
        error = [
            'Cannot use device ({}).'.format(device),
            'A vg/lv path or an existing device is needed'
        ]
        raise RuntimeError(' '.join(error))

    # unreachable: both branches above either return or raise
    raise RuntimeError('no data logical volume found with: {}'.format(device))
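# Hypothetical call site inside the bluestore branch of prepare(), where
# 'block' is the device_type and osd_fsid was generated earlier (a sketch,
# not the verbatim caller):
block_lv = self.prepare_data_device('block', osd_fsid)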
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    from uuid import uuid4

    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db lvs from before
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        db_lv = db_lvs.pop()

        # FIXME: no support for dmcrypt, crush class, etc...
        Create([
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
            device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
        data_lv = lvm.create_lv(
            'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True)
        journal_lv = lvm.create_lv(
            'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])

        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    ssd_paths = [d.abspath for d in self.blank_ssds]

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    for osd in self.computed['osds']:
        data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', data_vg.name)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def setup_device(self, device_type, device_name, tags, size, slots):
    """
    Check if ``device`` is an lv, if so, set the tags, making sure to
    update the tags with the lv_uuid and lv_path which the incoming tags
    will not have.

    If the device is not a logical volume, then retrieve the partition UUID
    by querying ``blkid``
    """
    if device_name is None:
        return '', '', tags
    tags['ceph.type'] = device_type
    tags['ceph.vdo'] = api.is_vdo(device_name)

    try:
        vg_name, lv_name = device_name.split('/')
        lv = api.get_first_lv(filters={'lv_name': lv_name,
                                       'vg_name': vg_name})
    except ValueError:
        lv = None

    if lv:
        uuid = lv.lv_uuid
        path = lv.lv_path
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
        lv.set_tags(tags)
    elif disk.is_device(device_name):
        # We got a disk, create an lv
        lv_type = "osd-{}".format(device_type)
        uuid = system.generate_uuid()
        tags['ceph.{}_uuid'.format(device_type)] = uuid
        kwargs = {
            'device': device_name,
            'tags': tags,
            'slots': slots,
        }
        # TODO: use get_block_db_size and co here to get configured size in
        # conf file
        if size != 0:
            kwargs['size'] = size
        lv = api.create_lv(lv_type, uuid, **kwargs)
        path = lv.lv_path
        tags['ceph.{}_device'.format(device_type)] = path
        lv.set_tags(tags)
    else:
        # otherwise assume this is a regular disk partition
        uuid = self.get_ptuuid(device_name)
        path = device_name
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
    return path, uuid, tags
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
            device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
        data_lv = lvm.create_lv(
            'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True)
        journal_lv = lvm.create_lv(
            'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])

        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    osd_vgs = []

    # create the vgs first, one per device (since this is colocating, it
    # picks the 'data' path)
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        osd_vgs.append(vg)

    # this gets us the LVM formatted string for the journal size
    journal_size = prepare.get_journal_size()

    # create the lvs from the vgs captured in the beginning
    for vg in osd_vgs:
        journal_lv = lvm.create_lv(
            'osd-journal', vg.name, size=journal_size, uuid_name=True)
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', vg.name)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def setup_device(self, device_type, device_name, tags, size):
    """
    Check if ``device`` is an lv, if so, set the tags, making sure to
    update the tags with the lv_uuid and lv_path which the incoming tags
    will not have.

    If the device is not a logical volume, then retrieve the partition UUID
    by querying ``blkid``
    """
    if device_name is None:
        return '', '', tags
    tags['ceph.type'] = device_type
    tags['ceph.vdo'] = api.is_vdo(device_name)

    lv = self.get_lv(device_name)
    if lv:
        uuid = lv.lv_uuid
        path = lv.lv_path
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
        lv.set_tags(tags)
    elif disk.is_device(device_name):
        # We got a disk, create an lv
        lv_type = "osd-{}".format(device_type)
        uuid = system.generate_uuid()
        tags['ceph.{}_uuid'.format(device_type)] = uuid
        kwargs = {
            'device': device_name,
            'tags': tags,
            'slots': getattr(self.args, 'block_{}_slots'.format(device_type), 1),
        }
        if size != 0:
            kwargs['size'] = disk.Size.parse(size)
        lv = api.create_lv(lv_type, uuid, **kwargs)
        path = lv.lv_path
        tags['ceph.{}_device'.format(device_type)] = path
        lv.set_tags(tags)
    else:
        # otherwise assume this is a regular disk partition
        uuid = self.get_ptuuid(device_name)
        path = device_name
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
    return path, uuid, tags
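# Hypothetical use of setup_device for a bluestore block.db device: the
# returned path/uuid end up as the ceph.db_device/ceph.db_uuid tags on the
# data lv (a sketch only; the argument values are assumptions):
db_device, db_uuid, tags = self.setup_device(
    'db',                     # device_type
    self.args.block_db,       # vg/lv path, raw device, or partition
    tags,                     # tags dict built for the data lv so far
    self.args.block_db_size,  # size; 0 defers to create_lv defaults
)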
def test_uses_size_too_large(self, m_get_first_lv, m_call, m_run, monkeypatch):
    m_get_first_lv.return_value = self.foo_volume
    with pytest.raises(RuntimeError):
        api.create_lv('foo', 0, vg=self.foo_group, size=5368709120,
                      tags={'ceph.type': 'data'})
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # since we are falling back to a block_db_size that might be "as large
    # as possible" we can't fully rely on LV format coming from the helper
    # function that looks up this value
    block_db_size = "%sG" % self.block_db_size.gb.as_int()

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True)

        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def test_uses_slots(self, m_get_first_lv, m_call, m_run, monkeypatch,
                    test_input, expected):
    m_get_first_lv.return_value = self.foo_volume
    api.create_lv('foo', 0, vg=self.foo_group, slots=test_input,
                  tags={'ceph.type': 'data'})
    # str(expected) reads the parametrized value before the name is rebound
    expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group']
    m_run.assert_called_with(expected)
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    if self.use_large_block_db:
        # make the block.db lvs as large as possible
        vg_free_count = str_to_int(db_vg.vg_free_count)
        db_lv_extents = int(vg_free_count / self.dbs_needed)
    else:
        db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True)

        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
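# A worked example of the use_large_block_db sizing above (the numbers are
# illustrative assumptions, not real fixture values): splitting the vg's free
# extents evenly across the block.db lvs still to be created.
vg_free_count = 25600             # str_to_int(db_vg.vg_free_count), assumed
dbs_needed = 4                    # block.db lvs still to create, assumed
db_lv_extents = int(vg_free_count / dbs_needed)
assert db_lv_extents == 6400      # each block.db lv gets a quarter of the vg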
def test_uses_size(self, m_get_lv, m_call, m_run, monkeypatch):
    m_get_lv.return_value = self.foo_volume
    api.create_lv('foo', 0, vg='foo_group', size='5G', tags={'ceph.type': 'data'})
    expected = ['lvcreate', '--yes', '-L', '5G', '-n', 'foo-0', 'foo_group']
    m_run.assert_called_with(expected)
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])

        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    if self.data_devs and self.db_or_journal_devs:
        blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
        elif self.common_vg and blank_db_dev_paths:
            # if a common vg exists then extend it with any blank ssds
            db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
        else:
            # one common vg with nothing else to extend can be used directly,
            # either this is one device with one vg, or multiple devices with
            # the same vg
            db_vg = self.common_vg

        if self.use_large_block_db:
            # make the block.db lvs as large as possible
            vg_free_count = str_to_int(db_vg.vg_free_count)
            db_lv_extents = int(vg_free_count / self.dbs_needed)
        else:
            db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

    if self.data_devs and self.wal_devs:
        blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

        if not self.common_wal_vg:
            wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                   name_prefix='ceph-block-wals')
        elif self.common_wal_vg and blank_wal_dev_paths:
            wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
        else:
            wal_vg = self.common_wal_vg

        if self.use_large_block_wal:
            # make the block.wal lvs as large as possible
            vg_free_count = str_to_int(wal_vg.vg_free_count)
            wal_lv_extents = int(vg_free_count / self.wals_needed)
        else:
            wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)

        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
        ]
        if 'block.db' in osd:
            db_lv = lvm.create_lv(
                'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True)
            command.extend(['--block.db', '{}/{}'.format(db_lv.vg_name, db_lv.name)])
        if 'block.wal' in osd:
            wal_lv = lvm.create_lv(
                'osd-block-wal', wal_vg.name, extents=wal_lv_extents, uuid_name=True)
            command.extend(['--block.wal', '{}/{}'.format(wal_lv.vg_name, wal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def prepare(self, args):
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(osd_fsid, json.dumps(secrets))
    if args.filestore:
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        data_lv = self.get_lv(args.data)
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        }

        journal_device, journal_uuid, tags = self.setup_device(
            'journal', args.journal, tags)

        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
    elif args.bluestore:
        block_lv = self.get_lv(args.data)
        if not block_lv:
            if disk.is_partition(args.data) or disk.is_device(args.data):
                # we must create a vg, and then a single lv
                vg_name = "ceph-%s" % cluster_fsid
                if api.get_vg(vg_name=vg_name):
                    # means we already have a group for this, make a different one
                    # XXX this could end up being annoying for an operator, maybe?
                    vg_name = "ceph-%s" % str(uuid.uuid4())
                api.create_vg(vg_name, args.data)
                block_name = "osd-block-%s" % osd_fsid
                block_lv = api.create_lv(
                    block_name,
                    vg_name,  # the volume group
                    tags={'ceph.type': 'block'})
            else:
                error = [
                    'Cannot use device (%s) for bluestore.' % args.data,
                    'A vg/lv path or an existing device is needed'
                ]
                raise RuntimeError(' '.join(error))

        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.block_device': block_lv.lv_path,
            'ceph.block_uuid': block_lv.lv_uuid,
        }

        wal_device, wal_uuid, tags = self.setup_device('wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device('db', args.block_db, tags)

        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # since we are falling back to a block_db_size that might be "as large
    # as possible" we can't fully rely on LV format coming from the helper
    # function that looks up this value
    block_db_size = "%sG" % self.block_db_size.gb.as_int()

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True)

        command = [
            '--bluestore',
            '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def test_uses_all(self, m_get_first_lv, m_call, m_run, monkeypatch):
    m_get_first_lv.return_value = self.foo_volume
    api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
    expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group']
    m_run.assert_called_with(expected)
def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch):
    m_get_single_lv.return_value = self.foo_volume
    api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
    expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group']
    m_run.assert_called_with(expected, run_on_host=True)
def test_calls_to_set_tags_arg(self, m_get_lv, m_set_tags, m_call, m_run, monkeypatch):
    m_get_lv.return_value = self.foo_volume
    api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
    tags = {
        "ceph.type": "data",
        "ceph.data_device": "/path"
    }
    m_set_tags.assert_called_with(tags)
def test_create_vg(self, m_get_first_lv, m_create_vg, m_get_device_vgs, m_call,
                   m_run, monkeypatch):
    m_get_first_lv.return_value = self.foo_volume
    m_get_device_vgs.return_value = []
    api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'})
    m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
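# Taken together, these tests suggest create_lv's contract: pass either vg=
# (an existing volume group) or device= (a raw device, on which a
# 'ceph'-prefixed vg is provisioned first), plus at most one of size=,
# extents=, or slots=, falling back to 100%FREE. A hedged sketch of a direct
# call under those assumptions (all values illustrative):
lv = api.create_lv(
    'osd-block',            # name prefix; second argument is appended to it
    0,                      # stands in for the osd fsid/uuid in the lv name
    device='/dev/sdb',      # raw device: a vg is created on it first
    size='5G',              # LVM-formatted size string
    tags={'ceph.type': 'block'},
)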