def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    osd_vgs = []

    # create the vgs first, one per device (since this is colocating, it
    # picks the 'data' path)
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        osd_vgs.append(vg)

    # create the lvs from the vgs captured in the beginning
    for vg in osd_vgs:
        # this is called again, getting us the LVM formatted string
        journal_size = prepare.get_journal_size()
        journal_lv = lvm.create_lv('osd-journal', vg.name, size=journal_size)
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', vg.name)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
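
# Illustration only (vg/lv names are hypothetical): the argv list assembled
# above is exactly what ``ceph-volume lvm create`` would receive as CLI
# arguments, so the final call maps to an invocation like this:
command = ['--filestore',
           '--data', 'ceph-abc123/osd-data',
           '--journal', 'ceph-abc123/osd-journal']
print('ceph-volume lvm create ' + ' '.join(command))
# ceph-volume lvm create --filestore --data ceph-abc123/osd-data --journal ceph-abc123/osd-journal
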
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db lvs
    # from before
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        db_lv = db_lvs.pop()

        command = [
            '--bluestore',
            '--data', '%s/%s' % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
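
# A minimal sketch (assumed semantics, not the ceph-volume API) of how a vg's
# free extents can be split into equal "parts", which is what the
# ``create_lvs(..., parts=N)`` calls above rely on:
def split_extents(vg_free_count, parts):
    """Divide free extents evenly; the first lv absorbs any remainder."""
    base = vg_free_count // parts
    sizes = [base] * parts
    sizes[0] += vg_free_count % parts
    return sizes

assert split_extents(10240, 4) == [2560, 2560, 2560, 2560]
assert split_extents(10241, 4) == [2561, 2560, 2560, 2560]
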
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create the vgs first, mapping them to the device path
    for osd in self.computed['osds']:
        path = osd['data']['path']
        if osd_vgs[path] is None:
            osd_vgs[path] = {
                'vg': lvm.create_vg(path),
                'parts': osd['data']['parts'],
            }

    # create the lvs from the vgs captured in the beginning
    for create in osd_vgs.values():
        lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
        vg_name = create['vg'].name
        for lv in lvs:
            command = ['--bluestore', '--data']
            command.append('%s/%s' % (vg_name, lv.name))
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(['--crush-device-class', self.args.crush_device_class])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    from uuid import uuid4

    # create the single vg for all block.db lv's first
    vg_info = self.computed['vgs'][0]
    vg = lvm.create_vg(vg_info['devices'])

    # now produce all the block.db lvs needed from that single vg
    db_lvs = lvm.create_lvs(vg, parts=vg_info['parts'], name_prefix='osd-block-db')

    # create the data lvs, and create the OSD with the matching block.db lvs
    # from before
    for osd in self.computed['osds']:
        vg = lvm.create_vg(osd['data']['path'])
        data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), vg.name)
        db_lv = db_lvs.pop()

        # FIXME: no support for dmcrypt, crush class, etc...
        Create([
            '--bluestore',
            '--data', '%s/%s' % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create the vgs first, mapping them to the device path
    for osd in self.computed['osds']:
        path = osd['data']['path']
        if osd_vgs[path] is None:
            osd_vgs[path] = {
                'vg': lvm.create_vg(path),
                'parts': osd['data']['parts'],
            }

    # create the lvs from the vgs captured in the beginning
    for create in osd_vgs.values():
        lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
        vg_name = create['vg'].name
        for lv in lvs:
            # FIXME: no support for dmcrypt, crush class, etc...
            Create([
                '--bluestore',
                '--data', '%s/%s' % (vg_name, lv.name),
            ]).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
        device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(
            size=self.journal_size.gb.as_int())['extents']
        data_uuid = system.generate_uuid()
        data_lv = lvm.create_lv(
            'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
        journal_uuid = system.generate_uuid()
        journal_lv = lvm.create_lv(
            'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
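
# Standalone sketch of the unit conversion used above; ceph-volume's
# ``disk.Size`` provides this directly, shown here with plain arithmetic
# (binary gigabytes assumed):
def bytes_to_gb(size_in_bytes):
    """Truncate a byte count down to whole gigabytes."""
    return size_in_bytes // (1024 ** 3)

assert bytes_to_gb(21474836480) == 20   # e.g. a 20 GB data device
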
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
        data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    ssd_paths = [d.abspath for d in self.blank_ssds]

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and ssd_paths:
        journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    for osd in self.computed['osds']:
        data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
        # no extents or size means it will use 100%FREE
        data_lv = lvm.create_lv('osd-data', data_vg.name)
        journal_lv = lvm.create_lv(
            'osd-journal', journal_vg.name, size=journal_size, uuid_name=True)

        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
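
# Rough sketch of the underlying LVM call made when no size/extents are
# passed (the 100%FREE case noted above); ceph-volume wraps this itself,
# and the vg/lv names here are hypothetical:
import subprocess

subprocess.run([
    'lvcreate', '--yes',
    '-l', '100%FREE',   # consume every free extent in the vg
    '-n', 'osd-data',   # logical volume name
    'ceph-data-vg',     # volume group to allocate from
], check=True)
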
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
        data_vgs[osd['data']['path']] = vg

    if self.use_large_block_db:
        # make the block.db lvs as large as possible
        vg_free_count = str_to_int(db_vg.vg_free_count)
        db_lv_extents = int(vg_free_count / self.dbs_needed)
    else:
        db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True)

        command = [
            '--bluestore',
            '--data', '%s/%s' % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
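
# Worked example of the "as large as possible" branch above (numbers are
# made up): with 25600 free extents in the db vg and 4 block.db lvs needed,
# each lv receives an equal integer share of the free extents:
vg_free_count = 25600
dbs_needed = 4
db_lv_extents = vg_free_count // dbs_needed
assert db_lv_extents == 6400
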
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc.) and offload the OSD creation to
    ``lvm create``
    """
    blank_ssd_paths = [d.abspath for d in self.blank_ssds]
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
    # if a common vg exists then extend it with any blank ssds
    elif self.common_vg and blank_ssd_paths:
        db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
    # one common vg with nothing else to extend can be used directly,
    # either this is one device with one vg, or multiple devices with the
    # same vg
    else:
        db_vg = self.common_vg

    # since we are falling back to a block_db_size that might be "as large
    # as possible" we can't fully rely on the LV format coming from the
    # helper function that looks up this value
    block_db_size = "%sG" % self.block_db_size.gb.as_int()

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
        data_vgs[osd['data']['path']] = vg

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
        data_lv = lvm.create_lv(
            'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True)
        db_lv = lvm.create_lv(
            'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True)

        command = [
            '--bluestore',
            '--data', '%s/%s' % (data_lv.vg_name, data_lv.name),
            '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
        ]
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(['--crush-device-class', self.args.crush_device_class])

        Create(command).main()
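
# Illustration (all names/values hypothetical): "%sG" yields an LVM-style
# size string, and the argv list above maps one-to-one onto the CLI:
block_db_size = "%sG" % 30
assert block_db_size == '30G'   # accepted as an lvcreate-style size

command = [
    '--bluestore',
    '--data', 'ceph-block-abc/osd-block-123',
    '--block.db', 'ceph-block-dbs-def/osd-block-db-456',
]
print('ceph-volume lvm create ' + ' '.join(command))
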