def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    # seed one map slot per unique data device path; filled in below
    device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create 1 vg per data device first, mapping them to the device path,
    # when the lvs get created later, it can create as many as needed,
    # including the journals since it is going to be collocated
    for osd in self.computed['osds']:
        vg = device_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
            device_vgs[osd['data']['path']] = vg

    # create the lvs from the per-device vg created in the beginning
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        # requested sizes are carried in bytes; the vg sizing API works in GB
        data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
        device_vg = device_vgs[data_path]
        data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
        journal_lv_extents = device_vg.sizing(
            size=self.journal_size.gb.as_int())['extents']
        data_uuid = system.generate_uuid()
        data_lv = lvm.create_lv(
            'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
        journal_uuid = system.generate_uuid()
        journal_lv = lvm.create_lv(
            'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)

        # build the `ceph-volume lvm prepare/create` CLI invocation for this OSD
        command = ['--filestore', '--data']
        command.append('%s/%s' % (device_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            # re-use a pre-assigned OSD id when the planner provided one
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def prepare(self):
    """
    Generate the cephx secret and fsid for a new bluestore OSD, reserve an
    OSD id with the monitor, and delegate the on-disk work to
    ``prepare_bluestore``.
    """
    secrets = {'cephx_secret': prepare_utils.create_key()}
    osd_fsid = system.generate_uuid()
    if self.args.crush_device_class:
        secrets['crush_device_class'] = self.args.crush_device_class
    tmpfs = not self.args.no_tmpfs
    # fall back to empty strings when no wal/db devices were requested
    wal = self.args.block_wal if self.args.block_wal else ""
    db = self.args.block_db if self.args.block_db else ""

    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets))

    prepare_bluestore(
        self.args.data,
        wal,
        db,
        secrets,
        self.osd_id,
        osd_fsid,
        tmpfs,
    )
def prepare(self):
    """
    Build the secrets for a new bluestore OSD (cephx key plus, when
    encryption is requested, the dmcrypt key and a lockbox secret),
    reserve an OSD id, and delegate to ``prepare_bluestore``.

    :raises RuntimeError: when ``--dmcrypt`` was requested but the
        ``CEPH_VOLUME_DMCRYPT_SECRET`` environment variable is unset or empty
    """
    secrets = {'cephx_secret': prepare_utils.create_key()}
    encrypted = 1 if self.args.dmcrypt else 0
    cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()

    if encrypted:
        # The dmcrypt key is handed to us through the environment; failing
        # early with a clear message beats storing ``None`` as the key and
        # having encryption fail later in an obscure way.
        dmcrypt_key = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
        if not dmcrypt_key:
            raise RuntimeError(
                '--dmcrypt was passed, but the CEPH_VOLUME_DMCRYPT_SECRET '
                'environment variable is not set')
        secrets['dmcrypt_key'] = dmcrypt_key
        secrets['cephx_lockbox_secret'] = cephx_lockbox_secret  # dummy value to make `ceph osd new` not complaining

    osd_fsid = system.generate_uuid()
    crush_device_class = self.args.crush_device_class
    if crush_device_class:
        secrets['crush_device_class'] = crush_device_class
    tmpfs = not self.args.no_tmpfs
    wal = ""
    db = ""
    if self.args.block_wal:
        wal = self.args.block_wal
    if self.args.block_db:
        db = self.args.block_db

    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets))

    prepare_bluestore(
        self.args.data,
        wal,
        db,
        secrets,
        self.osd_id,
        osd_fsid,
        tmpfs,
    )
def prepare_filestore(device, journal, secrets, id_=None, fsid=None):
    """
    Set up a filestore OSD on an already-provisioned device/journal pair.

    :param device: The name of the logical volume to work with
    :param journal: similar to device but can also be a regular/plain disk
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)
    # re-use the fsid/id when given, so a failed prepare can be retried
    fsid = fsid if fsid else system.generate_uuid()
    osd_id = id_ if id_ else prepare_utils.create_id(fsid, json_secrets)
    # create the OSD directory, then provision the data device into it
    prepare_utils.create_osd_path(osd_id)
    prepare_utils.format_device(device)
    prepare_utils.mount_osd(device, osd_id)
    # symlink the journal and pull the latest monmap
    prepare_utils.link_journal(journal, osd_id)
    prepare_utils.get_monmap(osd_id)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_filestore(osd_id, fsid)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
def prepare_bluestore(block, wal, db, secrets, id_=None, fsid=None):
    """
    Set up a bluestore OSD on an already-provisioned block device.

    :param block: The name of the logical volume for the bluestore data
    :param wal: a regular/plain disk or logical volume, to be used for block.wal
    :param db: a regular/plain disk or logical volume, to be used for block.db
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)
    # re-use an existing fsid/id when given, in case a prepare failed before
    if not fsid:
        fsid = system.generate_uuid()
    if not id_:
        id_ = prepare_utils.create_id(fsid, json_secrets)
    osd_id = id_
    # the OSD directory is backed by tmpfs for bluestore
    prepare_utils.create_osd_path(osd_id, tmpfs=True)
    prepare_utils.link_block(block, osd_id)
    prepare_utils.get_monmap(osd_id)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_bluestore(
        osd_id, fsid, keyring=cephx_secret, wal=wal, db=db)
def prepare_bluestore(block, wal, db, secrets, id_=None, fsid=None):
    """
    Provision the filesystem layer of a bluestore OSD.

    :param block: The name of the logical volume for the bluestore data
    :param wal: a regular/plain disk or logical volume, to be used for block.wal
    :param db: a regular/plain disk or logical volume, to be used for block.db
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    key = secrets.get('cephx_secret', prepare_utils.create_key())
    serialized_secrets = json.dumps(secrets)
    # reuse an fsid/id from a previously failed prepare when provided
    osd_fsid = fsid or system.generate_uuid()
    osd_id = id_ or prepare_utils.create_id(osd_fsid, serialized_secrets)
    # create the (tmpfs-backed) OSD directory and symlink the block device
    prepare_utils.create_osd_path(osd_id, tmpfs=True)
    prepare_utils.link_block(block, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, key)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_bluestore(
        osd_id,
        osd_fsid,
        keyring=key,
        wal=wal,
        db=db
    )
def prepare_filestore(device, journal, secrets, id_=None, fsid=None):
    """
    Provision the filesystem layer of a filestore OSD.

    :param device: The name of the logical volume to work with
    :param journal: similar to device but can also be a regular/plain disk
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)
    # allow re-using an existing fsid, in case prepare failed
    fsid = fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = id_ or prepare_utils.create_id(fsid, json_secrets)
    # create the directory
    prepare_utils.create_osd_path(osd_id)
    # format the device
    prepare_utils.format_device(device)
    # mount the data device
    prepare_utils.mount_osd(device, osd_id)
    # symlink the journal
    prepare_utils.link_journal(journal, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_filestore(osd_id, fsid)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
def create(self, args):
    """
    Prepare and then activate a raw OSD, rolling back the reserved OSD id
    if activation fails.

    :param args: parsed CLI namespace (osd_fsid, data, no_tmpfs, no_systemd, ...)
    """
    if not args.osd_fsid:
        args.osd_fsid = system.generate_uuid()
    prepare_step = Prepare([])
    prepare_step.safe_prepare(args)
    osd_id = prepare_step.osd_id
    try:
        # we try this for activate only when 'creating' an OSD,
        # because a rollback should not happen when doing normal
        # activation. For example when starting an OSD, systemd
        # will call activate, which would never need to be rolled
        # back.
        a = Activate([])
        # NOTE(review): this assigns ``self.args`` while the rest of the
        # method uses the ``args`` parameter — presumably both are the same
        # namespace here; confirm against the caller
        a.args = self.args
        a.activate([args.data],
                   tmpfs=not args.no_tmpfs,
                   systemd=not args.no_systemd)
    except Exception:
        logger.exception(
            'raw activate was unable to complete, while creating the OSD')
        logger.info('will rollback OSD ID creation')
        # free the reserved OSD id so a retry does not leak ids
        rollback_osd(args, osd_id)
        raise
    terminal.success("ceph-volume raw create successful for: %s" % args.data)
def setup_device(self, device_type, device_name, tags, size, slots):
    """
    Check if ``device`` is an lv, if so, set the tags, making sure to
    update the tags with the lv_uuid and lv_path which the incoming tags
    will not have.

    If the device is not a logical volume, then retrieve the partition UUID
    by querying ``blkid``

    :param device_type: role name used in the ``ceph.*`` tag keys (e.g. 'journal')
    :param device_name: 'vg/lv' string, raw device, partition path, or None
    :param tags: dict of ceph tags to update and apply to the device
    :param size: lv size; when 0 no ``size`` kwarg is passed to ``create_lv``
    :param slots: forwarded to ``create_lv`` as the number of lv slots
    :return: tuple of (path, uuid, tags) for the resolved device
    """
    if device_name is None:
        # no device was requested for this role
        return '', '', tags
    tags['ceph.type'] = device_type
    tags['ceph.vdo'] = api.is_vdo(device_name)

    try:
        # a 'vg/lv' string means an existing logical volume was given
        vg_name, lv_name = device_name.split('/')
        lv = api.get_first_lv(filters={'lv_name': lv_name,
                                       'vg_name': vg_name})
    except ValueError:
        # not in 'vg/lv' form, so this is not an existing lv
        lv = None

    if lv:
        uuid = lv.lv_uuid
        path = lv.lv_path
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
        lv.set_tags(tags)
    elif disk.is_device(device_name):
        # We got a disk, create an lv
        lv_type = "osd-{}".format(device_type)
        uuid = system.generate_uuid()
        tags['ceph.{}_uuid'.format(device_type)] = uuid
        kwargs = {
            'device': device_name,
            'tags': tags,
            'slots': slots
        }
        #TODO use get_block_db_size and co here to get configured size in
        #conf file
        if size != 0:
            kwargs['size'] = size
        lv = api.create_lv(
            lv_type,
            uuid,
            **kwargs)
        path = lv.lv_path
        tags['ceph.{}_device'.format(device_type)] = path
        lv.set_tags(tags)
    else:
        # otherwise assume this is a regular disk partition
        uuid = self.get_ptuuid(device_name)
        path = device_name
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
    return path, uuid, tags
def create(self, args):
    """
    Prepare a new lvm OSD and immediately activate it; if activation fails
    the OSD id reserved during prepare is rolled back before re-raising.

    :param args: parsed CLI namespace; ``osd_fsid`` is generated when absent
    """
    if not args.osd_fsid:
        args.osd_fsid = system.generate_uuid()
    prepare_step = Prepare([])
    prepare_step.safe_prepare(args)
    osd_id = prepare_step.osd_id
    try:
        # we try this for activate only when 'creating' an OSD, because a rollback should not
        # happen when doing normal activation. For example when starting an OSD, systemd will call
        # activate, which would never need to be rolled back.
        Activate([]).activate(args)
    except Exception:
        # logger.exception also records the traceback, which logger.error
        # silently dropped
        logger.exception('lvm activate was unable to complete, while creating the OSD')
        logger.info('will rollback OSD ID creation')
        rollback_osd(args, osd_id)
        raise
def setup_device(self, device_type, device_name, tags, size):
    """
    Check if ``device`` is an lv, if so, set the tags, making sure to
    update the tags with the lv_uuid and lv_path which the incoming tags
    will not have.

    If the device is not a logical volume, then retrieve the partition UUID
    by querying ``blkid``

    :param device_type: role name used in the ``ceph.*`` tag keys (e.g. 'wal')
    :param device_name: 'vg/lv' string, raw device, partition path, or None
    :param tags: dict of ceph tags to update and apply to the device
    :param size: lv size; when 0 no ``size`` kwarg is passed to ``create_lv``
    :return: tuple of (path, uuid, tags) for the resolved device
    """
    if device_name is None:
        # no device was requested for this role
        return '', '', tags
    tags['ceph.type'] = device_type
    tags['ceph.vdo'] = api.is_vdo(device_name)
    lv = self.get_lv(device_name)
    if lv:
        # existing lv: tag it and report its uuid/path
        uuid = lv.lv_uuid
        path = lv.lv_path
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
        lv.set_tags(tags)
    elif disk.is_device(device_name):
        # We got a disk, create an lv
        lv_type = "osd-{}".format(device_type)
        uuid = system.generate_uuid()
        tags['ceph.{}_uuid'.format(device_type)] = uuid
        kwargs = {
            'device': device_name,
            'tags': tags,
            # slot count comes from the matching CLI flag (e.g.
            # --block-db-slots); defaults to 1 when the flag is absent
            'slots': getattr(self.args,
                             'block_{}_slots'.format(device_type),
                             1),
        }
        if size != 0:
            kwargs['size'] = disk.Size.parse(size)
        lv = api.create_lv(lv_type, uuid, **kwargs)
        path = lv.lv_path
        tags['ceph.{}_device'.format(device_type)] = path
        lv.set_tags(tags)
    else:
        # otherwise assume this is a regular disk partition
        uuid = self.get_ptuuid(device_name)
        path = device_name
        tags['ceph.%s_uuid' % device_type] = uuid
        tags['ceph.%s_device' % device_type] = path
    return path, uuid, tags
def create(self, args):
    """
    Prepare a new lvm OSD and immediately activate it; if activation fails
    the OSD id reserved during prepare is rolled back before re-raising.

    :param args: parsed CLI namespace; ``osd_fsid`` is generated when absent
    """
    if not args.osd_fsid:
        args.osd_fsid = system.generate_uuid()
    prepare_step = Prepare([])
    prepare_step.safe_prepare(args)
    osd_id = prepare_step.osd_id
    try:
        # we try this for activate only when 'creating' an OSD, because a rollback should not
        # happen when doing normal activation. For example when starting an OSD, systemd will call
        # activate, which would never need to be rolled back.
        Activate([]).activate(args)
    except Exception:
        # logger.exception also records the traceback, which logger.error
        # silently dropped
        logger.exception(
            'lvm activate was unable to complete, while creating the OSD')
        logger.info('will rollback OSD ID creation')
        rollback_osd(args, osd_id)
        raise
def prepare(self):
    """
    Build the secrets for a new bluestore OSD, honoring a caller-supplied
    fsid and OSD id so a failed prepare can be retried, then delegate the
    on-disk work to ``prepare_bluestore``.
    """
    secrets = {'cephx_secret': prepare_utils.create_key()}
    if self.args.crush_device_class:
        secrets['crush_device_class'] = self.args.crush_device_class
    # re-use a caller-provided fsid when present
    osd_fsid = self.args.osd_fsid or system.generate_uuid()
    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(
        osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
    prepare_bluestore(
        self.args.data,
        secrets,
        self.osd_id,
        osd_fsid,
        not self.args.no_tmpfs,
    )
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # seed one map slot per unique data device path; filled in below
    osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # create the vgs first, mapping them to the device path
    for osd in self.computed['osds']:
        vg = osd_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'])
            osd_vgs[osd['data']['path']] = {
                'vg': vg,
                # how many lvs to carve out of this vg
                'parts': osd['data']['parts']
            }

    # create the lvs from the vgs captured in the beginning
    for create in osd_vgs.values():
        block_uuid = system.generate_uuid()
        lvs = lvm.create_lvs(
            'osd-data', block_uuid, vg=create['vg'], parts=create['parts'])
        vg_name = create['vg'].name
        # one `lvm prepare/create` invocation per created lv
        for lv in lvs:
            command = ['--bluestore', '--data']
            command.append('%s/%s' % (vg_name, lv.name))
            if self.args.dmcrypt:
                command.append('--dmcrypt')
            if self.args.no_systemd:
                command.append('--no-systemd')
            if self.args.crush_device_class:
                command.extend(
                    ['--crush-device-class', self.args.crush_device_class])
            if self.osd_ids:
                # consume pre-assigned OSD ids in order
                command.extend(['--osd-id', self.osd_ids.pop(0)])

            if self.args.prepare:
                Prepare(command).main()
            else:
                Create(command).main()
def prepare_filestore(device, journal, secrets, tags, id_=None, fsid=None):
    """
    Provision the filesystem layer of a filestore OSD, transparently
    handling dmcrypt-encrypted devices when a ``dmcrypt_key`` secret is set.

    :param device: The name of the logical volume to work with
    :param journal: similar to device but can also be a regular/plain disk
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param tags: dict of ``ceph.*`` tags; the dmcrypt path reads
        ``ceph.cephx_lockbox_secret`` from it
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)
    # allow re-using an existing fsid, in case prepare failed
    fsid = fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = id_ or prepare_utils.create_id(fsid, json_secrets)
    # encryption-only operations
    if secrets.get('dmcrypt_key'):
        # format and open ('decrypt' devices) and re-assign the device and journal
        # variables so that the rest of the process can use the mapper paths
        key = secrets['dmcrypt_key']
        device = prepare_dmcrypt(key, device, 'data', tags)
        journal = prepare_dmcrypt(key, journal, 'journal', tags)
    # create the directory
    prepare_utils.create_osd_path(osd_id)
    # format the device
    prepare_utils.format_device(device)
    # mount the data device
    prepare_utils.mount_osd(device, osd_id)
    # symlink the journal
    prepare_utils.link_journal(journal, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_filestore(osd_id, fsid)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
    if secrets.get('dmcrypt_key'):
        # if the device is going to get activated right away, this can be done
        # here, otherwise it will be recreated
        encryption_utils.write_lockbox_keyring(
            osd_id, fsid, tags['ceph.cephx_lockbox_secret'])
def prepare_bluestore(block, wal, db, secrets, tags, id_=None, fsid=None):
    """
    Provision the filesystem layer of a bluestore OSD, transparently
    handling dmcrypt-encrypted devices when a ``dmcrypt_key`` secret is set.

    :param block: The name of the logical volume for the bluestore data
    :param wal: a regular/plain disk or logical volume, to be used for block.wal
    :param db: a regular/plain disk or logical volume, to be used for block.db
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param tags: dict of ``ceph.*`` tags used when opening encrypted devices
    :param id_: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    json_secrets = json.dumps(secrets)
    # encryption-only operations
    if secrets.get('dmcrypt_key'):
        # If encrypted, there is no need to create the lockbox keyring file because
        # bluestore re-creates the files and does not have support for other files
        # like the custom lockbox one. This will need to be done on activation.
        # format and open ('decrypt' devices) and re-assign the device and journal
        # variables so that the rest of the process can use the mapper paths
        key = secrets['dmcrypt_key']
        block = prepare_dmcrypt(key, block, 'block', tags)
        wal = prepare_dmcrypt(key, wal, 'wal', tags)
        db = prepare_dmcrypt(key, db, 'db', tags)
    # allow re-using an existing fsid, in case prepare failed
    fsid = fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = id_ or prepare_utils.create_id(fsid, json_secrets)
    # create the directory
    prepare_utils.create_osd_path(osd_id, tmpfs=True)
    # symlink the block
    prepare_utils.link_block(block, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_bluestore(osd_id, fsid,
                                     keyring=cephx_secret,
                                     wal=wal,
                                     db=db)
def prepare(self, args):
    """
    Tag and prepare the volumes for a new filestore or bluestore OSD and
    delegate to ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace (data, journal, block_wal, block_db, ...)
    :raises RuntimeError: when --filestore is used without --journal
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    self.osd_id = args.osd_id or prepare_utils.create_id(
        osd_fsid, json.dumps(secrets))
    if args.filestore:
        if not args.journal:
            raise RuntimeError(
                '--journal is required when using --filestore')
        # provision the data device as an lv when it is not one already
        data_lv = self.get_lv(args.data)
        if not data_lv:
            data_lv = self.prepare_device(args.data, 'data', cluster_fsid, osd_fsid)
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': self.osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        }
        journal_device, journal_uuid, tags = self.setup_device(
            'journal', args.journal, tags)
        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)
        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=self.osd_id,
            fsid=osd_fsid,
        )
    elif args.bluestore:
        # provision the block device as an lv when it is not one already
        block_lv = self.get_lv(args.data)
        if not block_lv:
            block_lv = self.prepare_device(args.data, 'block', cluster_fsid, osd_fsid)
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': self.osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.block_device': block_lv.lv_path,
            'ceph.block_uuid': block_lv.lv_uuid,
        }
        wal_device, wal_uuid, tags = self.setup_device(
            'wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device(
            'db', args.block_db, tags)
        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)
        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            id_=self.osd_id,
            fsid=osd_fsid,
        )
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (data, journal) and offload the OSD creation to ``lvm create``
    """
    blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
    # seed one map slot per unique data device path; filled in below
    data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])

    # no common vg is found, create one with all the blank SSDs
    if not self.common_vg:
        journal_vg = lvm.create_vg(blank_journal_dev_paths,
                                   name_prefix='ceph-journals')
    # a vg exists that can be extended
    elif self.common_vg and blank_journal_dev_paths:
        journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
    # one common vg with nothing else to extend can be used directly
    else:
        journal_vg = self.common_vg

    journal_size = prepare.get_journal_size(lv_format=True)

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
            data_vgs[osd['data']['path']] = vg

    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        # the data lv takes the whole device vg
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_uuid = system.generate_uuid()
        data_lv = lvm.create_lv(
            'osd-data', data_uuid, vg=data_vg, extents=data_lv_extents)
        # journals come from the shared SSD-backed vg
        journal_uuid = system.generate_uuid()
        journal_lv = lvm.create_lv(
            'osd-journal', journal_uuid, vg=journal_vg, size=journal_size)

        # build the `ceph-volume lvm prepare/create` CLI invocation
        command = ['--filestore', '--data']
        command.append('%s/%s' % (data_vg.name, data_lv.name))
        command.extend(
            ['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            # re-use a pre-assigned OSD id when the planner provided one
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def prepare(self, args):
    """
    Tag the data (and journal, for filestore) volumes for a new OSD and
    delegate to ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace; ``args.data`` must be a ``vg/lv`` path
    :raises RuntimeError: when the data lv cannot be found, --filestore is
        used without --journal, or the journal device does not exist
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')
    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        journal_device = None
        journal_lv = self.get_journal_lv(args.journal)
        # check if we have an actual path to a device, which is allowed
        if not journal_lv:
            if os.path.exists(args.journal):
                journal_device = args.journal
            else:
                raise RuntimeError(
                    '--journal specified an invalid or non-existent device: %s' % args.journal
                )
        # Otherwise the journal_device is the path to the lv
        else:
            journal_device = journal_lv.lv_path
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.data_device': data_lv.lv_path,
            })

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.data_device': data_lv.lv_path,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def prepare(self, args):
    """
    Tag the data (and journal, for filestore) volumes for a new OSD and
    delegate to ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace; ``args.data`` must be a ``vg/lv`` path
    :raises RuntimeError: when the data lv cannot be found, --filestore is
        used without --journal, or the journal device does not exist
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(
        fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')
    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        if not args.journal:
            raise RuntimeError(
                '--journal is required when using --filestore')

        journal_device = None
        journal_lv = self.get_journal_lv(args.journal)
        # check if we have an actual path to a device, which is allowed
        if not journal_lv:
            if os.path.exists(args.journal):
                journal_device = args.journal
            else:
                raise RuntimeError(
                    '--journal specified an invalid or non-existent device: %s' % args.journal)
        # Otherwise the journal_device is the path to the lv
        else:
            journal_device = journal_lv.lv_path
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.data_device': data_lv.lv_path,
            })

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.data_device': data_lv.lv_path,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def prepare(self, args):
    """
    Tag the data and journal volumes for a new OSD and delegate to
    ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace; ``args.data`` must be a ``vg/lv`` path
    :raises RuntimeError: when the data lv cannot be found or --filestore is
        used without --journal
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    # (a stale commented-out create_id call without the secrets argument was
    # removed here; the live call below is the correct one)
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')
    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        journal_lv = self.get_journal_lv(args.journal)
        if journal_lv:
            journal_device = journal_lv.lv_path
            journal_uuid = journal_lv.lv_uuid
            # we can only set tags on an lv, the pv (if any) can't as we
            # aren't making it part of an lvm group (vg)
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.journal_uuid': journal_uuid,
                'ceph.data_device': data_lv.lv_path,
                'ceph.data_uuid': data_lv.lv_uuid,
            })
        # allow a file
        elif os.path.isfile(args.journal):
            journal_uuid = ''
            journal_device = args.journal
        # otherwise assume this is a regular disk partition
        else:
            journal_uuid = self.get_journal_ptuuid(args.journal)
            journal_device = args.journal

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.journal_uuid': journal_uuid,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def prepare(self, args):
    """
    Tag the data and journal volumes for a new OSD and delegate to
    ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace; ``args.data`` must be a ``vg/lv`` path
    :raises RuntimeError: when the data lv cannot be found or --filestore is
        used without --journal
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    vg_name, lv_name = args.data.split('/')
    if args.filestore:
        data_lv = api.get_lv(lv_name=lv_name, vg_name=vg_name)

        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)

        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        journal_lv = self.get_journal_lv(args.journal)
        if journal_lv:
            journal_device = journal_lv.lv_path
            journal_uuid = journal_lv.lv_uuid
            # we can only set tags on an lv, the pv (if any) can't as we
            # aren't making it part of an lvm group (vg)
            journal_lv.set_tags({
                'ceph.type': 'journal',
                'ceph.osd_fsid': fsid,
                'ceph.osd_id': osd_id,
                'ceph.cluster_fsid': cluster_fsid,
                'ceph.journal_device': journal_device,
                'ceph.journal_uuid': journal_uuid,
                'ceph.data_device': data_lv.lv_path,
                'ceph.data_uuid': data_lv.lv_uuid,
            })
        # allow a file
        elif os.path.isfile(args.journal):
            journal_uuid = ''
            journal_device = args.journal
        # otherwise assume this is a regular disk partition
        else:
            journal_uuid = self.get_journal_ptuuid(args.journal)
            journal_device = args.journal

        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.journal_uuid': journal_uuid,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def prepare(self, args):
    """
    Tag and prepare the volumes for a new filestore or bluestore OSD and
    delegate to ``prepare_filestore``/``prepare_bluestore``.

    :param args: parsed CLI namespace (data, journal, block_wal, block_db, ...)
    :raises RuntimeError: when --filestore is used without --journal
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(osd_fsid, json.dumps(secrets))
    if args.filestore:
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')
        # provision the data device as an lv when it is not one already
        data_lv = self.get_lv(args.data)
        if not data_lv:
            data_lv = self.prepare_device(args.data, 'data', cluster_fsid, osd_fsid)
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        }
        journal_device, journal_uuid, tags = self.setup_device('journal', args.journal, tags)
        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
    elif args.bluestore:
        # provision the block device as an lv when it is not one already
        block_lv = self.get_lv(args.data)
        if not block_lv:
            block_lv = self.prepare_device(args.data, 'block', cluster_fsid, osd_fsid)
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.block_device': block_lv.lv_path,
            'ceph.block_uuid': block_lv.lv_uuid,
        }
        wal_device, wal_uuid, tags = self.setup_device('wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device('db', args.block_db, tags)
        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
def create(self, args):
    """
    Convenience wrapper: run ``prepare`` followed by ``activate`` for a
    single OSD, generating an fsid when none was supplied.
    """
    if not args.osd_fsid:
        args.osd_fsid = system.generate_uuid()
    prepare_step = Prepare([])
    prepare_step.prepare(args)
    activate_step = Activate([])
    activate_step.activate(args)
def prepare(self, args):
    """
    Prepare a single OSD (filestore or bluestore) from parsed CLI ``args``.

    For bluestore, a raw device or partition is accepted and a dedicated
    vg/lv is created for it; for filestore an existing data lv is required.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    osd_id = args.osd_id or prepare_utils.create_id(
        osd_fsid, json.dumps(secrets))
    if args.filestore:
        if not args.journal:
            raise RuntimeError(
                '--journal is required when using --filestore')
        # filestore path requires the data lv to already exist
        data_lv = self.get_lv(args.data)
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.data_device': data_lv.lv_path,
            'ceph.data_uuid': data_lv.lv_uuid,
        }
        # setup_device resolves the journal and extends ``tags`` with the
        # journal device/uuid entries (contract inferred from usage — verify)
        journal_device, journal_uuid, tags = self.setup_device(
            'journal', args.journal, tags)
        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)
        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
    elif args.bluestore:
        block_lv = self.get_lv(args.data)
        if not block_lv:
            if disk.is_partition(args.data) or disk.is_device(args.data):
                # we must create a vg, and then a single lv
                vg_name = "ceph-%s" % cluster_fsid
                if api.get_vg(vg_name=vg_name):
                    # means we already have a group for this, make a different one
                    # XXX this could end up being annoying for an operator, maybe?
                    vg_name = "ceph-%s" % str(uuid.uuid4())
                api.create_vg(vg_name, args.data)
                block_name = "osd-block-%s" % osd_fsid
                block_lv = api.create_lv(
                    block_name,
                    vg_name,  # the volume group
                    tags={'ceph.type': 'block'})
            else:
                error = [
                    'Cannot use device (%s) for bluestore. ' % args.data,
                    'A vg/lv path or an existing device is needed']
                raise RuntimeError(' '.join(error))
        tags = {
            'ceph.osd_fsid': osd_fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.cluster_name': conf.cluster,
            'ceph.block_device': block_lv.lv_path,
            'ceph.block_uuid': block_lv.lv_uuid,
        }
        # wal and db are optional; setup_device tolerates empty arguments
        wal_device, wal_uuid, tags = self.setup_device(
            'wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device(
            'db', args.block_db, tags)
        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)
        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            id_=osd_id,
            fsid=osd_fsid,
        )
def prepare(self):
    """
    Prepare a single OSD (filestore or bluestore) from ``self.args``.

    Generates cephx (and optional dmcrypt/lockbox) secrets, resolves or
    creates the OSD id and fsid, finds or creates the backing lv, sets the
    ceph.* LVM tags, and delegates to ``prepare_filestore`` /
    ``prepare_bluestore``.

    Sets ``self.osd_id`` as a side effect.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    encrypted = 1 if self.args.dmcrypt else 0
    # NOTE: removed a redundant ``cephx_lockbox_secret = ''`` assignment
    # that was immediately overwritten by the line below.
    cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()

    if encrypted:
        secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
        secrets['cephx_lockbox_secret'] = cephx_lockbox_secret

    cluster_fsid = self.get_cluster_fsid()
    osd_fsid = self.args.osd_fsid or system.generate_uuid()
    crush_device_class = self.args.crush_device_class
    if crush_device_class:
        secrets['crush_device_class'] = crush_device_class
    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(
        osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
    tags = {
        'ceph.osd_fsid': osd_fsid,
        'ceph.osd_id': self.osd_id,
        'ceph.cluster_fsid': cluster_fsid,
        'ceph.cluster_name': conf.cluster,
        'ceph.crush_device_class': crush_device_class,
        'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
    }
    if self.args.filestore:
        if not self.args.journal:
            logger.info(('no journal was specifed, creating journal lv '
                         'on {}').format(self.args.data))
            self.args.journal = self.args.data
            self.args.journal_size = disk.Size(g=5)
            # need to adjust data size/slots for colocated journal
            if self.args.data_size:
                self.args.data_size -= self.args.journal_size
            if self.args.data_slots == 1:
                self.args.data_slots = 0
            else:
                raise RuntimeError('Can\'t handle multiple filestore OSDs '
                                   'with colocated journals yet. Please '
                                   'create journal LVs manually')
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted

        journal_device, journal_uuid, tags = self.setup_device(
            'journal', self.args.journal, tags, self.args.journal_size,
            self.args.journal_slots)

        try:
            # a "vg/lv" spec means an existing lv; anything else raises
            # ValueError on unpacking and falls through to device creation
            vg_name, lv_name = self.args.data.split('/')
            data_lv = api.get_single_lv(filters={'lv_name': lv_name,
                                                 'vg_name': vg_name})
        except ValueError:
            data_lv = None

        if not data_lv:
            data_lv = self.prepare_data_device('data', osd_fsid)

        tags['ceph.data_device'] = data_lv.lv_path
        tags['ceph.data_uuid'] = data_lv.lv_uuid
        tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)
        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)
        if not journal_device.startswith('/'):
            # we got a journal lv, set rest of the tags
            # NOTE(review): this re-looks-up by vg_name/lv_name from the
            # *data* spec, not the journal's — confirm this is intentional
            api.get_single_lv(filters={'lv_name': lv_name,
                                       'vg_name': vg_name}).set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
    elif self.args.bluestore:
        try:
            vg_name, lv_name = self.args.data.split('/')
            block_lv = api.get_single_lv(filters={'lv_name': lv_name,
                                                  'vg_name': vg_name})
        except ValueError:
            block_lv = None

        if not block_lv:
            block_lv = self.prepare_data_device('block', osd_fsid)

        tags['ceph.block_device'] = block_lv.lv_path
        tags['ceph.block_uuid'] = block_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted
        tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)

        wal_device, wal_uuid, tags = self.setup_device(
            'wal', self.args.block_wal, tags, self.args.block_wal_size,
            self.args.block_wal_slots)
        db_device, db_uuid, tags = self.setup_device(
            'db', self.args.block_db, tags, self.args.block_db_size,
            self.args.block_db_slots)
        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
def prepare(self, args):
    """
    Prepare a single OSD from parsed CLI ``args``.

    For filestore, resolves (or carves out of an existing vg) both the data
    lv and the journal (lv or raw device), tags the data lv with ceph.*
    metadata, and calls ``prepare_filestore``. Bluestore is delegated
    entirely to ``prepare_bluestore``.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}

    cluster_fsid = conf.ceph.get('global', 'fsid')
    fsid = args.osd_fsid or system.generate_uuid()
    # allow re-using an id, in case a prepare failed
    # (removed an obsolete commented-out create_id call here)
    osd_id = args.osd_id or prepare_utils.create_id(fsid, json.dumps(secrets))
    journal_name = "journal_%s" % fsid
    osd_name = "osd_%s" % fsid

    if args.filestore:
        data_vg = api.get_vg(vg_name=args.data)
        data_lv = api.get_lv(lv_name=args.data)
        journal_vg = api.get_vg(vg_name=args.journal)
        journal_lv = api.get_lv(lv_name=args.journal)
        journal_device = None
        # it is possible to pass a device as a journal that is not
        # an actual logical volume (or group)
        if not args.journal:
            if data_lv:
                raise RuntimeError('--journal is required when not using a vg for OSD data')
            # collocated: carve out the journal from the data vg
            if data_vg:
                journal_lv = api.create_lv(
                    name=journal_name,
                    group=data_vg.name,
                    size=args.journal_size,
                    osd_fsid=fsid,
                    osd_id=osd_id,
                    type='journal',
                    cluster_fsid=cluster_fsid
                )
        # if a volume group was defined for the journal create that first
        if journal_vg:
            journal_lv = api.create_lv(
                name=journal_name,
                group=args.journal,
                size=args.journal_size,
                osd_fsid=fsid,
                osd_id=osd_id,
                type='journal',
                cluster_fsid=cluster_fsid
            )
        if journal_lv:
            journal_device = journal_lv.lv_path
        # The journal is probably a device, not in LVM
        elif args.journal:
            journal_device = canonical_device_path(args.journal)
        # At this point we must have a journal_lv or a journal device
        # now create the osd from the group if that was found
        if data_vg:
            # XXX make sure that there aren't more OSDs than physical
            # devices from this volume group
            data_lv = api.create_lv(
                name=osd_name,
                group=args.data,
                osd_fsid=fsid,
                osd_id=osd_id,
                type='data',
                journal_device=journal_device,
                cluster_fsid=cluster_fsid
            )
        # we must have either an existing data_lv or a newly created, so lets make
        # sure that the tags are correct
        if not data_lv:
            raise RuntimeError('no data logical volume found with: %s' % args.data)
        data_lv.set_tags({
            'ceph.type': 'data',
            'ceph.osd_fsid': fsid,
            'ceph.osd_id': osd_id,
            'ceph.cluster_fsid': cluster_fsid,
            'ceph.journal_device': journal_device,
            'ceph.data_device': data_lv.lv_path,
        })

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            id_=osd_id,
            fsid=fsid,
        )
    elif args.bluestore:
        prepare_bluestore(args)
def create(self, args):
    """
    Create an OSD end to end by chaining ``prepare`` and ``activate``.

    A missing ``args.osd_fsid`` is filled in first so that the prepare and
    activate stages reference the same OSD fsid.
    """
    if args.osd_fsid is None or not args.osd_fsid:
        args.osd_fsid = system.generate_uuid()
    prepare_step = Prepare([])
    activate_step = Activate([])
    prepare_step.prepare(args)
    activate_step.activate(args)
def prepare(self, args):
    """
    Prepare a single OSD (filestore or bluestore) from parsed CLI ``args``.

    Generates cephx (and optional dmcrypt/lockbox) secrets, resolves or
    creates the OSD id/fsid, finds or creates the backing lv, records the
    ceph.* LVM tags, and delegates to ``prepare_filestore`` /
    ``prepare_bluestore``.

    Sets ``self.osd_id`` as a side effect.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    encrypted = 1 if args.dmcrypt else 0
    # NOTE: removed a redundant ``cephx_lockbox_secret = ''`` assignment
    # that was immediately overwritten by the line below.
    cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()

    if encrypted:
        secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
        secrets['cephx_lockbox_secret'] = cephx_lockbox_secret

    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    crush_device_class = args.crush_device_class
    if crush_device_class:
        secrets['crush_device_class'] = crush_device_class
    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(
        osd_fsid, json.dumps(secrets), osd_id=args.osd_id)
    tags = {
        'ceph.osd_fsid': osd_fsid,
        'ceph.osd_id': self.osd_id,
        'ceph.cluster_fsid': cluster_fsid,
        'ceph.cluster_name': conf.cluster,
        'ceph.crush_device_class': crush_device_class,
    }
    if args.filestore:
        if not args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        data_lv = self.get_lv(args.data)
        if not data_lv:
            # args.data is not an existing lv; create one on the device
            data_lv = self.prepare_device(args.data, 'data', cluster_fsid, osd_fsid)

        tags['ceph.data_device'] = data_lv.lv_path
        tags['ceph.data_uuid'] = data_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted

        journal_device, journal_uuid, tags = self.setup_device('journal', args.journal, tags)

        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
    elif args.bluestore:
        block_lv = self.get_lv(args.data)
        if not block_lv:
            # args.data is not an existing lv; create one on the device
            block_lv = self.prepare_device(args.data, 'block', cluster_fsid, osd_fsid)

        tags['ceph.block_device'] = block_lv.lv_path
        tags['ceph.block_uuid'] = block_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted

        # wal and db are optional; setup_device tolerates empty arguments
        wal_device, wal_uuid, tags = self.setup_device('wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device('db', args.block_db, tags)

        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
def execute(self):
    """
    Create vgs/lvs from the incoming set of devices, assign their roles
    (block, block.db, block.wal, etc..) and offload the OSD creation to
    ``lvm create``
    """
    # one vg per data device, keyed by device path (dict comprehension
    # instead of dict([...]) per flake8-comprehensions C404)
    data_vgs = {osd['data']['path']: None for osd in self.computed['osds']}

    # create 1 vg per data device first, mapping them to the device path,
    # when the lv gets created later, it can create as many as needed (or
    # even just 1)
    for osd in self.computed['osds']:
        vg = data_vgs.get(osd['data']['path'])
        if not vg:
            vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
            data_vgs[osd['data']['path']] = vg

    if self.data_devs and self.db_or_journal_devs:
        blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]

        # no common vg is found, create one with all the blank SSDs
        if not self.common_vg:
            db_vg = lvm.create_vg(blank_db_dev_paths,
                                  name_prefix='ceph-block-dbs')
        elif self.common_vg and blank_db_dev_paths:
            # if a common vg exists then extend it with any blank ssds
            db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
        else:
            # one common vg with nothing else to extend can be used directly,
            # either this is one device with one vg, or multiple devices with the
            # same vg
            db_vg = self.common_vg

        if self.use_large_block_db:
            # make the block.db lvs as large as possible
            vg_free_count = str_to_int(db_vg.vg_free_count)
            db_lv_extents = int(vg_free_count / self.dbs_needed)
        else:
            db_lv_extents = db_vg.sizing(
                size=self.block_db_size.gb.as_int())['extents']

    if self.data_devs and self.wal_devs:
        blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]

        if not self.common_wal_vg:
            wal_vg = lvm.create_vg(blank_wal_dev_paths,
                                   name_prefix='ceph-block-wals')
        elif self.common_wal_vg and blank_wal_dev_paths:
            wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
        else:
            wal_vg = self.common_wal_vg

        if self.use_large_block_wal:
            # make the block.wal lvs as large as possible
            # (comment fixed: it previously said "block.db" by copy-paste)
            vg_free_count = str_to_int(wal_vg.vg_free_count)
            wal_lv_extents = int(vg_free_count / self.wals_needed)
        else:
            wal_lv_extents = wal_vg.sizing(
                size=self.block_wal_size.gb.as_int())['extents']

    # create the data lvs, and create the OSD with an lv from the common
    # block.db vg from before
    for osd in self.computed['osds']:
        data_path = osd['data']['path']
        data_vg = data_vgs[data_path]
        data_lv_extents = data_vg.sizing(parts=1)['extents']
        data_uuid = system.generate_uuid()
        data_lv = lvm.create_lv('osd-block', data_uuid,
                                vg=data_vg.name, extents=data_lv_extents)
        command = [
            '--bluestore',
            '--data',
            "%s/%s" % (data_lv.vg_name, data_lv.name),
        ]
        if 'block.db' in osd:
            db_uuid = system.generate_uuid()
            db_lv = lvm.create_lv('osd-block-db', db_uuid,
                                  vg=db_vg.name, extents=db_lv_extents)
            command.extend(
                ['--block.db', '{}/{}'.format(db_lv.vg_name, db_lv.name)])
        if 'block.wal' in osd:
            wal_uuid = system.generate_uuid()
            wal_lv = lvm.create_lv('osd-block-wal', wal_uuid,
                                   vg=wal_vg.name, extents=wal_lv_extents)
            command.extend([
                '--block.wal',
                '{}/{}'.format(wal_lv.vg_name, wal_lv.name)
            ])
        if self.args.dmcrypt:
            command.append('--dmcrypt')
        if self.args.no_systemd:
            command.append('--no-systemd')
        if self.args.crush_device_class:
            command.extend(
                ['--crush-device-class', self.args.crush_device_class])
        if 'osd_id' in osd:
            command.extend(['--osd-id', osd['osd_id']])

        if self.args.prepare:
            Prepare(command).main()
        else:
            Create(command).main()
def prepare(self, args):
    """
    Prepare a single OSD (filestore or bluestore) from parsed CLI ``args``.

    Generates cephx (and optional dmcrypt/lockbox) secrets, resolves or
    creates the OSD id/fsid, finds or creates the backing lv, records the
    ceph.* LVM tags (including VDO detection), and delegates to
    ``prepare_filestore`` / ``prepare_bluestore``.

    Sets ``self.osd_id`` as a side effect.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    encrypted = 1 if args.dmcrypt else 0
    # NOTE: removed a redundant ``cephx_lockbox_secret = ''`` assignment
    # that was immediately overwritten by the line below.
    cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()

    if encrypted:
        secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
        secrets['cephx_lockbox_secret'] = cephx_lockbox_secret

    cluster_fsid = conf.ceph.get('global', 'fsid')
    osd_fsid = args.osd_fsid or system.generate_uuid()
    crush_device_class = args.crush_device_class
    if crush_device_class:
        secrets['crush_device_class'] = crush_device_class
    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(
        osd_fsid, json.dumps(secrets), osd_id=args.osd_id)
    tags = {
        'ceph.osd_fsid': osd_fsid,
        'ceph.osd_id': self.osd_id,
        'ceph.cluster_fsid': cluster_fsid,
        'ceph.cluster_name': conf.cluster,
        'ceph.crush_device_class': crush_device_class,
    }
    if args.filestore:
        if not args.journal:
            raise RuntimeError(
                '--journal is required when using --filestore')

        data_lv = self.get_lv(args.data)
        if not data_lv:
            # args.data is not an existing lv; create one on the device
            data_lv = self.prepare_device(args.data, 'data', cluster_fsid,
                                          osd_fsid)

        tags['ceph.data_device'] = data_lv.lv_path
        tags['ceph.data_uuid'] = data_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted
        tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)

        journal_device, journal_uuid, tags = self.setup_device(
            'journal', args.journal, tags)

        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
    elif args.bluestore:
        block_lv = self.get_lv(args.data)
        if not block_lv:
            # args.data is not an existing lv; create one on the device
            block_lv = self.prepare_device(args.data, 'block', cluster_fsid,
                                           osd_fsid)

        tags['ceph.block_device'] = block_lv.lv_path
        tags['ceph.block_uuid'] = block_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted
        tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)

        # wal and db are optional; setup_device tolerates empty arguments
        wal_device, wal_uuid, tags = self.setup_device(
            'wal', args.block_wal, tags)
        db_device, db_uuid, tags = self.setup_device(
            'db', args.block_db, tags)

        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
def prepare(self):
    """
    Prepare a single OSD (filestore or bluestore) from ``self.args``.

    Generates cephx (and optional dmcrypt/lockbox) secrets, resolves or
    creates the OSD id/fsid, finds or creates the backing lv, records the
    ceph.* LVM tags (including VDO detection and osdspec affinity), and
    delegates to ``prepare_filestore`` / ``prepare_bluestore``.

    Sets ``self.osd_id`` as a side effect.
    """
    # FIXME we don't allow re-using a keyring, we always generate one for the
    # OSD, this needs to be fixed. This could either be a file (!) or a string
    # (!!) or some flags that we would need to compound into a dict so that we
    # can convert to JSON (!!!)
    secrets = {'cephx_secret': prepare_utils.create_key()}
    encrypted = 1 if self.args.dmcrypt else 0
    # NOTE: removed a redundant ``cephx_lockbox_secret = ''`` assignment
    # that was immediately overwritten by the line below.
    cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()

    if encrypted:
        secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
        secrets['cephx_lockbox_secret'] = cephx_lockbox_secret

    cluster_fsid = self.get_cluster_fsid()
    osd_fsid = self.args.osd_fsid or system.generate_uuid()
    crush_device_class = self.args.crush_device_class
    if crush_device_class:
        secrets['crush_device_class'] = crush_device_class
    # reuse a given ID if it exists, otherwise create a new ID
    self.osd_id = prepare_utils.create_id(
        osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
    tags = {
        'ceph.osd_fsid': osd_fsid,
        'ceph.osd_id': self.osd_id,
        'ceph.cluster_fsid': cluster_fsid,
        'ceph.cluster_name': conf.cluster,
        'ceph.crush_device_class': crush_device_class,
        'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
    }
    if self.args.filestore:
        #TODO: allow auto creation of journal on passed device, only works
        # when physical device is passed, not LV
        if not self.args.journal:
            raise RuntimeError('--journal is required when using --filestore')

        try:
            # a "vg/lv" spec means an existing lv; anything else raises
            # ValueError on unpacking and falls through to device creation
            vg_name, lv_name = self.args.data.split('/')
            data_lv = api.get_first_lv(filters={'lv_name': lv_name,
                                                'vg_name': vg_name})
        except ValueError:
            data_lv = None

        if not data_lv:
            data_lv = self.prepare_data_device('data', osd_fsid)

        tags['ceph.data_device'] = data_lv.lv_path
        tags['ceph.data_uuid'] = data_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted
        tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)

        journal_device, journal_uuid, tags = self.setup_device(
            'journal', self.args.journal, tags, self.args.journal_size)

        tags['ceph.type'] = 'data'
        data_lv.set_tags(tags)

        prepare_filestore(
            data_lv.lv_path,
            journal_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )
    elif self.args.bluestore:
        try:
            vg_name, lv_name = self.args.data.split('/')
            block_lv = api.get_first_lv(filters={'lv_name': lv_name,
                                                 'vg_name': vg_name})
        except ValueError:
            block_lv = None

        if not block_lv:
            block_lv = self.prepare_data_device('block', osd_fsid)

        tags['ceph.block_device'] = block_lv.lv_path
        tags['ceph.block_uuid'] = block_lv.lv_uuid
        tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
        tags['ceph.encrypted'] = encrypted
        tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)

        wal_device, wal_uuid, tags = self.setup_device(
            'wal', self.args.block_wal, tags, self.args.block_wal_size)
        db_device, db_uuid, tags = self.setup_device(
            'db', self.args.block_db, tags, self.args.block_db_size)

        tags['ceph.type'] = 'block'
        block_lv.set_tags(tags)

        prepare_bluestore(
            block_lv.lv_path,
            wal_device,
            db_device,
            secrets,
            tags,
            self.osd_id,
            osd_fsid,
        )