def make_new_volume(self, osd_id, osd_fsid, devices, target_lv):
    """Attach ``target_lv`` to an existing OSD as a new bluefs device.

    Updates LVM tags for the new volume first, then runs
    ``ceph-bluestore-tool --command bluefs-bdev-new-{db,wal}``.  On any
    failure (including the ``SystemExit`` raised below) the tag changes
    are rolled back before the error propagates.

    :param osd_id: ID of the OSD that gets the new volume
    :param osd_fsid: fsid of the OSD, used to locate its directory
    :param devices: currently attached devices whose tags may change
    :param target_lv: logical volume to attach (``self.create_type``
        decides whether it becomes a db or a wal device)
    :raises SystemExit: if ceph-bluestore-tool reports a failure
    """
    osd_path = get_osd_path(osd_id, osd_fsid)
    mlogger.info(
        'Making new volume at {} for OSD: {} ({})'.format(
            target_lv.lv_path, osd_id, osd_path))
    # create the tracker outside the try block so the rollback handler
    # below can always reference it
    tag_tracker = VolumeTagTracker(devices, target_lv)
    try:
        tag_tracker.update_tags_when_lv_create(self.create_type)

        stdout, stderr, exit_code = process.call([
            'ceph-bluestore-tool',
            '--path',
            osd_path,
            '--dev-target',
            target_lv.lv_path,
            '--command',
            'bluefs-bdev-new-{}'.format(self.create_type)
        ])
        if exit_code != 0:
            mlogger.error(
                'failed to attach new volume, error code:{}'.format(
                    exit_code))
            raise SystemExit(
                "Failed to attach new volume: {}".format(
                    self.args.target))
        else:
            # the OSD user must own the new block.{db,wal} link
            system.chown(os.path.join(osd_path, "block.{}".format(
                self.create_type)))
            terminal.success('New volume attached.')
    except BaseException:
        # explicitly BaseException (not Exception) so the SystemExit
        # raised above still triggers the LVM tag rollback before exiting
        tag_tracker.undo()
        raise
def osd_mkfs_filestore(osd_id, fsid):
    """
    Create the on-disk files for a filestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-journal /var/lib/ceph/osd/ceph-0/journal \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')
    journal = os.path.join(path, 'journal')

    # ceph-osd runs as the ceph user, so both paths must be owned by it
    for target in (journal, path):
        system.chown(target)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        # undocumented flag, sets the `type` file to contain 'filestore'
        '--osd-objectstore', 'filestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
        '--osd-data', path,
        '--osd-journal', journal,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ]
    process.run(command)
def activate_filestore(lvs):
    """
    Activate a filestore OSD from its logical volumes: locate the data LV
    and journal device, decrypt them if needed, mount the data volume,
    refresh the journal symlink, and start the OSD through systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises RuntimeError: when no data LV or no journal can be found
    :raises KeyError: when required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not osd_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'

    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    # it may have a volume with a journal
    osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})

    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    if not osd_journal_lv:
        # must be a disk partition, by querying blkid by the uuid we are
        # ensuring that the device path is always correct
        journal_uuid = osd_lv.tags['ceph.journal_uuid']
        osd_journal = disk.get_device_from_partuuid(journal_uuid)
    else:
        journal_uuid = osd_journal_lv.lv_uuid
        osd_journal = osd_lv.tags['ceph.journal_device']

    if not osd_journal:
        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)

    # this is done here, so that previous checks that ensure path availability
    # and correctness can still be enforced, and report if any issues are found
    if is_encrypted:
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        # this keyring writing is idempotent
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        # open both the data and journal devices through dm-crypt; the
        # decrypted mappings are named after the uuids
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
        encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)

        # from here on, operate on the decrypted mapper devices
        osd_journal = '/dev/mapper/%s' % journal_uuid
        source = '/dev/mapper/%s' % osd_lv.lv_uuid
    else:
        source = osd_lv.lv_path

    # mount the osd
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=destination):
        process.run(['mount', '-v', source, destination])

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['ln', '-snf', osd_journal, destination])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def write_keyring(osd_id, secret):
    # FIXME this only works for cephx, but there will be other types of secrets
    # later
    osd_keyring = '/var/lib/ceph/osd/%s-%s/keyring' % (conf.cluster, osd_id)
    command = [
        'ceph-authtool', osd_keyring,
        '--create-keyring',
        '--name', 'osd.%s' % str(osd_id),
        '--add-key', secret,
    ]
    process.run(command)
    # the new keyring must be owned by the ceph user
    system.chown(osd_keyring)
def write_keyring(osd_id, secret):
    # FIXME this only works for cephx, but there will be other types of secrets
    # later
    osd_keyring = '/var/lib/ceph/osd/%s-%s/keyring' % (conf.cluster, osd_id)
    entity = 'osd.%s' % str(osd_id)
    # create the keyring with ceph-authtool, then hand ownership to ceph
    process.run([
        'ceph-authtool', osd_keyring,
        '--create-keyring',
        '--name', entity,
        '--add-key', secret,
    ])
    system.chown(osd_keyring)
def _link_device(device, device_type, osd_id):
    """
    Symlink any device type into an OSD directory. ``device`` is the
    absolute source path and ``device_type`` the destination name, like
    'journal' or 'block'.
    """
    device_path = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, device_type)
    # the device itself must be owned by ceph before the OSD touches it
    system.chown(device)
    process.run(['ln', '-s', device, device_path])
def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
    """
    Create the on-disk files for a bluestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph

    When ``keyring`` is passed in, it is appended to the ceph-osd command.
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')
    wal_path = os.path.join(path, 'block.wal')
    db_path = os.path.join(path, 'block.db')

    system.chown(path)

    command = [
        'sudo', 'ceph-osd', '--cluster', conf.cluster,
        # undocumented flag, sets the `type` file to contain 'bluestore'
        '--osd-objectstore', 'bluestore', '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]
    if keyring is not None:
        command.extend(['--key', keyring])
    if wal:
        command.extend(['--bluestore-block-wal-path', wal_path])
    if db:
        command.extend(['--bluestore-block-db-path', db_path])
    command.extend([
        '--osd-data', path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ])
    # obfuscate so the key never shows up in logged command lines
    process.run(command, obfuscate='--key')
def osd_mkfs_filestore(osd_id, fsid, keyring):
    """
    Create the on-disk files for a filestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-journal /var/lib/ceph/osd/ceph-0/journal \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')
    journal = os.path.join(path, 'journal')

    # ownership must be correct before ceph-osd writes into these
    system.chown(journal)
    system.chown(path)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        '--osd-objectstore', 'filestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]
    if get_osdspec_affinity():
        command.extend(['--osdspec-affinity', get_osdspec_affinity()])
    if __release__ != 'luminous':
        # goes through stdin
        command.extend(['--keyfile', '-'])
    command.extend([
        '--osd-data', path,
        '--osd-journal', journal,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ])

    _, _, rc = process.call(
        command, stdin=keyring, terminal_verbose=True, show_command=True)
    if rc != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (rc, ' '.join(command)))
def activate_bluestore(lvs):
    """
    Activate a bluestore OSD from its logical volumes: mount the tmpfs osd
    dir, decrypt the block device if needed, prime the osd dir with
    ceph-bluestore-tool, refresh the block/db/wal symlinks, and start the
    OSD through systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises KeyError: when required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    is_encrypted = osd_lv.tags.get('ceph.encrypted', '0') == '1'
    dmcrypt_secret = None
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)

    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))

    # encryption is handled here, before priming the OSD dir
    if is_encrypted:
        osd_lv_path = '/dev/mapper/%s' % osd_lv.lv_uuid
        lockbox_secret = osd_lv.tags['ceph.cephx_lockbox_secret']
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_lv.lv_path, osd_lv.lv_uuid)
    else:
        osd_lv_path = osd_lv.lv_path

    # the dmcrypt secret (if any) is forwarded so db/wal devices can be
    # decrypted by the helper as well
    db_device_path = get_osd_device_path(osd_lv, lvs, 'db', dmcrypt_secret=dmcrypt_secret)
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal', dmcrypt_secret=dmcrypt_secret)

    # Once symlinks are removed, the osd dir can be 'primed again.
    process.run([
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv_path,
        '--path', osd_path])

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def _link_device(device, device_type, osd_id):
    """
    Create a ``device_type`` symlink (e.g. 'journal' or 'block') inside the
    OSD directory, pointing at the absolute ``device`` source path.
    """
    link = '/var/lib/ceph/osd/%s-%s/%s' % (
        conf.cluster, osd_id, device_type
    )
    # fix ownership of the source device before linking it in
    system.chown(device)
    process.run(['ln', '-s', device, link])
def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv):
    """Migrate bluefs data from ``devices`` onto the new ``target_lv``.

    LVM tags are updated (cleared on volumes being removed, set on the
    target) before ``ceph-bluestore-tool --command bluefs-bdev-migrate``
    runs; any failure rolls the tags back and re-raises.

    :param osd_id: ID of the OSD being migrated
    :param osd_fsid: fsid of the OSD, used to locate its directory
    :param devices: devices currently attached to the OSD
    :param target_lv: logical volume that receives the migrated data
    :raises SystemExit: when the target type cannot be determined or the
        migration command fails
    """
    source_devices = self.get_source_devices(devices)
    target_type = self.get_target_type_by_source(source_devices)
    if not target_type:
        mlogger.error(
            "Unable to determine new volume type,"
            " please use new-db or new-wal command before.")
        raise SystemExit(
            "Unable to migrate to : {}".format(self.args.target))

    target_path = target_lv.lv_path
    # create the tracker *before* the try block: if construction failed
    # inside it, the except handler would raise NameError on undo()
    tag_tracker = VolumeTagTracker(devices, target_lv)
    try:
        # we need to update lvm tags for all the remaining volumes
        # and clear for ones which to be removed
        # ceph-bluestore-tool removes source volume(s) other than block one
        # and attaches target one after successful migration
        tag_tracker.replace_lvs(source_devices, target_type)

        osd_path = get_osd_path(osd_id, osd_fsid)
        source_args = self.get_source_args(osd_path, source_devices)
        mlogger.info("Migrate to new, Source: {} Target: {}".format(
            source_args, target_path))
        stdout, stderr, exit_code = process.call([
            'ceph-bluestore-tool',
            '--path', osd_path,
            '--dev-target', target_path,
            '--command', 'bluefs-bdev-migrate'] + source_args)
        if exit_code != 0:
            mlogger.error(
                'Failed to migrate device, error code:{}'.format(exit_code))
            raise SystemExit(
                'Failed to migrate to : {}'.format(self.args.target))
        else:
            # the OSD user must own the newly attached block.{db,wal} link
            system.chown(os.path.join(osd_path, "block.{}".format(
                target_type)))
            terminal.success('Migration successful.')
    except BaseException:
        # roll tags back on any failure, including the SystemExit above
        tag_tracker.undo()
        raise
def activate_bluestore(meta, tmpfs, systemd):
    """
    Activate a raw-device bluestore OSD: (re)create the osd directory,
    prime it with ceph-bluestore-tool, and refresh the block/db/wal
    symlinks.

    :param meta: dict with at least ``osd_id``, ``osd_uuid`` and
                 ``device``; may also have ``device_db`` / ``device_wal``
    :param tmpfs: whether the osd dir is mounted as tmpfs when created
    :param systemd: accepted for interface parity; not referenced in this
                    body — presumably handled by the caller (TODO confirm)
    """
    # find the osd
    osd_id = meta['osd_id']
    osd_uuid = meta['osd_uuid']

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)

    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))

    # Once symlinks are removed, the osd dir can be 'primed again. chown first,
    # regardless of what currently exists so that ``prime-osd-dir`` can succeed
    # even if permissions are somehow messed up
    system.chown(osd_path)
    prime_command = [
        'ceph-bluestore-tool',
        'prime-osd-dir',
        '--path', osd_path,
        '--no-mon-config',
        '--dev', meta['device'],
    ]
    process.run(prime_command)

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    prepare_utils.link_block(meta['device'], osd_id)

    if 'device_db' in meta:
        prepare_utils.link_db(meta['device_db'], osd_id, osd_uuid)

    if 'device_wal' in meta:
        prepare_utils.link_wal(meta['device_wal'], osd_id, osd_uuid)

    # re-chown after priming/link creation so everything is owned by ceph
    system.chown(osd_path)
    terminal.success("ceph-volume raw activate successful for osd ID: %s" % osd_id)
def osd_mkfs_filestore(osd_id, fsid, keyring):
    """
    Create the on-disk files for a filestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-journal /var/lib/ceph/osd/ceph-0/journal \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')
    journal = os.path.join(path, 'journal')

    # both the journal and the osd dir must be owned by the ceph user
    for target in (journal, path):
        system.chown(target)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        '--osd-objectstore', 'filestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]
    if __release__ != 'luminous':
        # goes through stdin
        command.extend(['--keyfile', '-'])
    command.extend([
        '--osd-data', path,
        '--osd-journal', journal,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ])

    _, _, rc = process.call(
        command,
        stdin=keyring,
        terminal_verbose=True,
        show_command=True
    )
    if rc != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (rc, ' '.join(command)))
def activate_filestore(lvs):
    """
    Activate a filestore OSD from its logical volumes: mount the data LV,
    refresh the journal symlink and its permissions, and start the OSD
    through systemd.
    """
    # find the osd
    data_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not data_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    osd_id = data_lv.tags['ceph.osd_id']
    conf.cluster = data_lv.tags['ceph.cluster_name']
    # it may have a volume with a journal
    journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})
    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = data_lv.tags['ceph.osd_fsid']
    if journal_lv:
        osd_journal = data_lv.tags['ceph.journal_device']
    else:
        # must be a disk partition; querying blkid by the uuid ensures the
        # device path is always correct
        osd_journal = disk.get_device_from_partuuid(
            data_lv.tags['ceph.journal_uuid'])

    if not osd_journal:
        raise RuntimeError(
            'unable to detect an lv or device journal for OSD %s' % osd_id)

    # mount the osd
    source = data_lv.lv_path
    mount_point = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=mount_point):
        process.run(['mount', '-v', source, mount_point])

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    journal_link = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['ln', '-snf', osd_journal, journal_link])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD, then start it
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def activate_filestore(lvs):
    """
    Activate a filestore OSD from its logical volumes: mount the data LV,
    refresh the journal symlink and permissions, then enable and start the
    OSD via systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises RuntimeError: if no data LV or no journal device can be found
    :raises KeyError: if required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    if not osd_lv:
        raise RuntimeError('Unable to find a data LV for filestore activation')
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    # it may have a volume with a journal
    osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})

    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    if not osd_journal_lv:
        # must be a disk partition, by querying blkid by the uuid we are
        # ensuring that the device path is always correct
        osd_journal = disk.get_device_from_partuuid(osd_lv.tags['ceph.journal_uuid'])
    else:
        osd_journal = osd_lv.tags['ceph.journal_device']

    if not osd_journal:
        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)

    # mount the osd
    source = osd_lv.lv_path
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.device_is_mounted(source, destination=destination):
        process.run(['mount', '-v', source, destination])

    # always re-do the symlink regardless if it exists, so that the journal
    # device path that may have changed can be mapped correctly every time
    destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
    process.run(['ln', '-snf', osd_journal, destination])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def write_keyring(osd_id, secret, keyring_name='keyring', name=None):
    """
    Create a keyring file with the ``ceph-authtool`` utility. Constructs
    the path over well-known conventions for the OSD, and allows any other
    custom ``name`` to be set.

    :param osd_id: The ID for the OSD to be used
    :param secret: The key to be added as (as a string)
    :param name: Defaults to 'osd.{ID}' but can be used to add other client names,
                 specifically for 'lockbox' type of keys
    :param keyring_name: Alternative keyring name, for supporting other types
                         of keys like for lockbox
    """
    osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name)
    entity = name or 'osd.%s' % str(osd_id)
    command = [
        'ceph-authtool', osd_keyring,
        '--create-keyring',
        '--name', entity,
        '--add-key', secret,
    ]
    process.run(command)
    # hand ownership of the new keyring to the ceph user
    system.chown(osd_keyring)
def activate_filestore(lvs):
    """
    Activate a filestore OSD from its logical volumes: resolve the journal
    device, mount the data LV, ensure the journal symlink and permissions,
    and start the OSD through systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises RuntimeError: if no journal lv or device can be detected
    :raises KeyError: if required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'data'})
    osd_id = osd_lv.tags['ceph.osd_id']
    # it may have a volume with a journal
    osd_journal_lv = lvs.get(lv_tags={'ceph.type': 'journal'})
    # TODO: add sensible error reporting if this is ever the case
    # blow up with a KeyError if this doesn't exist
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    if not osd_journal_lv:
        osd_journal = osd_lv.tags.get('ceph.journal_device')
    else:
        # fix: was ``osd_journal.lv_path`` which raised NameError because
        # ``osd_journal`` is unbound in this branch; the journal LV's path
        # is what was intended
        osd_journal = osd_journal_lv.lv_path

    if not osd_journal:
        raise RuntimeError(
            'unable to detect an lv or device journal for OSD %s' % osd_id)

    # mount the osd
    source = osd_lv.lv_path
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.is_mounted(source, destination=destination):
        process.run(['sudo', 'mount', '-v', source, destination])

    # ensure that the symlink for the journal is there
    if not os.path.exists(osd_journal):
        source = osd_journal
        destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
        process.run(['sudo', 'ln', '-s', source, destination])

    # make sure that the journal has proper permissions
    system.chown(osd_journal)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
def activate_bluestore(lvs):
    """
    Activate a bluestore OSD from its logical volumes: mount the tmpfs osd
    dir, prime it with ceph-bluestore-tool, refresh the block/db/wal
    symlinks, then enable and start the OSD through systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises KeyError: if required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    db_device_path = get_osd_device_path(osd_lv, lvs, 'db')
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal')

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)

    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))

    # Once symlinks are removed, the osd dir can be 'primed again.
    process.run([
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv.lv_path,
        '--path', osd_path
    ])

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(
        ['ln', '-snf', osd_lv.lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
def write_keyring(osd_id, secret, keyring_name='keyring', name=None):
    """
    Create a keyring file with the ``ceph-authtool`` utility. Constructs
    the path over well-known conventions for the OSD, and allows any other
    custom ``name`` to be set.

    :param osd_id: The ID for the OSD to be used
    :param secret: The key to be added as (as a string)
    :param name: Defaults to 'osd.{ID}' but can be used to add other client names,
                 specifically for 'lockbox' type of keys
    :param keyring_name: Alternative keyring name, for supporting other types
                         of keys like for lockbox
    """
    osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name)
    name = name or 'osd.%s' % str(osd_id)
    # create the keyring, then chown it so the ceph user can read it
    process.run([
        'ceph-authtool', osd_keyring,
        '--create-keyring',
        '--name', name,
        '--add-key', secret,
    ])
    system.chown(osd_keyring)
def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
    """
    Create the on-disk files for a bluestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph

    When ``keyring`` is passed it is fed to ceph-osd over stdin.
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')

    system.chown(path)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        '--osd-objectstore', 'bluestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]
    if keyring is not None:
        # the key itself goes through stdin, never the command line
        command.extend(['--keyfile', '-'])
    if wal:
        command.extend(['--bluestore-block-wal-path', wal])
        system.chown(wal)
    if db:
        command.extend(['--bluestore-block-db-path', db])
        system.chown(db)
    if get_osdspec_affinity():
        command.extend(['--osdspec-affinity', get_osdspec_affinity()])
    command.extend([
        '--osd-data', path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ])

    _, _, rc = process.call(command, stdin=keyring, show_command=True)
    if rc != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (rc, ' '.join(command)))
def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
    """
    Create the on-disk files for a bluestore OSD. A normal call will look
    like:

        ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                 --osd-data /var/lib/ceph/osd/ceph-0 \
                 --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                 --keyring /var/lib/ceph/osd/ceph-0/keyring \
                 --setuser ceph --setgroup ceph

    When ``keyring`` is passed it is fed to ceph-osd over stdin.
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')

    system.chown(path)

    command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        # undocumented flag, sets the `type` file to contain 'bluestore'
        '--osd-objectstore', 'bluestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]
    if keyring is not None:
        # the key itself goes through stdin, never the command line
        command.extend(['--keyfile', '-'])
    if wal:
        command.extend(['--bluestore-block-wal-path', wal])
        system.chown(wal)
    if db:
        command.extend(['--bluestore-block-db-path', db])
        system.chown(db)
    command.extend([
        '--osd-data', path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ])

    _, _, rc = process.call(command, stdin=keyring, show_command=True)
    if rc != 0:
        raise RuntimeError('Command failed with exit code %s: %s' % (rc, ' '.join(command)))
def activate_bluestore(lvs):
    """
    Activate a bluestore OSD from its logical volumes: mount the tmpfs osd
    dir, prime it with ceph-bluestore-tool, refresh the block/db/wal
    symlinks, then enable and start the OSD through systemd.

    :param lvs: a collection supporting ``get(lv_tags=...)`` lookups
    :raises KeyError: if required ``ceph.*`` tags are missing
    """
    # find the osd
    osd_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    osd_id = osd_lv.tags['ceph.osd_id']
    conf.cluster = osd_lv.tags['ceph.cluster_name']
    osd_fsid = osd_lv.tags['ceph.osd_fsid']
    db_device_path = get_osd_device_path(osd_lv, lvs, 'db')
    wal_device_path = get_osd_device_path(osd_lv, lvs, 'wal')

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)

    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))

    # Once symlinks are removed, the osd dir can be 'primed again.
    process.run([
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv.lv_path,
        '--path', osd_path])

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv.lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)

    # enable the ceph-volume unit for this OSD
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')

    # start the OSD
    systemctl.start_osd(osd_id)
def activate_bluestore(lvs):
    """
    Activate a bluestore OSD from its logical volumes: mount the tmpfs osd
    dir, prime it with ceph-bluestore-tool, refresh the block/db/wal
    symlinks, then enable and start the OSD through systemd.
    """
    # find the osd
    block_lv = lvs.get(lv_tags={'ceph.type': 'block'})
    osd_id = block_lv.tags['ceph.osd_id']
    osd_fsid = block_lv.tags['ceph.osd_fsid']
    db_device_path = get_osd_device_path(block_lv, lvs, 'db')
    wal_device_path = get_osd_device_path(block_lv, lvs, 'wal')

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=True)

    # if the osd dir was not mounted via tmpfs, it means that the files are
    # gone, so it needs to be 'primed' again. The command would otherwise
    # fail if the directory was already populated
    process.run([
        'sudo', 'ceph-bluestore-tool', 'prime-osd-dir',
        '--dev', block_lv.lv_path, '--path', osd_path
    ])

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run([
        'sudo', 'ln', '-snf', block_lv.lv_path,
        os.path.join(osd_path, 'block')
    ])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)

    # db first, then wal, matching the original link ordering
    for link_name, device in (('block.db', db_device_path),
                              ('block.wal', wal_device_path)):
        if device:
            process.run(['sudo', 'ln', '-snf', device,
                         os.path.join(osd_path, link_name)])
            system.chown(device)

    # enable the ceph-volume unit for this OSD, then start it
    systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
    systemctl.start_osd(osd_id)
def activate(self, args):
    """
    Activate an OSD described by a ceph-disk style JSON config: resolve the
    devices by partuuid, mount the data partition, re-link the
    journal/block devices, and (unless driven by systemd) enable and start
    the OSD units.

    :param args: parsed CLI args; must provide ``json_config`` and may
                 provide ``osd_id`` / ``osd_fsid`` fallbacks
    :raises RuntimeError: if the JSON has no data uuid
    """
    with open(args.json_config, 'r') as fp:
        osd_metadata = json.load(fp)

    # fall back to the CLI-provided values when the JSON lacks them
    osd_id = osd_metadata.get('whoami', args.osd_id)
    osd_fsid = osd_metadata.get('fsid', args.osd_fsid)

    cluster_name = osd_metadata.get('cluster_name', 'ceph')
    osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
    data_uuid = osd_metadata.get('data', {}).get('uuid')
    if not data_uuid:
        raise RuntimeError(
            'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
        )
    # resolve each device through blkid by partuuid so stale /dev names
    # in the JSON do not matter
    data_device = disk.get_device_from_partuuid(data_uuid)
    journal_device = disk.get_device_from_partuuid(osd_metadata.get('journal', {}).get('uuid'))
    block_device = disk.get_device_from_partuuid(osd_metadata.get('block', {}).get('uuid'))
    block_db_device = disk.get_device_from_partuuid(osd_metadata.get('block.db', {}).get('uuid'))
    block_wal_device = disk.get_device_from_partuuid(
        osd_metadata.get('block.wal', {}).get('uuid')
    )

    if not system.device_is_mounted(data_device, destination=osd_dir):
        process.run(['sudo', 'mount', '-v', data_device, osd_dir])

    device_map = {
        'journal': journal_device,
        'block': block_device,
        'block.db': block_db_device,
        'block.wal': block_wal_device
    }

    for name, device in device_map.items():
        if not device:
            continue
        # always re-do the symlink regardless if it exists, so that the journal
        # device path that may have changed can be mapped correctly every time
        destination = os.path.join(osd_dir, name)
        process.run(['sudo', 'ln', '-snf', device, destination])

        # make sure that the journal has proper permissions
        system.chown(device)

    if not self.systemd:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'simple')

        # disable any/all ceph-disk units
        systemctl.mask_ceph_disk()

        # enable the OSD
        systemctl.enable_osd(osd_id)

        # start the OSD
        systemctl.start_osd(osd_id)

    if not self.systemd:
        terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
        terminal.warning(
            ('All ceph-disk systemd units have been disabled to '
             'prevent OSDs getting triggered by UDEV events')
        )
def activate(self, args):
    """
    Activate an OSD from ceph-disk style JSON metadata (``simple`` scan).

    Validates the configured devices, captures dmcrypt secrets when the OSD
    is encrypted, mounts the data device (via the filestore helper or a
    plain mount depending on the objectstore type), re-creates the device
    symlinks inside the OSD directory, and enables/starts the systemd units.

    :param args: parsed CLI arguments; ``args.json_config`` is the path to
                 the JSON metadata file, and ``args.osd_id`` /
                 ``args.osd_fsid`` are fallbacks for missing
                 ``whoami`` / ``fsid`` keys.
    :raises RuntimeError: when the metadata has no data uuid, or when the
                          device for the stored uuid no longer exists
    """
    with open(args.json_config, 'r') as fp:
        osd_metadata = json.load(fp)
    # Make sure that required devices are configured
    self.validate_devices(osd_metadata)
    osd_id = osd_metadata.get('whoami', args.osd_id)
    osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
    data_uuid = osd_metadata.get('data', {}).get('uuid')
    conf.cluster = osd_metadata.get('cluster_name', 'ceph')
    if not data_uuid:
        raise RuntimeError(
            'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id)

    # Encryption detection, and capturing of the keys to decrypt
    self.is_encrypted = osd_metadata.get('encrypted', False)
    self.encryption_type = osd_metadata.get('encryption_type')
    if self.is_encrypted:
        lockbox_secret = osd_metadata.get('lockbox.keyring')
        # write the keyring always so that we can unlock
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        # Store the secret around so that the decrypt method can reuse
        raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(
            osd_id, osd_fsid)
        # Note how both these calls need b64decode. For some reason, the
        # way ceph-disk creates these keys, it stores them in the monitor
        # *undecoded*, requiring this decode call again. The lvm side of
        # encryption doesn't need it, so we are assuming here that anything
        # that `simple` scans, will come from ceph-disk and will need this
        # extra decode call here
        self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)

    cluster_name = osd_metadata.get('cluster_name', 'ceph')
    osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
    # XXX there is no support for LVM here
    data_device = self.get_device(data_uuid)
    # a stale uuid (device removed/re-provisioned) must be reported clearly
    # instead of failing later on the mount call
    if not data_device:
        raise RuntimeError("osd fsid {} doesn't exist, this file will "
                           "be skipped, consider cleaning legacy "
                           "json file {}".format(osd_metadata['fsid'], args.json_config))
    journal_device = self.get_device(
        osd_metadata.get('journal', {}).get('uuid'))
    block_device = self.get_device(
        osd_metadata.get('block', {}).get('uuid'))
    block_db_device = self.get_device(
        osd_metadata.get('block.db', {}).get('uuid'))
    block_wal_device = self.get_device(
        osd_metadata.get('block.wal', {}).get('uuid'))
    if not system.device_is_mounted(data_device, destination=osd_dir):
        # filestore OSDs go through the prepare helper which knows about
        # mount flags/options; everything else is a plain mount
        if osd_metadata.get('type') == 'filestore':
            prepare_utils.mount_osd(data_device, osd_id)
        else:
            process.run(['mount', '-v', data_device, osd_dir])
    device_map = {
        'journal': journal_device,
        'block': block_device,
        'block.db': block_db_device,
        'block.wal': block_wal_device
    }
    for name, device in device_map.items():
        if not device:
            continue
        # always re-do the symlink regardless if it exists, so that the journal
        # device path that may have changed can be mapped correctly every time
        destination = os.path.join(osd_dir, name)
        process.run(['ln', '-snf', device, destination])
        # make sure that the journal has proper permissions
        system.chown(device)
    self.enable_systemd_units(osd_id, osd_fsid)
    terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
def activate(self, args):
    """
    Activate an OSD using the JSON metadata file produced by a scan.

    The metadata supplies the OSD id/fsid and the partition UUIDs of every
    device the OSD uses; each uuid is resolved to a device node, the data
    device is mounted, device symlinks are refreshed, and the systemd units
    are enabled and started when not already running under systemd.

    :param args: parsed CLI arguments; ``args.json_config`` locates the
                 JSON file, ``args.osd_id`` / ``args.osd_fsid`` act as
                 fallbacks for missing ``whoami`` / ``fsid`` keys.
    :raises RuntimeError: when the metadata lacks a uuid for the data device
    """
    with open(args.json_config, 'r') as fp:
        osd_metadata = json.load(fp)

    # metadata values take precedence over the CLI fallbacks
    osd_id = osd_metadata.get('whoami', args.osd_id)
    osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
    cluster_name = osd_metadata.get('cluster_name', 'ceph')
    osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)

    data_uuid = osd_metadata.get('data', {}).get('uuid')
    if not data_uuid:
        raise RuntimeError(
            'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
        )

    def _resolve(key):
        # translate a metadata entry's partition uuid into a device node
        return disk.get_device_from_partuuid(osd_metadata.get(key, {}).get('uuid'))

    data_device = disk.get_device_from_partuuid(data_uuid)
    device_map = {
        'journal': _resolve('journal'),
        'block': _resolve('block'),
        'block.db': _resolve('block.db'),
        'block.wal': _resolve('block.wal'),
    }

    if not system.device_is_mounted(data_device, destination=osd_dir):
        process.run(['mount', '-v', data_device, osd_dir])

    for link_name, device_path in device_map.items():
        if not device_path:
            continue
        # refresh the symlink unconditionally so a device path that moved
        # since the last activation is mapped correctly every time
        link_target = os.path.join(osd_dir, link_name)
        process.run(['ln', '-snf', device_path, link_target])
        # the linked device (e.g. the journal) needs ceph ownership
        system.chown(device_path)

    if not self.systemd:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'simple')
        # disable any/all ceph-disk units
        systemctl.mask_ceph_disk()
        # enable the OSD
        systemctl.enable_osd(osd_id)
        # start the OSD
        systemctl.start_osd(osd_id)
    terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
    terminal.warning(
        ('All ceph-disk systemd units have been disabled to '
         'prevent OSDs getting triggered by UDEV events')
    )
def activate_bluestore(osd_lvs, no_systemd=False, no_tmpfs=False):
    """
    Activate a bluestore OSD from its logical volumes.

    Finds the LV tagged ``ceph.type=block`` among ``osd_lvs``, mounts the
    OSD directory on tmpfs, opens the dmcrypt mapping when the OSD is
    encrypted, primes the OSD directory with ``ceph-bluestore-tool``,
    re-creates the block/block.db/block.wal symlinks, and finally enables
    and starts the systemd units unless ``no_systemd`` is set.

    :param osd_lvs: list of LV objects carrying ``ceph.*`` tags
    :param no_systemd: when True, skip enabling/starting the systemd units
    :param no_tmpfs: when True, do not mount the OSD path on tmpfs
    :raises RuntimeError: when no LV tagged ``ceph.type=block`` is found
    """
    for lv in osd_lvs:
        if lv.tags.get('ceph.type') == 'block':
            osd_block_lv = lv
            break
    else:
        raise RuntimeError('could not find a bluestore OSD to activate')

    is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
    dmcrypt_secret = None
    osd_id = osd_block_lv.tags['ceph.osd_id']
    conf.cluster = osd_block_lv.tags['ceph.cluster_name']
    osd_fsid = osd_block_lv.tags['ceph.osd_fsid']

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)

    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        # BUGFIX: os.path.exists() follows symlinks and is False for a
        # dangling one, which previously left stale symlinks behind and
        # could break priming; lexists() checks the link itself
        if os.path.lexists(link_path):
            os.unlink(link_path)

    # encryption is handled here, before priming the OSD dir
    if is_encrypted:
        osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
        lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
    else:
        osd_lv_path = osd_block_lv.lv_path

    db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret)
    wal_device_path = get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret)

    # Once symlinks are removed, the osd dir can be 'primed' again. chown
    # first, regardless of what currently exists so that ``prime-osd-dir``
    # can succeed even if permissions are somehow messed up
    system.chown(osd_path)
    prime_command = [
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv_path,
        '--path', osd_path, '--no-mon-config']
    process.run(prime_command)

    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
        system.chown(destination)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)
        system.chown(destination)

    # NOTE: deliberately `is False` (not `not no_systemd`) to preserve the
    # original behavior when a caller passes None
    if no_systemd is False:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
        # enable the OSD
        systemctl.enable_osd(osd_id)
        # start the OSD
        systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
def activate(self, args):
    """
    Activate an OSD from ceph-disk style JSON metadata (``simple`` scan).

    Validates the configured devices, captures dmcrypt secrets when the OSD
    is encrypted, mounts the data device, re-creates the device symlinks in
    the OSD directory, and enables/starts the systemd units.

    :param args: parsed CLI arguments; ``args.json_config`` is the path to
                 the JSON metadata file, and ``args.osd_id`` /
                 ``args.osd_fsid`` are fallbacks for missing
                 ``whoami`` / ``fsid`` keys.
    :raises RuntimeError: when the metadata has no data uuid, or when the
                          device for the stored uuid no longer exists
    """
    with open(args.json_config, 'r') as fp:
        osd_metadata = json.load(fp)
    # Make sure that required devices are configured
    self.validate_devices(osd_metadata)
    osd_id = osd_metadata.get('whoami', args.osd_id)
    osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
    data_uuid = osd_metadata.get('data', {}).get('uuid')
    conf.cluster = osd_metadata.get('cluster_name', 'ceph')
    if not data_uuid:
        raise RuntimeError(
            'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
        )

    # Encryption detection, and capturing of the keys to decrypt
    self.is_encrypted = osd_metadata.get('encrypted', False)
    self.encryption_type = osd_metadata.get('encryption_type')
    if self.is_encrypted:
        lockbox_secret = osd_metadata.get('lockbox.keyring')
        # write the keyring always so that we can unlock
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        # Store the secret around so that the decrypt method can reuse
        raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        # Note how both these calls need b64decode. For some reason, the
        # way ceph-disk creates these keys, it stores them in the monitor
        # *undecoded*, requiring this decode call again. The lvm side of
        # encryption doesn't need it, so we are assuming here that anything
        # that `simple` scans, will come from ceph-disk and will need this
        # extra decode call here
        self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)

    cluster_name = osd_metadata.get('cluster_name', 'ceph')
    osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
    # XXX there is no support for LVM here
    data_device = self.get_device(data_uuid)
    # BUGFIX: guard against a stale uuid whose device no longer exists;
    # previously this fell through and the mount below failed with an
    # obscure error. Mirrors the check in the sibling activate variant.
    if not data_device:
        raise RuntimeError("osd fsid {} doesn't exist, this file will "
                           "be skipped, consider cleaning legacy "
                           "json file {}".format(osd_metadata['fsid'], args.json_config))
    journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid'))
    block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
    block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
    block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))
    if not system.device_is_mounted(data_device, destination=osd_dir):
        process.run(['mount', '-v', data_device, osd_dir])
    device_map = {
        'journal': journal_device,
        'block': block_device,
        'block.db': block_db_device,
        'block.wal': block_wal_device
    }
    for name, device in device_map.items():
        if not device:
            continue
        # always re-do the symlink regardless if it exists, so that the journal
        # device path that may have changed can be mapped correctly every time
        destination = os.path.join(osd_dir, name)
        process.run(['ln', '-snf', device, destination])
        # make sure that the journal has proper permissions
        system.chown(device)
    self.enable_systemd_units(osd_id, osd_fsid)
    terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
    """
    Create the files for the OSD to function. A normal call will look like:

          ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
                   --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
                   --osd-data /var/lib/ceph/osd/ceph-0 \
                   --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
                   --keyring /var/lib/ceph/osd/ceph-0/keyring \
                   --setuser ceph --setgroup ceph

    In some cases it is required to use the keyring, when it is passed in as
    a keyword argument it is used as part of the ceph-osd command

    :param osd_id: numeric id of the OSD being created
    :param fsid: uuid for the new OSD
    :param keyring: optional keyring contents, fed to ceph-osd on stdin
    :param wal: optional path to the block.wal device
    :param db: optional path to the block.db device
    :raises RuntimeError: if ceph-osd fails, including when all retries for
                          a transient device lock are exhausted
    """
    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
    monmap = os.path.join(path, 'activate.monmap')

    system.chown(path)

    base_command = [
        'ceph-osd',
        '--cluster', conf.cluster,
        # undocumented flag, sets the `type` file to contain 'bluestore'
        '--osd-objectstore', 'bluestore',
        '--mkfs',
        '-i', osd_id,
        '--monmap', monmap,
    ]

    supplementary_command = [
        '--osd-data', path,
        '--osd-uuid', fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph'
    ]

    if keyring is not None:
        # the keyring content itself is passed via stdin below
        base_command.extend(['--keyfile', '-'])

    if wal:
        base_command.extend(['--bluestore-block-wal-path', wal])
        system.chown(wal)

    if db:
        base_command.extend(['--bluestore-block-db-path', db])
        system.chown(db)

    # hoisted: previously called twice (once for the check, once for the value)
    osdspec_affinity = get_osdspec_affinity()
    if osdspec_affinity:
        base_command.extend(['--osdspec-affinity', osdspec_affinity])

    command = base_command + supplementary_command

    # When running in containers the --mkfs on raw device sometimes fails
    # to acquire a lock through flock() on the device because systemd-udevd
    # holds one temporarily. See KernelDevice.cc and _lock() to understand
    # how ceph-osd acquires the lock. Because this is really transient, we
    # retry up to 5 times and wait for 1 sec in-between
    for retry in range(5):
        _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True)
        if returncode == 0:
            break
        if returncode == errno.EWOULDBLOCK:
            time.sleep(1)
            # retry + 1 so the first retry logs as 1/5, not 0/5
            logger.info('disk is held by another process, trying to mkfs again... (%s/5 attempt)' % (retry + 1))
            continue
        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
    else:
        # BUGFIX: previously, exhausting all 5 EWOULDBLOCK retries fell out
        # of the loop and returned as if mkfs had succeeded; report it
        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))