def prepare_disks_and_activate():
    """Prepare eligible OSD devices and start OSDs on this unit.

    Defers entirely until vault is ready when vaultlocker-based encryption
    is in use, filters out devices that must not be touched (previously
    processed, non-/dev paths, non-existent, mounted, or active bluestore
    devices), blocks the unit if any remaining device is non-pristine, and
    finally osdizes the devices and starts the OSD daemons once ceph is
    bootstrapped.

    :raises ValueError: if `osd-journal` and `osd-devices` share a device.
    """
    # NOTE: vault/vaultlocker preflight check
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if use_vaultlocker() and not vault_kv.complete:
        log('Deferring OSD preparation as vault not ready', level=DEBUG)
        return
    elif use_vaultlocker() and vault_kv.complete:
        log('Vault ready, writing vaultlocker configuration', level=DEBUG)
        vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        # BUG FIX: the implicitly-concatenated string literals were missing
        # a separating space, yielding the message "... must notoverlap."
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'.format(
        touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [
        dev for dev in devices
        if not ceph.is_active_bluestore_device(dev)
    ]

    # NOTE: all() over an empty device list is True, so an empty list
    #       does not block the unit.
    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set(
            'blocked',
            'Non-pristine devices detected, consult '
            '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if ceph.is_bootstrapped():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'),
                        osd_journal,
                        config('ignore-device-errors'),
                        config('osd-encrypt'),
                        config('bluestore'),
                        config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())
def test_write_vl_config_priority(self):
    """A custom priority is propagated through to install_alternative."""
    context_data = {'test': 'data'}
    vaultlocker.write_vaultlocker_conf(context=context_data, priority=200)
    # Service name is looked up exactly once.
    self.hookenv.service_name.assert_called_once_with()
    # The per-service config directory is created with restrictive perms.
    conf_dir = os.path.dirname(self._target_path)
    self.host.mkdir.assert_called_once_with(conf_dir, perms=0o700)
    # The template is rendered to the per-service target path.
    self.templating.render.assert_called_once_with(
        source='vaultlocker.conf.j2',
        target=self._target_path,
        context=context_data,
        perms=0o600,
    )
    # The rendered file is registered as an alternative at priority 200.
    self.alternatives.install_alternative.assert_called_once_with(
        'vaultlocker.conf',
        '/etc/vaultlocker/vaultlocker.conf',
        self._target_path,
        200)
def configure_local_ephemeral_storage():
    """Configure local block device for use as ephemeral instance storage"""
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    encrypt = config('encrypt')
    if encrypt:
        if not vault_kv.complete:
            log("Encryption requested but vault relation not complete",
                level=DEBUG)
            return
        # NOTE: only write vaultlocker configuration once relation is
        #       complete otherwise we run the chance of an empty
        #       configuration file being installed on a machine with
        #       other vaultlocker based services
        vaultlocker.write_vaultlocker_conf(context, priority=80)

    db = kv()
    if db.get('storage-configured', False):
        log("Ephemeral storage already configured, skipping", level=DEBUG)
        return

    block_dev = determine_block_device()
    if not block_dev:
        log('No block device configuration found, skipping', level=DEBUG)
        return

    if not is_block_device(block_dev):
        log("Device '{}' is not a block device, "
            "unable to configure storage".format(block_dev), level=DEBUG)
        return

    # NOTE: this deals with a dm-crypt'ed block device already in use
    if is_device_mounted(block_dev):
        log("Device '{}' is already mounted, "
            "unable to configure storage".format(block_dev), level=DEBUG)
        return

    mount_options = None
    if encrypt:
        new_uuid = str(uuid.uuid4())
        check_call(['vaultlocker', 'encrypt', '--uuid', new_uuid, block_dev])
        # From here on, operate on the dm-crypt mapping rather than the
        # raw device.
        block_dev = '/dev/mapper/crypt-{}'.format(new_uuid)
        mount_options = ','.join([
            "defaults",
            "nofail",
            ("x-systemd.requires="
             "vaultlocker-decrypt@{uuid}.service".format(uuid=new_uuid)),
            "comment=vaultlocker",
        ])

    # If not cleaned and in use, mkfs should fail.
    mkfs_xfs(block_dev, force=True)

    mountpoint = '/var/lib/nova/instances'
    filesystem = "xfs"
    mount(block_dev, mountpoint, filesystem=filesystem)
    fstab_add(block_dev, mountpoint, filesystem, options=mount_options)

    check_call(['chown', '-R', 'nova:nova', mountpoint])
    check_call(['chmod', '-R', '0755', mountpoint])

    # NOTE: record preparation of device - this ensures that ephemeral
    #       storage is never reconfigured by mistake, losing instance disks
    db.set('storage-configured', True)
    db.flush()
def prepare_disks_and_activate():
    """Prepare eligible OSD devices, start OSDs and notify the MON cluster.

    Defers entirely until vault is ready when vaultlocker-based encryption
    is in use, filters out devices that must not be touched (previously
    processed, non-/dev paths, non-existent, mounted, or active bluestore
    devices), blocks the unit if any remaining device is non-pristine,
    osdizes the devices once ceph is bootstrapped, and reports the number
    of bootstrapped OSDs on each `mon` relation.

    :raises ValueError: if `osd-journal` and `osd-devices` share a device.
    """
    # NOTE: vault/vaultlocker preflight check
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if use_vaultlocker() and not vault_kv.complete:
        log('Deferring OSD preparation as vault not ready', level=DEBUG)
        return
    elif use_vaultlocker() and vault_kv.complete:
        log('Vault ready, writing vaultlocker configuration', level=DEBUG)
        vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        # BUG FIX: the implicitly-concatenated string literals were missing
        # a separating space, yielding the message "... must notoverlap."
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'
        .format(touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [dev for dev in devices
               if not ceph.is_active_bluestore_device(dev)]

    # NOTE: all() over an empty device list is True, so an empty list
    #       does not block the unit.
    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set('blocked',
                   'Non-pristine devices detected, consult '
                   '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if ceph.is_bootstrapped():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        bluestore = use_bluestore()
        # Wait for in-flight udev events so device nodes are stable
        # before osdizing.
        ceph.udevadm_settle()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'),
                        osd_journal,
                        config('ignore-device-errors'),
                        config('osd-encrypt'),
                        bluestore,
                        config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())

    # Notify MON cluster as to how many OSD's this unit bootstrapped
    # into the cluster
    for r_id in relation_ids('mon'):
        relation_set(
            relation_id=r_id,
            relation_settings={
                'bootstrapped-osds': len(db.get('osd-devices', []))
            }
        )
def setup_storage(encrypt=False):
    """Prepare block devices for use as swift storage and mount them.

    Iterates the configured block devices, skipping any that were already
    prepared by this charm, are already in the ring, or are mounted;
    optionally dm-crypt's each device via vaultlocker, formats it with XFS,
    mounts it under /srv/node/<basename> and records it in unitdata.

    :param encrypt: whether to encrypt devices with vaultlocker before use;
                    requires a complete vault relation.
    """
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=90)

    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift',
          group='swift',
          perms=0o755)
    # The `overwrite` option may arrive as a bool or string; normalise.
    reformat = str(config('overwrite')).lower() == "true"

    db = kv()
    prepared_devices = db.get('prepared-devices', [])

    for dev in determine_block_devices():
        if dev in prepared_devices:
            log('Device {} already processed by charm,'
                ' skipping'.format(dev))
            continue

        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            # NOTE: record existing use of device dealing with
            #       upgrades from older versions of charms without
            #       this feature
            prepared_devices.append(dev)
            db.set('prepared-devices', prepared_devices)
            db.flush()
            continue

        # NOTE: this deals with a dm-crypt'ed block device already in
        #       use
        if is_device_mounted(dev):
            log("Device '{}' is already mounted, ignoring".format(dev))
            continue

        if reformat:
            clean_storage(dev)

        loopback_device = is_mapped_loopback_device(dev)
        options = None

        # Loopback devices are never vaultlocker-encrypted here.
        if encrypt and not loopback_device:
            dev_uuid = str(uuid.uuid4())
            check_call(['vaultlocker', 'encrypt',
                        '--uuid', dev_uuid,
                        dev])
            # Subsequent operations target the dm-crypt mapping.
            dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
            options = ','.join([
                "defaults",
                "nofail",
                ("x-systemd.requires="
                 "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
                "comment=vaultlocker",
            ])

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # not forcing the format.
            log("Format device '%s' failed (%s) - continuing to next device" %
                (dev, exc), level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        mountpoint = '/srv/node/%s' % basename
        if loopback_device:
            # If an existing fstab entry exists using the image file as the
            # source then preserve it, otherwise use the loopback device
            # directly to avoid a second implicit loopback device being
            # created on mount. Bug #1762390
            fstab = charmhelpers.core.fstab.Fstab()
            fstab_entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
            if fstab_entry and loopback_device == fstab_entry.device:
                dev = loopback_device
            options = "loop,nofail,defaults"

        filesystem = "xfs"
        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

        check_call(['chown', '-R', 'swift:swift', mountpoint])
        check_call(['chmod', '-R', '0755', mountpoint])

        # NOTE: record preparation of device - this will be used when
        #       providing block device configuration for ring builders.
        prepared_devices.append(dev)
        db.set('prepared-devices', prepared_devices)
        db.flush()
def prepare_disks_and_activate():
    """Prepare eligible OSD devices, start OSDs and notify the MON cluster.

    Defers entirely until vault is ready when vaultlocker-based encryption
    is in use, filters out devices that must not be touched (previously
    processed, non-/dev paths, non-existent, mounted, active bluestore or
    mapped LUKS devices), blocks the unit if any remaining device is
    non-pristine, osdizes the devices once OSD bootstrap is ready, and
    reports the OSD count and ceph release on each `mon` relation.

    :raises ValueError: if `osd-journal` and `osd-devices` share a device.
    """
    if use_vaultlocker():
        # NOTE: vault/vaultlocker preflight check
        vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
        context = vault_kv()
        if not vault_kv.complete:
            log('Deferring OSD preparation as vault not ready', level=DEBUG)
            return
        else:
            log('Vault ready, writing vaultlocker configuration', level=DEBUG)
            vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        # BUG FIX: the implicitly-concatenated string literals were missing
        # a separating space, yielding the message "... must notoverlap."
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'.format(
        touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [
        dev for dev in devices
        if not ceph.is_active_bluestore_device(dev)
    ]
    # filter osd-devices that are used as dmcrypt devices
    devices = [
        dev for dev in devices
        if not ceph.is_mapped_luks_device(dev)
    ]

    # NOTE: all() over an empty device list is True, so an empty list
    #       does not block the unit.
    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set(
            'blocked',
            'Non-pristine devices detected, consult '
            '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if is_osd_bootstrap_ready():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        bluestore = use_bluestore()
        # Wait for in-flight udev events so device nodes are stable
        # before osdizing.
        ceph.udevadm_settle()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'),
                        osd_journal,
                        config('ignore-device-errors'),
                        config('osd-encrypt'),
                        bluestore,
                        config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                log(
                    'The autotune config is deprecated and planned '
                    'for removal in the next release.',
                    level=WARNING)
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())

    # Notify MON cluster as to how many OSD's this unit bootstrapped
    # into the cluster
    for r_id in relation_ids('mon'):
        relation_set(relation_id=r_id, relation_settings={
            'bootstrapped-osds': len(db.get('osd-devices', [])),
            'ceph_release': ceph.resolve_ceph_version(
                hookenv.config('source') or 'distro')
        })