Example #1
    def ghost_nfs_share(self, ghost_share):
        """Bind mount the local units nfs share to another sites location

        :param ghost_share: NFS share URL to ghost
        :type ghost_share: str
        """
        nfs_share_path = os.path.join(
            TV_MOUNTS, self._encode_endpoint(hookenv.config("nfs-shares"))
        )
        ghost_share_path = os.path.join(
            TV_MOUNTS, self._encode_endpoint(ghost_share)
        )

        current_mounts = [mount[0] for mount in host.mounts()]

        if nfs_share_path not in current_mounts:
            # Trilio has not mounted the NFS share, so raise
            raise NFSShareNotMountedException(
                "nfs-shares ({}) not mounted".format(
                    hookenv.config("nfs-shares")
                )
            )

        if ghost_share_path in current_mounts:
            # The bind mount is already set up, so raise
            raise GhostShareAlreadyMountedException(
                "ghost mountpoint ({}) already bound".format(ghost_share_path)
            )

        if not os.path.exists(ghost_share_path):
            os.mkdir(ghost_share_path)

        host.mount(nfs_share_path, ghost_share_path, options="bind")
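
The helper above signals its preconditions with exceptions rather than return values. A minimal call-site sketch (not from the original charm), assuming the two exception classes are importable and using a placeholder share URL:

    try:
        self.ghost_nfs_share("10.10.20.5:/srv/triliovault")  # placeholder URL
    except NFSShareNotMountedException as e:
        # The primary nfs-shares mount is a precondition; log and retry later.
        hookenv.log(str(e), level=hookenv.WARNING)
    except GhostShareAlreadyMountedException:
        # The bind mount is already in place; nothing to do.
        pass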
Example #2
    def create_log_dir(self,
                       data_log_dev,
                       data_log_dir,
                       data_log_fs,
                       user="******",
                       group="confluent",
                       fs_options=None):

        if len(data_log_dir or "") == 0:
            logger.warning("Data log dir config empty")
            # Surface the misconfiguration as workload status (assumes this
            # method runs on the charm object, so self.unit is available).
            self.unit.status = BlockedStatus(
                "data-log-dir missing, please define it")
            return
        os.makedirs(data_log_dir, 0o750, exist_ok=True)
        shutil.chown(data_log_dir,
                     user=self.config["user"],
                     group=self.config["group"])
        dev, fs = None, None
        if len(data_log_dev or "") == 0:
            logger.warning("Data log device not found, using rootfs instead")
        else:
            # data_log_dev is assumed to map filesystem type -> device path;
            # iterate items() so both key and value are unpacked.
            for k, v in data_log_dev.items():
                fs = k
                dev = v
            logger.info("Data log device: mkfs -t {}".format(fs))
            cmd = ["mkfs", "-t", fs, dev]
            subprocess.check_call(cmd)
            mount(dev,
                  data_log_dir,
                  options=self.config.get("fs-options", None),
                  persist=True,
                  filesystem=fs)
Example #3
    def test_mounts_and_persist_a_device(self, log, check_output, fstab):
        """Check if a mount works with the persist flag set to True
        """
        device = '/dev/guido'
        mountpoint = '/mnt/guido'
        options = 'foo,bar'

        result = host.mount(device, mountpoint, options, persist=True)

        self.assertTrue(result)
        check_output.assert_called_with(
            ['mount', '-o', 'foo,bar', '/dev/guido', '/mnt/guido'])

        fstab.add.assert_called_with('/dev/guido',
                                     '/mnt/guido',
                                     'ext3',
                                     options='foo,bar')

        result = host.mount(device,
                            mountpoint,
                            options,
                            persist=True,
                            filesystem="xfs")

        self.assertTrue(result)
        fstab.add.assert_called_with('/dev/guido',
                                     '/mnt/guido',
                                     'xfs',
                                     options='foo,bar')
Example #4
    def write_fstab(self):
        for (mnt, dev) in host.mounts():
            if self.sftp_dir in mnt:
                host.umount(mnt, persist=True)
        for entry in self.parse_config():
            mount_path = "{}/{}/{}".format(self.sftp_dir, entry["user"],
                                           entry["name"])
            host.mount(
                entry["src"],
                mount_path,
                "bind,_netdev,x-systemd.requires={}".format(self.sftp_dir),
                persist=True,
                filesystem="none",
            )
            if self.charm_config["sftp-chown-mnt"]:
                user, group = entry["user"], entry["user"]
            else:
                user, group = "******", "sftp"
            try:
                shutil.chown(mount_path, user=user, group=group)
            except Exception as e:
                hookenv.log("Chown failed: {}".format(e),
                            level=hookenv.WARNING)
Example #5
def setup_storage():
    for dev in determine_block_devices():
        if config('overwrite') in ['True', 'true']:
            clean_storage(dev)
        # if not cleaned and in use, mkfs should fail.
        mkfs_xfs(dev)
        _dev = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', _dev)
        mkdir(_mp, owner='swift', group='swift')
        # The device was formatted with mkfs_xfs above, so persist the fstab
        # entry as xfs rather than the helper's ext3 default.
        mount(dev, '/srv/node/%s' % _dev, persist=True, filesystem='xfs')
    check_call(['chown', '-R', 'swift:swift', '/srv/node/'])
    check_call(['chmod', '-R', '0750', '/srv/node/'])
Example #6
def update_nfs():
    rhost = relation_get('private-address')
    mpath = relation_get('mountpath')
    if mpath:
        umount('/mnt/plex')
        fstab_remove('/mnt/plex')

        fstab_add(
            '{}:{}'.format(rhost, mpath),
            '/mnt/plex',
            'nfs',
            options='rw'
        )
        # Mount the share now; persistence is handled by the fstab entry above.
        mount(
            '{}:{}'.format(rhost, mpath),
            '/mnt/plex',
            options='rw',
            filesystem='nfs'
        )
Example #7
def validate_nfs():
    """
    Validate the nfs mount device
    """
    usr = config('tvault-datamover-ext-usr')
    grp = config('tvault-datamover-ext-group')
    data_dir = config('tv-data-dir')
    device = config('nfs-shares')
    nfs_options = config('nfs-options')

    # install nfs-common package
    if not filter_missing_packages(['nfs-common']):
        log("'nfs-common' package not found, installing the package...")
        apt_install(['nfs-common'], fatal=True)

    if not device:
        log("NFS mount device can not be empty."
            "Check 'nfs-shares' value in config")
        return False

    # Ensure mount directory exists
    mkdir(data_dir, owner=usr, group=grp, perms=501, force=True)

    # check for mountable device
    if not mount(device, data_dir, options=nfs_options, filesystem='nfs'):
        log("Unable to mount, please enter valid mount device")
        return False
    log("Device mounted successfully")
    umount(data_dir)
    log("Device unmounted successfully")
    return True
Example #8
def validate_nfs():
    """
    Validate the nfs mount device
    """
    usr = config('tvault-datamover-ext-usr')
    grp = config('tvault-datamover-ext-group')
    data_dir = config('tv-data-dir')
    device = config('nfs-shares')

    if not device:
        log("NFS mount device can not be empty."
            "Check 'nfs-shares' value in config")
        return False

    # Ensure mount directory exists
    mkdir(data_dir, owner=usr, group=grp, perms=501, force=True)

    # check for mountable device
    if not mount(device, data_dir, filesystem='nfs'):
        log("Unable to mount, please enter valid mount device")
        return False
    log("Device mounted successfully")
    umount(data_dir)
    log("Device unmounted successfully")
    return True
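
Both variants above return a boolean and log the reason for any failure; a caller typically translates the result into workload status. A minimal hook sketch (illustrative, not from the charm), assuming the usual charmhelpers Hooks registry and status_set:

hooks = Hooks()

@hooks.hook('config-changed')
def config_changed():
    if not validate_nfs():
        # validate_nfs() has already logged why the mount check failed.
        status_set('blocked', 'Invalid or unmountable nfs-shares configuration')
        return
    status_set('active', 'Unit is ready')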
Example #9
    def test_mounts_a_device_without_options(self, log, check_output):
        device = '/dev/guido'
        mountpoint = '/mnt/guido'

        result = host.mount(device, mountpoint)

        self.assertTrue(result)
        check_output.assert_called_with(['mount', '/dev/guido', '/mnt/guido'])
Example #10
def setup_storage():
    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift',
          group='swift',
          perms=0o755)
    reformat = str(config('overwrite')).lower() == "true"
    for dev in determine_block_devices():
        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            continue

        if reformat:
            clean_storage(dev)

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # forcing the format.
            log("Format device '%s' failed (%s) - continuing to next device" %
                (dev, exc),
                level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        options = None
        loopback_device = is_mapped_loopback_device(dev)

        if loopback_device:
            dev = loopback_device
            options = "loop, defaults"

        mountpoint = '/srv/node/%s' % basename
        filesystem = "xfs"

        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

        check_call(['chown', '-R', 'swift:swift', mountpoint])
        check_call(['chmod', '-R', '0755', mountpoint])
Example #11
def place_data_on_block_device(blk_device, data_src_dst):
    """Migrate data in data_src_dst to blk_device and then remount."""
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
Example #12
def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount '''
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
Example #13
def nfs_changed():
    # $ relation-get
    # fstype: nfs
    # mountpoint: /srv/data/reddit
    # options: rsize=8192,wsize=8192
    # private-address: 10.0.3.172

    # Install NFS dependencies
    apt_install(packages=['rpcbind', 'nfs-common'], fatal=True)

    fstype = hookenv.relation_get('fstype')
    mountpoint = hookenv.relation_get('mountpoint')
    options = hookenv.relation_get('options')
    privaddr = hookenv.relation_get('private-address')

    if options is None or fstype is None:
        return

    if nfs_is_mounted(mountpoint):
        log('NFS mountpoint %s is already mounted' % mountpoint)
        return

    # Create the local mountpoint
    if not os.path.exists(REDDIT_MEDIA):
        host.mkdir(REDDIT_MEDIA, REDDIT_USER, REDDIT_GROUP, 0o222)

    # Setup the NFS mount
    log("Mounting NFS at %s" % mountpoint)
    host.mount(
        '%s:%s' % (privaddr, mountpoint), REDDIT_MEDIA, options=options,
        persist=True, filesystem=fstype
    )

    # Make sure Reddit knows where to look for thumbnails,
    # subreddit stylesheets/images, and icons.
    add_to_ini(values={
        'media_provider': 'filesystem',
        'media_fs_root': REDDIT_MEDIA,
        'media_fs_base_url_http': '',
        'media_fs_base_url_https': '',
        'media_domain': 'localhost',
    })
    make_ini()

Example #14
def setup_storage():
    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift', group='swift',
          perms=0o755)
    reformat = str(config('overwrite')).lower() == "true"
    for dev in determine_block_devices():
        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            continue

        if reformat:
            clean_storage(dev)

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # forcing the format.
            log("Format device '%s' failed (%s) - continuing to next device" %
                (dev, exc), level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        options = None
        loopback_device = is_mapped_loopback_device(dev)

        if loopback_device:
            dev = loopback_device
            options = "loop, defaults"

        mountpoint = '/srv/node/%s' % basename
        filesystem = "xfs"

        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

    check_call(['chown', '-R', 'swift:swift', '/srv/node/'])
    check_call(['chmod', '-R', '0755', '/srv/node/'])
Example #15
    def test_mounts_a_device(self, log, check_output):
        device = '/dev/guido'
        mountpoint = '/mnt/guido'
        options = 'foo,bar'

        result = host.mount(device, mountpoint, options)

        self.assertTrue(result)
        check_output.assert_called_with(
            ['mount', '-o', 'foo,bar', '/dev/guido', '/mnt/guido'])
Example #16
def config_btrfs(dev):
    status_set('maintenance',
               'Configuring btrfs container storage')
    if has_storage():
        cmd = ['lxc', 'storage', 'create', LXD_POOL, 'btrfs',
               'source={}'.format(dev)]
        check_call(cmd)
    else:
        lxd_stop()
        cmd = ['mkfs.btrfs', '-f', dev]
        check_call(cmd)
        mount(dev,
              '/var/lib/lxd',
              options='user_subvol_rm_allowed',
              persist=True,
              filesystem='btrfs')
        cmd = ['btrfs', 'quota', 'enable', '/var/lib/lxd']
        check_call(cmd)
        lxd_start()
Example #17
def mount_volume(config):
    if os.path.exists(config['mountpoint']):
        if not os.path.isdir(config['mountpoint']):
            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
            raise VolumeConfigurationError()
    else:
        host.mkdir(config['mountpoint'])
    if os.path.ismount(config['mountpoint']):
        unmount_volume(config)
    if not host.mount(config['device'], config['mountpoint'], persist=True):
        raise VolumeConfigurationError()
Example #18
def mount_volume(config):
    if os.path.exists(config['mountpoint']):
        if not os.path.isdir(config['mountpoint']):
            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
            raise VolumeConfigurationError()
    else:
        host.mkdir(config['mountpoint'])
    if os.path.ismount(config['mountpoint']):
        unmount_volume(config)
    if not host.mount(config['device'], config['mountpoint'], persist=True):
        raise VolumeConfigurationError()
Example #19
def config_btrfs(dev):
    status_set('maintenance', 'Configuring btrfs container storage')
    if has_storage():
        cmd = [
            'lxc', 'storage', 'create', LXD_POOL, 'btrfs',
            'source={}'.format(dev)
        ]
        check_call(cmd)
    else:
        lxd_stop()
        cmd = ['mkfs.btrfs', '-f', dev]
        check_call(cmd)
        mount(dev,
              '/var/lib/lxd',
              options='user_subvol_rm_allowed',
              persist=True,
              filesystem='btrfs')
        cmd = ['btrfs', 'quota', 'enable', '/var/lib/lxd']
        check_call(cmd)
        lxd_start()
Example #20
def encrypt_device(device, mountpoint=None, uuid=None):
    """
    Set up encryption for the given block device, and optionally create and
    mount an XFS filesystem on the encrypted device.

    If ``mountpoint`` is not given, the device will not be formatted or
    mounted.  When interacting with or mounting the device manually, the
    name returned by :func:`decrypted_device` called on the device name
    should be used in place of the raw device name.
    """
    if not is_block_device(device):
        raise VaultLockerError('Cannot encrypt non-block device: {}', device)
    if is_device_mounted(device):
        raise VaultLockerError('Cannot encrypt mounted device: {}', device)
    hookenv.log('Encrypting device: {}'.format(device))
    if uuid is None:
        uuid = str(uuid4())
    try:
        check_call(['vaultlocker', 'encrypt', '--uuid', uuid, device])
        unitdata.kv().set('layer.vaultlocker.uuids.{}'.format(device), uuid)
        if mountpoint:
            mapped_device = decrypted_device(device)
            hookenv.log('Creating filesystem on {} ({})'.format(
                mapped_device, device))
            # If this fails, it's probably due to the size of the loopback
            # backing file that is defined by the `dd`.
            mkfs_xfs(mapped_device)
            Path(mountpoint).mkdir(mode=0o755, parents=True, exist_ok=True)
            hookenv.log('Mounting filesystem for {} ({}) at {}'
                        ''.format(mapped_device, device, mountpoint))
            host.mount(mapped_device, mountpoint, filesystem='xfs')
            host.fstab_add(
                mapped_device, mountpoint, 'xfs', ','.join([
                    "defaults",
                    "nofail",
                    "x-systemd.requires=vaultlocker-decrypt@{uuid}.service".
                    format(uuid=uuid, ),
                    "comment=vaultlocker",
                ]))
    except (CalledProcessError, OSError) as e:
        raise VaultLockerError('Error configuring VaultLocker') from e
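
Per the docstring, a caller that omits mountpoint must format and mount the mapped device itself. A minimal sketch (not from the layer), assuming decrypted_device() and mkfs_xfs come from the same module and that '/dev/vdb' and '/srv/secure' are placeholders:

    # Encrypt only; no filesystem is created or mounted by the helper.
    encrypt_device('/dev/vdb')

    # Later formatting and mounting must target the dm-crypt mapping,
    # not the raw device.
    mapped = decrypted_device('/dev/vdb')
    mkfs_xfs(mapped)
    Path('/srv/secure').mkdir(mode=0o755, parents=True, exist_ok=True)
    host.mount(mapped, '/srv/secure', filesystem='xfs')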
Example #21
    def test_doesnt_mount_on_error(self, log, check_output):
        device = '/dev/guido'
        mountpoint = '/mnt/guido'
        options = 'foo,bar'

        error = subprocess.CalledProcessError(123, 'mount it', 'Oops...')
        check_output.side_effect = error

        result = host.mount(device, mountpoint, options)

        self.assertFalse(result)
        check_output.assert_called_with(
            ['mount', '-o', 'foo,bar', '/dev/guido', '/mnt/guido'])
Example #22
def create_dir(data_log_dev,
               data_log_dir,
               data_log_fs,
               user,
               group,
               fs_options=None):

    if is_device_mounted(data_log_dev):
        logger.warning("Data device {} already mounted".format(data_log_dev))
        return
    lvm = None
    try:
        lvm = manage_lvm(data_log_dev, data_log_dir)
        if not lvm:
            return
    except DiskMapHelperPVAlreadyTakenForLVM:
        # Ignore this exception for now, it means there was
        # a rerun of disk_map.
        return

    if len(data_log_dir or "") == 0:
        logger.warning("Data log dir config empty")
        return
    if len(lvm or "") == 0:
        raise DiskMapHelperDeviceNotDefined(lvm)
    os.makedirs(data_log_dir, 0o750, exist_ok=True)
    logger.debug("Data device: mkfs -t {}".format(data_log_fs))
    cmd = ["mkfs", "-t", data_log_fs, lvm]
    opts = fs_options
    subprocess.check_call(cmd)
    logger.debug("mount {} to {} with options {} and fs {}".format(
        data_log_dir, lvm, opts, data_log_fs))
    mount(lvm,
          data_log_dir,
          options=opts,
          persist=True,
          filesystem=data_log_fs)
    shutil.chown(data_log_dir, user=user, group=group)
Example #23
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
    # mount block device into /mnt
    mount(blk_device, '/mnt')

    # copy data to /mnt
    try:
        copy_files(data_src_dst, '/mnt')
    except Exception:
        # Best-effort copy; ignore failures and continue with the remount.
        pass

    # umount block device
    umount('/mnt')

    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid

    # re-mount where the data should originally be
    mount(blk_device, data_src_dst, persist=True)

    # ensure original ownership of new mount.
    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
    check_call(cmd)
Example #24
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
    # mount block device into /mnt
    mount(blk_device, '/mnt')

    # copy data to /mnt
    try:
        copy_files(data_src_dst, '/mnt')
    except Exception:
        # Best-effort copy; ignore failures and continue with the remount.
        pass

    # umount block device
    umount('/mnt')

    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid

    # re-mount where the data should originally be
    mount(blk_device, data_src_dst, persist=True)

    # ensure original ownership of new mount.
    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
    check_call(cmd)
Example #25
def validate_nfs():
    """
    Validate the nfs mount device
    """
    usr = DM_EXT_USR
    grp = DM_EXT_GRP
    data_dir = TV_DATA_DIR
    device = config('nfs-shares')

    # install nfs-common package
    if not filter_missing_packages(['nfs-common']):
        log("'nfs-common' package not found, installing the package...")
        apt_install(['nfs-common'], fatal=True)

    if not device:
        log("NFS shares can not be empty."
            "Check 'nfs-shares' value in config")
        status_set(
            'blocked',
            'No valid nfs-shares configuration found, please recheck')
        return False

    # Ensure mount directory exists
    mkdir(data_dir, owner=usr, group=grp, perms=501, force=True)

    # check for mountable device
    if not mount(device, data_dir, filesystem='nfs'):
        log("Unable to mount, please enter valid mount device")
        status_set(
            'blocked',
            'Failed while validating NFS mount, please recheck configuration')
        return False
    log("Device mounted successfully")
    umount(data_dir)
    log("Device unmounted successfully")
    return True
Example #26
def configure_lxd_block():
    '''Configure a block device for use by LXD for containers'''
    log('Configuring LXD container storage')
    if filesystem_mounted('/var/lib/lxd'):
        log('/var/lib/lxd already configured, skipping')
        return

    lxd_block_devices = get_block_devices()
    if len(lxd_block_devices) < 1:
        log('block devices not provided - skipping')
        return
    if len(lxd_block_devices) > 1:
        log("More than one block device is not supported yet, only"
            " using the first")
    lxd_block_device = lxd_block_devices[0]

    dev = None
    if lxd_block_device.startswith('/dev/'):
        dev = lxd_block_device
    elif lxd_block_device.startswith('/'):
        log('Configuring loopback device for use with LXD')
        _bd = lxd_block_device.split('|')
        if len(_bd) == 2:
            dev, size = _bd
        else:
            dev = lxd_block_device
            size = DEFAULT_LOOPBACK_SIZE
        dev = ensure_loopback_device(dev, size)

    if not dev or not is_block_device(dev):
        log('Invalid block device provided: %s' % lxd_block_device)
        return

    # NOTE: check overwrite and ensure it is only executed once.
    db = kv()
    if config('overwrite') and not db.get('scrubbed', False):
        clean_storage(dev)
        db.set('scrubbed', True)
        db.flush()

    if not os.path.exists('/var/lib/lxd'):
        mkdir('/var/lib/lxd')

    if config('storage-type') == 'btrfs':
        status_set('maintenance', 'Configuring btrfs container storage')
        lxd_stop()
        cmd = ['mkfs.btrfs', '-f', dev]
        check_call(cmd)
        mount(dev,
              '/var/lib/lxd',
              options='user_subvol_rm_allowed',
              persist=True,
              filesystem='btrfs')
        cmd = ['btrfs', 'quota', 'enable', '/var/lib/lxd']
        check_call(cmd)
        lxd_start()
    elif config('storage-type') == 'lvm':
        if (is_lvm_physical_volume(dev)
                and list_lvm_volume_group(dev) == 'lxd_vg'):
            log('Device already configured for LVM/LXD, skipping')
            return
        status_set('maintenance', 'Configuring LVM container storage')
        # Enable and start lvm2-lvmetad to avoid extra output
        # in lvm2 commands, which confuses lxd.
        cmd = ['systemctl', 'enable', 'lvm2-lvmetad']
        check_call(cmd)
        cmd = ['systemctl', 'start', 'lvm2-lvmetad']
        check_call(cmd)
        create_lvm_physical_volume(dev)
        create_lvm_volume_group('lxd_vg', dev)
        cmd = ['lxc', 'config', 'set', 'storage.lvm_vg_name', 'lxd_vg']
        check_call(cmd)

        # The LVM thinpool logical volume is lazily created, either on
        # image import or container creation. This will force LV creation.
        create_and_import_busybox_image()
    elif config('storage-type') == 'zfs':
        status_set('maintenance', 'Configuring zfs container storage')
        if ZFS_POOL_NAME in zpools():
            log('ZFS pool already exists; skipping zfs configuration')
            return

        if config('overwrite'):
            cmd = ['zpool', 'create', '-f', ZFS_POOL_NAME, dev]
        else:
            cmd = ['zpool', 'create', ZFS_POOL_NAME, dev]
        check_call(cmd)

        cmd = ['lxc', 'config', 'set', 'storage.zfs_pool_name', ZFS_POOL_NAME]
        check_call(cmd)
Example #27
def setup_storage(encrypt=False):
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=90)

    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift', group='swift',
          perms=0o755)
    reformat = str(config('overwrite')).lower() == "true"

    db = kv()
    prepared_devices = db.get('prepared-devices', [])

    for dev in determine_block_devices():
        if dev in prepared_devices:
            log('Device {} already processed by charm,'
                ' skipping'.format(dev))
            continue

        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            # NOTE: record existing use of device dealing with
            #       upgrades from older versions of charms without
            #       this feature
            prepared_devices.append(dev)
            db.set('prepared-devices', prepared_devices)
            db.flush()
            continue

        # NOTE: this deals with a dm-crypt'ed block device already in
        #       use
        if is_device_mounted(dev):
            log("Device '{}' is already mounted, ignoring".format(dev))
            continue

        if reformat:
            clean_storage(dev)

        loopback_device = is_mapped_loopback_device(dev)
        options = None

        if encrypt and not loopback_device:
            dev_uuid = str(uuid.uuid4())
            check_call(['vaultlocker', 'encrypt',
                        '--uuid', dev_uuid,
                        dev])
            dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
            options = ','.join([
                "defaults",
                "nofail",
                ("x-systemd.requires="
                 "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
                "comment=vaultlocker",
            ])

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # forcing the format.
            log("Format device '%s' failed (%s) - continuing to next device" %
                (dev, exc), level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        mountpoint = '/srv/node/%s' % basename
        if loopback_device:
            # If an fstab entry already exists using the image file as the
            # source then preserve it, otherwise use the loopback device
            # directly to avoid a second implicit loopback device being
            # created on mount. Bug #1762390
            fstab = charmhelpers.core.fstab.Fstab()
            fstab_entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
            if fstab_entry and loopback_device == fstab_entry.device:
                dev = loopback_device
            options = "loop,nofail,defaults"

        filesystem = "xfs"

        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

        check_call(['chown', '-R', 'swift:swift', mountpoint])
        check_call(['chmod', '-R', '0755', mountpoint])

        # NOTE: record preparation of device - this will be used when
        #       providing block device configuration for ring builders.
        prepared_devices.append(dev)
        db.set('prepared-devices', prepared_devices)
        db.flush()
Example #28
def configure_local_ephemeral_storage():
    """Configure local block device for use as ephemeral instance storage"""
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    encrypt = config('encrypt')
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=80)

    db = kv()
    storage_configured = db.get('storage-configured', False)
    if storage_configured:
        log("Ephemeral storage already configured, skipping", level=DEBUG)
        return

    dev = determine_block_device()

    if not dev:
        log('No block device configuration found, skipping', level=DEBUG)
        return

    if not is_block_device(dev):
        log("Device '{}' is not a block device, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    # NOTE: this deals with a dm-crypt'ed block device already in
    #       use
    if is_device_mounted(dev):
        log("Device '{}' is already mounted, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    options = None
    if encrypt:
        dev_uuid = str(uuid.uuid4())
        check_call(['vaultlocker', 'encrypt', '--uuid', dev_uuid, dev])
        dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
        options = ','.join([
            "defaults",
            "nofail",
            ("x-systemd.requires="
             "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
            "comment=vaultlocker",
        ])

    # If not cleaned and in use, mkfs should fail.
    mkfs_xfs(dev, force=True)

    mountpoint = '/var/lib/nova/instances'
    filesystem = "xfs"
    mount(dev, mountpoint, filesystem=filesystem)
    fstab_add(dev, mountpoint, filesystem, options=options)

    check_call(['chown', '-R', 'nova:nova', mountpoint])
    check_call(['chmod', '-R', '0755', mountpoint])

    # NOTE: record preparation of device - this ensures that ephemeral
    #       storage is never reconfigured by mistake, losing instance disks
    db.set('storage-configured', True)
    db.flush()
Example #29
def configure_lxd_block():
    '''Configure a block device for use by LXD for containers'''
    log('Configuring LXD container storage')
    if filesystem_mounted('/var/lib/lxd'):
        log('/var/lib/lxd already configured, skipping')
        return

    lxd_block_devices = get_block_devices()
    if len(lxd_block_devices) < 1:
        log('block devices not provided - skipping')
        return
    if len(lxd_block_devices) > 1:
        raise NotImplementedError('Multiple block devices are not supported.')
    lxd_block_device = lxd_block_devices[0]

    dev = None
    if lxd_block_device.startswith('/dev/'):
        dev = lxd_block_device
    elif lxd_block_device.startswith('/'):
        log('Configuring loopback device for use with LXD')
        _bd = lxd_block_device.split('|')
        if len(_bd) == 2:
            dev, size = _bd
        else:
            dev = lxd_block_device
            size = DEFAULT_LOOPBACK_SIZE
        dev = ensure_loopback_device(dev, size)

    if not dev or not is_block_device(dev):
        log('Invalid block device provided: %s' % lxd_block_device)
        return

    # NOTE: check overwrite and ensure it is only executed once.
    db = kv()
    if config('overwrite') and not db.get('scrubbed', False):
        clean_storage(dev)
        db.set('scrubbed', True)
        db.flush()

    if not os.path.exists('/var/lib/lxd'):
        mkdir('/var/lib/lxd')

    if config('storage-type') == 'btrfs':
        status_set('maintenance',
                   'Configuring btrfs container storage')
        lxd_stop()
        cmd = ['mkfs.btrfs', '-f', dev]
        check_call(cmd)
        mount(dev,
              '/var/lib/lxd',
              options='user_subvol_rm_allowed',
              persist=True,
              filesystem='btrfs')
        cmd = ['btrfs', 'quota', 'enable', '/var/lib/lxd']
        check_call(cmd)
        lxd_start()
    elif config('storage-type') == 'lvm':
        if (is_lvm_physical_volume(dev) and
                list_lvm_volume_group(dev) == 'lxd_vg'):
            log('Device already configured for LVM/LXD, skipping')
            return
        status_set('maintenance',
                   'Configuring LVM container storage')
        # Enable and start lvm2-lvmetad to avoid extra output
        # in lvm2 commands, which confuses lxd.
        cmd = ['systemctl', 'enable', 'lvm2-lvmetad']
        check_call(cmd)
        cmd = ['systemctl', 'start', 'lvm2-lvmetad']
        check_call(cmd)
        create_lvm_physical_volume(dev)
        create_lvm_volume_group('lxd_vg', dev)
        cmd = ['lxc', 'config', 'set', 'storage.lvm_vg_name', 'lxd_vg']
        check_call(cmd)

        # The LVM thinpool logical volume is lazily created, either on
        # image import or container creation. This will force LV creation.
        create_and_import_busybox_image()
    elif config('storage-type') == 'zfs':
        status_set('maintenance',
                   'Configuring zfs container storage')
        if ZFS_POOL_NAME in zpools():
            log('ZFS pool already exists; skipping zfs configuration')
            return

        if config('overwrite'):
            cmd = ['zpool', 'create', '-f', ZFS_POOL_NAME, dev]
        else:
            cmd = ['zpool', 'create', ZFS_POOL_NAME, dev]
        check_call(cmd)

        cmd = ['lxc', 'config', 'set', 'storage.zfs_pool_name',
               ZFS_POOL_NAME]
        check_call(cmd)