Example #1
def register_configs():
    release = get_os_codename_package('swift', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    configs.register('/etc/swift/swift.conf',
                     [SwiftStorageContext()])
    configs.register('/etc/rsync-juju.d/050-swift-storage.conf',
                     [RsyncContext(), SwiftStorageServerContext()])
    # NOTE: add VaultKVContext so interface status can be assessed
    for server in ['account', 'object', 'container']:
        contexts = [SwiftStorageServerContext(),
                    context.BindHostContext(),
                    context.WorkerConfigContext()]

        # if vault deps are not installed it is not yet possible to check the
        # vault context status since it requires the hvac dependency.
        if vaultlocker_installed():
            contexts.append(vaultlocker.VaultKVContext(
                            vaultlocker.VAULTLOCKER_BACKEND))

        configs.register('/etc/swift/%s-server.conf' % server, contexts)

        if enable_replication():
            configs.register(
                '/etc/swift/{svc}-server/{svc}-server-replicator.conf'.format(
                    svc=server),
                contexts)

    return configs
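The vaultlocker_installed() guard above is not defined in this excerpt. A minimal sketch of what it is assumed to check, per the comment in the loop (assessing VaultKVContext status requires the hvac dependency); this is a hypothetical helper, not the charm's verbatim implementation:

def vaultlocker_installed():
    # Assumed behaviour: treat "vault deps installed" as "hvac is
    # importable", since VaultKVContext needs hvac to assess the relation.
    try:
        import hvac  # noqa: F401
        return True
    except ImportError:
        return False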
Example #2
def prepare_disks_and_activate():
    # NOTE: vault/vaultlocker preflight check
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if use_vaultlocker() and not vault_kv.complete:
        log('Deferring OSD preparation as vault not ready', level=DEBUG)
        return
    elif use_vaultlocker() and vault_kv.complete:
        log('Vault ready, writing vaultlocker configuration', level=DEBUG)
        vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'.format(
        touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [
        dev for dev in devices if not ceph.is_active_bluestore_device(dev)
    ]

    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set(
            'blocked', 'Non-pristine devices detected, consult '
            '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if ceph.is_bootstrapped():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'), osd_journal,
                        config('ignore-device-errors'), config('osd-encrypt'),
                        config('bluestore'), config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())
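use_vaultlocker() is referenced above but not shown. A hedged sketch of what it plausibly checks, based on the osd-encrypt options this charm passes to ceph.osdize(); an assumption, not the charm's verbatim helper:

def use_vaultlocker():
    # Assumed mapping: vaultlocker only applies when OSD encryption is
    # enabled and vault is the selected key manager.
    return (config('osd-encrypt') and
            config('osd-encrypt-keymanager') == 'vault')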
Example #3
def test_context_complete(self):
    self._setup_relation(COMPLETE_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    self.assertEqual(
        context(), {
            'role_id': 'test-role-from-vault',
            'secret_backend': 'charm-test',
            'vault_url': 'http://vault:8200'
        })
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertTrue(vaultlocker.vault_relation_complete())
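The COMPLETE_RELATION fixture is not reproduced in these excerpts. Judging from the assertions, it presumably carries at least a role id and the vault URL on the secrets-storage relation; the layout below is purely hypothetical:

COMPLETE_RELATION = {
    'secrets-storage:1': {
        'vault/0': {
            # Keys inferred from the asserted context(); the real fixture
            # (including any JSON encoding of values) may differ.
            'vault_url': 'http://vault:8200',
            'role_id': 'test-role-from-vault',
        }
    }
}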
Example #4
def test_context_complete_cached_secret_id(self, retrieve_secret_id):
    self._setup_relation(COMPLETE_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    self.db.set('last-token', '00c9a9ab-c523-459d-a250-2ce8f0877c03')
    self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112')
    self.assertEqual(
        context(), {
            'role_id': 'test-role-from-vault',
            'secret_backend': 'charm-test',
            'secret_id': '5502fd27-059b-4b0a-91b2-eaff40b6a112',
            'vault_url': 'http://vault:8200'
        })
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertTrue(vaultlocker.vault_relation_complete())
    retrieve_secret_id.assert_not_called()
Example #5
def test_context_complete(self, retrieve_secret_id):
    self._setup_relation(COMPLETE_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    retrieve_secret_id.return_value = 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2'
    self.assertEqual(
        context(), {
            'role_id': 'test-role-from-vault',
            'secret_backend': 'charm-test',
            'secret_id': 'a3551c8d-0147-4cb6-afc6-efb3db2fccb2',
            'vault_url': 'http://vault:8200'
        })
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertTrue(vaultlocker.vault_relation_complete())
    retrieve_secret_id.assert_called_once_with(
        url='http://vault:8200',
        token='00c9a9ab-c523-459d-a250-2ce8f0877c03')
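Taken together, examples #4 and #5 pin down a token-caching behaviour: a new secret id is only retrieved from vault when the relation token differs from the one cached in unitdata. A minimal sketch of that logic, consistent with these tests but not the actual charmhelpers implementation:

def _get_secret_id(db, vault_url, token):
    # Only call out to vault when the token changed since the last hook;
    # otherwise reuse the secret id cached in unitdata (example #4).
    if token != db.get('last-token'):
        secret_id = retrieve_secret_id(url=vault_url, token=token)
        db.set('secret-id', secret_id)
        db.set('last-token', token)
        db.flush()
    else:
        secret_id = db.get('secret-id')
    return secret_id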
Example #6
def register_configs():
    release = get_os_codename_package('python-swift', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    configs.register('/etc/swift/swift.conf',
                     [SwiftStorageContext()])
    configs.register('/etc/rsync-juju.d/050-swift-storage.conf',
                     [RsyncContext(), SwiftStorageServerContext()])
    # NOTE: add VaultKVContext so interface status can be assessed
    for server in ['account', 'object', 'container']:
        configs.register('/etc/swift/%s-server.conf' % server,
                         [SwiftStorageServerContext(),
                          context.BindHostContext(),
                          context.WorkerConfigContext(),
                          vaultlocker.VaultKVContext(
                              vaultlocker.VAULTLOCKER_BACKEND)])
    return configs
Example #7
def test_context_complete_cached_secret_id(self, retrieve_secret_id):
    self._setup_relation(COMPLETE_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112')
    self.good_token = 'invalid-token'  # i.e. cause failure
    retrieve_secret_id.side_effect = self.fake_retrieve_secret_id
    self.assertEqual(
        context(), {
            'role_id': 'test-role-from-vault',
            'secret_backend': 'charm-test',
            'secret_id': '5502fd27-059b-4b0a-91b2-eaff40b6a112',
            'vault_url': 'http://vault:8200'
        })
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertTrue(vaultlocker.vault_relation_complete())
    calls = [
        mock.call(url='http://vault:8200',
                  token='00c9a9ab-c523-459d-a250-2ce8f0877c03')
    ]
    retrieve_secret_id.assert_has_calls(calls)
Example #8
def test_context_complete_cached_dirty_data(self, retrieve_secret_id):
    self._setup_relation(DIRTY_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    self.db.set('secret-id', '5502fd27-059b-4b0a-91b2-eaff40b6a112')
    self.good_token = '67b36149-dc86-4b80-96c4-35b91847d16e'
    retrieve_secret_id.side_effect = self.fake_retrieve_secret_id
    self.assertEqual(
        context(), {
            'role_id': 'test-role-from-vault',
            'secret_backend': 'charm-test',
            'secret_id': '31be8e65-20a3-45e0-a4a8-4d5a0554fb60',
            'vault_url': 'http://vault:8200'
        })
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertTrue(vaultlocker.vault_relation_complete())
    self.assertEqual(self.db.get('secret-id'),
                     '31be8e65-20a3-45e0-a4a8-4d5a0554fb60')
    calls = [
        mock.call(url='http://vault:8200',
                  token='67b36149-dc86-4b80-96c4-35b91847d16e')
    ]
    retrieve_secret_id.assert_has_calls(calls)
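fake_retrieve_secret_id() is not shown in these excerpts. A sketch consistent with how examples #7 and #8 drive it via self.good_token; how the real helper signals a token mismatch (exception, fallback value) is an open assumption:

def fake_retrieve_secret_id(self, url=None, token=None):
    # Hypothetical: hand back the fresh secret id only for the expected
    # token, so the cached secret-id survives a mismatch (example #7) and
    # is replaced on a match (example #8).
    if token == self.good_token:
        return '31be8e65-20a3-45e0-a4a8-4d5a0554fb60'
    raise RuntimeError('token mismatch')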
Example #9
def configure_local_ephemeral_storage():
    """Configure local block device for use as ephemeral instance storage"""
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    encrypt = config('encrypt')
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=80)

    db = kv()
    storage_configured = db.get('storage-configured', False)
    if storage_configured:
        log("Ephemeral storage already configured, skipping", level=DEBUG)
        return

    dev = determine_block_device()

    if not dev:
        log('No block device configuration found, skipping', level=DEBUG)
        return

    if not is_block_device(dev):
        log("Device '{}' is not a block device, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    # NOTE: this deals with a dm-crypt'ed block device already in
    #       use
    if is_device_mounted(dev):
        log("Device '{}' is already mounted, "
            "unable to configure storage".format(dev),
            level=DEBUG)
        return

    options = None
    if encrypt:
        dev_uuid = str(uuid.uuid4())
        check_call(['vaultlocker', 'encrypt', '--uuid', dev_uuid, dev])
        dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
        options = ','.join([
            "defaults",
            "nofail",
            ("x-systemd.requires="
             "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
            "comment=vaultlocker",
        ])

    # If not cleaned and in use, mkfs should fail.
    mkfs_xfs(dev, force=True)

    mountpoint = '/var/lib/nova/instances'
    filesystem = "xfs"
    mount(dev, mountpoint, filesystem=filesystem)
    fstab_add(dev, mountpoint, filesystem, options=options)

    check_call(['chown', '-R', 'nova:nova', mountpoint])
    check_call(['chmod', '-R', '0755', mountpoint])

    # NOTE: record preparation of device - this ensures that ephemeral
    #       storage is never reconfigured by mistake, losing instance disks
    db.set('storage-configured', True)
    db.flush()
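For illustration, with a hypothetical UUID the entry produced by fstab_add() above would look roughly like the following; the x-systemd.requires option orders the mount after the vaultlocker decrypt service so the dm-crypt mapping exists at boot:

# /etc/fstab (illustrative only)
/dev/mapper/crypt-<uuid>  /var/lib/nova/instances  xfs  defaults,nofail,x-systemd.requires=vaultlocker-decrypt@<uuid>.service,comment=vaultlocker  0  0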
Example #10
        ],
                                          service=['nova-compute', 'nova'],
                                          config_file=NOVA_CONF),
    InstanceConsoleContext(),
    context.ZeroMQContext(),
    context.NotificationDriverContext(),
    MetadataServiceContext(),
    HostIPContext(),
    NovaComputeVirtContext(),
    context.LogLevelContext(),
    context.InternalEndpointContext(),
    context.VolumeAPIContext('nova-common'),
    SerialConsoleContext(),
    NovaComputeAvailabilityZoneContext(),
    context.WorkerConfigContext(),
    vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND),
    context.IdentityCredentialsContext(rel_name='cloud-credentials')
    ],
},
NOVA_API_AA_PROFILE_PATH: {
    'services': ['nova-api'],
    'contexts': [NovaAPIAppArmorContext()],
},
NOVA_COMPUTE_AA_PROFILE_PATH: {
    'services': ['nova-compute'],
    'contexts': [NovaComputeAppArmorContext()],
},
NOVA_NETWORK_AA_PROFILE_PATH: {
    'services': ['nova-network'],
    'contexts': [NovaNetworkAppArmorContext()],
},
Example #11
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # TODO: Cache this on first call?
    if config('virt-type').lower() == 'lxd':
        resource_map = deepcopy(BASE_RESOURCE_MAP)
    else:
        resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)

    # if vault deps are not installed it is not yet possible to check the vault
    # context status since it requires the hvac dependency.
    if not vaultlocker_installed():
        to_delete = []
        for item in resource_map[NOVA_CONF]['contexts']:
            if isinstance(item, vaultlocker.VaultKVContext):
                to_delete.append(item)

        for item in to_delete:
            resource_map[NOVA_CONF]['contexts'].remove(item)

    net_manager = network_manager()

    # Network manager gets set late by the cloud-compute interface.
    # FlatDHCPManager only requires some extra packages.
    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if (net_manager in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'
            and cmp_os_release < 'ocata'):
        resource_map[NOVA_CONF]['services'].extend(
            ['nova-api', 'nova-network'])
    else:
        resource_map.pop(NOVA_API_AA_PROFILE_PATH)
        resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)

    cmp_distro_codename = CompareHostReleases(
        lsb_release()['DISTRIB_CODENAME'].lower())
    if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
        for data in resource_map.values():
            if LIBVIRT_BIN_DAEMON in data['services']:
                data['services'].remove(LIBVIRT_BIN_DAEMON)
                data['services'].append(LIBVIRTD_DAEMON)

    # Neutron/quantum requires additional contexts, as well as new resources
    # depending on the plugin used.
    # NOTE(james-page): only required for ovs plugin right now
    if net_manager in ['neutron', 'quantum']:
        resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())

    if relation_ids('ceph'):
        CEPH_RESOURCES[ceph_config_file()] = {
            'contexts': [NovaComputeCephContext()],
            'services': ['nova-compute']
        }
        resource_map.update(CEPH_RESOURCES)

    enable_nova_metadata, _ = nova_metadata_requirement()
    if enable_nova_metadata:
        resource_map[NOVA_CONF]['services'].append('nova-api-metadata')

    # NOTE(james-page): If not on an upstart based system, don't write
    #                   an override file for libvirt-bin.
    if not os.path.exists('/etc/init'):
        del resource_map[LIBVIRT_BIN_OVERRIDES]

    return resource_map
Example #12
def setup_storage(encrypt=False):
    # Preflight check vault relation if encryption is enabled
    vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
    context = vault_kv()
    if encrypt and not vault_kv.complete:
        log("Encryption requested but vault relation not complete",
            level=DEBUG)
        return
    elif encrypt and vault_kv.complete:
        # NOTE: only write vaultlocker configuration once relation is complete
        #       otherwise we run the chance of an empty configuration file
        #       being installed on a machine with other vaultlocker based
        #       services
        vaultlocker.write_vaultlocker_conf(context, priority=90)

    # Ensure /srv/node exists just in case no disks
    # are detected and used.
    mkdir(os.path.join('/srv', 'node'),
          owner='swift', group='swift',
          perms=0o755)
    reformat = str(config('overwrite')).lower() == "true"

    db = kv()
    prepared_devices = db.get('prepared-devices', [])

    for dev in determine_block_devices():
        if dev in prepared_devices:
            log('Device {} already processed by charm,'
                ' skipping'.format(dev))
            continue

        if is_device_in_ring(os.path.basename(dev)):
            log("Device '%s' already in the ring - ignoring" % (dev))
            # NOTE: record existing use of device dealing with
            #       upgrades from older versions of charms without
            #       this feature
            prepared_devices.append(dev)
            db.set('prepared-devices', prepared_devices)
            db.flush()
            continue

        # NOTE: this deals with a dm-crypt'ed block device already in
        #       use
        if is_device_mounted(dev):
            log("Device '{}' is already mounted, ignoring".format(dev))
            continue

        if reformat:
            clean_storage(dev)

        loopback_device = is_mapped_loopback_device(dev)
        options = None

        if encrypt and not loopback_device:
            dev_uuid = str(uuid.uuid4())
            check_call(['vaultlocker', 'encrypt',
                        '--uuid', dev_uuid,
                        dev])
            dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
            options = ','.join([
                "defaults",
                "nofail",
                ("x-systemd.requires="
                 "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
                "comment=vaultlocker",
            ])

        try:
            # If not cleaned and in use, mkfs should fail.
            mkfs_xfs(dev, force=reformat)
        except subprocess.CalledProcessError as exc:
            # This is expected if a formatted device is provided and we are
            # forcing the format.
            log("Format device '%s' failed (%s) - continuing to next device" %
                (dev, exc), level=WARNING)
            continue

        basename = os.path.basename(dev)
        _mp = os.path.join('/srv', 'node', basename)
        mkdir(_mp, owner='swift', group='swift')

        mountpoint = '/srv/node/%s' % basename
        if loopback_device:
            # If an existing fstab entry uses the image file as the
            # source then preserve it, otherwise use the loopback device
            # directly to avoid a second implicit loopback device being
            # created on mount. Bug #1762390
            fstab = charmhelpers.core.fstab.Fstab()
            fstab_entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
            if fstab_entry and loopback_device == fstab_entry.device:
                dev = loopback_device
            options = "loop,nofail,defaults"

        filesystem = "xfs"

        mount(dev, mountpoint, filesystem=filesystem)
        fstab_add(dev, mountpoint, filesystem, options=options)

        check_call(['chown', '-R', 'swift:swift', mountpoint])
        check_call(['chmod', '-R', '0755', mountpoint])

        # NOTE: record preparation of device - this will be used when
        #       providing block device configuration for ring builders.
        prepared_devices.append(dev)
        db.set('prepared-devices', prepared_devices)
        db.flush()
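is_mapped_loopback_device() is referenced above but not shown. A hedged sketch of the assumed behaviour, returning the backing file for a mapped loopback device and an empty string otherwise; illustrative only, not the charmhelpers implementation:

import subprocess

def is_mapped_loopback_device(device):
    # Parse `losetup -a` lines of the form
    #   /dev/loop0: [0049]:1234 (/srv/swift.img)
    # and return the backing file when `device` is a mapped loop device.
    out = subprocess.check_output(['losetup', '-a']).decode()
    for line in out.splitlines():
        dev, _, rest = line.partition(':')
        if dev.strip() == device and '(' in rest:
            return rest[rest.rfind('(') + 1:rest.rfind(')')]
    return ''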
Example #13
def prepare_disks_and_activate():
    if use_vaultlocker():
        # NOTE: vault/vaultlocker preflight check
        vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
        context = vault_kv()
        if not vault_kv.complete:
            log('Deferring OSD preparation as vault not ready', level=DEBUG)
            return
        else:
            log('Vault ready, writing vaultlocker configuration', level=DEBUG)
            vaultlocker.write_vaultlocker_conf(context)

    osd_journal = get_journal_devices()
    if not osd_journal.isdisjoint(set(get_devices())):
        raise ValueError('`osd-journal` and `osd-devices` options must not '
                         'overlap.')
    log("got journal devs: {}".format(osd_journal), level=DEBUG)

    # pre-flight check of eligible device pristinity
    devices = get_devices()

    # if a device has been previously touched we need to consider it as
    # non-pristine. If it needs to be re-processed it has to be zapped
    # via the respective action which also clears the unitdata entry.
    db = kv()
    touched_devices = db.get('osd-devices', [])
    devices = [dev for dev in devices if dev not in touched_devices]
    log('Skipping osd devices previously processed by this unit: {}'.format(
        touched_devices))
    # filter osd-devices that are file system paths
    devices = [dev for dev in devices if dev.startswith('/dev')]
    # filter osd-devices that do not exist on this unit
    devices = [dev for dev in devices if os.path.exists(dev)]
    # filter osd-devices that are already mounted
    devices = [dev for dev in devices if not is_device_mounted(dev)]
    # filter osd-devices that are active bluestore devices
    devices = [
        dev for dev in devices if not ceph.is_active_bluestore_device(dev)
    ]
    # filter osd-devices that are used as dmcrypt devices
    devices = [dev for dev in devices if not ceph.is_mapped_luks_device(dev)]

    log('Checking for pristine devices: "{}"'.format(devices), level=DEBUG)
    if not all(ceph.is_pristine_disk(dev) for dev in devices):
        status_set(
            'blocked', 'Non-pristine devices detected, consult '
            '`list-disks`, `zap-disk` and `blacklist-*` actions.')
        return

    if is_osd_bootstrap_ready():
        log('ceph bootstrapped, rescanning disks')
        emit_cephconf()
        bluestore = use_bluestore()
        ceph.udevadm_settle()
        for dev in get_devices():
            ceph.osdize(dev, config('osd-format'), osd_journal,
                        config('ignore-device-errors'), config('osd-encrypt'),
                        bluestore, config('osd-encrypt-keymanager'))
            # Make it fast!
            if config('autotune'):
                log(
                    'The autotune config is deprecated and planned '
                    'for removal in the next release.',
                    level=WARNING)
                ceph.tune_dev(dev)
        ceph.start_osds(get_devices())

    # Notify MON cluster as to how many OSDs this unit bootstrapped
    # into the cluster
    for r_id in relation_ids('mon'):
        relation_set(relation_id=r_id,
                     relation_settings={
                         'bootstrapped-osds':
                         len(db.get('osd-devices', [])),
                         'ceph_release':
                         ceph.resolve_ceph_version(
                             hookenv.config('source') or 'distro')
                     })
Example #14
def test_context_incomplete(self):
    self._setup_relation(INCOMPLETE_RELATION)
    context = vaultlocker.VaultKVContext('charm-test')
    self.assertEqual(context(), {})
    self.hookenv.relation_ids.assert_called_with('secrets-storage')
    self.assertFalse(vaultlocker.vault_relation_complete())
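By analogy with COMPLETE_RELATION, INCOMPLETE_RELATION presumably omits one of the keys the context requires; a purely hypothetical layout:

INCOMPLETE_RELATION = {
    'secrets-storage:1': {
        'vault/0': {
            # role_id (and any other required keys) deliberately absent,
            # so context() returns {} above.
            'vault_url': 'http://vault:8200',
        }
    }
}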