Example #1
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        rq.add_op_create_pool(name=name,
                              replica_count=replicas,
                              weight=weight,
                              group='vms')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
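
For context, here is a minimal sketch of how a charm hook typically consumes the request built above, using the same charmhelpers broker helpers that the ceph_changed example below relies on. The function name handle_broker_request is hypothetical; is_request_complete and send_request_if_needed come from charmhelpers.contrib.storage.linux.ceph.

from charmhelpers.contrib.storage.linux.ceph import (
    is_request_complete,
    send_request_if_needed,
)
from charmhelpers.core.hookenv import log

def handle_broker_request():
    # Hypothetical wrapper: build the pool-creation/access request and
    # (re)send it to the ceph-mon units only if an equivalent request
    # has not already been satisfied.
    rq = get_ceph_request()
    if is_request_complete(rq):
        log('Ceph broker request already satisfied')
    else:
        send_request_if_needed(rq)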

Example #2
def ceph_changed(rid=None, unit=None):
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(
            service=service_name(), user='******', group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this could move into NovaComputeCephContext
    # and be easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=key)

    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted, since only now can
            # we guarantee that Ceph resources are ready - but skip the
            # restart if the unit is paused.
            if not is_unit_paused_set():
                service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())

Example #3
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_rbd_imagebackend_allowed()):
        name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        rq.add_op_create_pool(name=name, replica_count=replicas, weight=weight,
                              group='vms')
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(name="volumes",
                                          permission='rwx')
        rq.add_op_request_access_to_group(name="images",
                                          permission='rwx')
        rq.add_op_request_access_to_group(name="vms",
                                          permission='rwx')
    return rq

Example #4
def get_ceph_request():
    rq = CephBrokerRq()
    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        pool_name = config('rbd-pool')
        replicas = config('ceph-osd-replication-count')
        weight = config('ceph-pool-weight')
        bluestore_compression = ch_context.CephBlueStoreCompressionContext()

        if config('pool-type') == 'erasure-coded':
            # General EC plugin config
            plugin = config('ec-profile-plugin')
            technique = config('ec-profile-technique')
            device_class = config('ec-profile-device-class')
            metadata_pool_name = (config('ec-rbd-metadata-pool')
                                  or "{}-metadata".format(pool_name))
            bdm_k = config('ec-profile-k')
            bdm_m = config('ec-profile-m')
            # LRC plugin config
            bdm_l = config('ec-profile-locality')
            crush_locality = config('ec-profile-crush-locality')
            # SHEC plugin config
            bdm_c = config('ec-profile-durability-estimator')
            # CLAY plugin config
            bdm_d = config('ec-profile-helper-chunks')
            scalar_mds = config('ec-profile-scalar-mds')
            # Profile name
            profile_name = (config('ec-profile-name')
                            or "{}-profile".format(pool_name))
            # Metadata sizing is approximately 1% of the overall data
            # weight, but is in effect driven by the number of RBD
            # images rather than their size - so it can be very
            # lightweight.
            metadata_weight = weight * 0.01
            # Resize data pool weight to accommodate metadata weight
            weight = weight - metadata_weight
            # Create metadata pool
            rq.add_op_create_pool(name=metadata_pool_name,
                                  replica_count=replicas,
                                  weight=metadata_weight,
                                  group='vms',
                                  app_name='rbd')

            # Create erasure profile
            rq.add_op_create_erasure_profile(name=profile_name,
                                             k=bdm_k,
                                             m=bdm_m,
                                             lrc_locality=bdm_l,
                                             lrc_crush_locality=crush_locality,
                                             shec_durability_estimator=bdm_c,
                                             clay_helper_chunks=bdm_d,
                                             clay_scalar_mds=scalar_mds,
                                             device_class=device_class,
                                             erasure_type=plugin,
                                             erasure_technique=technique)

            # Create EC data pool

            # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
            # the unpacking of the BlueStore compression arguments as part of
            # the function arguments. Until then we need to build the dict
            # prior to the function call.
            kwargs = {
                'name': pool_name,
                'erasure_profile': profile_name,
                'weight': weight,
                'group': "vms",
                'app_name': "rbd",
                'allow_ec_overwrites': True
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_erasure_pool(**kwargs)
        else:
            kwargs = {
                'name': pool_name,
                'replica_count': replicas,
                'weight': weight,
                'group': 'vms',
                'app_name': 'rbd',
            }
            kwargs.update(bluestore_compression.get_kwargs())
            rq.add_op_create_replicated_pool(**kwargs)

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="volumes",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
        rq.add_op_request_access_to_group(
            name="vms",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq
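
The CephBlueStoreCompressionContext used above also exposes a validate() method in current charmhelpers, which raises ValueError on an invalid combination of compression options. A hedged sketch of guarding the request with it; the wrapper function and the status message are illustrative, not taken from the charm:

from charmhelpers.contrib.openstack import context as ch_context
from charmhelpers.core.hookenv import status_set

def validated_ceph_request():
    # Hypothetical guard: surface a bad bluestore compression
    # configuration through workload status instead of sending a
    # broker request that the ceph-mon units would reject.
    try:
        ch_context.CephBlueStoreCompressionContext().validate()
    except ValueError as e:
        status_set('blocked',
                   'Invalid bluestore compression config: {}'.format(e))
        return None
    return get_ceph_request()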

Example #5
def config_changed():
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        assert_charm_supports_ipv6()

    global CONFIGS
    send_remote_restart = False
    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)
            send_remote_restart = True

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-nova-compute.conf')

    destroy_libvirt_network('default')

    if migration_enabled() and config('migration-auth-type') == 'ssh':
        # Check in with nova-cloud-controller and register the new SSH
        # key if one has just been generated.
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys()
        import_authorized_keys()

    if config('enable-resize') is True:
        enable_shell(user='******')
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys(user='******')
        import_authorized_keys(user='******', prefix='nova')
    else:
        disable_shell(user='******')

    if config('instances-path') is not None:
        fp = config('instances-path')
        fix_path_ownership(fp, user='******')

    for rid in relation_ids('cloud-compute'):
        compute_joined(rid)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)

    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(rid, remote_restart=send_remote_restart)

    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()

    if config('hugepages'):
        install_hugepages()

    # Disable SMT for ppc64, required for nova/libvirt/kvm
    arch = platform.machine()
    log('CPU architecture: {}'.format(arch))
    if arch in ['ppc64el', 'ppc64le']:
        set_ppc64_cpu_smt_state('off')

    if (config('libvirt-image-backend') == 'rbd'
            and assert_libvirt_rbd_imagebackend_allowed()):
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                ceph_changed(rid=rid, unit=unit)

    CONFIGS.write_all()

    NovaComputeAppArmorContext().setup_aa_profile()
    if (network_manager() in ['flatmanager', 'flatdhcpmanager']
            and config('multi-host').lower() == 'yes'):
        NovaAPIAppArmorContext().setup_aa_profile()
        NovaNetworkAppArmorContext().setup_aa_profile()
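
None of these functions are called directly; in the charm they are registered as Juju hooks through charmhelpers' Hooks dispatcher. A minimal sketch of that wiring, assuming the conventional hook names (the decorators in the actual charm may differ):

import sys

from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()

# Juju invokes a hook script named after the event; Hooks.execute()
# dispatches to whichever function was registered for that name.
hooks.hook('config-changed')(config_changed)
hooks.hook('ceph-relation-changed')(ceph_changed)

if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))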