def config_changed():
    configure_deferred_restarts(deferrable_services())
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available(NEUTRON_COMMON):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    update_nrpe_config()

    module_settings = config('kernel-modules')
    if module_settings:
        if is_container():
            log("Cannot load modules inside of a container", level=WARNING)
        else:
            for module in module_settings.split():
                try:
                    modprobe(module)
                except Exception:
                    message = "Failed to load kernel module '%s'" % module
                    log(message, level=WARNING)

    sysctl_settings = config('sysctl')
    if sysctl_settings:
        if is_container():
            log("Cannot create sysctls inside of a container", level=WARNING)
        else:
            create_sysctl(sysctl_settings,
                          '/etc/sysctl.d/50-quantum-gateway.conf')
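
    # Illustrative values (assumptions, not taken from the charm's
    # config.yaml) for the two options consumed above: 'kernel-modules' is a
    # whitespace-separated list of modules, and 'sysctl' is a YAML mapping
    # written out to the sysctl.d file, e.g.:
    #   juju config <charm> kernel-modules="nf_conntrack ip6_tables"
    #   juju config <charm> sysctl="{ net.ipv4.ip_forward: 1 }"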

    # Re-run joined hooks as config might have changed
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('amqp-nova'):
        amqp_nova_joined(relation_id=r_id)
    if valid_plugin():
        CONFIGS.write_all()
        configure_ovs()
        configure_apparmor()
    else:
        message = 'Please provide a valid plugin config'
        log(message, level=ERROR)
        status_set('blocked', message)
        sys.exit(1)
    if config('plugin') == 'n1kv':
        if config('enable-l3-agent'):
            status_set('maintenance', 'Installing apt packages')
            apt_install(filter_installed_packages(['neutron-l3-agent']))
        else:
            apt_purge('neutron-l3-agent')

    # Setup legacy ha configurations
    update_legacy_ha_files()
    # Disable nova metadata if possible.
    if disable_nova_metadata():
        remove_legacy_nova_metadata()
    if disable_neutron_lbaas():
        remove_legacy_neutron_lbaas()

    def configure_deferred_restarts(self):
        """Install deferred event files and policies.

        Check that the charm supports deferred events by checking for the
        presence of the 'enable-auto-restarts' config option. If it is present
        then install the supporting files and directories, however,
        configure_deferred_restarts only enables deferred events if
        'enable-auto-restarts' is True.
        """
        if 'enable-auto-restarts' in ch_core.hookenv.config().keys():
            deferred_events.configure_deferred_restarts(
                self.deferable_services)
            # Reactive charms may install policy-rc.d without the execute
            # bit set, so make sure the file is executable.
            os.chmod(
                '/var/lib/charm/{}/policy-rc.d'.format(
                    ch_core.hookenv.service_name()),
                0o755)
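
# A minimal sketch (inferred from the unit test below, not the verbatim
# charmhelpers implementation) of what
# deferred_events.configure_deferred_restarts() is expected to do: always
# install the policy-rc.d shim, then either clear the policy file when
# restarts are permitted, or block stop/restart actions for each service.
# 'policy_rcd' and 'is_restart_permitted' are the collaborators patched in
# the test and are assumed to be in scope here.
def _configure_deferred_restarts_sketch(services):
    policy_rcd.install_policy_rcd()
    if is_restart_permitted():
        policy_rcd.remove_policy_file()
    else:
        for svc in services:
            policy_rcd.add_policy_block(
                svc, ['stop', 'restart', 'try-restart'])
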
    def test_configure_deferred_restarts(self, install_policy_rcd,
                                         is_restart_permitted,
                                         remove_policy_file, add_policy_block):
        is_restart_permitted.return_value = True
        deferred_events.configure_deferred_restarts(['svcA', 'svcB'])
        remove_policy_file.assert_called_once_with()
        install_policy_rcd.assert_called_once_with()

        remove_policy_file.reset_mock()
        install_policy_rcd.reset_mock()
        is_restart_permitted.return_value = False
        deferred_events.configure_deferred_restarts(['svcA', 'svcB'])
        self.assertFalse(remove_policy_file.called)
        install_policy_rcd.assert_called_once_with()
        add_policy_block.assert_has_calls([
            call('svcA', ['stop', 'restart', 'try-restart']),
            call('svcB', ['stop', 'restart', 'try-restart'])])
Example #4
def config_changed(check_deferred_restarts=True):
    configure_deferred_restarts(deferrable_services())
    # policy_rcd.remove_policy_file()
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    allowed, reason = is_hook_allowed(
        'config-changed', check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return

    install_packages()
    install_tmpfilesd()

    # NOTE(jamespage): purge any packages as a result of py3 switch
    #                  at rocky.
    packages_to_purge = determine_purge_packages()
    request_nova_compute_restart = False
    if packages_to_purge:
        purge_packages(packages_to_purge)
        request_nova_compute_restart = True

    sysctl_settings = config('sysctl')
    if not is_container() and sysctl_settings:
        create_sysctl(sysctl_settings, '/etc/sysctl.d/50-openvswitch.conf')

    # NOTE(fnordahl): It is important to write config to disk and perhaps
    # restart the openvswitch-switch service prior to attempting run-time
    # configuration of OVS as we may have to pass options to `ovs-ctl` for
    # `ovs-vswitchd` to run at all. LP: #1906280
    # TODO: make restart_on_change use contextlib.contextmanager
    @restart_on_change({
        cfg: services
        for cfg, services in restart_map().items() if cfg == OVS_DEFAULT
    })
    def _restart_before_runtime_config_when_required():
        CONFIGS.write_all()

    _restart_before_runtime_config_when_required()
    configure_ovs()

    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(relation_id=rid,
                              request_restart=request_nova_compute_restart)
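

# The TODO above mentions a context-manager flavour of restart_on_change.
# A minimal, self-contained sketch of that idea (an assumption, not
# charmhelpers code): snapshot config-file hashes before the block runs and
# restart the mapped services for any file that changed afterwards.
import hashlib
import os
from contextlib import contextmanager


def _file_digest(path):
    # Hash the file contents; a missing file hashes to None.
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()


@contextmanager
def restart_on_change_ctx(restart_map, restart_func):
    before = {path: _file_digest(path) for path in restart_map}
    yield
    for path, services in restart_map.items():
        if _file_digest(path) != before[path]:
            for svc in services:
                restart_func(svc)

# Usage sketch (OVS_DEFAULT as used above; restart_func could be, e.g.,
# charmhelpers' service_restart):
#   with restart_on_change_ctx({OVS_DEFAULT: ['openvswitch-switch']},
#                              service_restart):
#       CONFIGS.write_all()
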
def config_changed(check_deferred_restarts=True):
    """Run config-chaged hook.

    :param check_deferred_events: Whether to check if restarts are
                                  permitted before running hook.
    :type check_deferred_events: bool
    """
    configure_deferred_restarts(rabbit.services())
    allowed, reason = is_hook_allowed(
        'config-changed', check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return
    # Update hosts with this unit's information
    cluster_ip = ch_ip.get_relation_ip(
        rabbit_net_utils.CLUSTER_INTERFACE,
        cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG))
    rabbit.update_hosts_file({cluster_ip: rabbit.get_unit_hostname()})

    # Add archive source if provided and not in the upgrade process
    if not leader_get("cluster_series_upgrading"):
        add_source(config('source'), config('key'))
    # Copy in defaults file for updated ulimits
    shutil.copyfile('templates/rabbitmq-server',
                    '/etc/default/rabbitmq-server')

    # Install/upgrade packages so that a change to the 'source' config
    # option results in an upgrade, but only when an upgrade is actually
    # available from the configured archive.
    if rabbit.archive_upgrade_available():
        # Avoid package upgrade collisions:
        # stopping and attempting to start rabbitmq units at the same time
        # leads to failed restarts.
        rabbit.cluster_wait()
        rabbit.install_or_upgrade_packages()

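    # Open or close the AMQP listeners to match the 'ssl' mode:
    # 'off' -> plaintext 5672 only, 'on' -> both 5672 and the TLS port,
    # 'only' -> the TLS port only.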
    if config('ssl') == 'off':
        open_port(5672)
        close_port(int(config('ssl_port')))
    elif config('ssl') == 'on':
        open_port(5672)
        open_port(int(config('ssl_port')))
    elif config('ssl') == 'only':
        close_port(5672)
        open_port(int(config('ssl_port')))
    else:
        log("Unknown ssl config value: '%s'" % config('ssl'), level=ERROR)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    if rabbit.management_plugin_enabled():
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(rabbit.get_managment_port())
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(rabbit.get_managment_port())
        # LY: Close the old management port since it may have been opened in a
        #     previous version of the charm. close_port is a noop if the port
        #     is not open
        close_port(55672)

    # NOTE(jamespage): If a newer RMQ version is
    # installed and the old style configuration file
    # is still on disk, remove before re-rendering
    # any new configuration.
    if (os.path.exists(rabbit.RABBITMQ_CONFIG)
            and cmp_pkgrevno('rabbitmq-server', '3.7') >= 0):
        os.remove(rabbit.RABBITMQ_CONFIG)

    rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")

        if ha_is_active_active:
            update_nrpe_checks()
        else:
            if is_elected_leader('res_rabbitmq_vip'):
                update_nrpe_checks()
            else:
                log("hacluster relation is present but this node is not active"
                    " skipping update nrpe checks")
    elif is_relation_made('nrpe-external-master'):
        update_nrpe_checks()

    # Only set values if this is the leader
    if not is_leader():
        return

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))

    # Update cluster in case min-cluster-size has changed
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_changed(relation_id=rid, remote_unit=unit)

    # NOTE(jamespage): Workaround until we have a good way
    #                  of generally disabling notifications
    #                  based on which services are deployed.
    if 'openstack' in rabbit.list_vhosts():
        rabbit.configure_notification_ttl('openstack',
                                          config('notification-ttl'))