def cluster_changed():
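    """Cluster relation changed hook: rewrite configs, echo the peer-stored
    dbsync state and resume services once the database sync is complete,
    pausing them otherwise, then nudge any related quantum gateways.
    """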
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        dbsync_state = ch_peerstorage.peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            if not ch_utils.is_unit_paused_set():
                for svc in ncc_utils.services():
                    ch_host.service_resume(svc)
            else:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
        else:
            if not ch_utils.is_unit_paused_set():
                hookenv.log('Database sync not ready. Shutting down services')
                for svc in ncc_utils.services():
                    ch_host.service_pause(svc)
            else:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
    # The shared metadata secret is stored in the leader-db and if it
    # changes the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
def config_changed():
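    """Config-changed hook: validate router HA/DVR settings against existing
    routers, handle IPv6 and OpenStack upgrades, refresh packages and
    configs, then refire all dependent relations.
    """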
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    # If neutron is ready to be queried then check for incompatibility
    # between existing neutron objects and charm settings
    if neutron_ready():
        if l3ha_router_present() and not get_l3ha():
            e = ('Cannot disable Router HA while ha enabled routers exist.'
                 ' Please remove any ha routers')
            status_set('blocked', e)
            raise Exception(e)
        if dvr_router_present() and not get_dvr():
            e = ('Cannot disable dvr while dvr enabled routers exist. Please'
                 ' remove any distributed routers')
            log(e, level=ERROR)
            status_set('blocked', e)
            raise Exception(e)
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    global CONFIGS
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('neutron-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    additional_install_locations(
        config('neutron-plugin'),
        config('openstack-origin')
    )
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(
                determine_packages(config('openstack-origin'))),
                fatal=True)
    packages_removed = remove_old_packages()
    configure_https()
    update_nrpe_config()
    CONFIGS.write_all()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    for r_id in relation_ids('neutron-api'):
        neutron_api_relation_joined(rid=r_id)
    for r_id in relation_ids('neutron-plugin-api'):
        neutron_plugin_api_relation_joined(rid=r_id)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    for rid in relation_ids('cluster'):
        cluster_joined(rid)
def config_changed():
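    """Config-changed hook: handle IPv6, ephemeral unmounts and block device
    configuration, run any pending OpenStack upgrade, rewrite configs and
    refire cluster/ha/identity relations.
    """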
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    # configure block devices either local or from juju storage
    _configure_block_devices()

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid, upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid, upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not stored in a config file, so we can't use
    # restart_on_change; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    # NOTE(jamespage):
    # ensure any new volume endpoints are created. Note that this
    # is normally done after an openstack series upgrade, but this
    # was not performed historically so always execute to ensure
    # any upgrades where this step was missed are fixed.
    for rid in relation_ids('identity-service'):
        identity_joined(rid=rid)

    # call the policy overrides handler which will install any policy overrides
    maybe_do_policyd_overrides_on_config_changed(
        os_release('cinder-common'),
        'cinder',
        restart_handler=lambda: service_restart('cinder-api'))
def config_changed_postupgrade():
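    """Finish config-changed processing after any upgrade handling: set up
    apache2/mod_wsgi and memcache packages, set up fernet keys on the
    leader, rewrite configs and refire identity and ha relations.
    """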
    save_script_rc()
    release = os_release('keystone')
    if run_in_apache(release=release):
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        # when deployed from source, init scripts aren't installed
        service_pause('keystone')

        disable_unused_apache_sites()
        if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
            CONFIGS.write(WSGI_KEYSTONE_API_CONF)
        if not is_unit_paused_set():
            restart_pid_check('apache2')
            stop_manager_instance()

    if enable_memcache(release=release):
        # If charm or OpenStack have been upgraded then the list of required
        # packages may have changed so ensure they are installed.
        apt_install(filter_installed_packages(determine_packages()))

    if is_leader() and fernet_enabled():
        key_setup()
        key_leader_set()

    configure_https()
    open_port(config('service-port'))

    update_nrpe_config()

    CONFIGS.write_all()

    if snap_install_requested() and not is_unit_paused_set():
        service_restart('snap.keystone.*')
        stop_manager_instance()

    if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not
            is_unit_paused_set()):
        ensure_initial_admin(config)
        if CompareOpenStackReleases(
                os_release('keystone')) >= 'liberty':
            CONFIGS.write(POLICY_JSON)

    update_all_identity_relation_units()
    update_all_domain_backends()
    update_all_fid_backends()

    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)

    notify_middleware_with_release_version()
    inform_peers_if_ready(check_api_unit_ready)
def config_changed_postupgrade():
    # Ensure ssl dir exists and is unison-accessible
    ensure_ssl_dir()

    check_call(["chmod", "-R", "g+wrx", "/var/lib/keystone/"])

    ensure_ssl_dirs()

    save_script_rc()
    if run_in_apache():
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        # when deployed from source, init scripts aren't installed
        if not git_install_requested():
            service_pause("keystone")
        CONFIGS.write(WSGI_KEYSTONE_API_CONF)
        if not is_unit_paused_set():
            restart_pid_check("apache2")
    configure_https()
    open_port(config("service-port"))

    update_nrpe_config()
    CONFIGS.write_all()

    initialise_pki()

    update_all_identity_relation_units()

    # Ensure sync request is sent out (needed for any/all ssl change)
    send_ssl_sync_request()

    for r_id in relation_ids("ha"):
        ha_joined(relation_id=r_id)
def ceph_changed(rid=None, unit=None):
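    """Ceph relation changed hook: once the relation and keyring are in
    place, write the ceph and nova configs, create the libvirt secret for
    kvm/qemu/lxc virt types and restart nova-compute when the broker
    request is complete.
    """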
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and allow easily extended to support other compute flavors.
    key = relation_get(attribute='key', rid=rid, unit=unit)
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and key:
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID, key=key)

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if not is_unit_paused_set():
            service_restart('nova-compute')
    else:
        send_request_if_needed(get_ceph_request())
def domain_backend_changed(relation_id=None, unit=None):
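    """Create a domain for a Keystone v3 domain-specific identity backend
    and restart keystone when the relation presents a new restart nonce.
    """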
    if get_api_version() < 3:
        log('Domain specific backend identity configuration only supported '
            'with Keystone v3 API, skipping domain creation and '
            'restart.')
        return

    domain_name = relation_get(attribute='domain-name',
                               unit=unit,
                               rid=relation_id)
    if domain_name:
        # NOTE(jamespage): Only create domain data from lead
        #                  unit when clustered and database
        #                  is configured and created.
        if is_leader() and is_db_ready() and is_db_initialised():
            create_or_show_domain(domain_name)
        # NOTE(jamespage): Deployment may have multiple domains,
        #                  with different identity backends so
        #                  ensure that a domain specific nonce
        #                  is checked for restarts of keystone
        restart_nonce = relation_get(attribute='restart-nonce',
                                     unit=unit,
                                     rid=relation_id)
        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
        db = unitdata.kv()
        if restart_nonce != db.get(domain_nonce_key):
            if not is_unit_paused_set():
                service_restart(keystone_service())
            db.set(domain_nonce_key, restart_nonce)
            db.flush()
def update_all_identity_relation_units(check_db_ready=True):
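    """Refire identity hooks for all related services, provided the unit is
    not paused and the database is ready, initialised and at the expected
    cluster scale.
    """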
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return
    if not is_expected_scale():
        log("Keystone charm and it's dependencies not yet at expected scale "
            "- deferring identity-relation updates", level=INFO)
        return

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
def check_local_db_actions_complete():
    """Check if we have received db init'd notification and restart services
    if we have not already.

    NOTE: this must only be called from peer relation context.
    """
    # leader must not respond to notifications
    if is_leader() or not is_db_initialised():
        return

    settings = relation_get() or {}
    if settings:
        init_id = settings.get(NEUTRON_DB_INIT_RKEY)
        echoed_init_id = relation_get(unit=local_unit(),
                                      attribute=NEUTRON_DB_INIT_ECHO_RKEY)

        # If we have received an init notification from a peer unit
        # (assumed to be the leader) then restart neutron-api and echo the
        # notification and don't restart again unless we receive a new
        # (different) notification.
        if is_new_dbinit_notification(init_id, echoed_init_id):
            if not is_unit_paused_set():
                log("Restarting neutron services following db "
                    "initialisation", level=DEBUG)
                service_restart('neutron-server')

            # Echo notification and ensure init key unset since we are not
            # leader anymore.
            relation_set(**{NEUTRON_DB_INIT_ECHO_RKEY: init_id,
                            NEUTRON_DB_INIT_RKEY: None})
def install():
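    """Install hook: configure the apt source, install packages, ship any
    bundled files to /usr/bin, open service ports and keep services paused
    until the db relation is joined.
    """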
    hookenv.status_set('maintenance', 'Executing pre-install')
    execd.execd_preinstall()
    ch_utils.configure_installation_source(hookenv.config('openstack-origin'))

    hookenv.status_set('maintenance', 'Installing apt packages')
    ch_fetch.apt_update()
    ch_fetch.apt_install(ncc_utils.determine_packages(), fatal=True)

    ncc_utils.disable_package_apache_site()
    ncc_utils.stop_deprecated_services()

    _files = os.path.join(hookenv.charm_dir(), 'files')
    if os.path.isdir(_files):
        for f in os.listdir(_files):
            f = os.path.join(_files, f)
            if os.path.isfile(f):
                hookenv.log('Installing %s to /usr/bin' % f)
                shutil.copy2(f, '/usr/bin')
    for port in ncc_utils.determine_ports():
        hookenv.open_port(port)
    msg = 'Disabling services until db relation joined'
    hookenv.log(msg)
    hookenv.status_set('maintenance', msg)
    if not ch_utils.is_unit_paused_set():
        for svc in ncc_utils.services():
            ch_host.service_pause(svc)
    else:
        hookenv.log('Unit is in paused state, not issuing stop/pause '
                    'to all services')
def check_local_db_actions_complete():
    """Check if we have received db init'd notification and restart services
    if we have not already.

    NOTE: this must only be called from peer relation context.
    """
    if not is_db_initialised():
        return

    settings = relation_get() or {}
    if settings:
        init_id = settings.get(NEUTRON_DB_INIT_RKEY)
        echoed_init_id = relation_get(unit=local_unit(),
                                      attribute=NEUTRON_DB_INIT_ECHO_RKEY)

        # If we have received an init notification from a peer unit
        # (assumed to be the leader) then restart neutron-api and echo the
        # notification and don't restart again unless we receive a new
        # (different) notification.
        if is_new_dbinit_notification(init_id, echoed_init_id):
            if not is_unit_paused_set():
                log("Restarting neutron services following db "
                    "initialisation", level=DEBUG)
                service_restart('neutron-server')

            # Echo notification
            relation_set(**{NEUTRON_DB_INIT_ECHO_RKEY: init_id})
def nm_changed():
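    """Network-manager relation changed hook: rewrite configs, install any
    CA cert presented and restart nova-api-metadata when a new restart
    trigger is received, unless nova metadata is disabled locally.
    """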
    CONFIGS.write_all()
    if relation_get('ca_cert'):
        ca_crt = b64decode(relation_get('ca_cert'))
        install_ca_cert(ca_crt)

    if config('ha-legacy-mode'):
        cache_env_data()

    # Disable nova metadata if possible.
    if disable_nova_metadata():
        remove_legacy_nova_metadata()
    else:
        # NOTE: nova-api-metadata needs to be restarted
        #       once the nova-conductor is up and running
        #       on the nova-cc units.
        restart_nonce = relation_get('restart_trigger')
        if restart_nonce is not None:
            db = kv()
            previous_nonce = db.get('restart_nonce')
            if previous_nonce != restart_nonce:
                if not is_unit_paused_set():
                    service_restart('nova-api-metadata')
                db.set('restart_nonce', restart_nonce)
                db.flush()
def upgrade_charm():
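    """Upgrade-charm hook: refresh packages, restart services if any were
    purged, refire amqp relations and fix the group ownership of the ceph
    admin socket path (LP: #1779676).
    """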
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    # NOTE: ensure psutil install for hugepages configuration
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(['python-psutil']))
    packages_removed = remove_old_packages()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)

    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    # Fix previously wrongly created path permissions
    # LP: https://bugs.launchpad.net/charm-cinder-ceph/+bug/1779676
    asok_path = '/var/run/ceph/'
    gid = grp.getgrnam("kvm").gr_gid
    if gid and os.path.isdir(asok_path) and gid != os.stat(asok_path).st_gid:
        log("{} not owned by group 'kvm', fixing permissions."
            .format(asok_path))
        shutil.chown(asok_path, group='kvm')
def config_changed():
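    """Config-changed hook: install packages, apply sysctl settings outside
    containers, configure OVS and SR-IOV, and request a nova-compute
    restart if the py3 transition purged packages.
    """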
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    install_packages()
    install_tmpfilesd()

    # NOTE(jamespage): purge any packages as a result of py3 switch
    #                  at rocky.
    packages_to_purge = determine_purge_packages()
    request_nova_compute_restart = False
    if packages_to_purge:
        purge_packages(packages_to_purge)
        request_nova_compute_restart = True

    sysctl_settings = config('sysctl')
    if not is_container() and sysctl_settings:
        create_sysctl(sysctl_settings,
                      '/etc/sysctl.d/50-openvswitch.conf')

    configure_ovs()
    CONFIGS.write_all()
    # NOTE(fnordahl): configure_sriov must be run after CONFIGS.write_all()
    # to allow us to enable boot time execution of init script
    configure_sriov()
    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(
            relation_id=rid,
            request_restart=request_nova_compute_restart)
def series_upgrade_prepare():
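    """Flag the unit as upgrading, pause it where appropriate and notify
    peers of the series upgrade.
    """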
    set_unit_upgrading()
    # HA services are shut down when the unit receives series upgrade
    # notifications from peers, so we cannot pause services here.
    if not is_unit_paused_set() and not is_waiting_unit_series_upgrade_set():
        pause_unit()
    notify_peers_of_series_upgrade()
def upgrade():
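    """Upgrade hook: on the leader, mark the unit seeded and broadcast the
    cluster bootstrap UUID once wsrep reports ready; on other units, pick
    up the leader's bootstrap-uuid; finally re-run config_changed.
    """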

    if is_leader():
        if is_unit_paused_set():
            log('Unit is paused, skipping upgrade', level=INFO)
            return

        # broadcast the bootstrap-uuid
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set. As
        # this is all happening during the upgrade-charm hook, it is
        # reasonable to expect the cluster is running.

        # Wait until the leader has set the bootstrap-uuid.
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")

    config_changed()
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
def do_openstack_upgrade(configs):
    """Perform an upgrade of glance.  Takes care of upgrading
    packages, rewriting configs + database migration and potentially
    any other post-upgrade actions.

    :param configs: The charm's main OSConfigRenderer object.

    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    for s in services():
        service_stop(s)
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    # Don't start services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        for s in services():
            service_start(s)
def config_changed():
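    """Config-changed hook: bootstrap or reconfigure the MySQL cluster once
    sufficient peers are present, notify access-network changes, reinstall
    the pacemaker agent and refresh ha/nrpe relations.
    """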
    # if we are paused, delay doing any config changed hooks.  It is forced on
    # the resume.
    if is_unit_paused_set():
        return

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    clustered = len(hosts) > 1
    bootstrapped = is_bootstrapped()

    # NOTE: only configure the cluster if we have sufficient peers. This only
    # applies if min-cluster-size is provided and is used to avoid extraneous
    # configuration changes and premature bootstrapping as the cluster is
    # deployed.
    if is_sufficient_peers():
        try:
            # NOTE(jamespage): try with leadership election
            if is_leader():
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

        except NotImplementedError:
            # NOTE(jamespage): fallback to legacy behaviour.
            oldest = oldest_peer(peer_units())
            if oldest:
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

    # Notify any changes to the access network
    update_shared_db_rels()

    # (re)install pcmkr agent
    install_mysql_ocf()

    if relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined()

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
def lxc_changed():
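    """Reconfigure lxd and restart nova-compute when the relation presents
    a new nonce.
    """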
    nonce = relation_get('nonce')
    db = kv()
    if nonce and db.get('lxd-nonce') != nonce:
        db.set('lxd-nonce', nonce)
        configure_lxd(user='******')
        if not is_unit_paused_set():
            service_restart('nova-compute')
def ceph_joined():
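    """Install ceph-common when the ceph relation is joined, restarting the
    libvirt daemon for libvirt virt types (Bug 1427660).
    """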
    pkgs = filter_installed_packages(['ceph-common'])
    if pkgs:
        status_set('maintenance', 'Installing ceph-common package')
        apt_install(pkgs, fatal=True)
        # Bug 1427660
        if not is_unit_paused_set() and config('virt-type') in LIBVIRT_TYPES:
            service_restart(libvirt_daemon())
def git_post_install(projects_yaml):
    """Perform post-install setup."""
    http_proxy = git_yaml_value(projects_yaml, 'http_proxy')
    if http_proxy:
        pip_install('mysql-python', proxy=http_proxy,
                    venv=git_pip_venv_dir(projects_yaml))
    else:
        pip_install('mysql-python',
                    venv=git_pip_venv_dir(projects_yaml))

    src_etc = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
    configs = [
        {'src': src_etc,
         'dest': '/etc/neutron'},
        {'src': os.path.join(src_etc, 'neutron/plugins'),
         'dest': '/etc/neutron/plugins'},
        {'src': os.path.join(src_etc, 'neutron/rootwrap.d'),
         'dest': '/etc/neutron/rootwrap.d'},
    ]

    for c in configs:
        if os.path.exists(c['dest']):
            shutil.rmtree(c['dest'])
        shutil.copytree(c['src'], c['dest'])

    # NOTE(coreycb): Need to find better solution than bin symlinks.
    symlinks = [
        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
                             'bin/neutron-rootwrap'),
         'link': '/usr/local/bin/neutron-rootwrap'},
        {'src': os.path.join(git_pip_venv_dir(projects_yaml),
                             'bin/neutron-db-manage'),
         'link': '/usr/local/bin/neutron-db-manage'},
    ]

    for s in symlinks:
        if os.path.lexists(s['link']):
            os.remove(s['link'])
        os.symlink(s['src'], s['link'])

    render('git/neutron_sudoers', '/etc/sudoers.d/neutron_sudoers', {},
           perms=0o440)

    bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
    neutron_api_context = {
        'service_description': 'Neutron API server',
        'charm_name': 'neutron-api',
        'process_name': 'neutron-server',
        'executable_name': os.path.join(bin_dir, 'neutron-server'),
    }

    # NOTE(coreycb): Needs systemd support
    render('git/upstart/neutron-server.upstart',
           '/etc/init/neutron-server.conf',
           neutron_api_context, perms=0o644)

    if not is_unit_paused_set():
        service_restart('neutron-server')
def _do_openstack_upgrade(new_src):
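    """Perform an OpenStack upgrade of the nova/neutron packages, handling
    the grizzly->havana stamping special cases, running database
    migrations on the elected leader and restarting services unless the
    unit is paused.

    :returns: the re-registered configs for the new release.
    """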
    enable_policy_rcd()
    cur_os_rel = os_release('nova-common')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]

    # NOTE(jamespage) pre-stamp neutron database before upgrade from grizzly
    if cur_os_rel == 'grizzly':
        neutron_db_manage(['stamp', 'grizzly'])

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    if cur_os_rel == 'grizzly':
        # NOTE(jamespage) when upgrading from grizzly->havana, config
        # files need to be generated prior to performing the db upgrade
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()
        neutron_db_manage(['upgrade', 'head'])
    else:
        if new_os_rel < 'kilo':
            neutron_db_manage(['stamp', cur_os_rel])
            migrate_neutron_database()
        # NOTE(jamespage) upgrade with existing config files as the
        # havana->icehouse migration enables new service_plugins which
        # create issues with db upgrades
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and
                is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        for s in services():
            service_start(s)

    return configs
def assess_status():
    """Assess status of current unit"""
    # check to see if the unit is paused.
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set("blocked",
                   "Ready for do-release-upgrade and reboot. "
                   "Set complete when finished.")
        return
    if is_unit_paused_set():
        status_set('maintenance',
                   "Paused. Use 'resume' action to resume normal service.")
        return
    # Check for mon relation
    if len(relation_ids('mon')) < 1:
        status_set('blocked', 'Missing relation: monitor')
        return

    # Check for monitors with presented addresses
    # Check for bootstrap key presentation
    monitors = get_mon_hosts()
    if len(monitors) < 1 or not get_conf('osd_bootstrap_key'):
        status_set('waiting', 'Incomplete relation: monitor')
        return

    # Check for vault
    if use_vaultlocker():
        if not relation_ids('secrets-storage'):
            status_set('blocked', 'Missing relation: vault')
            return
        if not vaultlocker.vault_relation_complete():
            status_set('waiting', 'Incomplete relation: vault')
            return

    # Check for OSD device creation parity i.e. at least some devices
    # must have been presented and used for this charm to be operational
    (prev_status, prev_message) = status_get()
    running_osds = ceph.get_running_osds()
    if not prev_message.startswith('Non-pristine'):
        if not running_osds:
            status_set('blocked',
                       'No block devices detected using current configuration')
        else:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
    else:
        pristine = True
        osd_journals = get_journal_devices()
        for dev in list(set(ceph.unmounted_disks()) - set(osd_journals)):
            if (not ceph.is_active_bluestore_device(dev) and
                    not ceph.is_pristine_disk(dev)):
                pristine = False
                break
        if pristine:
            status_set('active',
                       'Unit is ready ({} OSD)'.format(len(running_osds)))
def force_etcd_restart():
    '''
    If etcd has been reconfigured we need to force it to fully restart.
    This is necessary because etcd has some config flags that it ignores
    after the first time it starts, so we need to make it forget them.
    '''
    service_stop('etcd')
    for directory in glob.glob('/var/lib/etcd/*'):
        shutil.rmtree(directory)
    if not is_unit_paused_set():
        service_start('etcd')
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    apt_install(filter_installed_packages(get_packages()), fatal=True)
    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
    CONFIGS.write_all()
def db_departed():
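    """Shared-db relation departed hook: mark the dbsync state incomplete
    for peers and pause services unless the unit is already paused.
    """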
    CONFIGS.write_all()
    update_cell_db_if_ready(skip_acl_check=True)
    for r_id in hookenv.relation_ids('cluster'):
        hookenv.relation_set(relation_id=r_id, dbsync_state='incomplete')
    if not ch_utils.is_unit_paused_set():
        for svc in ncc_utils.services():
            ch_host.service_pause(svc)
    else:
        hookenv.log('Unit is in paused state, not issuing stop/pause to all '
                    'services')
def series_upgrade_prepare():
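    """Pause the unit ahead of a series upgrade and, from the leader, flag
    the whole cluster as series-upgrading.
    """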
    set_unit_upgrading()
    if not is_unit_paused_set():
        log("Pausing unit for series upgrade.")
        rabbit.pause_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))
    if is_leader():
        if not leader_get('cluster_series_upgrading'):
            # Inform the entire cluster a series upgrade is occurring.
            # Run the complete-cluster-series-upgrade action on the leader to
            # clear this setting when the full cluster has completed its
            # upgrade.
            leader_set(cluster_series_upgrading=True)
def client_node_is_ready():
    """Determine if the leader node has set amqp client data

    @returns boolean
    """
    # Bail if this unit is paused
    if is_unit_paused_set():
        return False
    for rid in relation_ids('amqp'):
        if leader_get(attribute='{}_password'.format(rid)):
            return True
    return False
def cmd_all_services(cmd):
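    """Apply cmd to all services, starting only those not already running
    when cmd is 'start'; no-op when the unit is paused.
    """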
    if is_unit_paused_set():
        log('Unit is in paused state, not issuing {} to all '
            'services'.format(cmd))
        return
    if cmd == 'start':
        for svc in services():
            if not service_running(svc):
                service_start(svc)
    else:
        for svc in services():
            service(cmd, svc)
def config_changed():
    conf = config()

    if conf['prefer-ipv6']:
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    e_mountpoint = config('ephemeral-unmount')
    if e_mountpoint and filesystem_mounted(e_mountpoint):
        umount(e_mountpoint)

    if (service_enabled('volume') and
            conf['block-device'] not in [None, 'None', 'none']):
        status_set('maintenance', 'Configuring lvm storage')
        block_devices = conf['block-device'].split()
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('cinder-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(configs=CONFIGS)
            # NOTE(jamespage) tell any storage-backends we just upgraded
            for rid in relation_ids('storage-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())
            # NOTE(hopem) tell any backup-backends we just upgraded
            for rid in relation_ids('backup-backend'):
                relation_set(relation_id=rid,
                             upgrade_nonce=uuid.uuid4())

    # The 'overwrite' option is not stored in a config file, so we can't use
    # restart_on_change; restart cinder-volume manually when it changes.
    if config_value_changed('overwrite') and not is_unit_paused_set():
        service_restart('cinder-volume')

    CONFIGS.write_all()
    configure_https()
    update_nrpe_config()
    open_port(config('api-listening-port'))

    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    def _mon_relation():
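        """Process the mon relation: request per-unit keys and rgw pool
        creation, then once the broker request is complete write configs,
        import the radosgw key and restart the service (deferred for
        multi-site deployments).
        """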
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid,
                         key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid, unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key',
                                   rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key,
                                                      name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                service('enable', service_name())
                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and
                        new_keyring and
                        not multisite_deployment()):
                    service_restart(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
def upgrade_charm():
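    """Upgrade-charm hook: re-run install and config_changed, restart
    services after any package purge and refresh the legacy HA files and
    systemd startup overrides.
    """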
    install()
    packages_removed = remove_old_packages()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    config_changed()
    update_legacy_ha_files(force=True)

    # Install systemd overrides to remove service startup race between
    # n-gateway and n-cloud-controller services.
    install_systemd_override()
def upgrade_charm():
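    """Upgrade-charm hook: refresh packages, restart services after any
    package purge and re-present data on the nova-ceilometer relation.
    """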
    apt_install(
        filter_installed_packages(get_packages()),
        fatal=True)
    packages_removed = remove_old_packages()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    # NOTE(jamespage): Ensure any changes to nova presented data are made
    #                  during charm upgrades.
    for rid in relation_ids('nova-ceilometer'):
        nova_ceilometer_joined(rid)