def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    cur_os_rel = os_release('nova-common')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]

    # NOTE(jamespage) pre-stamp neutron database before upgrade from grizzly
    if cur_os_rel == 'grizzly':
        neutron_db_manage(['stamp', 'grizzly'])

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    if cur_os_rel == 'grizzly':
        # NOTE(jamespage) when upgrading from grizzly->havana, config
        # files need to be generated prior to performing the db upgrade
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()
        neutron_db_manage(['upgrade', 'head'])
    else:
        if new_os_rel < 'kilo':
            neutron_db_manage(['stamp', cur_os_rel])
            migrate_neutron_database()
        # NOTE(jamespage) upgrade with existing config files as the
        # havana->icehouse migration enables new service_plugins which
        # create issues with db upgrades
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and
                is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
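Every example on this page shares one idiom: is_elected_leader() gates
one-time cluster operations (database migrations, ring rebuilds, password
propagation) so they run on exactly one unit, while per-unit work such as
config rendering still happens everywhere. A minimal sketch of that pattern,
reusing the CONFIGS/CLUSTER_RES names from the examples above; the
is_done()/mark_done() idempotency helpers are hypothetical, not part of
charmhelpers:

def leader_gated_db_init():
    # Safe on every unit: re-render local config files.
    CONFIGS.write_all()
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return
    # Leader only: run the migration once and record that fact.
    if is_done('db-initialised'):   # hypothetical idempotency check
        return
    migrate_database()
    mark_done('db-initialised')     # hypothetical marker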
Example #2
def config_changed():
    if is_elected_leader(SWIFT_HA_RES):
        log("Leader established, generating ring builders", level=INFO)
        # initialize new storage rings.
        for ring, path in SWIFT_RINGS.items():
            if not os.path.exists(path):
                initialize_ring(path, config('partition-power'),
                                determine_replicas(ring), config('min-hours'))

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        setup_ipv6()

    configure_https()
    open_port(config('bind-port'))
    update_nrpe_config()

    # Determine whether or not we should do an upgrade.
    if not config('action-managed-upgrade') and \
            openstack.openstack_upgrade_available('swift'):
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(CONFIGS)

    if not leader_get('swift-proxy-rings-consumer'):
        status_set('maintenance', 'Updating and (maybe) balancing rings')
        update_rings(min_part_hours=config('min-hours'),
                     replicas=config('replicas'))

    if not config('disable-ring-balance') and is_elected_leader(SWIFT_HA_RES):
        # Try ring balance. If rings are balanced, no sync will occur.
        balance_rings()

    for r_id in relation_ids('identity-service'):
        keystone_joined(relid=r_id)

    for r_id in relation_ids('cluster'):
        cluster_joined(relation_id=r_id)

    for r_id in relation_ids('object-store'):
        object_store_joined(relation_id=r_id)

    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)

    try_initialize_swauth()

    # call the policy overrides handler which will install any policy overrides
    policyd.maybe_do_policyd_overrides(openstack.os_release('swift-proxy'),
                                       'swift')
Example #3
def upgrade_charm():
    packages_to_install = filter_installed_packages(determine_packages())
    if packages_to_install:
        log('Installing apt packages')
        status_set('maintenance', 'Installing apt packages')
        apt_install(packages_to_install)
    packages_removed = remove_old_packages()

    if run_in_apache():
        disable_unused_apache_sites()

    log('Regenerating configuration files')
    status_set('maintenance', 'Regenerating configuration files')
    CONFIGS.write_all()

    # See LP bug 1519035
    leader_init_db_if_ready()

    update_nrpe_config()

    if packages_removed:
        status_set('maintenance', 'Restarting services')
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
        stop_manager_instance()

    if is_elected_leader(CLUSTER_RES):
        log('Cluster leader - ensuring endpoint configuration is up to '
            'date', level=DEBUG)
        update_all_identity_relation_units()
Example #4
def notify_storage_rings_available():
    """Notify peer swift-storage relations that they should synchronise ring
    and builder files.

    Note that this should only be called from the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log(
            "Ring availability storage-relation broadcast requested by "
            "non-leader - skipping",
            level=WARNING)
        return

    hostname = get_hostaddr()
    hostname = format_ipv6_addr(hostname) or hostname
    path = os.path.basename(get_www_dir())
    rings_url = 'http://{}/{}'.format(hostname, path)
    trigger = uuid.uuid4()
    # Notify storage nodes that there is a new ring to fetch.
    log("Notifying storage nodes that new rings are ready for sync.",
        level=INFO)
    for relid in relation_ids('swift-storage'):
        relation_set(relation_id=relid,
                     swift_hash=get_swift_hash(),
                     rings_url=rings_url,
                     trigger=trigger)
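The consuming side of this broadcast (the swift-storage charm) is not shown
on this page. A hedged sketch of what a storage unit might do when the
trigger value changes - fetch each published ring from the leader's
rings_url; the function name and file list here are assumptions:

import os
import urllib.request

def fetch_new_rings(rings_url, dest_dir='/etc/swift'):
    # Download each ring advertised by the proxy leader over HTTP.
    for name in ('account.ring.gz', 'container.ring.gz', 'object.ring.gz'):
        url = '{}/{}'.format(rings_url, name)
        urllib.request.urlretrieve(url, os.path.join(dest_dir, name))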
Example #5
def upgrade_charm():
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(determine_packages()))
    unison.ssh_authorized_peers(user=SSH_USER,
                                group=SSH_USER,
                                peer_interface='cluster',
                                ensure_local_user=True)

    ensure_ssl_dirs()

    if run_in_apache():
        disable_unused_apache_sites()

    CONFIGS.write_all()

    # See LP bug 1519035
    leader_init_db_if_ready()

    update_nrpe_config()

    if is_elected_leader(CLUSTER_RES):
        log(
            'Cluster leader - ensuring endpoint configuration is up to '
            'date',
            level=DEBUG)
        update_all_identity_relation_units()
Example #6
def do_openstack_upgrade(configs):
    """Perform an upgrade of glance.  Takes care of upgrading
    packages, rewriting configs + database migration and potentially
    any other post-upgrade actions.

    :param configs: The charm's main OSConfigRenderer object.

    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    reset_os_release()
    apt_install(determine_packages(), fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    # Don't start services if the unit is supposed to be paused.
    if not is_unit_paused_set():
        [service_start(s) for s in services()]
Example #7
    def sync_rings_request(self, broker_token, broker_timestamp,
                           builders_only=False):
        """Request for peers to sync rings from leader.

        NOTE: this action must only be performed by the cluster leader.

        :param broker_token: token to identify sync request.
        :param broker_timestamp: timestamp for peer and storage sync - this
                                 MUST be the same as the one used for storage
                                 sync.
        :param builders_only: if True, tell peers to sync builders only (not
                              rings).
        """
        if not is_elected_leader(SWIFT_HA_RES):
            errmsg = "Leader function called by non-leader"
            raise SwiftProxyCharmException(errmsg)

        rq = self.template()
        rq['trigger'] = str(uuid.uuid4())

        if builders_only:
            rq['sync-only-builders'] = 1

        rq['broker-token'] = broker_token
        rq['broker-timestamp'] = broker_timestamp
        rq['builder-broker'] = self._hostname
        return rq
Example #8
def identity_credentials_changed(relation_id=None, remote_unit=None):
    """Update the identity credentials relation on change

    Calls add_credentials_to_keystone

    :param relation_id: Relation id of the relation
    :param remote_unit: Related unit on the relation
    """
    if is_elected_leader(CLUSTER_RES):
        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return
        if not is_db_ready():
            log("identity-credentials-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring "
                "identity-credentials-relation updates", level=INFO)
            return

        unit_ready, _ = check_api_application_ready()
        if not unit_ready:
            log(
                ("Keystone charm unit not ready - deferring identity-relation "
                 "updates"),
                level=INFO)
            return

        # Create the tenant user
        add_credentials_to_keystone(relation_id, remote_unit)
    else:
        log('Deferring identity_credentials_changed() to service leader.')
Example #9
def notify_storage_rings_available(broker_timestamp):
    """Notify peer swift-storage relations that they should synchronise ring
    and builder files.

    Note that this should only be called from the leader unit.

    @param broker_timestamp: timestamp for peer and storage sync - this MUST be
                             the same as the one used for peer sync.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability storage-relation broadcast requested by "
            "non-leader - skipping", level=WARNING)
        return

    hostname = get_hostaddr()
    hostname = format_ipv6_addr(hostname) or hostname
    path = os.path.basename(get_www_dir())
    rings_url = 'http://{}/{}'.format(hostname, path)

    # TODO(hopem): consider getting rid of this trigger since the timestamp
    #              should do the job.
    trigger = uuid.uuid4()

    # Notify storage nodes that there is a new ring to fetch.
    log("Notifying storage nodes that new rings are ready for sync.",
        level=INFO)
    for relid in relation_ids('swift-storage'):
        relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                     rings_url=rings_url, broker_timestamp=broker_timestamp,
                     trigger=trigger)
Example #10
def leader_init_db_if_ready(use_current_context=False):
    """ Initialise the keystone db if it is ready and mark it as initialised.

    NOTE: this must be idempotent.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        update_all_identity_relation_units(check_db_ready=False)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
    # units acl entry has been added. So, if the db supports passing
    # a list of permitted units then check if we're in the list.
    if not is_db_ready(use_current_context=use_current_context):
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    migrate_database()
    # Ensure any existing service entries are updated in the
    # new database backend. Also avoid duplicate db ready check.
    update_all_identity_relation_units(check_db_ready=False)
    update_all_domain_backends()
Example #11
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'

    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    db_host = get_db_host(addr, interface=relation_type())

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #12
def notify_peers_builders_available(broker_token, builders_only=False):
    """Notify peer swift-proxy units that they should synchronise ring and
    builder files.

    Note that this should only be called from the leader unit.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Ring availability peer broadcast requested by non-leader - "
            "skipping", level=WARNING)
        return

    if not broker_token:
        log("No broker token - aborting sync", level=WARNING)
        return

    cluster_rids = relation_ids('cluster')
    if not cluster_rids:
        log("Cluster relation not yet available - skipping sync", level=DEBUG)
        return

    if builders_only:
        sync_type = "builders"
    else:
        sync_type = "builders & rings"

    # Notify peers that builders are available
    log("Notifying peer(s) that %s are ready for sync." % sync_type,
        level=INFO)
    rq = SwiftProxyClusterRPC().sync_rings_request(broker_token,
                                                   builders_only=builders_only)
    for rid in cluster_rids:
        log("Notifying rid=%s (%s)" % (rid, rq), level=DEBUG)
        relation_set(relation_id=rid, relation_settings=rq)
Example #13
def do_openstack_upgrade(configs):
    """Perform an uprade of cinder. Takes care of upgrading
    packages, rewriting configs + database migration and
    potentially any other post-upgrade actions.

    :param configs: The charm's main OSConfigRenderer object.

    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    juju_log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update()
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    # Stop/start services and migrate DB if leader
    [service_stop(s) for s in services()]
    if is_elected_leader(CLUSTER_RES):
        migrate_database()
    [service_start(s) for s in services()]
Example #14
def db_changed():
    rel = os_release('glance-common')

    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            if rel == "essex":
                status = call(['glance-manage', 'db_version'])
                if status != 0:
                    juju_log('Setting version_control to 0')
                    cmd = ["glance-manage", "version_control", "0"]
                    check_call(cmd)

            juju_log('Cluster leader, performing db sync')
            migrate_database()
        else:
            juju_log('allowed_units either not present, or local unit '
                     'not in acl list: %s' % allowed_units)

    for rid in relation_ids('image-service'):
        image_service_joined(rid)
Example #15
def remove_devices(args):
    """ Removes the device(s) from the ring(s).

    Removes the device(s) from the ring(s) based on the search pattern.

    :raises SwiftProxyCharmException: if pattern action_get('search-value')
        doesn't match any device in the ring.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        action_fail('Must run action on leader unit')
        return

    rings_valid = ['account', 'container', 'object', 'all']
    rings_to_update = []
    ring = action_get('ring')
    if ring not in rings_valid:
        action_fail("Invalid ring name '{}'. Should be one of: {}".format(
            ring, ', '.join(rings_valid)))
        return
    if ring == 'all':
        rings_to_update.extend(['account', 'container', 'object'])
    else:
        rings_to_update.append(ring)
    for ring_to_update in rings_to_update:
        ring_to_update_builder = ring_to_update + '.builder'
        ring_to_update_path = os.path.join(SWIFT_CONF_DIR,
                                           ring_to_update_builder)
        remove_from_ring(ring_to_update_path, action_get('search-value'))
    balance_rings()
Example #16
def identity_credentials_changed(relation_id=None, remote_unit=None):
    """Update the identity credentials relation on change

    Calls add_credentials_to_keystone

    :param relation_id: Relation id of the relation
    :param remote_unit: Related unit on the relation
    """
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log(
                "identity-credentials-relation-changed hook fired before db "
                "ready - deferring until db ready",
                level=WARNING)
            return

        if not is_db_initialised():
            log(
                "Database not yet initialised - deferring "
                "identity-credentials-relation updates",
                level=INFO)
            return

        # Create the tenant user
        add_credentials_to_keystone(relation_id, remote_unit)
    else:
        log('Deferring identity_credentials_changed() to service leader.')
Example #17
def set_weight(args):
    """ Sets the device's weight.

    Sets the device's weight based on the search pattern.

    :raises SwiftProxyCharmException: if pattern action_get('search-value')
        doesn't match any device in the ring.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        action_fail('Must run action on leader unit')
        return

    rings_valid = ['account', 'container', 'object', 'all']
    ring = action_get('ring')
    if ring not in rings_valid:
        action_fail('Invalid ring name.')
        return
    if ring == 'all':
        rings_to_update = ['account', 'container', 'object']
    else:
        rings_to_update = [ring]
    for ring_to_update in rings_to_update:
        ring_to_update_builder = ring_to_update + '.builder'
        ring_to_update_path = os.path.join(SWIFT_CONF_DIR,
                                           ring_to_update_builder)
        set_weight_in_ring(ring_to_update_path, action_get('search-value'),
                           str(action_get('weight')))
    balance_rings()
Example #18
def leader_init_db_if_ready(skip_acl_check=False, skip_cells_restarts=False,
                            db_rid=None, unit=None):
    """Initialise db if leader and db not yet intialised.

    NOTE: must be called from database context.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
    # acl entry has been added. So, if the db supports passing a list of
    # permitted units then check if we're in the list.
    allowed_units = relation_get('nova_allowed_units', rid=db_rid, unit=unit)
    if skip_acl_check or (allowed_units and local_unit() in
                          allowed_units.split()):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
        log('Triggering remote cloud-compute restarts.')
        [compute_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('cloud-compute')]
        log('Triggering remote neutron-network-service restarts.')
        [quantum_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('quantum-network-service')]
        if not skip_cells_restarts:
            log('Triggering remote cell restarts.')
            [nova_cell_relation_joined(rid=rid, remote_restart=True)
             for rid in relation_ids('cell')]
    else:
        log('allowed_units either not present, or local unit '
            'not in acl list: %s' % repr(allowed_units))
Example #19
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charm's main OSConfigRenderer object.
    """
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs,
                options=dpkg_opts,
                fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo it's nova-cloud-controller's job
    if is_elected_leader(CLUSTER_RES) and new_os_rel >= 'kilo':
        stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
Example #20
def storage_changed():
    """Storage relation.

    Only the leader unit can update and distribute rings so if we are not the
    leader we ignore this event and wait for a resync request from the leader.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log(
            "Not the leader - deferring storage relation change to leader "
            "unit.",
            level=DEBUG)
        return

    log("Storage relation changed -processing", level=DEBUG)
    host_ip = get_host_ip()
    if not host_ip:
        log(
            "No host ip found in storage relation - deferring storage "
            "relation",
            level=WARNING)
        return

    update_rsync_acls()

    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }

    if None in node_settings.values():
        missing = [k for k, v in node_settings.items() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing={})".format(', '.join(missing)),
            level=INFO)
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    update_rings(nodes)
    if not openstack.is_unit_paused_set():
        # Restart proxy here in case no config changes made (so
        # restart_on_change() ineffective).
        service_restart('swift-proxy')
Example #21
def pgsql_db_changed():
    rel = os_release('glance-common')

    if 'pgsql-db' not in CONFIGS.complete_contexts():
        juju_log('pgsql-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        if rel == "essex":
            status = call(['glance-manage', 'db_version'])
            if status != 0:
                juju_log('Setting version_control to 0')
                cmd = ["glance-manage", "version_control", "0"]
                check_call(cmd)

        juju_log('Cluster leader, performing db sync')
        migrate_database()

    for rid in relation_ids('image-service'):
        image_service_joined(rid)
Example #22
def pgsql_db_changed():
    rel = os_release('glance-common')

    if 'pgsql-db' not in CONFIGS.complete_contexts():
        juju_log('pgsql-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        if rel == "essex":
            status = call(['glance-manage', 'db_version'])
            if status != 0:
                juju_log('Setting version_control to 0')
                cmd = ["glance-manage", "version_control", "0"]
                check_call(cmd)

        juju_log('Cluster leader, performing db sync')
        migrate_database()

    for rid in relation_ids('image-service'):
        image_service_joined(rid)
Example #23
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #24
def leader_init_db_if_ready(skip_acl_check=False, skip_cells_restarts=False,
                            db_rid=None, unit=None):
    """Initialise db if leader and db not yet intialised.

    NOTE: must be called from database context.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
    # acl entry has been added. So, if the db supports passing a list of
    # permitted units then check if we're in the list.
    allowed_units = relation_get('nova_allowed_units', rid=db_rid, unit=unit)
    if skip_acl_check or (allowed_units and local_unit() in
                          allowed_units.split()):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
        log('Triggering remote cloud-compute restarts.')
        [compute_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('cloud-compute')]
        log('Triggering remote neutron-network-service restarts.')
        [quantum_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('quantum-network-service')]
        if not skip_cells_restarts:
            log('Triggering remote cell restarts.')
            [nova_cell_relation_joined(rid=rid, remote_restart=True)
             for rid in relation_ids('cell')]
    else:
        log('allowed_units either not present, or local unit '
            'not in acl list: %s' % repr(allowed_units))
Example #25
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log(
            "Database not yet initialised - deferring identity-relation "
            "updates",
            level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
Example #26
def leader_init_db_if_ready(use_current_context=False):
    """ Initialise the keystone db if it is ready and mark it as initialised.

    NOTE: this must be idempotent.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        update_all_identity_relation_units(check_db_ready=False)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
    # units acl entry has been added. So, if the db supports passing
    # a list of permitted units then check if we're in the list.
    if not is_db_ready(use_current_context=use_current_context):
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    migrate_database()
    # Ensure any existing service entries are updated in the
    # new database backend. Also avoid duplicate db ready check.
    update_all_identity_relation_units(check_db_ready=False)
Example #27
def cluster_joined(relation_id=None):
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        with open(rabbit.COOKIE_PATH, 'r') as f:
            cookie = f.read().strip()
        peer_store('cookie', cookie)
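A hedged sketch of the consuming side of this cookie propagation: peers can
read the value back with peer_retrieve(), the charmhelpers counterpart to
the peer_store() call above. The sync_cookie_from_leader name is an
assumption:

from charmhelpers.contrib.peerstorage import peer_retrieve

def sync_cookie_from_leader():
    # Non-leaders pick the erlang cookie up from peer storage and install
    # it locally before clustering with the leader.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('Cookie not yet available in peer storage - deferring',
            level=DEBUG)
        return
    with open(rabbit.COOKIE_PATH, 'w') as f:
        f.write(cookie)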
Example #28
def upgrade_charm():
    pre_install_hooks()

    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/rabbitmq which will end up replicated if clustered
    for f in [f for f in os.listdir('/var/lib/juju')
              if os.path.isfile(os.path.join('/var/lib/juju', f))]:
        if f.endswith('.passwd'):
            s = os.path.join('/var/lib/juju', f)
            d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)

            log('upgrade_charm: Migrating stored passwd'
                ' from %s to %s.' % (s, d))
            shutil.move(s, d)
    if is_elected_leader('res_rabbitmq_vip'):
        rabbit.migrate_passwords_to_peer_relation()

    # Explicitly fix the buggy 'naigos.passwd' file name
    old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
    if os.path.isfile(old):
        new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
        shutil.move(old, new)

    # NOTE(freyes): cluster_with() will take care of marking the node as
    # 'clustered' for existing deployments (LP: #1691510).
    rabbit.cluster_with()

    # Ensure all client connections are up to date on upgrade
    update_clients()
Example #29
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
Example #30
def cluster_changed():
    unison.ssh_authorized_peers(user=SSH_USER, group="juju_keystone", peer_interface="cluster", ensure_local_user=True)
    # NOTE(jamespage) re-echo passwords for peer storage
    echo_whitelist = ["_passwd", "identity-service:", "ssl-cert-master", "db-initialised", "ssl-cert-available-updates"]
    log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
    peer_echo(includes=echo_whitelist, force=True)

    check_peer_actions()

    initialise_pki()

    # Figure out if we need to mandate a sync
    units = get_ssl_sync_request_units()
    synced_units = relation_get(attribute="ssl-synced-units",
                                unit=local_unit())
    diff = None
    if synced_units:
        synced_units = json.loads(synced_units)
        diff = set(units).symmetric_difference(set(synced_units))

    if units and (not synced_units or diff):
        log("New peers joined and need syncing - %s" % (", ".join(units)), level=DEBUG)
        update_all_identity_relation_units_force_sync()
    else:
        update_all_identity_relation_units()

    if not is_elected_leader(CLUSTER_RES) and is_ssl_cert_master():
        # Force a sync and trigger a sync master re-election since we are
        # no longer the leader.
        force_ssl_sync()
    else:
        CONFIGS.write_all()
Example #31
def upgrade_charm():
    pre_install_hooks()
    add_source(config('source'), config('key'))
    apt_update(fatal=True)

    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/rabbitmq which will end up replicated if clustered
    for f in [
            f for f in os.listdir('/var/lib/juju')
            if os.path.isfile(os.path.join('/var/lib/juju', f))
    ]:
        if f.endswith('.passwd'):
            s = os.path.join('/var/lib/juju', f)
            d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)

            log('upgrade_charm: Migrating stored passwd'
                ' from %s to %s.' % (s, d))
            shutil.move(s, d)
    if is_elected_leader('res_rabbitmq_vip'):
        rabbit.migrate_passwords_to_peer_relation()

    # Explicitly fix the buggy 'naigos.passwd' file name
    old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
    if os.path.isfile(old):
        new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
        shutil.move(old, new)
Example #32
def db_changed():
    rel = os_release('glance-common')

    if 'shared-db' not in CONFIGS.complete_contexts():
        juju_log('shared-db relation incomplete. Peer not ready?')
        return

    CONFIGS.write(GLANCE_REGISTRY_CONF)
    # since folsom, a db connection setting in glance-api.conf is required.
    if rel != "essex":
        CONFIGS.write(GLANCE_API_CONF)

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            if rel == "essex":
                status = call(['glance-manage', 'db_version'])
                if status != 0:
                    juju_log('Setting version_control to 0')
                    cmd = ["glance-manage", "version_control", "0"]
                    check_call(cmd)

            juju_log('Cluster leader, performing db sync')
            migrate_database()
        else:
            juju_log('allowed_units either not present, or local unit '
                     'not in acl list: %s' % allowed_units)
Example #33
    def stop_proxy_request(self, peers_only=False, token=None):
        """Request to stop peer proxy service.

        A token can optionally be supplied in case we want to restart a
        previously triggered sync e.g. following a leader change notification.

        NOTE: this action must only be performed by the cluster leader.

        :param peers_only: If True, indicates that we only want peer
                           (i.e. proxy not storage) units to be notified.
        :param token: optional request token expected to be echoed in ACK from
                      peer. If token not provided, a new one is generated.
        """
        if not is_elected_leader(SWIFT_HA_RES):
            errmsg = "Leader function called by non-leader"
            raise SwiftProxyCharmException(errmsg)

        rq = self.template()
        if not token:
            token = str(uuid.uuid4())

        rq['trigger'] = token
        rq[self.KEY_STOP_PROXY_SVC] = rq['trigger']
        if peers_only:
            rq['peers-only'] = 1

        rq['builder-broker'] = self._hostname
        return rq
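None of the examples here show this request being broadcast. A hedged
sketch, following the relation_set pattern used by
notify_peers_builders_available above (the helper name is an assumption):

def notify_peers_stop_proxy(token=None):
    # Leader-only broadcast asking peer proxies to stop their service.
    if not is_elected_leader(SWIFT_HA_RES):
        log("Proxy stop broadcast requested by non-leader - skipping",
            level=WARNING)
        return
    rq = SwiftProxyClusterRPC().stop_proxy_request(peers_only=True,
                                                   token=token)
    for rid in relation_ids('cluster'):
        relation_set(relation_id=rid, relation_settings=rq)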
Example #34
def do_openstack_upgrade(configs):
    """
    Perform an upgrade.  Takes care of upgrading packages, rewriting
    configs, database migrations and potentially any other post-upgrade
    actions.

    :param configs: The charm's main OSConfigRenderer object.
    """
    cur_os_rel = os_release('neutron-common')
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)

    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]
    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    pkgs = determine_packages(new_os_rel)
    # Sort packages just to make unit tests easier
    pkgs.sort()
    apt_install(packages=pkgs, options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release
    configs.set_release(openstack_release=new_os_rel)
    # Before kilo it's nova-cloud-controller's job
    if is_elected_leader(CLUSTER_RES) and new_os_rel >= 'kilo':
        stamp_neutron_database(cur_os_rel)
        migrate_neutron_database()
Example #35
def identity_credentials_changed(relation_id=None, remote_unit=None):
    """Update the identity credentials relation on change

    Calls add_credentials_to_keystone

    :param relation_id: Relation id of the relation
    :param remote_unit: Related unit on the relation
    """
    if is_elected_leader(CLUSTER_RES):
        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return
        if not is_db_ready():
            log("identity-credentials-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring "
                "identity-credentials-relation updates", level=INFO)
            return

        # Create the tenant user
        add_credentials_to_keystone(relation_id, remote_unit)
    else:
        log('Deferring identity_credentials_changed() to service leader.')
Example #36
def leader_init_db_if_ready(use_current_context=False):
    """ Initialise the keystone db if it is ready and mark it as initialised.

    NOTE: this must be idempotent.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        update_all_identity_relation_units()
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the
    # units acl entry has been added. So, if the db supports passing
    # a list of permitted units then check if we're in the list.
    if not is_db_ready(use_current_context=use_current_context):
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    migrate_database()
    bootstrap_keystone(configs=CONFIGS)
    ensure_initial_admin(config)
    if CompareOpenStackReleases(os_release('keystone')) >= 'liberty':
        CONFIGS.write(POLICY_JSON)
    # Ensure any existing service entries are updated in the
    # new database backend.
    update_all_identity_relation_units()
    update_all_domain_backends()
Example #37
def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write_all()

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('nova_allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            status_set('maintenance', 'Running nova db migration')
            migrate_nova_database()
            log('Triggering remote cloud-compute restarts.')
            [compute_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cloud-compute')]
            log('Triggering remote cell restarts.')
            [nova_cell_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cell')]
            conditional_neutron_migration()
        else:
            log('allowed_units either not present, or local unit '
                'not in acl list: %s' % repr(allowed_units))

    for r_id in relation_ids('nova-api'):
        nova_api_relation_joined(rid=r_id)
Example #38
def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write_all()

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('nova_allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            status_set('maintenance', 'Running nova db migration')
            migrate_nova_database()
            log('Triggering remote cloud-compute restarts.')
            [
                compute_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cloud-compute')
            ]
            log('Triggering remote cell restarts.')
            [
                nova_cell_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cell')
            ]
            conditional_neutron_migration()
        else:
            log('allowed_units either not present, or local unit '
                'not in acl list: %s' % repr(allowed_units))

    for r_id in relation_ids('nova-api'):
        nova_api_relation_joined(rid=r_id)
Example #39
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available so it
    # must be done pre-upgrade
    if os_release('nova-common') == 'kilo' and is_elected_leader(CLUSTER_RES):
        migrate_nova_flavors()
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    reset_os_release()
    configs = register_configs(release=new_os_rel)
    configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
Example #40
def pgsql_db_changed():
    if 'pgsql-db' not in CONFIGS.complete_contexts():
        juju_log('pgsql-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(CINDER_CONF)
    if is_elected_leader(CLUSTER_RES):
        juju_log('Cluster leader, performing db sync')
        migrate_database()
Example #41
def ha_changed():
    CONFIGS.write_all()

    clustered = relation_get('clustered')
    if clustered and is_elected_leader(CLUSTER_RES):
        log('Cluster configured, notifying other services and updating '
            'keystone endpoint configuration')
        update_all_identity_relation_units()
Example #42
def cluster_joined():
    install_ceilometer_ocf()

    # If this node is the elected leader then share our secret with other nodes
    if is_elected_leader('grp_ceilometer_vips'):
        peer_store('shared_secret', get_shared_secret())

    CONFIGS.write_all()
Example #43
def ha_changed():
    CONFIGS.write_all()

    clustered = relation_get('clustered')
    if clustered and is_elected_leader(CLUSTER_RES):
        log('Cluster configured, notifying other services and updating '
            'keystone endpoint configuration')
        update_all_identity_relation_units()
Example #44
def pgsql_db_changed():
    if 'pgsql-db' not in CONFIGS.complete_contexts():
        juju_log('pgsql-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(CINDER_CONF)
    if is_elected_leader(CLUSTER_RES):
        juju_log('Cluster leader, performing db sync')
        migrate_database()
Example #45
def identity_changed(relation_id=None, remote_unit=None):
    notifications_checksums = {}
    notifications_endpoints = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log(
                "identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready",
                level=WARNING)
            return

        if not is_db_initialised():
            log(
                "Database not yet initialised - deferring identity-relation "
                "updates",
                level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')
        settings = relation_get(rid=relation_id, unit=remote_unit)

        # If endpoint has changed, notify to units related over the
        # identity-notifications interface. We base the decision to notify on
        # whether admin_url, public_url or internal_url have changed from
        # previous notify.
        service = settings.get('service')
        if service:
            key = '%s-endpoint-changed' % service
            notifications_endpoints[key] = endpoints_dict(settings)
            notifications_checksums[key] = endpoints_checksum(settings)
        else:
            # Some services don't set their name in the 'service' key in the
            # relation, for those their name is calculated from the prefix of
            # keys. See `assemble_endpoints()` for details.
            single = {
                'service', 'region', 'public_url', 'admin_url', 'internal_url'
            }
            endpoints = assemble_endpoints(settings)
            for ep in endpoints.keys():
                if single.issubset(endpoints[ep]):
                    key = '%s-endpoint-changed' % ep
                    log('endpoint: %s' % ep)
                    notifications_endpoints[key] = (endpoints_dict(
                        endpoints[ep]))
                    notifications_checksums[key] = (endpoints_checksum(
                        endpoints[ep]))
    else:
        log('Deferring identity_changed() to service leader.')

    if notifications_endpoints or notifications_checksums:
        send_notifications(notifications_checksums, notifications_endpoints)
Example #46
    def __call__(self):
        token_expiration = int(config('token-expiration'))
        ctxt = {
            'enabled': (fernet_enabled() and
                        is_elected_leader(DC_RESOURCE_NAME)),
            'unit_name': local_unit(),
            'charm_dir': charm_dir(),
            'minute': ('*/5' if token_expiration > 300 else '*')
        }
        return ctxt
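A hedged usage sketch for a context like the one above: in charmhelpers
based charms, such context classes are registered against a template so the
rendered file (here a fernet key rotation cron job) tracks leadership. The
FernetCronContext class name and cron.d path are assumptions:

from charmhelpers.contrib.openstack import templating

configs = templating.OSConfigRenderer(templates_dir='templates',
                                      openstack_release='queens')
configs.register('/etc/cron.d/keystone-fernet-rotate',
                 [FernetCronContext()])
configs.write('/etc/cron.d/keystone-fernet-rotate')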
Example #47
def storage_changed():
    """Storage relation.

    Only the leader unit can update and distribute rings so if we are not the
    leader we ignore this event and wait for a resync request from the leader.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Not the leader - ignoring storage relation until leader ready.",
            level=DEBUG)
        return

    log("Leader established, updating ring builders", level=INFO)
    host_ip = get_host_ip()
    if not host_ip:
        log("No host ip found in storage relation - deferring storage "
            "relation", level=WARNING)
        return

    update_rsync_acls()

    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }

    if None in node_settings.values():
        missing = [k for k, v in node_settings.items() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing=%s)" % (', '.join(missing)), level=INFO)
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    update_rings(nodes)
    if not is_paused():
        # Restart proxy here in case no config changes made (so
        # pause_aware_restart_on_change() ineffective).
        service_restart('swift-proxy')
Example #48
def config_changed():
    if config('prefer-ipv6'):
        rabbit.assert_charm_supports_ipv6()

    # Add archive source if provided
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    # Copy in defaults file for updated ulimits
    shutil.copyfile(
        'templates/rabbitmq-server',
        '/etc/default/rabbitmq-server')
    # Install packages to ensure any changes to source
    # result in an upgrade if applicable.
    status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
    apt_install(rabbit.PACKAGES, fatal=True)

    open_port(5672)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    configure_nodename()

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(55672)
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(55672)

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))
    rabbit.ConfigRenderer(
        rabbit.CONFIG_FILES).write_all()

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")

        if ha_is_active_active:
            update_nrpe_checks()
        else:
            if is_elected_leader('res_rabbitmq_vip'):
                update_nrpe_checks()
            else:
                log("hacluster relation is present but this node is not active"
                    " skipping update nrpe checks")
    else:
        update_nrpe_checks()

    # NOTE(jamespage)
    # trigger amqp_changed to pickup and changes to network
    # configuration via the access-network config option.
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #49
def storage_joined():
    if not is_elected_leader(SWIFT_HA_RES):
        log("New storage relation joined - stopping proxy until ring builder "
            "synced", level=INFO)
        service_stop('swift-proxy')

        # This unit is not currently responsible for distributing rings but
        # may become so in the future, so deprecate any existing rings in
        # the www dir to avoid storage nodes picking up out-of-date copies.
        mark_www_rings_deleted()
def config_changed_postupgrade():
    save_script_rc()
    release = os_release('keystone')
    if run_in_apache(release=release):
        # Need to ensure mod_wsgi is installed and apache2 is reloaded
        # immediately as the charm queries its local keystone before the
        # restart decorator can fire
        apt_install(filter_installed_packages(determine_packages()))
        # when deployed from source, init scripts aren't installed
        service_pause('keystone')

        disable_unused_apache_sites()
        if WSGI_KEYSTONE_API_CONF in CONFIGS.templates:
            CONFIGS.write(WSGI_KEYSTONE_API_CONF)
        if not is_unit_paused_set():
            restart_pid_check('apache2')
            stop_manager_instance()

    if enable_memcache(release=release):
        # If charm or OpenStack have been upgraded then the list of required
        # packages may have changed so ensure they are installed.
        apt_install(filter_installed_packages(determine_packages()))

    if is_leader() and fernet_enabled():
        key_setup()
        key_leader_set()

    configure_https()
    open_port(config('service-port'))

    update_nrpe_config()

    CONFIGS.write_all()

    if snap_install_requested() and not is_unit_paused_set():
        service_restart('snap.keystone.*')
        stop_manager_instance()

    if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not
            is_unit_paused_set()):
        ensure_initial_admin(config)
        if CompareOpenStackReleases(
                os_release('keystone')) >= 'liberty':
            CONFIGS.write(POLICY_JSON)

    update_all_identity_relation_units()
    update_all_domain_backends()
    update_all_fid_backends()

    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)

    notify_middleware_with_release_version()
def ha_relation_changed():
    clustered = relation_get('clustered')
    if (clustered and is_elected_leader(DC_RESOURCE_NAME)):
        log('Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        update_shared_db_rels()
        for r_id in relation_ids('db'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=False)
        for r_id in relation_ids('db-admin'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=True)
def ha_changed():
    CONFIGS.write_all()

    clustered = relation_get('clustered')
    if clustered:
        log('Cluster configured, notifying other services and updating '
            'keystone endpoint configuration')
        if (is_db_initialised() and is_elected_leader(CLUSTER_RES) and not
                is_unit_paused_set()):
            ensure_initial_admin(config)
            update_all_identity_relation_units()
            update_all_domain_backends()
            update_all_fid_backends()
def postgresql_nova_db_changed():
    if 'pgsql-nova-db' not in CONFIGS.complete_contexts():
        log('pgsql-nova-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write_all()

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
        log('Triggering remote cloud-compute restarts.')
        for rid in relation_ids('cloud-compute'):
            compute_joined(rid=rid, remote_restart=True)
        conditional_neutron_migration()
def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write(HEAT_CONF)

    if is_elected_leader(CLUSTER_RES):
        allowed_units = relation_get('heat_allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            log('Cluster leader, performing db sync')
            migrate_database()
        else:
            log('allowed_units not present, or local unit '
                'not in the ACL list: {}'.format(repr(allowed_units)))
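# A small sketch of the allowed_units gate above: the database charm
# publishes a space-separated list of units permitted to run migrations,
# and only a unit on that list should sync the database. Inputs are plain
# strings for illustration.
def may_run_migration(allowed_units, local_unit_name):
    """True if this unit appears in the space-separated ACL string."""
    return bool(allowed_units) and local_unit_name in allowed_units.split()

# may_run_migration('heat/0 heat/1', 'heat/0')  -> True
# may_run_migration(None, 'heat/0')             -> False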
def config_changed():
    if is_elected_leader(SWIFT_HA_RES):
        log("Leader established, generating ring builders", level=INFO)
        # initialize new storage rings.
        for path in SWIFT_RINGS.values():
            if not os.path.exists(path):
                initialize_ring(path,
                                config('partition-power'),
                                config('replicas'),
                                config('min-hours'))

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        setup_ipv6()

    configure_https()
    open_port(config('bind-port'))
    update_nrpe_config()

    # Determine whether or not we should do an upgrade.
    if not config('action-managed-upgrade') and \
            openstack.openstack_upgrade_available('python-swift'):
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(CONFIGS)

    status_set('maintenance', 'Updating and balancing rings')
    update_rings(min_part_hours=config('min-hours'))

    if not config('disable-ring-balance') and is_elected_leader(SWIFT_HA_RES):
        # Try ring balance. If rings are balanced, no sync will occur.
        balance_rings()

    for r_id in relation_ids('identity-service'):
        keystone_joined(relid=r_id)

    for r_id in relation_ids('object-store'):
        object_store_joined(relation_id=r_id)
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    cur_os_rel = os_release('nova-common')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]

    # NOTE(jamespage) pre-stamp neutron database before upgrade from grizzly
    if cur_os_rel == 'grizzly':
        neutron_db_manage(['stamp', 'grizzly'])

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    if cur_os_rel == 'grizzly':
        # NOTE(jamespage) when upgrading from grizzly->havana, config
        # files need to be generated prior to performing the db upgrade
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()
        neutron_db_manage(['upgrade', 'head'])
    else:
        if new_os_rel < 'kilo':
            neutron_db_manage(['stamp', cur_os_rel])
            migrate_neutron_database()
        # NOTE(jamespage) upgrade with existing config files as the
        # havana->icehouse migration enables new service_plugins which
        # create issues with db upgrades
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()

    if new_os_rel == 'icehouse':
        # NOTE(jamespage) default plugin switch to ml2@icehouse
        ml2_migration()

    if is_elected_leader(CLUSTER_RES):
        migrate_nova_database()
    for svc in services():
        service_start(svc)

    disable_policy_rcd()

    return configs
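# For reference, a hedged sketch of what dpkg_opts above amounts to on the
# apt-get command line: --force-confdef keeps the maintainer default for
# unmodified conffiles and --force-confnew takes the new version on
# conflict, so the dist-upgrade never blocks on an interactive prompt.
# This is a plain-subprocess approximation, not the charm-helpers
# apt_upgrade implementation; it needs root to actually run.
import subprocess

def unattended_dist_upgrade():
    cmd = [
        'apt-get', '--assume-yes',
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
        'dist-upgrade',
    ]
    subprocess.check_call(cmd)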
def identity_changed(relation_id=None, remote_unit=None):
    CONFIGS.write_all()

    notifications = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log("identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring identity-relation "
                "updates", level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')
        settings = relation_get(rid=relation_id, unit=remote_unit)
        service = settings.get('service', None)
        if service:
            # If service is known and endpoint has changed, notify service if
            # it is related with notifications interface.
            csum = hashlib.sha256()
            # We base the decision to notify on whether these parameters have
            # changed (if csum is unchanged from previous notify, relation will
            # not fire).
            csum.update(settings.get('public_url', '').encode('utf-8'))
            csum.update(settings.get('admin_url', '').encode('utf-8'))
            csum.update(settings.get('internal_url', '').encode('utf-8'))
            notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
    else:
        # Each unit needs to set the db information; otherwise, if the unit
        # with the info dies, the settings die with it (Bug #1355848).
        for rel_id in relation_ids('identity-service'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            # Ensure the null'd settings are unset in the relation.
            peerdb_settings = filter_null(peerdb_settings)
            if 'service_password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('Deferring identity_changed() to service leader.')

    if notifications:
        send_notifications(notifications)
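# A standalone sketch of the endpoint-change detection used above: hash
# the endpoint URLs and notify only when the digest differs from the one
# last sent, so repeated idempotent hook runs do not re-fire relations.
# Function and variable names are illustrative only.
import hashlib

def endpoint_digest(settings):
    """Return a stable sha256 digest over the endpoint URLs."""
    csum = hashlib.sha256()
    for key in ('public_url', 'admin_url', 'internal_url'):
        csum.update(settings.get(key, '').encode('utf-8'))
    return csum.hexdigest()

# previous = endpoint_digest(old_settings)
# if endpoint_digest(new_settings) != previous:  # then send notification
#     ...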
def cluster_changed():
    key = SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED
    leader_changed = relation_get(attribute=key)
    if leader_changed:
        log("Leader changed notification received from peer unit. Since this "
            "most likely occurred during a ring sync proxies will be "
            "disabled until the leader is restored and a fresh sync request "
            "is set out", level=WARNING)
        service_stop("swift-proxy")
        return

    if is_elected_leader(SWIFT_HA_RES):
        cluster_leader_actions()
    else:
        cluster_non_leader_actions()
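# A minimal sketch of the peer-notification flag checked above: the
# outgoing leader broadcasts a well-known key on the peer relation and
# every unit reacts when it sees the key set. The key name and settings
# dict are illustrative stand-ins for SwiftProxyClusterRPC and
# relation_get().
KEY_NOTIFY_LEADER_CHANGED = 'leader-changed-notification'

def leader_changed(peer_settings):
    """True if a peer has broadcast a leader-changed notification."""
    return bool(peer_settings.get(KEY_NOTIFY_LEADER_CHANGED))

# leader_changed({'leader-changed-notification': 'token-123'})  -> True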
def pgsql_db_changed():
    if 'pgsql-db' not in CONFIGS.complete_contexts():
        log('pgsql-db relation incomplete. Peer not ready?')
    else:
        CONFIGS.write(KEYSTONE_CONF)
        if is_elected_leader(CLUSTER_RES):
            if not is_db_ready(use_current_context=True):
                log('Allowed_units list provided and this unit not present',
                    level=INFO)
                return

            migrate_database()
            # Ensure any existing service entries are updated in the
            # new database backend. Also avoid duplicate db ready check.
            update_all_identity_relation_units(check_db_ready=False)