# NOTE: These snippets are Juju charm hook code. Imports along these lines
# are assumed throughout (socket, subprocess and charmhelpers):
#
#   from socket import gethostbyname, gethostname
#   from subprocess import check_call
#   from charmhelpers.core.hookenv import (
#       config, log, related_units, relation_get, relation_ids, unit_get)
def provision_control():
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    # Pick the first contrail-api unit that advertises a port and resolve
    # its relation address to an IP.
    a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                      port)
                     for rid in relation_ids("contrail-api")
                     for unit, port in
                     ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                     if port ][0]
    # Admin credentials come from the first identity-admin unit.
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
    def _coordinator_context(self):
        """Attempt to create a usable tooz coordinator URL from zk or memcache

        This'll see if we have zookeeper or memcached relations and use that
        found as the coordinator. Note memcahe is only for testing and
        zookeeper will be preferred if both are found.
        """

        # NOTE: Neither the zookeeper or memcache charms do any kind of
        # clustering of peers, so we just look for one that tells us its
        # port and point at that.
        zk_relation_ids = relation_ids('zookeeper')
        for rid in zk_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                zk_port = rel_data.get('port')
                zk_addr = rel_data.get('private-address')
                if zk_port:
                    url = 'kazoo://%s:%s?timeout=5' % (zk_addr, zk_port)
                    log('Using zookeeper @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        memcached_relation_ids = relation_ids('cache')
        for rid in memcached_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                mc_port = rel_data.get('tcp-port')
                mc_addr = rel_data.get('private-address')
                if mc_port:
                    url = 'memcached://%s:%s' % (mc_addr, mc_port)
                    log('Using memcached @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        log('no astara coordination relation data found')
        return {}
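# The coordination_url built above is what a tooz-based service consumes at
# startup. A minimal sketch of that consumer, assuming the stock tooz API
# (the member id is arbitrary):
from tooz import coordination

def start_coordinator(coordination_url):
    # get_coordinator() accepts backend URLs such as kazoo://host:port?timeout=5
    # or memcached://host:port, matching the URLs built above.
    coord = coordination.get_coordinator(coordination_url, b'astara-orchestrator')
    coord.start()
    return coord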
def update_all_identity_relation_units(check_db_ready=True):
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return
    if not is_expected_scale():
        log("Keystone charm and it's dependencies not yet at expected scale "
            "- deferring identity-relation updates", level=INFO)
        return

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
def provision_vrouter():
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        # Fall back to the first contrail-api unit advertising a port (see
        # the unrolled first_api_endpoint sketch above).
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def provision_local_metadata():
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        # Fall back to the first contrail-api unit advertising a port.
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password = [ (relation_get("service_username", unit, rid),
                        relation_get("service_password", unit, rid))
                       for rid in relation_ids("identity-admin")
                       for unit in related_units(rid) ][0]
    log("Provisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password])
def get_cluster_id():
    """ Return cluster id (lp1776171)

    Return cluster ID for MySQL asynchronous replication
    :returns: int cluster_id
    """
    if not config('cluster-id'):
        msg = ("Master / Slave relation requires 'cluster-id' option")
        status_set("blocked", msg)
        raise ClusterIDRequired(msg)
    cluster_id = config('cluster-id')
    for rid in relation_ids('master'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    for rid in relation_ids('slave'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    return cluster_id
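# ClusterIDRequired and ClusterIDIdentical are raised above but not defined
# in this snippet. A minimal sketch, assuming they are plain Exception
# subclasses defined elsewhere in the charm:
class ClusterIDRequired(Exception):
    """Raised when the 'cluster-id' config option is unset."""


class ClusterIDIdentical(Exception):
    """Raised when a master/slave peer already uses this cluster-id."""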
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('cloud-compute'):
        for unit in related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    rels = ['shared-db', 'pgsql-nova-db']
    for rname in rels:
        for rid in relation_ids(rname):
            for unit in related_units(rid):
                if rname == 'pgsql-nova-db':
                    leader_init_db_if_ready(skip_acl_check=True,
                                            skip_cells_restarts=True,
                                            db_rid=rid, unit=unit)
                else:
                    leader_init_db_if_ready(db_rid=rid, unit=unit)

    update_nrpe_config()
    update_nova_consoleauth_config()
def ha_relation_changed():
    clustered = relation_get('clustered')
    if (clustered and is_elected_leader(DC_RESOURCE_NAME)):
        log('Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        update_shared_db_rels()
        for r_id in relation_ids('db'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=False)
        for r_id in relation_ids('db-admin'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=True)
    def rndc_master_ips(self):
        rndc_master_ips = []
        rndc_master_ip = ch_ip.get_relation_ip('dns-backend')
        rndc_master_ips.append(rndc_master_ip)
        cluster_relid = hookenv.relation_ids('cluster')[0]
        if hookenv.related_units(relid=cluster_relid):
            for unit in hookenv.related_units(relid=cluster_relid):
                rndc_master_ip = hookenv.relation_get('rndc-address',
                                                      rid=cluster_relid,
                                                      unit=unit)
                if rndc_master_ip is not None:
                    rndc_master_ips.append(rndc_master_ip)
        return rndc_master_ips
def cluster_ready():
    """Determine if each node in the cluster is ready and the cluster is
    complete with the expected number of peers.

    Once cluster_ready returns True it is safe to execute client relation
    hooks. Having min-cluster-size set will guarantee cluster_ready will not
    return True until the expected number of peers are clustered and ready.

    If min-cluster-size is not set it must assume the cluster is ready in order
    to allow for single unit deployments.

    @returns boolean
    """
    min_size = config('min-cluster-size')
    units = 1
    for rid in relation_ids('cluster'):
        units += len(related_units(rid))
    if not min_size:
        min_size = units

    if not is_sufficient_peers():
        return False
    elif min_size > 1:
        if not clustered():
            return False
        clustered_units = 1
        for rid in relation_ids('cluster'):
            for remote_unit in related_units(rid):
                if not relation_get(attribute='clustered',
                                    rid=rid,
                                    unit=remote_unit):
                    log("{} is not yet clustered".format(remote_unit),
                        DEBUG)
                    return False
                else:
                    clustered_units += 1
        if clustered_units < min_size:
            log("Fewer than minimum cluster size:{} rabbit units reporting "
                "clustered".format(min_size),
                DEBUG)
            return False
        else:
            log("All {} rabbit units reporting clustered"
                "".format(min_size),
                DEBUG)
            return True

    log("Must assume this is a single unit returning 'cluster' ready", DEBUG)
    return True
    def __call__(self):

        middlewares = []

        for rid in relation_ids('keystone-middleware'):
            if related_units(rid):
                for unit in related_units(rid):
                    middleware_name = relation_get('middleware_name',
                                                   rid=rid,
                                                   unit=unit)
                    if middleware_name:
                        middlewares.append(middleware_name)
        return {
            'middlewares': ",".join(middlewares)
        }
def cluster_wait():
    ''' Wait for operations based on modulo distribution

    Use the distributed_wait function to determine how long to wait before
    running an operation like restart or cluster join. By setting modulo to
    the exact number of nodes in the cluster we get serial operations.

    Check for explicit configuration parameters for modulo distribution.
    The config setting modulo-nodes has first priority. If modulo-nodes is not
    set, check min-cluster-size. Finally, if neither value is set, determine
    how many peers there are from the cluster relation.

    @side_effect: distributed_wait is called which calls time.sleep()
    @return: None
    '''
    wait = config('known-wait')
    if config('modulo-nodes') is not None:
        # modulo-nodes has first priority
        num_nodes = config('modulo-nodes')
    elif config('min-cluster-size'):
        # min-cluster-size is consulted next
        num_nodes = config('min-cluster-size')
    else:
        # If nothing explicit is configured, determine cluster size based on
        # peer relations
        num_nodes = 1
        for rid in relation_ids('cluster'):
            num_nodes += len(related_units(rid))
    distributed_wait(modulo=num_nodes, wait=wait)
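# The modulo distribution behind distributed_wait() staggers units by unit
# number, so with modulo equal to the cluster size operations run serially.
# An illustrative sketch of the idea (not charmhelpers' implementation);
# assumes unit names of the form "<service>/<number>":
import time

def sketch_distributed_wait(unit_name, modulo, wait):
    unit_number = int(unit_name.split('/')[1])
    # unit/0 sleeps 0s, unit/1 sleeps wait seconds, unit/2 sleeps 2*wait, ...
    time.sleep((unit_number % modulo) * wait)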
def is_sufficient_peers():
    """Sufficient number of expected peers to build a complete cluster

    If min-cluster-size has been provided, check that we have sufficient
    number of peers who have presented a hostname for a complete cluster.

    If not defined assume a single unit.

    @returns boolean
    """
    min_size = config('min-cluster-size')
    if min_size:
        log("Checking for minimum of {} peer units".format(min_size),
            level=DEBUG)

        # Include this unit
        units = 1
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                if relation_get(attribute='hostname',
                                rid=rid, unit=unit):
                    units += 1

        if units < min_size:
            log("Insufficient number of peer units to form cluster "
                "(expected=%s, got=%s)" % (min_size, units), level=INFO)
            return False
        else:
            log("Sufficient number of peer units to form cluster {}"
                "".format(min_size, level=DEBUG))
            return True
    else:
        log("min-cluster-size is not defined, race conditions may occur if "
            "this is not a single unit deployment.", level=WARNING)
        return True
def is_bootstrapped():
    """Determine if each node in the cluster has been bootstrapped and the
    cluster is complete with the expected number of peers.

    Check that each node in the cluster, including this one, has set
    bootstrap-uuid on the cluster relation.

    Having min-cluster-size set will guarantee is_bootstrapped will not
    return True until the expected number of peers are bootstrapped. If
    min-cluster-size is not set, it will check peer relations to estimate the
    expected cluster size. If min-cluster-size is not set and there are no
    peers it must assume the cluster is bootstrapped in order to allow for
    single unit deployments.

    @returns boolean
    """
    min_size = get_min_cluster_size()
    if not is_sufficient_peers():
        return False
    elif min_size > 1:
        uuids = []
        for relation_id in relation_ids('cluster'):
            units = related_units(relation_id) or []
            units.append(local_unit())
            for unit in units:
                if not relation_get(attribute='bootstrap-uuid',
                                    rid=relation_id,
                                    unit=unit):
                    log("{} is not yet clustered".format(unit),
                        DEBUG)
                    return False
                else:
                    bootstrap_uuid = relation_get(attribute='bootstrap-uuid',
                                                  rid=relation_id,
                                                  unit=unit)
                    if bootstrap_uuid:
                        uuids.append(bootstrap_uuid)

        if len(uuids) < min_size:
            log("Fewer than minimum cluster size: "
                "{} percona units reporting clustered".format(min_size),
                DEBUG)
            return False
        elif len(set(uuids)) > 1:
            raise Exception("Found inconsistent bootstrap uuids: "
                            "{}".format((uuids)))
        else:
            log("All {} percona units reporting clustered".format(min_size),
                DEBUG)
    elif not seeded():
        # Single unit deployment but not yet bootstrapped
        return False

    # Set INITIAL_CLUSTERED_KEY as the cluster has fully bootstrapped
    kvstore = kv()
    if not kvstore.get(INITIAL_CLUSTERED_KEY, False):
        kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
        kvstore.flush()

    return True
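# For reference, the contract is_bootstrapped() checks: every peer (and the
# local unit) must publish the same bootstrap-uuid on the cluster relation.
# An illustrative write from a bootstrapped unit (the uuid value is made up):
#
#   relation_set(relation_id=rid,
#                relation_settings={'bootstrap-uuid': '1105f164-0c7b-...'})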
    def restart_trigger(self):
        rt = None
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                rt = relation_get('restart_trigger', rid=rid, unit=unit)
                if rt:
                    return rt
        return rt
def get_relation_settings(keys):
    """Fetch required relation settings.

    If any setting is unset ('' or None) we return None.

    :param keys: Setting keys to look for.
    """
    settings = {}
    try:
        for rid in relation_ids('gerrit-configurator'):
            for unit in related_units(rid):
                for key in keys:
                    settings[key] = relation_get(key, rid=rid, unit=unit)

    except Exception as exc:
        log('Failed to get gerrit relation data (%s).' % (exc), level=WARNING)
        return

    missing = [k for k, v in settings.iteritems() if not v]
    if missing:
        log("Missing value for '%s' in gerrit relation." %
            (','.join(missing)), level=WARNING)
        return

    return settings
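# Example usage with hypothetical setting names; returns None until the
# gerrit-configurator relation has provided every requested key:
#
#   settings = get_relation_settings(['admin_username', 'admin_email'])
#   if settings:
#       configure_gerrit_admin(**settings)   # hypothetical consumer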
def update_zuul():
    zuul_units = []

    for rid in relation_ids('zuul-configurator'):
        zuul_units.extend(related_units(rid))

    if not zuul_units:
        log('*** No related zuul units, skipping config.')
        return

    log("*** Updating zuul.")
    layout_path = '/etc/zuul/layout.yaml'

    if not os.path.isdir(ZUUL_CONFIG_DIR):
        log('Could not find zuul config directory at expected location, '
            'skipping zuul update (%s)' % ZUUL_CONFIG_DIR)
        return

    log('Installing layout from %s to %s.' % (ZUUL_CONFIG_DIR, layout_path))
    common.sync_dir(ZUUL_CONFIG_DIR, layout_path)

    stop_zuul()
    start_zuul()

    return True
    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
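# An illustrative subordinate_configuration payload, matching the
# service -> config file -> settings nesting parsed above (the service and
# file names are hypothetical; the 'sections' key mirrors the fallback
# default at the end of the method):
#
#   {
#       "nova": {
#           "/etc/nova/nova.conf": {
#               "sections": {
#                   "DEFAULT": [["key1", "value1"]]
#               }
#           }
#       }
#   }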
    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
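# An illustrative context this yields for a three-unit cluster (unit names
# and addresses are made up):
#
#   {'units': {'keystone-0': '10.0.0.10',
#              'keystone-1': '10.0.0.11',
#              'keystone-2': '10.0.0.12'}}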
    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt
def is_request_complete_for_rid(request, rid):
    """Check if a given request has been completed on the given relation

    @param request: A CephBrokerRq object
    @param rid: Relation ID
    """
    broker_key = get_broker_rsp_key()
    for unit in related_units(rid):
        rdata = relation_get(rid=rid, unit=unit)
        if rdata.get(broker_key):
            rsp = CephBrokerRsp(rdata.get(broker_key))
            if rsp.request_id == request.request_id:
                if not rsp.exit_code:
                    return True
        else:
            # The remote unit sent no reply targeted at this unit so either the
            # remote ceph cluster does not support unit targeted replies or it
            # has not processed our request yet.
            if rdata.get('broker_rsp'):
                request_data = json.loads(rdata['broker_rsp'])
                if request_data.get('request-id'):
                    log('Ignoring legacy broker_rsp without unit key as remote '
                        'service supports unit specific replies', level=DEBUG)
                else:
                    log('Using legacy broker_rsp as remote service does not '
                        'support unit specific replies', level=DEBUG)
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
                    if not rsp.exit_code:
                        return True

    return False
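# Two reply shapes are handled above: a unit-targeted response stored under
# get_broker_rsp_key() (e.g. 'broker-rsp-glance-0') and the legacy shared
# 'broker_rsp' key. Both carry JSON along these lines (values illustrative):
#
#   {"exit-code": 0, "request-id": "a4186a45-..."}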
def create_ogr_zone(args):
    aggr_name = action_get('aggregate-name')
    avail_zone = action_get('avail-zone')
    ogr_compute = action_get('ogr-compute')

    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-create {} {}'"\
          .format(aggr_name, avail_zone)
    commands.getoutput(cmd)
    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-add-host {} {}'"\
          .format(aggr_name, ogr_compute)
    commands.getoutput(cmd)
    if config("openstack-version") == "liberty" or \
       config("openstack-version") == "mitaka":
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-details {}'"\
              .format(aggr_name)
    else:
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-show {}'"\
              .format(aggr_name)
    res = commands.getoutput(cmd)
    action_set({'result-map.message': res})
    relation_info = {
        'aggr-name': aggr_name
    }
    if config("openstack-version") == "pike" or \
       config("openstack-version") == "ocata":
        for rid in relation_ids('neutron-api-cplane'):
            for unit in related_units(rid):
                relation_set(relation_id=rid, relation_settings=relation_info)
    def __call__(self):
        for rid in relation_ids('quantum-network-service'):
            for unit in related_units(rid):
                ctxt = {
                    'keystone_host': relation_get('keystone_host',
                                                  rid=rid, unit=unit),
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'service_tenant': relation_get('service_tenant',
                                                   rid=rid, unit=unit),
                    'service_username': relation_get('service_username',
                                                     rid=rid, unit=unit),
                    'service_password': relation_get('service_password',
                                                     rid=rid, unit=unit),
                    'quantum_host': relation_get('quantum_host',
                                                 rid=rid, unit=unit),
                    'quantum_port': relation_get('quantum_port',
                                                 rid=rid, unit=unit),
                    'quantum_url': relation_get('quantum_url',
                                                rid=rid, unit=unit),
                    'region': relation_get('region',
                                           rid=rid, unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
def config_changed():
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno.
    if os_release('nova-common') >= 'juno':
        with open('/etc/init/neutron-server.override', 'wb') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            [neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')]
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            [db_joined(relation_id=r_id)
                for r_id in relation_ids('shared-db')]

    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)

        [compute_joined(rid=rid)
            for rid in relation_ids('cloud-compute')]

    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
def get_service_name(relid):
    '''Return the service name for the other end of relid.'''
    units = hookenv.related_units(relid)
    if units:
        return units[0].split('/', 1)[0]
    else:
        return None
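# Example: if the first remote unit on the relation is 'mysql/0', the
# service name returned is 'mysql':
#
#   >>> get_service_name('shared-db:7')
#   'mysql'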
    def single_mode_map(self):
        """Return map of local addresses only if this is a single node cluster

           @return dict of local address info e.g.
               {'cluster_hosts':
                   {'this_unit_private_addr': {
                        'backends': {
                            'this_unit-1': 'this_unit_private_addr'},
                        'network': 'this_unit_private_addr/private_netmask'},
                'internal_addresses': ['intaddr']}
        """
        relation_info = {}
        try:
            cluster_relid = hookenv.relation_ids('cluster')[0]
            if not hookenv.related_units(relid=cluster_relid):
                relation_info = {
                    'cluster_hosts': self.local_default_addresses(),
                    'internal_addresses': self.internal_addresses,
                }
                net_split = self.local_network_split_addresses()
                for key in net_split.keys():
                    relation_info['cluster_hosts'][key] = net_split[key]
        except IndexError:
            pass
        return relation_info
def get_ceph_nodes():
    ''' Query named relation 'ceph' to determine current nodes '''
    hosts = []
    for r_id in relation_ids('ceph'):
        for unit in related_units(r_id):
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
    return hosts
    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}

        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                ctxt = {
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'service_host': relation_get('service_host', rid=rid,
                                                 unit=unit),
                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'admin_tenant_name': relation_get('service_tenant',
                                                      rid=rid, unit=unit),
                    'admin_user': relation_get('service_username', rid=rid,
                                               unit=unit),
                    'admin_password': relation_get('service_password', rid=rid,
                                                   unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}