Example #1
def update_all_identity_relation_units(check_db_ready=True):
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return
    if not is_expected_scale():
        log("Keystone charm and its dependencies not yet at expected scale "
            "- deferring identity-relation updates", level=INFO)
        return

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
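
Every example on this page follows the same hookenv idiom: enumerate the relation IDs for an interface, then the remote units on each ID, then read their settings. A minimal sketch of that pattern, assuming only charmhelpers.core.hookenv (the helper name collect_remote_setting is hypothetical):

from charmhelpers.core.hookenv import relation_ids, related_units, relation_get

def collect_remote_setting(interface, key):
    """Gather one relation setting from every remote unit on an interface."""
    values = []
    for rid in relation_ids(interface):      # e.g. 'identity-service'
        for unit in related_units(rid):      # e.g. 'keystone/0'
            value = relation_get(key, rid=rid, unit=unit)
            if value:
                values.append(value)
    return values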
Example #2
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
def get_cluster_id():
    """ Return cluster id (lp1776171)

    Return cluster ID for MySQL asynchronous replication
    :returns: int cluster_id
    """
    if not config('cluster-id'):
        msg = ("Master / Slave relation requires 'cluster-id' option")
        status_set("blocked", msg)
        raise ClusterIDRequired(msg)
    cluster_id = config('cluster-id')
    for rid in relation_ids('master'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    for rid in relation_ids('slave'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    return cluster_id
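
A caller of get_cluster_id() can treat both exceptions as "workload status already set"; a hedged usage sketch (master_relation_joined is a hypothetical hook name):

def master_relation_joined(relation_id=None):
    try:
        cluster_id = get_cluster_id()
    except (ClusterIDRequired, ClusterIDIdentical):
        return  # get_cluster_id() has already set the blocked status
    relation_set(relation_id=relation_id, cluster_id=cluster_id)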
def provision_control():
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                      port)
                     for rid in relation_ids("contrail-api")
                     for unit, port in
                     ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                     if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def provision_vrouter():
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def provision_local_metadata():
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password = [ (relation_get("service_username", unit, rid),
                        relation_get("service_password", unit, rid))
                       for rid in relation_ids("identity-admin")
                       for unit in related_units(rid) ][0]
    log("Provisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password])
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('cloud-compute'):
        for unit in related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    rels = ['shared-db', 'pgsql-nova-db']
    for rname in rels:
        for rid in relation_ids(rname):
            for unit in related_units(rid):
                if rname == 'pgsql-nova-db':
                    leader_init_db_if_ready(skip_acl_check=True,
                                            skip_cells_restarts=True,
                                            db_rid=rid, unit=unit)
                else:
                    leader_init_db_if_ready(db_rid=rid, unit=unit)

    update_nrpe_config()
    update_nova_consoleauth_config()
Example #8
    def _coordinator_context(self):
        """Attempt to create a usable tooz coordinator URL from zk or memcache

        This checks for zookeeper or memcached relations and uses whichever is
        found as the coordinator. Note memcached is only for testing and
        zookeeper will be preferred if both are found.
        """

        # NOTE: Neither the zookeeper nor memcached charms do any kind of
        # clustering of peers, so we just look for one that tells us its
        # port and point at that.
        zk_relation_ids = relation_ids('zookeeper')
        for rid in zk_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                zk_port = rel_data.get('port')
                zk_addr = rel_data.get('private-address')
                if zk_port:
                    url = 'kazoo://%s:%s?timeout=5' % (zk_addr, zk_port)
                    log('Using zookeeper @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        memcached_relation_ids = relation_ids('cache')
        for rid in memcached_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                mc_port = rel_data.get('tcp-port')
                mc_addr = rel_data.get('private-address')
                if mc_port:
                    url = 'memcached://%s:%s' % (mc_addr, mc_port)
                    log('Using memcached @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        log('no astara coordination relation data found')
        return {}
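
For context, the coordination_url built here is in the form the tooz library consumes on the astara side; a hedged sketch of that consumption (address and member id are illustrative):

from tooz import coordination

coordinator = coordination.get_coordinator(
    'kazoo://10.0.0.5:2181?timeout=5', b'astara-orchestrator-0')
coordinator.start()
# ... group membership / distributed locks via coordinator ...
coordinator.stop()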
def ha_relation_changed():
    clustered = relation_get('clustered')
    if (clustered and is_elected_leader(DC_RESOURCE_NAME)):
        log('Cluster configured, notifying other services')
        # Tell all related services to start using the VIP
        update_shared_db_rels()
        for r_id in relation_ids('db'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=False)
        for r_id in relation_ids('db-admin'):
            for unit in related_units(r_id):
                db_changed(r_id, unit, admin=True)
Example #10
    def rndc_master_ips(self):
        rndc_master_ips = []
        rndc_master_ip = ch_ip.get_relation_ip('dns-backend')
        rndc_master_ips.append(rndc_master_ip)
        cluster_relid = hookenv.relation_ids('cluster')[0]
        if hookenv.related_units(relid=cluster_relid):
            for unit in hookenv.related_units(relid=cluster_relid):
                rndc_master_ip = hookenv.relation_get('rndc-address',
                                                      rid=cluster_relid,
                                                      unit=unit)
                if rndc_master_ip is not None:
                    rndc_master_ips.append(rndc_master_ip)
        return rndc_master_ips
def cluster_ready():
    """Determine if each node in the cluster is ready and the cluster is
    complete with the expected number of peers.

    Once cluster_ready returns True it is safe to execute client relation
    hooks. Having min-cluster-size set will guarantee cluster_ready will not
    return True until the expected number of peers are clustered and ready.

    If min-cluster-size is not set it must assume the cluster is ready in order
    to allow for single unit deployments.

    @returns boolean
    """
    min_size = config('min-cluster-size')
    units = 1
    for rid in relation_ids('cluster'):
        units += len(related_units(rid))
    if not min_size:
        min_size = units

    if not is_sufficient_peers():
        return False
    elif min_size > 1:
        if not clustered():
            return False
        clustered_units = 1
        for rid in relation_ids('cluster'):
            for remote_unit in related_units(rid):
                if not relation_get(attribute='clustered',
                                    rid=rid,
                                    unit=remote_unit):
                    log("{} is not yet clustered".format(remote_unit),
                        DEBUG)
                    return False
                else:
                    clustered_units += 1
        if clustered_units < min_size:
        log("Fewer than minimum cluster size ({}) rabbit units reporting "
                "clustered".format(min_size),
                DEBUG)
            return False
        else:
            log("All {} rabbit units reporting clustered"
                "".format(min_size),
                DEBUG)
            return True

    log("Must assume this is a single unit returning 'cluster' ready", DEBUG)
    return True
Example #12
    def __call__(self):

        middlewares = []

        for rid in relation_ids('keystone-middleware'):
            if related_units(rid):
                for unit in related_units(rid):
                    middleware_name = relation_get('middleware_name',
                                                   rid=rid,
                                                   unit=unit)
                    if middleware_name:
                        middlewares.append(middleware_name)
        return {
            'middlewares': ",".join(middlewares)
        }
Example #13
def get_ceph_nodes():
    ''' Query named relation 'ceph' to determine current nodes '''
    hosts = []
    for r_id in relation_ids('ceph'):
        for unit in related_units(r_id):
            hosts.append(relation_get('private-address', unit=unit, rid=r_id))
    return hosts
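
A typical consumer wants the monitor hosts as a single space-separated string for ceph.conf, as Example #28 below does; a one-line hedged sketch:

mon_hosts = ' '.join(h for h in get_ceph_nodes() if h)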
Example #14
def is_request_complete_for_rid(request, rid):
    """Check if a given request has been completed on the given relation

    @param request: A CephBrokerRq object
    @param rid: Relation ID
    """
    broker_key = get_broker_rsp_key()
    for unit in related_units(rid):
        rdata = relation_get(rid=rid, unit=unit)
        if rdata.get(broker_key):
            rsp = CephBrokerRsp(rdata.get(broker_key))
            if rsp.request_id == request.request_id:
                if not rsp.exit_code:
                    return True
        else:
            # The remote unit sent no reply targeted at this unit so either the
            # remote ceph cluster does not support unit targeted replies or it
            # has not processed our request yet.
            if rdata.get('broker_rsp'):
                request_data = json.loads(rdata['broker_rsp'])
                if request_data.get('request-id'):
                    log('Ignoring legacy broker_rsp without unit key as remote '
                        'service supports unit specific replies', level=DEBUG)
                else:
                    log('Using legacy broker_rsp as remote service does not '
                        'support unit specific replies', level=DEBUG)
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
                    if not rsp.exit_code:
                        return True

    return False
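
Callers usually want to know whether any ceph relation has answered the request; a hypothetical wrapper over is_request_complete_for_rid (the real charmhelpers version also consults a local cache of sent requests):

def is_request_complete(request):
    for rid in relation_ids('ceph'):
        if is_request_complete_for_rid(request, rid):
            return True
    return False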
Example #15
    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
Example #16
    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}

        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                ctxt = {
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'service_host': relation_get('service_host', rid=rid,
                                                 unit=unit),
                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'admin_tenant_name': relation_get('service_tenant',
                                                      rid=rid, unit=unit),
                    'admin_user': relation_get('service_username', rid=rid,
                                               unit=unit),
                    'admin_password': relation_get('service_password', rid=rid,
                                                   unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
def is_sufficient_peers():
    """Sufficient number of expected peers to build a complete cluster

    If min-cluster-size has been provided, check that we have sufficient
    number of peers who have presented a hostname for a complete cluster.

    If not defined assume a single unit.

    @returns boolean
    """
    min_size = config('min-cluster-size')
    if min_size:
        log("Checking for minimum of {} peer units".format(min_size),
            level=DEBUG)

        # Include this unit
        units = 1
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                if relation_get(attribute='hostname',
                                rid=rid, unit=unit):
                    units += 1

        if units < min_size:
            log("Insufficient number of peer units to form cluster "
                "(expected=%s, got=%s)" % (min_size, units), level=INFO)
            return False
        else:
            log("Sufficient number of peer units to form cluster "
                "{}".format(min_size), level=DEBUG)
            return True
    else:
        log("min-cluster-size is not defined, race conditions may occur if "
            "this is not a single unit deployment.", level=WARNING)
        return True
def cluster_wait():
    ''' Wait for operations based on modulo distribution

    Use the distributed_wait function to determine how long to wait before
    running an operation like restart or cluster join. By setting modulo to
    the exact number of nodes in the cluster we get serial operations.

    Check for explicit configuration parameters for modulo distribution.
    The config setting modulo-nodes has first priority. If modulo-nodes is not
    set, check min-cluster-size. Finally, if neither value is set, determine
    how many peers there are from the cluster relation.

    @side_effect: distributed_wait is called which calls time.sleep()
    @return: None
    '''
    wait = config('known-wait')
    if config('modulo-nodes') is not None:
        # modulo-nodes has first priority
        num_nodes = config('modulo-nodes')
    elif config('min-cluster-size'):
        # min-cluster-size is consulted next
        num_nodes = config('min-cluster-size')
    else:
        # If nothing explicit is configured, determine cluster size based on
        # peer relations
        num_nodes = 1
        for rid in relation_ids('cluster'):
            num_nodes += len(related_units(rid))
    distributed_wait(modulo=num_nodes, wait=wait)
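
distributed_wait itself is provided by charmhelpers; the modulo idea it implements can be sketched roughly as follows (a simplified illustration, not the actual implementation):

import time
from charmhelpers.core.hookenv import local_unit

def naive_distributed_wait(modulo, wait):
    unit_number = int(local_unit().split('/')[1])  # 'rabbitmq-server/2' -> 2
    time.sleep((unit_number % modulo) * wait)      # serializes peer operations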
Example #19
    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        log('Could not parse JSON from subordinate_config '
                            'setting from %s' % rid, level=ERROR)
                        continue

                    if self.service not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s service' % (rid, self.service))
                        continue

                    sub_config = sub_config[self.service]
                    if self.config_file not in sub_config:
                        log('Found subordinate_config on %s but it contained '
                            'nothing for %s' % (rid, self.config_file))
                        continue

                    sub_config = sub_config[self.config_file]
                    for k, v in sub_config.iteritems():
                        ctxt[k] = v

        if not ctxt:
            ctxt['sections'] = {}

        return ctxt
Example #20
def create_ogr_zone(args):
    aggr_name = action_get('aggregate-name')
    avail_zone = action_get('avail-zone')
    ogr_compute = action_get('ogr-compute')

    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-create {} {}'"\
          .format(aggr_name, avail_zone)
    commands.getoutput(cmd)
    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-add-host {} {}'"\
          .format(aggr_name, ogr_compute)
    commands.getoutput(cmd)
    if config("openstack-version") == "liberty" or \
       config("openstack-version") == "mitaka":
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-details {}'"\
              .format(aggr_name)
    else:
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-show {}'"\
              .format(aggr_name)
    res = commands.getoutput(cmd)
    action_set({'result-map.message': res})
    relation_info = {
        'aggr-name': aggr_name
    }
    if config("openstack-version") == "pike" or \
       config("openstack-version") == "ocata":
        for rid in relation_ids('neutron-api-cplane'):
            for unit in related_units(rid):
                relation_set(relation_id=rid, relation_settings=relation_info)
def config_changed():
    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if os_release('nova-common') >= 'juno':
        with open('/etc/init/neutron-server.override', 'wb') as out:
            out.write('manual\n')
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        setup_ipv6()
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

    global CONFIGS
    if git_install_requested():
        status_set('maintenance', 'Running Git install')
        if config_value_changed('openstack-origin-git'):
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            CONFIGS = do_openstack_upgrade(CONFIGS)
            [neutron_api_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('neutron-api')]
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            [db_joined(relation_id=r_id)
                for r_id in relation_ids('shared-db')]

    save_script_rc()
    configure_https()
    CONFIGS.write_all()
    if console_attributes('protocol'):
        if not git_install_requested():
            status_set('maintenance', 'Configuring guest console access')
            apt_update()
            packages = console_attributes('packages') or []
            filtered = filter_installed_packages(packages)
            if filtered:
                apt_install(filtered, fatal=True)

        [compute_joined(rid=rid)
            for rid in relation_ids('cloud-compute')]

    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if config_value_changed('region'):
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                compute_changed(rid, unit)

    update_nova_consoleauth_config()
Example #22
    def single_mode_map(self):
        """Return map of local addresses only if this is a single node cluster

           @return dict of local address info e.g.
               {'cluster_hosts':
                   {'this_unit_private_addr': {
                        'backends': {
                            'this_unit-1': 'this_unit_private_addr'},
                        'network': 'this_unit_private_addr/private_netmask'},
                'internal_addresses': ['intaddr']}
        """
        relation_info = {}
        try:
            cluster_relid = hookenv.relation_ids('cluster')[0]
            if not hookenv.related_units(relid=cluster_relid):
                relation_info = {
                    'cluster_hosts': self.local_default_addresses(),
                    'internal_addresses': self.internal_addresses,
                }
                net_split = self.local_network_split_addresses()
                for key in net_split.keys():
                    relation_info['cluster_hosts'][key] = net_split[key]
        except IndexError:
            pass
        return relation_info
Example #23
    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
    def restart_trigger(self):
        rt = None
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                rt = relation_get('restart_trigger', rid=rid, unit=unit)
                if rt:
                    return rt
Example #25
    def __call__(self):
        for rid in relation_ids('quantum-network-service'):
            for unit in related_units(rid):
                ctxt = {
                    'keystone_host': relation_get('keystone_host',
                                                  rid=rid, unit=unit),
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'service_tenant': relation_get('service_tenant',
                                                   rid=rid, unit=unit),
                    'service_username': relation_get('service_username',
                                                     rid=rid, unit=unit),
                    'service_password': relation_get('service_password',
                                                     rid=rid, unit=unit),
                    'quantum_host': relation_get('quantum_host',
                                                 rid=rid, unit=unit),
                    'quantum_port': relation_get('quantum_port',
                                                 rid=rid, unit=unit),
                    'quantum_url': relation_get('quantum_url',
                                                rid=rid, unit=unit),
                    'region': relation_get('region',
                                           rid=rid, unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
Example #26
def update_zuul():
    zuul_units = []

    for rid in relation_ids('zuul-configurator'):
        zuul_units.extend(related_units(rid))

    if not zuul_units:
        log('*** No related zuul units, skipping config.')
        return

    log("*** Updating zuul.")
    layout_path = '/etc/zuul/layout.yaml'

    if not os.path.isdir(ZUUL_CONFIG_DIR):
        log('Could not find zuul config directory at expected location, '
            'skipping zuul update (%s)' % ZUUL_CONFIG_DIR)
        return

    log('Installing layout from %s to %s.' % (ZUUL_CONFIG_DIR, layout_path))
    common.sync_dir(ZUUL_CONFIG_DIR, layout_path)

    stop_zuul()
    start_zuul()

    return True
def is_bootstrapped():
    """Determine if each node in the cluster has been bootstrapped and the
    cluster is complete with the expected number of peers.

    Check that each node in the cluster, including this one, has set
    bootstrap-uuid on the cluster relation.

    Having min-cluster-size set will guarantee is_bootstrapped will not
    return True until the expected number of peers are bootstrapped. If
    min-cluster-size is not set, it will check peer relations to estimate the
    expected cluster size. If min-cluster-size is not set and there are no
    peers it must assume the cluster is bootstrapped in order to allow for
    single unit deployments.

    @returns boolean
    """
    min_size = get_min_cluster_size()
    if not is_sufficient_peers():
        return False
    elif min_size > 1:
        uuids = []
        for relation_id in relation_ids('cluster'):
            units = related_units(relation_id) or []
            units.append(local_unit())
            for unit in units:
                if not relation_get(attribute='bootstrap-uuid',
                                    rid=relation_id,
                                    unit=unit):
                    log("{} is not yet clustered".format(unit),
                        DEBUG)
                    return False
                else:
                    bootstrap_uuid = relation_get(attribute='bootstrap-uuid',
                                                  rid=relation_id,
                                                  unit=unit)
                    if bootstrap_uuid:
                        uuids.append(bootstrap_uuid)

        if len(uuids) < min_size:
            log("Fewer than minimum cluster size: "
                "{} percona units reporting clustered".format(min_size),
                DEBUG)
            return False
        elif len(set(uuids)) > 1:
            raise Exception("Found inconsistent bootstrap uuids: "
                            "{}".format((uuids)))
        else:
            log("All {} percona units reporting clustered".format(min_size),
                DEBUG)
    elif not seeded():
        # Single unit deployment but not yet bootstrapped
        return False

    # Set INITIAL_CLUSTERED_KEY as the cluster has fully bootstrapped
    kvstore = kv()
    if not kvstore.get(INITIAL_CLUSTERED_KEY, False):
        kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
        kvstore.flush()

    return True
Example #28
    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt
Example #29
def get_relation_settings(keys):
    """Fetch required relation settings.

    If any setting is unset ('' or None) we return None.

    :param keys: Setting keys to look for.
    """
    settings = {}
    try:
        for rid in relation_ids('gerrit-configurator'):
            for unit in related_units(rid):
                for key in keys:
                    settings[key] = relation_get(key, rid=rid, unit=unit)

    except Exception as exc:
        log('Failed to get gerrit relation data (%s).' % (exc), level=WARNING)
        return

    missing = [k for k, v in settings.iteritems() if not v]
    if missing:
        log("Missing value for '%s' in gerrit relation." %
            (','.join(missing)), level=WARNING)
        return

    return settings
Example #30
def get_service_name(relid):
    '''Return the service name for the other end of relid.'''
    units = hookenv.related_units(relid)
    if units:
        return units[0].split('/', 1)[0]
    else:
        return None
def update_unit_status():
    if not config.get("vrouter-provisioned"):
        units = [unit for rid in relation_ids("contrail-controller")
                 for unit in related_units(rid)]
        if units:
            status_set("waiting", "There is not enough info to provision.")
        else:
            status_set("blocked", "Missing relation to contrail-controller")

    status, _ = _get_agent_status()
    if status == 'initializing':
        # some hacks
        log("Run agent hack: service restart")
        service_restart("contrail-vrouter-agent")
        sleep(10)
        status, msg = _get_agent_status()
        if status == 'initializing' and "(No Configuration for self)" in msg:
            log("Run agent hack: reinitialize config client")
            ip = config.get("api_ip")
            try:
                params = ["curl", "-s"]
                proto = "http"
                ssl_enabled = config.get("ssl_enabled", False)
                if ssl_enabled:
                    params.extend([
                        "--cacert", "/etc/contrail/ssl/certs/ca-cert.pem",
                        "--cert", "/etc/contrail/ssl/certs/server.pem",
                        "--key", "/etc/contrail/ssl/private/server-privkey.pem"
                    ])
                    proto = "https"
                url = ("{proto}://{ip}:8083/Snh_ConfigClientReinitReq?"
                       .format(proto=proto, ip=ip))
                params.append(url)
                check_call(params)
                sleep(5)
                status, _ = _get_agent_status()
            except Exception as e:
                log("Reinitialize returns error: " + str(e))

    if status == 'active':
        status_set("active", "Unit is ready")
        return

    status_set("waiting", "vrouter-agent is not up")
Example #32
    def __call__(self):
        ctxt = super(NeutronApiSDNContext, self).__call__()
        defaults = {
            'core-plugin': {
                'templ_key': 'core_plugin',
                'value': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            },
            'neutron-plugin-config': {
                'templ_key': 'neutron_plugin_config',
                'value': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            },
            'service-plugins': {
                'templ_key': 'service_plugins',
                'value': 'router,firewall,lbaas,vpnaas,metering',
            },
            'restart-trigger': {
                'templ_key': 'restart_trigger',
                'value': '',
            },
            'quota-driver': {
                'templ_key': 'quota_driver',
                'value': '',
            },
            'api-extensions-path': {
                'templ_key': 'api_extensions_path',
                'value': '',
            },
        }
        for rid in relation_ids('neutron-plugin-api-subordinate'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                plugin = rdata.get('neutron-plugin')
                if not plugin:
                    continue
                ctxt['neutron_plugin'] = plugin
                for key in defaults.keys():
                    remote_value = rdata.get(key)
                    ctxt_key = defaults[key]['templ_key']
                    if remote_value:
                        ctxt[ctxt_key] = remote_value
                    else:
                        ctxt[ctxt_key] = defaults[key]['value']
                return ctxt
        return ctxt
Example #33
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
Example #34
def disable_nova_metadata(cmp_os_source=None):
    """Check whether the nova metadata service should be disabled."""
    if not cmp_os_source:
        cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
    if cmp_os_source >= 'rocky':
        secret = None
        for name in ['quantum', 'neutron']:
            for rid in relation_ids('{}-network-service'.format(name)):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    # The presence of the secret shows the
                    # nova-cloud-controller charm is running a metadata
                    # service so it can be disabled locally.
                    if rdata.get('shared-metadata-secret'):
                        secret = rdata.get('shared-metadata-secret')
        disable = bool(secret)
    else:
        disable = False
    return disable
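
CompareOpenStackReleases orders release code names chronologically, which is what makes the >= 'rocky' comparison above safe; a quick illustration:

from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

assert CompareOpenStackReleases('stein') >= 'rocky'
assert not CompareOpenStackReleases('queens') >= 'rocky'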
Example #35
def _migrate_conversations():  # noqa
    """
    Due to issue #28 (https://github.com/juju-solutions/charms.reactive/issues/28),
    conversations needed to be updated to be namespaced per relation ID for SERVICE
    and UNIT scope.  To ensure backwards compatibility, this updates all convs in
    the old format to the new.

    TODO: Remove in 2.0.0
    """
    for key, data in unitdata.kv().getrange('reactive.conversations.').items():
        if 'local-data' in key:
            continue
        if 'namespace' in data:
            continue
        relation_name = data.pop('relation_name')
        if data['scope'] == scopes.GLOBAL:
            data['namespace'] = relation_name
            unitdata.kv().set(key, data)
        else:
            # split the conv based on the relation ID
            new_keys = []
            for rel_id in hookenv.relation_ids(relation_name):
                new_key = Conversation._key(rel_id, data['scope'])
                new_units = set(hookenv.related_units(rel_id)) & set(data['units'])
                if new_units:
                    unitdata.kv().set(new_key, {
                        'namespace': rel_id,
                        'scope': data['scope'],
                        'units': sorted(new_units),
                    })
                    new_keys.append(new_key)
            unitdata.kv().unset(key)
            # update the states pointing to the old conv key to point to the
            # (potentially multiple) new key(s)
            for flag in get_flags():
                value = _get_flag_value(flag)
                if not value:
                    continue
                if key not in value['conversations']:
                    continue
                value['conversations'].remove(key)
                value['conversations'].extend(new_keys)
                set_flag(flag, value)
Example #36
def contrail_analytics_departed():
    units = [
        unit for rid in relation_ids("contrail-controller")
        for unit in related_units(rid)
    ]
    if not units:
        for key in [
                "auth_info", "auth_mode", "orchestrator_info", "ssl_enabled",
                "rabbitmq_vhost", "rabbitmq_user", "rabbitmq_password",
                "rabbitmq_hosts"
        ]:
            config.pop(key, None)
        if is_container_launched(CONTAINER_NAME):
            status_set(
                "blocked",
                "Container is present but cloud orchestrator has disappeared."
                " Please remove the container manually or "
                "restore the cloud orchestrator.")
    update_charm_status()
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        localhost = 'ip6-localhost'
    else:
        localhost = 'localhost'

    if (os_release('openstack-dashboard') == 'icehouse'
            and config('offline-compression') in ['no', 'False']):
        apt_install(filter_installed_packages(['python-lesscpy']), fatal=True)

    # Ensure default role changes are propagated to keystone
    for relid in relation_ids('identity-service'):
        keystone_joined(relid)
    enable_ssl()

    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('openstack-dashboard'):
            status_set('maintenance', 'Upgrading to new OpenStack release')
            do_openstack_upgrade(configs=CONFIGS)

    env_vars = {
        'OPENSTACK_URL_HORIZON':
        "http://{}:70{}|Login+-+OpenStack".format(localhost,
                                                  config('webroot')),
        'OPENSTACK_SERVICE_HORIZON':
        "apache2",
        'OPENSTACK_PORT_HORIZON_SSL':
        433,
        'OPENSTACK_PORT_HORIZON':
        70
    }
    save_script_rc(**env_vars)
    update_nrpe_config()
    CONFIGS.write_all()
    check_custom_theme()
    open_port(80)
    open_port(443)
    for relid in relation_ids('certificates'):
        for unit in related_units(relid):
            certs_changed(relation_id=relid, unit=unit)

    websso_trusted_dashboard_changed()
Example #38
    def get_allowed_units(self, database, username, relation_id=None, prefix=None):
        """Get list of units with access grants for database with username.

        This is typically used to provide shared-db relations with a list of
        which units have been granted access to the given database.
        """
        if not self.connection:
            self.connect(password=self.get_mysql_root_password())
        allowed_units = set()
        if not prefix:
            prefix = database
        for unit in related_units(relation_id):
            settings = relation_get(rid=relation_id, unit=unit)
            # First check for setting with prefix, then without
            for attr in ["%s_hostname" % (prefix), 'hostname']:
                hosts = settings.get(attr, None)
                if hosts:
                    break

            if hosts:
                # hostname can be json-encoded list of hostnames
                try:
                    hosts = json.loads(hosts)
                except ValueError:
                    hosts = [hosts]
            else:
                hosts = [settings['private-address']]

            if hosts:
                for host in hosts:
                    host = self.normalize_address(host)
                    if self.grant_exists(database, username, host):
                        log("Grant exists for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
                        if unit not in allowed_units:
                            allowed_units.add(unit)
                    else:
                        log("Grant does NOT exist for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
            else:
                log("No hosts found for grant check", level=INFO)

        return allowed_units
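
The computed set is typically published back on the shared-db relation so clients can run the is_db_ready()-style check seen in Example #1; a hedged sketch, where helper stands for a MySQLHelper-style instance:

allowed = helper.get_allowed_units('nova', 'nova', relation_id=rid)
relation_set(relation_id=rid,
             allowed_units=' '.join(sorted(allowed)))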
Example #39
def send_ssl_sync_request():
    """Set sync request on cluster relation.

    Value set equals number of ssl configs currently enabled so that if they
    change, we ensure that certs are synced. This setting is consumed by
    cluster-relation-changed ssl master. We also clear the 'synced' set to
    guarantee that a sync will occur.

    Note that we do nothing if the setting is already applied.
    """
    unit = local_unit().replace('/', '-')
    # Start with core config (e.g. used for signing revoked token list)
    ssl_config = 0b1

    use_https = config('use-https')
    if use_https and bool_from_string(use_https):
        ssl_config ^= 0b10

    https_service_endpoints = config('https-service-endpoints')
    if (https_service_endpoints and bool_from_string(https_service_endpoints)):
        ssl_config ^= 0b100

    enable_pki = config('enable-pki')
    if enable_pki and bool_from_string(enable_pki):
        ssl_config ^= 0b1000

    key = 'ssl-sync-required-%s' % (unit)
    settings = {key: ssl_config}

    prev = 0b0
    rid = None
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            _prev = relation_get(rid=rid, unit=unit, attribute=key) or 0b0
            if _prev and _prev > prev:
                prev = bin(_prev)

    if rid and prev ^ ssl_config:
        if is_leader():
            clear_ssl_synced_units()

        log("Setting %s=%s" % (key, bin(ssl_config)), level=DEBUG)
        relation_set(relation_id=rid, relation_settings=settings)
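
The bitmask built above can be unpacked for logging or debugging; a hedged sketch mapping each bit back to the option that set it:

SSL_FLAGS = {0b1: 'core', 0b10: 'use-https',
             0b100: 'https-service-endpoints', 0b1000: 'enable-pki'}

def describe_ssl_config(ssl_config):
    """Name the ssl options encoded in the bitmask (illustrative helper)."""
    return [name for bit, name in SSL_FLAGS.items() if ssl_config & bit]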
Example #40
def controller_ctx():
    """Get the ipaddress of all contrail control nodes"""
    auth_mode = config.get("auth_mode")
    if auth_mode is None:
        # NOTE: auth_mode must be transmitted by controller
        return {}

    controller_ip_list = []
    for rid in relation_ids("contrail-analytics"):
        for unit in related_units(rid):
            if unit.startswith("contrail-controller"):
                ip = relation_get("private-address", unit, rid)
                controller_ip_list.append(ip)
    sort_key = lambda ip: struct.unpack("!L", inet_aton(ip))[0]
    controller_ip_list = sorted(controller_ip_list, key=sort_key)
    return {
        "auth_mode": auth_mode,
        "controller_servers": controller_ip_list,
    }
Example #41
def set_all_host_strings():
    private_string = []
    public_string = []
    vip_string = []
    scan_string = []
    ssh_pub_key = []

    for rid in relation_ids('slave'):
        for unit in related_units(rid):
            if relation_get(attribute='private-string', unit=unit, rid=rid):
                private_string = pickle.loads(relation_get(
                    attribute='private-string', unit=unit, rid=rid))
                public_string = pickle.loads(relation_get(
                    attribute='public-string', unit=unit, rid=rid))
                vip_string = pickle.loads(relation_get(
                    attribute='vip-string', unit=unit, rid=rid))
                scan_string = pickle.loads(relation_get(
                    attribute='scan-string', unit=unit, rid=rid))
                ssh_pub_key = pickle.loads(relation_get(
                    attribute='ssh-pub-key', unit=unit, rid=rid))

    if private_string:
        flush_host()
        for value in private_string:
            config_host(value, 'private')

        for value in public_string:
            config_host(value, 'public')
            add_ssh_known_host(value)

        for value in vip_string:
            config_host(value, 'vip')

        for value in scan_string:
            config_host(value, 'scan')

        for value in ssh_pub_key:
            config_ssh_key(value)

        set_disk_permission()

        send_notification("slave-state", 'install')
    def __call__(self):
        '''
        Obtains the glance API server from the image-service relation.  Useful
        in nova and cinder (currently).
        '''
        log('Generating template context for image-service.')
        rids = relation_ids('image-service')
        if not rids:
            return {}
        for rid in rids:
            for unit in related_units(rid):
                api_server = relation_get('glance-api-server',
                                          rid=rid,
                                          unit=unit)
                if api_server:
                    return {'glance_api_servers': api_server}
        log('ImageService context is incomplete. '
            'Missing required relation data.')
        return {}
    def __call__(self):
        ctxt = {}
        for rid in relation_ids('neutron-load-balancer'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                try:
                    ctxt['load_balancer_name'] = json.loads(
                        rdata.get('name'))
                    ctxt['load_balancer_base_url'] = json.loads(
                        rdata.get('base_url'))
                except TypeError:
                    pass
                except json.decoder.JSONDecodeError:
                    log(traceback.format_exc())
                    raise ValueError('Invalid load balancer data'
                                     ' - check the related charm')
                if self.context_complete(ctxt):
                    return ctxt
        return {}
Example #44
def contrail_controller_departed():
    if not remote_unit().startswith("contrail-openstack-compute"):
        return

    units = [
        unit for rid in relation_ids("contrail-openstack-compute")
        for unit in related_units(rid)
    ]
    if units:
        return
    config.pop("orchestrator_info")
    if is_leader():
        update_northbound_relations()
    if is_container_launched(CONTAINER_NAME):
        status_set(
            "blocked",
            "Container is present but cloud orchestrator has disappeared."
            " Please remove the container manually or restore the cloud orchestrator."
        )
def servers_ctx():
    controller_ip_list = []
    analytics_ip_list = []
    for rid in relation_ids("contrail-analyticsdb"):
        for unit in related_units(rid):
            utype = relation_get("unit-type", unit, rid)
            ip = relation_get("private-address", unit, rid)
            if utype == "controller":
                controller_ip_list.append(ip)
            if utype == "analytics":
                analytics_ip_list.append(ip)

    sort_key = lambda ip: struct.unpack("!L", inet_aton(ip))[0]
    controller_ip_list = sorted(controller_ip_list, key=sort_key)
    analytics_ip_list = sorted(analytics_ip_list, key=sort_key)
    return {
        "controller_servers": controller_ip_list,
        "analytics_servers": analytics_ip_list
    }
Example #46
def _get_broker_rid_unit_for_previous_request():
    """Gets the broker rid and unit combination that has a response for the
    previously sent request."""
    broker_key = get_broker_rsp_key()

    log("Broker key is {}.".format(broker_key), level=DEBUG)

    for rid in relation_ids('ceph'):
        previous_request = get_previous_request(rid)
        for unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=unit)
            if rdata.get(broker_key):
                rsp = CephBrokerRsp(rdata.get(broker_key))
                if rsp.request_id == previous_request.request_id:
                    log("Found broker rid/unit: {}/{}".format(rid, unit),
                        level=DEBUG)
                    return rid, unit
    log("There is no broker response for any unit at the moment.", level=DEBUG)
    return None, None
Example #47
def tsn_ctx():
    result = dict()
    result["csn_mode"] = config.get("csn-mode")
    if not result["csn_mode"]:
        return result

    tsn_ip_list = []
    for rid in relation_ids("agent-cluster"):
        for unit in related_units(rid):
            ip = relation_get("vhost-address", unit, rid)
            if ip:
                tsn_ip_list.append(ip)
    # add own ip address
    vhost_ip = get_vhost_ip()
    if vhost_ip:
        tsn_ip_list.append(vhost_ip)

    result["tsn_nodes"] = tsn_ip_list
    return result
def update_clients(check_deferred_restarts=True):
    """Update amqp client relation hooks

    IFF leader node is ready. Client nodes are considered ready once the leader
    has already run amqp_changed.

    :param check_deferred_restarts: Whether to check if restarts are
                                    permitted before running hook.
    :type check_deferred_restarts: bool
    """
    if check_deferred_restarts and get_deferred_restarts():
        log("Not sending client update as a restart is pending.", INFO)
        return
    if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
        for rid in relation_ids('amqp'):
            for unit in related_units(rid):
                amqp_changed(relation_id=rid,
                             remote_unit=unit,
                             check_deferred_restarts=check_deferred_restarts)
Example #49
def process_data():
    identity = ""
    data = OrderedDict()
    all_strings = {}

    for rid in relation_ids('master'):
        for unit in related_units(rid):
            identity = relation_get(attribute='identity', unit=unit, rid=rid)
            raw_private_string = relation_get(attribute='private-string',
                                              unit=unit,
                                              rid=rid)
            raw_public_string = relation_get(attribute='public-string',
                                             unit=unit,
                                             rid=rid)
            raw_vip_string = relation_get(attribute='vip-string',
                                          unit=unit,
                                          rid=rid)
            raw_ssh_key = relation_get(attribute='host-ssh-key',
                                       unit=unit,
                                       rid=rid)

            juju_log('Relation confirmed from {}'.format(identity))

            if identity:
                data = json.load(open(NODE_DATA_FILE))
                if identity not in data.keys():
                    all_strings['private'] = pickle.loads(raw_private_string)
                    all_strings['public'] = pickle.loads(raw_public_string)
                    all_strings['vip'] = pickle.loads(raw_vip_string)
                    all_strings['ssh_pub_key'] = pickle.loads(raw_ssh_key)
                    data[identity] = all_strings

                    juju_log('Storing node {} data {}'.format(
                        identity, data[identity]))
                    json.dump(data, open(NODE_DATA_FILE, 'w'))
Example #50
    def _config_changed():
        # if we are paused, delay doing any config changed hooks.
        # It is forced on the resume.
        if is_unit_paused_set():
            log("Unit is paused or upgrading. Skipping config_changed", "WARN")
            return

        install_packages()

        if config('prefer-ipv6'):
            status_set('maintenance', 'configuring ipv6')
            setup_ipv6()

        for r_id in relation_ids('identity-service'):
            identity_changed(relid=r_id)

        for r_id in relation_ids('cluster'):
            cluster_joined(rid=r_id)

        # NOTE(jamespage): Re-exec mon relation for any changes to
        #                  enable ceph pool permissions restrictions
        for r_id in relation_ids('mon'):
            for unit in related_units(r_id):
                mon_relation(r_id, unit)

        # Re-trigger hacluster relations to switch to ifaceless
        # vip configuration
        for r_id in relation_ids('ha'):
            ha_relation_joined(r_id)

        # Refire certificates relations for VIP changes
        for r_id in relation_ids('certificates'):
            certs_joined(r_id)

        process_multisite_relations()

        CONFIGS.write_all()
        configure_https()

        update_nrpe_config()

        open_port(port=listen_port())
Example #51
def sufficient_osds(minimum_osds=3):
    '''
    Determine if the minimum number of OSDs has been
    bootstrapped into the cluster.

    @param minimum_osds: The minimum number of OSDs required
    @return: boolean indicating whether the required number of
             OSDs was detected.
    '''
    bootstrapped_osds = 0
    for r_id in relation_ids('osd'):
        for unit in related_units(r_id):
            unit_osds = relation_get(attribute='bootstrapped-osds',
                                     unit=unit,
                                     rid=r_id)
            if unit_osds is not None:
                bootstrapped_osds += int(unit_osds)
    if bootstrapped_osds >= minimum_osds:
        return True
    return False
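A minimal usage sketch for sufficient_osds(); the hook name and the create_pools() helper are hypothetical, shown only to illustrate gating work on cluster readiness.

def osd_relation_changed():
    # Defer any pool work until enough OSDs have reported in.
    if not sufficient_osds(minimum_osds=3):
        log('Waiting for at least 3 bootstrapped OSDs', level=DEBUG)
        return
    create_pools()  # hypothetical follow-up once the cluster is ready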
Example #52
def contrail_controller_departed():
    changed = _rebuild_orchestrator_info()

    issu_present = False
    for rid in relation_ids("contrail-controller"):
        for unit in related_units(rid):
            utype = relation_get('unit-type', unit, rid)
            if utype == "issu":
                issu_present = True

    if not issu_present and config.get("maintenance") == 'issu':
        # TODO: finish ISSU process
        config.pop("maintenance", None)
        config.pop("issu_controller_ips", None)
        config.pop("issu_controller_data_ips", None)
        config.pop("issu_analytics_ips", None)
        changed = True
    if changed:
        update_northbound_relations()
        update_southbound_relations()
Example #53
 def __call__(self):
     backends = []
     for rid in relation_ids('storage-backend'):
         for unit in related_units(rid):
             backend_name = relation_get('backend_name',
                                         unit, rid)
             if backend_name:
                 backends.append(backend_name)
     # Ocata onwards all backends must be in their own section of the config
     if CompareOpenStackReleases(os_release('cinder-common')) >= "ocata":
         if relation_ids('ceph'):
             backends.append('CEPH')
         if enable_lvm():
             backends.append('LVM')
     # Use the package default backend to stop the service flapping.
     if not backends:
         backends = ['LVM']
     return {
         'active_backends': backends,
         'backends': ",".join(backends)}
Example #54
 def __call__(self):
     backends = []
     for rid in relation_ids('storage-backend'):
         for unit in related_units(rid):
             backend_name = relation_get('backend_name', unit, rid)
             if backend_name:
                 backends.append(backend_name)
     # Ocata onwards all backends must be in their own section of the config
     if CompareOpenStackReleases(os_release('cinder-common')) >= "ocata":
         if relation_ids('ceph'):
             backends.append('CEPH')
         if enable_lvm():
             backends.append('LVM')
     if len(backends) > 0:
         return {
             'active_backends': backends,
             'backends': ",".join(backends)
         }
     else:
         return {}
Example #55
def write_jjb_config():
    log('*** Writing jenkins-job-builder config: %s.' % JJB_CONFIG)
    jenkins = {}
    admin_user, admin_cred = admin_credentials()
    for rid in relation_ids('jenkins-configurator'):
        for unit in related_units(rid):
            jenkins = {
                'jenkins_url': relation_get('jenkins_url', rid=rid, unit=unit),
                'username': admin_user,
                'password': admin_cred,
            }

            if (None not in jenkins.values() and '' not in jenkins.values()):
                with open(JJB_CONFIG, 'w') as out:
                    out.write(JJB_CONFIG_TEMPLATE % jenkins)
                log('*** Wrote jenkins-job-builder config: %s.' % JJB_CONFIG)
                return True

    log('*** Not enough data in principal relation. Not writing config.')
    return False
Example #56
 def __call__(self):
     ctxt = {}
     for rid in relation_ids('cloud-compute'):
         for unit in related_units(rid):
             rel = {'rid': rid, 'unit': unit}
             proto = relation_get('console_access_protocol', **rel)
             if not proto:
                 # only bother with units that have a proto set.
                 continue
             ctxt['console_keymap'] = relation_get('console_keymap', **rel)
             ctxt['console_access_protocol'] = proto
             ctxt['console_vnc_type'] = 'vnc' in proto
             if proto == 'vnc':
                 ctxt = dict(ctxt, **self.get_console_info('xvpvnc', **rel))
                 ctxt = dict(ctxt, **self.get_console_info('novnc', **rel))
             else:
                 ctxt = dict(ctxt, **self.get_console_info(proto, **rel))
             break
     ctxt['console_listen_addr'] = resolve_address(endpoint_type=INTERNAL)
     return ctxt
Example #57
def update_rsync_acls():
    """Get Host IP of each storage unit and broadcast acl to all units."""
    hosts = []

    if not is_elected_leader(SWIFT_HA_RES):
        log("Skipping rsync acl update since not leader", level=DEBUG)
        return

    # Get all unit addresses
    for rid in relation_ids('swift-storage'):
        for unit in related_units(rid):
            hosts.append(get_host_ip(rid=rid, unit=unit))

    rsync_hosts = ' '.join(hosts)
    log("Broadcasting acl '{}' to all storage units".format(rsync_hosts),
        level=DEBUG)
    # We add a timestamp so that the storage units know which is the newest
    settings = {'rsync_allowed_hosts': rsync_hosts, 'timestamp': time.time()}
    for rid in relation_ids('swift-storage'):
        relation_set(relation_id=rid, **settings)
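A hypothetical consumer-side sketch (not part of the example above): a storage unit's relation-changed hook reading the broadcast ACL; the hook name and the write_rsync_config() helper are assumptions.

def swift_storage_relation_changed():
    # The timestamp broadcast with the ACL makes the relation data
    # change even when the host list itself is identical, so this
    # hook still fires for every broadcast.
    allowed_hosts = relation_get('rsync_allowed_hosts')
    if allowed_hosts:
        write_rsync_config(allowed_hosts)  # hypothetical helper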
Example #58
def client_relation_joined(relid=None):
    if ready():
        service_name = None
        if relid is None:
            units = [remote_unit()]
            service_name = units[0].split('/')[0]
        else:
            units = related_units(relid)
            if len(units) > 0:
                service_name = units[0].split('/')[0]

        if service_name is not None:
            public_addr = get_public_addr()
            data = {'key': ceph.get_named_key(service_name),
                    'auth': 'cephx',
                    'ceph-public-address': public_addr}
            relation_set(relation_id=relid,
                         relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
Example #59
    def neutron_context_no_auth_data(self):
        """If the charm has a cloud-credentials relation then a subset
        of data is needed to complete this context."""
        neutron_ctxt = {'neutron_url': None}
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                rel = {'rid': rid, 'unit': unit}

                url = _neutron_url(**rel)
                if not url:
                    # only bother with units that have a neutron url set.
                    continue

                neutron_ctxt = {
                    'neutron_auth_strategy': 'keystone',
                    'neutron_plugin': _neutron_plugin(),
                    'neutron_url': url,
                }

        return neutron_ctxt
Example #60
def cluster():
    ''' Called when a unit joins a cluster relationship. '''
    hookenv.log('Starting the cluster-relation-joined hook.')

    join_command = [CONSUL.abspath(), 'join']
    cluster_rid = hookenv.relation_ids('cluster')
    if cluster_rid:
        peers = hookenv.related_units(cluster_rid[0])
        if peers:
            for peer in peers:
                data = hookenv.relation_get(unit=peer, rid=cluster_rid[0])
                address = data.get('private-address')
                # Skip peers that have not published an address yet,
                # otherwise None would end up in the command line.
                if address:
                    join_command.append(address)
            hookenv.log(join_command)
            # Call the consul join command.
            output = subprocess.check_output(join_command)
            hookenv.log(output)
    else:
        hookenv.log('No peers to join with.')

    hookenv.log('The cluster-relation-joined hook finished.')