Example #1
def get_db_host(client_hostname):
    """Get address of local database host.

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If vip(s) are configured, chooses first available.
    """
    vips = config('vip').split() if config('vip') else []
    access_network = config('access-network')
    if access_network:
        client_ip = get_host_ip(client_hostname)
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    return unit_get('private-address')
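The VIP-selection step above can be exercised outside a charm; a minimal sketch (not from the charm itself) with is_address_in_network reimplemented via the stdlib, all addresses hypothetical:

import ipaddress

def is_address_in_network(network, address):
    # charmhelpers provides the same check; this is a plain-Python stand-in.
    return ipaddress.ip_address(address) in ipaddress.ip_network(network)

def first_vip_in_network(vips, access_network):
    """Return the first VIP inside access_network, or None (the WARNING path)."""
    for vip in vips:
        if is_address_in_network(access_network, vip):
            return vip
    return None

print(first_vip_in_network(['10.0.0.10', '192.168.1.10'], '192.168.1.0/24'))
# -> 192.168.1.10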
Example #2
def get_db_host(client_hostname, interface='shared-db'):
    """Get address of local database host.

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If network spaces are supported (Juju >= 2.0), use network-get to
    retrieve the network binding for the interface.

    If vip(s) are configured, chooses first available.
    """
    vips = config('vip').split() if config('vip') else []
    access_network = config('access-network')
    client_ip = get_host_ip(client_hostname)
    if access_network:
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)
    else:
        try:
            # NOTE(jamespage)
            # Try to use network spaces to resolve binding for
            # interface, and to resolve the VIP associated with
            # the binding if provided.
            interface_binding = network_get_primary_address(interface)
            if is_clustered() and vips:
                interface_cidr = resolve_network_cidr(interface_binding)
                for vip in vips:
                    if is_address_in_network(interface_cidr, vip):
                        return vip
            return interface_binding
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    return unit_get('private-address')
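The try/except around network_get_primary_address doubles as a Juju version gate: on Juju < 2.0 charmhelpers raises NotImplementedError, which drops execution through to the legacy resolution below it. A standalone stub of that control flow (values hypothetical):

def network_get_primary_address(binding):
    # Stub of the charmhelpers behaviour on Juju < 2.0.
    raise NotImplementedError

def resolve(binding, legacy_address):
    try:
        return network_get_primary_address(binding)
    except NotImplementedError:
        # Fall back to the pre-network-spaces behaviour.
        return legacy_address

print(resolve('shared-db', '10.0.0.5'))  # -> 10.0.0.5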
Example #3
    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'endpoints': [],
            'ext_ports': []
        }

        for cn in self.canonical_names():
            self.configure_cert(cn)

        addresses = []
        vips = []
        if config('vip'):
            vips = config('vip').split()

        for network_type in ['os-internal-network',
                             'os-admin-network',
                             'os-public-network']:
            address = get_address_in_network(config(network_type),
                                             unit_get('private-address'))
            if len(vips) > 0 and is_clustered():
                for vip in vips:
                    if is_address_in_network(config(network_type),
                                             vip):
                        addresses.append((address, vip))
                        break
            elif is_clustered():
                addresses.append((address, config('vip')))
            else:
                addresses.append((address, address))

        for address, endpoint in set(addresses):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port)
                int_port = determine_api_port(api_port)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))
        ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
        return ctxt
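Note the set(addresses) pass before building portmaps: with no net splits configured, all three network types resolve to the same pair, and the set collapses them so each endpoint is emitted once. A quick illustration (values hypothetical):

# Three network types, no net splits: identical pairs collapse to one entry.
addresses = [('10.0.0.5', '10.0.0.5')] * 3
print(sorted(set(addresses)))  # [('10.0.0.5', '10.0.0.5')]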
Example #4
def resolve_address(endpoint_type=PUBLIC):
    resolved_address = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # Assume vip is simple and pass back directly
            resolved_address = config('vip')
        else:
            for vip in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        vip):
                    resolved_address = vip
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr()
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    else:
        return resolved_address
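resolve_address assumes a module-level _address_map keyed by endpoint type. A plausible shape, modelled on charmhelpers' ADDRESS_MAP but trimmed to the keys this snippet actually reads (treat the exact values as an assumption):

PUBLIC, INTERNAL, ADMIN = 'public', 'int', 'admin'

# Only 'config' and 'fallback' are read by resolve_address above.
_address_map = {
    PUBLIC: {'config': 'os-public-network', 'fallback': 'public-address'},
    INTERNAL: {'config': 'os-internal-network', 'fallback': 'private-address'},
    ADMIN: {'config': 'os-admin-network', 'fallback': 'private-address'},
}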
Example #5
def ha_changed():
    if not is_clustered():
        return
    vip = config('vip')
    log('ha_changed(): We are now HA clustered. '
        'Advertising our VIP (%s) to all AMQP clients.' %
        vip)
Example #6
def balance_rings():
    '''handle doing ring balancing and distribution.'''
    new_ring = False
    for ring in SWIFT_RINGS.itervalues():
        if balance_ring(ring):
            log('Balanced ring %s' % ring)
            new_ring = True
    if not new_ring:
        return

    for ring in SWIFT_RINGS.keys():
        f = '%s.ring.gz' % ring
        shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
                        os.path.join(WWW_DIR, f))

    if cluster.eligible_leader(SWIFT_HA_RES):
        msg = 'Broadcasting notification to all storage nodes that new '\
              'ring is ready for consumption.'
        log(msg)
        path = WWW_DIR.split('/var/www/')[1]
        trigger = uuid.uuid4()

        if cluster.is_clustered():
            hostname = config('vip')
        else:
            hostname = unit_get('private-address')

        rings_url = 'http://%s/%s' % (hostname, path)
        # notify storage nodes that there is a new ring to fetch.
        for relid in relation_ids('swift-storage'):
            relation_set(relation_id=relid, swift_hash=get_swift_hash(),
                         rings_url=rings_url, trigger=trigger)

    service_restart('swift-proxy')
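The rings_url derivation in balance_rings is plain string surgery on WWW_DIR; a standalone illustration (WWW_DIR and host values hypothetical):

WWW_DIR = '/var/www/swift-rings'          # hypothetical value
path = WWW_DIR.split('/var/www/')[1]      # -> 'swift-rings'
rings_url = 'http://%s/%s' % ('10.0.0.10', path)
print(rings_url)                          # http://10.0.0.10/swift-rings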
Example #7
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Example #8
def identity_credentials_changed(relation_id=None, remote_unit=None):
    """Update the identity credentials relation on change

    Calls add_credentials_to_keystone

    :param relation_id: Relation id of the relation
    :param remote_unit: Related unit on the relation
    """
    if is_elected_leader(CLUSTER_RES):
        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return
        if not is_db_ready():
            log("identity-credentials-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring "
                "identity-credentials-relation updates", level=INFO)
            return

        # Create the tenant user
        add_credentials_to_keystone(relation_id, remote_unit)
    else:
        log('Deferring identity_credentials_changed() to service leader.')
Example #9
def identity_joined(rid=None, relation_trigger=False):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC),
                                api_port('neutron-server'))
    admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN),
                               api_port('neutron-server'))
    internal_url = '{}:{}'.format(canonical_url(CONFIGS, INTERNAL),
                                  api_port('neutron-server')
                                  )
    rel_settings = {
        'neutron_service': 'neutron',
        'neutron_region': config('region'),
        'neutron_public_url': public_url,
        'neutron_admin_url': admin_url,
        'neutron_internal_url': internal_url,
        'quantum_service': None,
        'quantum_region': None,
        'quantum_public_url': None,
        'quantum_admin_url': None,
        'quantum_internal_url': None,
    }
    if relation_trigger:
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
Example #10
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #11
    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
           (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)
Example #12
def ha_changed():
    if not is_clustered():
        return
    vip = config('vip')
    log('ha_changed(): We are now HA clustered. '
        'Advertising our VIP (%s) to all AMQP clients.' %
        vip)
    # need to re-authenticate all clients since node-name changed.
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #13
def identity_joined(rid=None):
    if hookenv.config('vip') and not ch_cluster.is_clustered():
        hookenv.log('Deferring registration until clustered',
                    level=hookenv.DEBUG)
        return
    public_url = ch_ip.canonical_url(CONFIGS, ch_ip.PUBLIC)
    internal_url = ch_ip.canonical_url(CONFIGS, ch_ip.INTERNAL)
    admin_url = ch_ip.canonical_url(CONFIGS, ch_ip.ADMIN)
    hookenv.relation_set(relation_id=rid,
                         **ncc_utils.determine_endpoints(public_url,
                                                         internal_url,
                                                         admin_url))
Example #14
def slave_joined(interface='slave'):
    relation_settings = {}
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    if is_leader():
        configure_slave()
    relation_settings = {'slave_address':
                         network_get_primary_address(interface)}
    relation_settings['cluster_id'] = cluster_id
    log("Setting slave relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Example #15
    def neutron_ctxt(self):
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt
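For orientation, a hypothetical result for a clustered unit with vip=10.5.0.1 and https() false:

# Result under those assumptions:
ctxt = {'network_manager': 'neutron',
        'neutron_url': '%s://%s:%s' % ('http', '10.5.0.1', '9696')}
print(ctxt['neutron_url'])  # http://10.5.0.1:9696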
Example #16
def identity_changed(relation_id=None, remote_unit=None):
    CONFIGS.write_all()

    notifications = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log("identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready", level=WARNING)
            return

        if not is_db_initialised():
            log("Database not yet initialised - deferring identity-relation "
                "updates", level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')
        settings = relation_get(rid=relation_id, unit=remote_unit)
        service = settings.get('service', None)
        if service:
            # If service is known and endpoint has changed, notify service if
            # it is related with notifications interface.
            csum = hashlib.sha256()
            # We base the decision to notify on whether these parameters have
            # changed (if csum is unchanged from previous notify, relation will
            # not fire).
            csum.update(settings.get('public_url', ''))
            csum.update(settings.get('admin_url', ''))
            csum.update(settings.get('internal_url', ''))
            notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
    else:
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('identity-service'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            # Ensure the null'd settings are unset in the relation.
            peerdb_settings = filter_null(peerdb_settings)
            if 'service_password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('Deferring identity_changed() to service leader.')

    if notifications:
        send_notifications(notifications)
Example #17
def keystone_joined(relation_id=None):
    if config('vip') and not is_clustered():
        juju_log('Deferring registration until clustered', level=DEBUG)
        return

    public_url = '{}:9292'.format(canonical_url(CONFIGS, PUBLIC))
    internal_url = '{}:9292'.format(canonical_url(CONFIGS, INTERNAL))
    admin_url = '{}:9292'.format(canonical_url(CONFIGS, ADMIN))
    relation_data = {
        'service': 'glance',
        'region': config('region'),
        'public_url': public_url,
        'admin_url': admin_url,
        'internal_url': internal_url, }

    relation_set(relation_id=relation_id, **relation_data)
Example #18
    def __call__(self):
        ctxt = {}

        if (hookenv.config('console-ssl-cert') and
                hookenv.config('console-ssl-key') and
                hookenv.config('console-access-protocol')):
            ssl_dir = '/etc/nova/ssl/'
            if not os.path.exists(ssl_dir):
                hookenv.log('Creating %s.' % ssl_dir, level=hookenv.DEBUG)
                os.mkdir(ssl_dir)

            cert_path = os.path.join(ssl_dir, 'nova_cert.pem')
            decode_ssl_cert = base64.b64decode(
                hookenv.config('console-ssl-cert'))

            key_path = os.path.join(ssl_dir, 'nova_key.pem')
            decode_ssl_key = base64.b64decode(
                hookenv.config('console-ssl-key'))

            with open(cert_path, 'wb') as fh:
                fh.write(decode_ssl_cert)
            with open(key_path, 'wb') as fh:
                fh.write(decode_ssl_key)

            ctxt['ssl_only'] = True
            ctxt['ssl_cert'] = cert_path
            ctxt['ssl_key'] = key_path

            if ch_cluster.is_clustered():
                ip_addr = ch_ip.resolve_address(endpoint_type=ch_ip.PUBLIC)
            else:
                ip_addr = hookenv.unit_get('private-address')

            ip_addr = ch_network_ip.format_ipv6_addr(ip_addr) or ip_addr

            _proto = hookenv.config('console-access-protocol')
            url = "https://%s:%s%s" % (
                ip_addr,
                common.console_attributes('proxy-port', proto=_proto),
                common.console_attributes('proxy-page', proto=_proto))

            if _proto == 'novnc':
                ctxt['novncproxy_base_url'] = url
            elif _proto == 'spice':
                ctxt['html5proxy_base_url'] = url

        return ctxt
Example #19
File: ip.py Project: BillTheBest/hyper-c
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    resolved_address = _get_address_override(endpoint_type)
    if resolved_address:
        return resolved_address

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
Example #20
    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if not self.external_ports or not https():
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {"namespace": self.service_namespace, "private_address": unit_get("private-address"), "endpoints": []}
        for ext_port in self.external_ports:
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt["endpoints"].append(portmap)
        return ctxt
Example #21
def admin_relation_changed(relation_id=None):
    # TODO: fixup
    if expect_ha() and not is_clustered():
        log("Expected to be HA but no hacluster relation yet", level=INFO)
        return
    relation_data = {
        'service_hostname': resolve_address(ADMIN),
        'service_port': config('service-port'),
        'service_username': config('admin-user'),
        'service_tenant_name': config('admin-role'),
        'service_region': config('region'),
        'service_protocol': 'https' if https() else 'http',
        'api_version': get_api_version(),
    }
    if relation_data['api_version'] > 2:
        relation_data['service_user_domain_name'] = ADMIN_DOMAIN
        relation_data['service_project_domain_name'] = ADMIN_DOMAIN
        relation_data['service_project_name'] = ADMIN_PROJECT
    relation_data['service_password'] = get_admin_passwd()
    relation_set(relation_id=relation_id, **relation_data)
Example #22
def keystone_joined(relid=None):
    if not cluster.eligible_leader(SWIFT_HA_RES):
        return
    if cluster.is_clustered():
        hostname = config('vip')
    else:
        hostname = unit_get('private-address')
    port = config('bind-port')
    if cluster.https():
        proto = 'https'
    else:
        proto = 'http'
    admin_url = '%s://%s:%s' % (proto, hostname, port)
    internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
    relation_set(service='swift',
                 region=config('region'),
                 public_url=public_url, internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'),
                 relation_id=relid)
Example #24
    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        if is_clustered():
            ctxt['private_address'] = config('vip')
        for api_port in self.external_ports:
            ext_port = determine_apache_port(api_port)
            int_port = determine_api_port(api_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt
Example #25
    def __call__(self):
        if isinstance(self.external_ports, basestring):
            self.external_ports = [self.external_ports]
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
        }
        for ext_port in self.external_ports:
            if peer_units() or is_clustered():
                int_port = determine_haproxy_port(ext_port)
            else:
                int_port = determine_api_port(ext_port)
            portmap = (int(ext_port), int(int_port))
            ctxt['endpoints'].append(portmap)
        return ctxt
Example #26
def identity_credentials_changed(relation_id=None, remote_unit=None):
    """Update the identity credentials relation on change

    Calls add_credentials_to_keystone

    :param relation_id: Relation id of the relation
    :param remote_unit: Related unit on the relation
    """
    if is_elected_leader(CLUSTER_RES):
        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return
        if not is_db_ready():
            log(
                "identity-credentials-relation-changed hook fired before db "
                "ready - deferring until db ready",
                level=WARNING)
            return

        if not is_db_initialised():
            log(
                "Database not yet initialised - deferring "
                "identity-credentials-relation updates",
                level=INFO)
            return

        unit_ready, _ = check_api_application_ready()
        if not unit_ready:
            log(("Keystone charm unit not ready - deferring identity-relation "
                 "updates"),
                level=INFO)
            return

        # Create the tenant user
        add_credentials_to_keystone(relation_id, remote_unit)
    else:
        log('Deferring identity_credentials_changed() to service leader.')
Example #27
def keystone_joined(relid=None):
    cmp_codename = CompareOpenStackReleases(
        get_os_codename_install_source(config('openstack-origin')))
    if cmp_codename >= 'queens':
        log('Skipping endpoint registration for >= Queens', level=DEBUG)
        return

    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    public_url = "{}:{}".format(canonical_url(CONFIGS, PUBLIC),
                                CEILOMETER_PORT)
    admin_url = "{}:{}".format(canonical_url(CONFIGS, ADMIN), CEILOMETER_PORT)
    internal_url = "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                  CEILOMETER_PORT)
    region = config("region")
    relation_set(relation_id=relid,
                 service=CEILOMETER_SERVICE,
                 public_url=public_url,
                 admin_url=admin_url,
                 internal_url=internal_url,
                 requested_roles=CEILOMETER_ROLE,
                 region=region)
Example #28
def cluster_ready():
    """Determine if each node in the cluster is ready to respond to client
    requests.

    Once cluster_ready returns True it is safe to execute client relation
    hooks.

    If a VIP is set do not return ready until hacluster relationship is
    complete.

    @returns boolean
    """
    if config("vip") and not is_clustered():
        log("Waiting on hacluster to complete clustering, not clustered yet.",
            DEBUG)
        return False

    min_cluster_size = get_min_cluster_size()
    # Single unit deployment return state of seeded
    if int(min_cluster_size) == 1:
        return seeded()

    peers = {}
    for relation_id in relation_ids('cluster'):
        units = related_units(relation_id) or []
        if local_unit() not in units:
            units.append(local_unit())
        for unit in units:
            peers[unit] = relation_get(attribute='ready',
                                       rid=relation_id,
                                       unit=unit)

    if len(peers) >= min_cluster_size:
        return all(peers.values())

    return False
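Stripped of the Juju calls, the readiness rule is: once the peer map reaches min_cluster_size, every collected 'ready' flag must be truthy. A self-contained sketch of that rule (unit names hypothetical):

def quorum_ready(peers, min_cluster_size):
    """peers maps unit name -> value of its 'ready' relation flag."""
    if len(peers) >= min_cluster_size:
        return all(peers.values())
    return False

print(quorum_ready({'app/0': 'True', 'app/1': 'True'}, 3))                    # False: quorum not met
print(quorum_ready({'app/0': 'True', 'app/1': 'True', 'app/2': 'True'}, 3))   # True
print(quorum_ready({'app/0': 'True', 'app/1': None, 'app/2': 'True'}, 3))     # False: a peer not ready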
Example #30
def amqp_changed(relation_id=None, remote_unit=None):
    singleset = set(['username', 'vhost'])
    host_addr = rabbit.get_unit_ip()

    if rabbit.leader_node_is_ready():
        relation_settings = {'hostname': host_addr,
                             'private-address': host_addr}
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username, vhost, amqp_rid, admin=admin)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since i don't
            #              think it's ever used anymore and stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.iteritems():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    admin = queues[amqp_rid].get('admin', False)
                    password = configure_amqp(username, vhost, amqp_rid,
                                              admin=admin)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log("Updating relation {} keys {}"
            .format(relation_id or get_relation_id(),
                    ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
    elif not is_leader() and rabbit.client_node_is_ready():
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
Example #31
def get_db_host(client_hostname, interface='shared-db'):
    """Get address of local database host for use by db clients

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If network spaces are supported (Juju >= 2.0), use network-get to
    retrieve the network binding for the interface.

    If DNSHA is set pass os-access-hostname

    If vip(s) are configured, chooses first available.

    @param client_hostname: hostname of client side relation setting hostname.
                            Only used if access-network is configured
    @param interface: Network space binding to check.
                      Usually the relationship name.
    @returns IP for use with db clients
    """
    vips = config('vip').split() if config('vip') else []
    dns_ha = config('dns-ha')
    access_network = config('access-network')
    if is_clustered() and dns_ha:
        log("Using DNS HA hostname: {}".format(config('os-access-hostname')))
        return config('os-access-hostname')
    elif access_network:
        client_ip = resolve_hostname_to_ip(client_hostname)
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network),
                    level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network),
                level=WARNING)
    else:
        try:
            # NOTE(jamespage)
            # Try to use network spaces to resolve binding for
            # interface, and to resolve the VIP associated with
            # the binding if provided.
            interface_binding = network_get_primary_address(interface)
            if is_clustered() and vips:
                interface_cidr = resolve_network_cidr(interface_binding)
                for vip in vips:
                    if is_address_in_network(interface_cidr, vip):
                        return vip
            return interface_binding
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    # Last resort
    return unit_get('private-address')
Example #32
def ha_changed():
    if not is_clustered():
        return
    vip = config('vip')
    log('ha_changed(): We are now HA clustered. '
        'Advertising our VIP (%s) to all AMQP clients.' % vip)
Example #33
def amqp_changed(relation_id=None,
                 remote_unit=None,
                 check_deferred_restarts=True):
    """Update amqp relations.

    :param relation_id: Relation id to update
    :type relation_id: str
    :param remote_unit: Remote unit on relation_id to update
    :type remote_unit: str
    :param check_deferred_restarts: Whether to check if restarts are
                                    permitted before running hook.
    :type check_deferred_restarts: bool
    """
    allowed, reason = is_hook_allowed(
        'amqp-relation-changed',
        check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return
    singleset = set(['username', 'vhost'])
    host_addr = ch_ip.get_relation_ip(
        rabbit_net_utils.AMQP_INTERFACE,
        cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))

    sent_update = False
    if rabbit.leader_node_is_ready():
        relation_settings = {
            'hostname': host_addr,
            'private-address': host_addr
        }
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            ttlname = current.get('ttlname')
            ttlreg = current.get('ttlreg')
            ttl = current.get('ttl')
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username,
                                      vhost,
                                      amqp_rid,
                                      admin=admin,
                                      ttlname=ttlname,
                                      ttlreg=ttlreg,
                                      ttl=ttl)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since i don't
            #              think it's ever used anymore and stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.items():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    ttlname = queues[amqp_rid].get('ttlname')
                    ttlreg = queues[amqp_rid].get('ttlreg')
                    ttl = queues[amqp_rid].get('ttl')
                    admin = queues[amqp_rid].get('admin', False)
                    password = configure_amqp(username,
                                              vhost,
                                              amqp_rid,
                                              admin=admin,
                                              ttlname=ttlname,
                                              ttlreg=ttlreg,
                                              ttl=ttl)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log(
            "Updating relation {} keys {}".format(
                relation_id or get_relation_id(),
                ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
        sent_update = True
    elif not is_leader() and rabbit.client_node_is_ready():
        if not rabbit.clustered():
            log("This node is not clustered yet, defer sending data to client",
                level=DEBUG)
            return
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
                sent_update = True
    kvstore = kv()
    update_done = kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False)
    if sent_update and not update_done:
        kvstore.set(key=INITIAL_CLIENT_UPDATE_KEY, value=True)
        kvstore.flush()
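The closing kvstore stanza is a run-once latch in the unit-local database; the same pattern in isolation (assuming charmhelpers is importable; the key's value here is hypothetical):

from charmhelpers.core.unitdata import kv

INITIAL_CLIENT_UPDATE_KEY = 'amqp_initial_client_update'  # hypothetical value

def record_first_client_update(sent_update):
    # Flip the flag exactly once, after the first successful client update.
    kvstore = kv()
    if sent_update and not kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False):
        kvstore.set(key=INITIAL_CLIENT_UPDATE_KEY, value=True)
        kvstore.flush()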
Example #34
def identity_joined(rid=None):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    settings = {}

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint '
                 'registration')
        return

    cinder_release = os_release('cinder-common')
    if CompareOpenStackReleases(cinder_release) < 'pike':
        public_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC),
            config('api-listening-port')
        )
        internal_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL),
            config('api-listening-port')
        )
        admin_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN),
            config('api-listening-port')
        )
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)
Example #35
def identity_changed(relation_id=None, remote_unit=None):
    notifications_checksums = {}
    notifications_endpoints = {}
    if is_elected_leader(CLUSTER_RES):
        if not is_db_ready():
            log(
                "identity-service-relation-changed hook fired before db "
                "ready - deferring until db ready",
                level=WARNING)
            return

        if not is_db_initialised():
            log(
                "Database not yet initialised - deferring identity-relation "
                "updates",
                level=INFO)
            return

        if expect_ha() and not is_clustered():
            log("Expected to be HA but no hacluster relation yet", level=INFO)
            return

        add_service_to_keystone(relation_id, remote_unit)
        if is_service_present('neutron', 'network'):
            delete_service_entry('quantum', 'network')
        settings = relation_get(rid=relation_id, unit=remote_unit)

        # If endpoint has changed, notify to units related over the
        # identity-notifications interface. We base the decision to notify on
        # whether admin_url, public_url or internal_url have changed from
        # previous notify.
        service = settings.get('service')
        if service:
            key = '%s-endpoint-changed' % service
            notifications_endpoints[key] = endpoints_dict(settings)
            notifications_checksums[key] = endpoints_checksum(settings)
        else:
            # Some services don't set their name in the 'service' key in the
            # relation, for those their name is calculated from the prefix of
            # keys. See `assemble_endpoints()` for details.
            single = {
                'service', 'region', 'public_url', 'admin_url', 'internal_url'
            }
            endpoints = assemble_endpoints(settings)
            for ep in endpoints.keys():
                if single.issubset(endpoints[ep]):
                    key = '%s-endpoint-changed' % ep
                    log('endpoint: %s' % ep)
                    notifications_endpoints[key] = (endpoints_dict(
                        endpoints[ep]))
                    notifications_checksums[key] = (endpoints_checksum(
                        endpoints[ep]))
    else:
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('identity-service'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            # Ensure the null'd settings are unset in the relation.
            peerdb_settings = filter_null(peerdb_settings)
            if 'service_password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('Deferring identity_changed() to service leader.')

    if notifications_endpoints or notifications_checksums:
        send_notifications(notifications_checksums, notifications_endpoints)
Example #37
    def test_is_not_clustered(self):
        '''It determines whether or not a unit is clustered'''
        self.relation_ids.return_value = ['ha:0']
        self.relation_list.return_value = ['ha/0']
        self.relation_get.return_value = None
        self.assertFalse(cluster_utils.is_clustered())
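For contrast, a companion case (hypothetical, mirroring the same mocks) covering the positive path, where a peer advertises a clustered flag:

    def test_is_clustered(self):
        '''A peer reporting a clustered flag marks the unit clustered'''
        self.relation_ids.return_value = ['ha:0']
        self.relation_list.return_value = ['ha/0']
        self.relation_get.return_value = 'yes'
        self.assertTrue(cluster_utils.is_clustered())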
Example #38
def amqp_changed(relation_id=None, remote_unit=None):
    host_addr = rabbit.get_unit_ip()

    # TODO: Simplify what the non-leader needs to do
    if not is_leader() and rabbit.client_node_is_ready():
        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage
        relation_clear(relation_id)
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        exc_list = ['hostname', 'private-address']
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id,
                                                      exc_list=exc_list)
            peerdb_settings['hostname'] = host_addr
            peerdb_settings['private-address'] = host_addr
            if 'password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('amqp_changed(): Deferring amqp_changed to the leader.')

        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings = {'private-address': host_addr}
            relation_set(relation_id=relation_id,
                         relation_settings=relation_settings)

        return

    # Bail if not completely ready
    if not rabbit.leader_node_is_ready():
        return

    relation_settings = {}
    settings = relation_get(rid=relation_id, unit=remote_unit)

    singleset = set(['username', 'vhost'])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            log('amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(
            username=settings['username'],
            vhost=settings['vhost'],
            admin=settings.get('admin', False))
    else:
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings['_'.join([amqp,
                                            'password'])] = configure_amqp(
                                                queues[amqp]['username'],
                                                queues[amqp]['vhost'])

    relation_settings['hostname'] = \
        relation_settings['private-address'] = \
        rabbit.get_unit_ip()

    ssl_utils.configure_client_ssl(relation_settings)

    if is_clustered():
        relation_settings['clustered'] = 'true'
        if is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = config('vip')
            # or ha-vip-only to support active/active, but
            # accessed via a VIP for older clients.
            if config('ha-vip-only') is True:
                relation_settings['ha-vip-only'] = 'true'

    # set if need HA queues or not
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
        relation_settings['ha_queues'] = True
    peer_store_and_set(relation_id=relation_id,
                       relation_settings=relation_settings)
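The else branch above unpacks prefixed relation keys (e.g. 'nova_username') into per-service dicts. A standalone sketch of that grouping, with illustrative sample data:

# Sketch of the prefixed-key grouping used in the else branch above.
def group_by_prefix(settings):
    queues = {}
    for k, v in settings.items():
        prefix = k.split('_')[0]
        rest = '_'.join(k.split('_')[1:])
        queues.setdefault(prefix, {})[rest] = v
    return queues

sample = {'nova_username': 'nova', 'nova_vhost': 'openstack',
          'cinder_username': 'cinder', 'cinder_vhost': 'openstack'}
print(group_by_prefix(sample))
# -> {'nova': {'username': 'nova', 'vhost': 'openstack'},
#     'cinder': {'username': 'cinder', 'vhost': 'openstack'}}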
def amqp_changed(relation_id=None, remote_unit=None):
    singleset = set(['username', 'vhost'])
    host_addr = ch_ip.get_relation_ip(
        rabbit_net_utils.AMQP_INTERFACE,
        cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))

    if rabbit.leader_node_is_ready():
        relation_settings = {'hostname': host_addr,
                             'private-address': host_addr}
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username, vhost, amqp_rid, admin=admin)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since i don't
            #              think it's ever used anymore and stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.items():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    # 'admin' was never defined on this path; default it
                    # rather than raising a NameError.
                    admin = queues[amqp_rid].get('admin', False)
                    password = configure_amqp(username, vhost, amqp_rid,
                                              admin=admin)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log("Updating relation {} keys {}"
            .format(relation_id or get_relation_id(),
                    ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
    elif not is_leader() and rabbit.client_node_is_ready():
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
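The revised handler above inverts the flow of the earlier version: the leader publishes credentials, while non-leader units only mirror peer-stored settings. A small sketch of the branch selection (helper names and values are illustrative, not the charm API):

# Sketch of the branch selection in the revised amqp_changed() above.
def pick_branch(leader_node_ready, unit_is_leader, client_node_ready):
    if leader_node_ready:
        return 'leader path: configure vhosts and publish credentials'
    if not unit_is_leader and client_node_ready:
        return 'non-leader path: mirror peer settings to amqp relations'
    return 'no-op: wait until the leader node is ready'

print(pick_branch(False, False, True))
# -> 'non-leader path: mirror peer settings to amqp relations'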
Example #40
def resolve_address(endpoint_type=PUBLIC, override=True):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured, or a Juju 2.0 extra-binding has been used.

    :param endpoint_type: Network endpoint type
    :param override: Accept hostname overrides or not
    """
    resolved_address = None
    if override:
        resolved_address = _get_address_override(endpoint_type)
        if resolved_address:
            return resolved_address

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    binding = ADDRESS_MAP[endpoint_type]['binding']
    clustered = is_clustered()

    if clustered and vips:
        if net_addr:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
        else:
            # NOTE: endeavour to check vips against network space
            #       bindings
            try:
                bound_cidr = resolve_network_cidr(
                    network_get_primary_address(binding)
                )
                for vip in vips:
                    if is_address_in_network(bound_cidr, vip):
                        resolved_address = vip
                        break
            except NotImplementedError:
                # If no net-splits are configured and there is no support
                # for extra bindings/network spaces, expect a single vip
                resolved_address = vips[0]
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        if net_addr:
            resolved_address = get_address_in_network(net_addr, fallback_addr)
        else:
            # NOTE: only try to use extra bindings if legacy network
            #       configuration is not in use
            try:
                resolved_address = network_get_primary_address(binding)
            except NotImplementedError:
                resolved_address = fallback_addr

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
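For reference, the ADDRESS_MAP consulted above (from charmhelpers.contrib.openstack.ip) maps each endpoint type to its config key, fallback unit address, network-space binding, and hostname override; roughly (an abridged sketch, not the authoritative source):

PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

ADDRESS_MAP = {
    PUBLIC: {
        'binding': 'public',
        'config': 'os-public-network',
        'fallback': 'public-address',
        'override': 'os-public-hostname',
    },
    INTERNAL: {
        'binding': 'internal',
        'config': 'os-internal-network',
        'fallback': 'private-address',
        'override': 'os-internal-hostname',
    },
    ADMIN: {
        'binding': 'admin',
        'config': 'os-admin-network',
        'fallback': 'private-address',
        'override': 'os-admin-hostname',
    },
}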
def amqp_changed(relation_id=None, remote_unit=None):
    if config('prefer-ipv6'):
        host_addr = get_ipv6_addr()[0]
    else:
        host_addr = unit_get('private-address')

    if not is_elected_leader('res_rabbitmq_vip'):
        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage
        relation_clear(relation_id)
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        exc_list = ['hostname', 'private-address']
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id,
                                                      exc_list=exc_list)
            peerdb_settings['hostname'] = host_addr
            peerdb_settings['private-address'] = host_addr
            if 'password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('amqp_changed(): Deferring amqp_changed to is_elected_leader.')

        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings = {'private-address': host_addr}
            relation_set(relation_id=relation_id,
                         relation_settings=relation_settings)

        return

    relation_settings = {}
    settings = relation_get(rid=relation_id, unit=remote_unit)

    singleset = set(['username', 'vhost'])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            log('amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(
            username=settings['username'],
            vhost=settings['vhost'],
            admin=settings.get('admin', False))
    else:
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings[
                    '_'.join([amqp, 'password'])] = configure_amqp(
                    queues[amqp]['username'],
                    queues[amqp]['vhost'])

    if config('prefer-ipv6'):
        relation_settings['private-address'] = host_addr
    else:
        # NOTE(jamespage)
        # override private-address settings if access-network is
        # configured and an appropriate network interface is configured.
        relation_settings['hostname'] = \
            relation_settings['private-address'] = \
            get_address_in_network(config('access-network'),
                                   unit_get('private-address'))

    ssl_utils.configure_client_ssl(relation_settings)

    if is_clustered():
        relation_settings['clustered'] = 'true'
        if is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = config('vip')
            # or ha-vip-only to support active/active, but
            # accessed via a VIP for older clients.
            if config('ha-vip-only') is True:
                relation_settings['ha-vip-only'] = 'true'

    # set if need HA queues or not
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
        relation_settings['ha_queues'] = True
    peer_store_and_set(relation_id=relation_id,
                       relation_settings=relation_settings)
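This older variant relies on charmhelpers' get_address_in_network(network, fallback), which returns a local address inside network when one exists and otherwise the fallback. An illustrative call (the addresses are assumptions):

from charmhelpers.contrib.network.ip import get_address_in_network

# If the unit has e.g. 10.5.0.4 on some interface -> '10.5.0.4'
# If no local interface sits in 10.5.0.0/16       -> '192.168.1.20'
addr = get_address_in_network('10.5.0.0/16', '192.168.1.20')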
def get_db_host(client_hostname, interface='shared-db'):
    """Get address of local database host for use by db clients

    If an access-network has been configured, expect selected address to be
    on that network. If none can be found, revert to primary address.

    If network spaces are supported (Juju >= 2.0), use network-get to
    retrieve the network binding for the interface.

    If DNS HA is set, pass os-access-hostname.

    If vip(s) are configured, chooses first available.

    @param client_hostname: hostname of client side relation setting hostname.
                            Only used if access-network is configured
    @param interface: Network space binding to check.
                      Usually the relationship name.
    @returns IP for use with db clients
    """
    vips = config('vip').split() if config('vip') else []
    dns_ha = config('dns-ha')
    access_network = config('access-network')
    if is_clustered() and dns_ha:
        log("Using DNS HA hostname: {}".format(config('os-access-hostname')))
        return config('os-access-hostname')
    elif access_network:
        client_ip = resolve_hostname_to_ip(client_hostname)
        if is_address_in_network(access_network, client_ip):
            if is_clustered():
                for vip in vips:
                    if is_address_in_network(access_network, vip):
                        return vip

                log("Unable to identify a VIP in the access-network '%s'" %
                    (access_network), level=WARNING)
            else:
                return get_address_in_network(access_network)
        else:
            log("Client address '%s' not in access-network '%s'" %
                (client_ip, access_network), level=WARNING)
    else:
        try:
            # NOTE(jamespage)
            # Try to use network spaces to resolve binding for
            # interface, and to resolve the VIP associated with
            # the binding if provided.
            interface_binding = network_get_primary_address(interface)
            if is_clustered() and vips:
                interface_cidr = resolve_network_cidr(interface_binding)
                for vip in vips:
                    if is_address_in_network(interface_cidr, vip):
                        return vip
            return interface_binding
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    if is_clustered() and vips:
        return vips[0]  # NOTE on private network

    if config('prefer-ipv6'):
        return get_ipv6_addr(exc_list=vips)[0]

    # Last resort
    return unit_get('private-address')
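Taken together, the function above resolves the database host in a fixed precedence order. A minimal standalone sketch of that order (names are illustrative, not the charm API):

# Sketch of the selection order implemented by get_db_host() above.
def db_host_precedence(clustered, dns_ha, access_network, binding, vips):
    if clustered and dns_ha:
        return 'os-access-hostname (DNS HA)'
    if access_network:
        return 'vip or local address on the access-network'
    if binding:
        return 'network-space binding (vip if one matches its CIDR)'
    if clustered and vips:
        return 'first configured vip'
    return 'private-address (or first ipv6 addr if prefer-ipv6)'

print(db_host_precedence(True, True, None, None, []))
# -> 'os-access-hostname (DNS HA)'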