def cluster_joined(relation_id=None):
    """Publish this unit's per-network addresses on the cluster relation.

    :param relation_id: Relation to update; None targets the current one.
    """
    settings = {}
    for net_type in ch_context.ADDRESS_TYPES:
        cidr = hookenv.config('os-{}-network'.format(net_type))
        addr = ch_network_ip.get_relation_ip(net_type, cidr_network=cidr)
        if addr:
            settings['{}-address'.format(net_type)] = addr
    # The cluster binding address is advertised as private-address.
    settings['private-address'] = ch_network_ip.get_relation_ip('cluster')
    hookenv.relation_set(relation_id=relation_id, relation_settings=settings)
Exemple #2
0
    def _cluster_joined():
        """Publish this unit's per-network addresses on the cluster peer
        relation.
        """
        settings = {}

        # One '<type>-address' entry per configured extra binding.
        for addr_type in ADDRESS_TYPES:
            address = get_relation_ip(addr_type,
                                      cidr_network=config(
                                          'os-{}-network'.format(addr_type)))
            if address:
                settings['{}-address'.format(addr_type)] = address

        settings['private-address'] = get_relation_ip('cluster')

        # NOTE(review): 'rid' is a free variable bound in the enclosing
        # scope (not visible in this chunk) — confirm it is defined before
        # this nested function runs.
        relation_set(relation_id=rid, relation_settings=settings)
def cluster_joined(relation_id=None):
    """Advertise this unit's network addresses on the cluster relation.

    :param relation_id: Relation to update; None means the current relation.
    """
    settings = {}
    for net_type in ADDRESS_TYPES:
        cidr = config('os-{}-network'.format(net_type))
        addr = get_relation_ip(net_type, cidr_network=cidr)
        if addr:
            settings['{}-address'.format(net_type)] = addr
    # The cluster binding address doubles as the private address.
    settings['private-address'] = get_relation_ip('cluster')
    relation_set(relation_id=relation_id, relation_settings=settings)
def cluster_joined(relation_id=None):
    """Publish this unit's network addresses on the cluster relation.

    :param relation_id: Relation to update; None targets the current one.
    """
    settings = {}

    # One '<type>-address' entry per configured extra binding.
    for addr_type in ch_context.ADDRESS_TYPES:
        address = ch_network_ip.get_relation_ip(
            addr_type,
            cidr_network=hookenv.config('os-{}-network'.format(addr_type)))
        if address:
            settings['{}-address'.format(addr_type)] = address

    # The cluster binding address is shared as private-address.
    settings['private-address'] = ch_network_ip.get_relation_ip('cluster')

    hookenv.relation_set(relation_id=relation_id, relation_settings=settings)
Exemple #5
0
def prometheus_relation(relid=None,
                        unit=None,
                        prometheus_permitted=None,
                        module_enabled=None):
    """Publish the ceph-mgr prometheus endpoint on the relation.

    :param relid: Relation to update; None means the current relation.
    :param unit: Remote unit (unused in the body visible here).
    :param prometheus_permitted: Override for the Ceph-version gate;
        computed from 'ceph >= 12.2.0' when None.
    :param module_enabled: Override for the mgr-module gate; when None the
        prometheus module is enabled on demand.
    """
    # Nothing to advertise until the cluster is bootstrapped.
    if not ceph.is_bootstrapped():
        return
    if prometheus_permitted is None:
        # The prometheus mgr module shipped with Ceph 12.2.0 (Luminous).
        prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0
    if module_enabled is None:
        # NOTE(review): this enables the module as a side effect of the
        # check when it is not already enabled.
        module_enabled = (is_mgr_module_enabled('prometheus')
                          or mgr_enable_module('prometheus'))
    log("checking if prometheus module is enabled")
    if prometheus_permitted and module_enabled:
        log("Updating prometheus")
        # 9283 is the ceph-mgr prometheus exporter's default port.
        data = {
            'hostname': get_relation_ip('prometheus'),
            'port': 9283,
        }
        relation_set(relation_id=relid, relation_settings=data)
    else:
        log("Couldn't enable prometheus, but are related. "
            "Prometheus is available in Ceph version: {} ; "
            "Prometheus Module is enabled: {}".format(prometheus_permitted,
                                                      module_enabled),
            level=WARNING)
def compute_joined(rid=None):
    """Publish this unit's migration and resize settings on cloud-compute.

    :param rid: Relation to update; None means the current relation.
    """
    # NOTE(james-page) in MAAS environments the actual hostname is a CNAME
    # record so won't get scanned based on private-address which is an IP
    # add the hostname configured locally to the relation.
    settings = {
        'hostname':
        gethostname(),
        'private-address':
        get_relation_ip('migration',
                        cidr_network=config('libvirt-migration-network')),
    }

    # Share the availability zone when one is available.
    az = get_availability_zone()
    if az:
        relation_set(relation_id=rid, availability_zone=az)

    # Settings accumulate: the resize branch re-sends everything the
    # migration branch added, plus the nova ssh key.
    if migration_enabled():
        auth_type = config('migration-auth-type')
        settings['migration_auth_type'] = auth_type
        if auth_type == 'ssh':
            settings['ssh_public_key'] = public_ssh_key()
        relation_set(relation_id=rid, **settings)
    if config('enable-resize'):
        # NOTE(review): the user value appears redacted ('******') in this
        # source — confirm the intended ssh user.
        settings['nova_ssh_public_key'] = public_ssh_key(user='******')
        relation_set(relation_id=rid, **settings)
    def __call__(self):
        '''
        Horizon specific HAProxy context; haproxy is used all the time
        in the openstack dashboard charm so a single instance just
        self refers
        '''
        cluster_hosts = {}
        # Unit names contain '/', which is not usable as a backend name.
        l_unit = local_unit().replace('/', '-')
        if config('prefer-ipv6'):
            # Exclude the VIP so we advertise the unit's own ipv6 address.
            cluster_hosts[l_unit] = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            cluster_hosts[l_unit] = get_relation_ip('cluster')

        # Add every peer on the cluster relation as a backend.
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        log('Ensuring haproxy enabled in /etc/default/haproxy.')
        with open('/etc/default/haproxy', 'w') as out:
            out.write('ENABLED=1\n')

        # NOTE(review): second port in each pair is presumably the
        # backend/listen port — confirm against the haproxy template.
        ctxt = {
            'units': cluster_hosts,
            'service_ports': {
                'dash_insecure': [80, 70],
                'dash_secure': [443, 433]
            },
            'prefer_ipv6': config('prefer-ipv6'),
            'haproxy_expose_stats': config('haproxy-expose-stats')
        }
        return ctxt
    def local_network_split_addresses(self):
        """Map of local units addresses for each address type

           NOTE: This excludes private-address
           @return dict of backends and networks for local unit e.g.
               {'this_unit_admin_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_admin_addr'},
                    'network': 'this_unit_admin_addr/admin_netmask'},
                'this_unit_internal_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_internal_addr'},
                    'network': 'this_unit_internal_addr/internal_netmask'},
                'this_unit_public_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_public_addr'},
                    'network': 'this_unit_public_addr/public_netmask'}}
        """
        config = hookenv.config()
        # OrderedDict keeps address-type ordering stable for consumers.
        _cluster_hosts = collections.OrderedDict()
        for addr_type in ADDRESS_TYPES:
            cfg_opt = os_ip.ADDRESS_MAP[addr_type]['config']
            # Binding address, optionally constrained by the configured CIDR.
            laddr = ch_ip.get_relation_ip(
                os_ip.ADDRESS_MAP[addr_type]['binding'],
                config.get(cfg_opt))
            if laddr:
                netmask = ch_ip.get_netmask_for_address(laddr)
                _cluster_hosts[laddr] = {
                    'network': "{}/{}".format(laddr, netmask),
                    'backends': collections.OrderedDict(
                        [(self.local_unit_name, laddr)])}
        return _cluster_hosts
def swift_storage_relation_joined(rid=None):
    """Publish this storage node's ports, prepared devices and addresses.

    :param rid: Relation to update; None means the current relation.
    """
    # With encryption we must wait for vault before exposing devices.
    if config('encrypt') and not vaultlocker.vault_relation_complete():
        log('Encryption configured and vault not ready, deferring',
            level=DEBUG)
        return
    rel_settings = {
        'zone': config('zone'),
        'object_port': config('object-server-port'),
        'container_port': config('container-server-port'),
        'account_port': config('account-server-port'),
    }
    if enable_replication():
        # Dedicated replication/cluster networks and per-service rep ports.
        replication_ip = network_get_primary_address('replication')
        cluster_ip = network_get_primary_address('cluster')
        rel_settings.update({
            'ip_rep': replication_ip,
            'ip_cls': cluster_ip,
            'region': config('storage-region'),
            'object_port_rep': config('object-server-port-rep'),
            'container_port_rep': config('container-server-port-rep'),
            'account_port_rep': config('account-server-port-rep')})
    db = kv()
    devs = db.get('prepared-devices', [])
    devs = [os.path.basename(d) for d in devs]
    # Devices are published as a ':'-separated list of basenames.
    rel_settings['device'] = ':'.join(devs)
    # Keep a reference of devices we are adding to the ring
    remember_devices(devs)

    rel_settings['private-address'] = get_relation_ip('swift-storage')

    relation_set(relation_id=rid, relation_settings=rel_settings)
Exemple #10
0
def db_joined():
    """Request MySQL access, refusing if PostgreSQL is already related."""
    if is_relation_made('pgsql-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in related_units():
            access_network = relation_get(unit=unit,
                                          attribute='access-network')
            if access_network:
                break
        # Pick our address on the access network (if any) for the grant.
        host = get_relation_ip('shared-db', cidr_network=access_network)

        conf = config()
        relation_set(database=conf['database'],
                     username=conf['database-user'],
                     hostname=host)
Exemple #11
0
    def set_wsgate_info(self, wsgate):
        """Publish wsgate endpoint details to every joined wsgate relation.

        :param wsgate: wsgate relation endpoint/interface instance.
        """
        is_ready = reactive.flags.is_flag_set('config.complete')
        # NOTE(review): get_state() is the deprecated spelling; the sibling
        # lines use is_flag_set().  The 'is True' guard below suggests the
        # returned value may not be a plain bool — confirm before converting.
        has_ssl = reactive.flags.get_state('ssl.enabled')
        ha_available = reactive.flags.is_flag_set('ha.available')
        proto = "https" if has_ssl is True else "http"
        local_ip = ch_ip.get_relation_ip("internal")
        # Prefer the VIP when clustered so clients reach the HA address.
        addr = self.config["vip"] if ha_available else local_ip
        allowed_user = self._get_allowed_user()

        if not allowed_user:
            # We don't have AD credentials yet. Defer for later
            return

        relation_data = {
            "enabled": is_ready,
            "html5_proxy_base_url": "%(proto)s://%(address)s:%(port)s/" % {
                "proto": proto,
                "address": addr,
                "port": self.api_ports["wsgate"][os_ip.PUBLIC],
            },
            "allow_user": allowed_user,
        }
        # Broadcast the same payload on every joined relation.
        for unit in wsgate.all_joined_units:
            wsgate.set_wsgate_info(
                unit.relation.relation_id,
                relation_data)
def cluster_joined(relation_id=None):
    """Advertise addresses on the cluster relation and, when fired by the
    cluster hook itself, check local DB actions.

    :param relation_id: Relation to update; None means the current relation.
    """
    settings = {}
    for net_type in ADDRESS_TYPES:
        cidr = config('os-{}-network'.format(net_type))
        addr = get_relation_ip(net_type, cidr_network=cidr)
        if addr:
            settings['{}-address'.format(net_type)] = addr
    settings['private-address'] = get_relation_ip('cluster')
    relation_set(relation_id=relation_id, relation_settings=settings)

    # No explicit relation id => running in the cluster relation hook.
    if not relation_id:
        check_local_db_actions_complete()
def secrets_storage_joined(relation_id=None):
    """Request vaultlocker-backed, isolated secrets storage.

    :param relation_id: Relation to update; None means the current relation.
    """
    request = {
        'secret_backend': 'charm-vaultlocker',
        'isolated': True,
        'access_address': get_relation_ip('secrets-storage'),
        'unit_name': local_unit(),
        'hostname': socket.gethostname(),
    }
    relation_set(relation_id=relation_id, **request)
    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
           (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        for net_type in ADDRESS_TYPES:
            net_cfg_opt = os_ip.ADDRESS_MAP[net_type]['config']
            config_cidr = getattr(self, net_cfg_opt.replace('-', '_'), None)
            if hookenv.config(net_cfg_opt):
                # Explicit network config: pick our address in that CIDR.
                addr = ch_ip.get_address_in_network(
                    config_cidr,
                    hookenv.unit_get('private-address'))
            else:
                # Otherwise use the extra-binding (network space) address.
                addr = ch_ip.get_relation_ip(
                    os_ip.ADDRESS_MAP[net_type]['binding'])
            addresses.append(
                (addr, os_ip.resolve_address(endpoint_type=net_type)))
        return sorted(addresses)
    def __call__(self):
        '''
        Horizon specific HAProxy context; haproxy is used all the time
        in the openstack dashboard charm so a single instance just
        self refers
        '''
        cluster_hosts = {}
        # Unit names contain '/', which is not usable as a backend name.
        l_unit = local_unit().replace('/', '-')
        if config('prefer-ipv6'):
            # Exclude the VIP so we advertise the unit's own ipv6 address.
            cluster_hosts[l_unit] = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            cluster_hosts[l_unit] = get_relation_ip('cluster')

        # Add every peer on the cluster relation as a backend.
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        log('Ensuring haproxy enabled in /etc/default/haproxy.')
        with open('/etc/default/haproxy', 'w') as out:
            out.write('ENABLED=1\n')

        # NOTE(review): second port in each pair is presumably the
        # backend/listen port — confirm against the haproxy template.
        ctxt = {
            'units': cluster_hosts,
            'service_ports': {
                'dash_insecure': [80, 70],
                'dash_secure': [443, 433]
            },
            'prefer_ipv6': config('prefer-ipv6')
        }
        return ctxt
    def __call__(self):
        """Build template context with this host's cloud-compute address
        and, for new enough installs, its FQDN for the Nova ``host`` option.
        """
        ctxt = {}
        # Use the address used in the cloud-compute relation in templates for
        # this host
        host_ip = get_relation_ip('cloud-compute',
                                  cidr_network=config('os-internal-network'))

        if host_ip:
            # NOTE: do not format this even for ipv6 (see bug 1499656)
            ctxt['host_ip'] = host_ip

            # the contents of the Nova ``host`` configuration option is
            # referenced throughout a OpenStack deployment, an example being
            # Neutron port bindings.  It's value should not change after a
            # individual units initial deployment.
            #
            # We do want to migrate to using FQDNs so we enable this for new
            # installations.
            db = kv()
            if db.get('install_version', 0) >= 1910:
                fqdn = socket.getfqdn(host_ip)
                if '.' in fqdn:
                    # only populate the value if getfqdn() is able to find an
                    # actual FQDN for this host.  If not, we revert back to
                    # not setting the configuration option and use Nova's
                    # default behaviour.
                    ctxt['host'] = fqdn

        return ctxt
Exemple #17
0
def hanode_relation_joined(relid=None):
    """Publish this unit's address and hostname on the hanode relation.

    :param relid: Relation to update; None means the current relation.
    """
    settings = {
        'private-address': get_relation_ip('hanode'),
        'hostname': get_hostname(),
    }
    relation_set(relation_id=relid, relation_settings=settings)
Exemple #18
0
    def local_network_split_addresses(self):
        """Map of local units addresses for each address type

           NOTE: This excludes private-address
           @return dict of backends and networks for local unit e.g.
               {'this_unit_admin_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_admin_addr'},
                    'network': 'this_unit_admin_addr/admin_netmask'},
                'this_unit_internal_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_internal_addr'},
                    'network': 'this_unit_internal_addr/internal_netmask'},
                'this_unit_public_addr': {
                    'backends': {
                        'this_unit-1': 'this_unit_public_addr'},
                    'network': 'this_unit_public_addr/public_netmask'}}
        """
        config = hookenv.config()
        _cluster_hosts = {}
        for addr_type in ADDRESS_TYPES:
            cfg_opt = os_ip.ADDRESS_MAP[addr_type]['config']
            # Binding address, optionally constrained by the configured CIDR.
            laddr = ch_ip.get_relation_ip(
                os_ip.ADDRESS_MAP[addr_type]['binding'], config.get(cfg_opt))
            if laddr:
                netmask = ch_ip.get_netmask_for_address(laddr)
                _cluster_hosts[laddr] = {
                    'network':
                    "{}/{}".format(laddr, netmask),
                    'backends':
                    collections.OrderedDict([(self.local_unit_name, laddr)])
                }
        return _cluster_hosts
Exemple #19
0
def get_certificate_sans(bindings=None):
    """Get all possible IP addresses for certificate SANs.

    Collects the unit's private-address plus, for each API binding, the
    resolved endpoint address, the relation/space IP, and any VIP on that
    network.

    :param bindings: Optional list of extra binding names to include on
        top of the default API bindings.
    :returns: Unique set of SAN addresses.
    :rtype: set
    """
    _sans = [unit_get('private-address')]
    if bindings:
        # Add default API bindings to bindings list
        bindings = set(bindings + get_default_api_bindings())
    else:
        # Use default API bindings
        bindings = get_default_api_bindings()

    for binding in bindings:
        # Check for config override
        try:
            net_config = config(ADDRESS_MAP[binding]['config'])
        except KeyError:
            # There is no configuration network for this binding name
            net_config = None
        # Using resolve_address is likely redundant. Keeping it here in
        # case there is an edge case it handles.
        net_addr = resolve_address(endpoint_type=binding)
        ip = get_relation_ip(binding, cidr_network=net_config)
        _sans = _sans + [net_addr, ip]
        vip = get_vip_in_network(resolve_network_cidr(ip))
        if vip:
            _sans.append(vip)
    # Drop unresolved (None/empty) entries: they are never valid SANs and
    # previously leaked into the returned set when an address could not be
    # determined.
    return {san for san in _sans if san}
def leader_elected():
    '''Record the leader's cluster address in leader storage.'''
    if not is_leader():
        # A non-leader can race into this hook; just note it and bail.
        log('leader-elected hook executed, but this unit is not the leader',
            level=INFO)
        return
    leader_set(**{'leader-ip': get_relation_ip('cluster')})
    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
           (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        for net_type in ADDRESS_TYPES:
            net_cfg_opt = os_ip.ADDRESS_MAP[net_type]['config']
            config_cidr = getattr(self, net_cfg_opt.replace('-', '_'), None)
            if hookenv.config(net_cfg_opt):
                # Explicit network config: pick our address in that CIDR.
                addr = ch_ip.get_address_in_network(
                    config_cidr,
                    hookenv.unit_get('private-address'))
            else:
                # Otherwise use the extra-binding (network space) address.
                addr = ch_ip.get_relation_ip(
                    os_ip.ADDRESS_MAP[net_type]['binding'])
            addresses.append(
                (addr, os_ip.resolve_address(endpoint_type=net_type)))
        return sorted(addresses)
def cluster_joined(relation_id=None):
    """Advertise addresses on the cluster relation; when fired by the
    cluster relation hook itself, also check local DB actions.

    :param relation_id: Relation to update; None means the current relation.
    """
    settings = {}
    for net_type in ADDRESS_TYPES:
        cidr = config('os-{}-network'.format(net_type))
        addr = get_relation_ip(net_type, cidr_network=cidr)
        if addr:
            settings['{}-address'.format(net_type)] = addr
    settings['private-address'] = get_relation_ip('cluster')
    relation_set(relation_id=relation_id, relation_settings=settings)

    # Only do if this is fired by cluster rel
    if not relation_id:
        check_local_db_actions_complete()
Exemple #23
0
 def get_database_setup(self):
     """Return the database setup request for the gnocchi database.

     NOTE(review): the username value appears redacted ('******') in this
     source — confirm the real configured value.

     :returns: list with a single dict of database, username and hostname.
     """
     return [
         {
             'database': 'gnocchi',
             'username': '******',
             'hostname': ch_ip.get_relation_ip(DB_INTERFACE)
         },
     ]
Exemple #24
0
    def test_get_relation_ip(self, assert_charm_supports_ipv6, get_ipv6_addr,
                             unit_get, get_address_in_network,
                             network_get_primary_address, config):
        """Exercise get_relation_ip() across network-get fallback, explicit
        CIDR and prefer-ipv6 paths (all collaborators are mocked).
        """
        ACCESS_IP = '10.50.1.1'
        ACCESS_NETWORK = '10.50.1.0/24'
        AMQP_IP = '10.200.1.1'
        IPV6_IP = '2001:DB8::1'
        DEFAULT_IP = '172.16.1.1'
        assert_charm_supports_ipv6.return_value = True
        get_ipv6_addr.return_value = [IPV6_IP]
        unit_get.return_value = DEFAULT_IP
        get_address_in_network.return_value = DEFAULT_IP
        network_get_primary_address.return_value = AMQP_IP

        # Network-get calls
        _config = {'prefer-ipv6': False}
        config.side_effect = lambda key: _config.get(key)

        # Falls back to the unit-get address when network-get is unsupported.
        network_get_primary_address.side_effect = NotImplementedError
        self.assertEqual(DEFAULT_IP, net_ip.get_relation_ip('amqp'))

        # ... or when the binding does not exist.
        network_get_primary_address.side_effect = net_ip.NoNetworkBinding
        self.assertEqual(DEFAULT_IP, net_ip.get_relation_ip('doesnotexist'))

        # Happy path: the bound network-get address is used directly.
        network_get_primary_address.side_effect = None
        self.assertEqual(AMQP_IP, net_ip.get_relation_ip('amqp'))

        self.assertFalse(get_address_in_network.called)

        # Specific CIDR network
        get_address_in_network.return_value = ACCESS_IP
        network_get_primary_address.return_value = DEFAULT_IP
        self.assertEqual(
            ACCESS_IP,
            net_ip.get_relation_ip('shared-db',
                                   cidr_network=ACCESS_NETWORK))
        get_address_in_network.assert_called_with(ACCESS_NETWORK, DEFAULT_IP)

        self.assertFalse(assert_charm_supports_ipv6.called)

        # IPv6
        _config = {'prefer-ipv6': True}
        config.side_effect = lambda key: _config.get(key)
        self.assertEqual(IPV6_IP, net_ip.get_relation_ip('amqp'))
        assert_charm_supports_ipv6.assert_called_with()
def memcached_joined():
    """When memcache relation joins we want to set our private address as the
    spaces address rather than leaving it as the unit address.  This is to
    support network spaces in the memcached charm.
    """
    space_addr = get_relation_ip('memcache')
    relation_set(relation_id=None,
                 relation_settings={'private-address': space_addr})
    memcached_common()
def pgsql_db_joined():
    """Request a PostgreSQL database, refusing if MySQL is already related."""
    if is_relation_made('shared-db'):
        # raise error
        msg = ('Attempting to associate a postgresql database when'
               ' there is already associated a mysql one')
        log(msg, level=ERROR)
        raise Exception(msg)

    settings = {'database': config('database'),
                'private-address': get_relation_ip('psql-db')}
    relation_set(**settings)
def cluster_joined(relation_id=None):
    """Share the secret (leader only), rewrite configs and publish
    this unit's addresses on the cluster relation.

    :param relation_id: Relation to update; None means the current relation.
    """
    # If this node is the elected leader then share our secret with other nodes
    if is_elected_leader('grp_ceilometer_vips'):
        peer_store('shared_secret', get_shared_secret())

    CONFIGS.write_all()

    settings = {}

    # One '<type>-address' entry per configured extra binding.
    for addr_type in ADDRESS_TYPES:
        address = get_relation_ip(addr_type,
                                  cidr_network=config(
                                      'os-{}-network'.format(addr_type)))
        if address:
            settings['{}-address'.format(addr_type)] = address

    settings['private-address'] = get_relation_ip('cluster')

    relation_set(relation_id=relation_id, relation_settings=settings)
def provider_ip(cls):
    """Return the provider binding network IP.

    Resolves the address from the 'provider' extra binding.

    :returns: string IP address
    """
    address = ch_ip.get_relation_ip(PROVIDER_BINDING)
    return address
def speaker_ip(cls):
    """Return the BGP speaker binding network IP.

    Resolves the address from the interface-bgp relation binding.

    :returns: string IP address
    """
    address = ch_ip.get_relation_ip(SPEAKER_BINDING)
    return address
def get_unit_amqp_endpoint_data():
    """Get the hostname and ip address associated with amqp interface.

    :returns: Tuple containing ip address and hostname.
    :rtype: (str, str)
    """
    # Honour the configured CIDR override for the amqp space address.
    override_cidr = config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG)
    addr = get_relation_ip(rabbit_net_utils.AMQP_INTERFACE,
                           cidr_network=override_cidr)
    return addr, get_hostname(addr)
def get_unit_amqp_endpoint_data():
    """Get the hostname and ip address associated with amqp interface.

    :returns: Tuple containing ip address and hostname.
    :rtype: (str, str)
    """
    # Honour the configured CIDR override for the amqp space address.
    ip = get_relation_ip(rabbit_net_utils.AMQP_INTERFACE,
                         cidr_network=config(
                             rabbit_net_utils.AMQP_OVERRIDE_CONFIG))
    return ip, get_hostname(ip)
def db_joined(relation_id=None):
    """Request the nova DB and, release permitting, nova_api/nova_cell0.

    Refuses if a PostgreSQL relation is already made.

    :param relation_id: Relation to update; None means the current relation.
    """
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            sync_db_with_multi_ipv6_addresses('nova_cell0',
                                              config('database-user'),
                                              relation_prefix='novacell0')
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in related_units(relid=relation_id):
            access_network = relation_get(rid=relation_id,
                                          unit=unit,
                                          attribute='access-network')
            if access_network:
                break
        # Pick our address on the access network (if any) for the grants.
        host = get_relation_ip('shared-db', cidr_network=access_network)

        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            relation_set(novacell0_database='nova_cell0',
                         novacell0_username=config('database-user'),
                         novacell0_hostname=host,
                         relation_id=relation_id)
    def __call__(self):
        """Return template context carrying this host's compute address."""
        # Use the address used in the cloud-compute relation in templates for
        # this host
        address = get_relation_ip('cloud-compute')
        if not address:
            return {}
        # NOTE: do not format this even for ipv6 (see bug 1499656)
        return {'host_ip': address}
Exemple #34
0
def cluster_address(cls):
    """Determine this unit's cluster address.

    Resolves the address from the 'cluster' relation binding.

    :param cls: Class
    :type cls: MySQLInnoDBClusterCharm class
    :returns: Address
    :rtype: str
    """
    binding = "cluster"
    return ch_net_ip.get_relation_ip(binding)
Exemple #35
0
def db_joined(relation_id=None):
    """Request the nova DB and, release permitting, nova_api/nova_cell0.

    :param relation_id: Relation to update; None means the current relation.
    """
    cmp_os_release = ch_utils.CompareOpenStackReleases(
        ch_utils.os_release('nova-common'))
    if hookenv.config('prefer-ipv6'):
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_api',
                hookenv.config('database-user'),
                relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_cell0',
                hookenv.config('database-user'),
                relation_prefix='novacell0')
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in hookenv.related_units(relid=relation_id):
            access_network = hookenv.relation_get(rid=relation_id,
                                                  unit=unit,
                                                  attribute='access-network')
            if access_network:
                break
        # Pick our address on the access network (if any) for the grants.
        host = ch_network_ip.get_relation_ip('shared-db',
                                             cidr_network=access_network)

        hookenv.relation_set(nova_database=hookenv.config('database'),
                             nova_username=hookenv.config('database-user'),
                             nova_hostname=host,
                             relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            hookenv.relation_set(
                novaapi_database='nova_api',
                novaapi_username=hookenv.config('database-user'),
                novaapi_hostname=host,
                relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            hookenv.relation_set(
                novacell0_database='nova_cell0',
                novacell0_username=hookenv.config('database-user'),
                novacell0_hostname=host,
                relation_id=relation_id)
def memcached_joined():
    """When memcache relation joins we want to set our private address as the
    spaces address rather than leaving it as the unit address.  This is to
    support network spaces in the memcached charm.
    """
    space_addr = ch_network_ip.get_relation_ip('memcache')
    hookenv.relation_set(
        relation_id=None,
        relation_settings={'private-address': space_addr})
    memcached_common()
    def __call__(self):
        """Build template context with this host's cloud-compute address."""
        ctxt = {}
        # Use the address used in the cloud-compute relation in templates for
        # this host
        host_ip = get_relation_ip('cloud-compute')

        if host_ip:
            # NOTE: do not format this even for ipv6 (see bug 1499656)
            ctxt['host_ip'] = host_ip

        return ctxt
Exemple #38
0
def db_router_address(cls):
    """Determine this unit's DB-Router address.

    Resolves the address from the 'db-router' relation binding.

    :param cls: Class
    :type cls: MySQLInnoDBClusterCharm class
    :returns: Address
    :rtype: str
    """
    binding = "db-router"
    return ch_net_ip.get_relation_ip(binding)
def db_joined(relation_id=None):
    """Request the nova DB and, release permitting, nova_api/nova_cell0.

    :param relation_id: Relation to update; None means the current relation.
    """
    cmp_os_release = ch_utils.CompareOpenStackReleases(
        ch_utils.os_release('nova-common'))
    if hookenv.config('prefer-ipv6'):
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_api',
                hookenv.config('database-user'),
                relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            ch_utils.sync_db_with_multi_ipv6_addresses(
                'nova_cell0',
                hookenv.config('database-user'),
                relation_prefix='novacell0')
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in hookenv.related_units(relid=relation_id):
            access_network = hookenv.relation_get(rid=relation_id, unit=unit,
                                                  attribute='access-network')
            if access_network:
                break
        # Pick our address on the access network (if any) for the grants.
        host = ch_network_ip.get_relation_ip('shared-db',
                                             cidr_network=access_network)

        hookenv.relation_set(nova_database=hookenv.config('database'),
                             nova_username=hookenv.config('database-user'),
                             nova_hostname=host,
                             relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            hookenv.relation_set(
                novaapi_database='nova_api',
                novaapi_username=hookenv.config('database-user'),
                novaapi_hostname=host,
                relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            hookenv.relation_set(
                novacell0_database='nova_cell0',
                novacell0_username=hookenv.config('database-user'),
                novacell0_hostname=host,
                relation_id=relation_id)
def db_joined(rid=None):
    """Handle joining the shared-db (MySQL) relation.

    Publishes the nova database name, username and this unit's
    shared-db endpoint address on the relation.

    :param rid: relation id to publish on (current relation if None)
    :raises Exception: if a postgresql (pgsql-db) relation is already made
    """
    if is_relation_made('pgsql-db'):
        # Refuse to configure MySQL access alongside PostgreSQL.
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    db_settings = {
        'nova_database': config('database'),
        'nova_username': config('database-user'),
        'nova_hostname': get_relation_ip('shared-db'),
    }
    relation_set(relation_id=rid, **db_settings)
Exemple #41
0
def db_joined(rid=None):
    """Publish MySQL shared-db access details for this unit.

    :param rid: relation id to publish on (current relation if None)
    :raises Exception: if a postgresql (pgsql-db) relation is already made
    """
    # A postgresql relation is mutually exclusive with mysql access.
    if is_relation_made('pgsql-db'):
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(
        relation_id=rid,
        nova_database=config('database'),
        nova_username=config('database-user'),
        nova_hostname=get_relation_ip('shared-db'))
Exemple #42
0
def cluster_joined(rid=None, ssl_sync_request=True):
    """Handle joining the cluster peer relation.

    Authorizes SSH peers for unison, advertises this unit's per-network
    addresses plus its cluster binding address, and optionally requests
    an SSL sync from peers.

    :param rid: relation id to publish on (current relation if None)
    :param ssl_sync_request: whether to request an SSL sync after joining
    """
    unison.ssh_authorized_peers(user=SSH_USER,
                                group=SSH_USER,
                                peer_interface='cluster',
                                ensure_local_user=True)

    # Advertise an address per configured network type, skipping any
    # type that resolves to nothing.
    settings = {
        '{}-address'.format(addr_type): addr
        for addr_type, addr in (
            (atype,
             get_relation_ip(
                 atype,
                 cidr_network=config('os-{}-network'.format(atype))))
            for atype in ADDRESS_TYPES)
        if addr
    }
    settings['private-address'] = get_relation_ip('cluster')

    relation_set(relation_id=rid, relation_settings=settings)

    if ssl_sync_request:
        send_ssl_sync_request()
def leader_elected():
    """Record the leader's cluster IP and refresh dependent relations."""
    if not is_leader():
        log('leader-elected hook executed, but this unit is not the leader',
            level=INFO)
    else:
        leader_set(**{'leader-ip': get_relation_ip('cluster')})
    # NOTE(tkurek): re-set 'master' relation data
    if relation_ids('master'):
        master_joined()
    # NOTE(tkurek): configure new leader
    if relation_ids('slave'):
        configure_slave()
 def rndc_master_ips(self):
     """Return rndc master addresses for this unit and its peers.

     Starts with this unit's dns-backend binding address, then appends
     any 'rndc-address' values advertised by peers on the cluster
     relation.

     :returns: list of rndc master IP addresses
     :rtype: list
     """
     rndc_master_ips = []
     rndc_master_ip = ch_ip.get_relation_ip('dns-backend')
     rndc_master_ips.append(rndc_master_ip)
     # FIX: guard against the cluster relation not existing yet; the
     # original indexed relation_ids('cluster')[0] unconditionally and
     # raised IndexError before the relation was made.
     cluster_relids = hookenv.relation_ids('cluster')
     if cluster_relids:
         cluster_relid = cluster_relids[0]
         for unit in hookenv.related_units(relid=cluster_relid):
             rndc_master_ip = hookenv.relation_get('rndc-address',
                                                   rid=cluster_relid,
                                                   unit=unit)
             if rndc_master_ip is not None:
                 rndc_master_ips.append(rndc_master_ip)
     return rndc_master_ips
def shared_db_cell_joined(relation_id=None):
    """Publish cell database access details on a shared-db relation.

    Scans related units for an 'access-network' setting so the
    advertised hostname is taken from the correct network, then
    publishes the nova cell database name, user and hostname.

    :param relation_id: relation id to publish on (current if None)
    """
    # Check for access-network early to avoid churn.
    access_network = None
    for unit in hookenv.related_units(relid=relation_id):
        access_network = hookenv.relation_get(rid=relation_id, unit=unit,
                                              attribute='access-network')
        if access_network:
            break
    # BUG FIX: resolve the host AFTER the scan. The original computed it
    # inside the loop, so it was skipped whenever access-network was
    # found (break fired first) and left undefined (NameError) when no
    # related units existed. This matches the sibling shared-db handler.
    host = ch_network_ip.get_relation_ip('shared-db',
                                         cidr_network=access_network)
    cell_db = {
        'nova_database': 'nova',
        'nova_username': hookenv.config('database-user'),
        'nova_hostname': host}
    hookenv.relation_set(relation_id=relation_id, **cell_db)
def db_joined():
    """Publish database access details on the shared-db relation.

    In IPv6 deployments the multi-address sync helper is used instead
    of publishing a single hostname.
    """
    if not config('prefer-ipv6'):
        # Check for an access-network setting early to avoid churn.
        access_network = None
        for unit in related_units():
            access_network = relation_get(unit=unit,
                                          attribute='access-network')
            if access_network:
                break

        relation_set(database=config('database'),
                     username=config('database-user'),
                     hostname=get_relation_ip('shared-db',
                                              cidr_network=access_network))
    else:
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))
def compute_joined(rid=None):
    """Advertise compute connection details on the cloud-compute relation.

    NOTE(james-page): in MAAS environments the actual hostname is a CNAME
    record so won't get scanned based on private-address which is an IP;
    the locally configured hostname is therefore published explicitly.

    :param rid: relation id to publish on (current relation if None)
    """
    relation_info = {
        'hostname': gethostname(),
        'private-address': get_relation_ip('cloud-compute'),
    }

    if migration_enabled():
        auth_type = config('migration-auth-type')
        relation_info['migration_auth_type'] = auth_type
        if auth_type == 'ssh':
            relation_info['ssh_public_key'] = public_ssh_key()
        relation_set(relation_id=rid, **relation_info)

    if config('enable-resize'):
        relation_info['nova_ssh_public_key'] = public_ssh_key(user='******')
        relation_set(relation_id=rid, **relation_info)
    def internal_addresses(self):
        """Return list of internal addresses of this unit and peers.

           If no internal address cidr has been set return private
           addresses.

           @return list [ip1, ip2, ...]
        """
        internal_map = os_ip.ADDRESS_MAP[os_ip.INTERNAL]
        local_addr = ch_ip.get_relation_ip(
            internal_map['binding'],
            self.config.get(internal_map['config']))
        try:
            # Known cluster address: return all backend peers, sorted.
            return sorted(
                self.cluster_hosts[local_addr]['backends'].values())
        except KeyError:
            # Not clustered (yet): fall back to just this unit.
            return [local_addr]
    def add_network_split_addresses(self):
        """Populate cluster_hosts with addresses of this unit and its
           peers on each address type

           @return None
        """
        for addr_type in ADDRESS_TYPES:
            binding = os_ip.ADDRESS_MAP[addr_type]['binding']
            cidr = self.config.get(os_ip.ADDRESS_MAP[addr_type]['config'])
            laddr = ch_ip.get_relation_ip(binding, cidr)
            if not laddr:
                continue
            # Seed this unit's entry for the network, then merge in each
            # peer's advertised address for the same binding.
            self.cluster_hosts[laddr] = \
                self.local_network_split_addresses()[laddr]
            addr_key = '{}-address'.format(binding)
            for peer_unit, peer_addr in self.relation.ip_map(
                    address_key=addr_key):
                if peer_addr:
                    self.cluster_hosts[laddr]['backends'][peer_unit] = \
                        peer_addr
def prepare():
    """Prepare this unit for a cluster series upgrade.

    Pauses mysql for the duration of the upgrade and marks the unit as
    series-upgrading. The leader will "bootstrap" with no wsrep peers;
    non-leaders will point only at the newly upgraded leader until the
    cluster series upgrade is completed. cluster_series_upgrading stays
    set until the complete-cluster-series-upgrade action is run on the
    leader node.
    """
    # Use the pause feature to stop mysql during the duration of the upgrade
    pause_unit_helper(register_configs())
    # Set this unit to series upgrading
    set_unit_upgrading()

    upgrade_leader = leader_get('cluster_series_upgrade_leader')
    if upgrade_leader:
        # A leader has already been recorded: point only at it.
        hosts = [upgrade_leader]
    else:
        # First unit through: record ourselves as upgrade leader and
        # bootstrap with no peers.
        hosts = []
        leader_set(cluster_series_upgrading=True)
        leader_set(
            cluster_series_upgrade_leader=get_relation_ip('cluster'))

    # Render config
    render_config(hosts)
def cluster_joined(relation_id=None):
    """Handle joining the rabbitmq cluster peer relation.

    Publishes this unit's hostname and cluster binding address, then —
    on the leader only — propagates the erlang cookie and leader
    identity via peer storage so peers can form the native rabbitmq
    cluster. Skipped when an hacluster relation manages clustering.

    :param relation_id: relation id to publish on (current if None)
    """
    relation_settings = {
        'hostname': rabbit.get_unit_hostname(),
        'private-address':
            ch_ip.get_relation_ip(
                rabbit_net_utils.CLUSTER_INTERFACE,
                cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG)),
    }

    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        # Older juju with no leadership support: fall back to relation
        # ordering to decide who propagates the cookie.
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if is_leader():
        log('Leader peer_storing cookie', level=INFO)
        # FIX: close the cookie file deterministically; the original
        # used a bare open() and leaked the file handle.
        with open(rabbit.COOKIE_PATH, 'r') as cookie_file:
            cookie = cookie_file.read().strip()
        peer_store('cookie', cookie)
        peer_store('leader_node_ip', unit_private_ip())
        peer_store('leader_node_hostname', rabbit.get_unit_hostname())
Exemple #52
0
    def update_peers(self, cluster):
        """Update peers in the cluster about the addresses that this unit
        holds.

        NOTE(AJK): This uses the helper is_data_changed() to track whether
        this has already been done, and doesn't re-advertise the changes if
        nothing has changed.

        @param cluster: the interface object for the cluster relation
        """
        local_addresses = []
        for addr_type in sorted(os_ip.ADDRESS_MAP.keys()):
            entry = os_ip.ADDRESS_MAP[addr_type]
            addr = ch_ip.get_relation_ip(
                entry['binding'],
                self.config.get(entry['config']))
            local_addresses.append((addr_type, addr))
        with is_data_changed('update_peers.laddrs',
                             local_addresses) as changed:
            if not changed:
                return
            for addr_type, addr in local_addresses:
                cluster.set_address(
                    os_ip.ADDRESS_MAP[addr_type]['binding'],
                    addr)
def upgrade():
    """Handle the upgrade-charm hook for a percona/galera cluster unit.

    On the leader: records the leader IP, (re)configures the SST user,
    migrates the legacy root password into leadership data, marks the
    cluster as initially clustered, and broadcasts the wsrep bootstrap
    UUID once the cluster reports ready. On non-leaders: waits for the
    leader-published bootstrap UUID.
    """
    if is_leader():
        # Paused/series-upgrading units must not touch cluster state.
        if is_unit_paused_set() or is_unit_upgrading_set():
            log('Unit is paused, skiping upgrade', level=INFO)
            return

        # Leader sets on upgrade
        leader_set(**{'leader-ip': get_relation_ip('cluster')})
        configure_sstuser(sst_password())
        # Migrate the pre-existing 'mysql.passwd' leadership key to the
        # newer 'root-password' key if the latter is not set yet.
        if not leader_get('root-password') and leader_get('mysql.passwd'):
            leader_set(**{'root-password': leader_get('mysql.passwd')})

        # On upgrade-charm we assume the cluster was complete at some point
        kvstore = kv()
        initial_clustered = kvstore.get(INITIAL_CLUSTERED_KEY, False)
        if not initial_clustered:
            kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
            kvstore.flush()

        # broadcast the bootstrap-uuid
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set
        # as this is all happening during the upgrade-charm hook is reasonable
        # to expect the cluster is running.

        # Wait until the leader has set the
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")
Exemple #54
0
def gateway_relation():
    """Advertise this unit's gateway-relation address and service port."""
    relation_info = {
        'hostname': get_relation_ip('gateway-relation'),
        'port': config('port'),
    }
    relation_set(**relation_info)
def config_changed():
    """Handle the config-changed hook for the rabbitmq-server charm.

    Updates /etc/hosts, package sources and ulimit defaults, reconciles
    SSL/management-plugin port state, rewrites config files, refreshes
    nrpe checks, and — on the leader only — reapplies mirroring/TTL
    settings and re-triggers cluster_changed for all peers.
    """
    if is_unit_paused_set():
        log("Do not run config_changed while unit is paused", "WARNING")
        return

    # Update hosts with this unit's information
    cluster_ip = ch_ip.get_relation_ip(
        rabbit_net_utils.CLUSTER_INTERFACE,
        cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG))
    rabbit.update_hosts_file({cluster_ip: rabbit.get_unit_hostname()})

    # Add archive source if provided and not in the upgrade process
    if not leader_get("cluster_series_upgrading"):
        add_source(config('source'), config('key'))
    # Copy in defaults file for updated ulimits
    shutil.copyfile(
        'templates/rabbitmq-server',
        '/etc/default/rabbitmq-server')

    # Install packages to ensure any changes to source
    # result in an upgrade if applicable only if we change the 'source'
    # config option
    if rabbit.archive_upgrade_available():
        # Avoid packge upgrade collissions
        # Stopping and attempting to start rabbitmqs at the same time leads to
        # failed restarts
        rabbit.cluster_wait()
        rabbit.install_or_upgrade_packages()

    # Open/close the plain (5672) and SSL ports to match the 'ssl' mode.
    if config('ssl') == 'off':
        open_port(5672)
        close_port(int(config('ssl_port')))
    elif config('ssl') == 'on':
        open_port(5672)
        open_port(int(config('ssl_port')))
    elif config('ssl') == 'only':
        close_port(5672)
        open_port(int(config('ssl_port')))
    else:
        log("Unknown ssl config value: '%s'" % config('ssl'), level=ERROR)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(rabbit.get_managment_port())
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(rabbit.get_managment_port())
        # LY: Close the old managment port since it may have been opened in a
        #     previous version of the charm. close_port is a noop if the port
        #     is not open
        close_port(55672)

    rabbit.ConfigRenderer(
        rabbit.CONFIG_FILES).write_all()

    # Only refresh nrpe checks from the active node when clustered via
    # hacluster (non-vip-only), to avoid duplicate monitoring updates.
    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")

        if ha_is_active_active:
            update_nrpe_checks()
        else:
            if is_elected_leader('res_rabbitmq_vip'):
                update_nrpe_checks()
            else:
                log("hacluster relation is present but this node is not active"
                    " skipping update nrpe checks")
    elif is_relation_made('nrpe-external-master'):
        update_nrpe_checks()

    # Only set values if this is the leader
    if not is_leader():
        return

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))

    # Update cluster in case min-cluster-size has changed
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_changed(relation_id=rid, remote_unit=unit)

    # NOTE(jamespage): Workaround until we have a good way
    #                  of generally disabling notifications
    #                  based on which services are deployed.
    if 'openstack' in rabbit.list_vhosts():
        rabbit.configure_notification_ttl('openstack',
                                          config('notification-ttl'))
def secrets_storage_joined(relation_id=None):
    """Request vaultlocker-backed secret storage on the relation.

    :param relation_id: relation id to publish on (current if None)
    """
    settings = {
        'secret_backend': 'charm-vaultlocker',
        'isolated': True,
        'access_address': get_relation_ip('secrets-storage'),
        'hostname': socket.gethostname(),
    }
    relation_set(relation_id=relation_id, **settings)
def amqp_changed(relation_id=None, remote_unit=None):
    """Handle amqp relation changes.

    On the leader-ready node: provisions the requested user/vhost (or
    per-rid sets of them in the legacy peerstorage format) and publishes
    credentials, SSL, clustering and HA-queue settings. On non-leader
    client-ready nodes: re-propagates leader-published settings to all
    amqp relations.

    :param relation_id: relation id that changed (current if None)
    :param remote_unit: remote unit that changed (current if None)
    """
    singleset = set(['username', 'vhost'])
    host_addr = ch_ip.get_relation_ip(
        rabbit_net_utils.AMQP_INTERFACE,
        cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))

    if rabbit.leader_node_is_ready():
        relation_settings = {'hostname': host_addr,
                             'private-address': host_addr}
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username, vhost, amqp_rid, admin=admin)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since i don't
            #              think it's ever used anymore and stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.items():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    # BUG FIX: 'admin' was previously referenced here without
                    # being defined on this branch (NameError at runtime);
                    # read it from the per-rid settings like the branch above.
                    admin = queues[amqp_rid].get('admin', False)
                    password = configure_amqp(username, vhost, amqp_rid,
                                              admin=admin)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log("Updating relation {} keys {}"
            .format(relation_id or get_relation_id(),
                    ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
    elif not is_leader() and rabbit.client_node_is_ready():
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
 def joined(self):
     """Mark the relation as related and publish this unit's address."""
     conversation = self.conversation()
     conversation.set_state('{relation_name}.related')
     conversation.set_remote(
         'private-address',
         ch_ip.get_relation_ip(conversation.relation_name))
     hookenv.log('States: {}'.format(get_states().keys()))
    def control_listen_ip(self):
        """Return the IP address the local rndc service listens on.

        :returns: IP local rndc listens on
        :rtype: str
        """
        listen_ip = ch_ip.get_relation_ip('dns-backend')
        return listen_ip
def cluster_joined(relation_id=None):
    """Publish this unit's cluster address for msm/doctl controller modes.

    :param relation_id: relation id to publish on (current if None)
    """
    if config('controller-app-mode') in ('msm', 'doctl'):
        relation_set(
            relation_id=relation_id,
            relation_settings={
                'private-address': get_relation_ip('cluster')})