The examples below are collected from Juju charm hooks and show typical uses of charmhelpers' is_relation_made: guarding against conflicting database backends, skipping work until a relation exists, and selecting contexts based on which relations are present.

Example #1
def db_joined():
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if network_manager() in ['quantum', 'neutron']:
        config_neutron = True
    else:
        config_neutron = False

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if config_neutron:
            sync_db_with_multi_ipv6_addresses(config('neutron-database'),
                                              config('neutron-database-user'),
                                              relation_prefix='neutron')
    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host)

        if config_neutron:
            # XXX: Renaming relations from quantum_* to neutron_* here.
            relation_set(neutron_database=config('neutron-database'),
                         neutron_username=config('neutron-database-user'),
                         neutron_hostname=host)
Example #2
def db_joined(relation_id=None):
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)
Example #3
def db_joined():
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if network_manager() in ['quantum', 'neutron']:
        config_neutron = True
    else:
        config_neutron = False

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if config_neutron:
            sync_db_with_multi_ipv6_addresses(config('neutron-database'),
                                              config('neutron-database-user'),
                                              relation_prefix='neutron')
    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host)

        if config_neutron:
            # XXX: Renaming relations from quantum_* to neutron_* here.
            relation_set(neutron_database=config('neutron-database'),
                         neutron_username=config('neutron-database-user'),
                         neutron_hostname=host)
Example #4
def db_joined(relation_id=None):
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if os_release('nova-common') >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)
Example #5
def db_joined(relation_id=None):
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            sync_db_with_multi_ipv6_addresses('nova_api',
                                              config('database-user'),
                                              relation_prefix='novaapi')

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            sync_db_with_multi_ipv6_addresses('nova_cell0',
                                              config('database-user'),
                                              relation_prefix='novacell0')
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in related_units(relid=relation_id):
            access_network = relation_get(rid=relation_id,
                                          unit=unit,
                                          attribute='access-network')
            if access_network:
                break
        host = get_relation_ip('shared-db', cidr_network=access_network)

        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host,
                     relation_id=relation_id)

        if cmp_os_release >= 'mitaka':
            # NOTE: mitaka uses a second nova-api database as well
            relation_set(novaapi_database='nova_api',
                         novaapi_username=config('database-user'),
                         novaapi_hostname=host,
                         relation_id=relation_id)

        if cmp_os_release >= 'ocata':
            # NOTE: ocata requires cells v2
            relation_set(novacell0_database='nova_cell0',
                         novacell0_username=config('database-user'),
                         novacell0_hostname=host,
                         relation_id=relation_id)
Example #6
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log(
            'hacluster relation is present, skipping native '
            'rabbitmq cluster config.',
            level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to be marked as 'clustered'
        # (LP: #1691510)
        rabbit.cluster_with()
        # Local rabbit maybe clustered now so check and inform clients if
        # needed.
        update_clients()
        if is_leader():
            if (leader_get(rabbit.CLUSTER_MODE_KEY) != config(
                    rabbit.CLUSTER_MODE_KEY)):
                log("Informing peers via leaderdb to change {} to {}".format(
                    rabbit.CLUSTER_MODE_KEY, config(rabbit.CLUSTER_MODE_KEY)))
                leader_set(
                    {rabbit.CLUSTER_MODE_KEY: config(rabbit.CLUSTER_MODE_KEY)})
                rabbit.ConfigRenderer(rabbit.CONFIG_FILES).write_all()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
Example #7
    def __call__(self):
        """Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}

        release = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(release) < "icehouse":
            raise Exception("Unsupported version of Openstack")

        service = service_name()
        backup_driver = 'cinder.backup.drivers.ceph'
        return {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        'DEFAULT': [
                            ('backup_driver', backup_driver),
                            ('backup_ceph_conf',
                                os.path.join('/var/lib/charm',
                                             service, 'ceph.conf')),
                            ('backup_ceph_pool', service),
                            ('backup_ceph_user', service),
                        ]
                    }
                }
            }
        }
Example #8
    def __call__(self):
        ctxt = {
            'notifications': 'False',
        }
        if is_relation_made(self.amqp_relation):
            ctxt['notifications'] = "True"
        return ctxt
Example #9
def register_configs():
    ''' Register config files with their respective contexts. '''
    release = get_os_codename_install_source(config('openstack-origin'))
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    plugin = remap_plugin(config('plugin'))
    name = networking_name()
    if plugin == 'ovs':
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = NEUTRON_ML2_PLUGIN_CONF
        if release >= 'icehouse':
            drop_config = NEUTRON_OVS_PLUGIN_CONF
        if drop_config in CONFIG_FILES[name][plugin]:
            CONFIG_FILES[name][plugin].pop(drop_config)

    if is_relation_made('amqp-nova'):
        amqp_nova_ctxt = context.AMQPContext(
            ssl_dir=NOVA_CONF_DIR,
            rel_name='amqp-nova',
            relation_prefix='nova')
    else:
        amqp_nova_ctxt = context.AMQPContext(
            ssl_dir=NOVA_CONF_DIR,
            rel_name='amqp')
    CONFIG_FILES[name][plugin][NOVA_CONF][
        'hook_contexts'].append(amqp_nova_ctxt)
    for conf in CONFIG_FILES[name][plugin]:
        configs.register(conf,
                         CONFIG_FILES[name][plugin][conf]['hook_contexts'])
    return configs
Example #10
def assess_status():
    '''Assess status of current unit'''
    application_version_set(get_upstream_version(VERSION_PACKAGE))

    # Check that the no-bootstrap config option is set in conjunction with
    # having the bootstrap-source relation established
    if not config('no-bootstrap') and is_relation_made('bootstrap-source'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    moncount = int(config('monitor-count'))
    units = get_peer_units()
    # not enough peers and mon_count > 1
    if len(units.keys()) < moncount:
        status_set(
            'blocked', 'Insufficient peer units to bootstrap'
            ' cluster (require {})'.format(moncount))
        return

    # mon_count > 1, peers, but no ceph-public-address
    ready = sum(1 for unit_ready in units.values() if unit_ready)
    if ready < moncount:
        status_set('waiting', 'Peer units detected, waiting for addresses')
        return

    # active - bootstrapped + quorum status check
    if ceph.is_bootstrapped() and ceph.is_quorum():
        status_set('active', 'Unit is ready and clustered')
    else:
        # Unit should be running and clustered, but no quorum
        # TODO: should this be blocked or waiting?
        status_set('blocked', 'Unit not clustered (no quorum)')
Example #11
    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        os_codename = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(os_codename) >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'

        if config('rbd-pool-name'):
            pool_name = config('rbd-pool-name')
        else:
            pool_name = service
        section = {service: [('volume_backend_name', service),
                             ('volume_driver', volume_driver),
                             ('rbd_pool', pool_name),
                             ('rbd_user', service),
                             ('rbd_secret_uuid', leader_get('secret-uuid')),
                             ('rbd_ceph_conf', ceph_config_file())]}

        if CompareOpenStackReleases(os_codename) >= "mitaka":
            section[service].append(('report_discard_supported', True))

        if CompareOpenStackReleases(os_codename) >= "queens":
            section[service].append(('rbd_exclusive_cinder_pool', True))
            section[service].append(
                ('rbd_flatten_volume_from_snapshot',
                 config('rbd-flatten-volume-from-snapshot')))

        return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
Example #12
def upgrade_charm():
    emit_cephconf()
    apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
    ceph.update_monfs()
    mon_relation_joined()
    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()
Example #13
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()), fatal=True)
    # NOTE: ensure psutil install for hugepages configuration
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(['python-psutil']))
    packages_removed = remove_old_packages()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)

    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    # Fix previously wrongly created path permissions
    # LP: https://bugs.launchpad.net/charm-cinder-ceph/+bug/1779676
    asok_path = '/var/run/ceph/'
    gid = grp.getgrnam("kvm").gr_gid
    if gid and os.path.isdir(asok_path) and gid != os.stat(asok_path).st_gid:
        log("{} not owned by group 'kvm', fixing permissions.".format(
            asok_path))
        shutil.chown(asok_path, group='kvm')
Example #14
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    # NOTE: ensure psutil install for hugepages configuration
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(['python-psutil']))
    packages_removed = remove_old_packages()
    if packages_removed and not is_unit_paused_set():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)

    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    # Fix previously wrongly created path permissions
    # LP: https://bugs.launchpad.net/charm-cinder-ceph/+bug/1779676
    asok_path = '/var/run/ceph/'
    gid = grp.getgrnam("kvm").gr_gid
    if gid and os.path.isdir(asok_path) and gid != os.stat(asok_path).st_gid:
        log("{} not owned by group 'kvm', fixing permissions."
            .format(asok_path))
        shutil.chown(asok_path, group='kvm')
Example #15
def resolve_config_files(plugin, release):
    '''
    Resolve configuration files and contexts

    :param plugin: shortname of plugin e.g. ovs
    :param release: openstack release codename
    :returns: dict of configuration files, contexts
              and associated services
    '''
    config_files = deepcopy(CONFIG_FILES)
    if plugin == OVS:
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = [NEUTRON_OVS_AGENT_CONF]
        if release >= 'mitaka':
            # ml2 -> ovs_agent
            drop_config = [NEUTRON_ML2_PLUGIN_CONF]

        for _config in drop_config:
            if _config in config_files[plugin]:
                config_files[plugin].pop(_config)

    if is_relation_made('amqp-nova'):
        amqp_nova_ctxt = context.AMQPContext(
            ssl_dir=NOVA_CONF_DIR,
            rel_name='amqp-nova',
            relation_prefix='nova')
    else:
        amqp_nova_ctxt = context.AMQPContext(
            ssl_dir=NOVA_CONF_DIR,
            rel_name='amqp')
    config_files[plugin][NOVA_CONF][
        'hook_contexts'].append(amqp_nova_ctxt)
    return config_files
Example #16
    def __call__(self):
        '''
        Extends the main charmhelpers HAProxyContext with a port mapping
        specific to this charm.
        Also used to extend nova.conf context with correct api_listening_ports
        '''
        from nova_cc_utils import api_port
        ctxt = super(HAProxyContext, self).__call__()

        # determine which port api processes should bind to, depending
        # on existence of haproxy + apache frontends
        compute_api = determine_api_port(api_port('nova-api-os-compute'),
                                         singlenode_mode=True)
        ec2_api = determine_api_port(api_port('nova-api-ec2'),
                                     singlenode_mode=True)
        s3_api = determine_api_port(api_port('nova-objectstore'),
                                    singlenode_mode=True)
        neutron_api = determine_api_port(api_port('neutron-server'),
                                         singlenode_mode=True)

        # Apache ports
        a_compute_api = determine_apache_port(api_port('nova-api-os-compute'),
                                              singlenode_mode=True)
        a_ec2_api = determine_apache_port(api_port('nova-api-ec2'),
                                          singlenode_mode=True)
        a_s3_api = determine_apache_port(api_port('nova-objectstore'),
                                         singlenode_mode=True)
        a_neutron_api = determine_apache_port(api_port('neutron-server'),
                                              singlenode_mode=True)

        # to be set in nova.conf accordingly.
        listen_ports = {
            'osapi_compute_listen_port': compute_api,
            'ec2_listen_port': ec2_api,
            's3_listen_port': s3_api,
        }

        port_mapping = {
            'nova-api-os-compute': [
                api_port('nova-api-os-compute'), a_compute_api],
            'nova-api-ec2': [
                api_port('nova-api-ec2'), a_ec2_api],
            'nova-objectstore': [
                api_port('nova-objectstore'), a_s3_api],
        }

        if not is_relation_made('neutron-api'):
            if neutron.network_manager() == 'neutron':
                port_mapping.update({
                    'neutron-server': [
                        api_port('neutron-server'), a_neutron_api]
                })
                # neutron.conf listening port, set separate from nova's.
                ctxt['neutron_bind_port'] = neutron_api

        # for haproxy.conf
        ctxt['service_ports'] = port_mapping
        # for nova.conf
        ctxt['listen_ports'] = listen_ports
        return ctxt
Example #17
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                       peer_store_fatal=False, relation_settings=None,
                       delimiter='_', **kwargs):
    """Store passed-in arguments both in argument relation and in peer storage.

    It functions like doing relation_set() and peer_store() at the same time,
    with the same data.

    @param relation_id: the id of the relation to store the data on. Defaults
                        to the current relation.
    @param peer_store_fatal: If set to True, the function will raise an
                             exception should the peer storage not be
                             available."""

    relation_settings = relation_settings if relation_settings else {}
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings,
                 **kwargs)
    if is_relation_made(peer_relation_name):
        for key, value in six.iteritems(dict(list(kwargs.items()) +
                                             list(relation_settings.items()))):
            key_prefix = relation_id or current_relation_id()
            peer_store(key_prefix + delimiter + key,
                       value,
                       relation_name=peer_relation_name)
    else:
        if peer_store_fatal:
            raise ValueError('Unable to detect '
                             'peer relation {}'.format(peer_relation_name))
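The docstring above leaves the layout of the mirrored keys implicit: every setting is re-stored in peer storage under <relation_id><delimiter><key>. A hypothetical call from a shared-db hook illustrates the effect (the relation id and the nova_* key names are invented for this sketch):

# Sets the values on 'shared-db:3' via relation_set(), then echoes them
# into the 'cluster' peer relation as e.g. 'shared-db:3_nova_hostname'.
peer_store_and_set(relation_id='shared-db:3',
                   nova_database='nova',
                   nova_username='nova',
                   nova_hostname='10.5.0.12')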
Example #18
def guard_map():
    '''Map of services and required interfaces that must be present before
    the service should be allowed to start'''
    gmap = {}
    nova_services = deepcopy(BASE_SERVICES)
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')

    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')

    for svc in nova_services:
        gmap[svc] = nova_interfaces

    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        if network_manager() == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
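For concreteness, on a neutron-managed cloud with no separate neutron-api relation and a MySQL backend, guard_map() above returns roughly the following shape (service names depend on BASE_SERVICES and the release; the values here are illustrative only):

# Illustrative return value, not captured from a real deployment:
# {
#     'nova-api': ['identity-service', 'amqp', 'shared-db'],
#     'nova-conductor': ['identity-service', 'amqp', 'shared-db'],
#     'neutron-server': ['identity-service', 'amqp', 'shared-db'],
# }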
Example #19
def cluster_joined(relation_id=None):
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
Example #20
def resolve_config_files(plugin, release):
    '''
    Resolve configuration files and contexts

    :param plugin: shortname of plugin e.g. ovs
    :param release: openstack release codename
    :returns: dict of configuration files, contexts
              and associated services
    '''
    config_files = deepcopy(CONFIG_FILES)
    if plugin == OVS:
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = [NEUTRON_OVS_AGENT_CONF]
        if release >= 'mitaka':
            # ml2 -> ovs_agent
            drop_config = [NEUTRON_ML2_PLUGIN_CONF]

        for _config in drop_config:
            if _config in config_files[plugin]:
                config_files[plugin].pop(_config)

    if is_relation_made('amqp-nova'):
        amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                             rel_name='amqp-nova',
                                             relation_prefix='nova')
    else:
        amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                             rel_name='amqp')
    config_files[plugin][NOVA_CONF]['hook_contexts'].append(amqp_nova_ctxt)
    return config_files
Example #21
def upgrade_charm():
    emit_cephconf()
    apt_install(packages=filter_installed_packages(ceph.determine_packages()),
                fatal=True)
    try:
        # we defer and explicitly run `ceph-create-keys` from
        # add_keyring_to_ceph() as part of bootstrap process
        # LP: #1719436.
        service_pause('ceph-create-keys')
    except ValueError:
        pass
    ceph.update_monfs()
    mon_relation_joined()
    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()
    if not ceph.monitor_key_exists('admin', 'autotune'):
        autotune = config('pg-autotune')
        if (cmp_pkgrevno('ceph', '14.2.0') >= 0
                and (autotune == 'true' or autotune == 'auto')):
            ceph.monitor_key_set('admin', 'autotune', 'true')
        else:
            ceph.monitor_key_set('admin', 'autotune', 'false')

    # NOTE(jamespage):
    # Reprocess broker requests to ensure that any cephx
    # key permission changes are applied
    notify_client()
    notify_radosgws()
    notify_rbd_mirrors()
    notify_prometheus()
Example #22
def neutron_settings():
    neutron_settings = {}
    if is_relation_made('neutron-api', 'neutron-plugin'):
        neutron_api_info = NeutronAPIContext()()
        neutron_settings.update({
            # XXX: Rename these relation settings?
            'quantum_plugin': neutron_api_info['neutron_plugin'],
            'region': config('region'),
            'quantum_security_groups':
            neutron_api_info['neutron_security_groups'],
            'quantum_url': neutron_api_info['neutron_url'],
        })
    else:
        neutron_settings.update({
            # XXX: Rename these relation settings?
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                          str(api_port('neutron-server'))),
        })
    neutron_url = urlparse(neutron_settings['quantum_url'])
    neutron_settings['quantum_host'] = neutron_url.hostname
    neutron_settings['quantum_port'] = neutron_url.port
    return neutron_settings
Example #23
def guard_map():
    '''Map of services and required interfaces that must be present before
    the service should be allowed to start'''
    gmap = {}
    nova_services = deepcopy(BASE_SERVICES)
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')

    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')

    for svc in nova_services:
        gmap[svc] = nova_interfaces

    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        if network_manager() == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
Example #24
    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        os_codename = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(os_codename) >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'
        return {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        service: [
                            ('volume_backend_name', service),
                            ('volume_driver', volume_driver),
                            ('rbd_pool', service),
                            ('rbd_user', service),
                            ('rbd_secret_uuid', leader_get('secret-uuid')),
                            ('rbd_ceph_conf', ceph_config_file()),
                        ]
                    }
                }
            }
        }
Example #25
def peer_store_and_set(relation_id=None,
                       peer_relation_name='cluster',
                       peer_store_fatal=False,
                       relation_settings=None,
                       delimiter='_',
                       **kwargs):
    """Store passed-in arguments both in argument relation and in peer storage.

    It functions like doing relation_set() and peer_store() at the same time,
    with the same data.

    @param relation_id: the id of the relation to store the data on. Defaults
                        to the current relation.
    @param peer_store_fatal: If set to True, the function will raise an
                             exception should the peer storage not be
                             available."""

    relation_settings = relation_settings if relation_settings else {}
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings,
                 **kwargs)
    if is_relation_made(peer_relation_name):
        for key, value in six.iteritems(
                dict(list(kwargs.items()) + list(relation_settings.items()))):
            key_prefix = relation_id or current_relation_id()
            peer_store(key_prefix + delimiter + key,
                       value,
                       relation_name=peer_relation_name)
    else:
        if peer_store_fatal:
            raise ValueError('Unable to detect '
                             'peer relation {}'.format(peer_relation_name))
Example #26
def db_joined():
    if is_relation_made('pgsql-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))
    else:
        # Avoid churn check for access-network early
        access_network = None
        for unit in related_units():
            access_network = relation_get(unit=unit,
                                          attribute='access-network')
            if access_network:
                break
        host = get_relation_ip('shared-db', cidr_network=access_network)

        conf = config()
        relation_set(database=conf['database'],
                     username=conf['database-user'],
                     hostname=host)
Example #27
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # NOTE(freyes): all the nodes need to be marked as 'clustered' (LP: #1691510)
    rabbit.cluster_with()

    if not is_leader():
        update_nrpe_checks()
Example #28
def neutron_settings():
    neutron_settings = {}
    if is_relation_made('neutron-api', 'neutron-plugin'):
        neutron_api_info = NeutronAPIContext()()
        neutron_settings.update({
            # XXX: Rename these relation settings?
            'quantum_plugin': neutron_api_info['neutron_plugin'],
            'region': config('region'),
            'quantum_security_groups':
            neutron_api_info['neutron_security_groups'],
            'quantum_url': neutron_api_info['neutron_url'],
        })
    else:
        neutron_settings.update({
            # XXX: Rename these relation settings?
            'quantum_plugin': neutron_plugin(),
            'region': config('region'),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': "{}:{}".format(canonical_url(CONFIGS, INTERNAL),
                                          str(api_port('neutron-server'))),
        })
    neutron_url = urlparse(neutron_settings['quantum_url'])
    neutron_settings['quantum_host'] = neutron_url.hostname
    neutron_settings['quantum_port'] = neutron_url.port
    return neutron_settings
Example #29
    def test_is_relation_made_different_key(self, relation_get, related_units,
                                            relation_ids):
        relation_get.return_value = 'hostname'
        related_units.return_value = ['test/1']
        relation_ids.return_value = ['test:0']
        self.assertTrue(hookenv.is_relation_made('test', keys='auth'))
        relation_get.assert_called_with('auth', rid='test:0', unit='test/1')
Example #30
def resolve_config_files(plugin, release):
    '''
    Resolve configuration files and contexts

    :param plugin: shortname of plugin e.g. ovs
    :param release: openstack release codename
    :returns: dict of configuration files, contexts
              and associated services
    '''
    config_files = deepcopy(get_config_files())
    drop_config = []
    cmp_os_release = CompareOpenStackReleases(release)
    if plugin == OVS:
        # NOTE: deal with switch to ML2 plugin for >= icehouse
        drop_config = [NEUTRON_OVS_AGENT_CONF]
        if cmp_os_release >= 'mitaka':
            # ml2 -> ovs_agent
            drop_config = [NEUTRON_ML2_PLUGIN_CONF]

    # Use MAAS1.9 for MTU and external port config on xenial and above
    if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
        drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])

    # Rename to lbaasv2 in newton
    if cmp_os_release < 'newton':
        drop_config.extend([NEUTRON_LBAASV2_AA_PROFILE_PATH])
    else:
        drop_config.extend([NEUTRON_LBAAS_AA_PROFILE_PATH])

    # Drop lbaasv2 at train
    # or drop if disable-lbaas option is true
    if disable_neutron_lbaas():
        if cmp_os_release >= 'newton':
            drop_config.extend([
                NEUTRON_LBAASV2_AA_PROFILE_PATH,
                NEUTRON_LBAAS_AGENT_CONF,
            ])
        else:
            drop_config.extend([
                NEUTRON_LBAAS_AA_PROFILE_PATH,
                NEUTRON_LBAAS_AGENT_CONF,
            ])

    if disable_nova_metadata(cmp_os_release):
        drop_config.extend(get_nova_config_files().keys())
    else:
        if is_relation_made('amqp-nova'):
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp-nova',
                                                 relation_prefix='nova')
        else:
            amqp_nova_ctxt = context.AMQPContext(ssl_dir=NOVA_CONF_DIR,
                                                 rel_name='amqp')
        config_files[plugin][NOVA_CONF]['hook_contexts'].append(amqp_nova_ctxt)

    for _config in drop_config:
        if _config in config_files[plugin]:
            config_files[plugin].pop(_config)
    return config_files
Example #31
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                if config('fsid'):
                    fsid = config('fsid')
                else:
                    fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                status_set('maintenance', 'Creating FSID and Monitor Secret')
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                log("Settings for the cluster are: {}".format(opts))
                leader_set(opts)
        elif cfg.changed('no-bootstrap') and \
                is_relation_made('bootstrap-source'):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()
    elif leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()
Example #32
def config_changed():
    # if we are paused, delay doing any config changed hooks.  It is forced on
    # the resume.
    if is_unit_paused_set():
        return

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    clustered = len(hosts) > 1
    bootstrapped = is_bootstrapped()

    # NOTE: only configure the cluster if we have sufficient peers. This only
    # applies if min-cluster-size is provided and is used to avoid extraneous
    # configuration changes and premature bootstrapping as the cluster is
    # deployed.
    if is_sufficient_peers():
        try:
            # NOTE(jamespage): try with leadership election
            if is_leader():
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

        except NotImplementedError:
            # NOTE(jamespage): fallback to legacy behaviour.
            oldest = oldest_peer(peer_units())
            if oldest:
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

    # Notify any changes to the access network
    update_shared_db_rels()

    # (re)install pcmkr agent
    install_mysql_ocf()

    if relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined()

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
Example #33
def config_changed():
    if not config('action-managed-upgrade'):
        if openstack_upgrade_available('ceilometer-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)
    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
    CONFIGS.write_all()
Example #34
    def __call__(self):
        ctxt = {}
        if is_relation_made('zeromq-configuration', 'host'):
            for rid in relation_ids('zeromq-configuration'):
                for unit in related_units(rid):
                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
                    ctxt['zmq_host'] = relation_get('host', unit, rid)
        return ctxt
Example #35
def update_status():
    if is_relation_made("rest"):
        if service_running(service_name):
            status_set(status_active, msg_service_running)
        else:
            status_set(status_maintenance, msg_service_stopped)
    else:
        status_set(status_blocked, msg_missing_es_rel)
Example #36
    def test_is_relation_made(self, relation_get, related_units, relation_ids):
        relation_get.return_value = 'hostname'
        related_units.return_value = ['test/1']
        relation_ids.return_value = ['test:0']
        self.assertTrue(hookenv.is_relation_made('test'))
        relation_get.assert_called_with('private-address',
                                        rid='test:0',
                                        unit='test/1')
Example #37
def pgsql_db_joined(relation_id=None):
    if is_relation_made('shared-db'):
        # raise error
        e = ('Attempting to associate a postgresql database when there'
             ' is already associated a mysql one')
        log(e, level=ERROR)
        raise Exception(e)
    relation_set(database=config('database'), relation_id=relation_id)
Example #38
def pgsql_db_joined():
    if is_relation_made("shared-db"):
        # raise error
        e = "Attempting to associate a postgresql database when there" " is already associated a mysql one"
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(database=config("database"))
Example #39
def assess_status():
    '''Assess status of current unit'''
    application_version_set(get_upstream_version(VERSION_PACKAGE))
    if is_unit_upgrading_set():
        status_set(
            "blocked", "Ready for do-release-upgrade and reboot. "
            "Set complete when finished.")
        return

    # Check that the no-bootstrap config option is set in conjunction with
    # having the bootstrap-source relation established
    if not config('no-bootstrap') and is_relation_made('bootstrap-source'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    moncount = int(config('monitor-count'))
    units = get_peer_units()
    # not enough peers and mon_count > 1
    if len(units.keys()) < moncount:
        status_set(
            'blocked', 'Insufficient peer units to bootstrap'
            ' cluster (require {})'.format(moncount))
        return

    # mon_count > 1, peers, but no ceph-public-address
    ready = sum(1 for unit_ready in units.values() if unit_ready)
    if ready < moncount:
        status_set('waiting', 'Peer units detected, waiting for addresses')
        return

    configured_rbd_features = config('default-rbd-features')
    if has_rbd_mirrors() and configured_rbd_features:
        if add_rbd_mirror_features(
                configured_rbd_features) != configured_rbd_features:
            # The configured RBD features bitmap does not contain the features
            # required for RBD Mirroring
            status_set(
                'blocked', 'Configuration mismatch: RBD Mirroring '
                'enabled but incorrect value set for '
                '``default-rbd-features``')
            return

    # active - bootstrapped + quorum status check
    if ceph.is_bootstrapped() and ceph.is_quorum():
        expected_osd_count = config('expected-osd-count') or 3
        if sufficient_osds(expected_osd_count):
            status_set('active', 'Unit is ready and clustered')
        else:
            status_set(
                'waiting', 'Monitor bootstrapped but waiting for number of'
                ' OSDs to reach expected-osd-count ({})'.format(
                    expected_osd_count))
    else:
        # Unit should be running and clustered, but no quorum
        # TODO: should this be blocked or waiting?
        status_set('blocked', 'Unit not clustered (no quorum)')
Example #40
def config_changed():
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        assert_charm_supports_ipv6()

    global CONFIGS
    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('nova-common'):
            status_set('maintenance', 'Running openstack upgrade')
            do_openstack_upgrade(CONFIGS)

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-nova-compute.conf')

    if migration_enabled() and config('migration-auth-type') == 'ssh':
        # Check-in with nova-c-c and register new ssh key, if it has just been
        # generated.
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys()
        import_authorized_keys()

    if config('enable-resize') is True:
        enable_shell(user='******')
        status_set('maintenance', 'SSH key exchange')
        initialize_ssh_keys(user='******')
        import_authorized_keys(user='******', prefix='nova')
    else:
        disable_shell(user='******')

    if config('instances-path') is not None:
        fp = config('instances-path')
        fix_path_ownership(fp, user='******')

    [compute_joined(rid) for rid in relation_ids('cloud-compute')]
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)

    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(rid)

    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()

    if config('hugepages'):
        install_hugepages()

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                ceph_changed(rid=rid, unit=unit)

    CONFIGS.write_all()
Example #41
    def __call__(self):
        ctxt = {}
        if is_relation_made('zeromq-configuration', 'host'):
            for rid in relation_ids('zeromq-configuration'):
                for unit in related_units(rid):
                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
                    ctxt['zmq_host'] = relation_get('host', unit, rid)

        return ctxt
Example #42
def pgsql_nova_db_joined():
    if is_relation_made('shared-db'):
        # raise error
        e = ('Attempting to associate a postgresql database'
             ' when there is already associated a mysql one')
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(database=config('database'))
Example #43
def upgrade_charm():
    # NOTE: ensure psutil install for hugepages configuration
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(['python-psutil']))
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
Example #44
def upgrade_charm():
    # NOTE: ensure psutil install for hugepages configuration
    status_set('maintenance', 'Installing apt packages')
    apt_install(filter_installed_packages(['python-psutil']))
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
Example #45
def db_joined():
    if is_relation_made('mysql-db'):
        # error, mysql is used
        e = ('Attempting to associate a mongo database when there is already '
             'associated a mysql one')
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(ceilometer_database=CEILOMETER_DB)
Example #46
def pgsql_db_joined():
    if is_relation_made('shared-db'):
        # raise error
        e = ('Attempting to associate a postgresql database when'
             ' there is already associated a mysql one')
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(**{'database': config('database'),
                    'private-address': get_relation_ip('psql-db')})
Example #47
def config_changed():
    """
    Only run the config_changed ansible playbook tags if elasticsearch
    is present in the bundle
    """
    if is_relation_made("elasticsearch"):
        status_set(status_maintenance, msg_config_changed)
        apply_playbook(playbook, tags=['config_changed'])
    else:
        status_set(status_blocked, msg_missing_es_rel)
Example #48
def config_changed():

    if config('prefer-ipv6'):
        rabbit.assert_charm_supports_ipv6()

    # Add archive source if provided
    add_source(config('source'), config('key'))
    apt_update(fatal=True)
    # Copy in defaults file for updated ulimits
    shutil.copyfile(
        'templates/rabbitmq-server',
        '/etc/default/rabbitmq-server')
    # Install packages to ensure any changes to source
    # result in an upgrade if applicable.
    status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
    apt_install(rabbit.PACKAGES, fatal=True)

    open_port(5672)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    configure_nodename()

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(55672)
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(55672)

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))
    rabbit.ConfigRenderer(
        rabbit.CONFIG_FILES).write_all()

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")

        if ha_is_active_active:
            update_nrpe_checks()
        else:
            if is_elected_leader('res_rabbitmq_vip'):
                update_nrpe_checks()
            else:
                log("hacluster relation is present but this node is not active"
                    " skipping update nrpe checks")
    else:
        update_nrpe_checks()

    # NOTE(jamespage)
    # trigger amqp_changed to pickup and changes to network
    # configuration via the access-network config option.
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #49
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set():
        log("Unit is pause or upgrading. Skipping config_changed", "WARN")
        return

    apt_install(filter_installed_packages(get_packages()), fatal=True)
    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
    CONFIGS.write_all()
Example #50
def db_joined(relation_id=None):
    if is_relation_made('pgsql-db'):
        # raise error
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)
    relation_set(username=config('database-user'),
                 database=config('database'),
                 hostname=unit_get('private-address'),
                 relation_id=relation_id)
Example #51
def db_joined(relation_id=None):
    if is_relation_made('pgsql-db'):
        # raise error
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)
    relation_set(username=config('database-user'),
                 database=config('database'),
                 hostname=unit_get('private-address'),
                 relation_id=relation_id)
Example #52
def db_joined(rid=None):
    if is_relation_made('pgsql-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    relation_set(relation_id=rid,
                 nova_database=config('database'),
                 nova_username=config('database-user'),
                 nova_hostname=get_relation_ip('shared-db'))
Example #53
def amqp_changed():
    if 'amqp' not in CONFIGS.complete_contexts():
        log('amqp relation incomplete. Peer not ready?')
        return
    CONFIGS.write(NOVA_CONF)
    if not is_relation_made('neutron-api'):
        if network_manager() == 'quantum':
            CONFIGS.write(QUANTUM_CONF)
        if network_manager() == 'neutron':
            CONFIGS.write(NEUTRON_CONF)
    [nova_cell_relation_joined(rid=rid)
        for rid in relation_ids('cell')]
Example #54
    def __call__(self):
        """
        Used to generate template context to be added to glance-api.conf in
        the presence of a ceph relation.
        """
        if not is_relation_made(relation="ceph", keys="key"):
            return {}
        service = service_name()
        return {
            # ensure_ceph_pool() creates pool based on service name.
            "rbd_pool": service,
            "rbd_user": service,
        }
Example #55
    def __call__(self):
        """Used to generate template context to be added to glance-api.conf in
        the presence of a ceph relation.
        """
        if not is_relation_made(relation="ceph",
                                keys="key"):
            return {}
        service = service_name()
        return {
            # pool created based on service name.
            'rbd_pool': service,
            'rbd_user': service,
        }
Example #56
def cluster_changed():
    # Future travelers beware ordering is significant
    rdata = relation_get()
    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    rdata = relation_get()
    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    if not is_sufficient_peers():
        log('Not enough peers, waiting until leader is configured',
            level=INFO)
        return

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # cluster with node?
    try:
        if not is_leader():
            rabbit.cluster_with()
            update_nrpe_checks()
    except NotImplementedError:
        if is_newer():
            rabbit.cluster_with()
            update_nrpe_checks()

    # If cluster has changed peer db may have changed so run amqp_changed
    # to sync any changes
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #57
def mysql_db_joined():
    if is_relation_made('shared-db'):
        # error, mongo is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a mongo one')
        log(e, level=ERROR)
        raise Exception(e)

    host = unit_get('private-address')
    conf = config()
    relation_set(database=conf['database'],
                 username=conf['database-user'],
                 hostname=host)

    subprocess.call(['ceilometer-dbsync'])
Example #58
def db_joined():
    if is_relation_made('pgsql-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))
    else:
        relation_set(database=config('database'),
                     username=config('database-user'),
                     hostname=unit_get('private-address'))