def nm_changed():
    CONFIGS.write_all()
    if relation_get('ca_cert'):
        ca_crt = b64decode(relation_get('ca_cert'))
        install_ca_cert(ca_crt)

    if config('ha-legacy-mode'):
        cache_env_data()

    # Disable nova metadata if possible.
    if disable_nova_metadata():
        remove_legacy_nova_metadata()
    else:
        # NOTE: nova-api-metadata needs to be restarted
        #       once the nova-conductor is up and running
        #       on the nova-cc units.
        restart_nonce = relation_get('restart_trigger')
        if restart_nonce is not None:
            db = kv()
            previous_nonce = db.get('restart_nonce')
            if previous_nonce != restart_nonce:
                if not is_unit_paused_set():
                    service_restart('nova-api-metadata')
                db.set('restart_nonce', restart_nonce)
                db.flush()
Example #2
File: hooks.py Project: Lukasa/etcd-charm
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
            cmd = "/opt/etcd/etcdctl -C http://{}:4001 member add {}" \
                  " http://{}:7001".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.conf.jinja2', '/etc/init/etcd.conf',
                      cluster_data, owner='root', group='root')

    host.service('restart', 'etcd')
Example #3
File: hooks.py Project: Lukasa/etcd-charm
def cluster_relation_changed():
    cluster_data = {}
    # Useful when doing runtime-based configuration (units added after
    # cluster bootstrap); see docs:
    # https://github.com/coreos/etcd/blob/master/Documentation/runtime-configuration.md
    if leader_status:
        token = cluster_token()
        print('Initializing cluster with {}'.format(token))
        hookenv.relation_set(hookenv.relation_id(),
                             {'leader-address': private_address,
                              'cluster-state': 'existing',
                              'cluster-token': token,
                              'cluster': cluster_string()})
        cluster_data['cluster'] = cluster_string()

    if not leader_status:
        # A token is only generated once on a cluster.
        token = hookenv.relation_get('cluster-token')
        cluster_data['cluster'] = hookenv.relation_get('cluster')

    if not token:
        print "No token available on relationship - exiting"
        return
    cluster_data['token'] = token
    main(cluster_data)
def get_cluster_id():
    """ Return cluster id (lp1776171)

    Return cluster ID for MySQL asynchronous replication
    :returns: int cluster_id
    """
    if not config('cluster-id'):
        msg = ("Master / Slave relation requires 'cluster-id' option")
        status_set("blocked", msg)
        raise ClusterIDRequired(msg)
    cluster_id = config('cluster-id')
    for rid in relation_ids('master'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    for rid in relation_ids('slave'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    return cluster_id
def provision_vrouter():
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def provision_local_metadata():
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password = [ (relation_get("service_username", unit, rid),
                        relation_get("service_password", unit, rid))
                       for rid in relation_ids("identity-admin")
                       for unit in related_units(rid) ][0]
    log("Provisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password])
def is_bootstrapped():
    """Determine if each node in the cluster has been bootstrapped and the
    cluster is complete with the expected number of peers.

    Check that each node in the cluster, including this one, has set
    bootstrap-uuid on the cluster relation.

    Having min-cluster-size set will guarantee is_bootstrapped will not
    return True until the expected number of peers are bootstrapped. If
    min-cluster-size is not set, it will check peer relations to estimate the
    expected cluster size. If min-cluster-size is not set and there are no
    peers it must assume the cluster is bootstrapped in order to allow for
    single unit deployments.

    @returns boolean
    """
    min_size = get_min_cluster_size()
    if not is_sufficient_peers():
        return False
    elif min_size > 1:
        uuids = []
        for relation_id in relation_ids('cluster'):
            units = related_units(relation_id) or []
            units.append(local_unit())
            for unit in units:
                if not relation_get(attribute='bootstrap-uuid',
                                    rid=relation_id,
                                    unit=unit):
                    log("{} is not yet clustered".format(unit),
                        DEBUG)
                    return False
                else:
                    bootstrap_uuid = relation_get(attribute='bootstrap-uuid',
                                                  rid=relation_id,
                                                  unit=unit)
                    if bootstrap_uuid:
                        uuids.append(bootstrap_uuid)

        if len(uuids) < min_size:
            log("Fewer than minimum cluster size: "
                "{} percona units reporting clustered".format(min_size),
                DEBUG)
            return False
        elif len(set(uuids)) > 1:
            raise Exception("Found inconsistent bootstrap uuids: "
                            "{}".format((uuids)))
        else:
            log("All {} percona units reporting clustered".format(min_size),
                DEBUG)
    elif not seeded():
        # Single unit deployment but not yet bootstrapped
        return False

    # Set INITIAL_CLUSTERED_KEY as the cluster has fully bootstrapped
    kvstore = kv()
    if not kvstore.get(INITIAL_CLUSTERED_KEY, False):
        kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
        kvstore.flush()

    return True
Example #8
    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt
Example #9
    def __call__(self):
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log('Could not generate shared_db context. '
                'Missing required charm config options. '
                '(database name and user)')
            raise OSContextError
        ctxt = {}

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    'database_host': relation_get('db_host', rid=rid,
                                                  unit=unit),
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
Example #10
    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config('use-syslog')).lower()
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
                'auth': auth,
                'key': key,
                'use_syslog': use_syslog}

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
def collector_relation_changed():
    host = relation_get("hostname")
    port = relation_get("port")

    # Bail out early if the relation has not yet provided both values
    if host is None or port is None:
        log("host or port is none")
        return
    else:
        relation_data = relations_of_type("ceph")
        if not relation_data:
            return
        try:
            hostname = subprocess.check_output(["hostname", "-f"]).replace(".", "_").rstrip("\n")
            relation = relation_data[0]["__unit__"]
            unit_tag = "unit-{0}".format(relation.replace("/", "-"))
            log("unit_tag: " + str(unit_tag))
            root_key = "{unit_tag}.{hostname}.ceph".format(unit_tag=unit_tag, hostname=hostname)

            carbon = {"host": host, "port": port, "root_key": root_key}

            update_service_config(service_dict={"outputs": ["carbon"], "carbon": carbon})
            restart()
        except subprocess.CalledProcessError as err:
            log("Service restart failed with err: " + err.message)
Example #12
    def __call__(self):
        for rid in relation_ids('quantum-network-service'):
            for unit in related_units(rid):
                ctxt = {
                    'keystone_host': relation_get('keystone_host',
                                                  rid=rid, unit=unit),
                    'service_port': relation_get('service_port', rid=rid,
                                                 unit=unit),
                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
                    'service_tenant': relation_get('service_tenant',
                                                   rid=rid, unit=unit),
                    'service_username': relation_get('service_username',
                                                     rid=rid, unit=unit),
                    'service_password': relation_get('service_password',
                                                     rid=rid, unit=unit),
                    'quantum_host': relation_get('quantum_host',
                                                 rid=rid, unit=unit),
                    'quantum_port': relation_get('quantum_port',
                                                 rid=rid, unit=unit),
                    'quantum_url': relation_get('quantum_url',
                                                rid=rid, unit=unit),
                    'region': relation_get('region',
                                           rid=rid, unit=unit),
                    # XXX: Hard-coded http.
                    'service_protocol': 'http',
                    'auth_protocol': 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
def ssh_compute_add(public_key, rid=None, unit=None, user=None):
    # If remote compute node hands us a hostname, ensure we have a
    # known hosts entry for its IP, hostname and FQDN.
    private_address = relation_get(rid=rid, unit=unit,
                                   attribute='private-address')
    hosts = [private_address]

    if not is_ipv6(private_address):
        if relation_get('hostname'):
            hosts.append(relation_get('hostname'))

        if not is_ip(private_address):
            hosts.append(get_host_ip(private_address))
            hosts.append(private_address.split('.')[0])
        else:
            hn = get_hostname(private_address)
            hosts.append(hn)
            hosts.append(hn.split('.')[0])

    for host in list(set(hosts)):
        add_known_host(host, unit, user)

    if not ssh_authorized_key_exists(public_key, unit, user):
        log('Saving SSH authorized key for compute host at %s.' %
            private_address)
        add_authorized_key(public_key, unit, user)
Example #14
def ceph_changed():
    if 'ceph' not in CONFIGS.complete_contexts():
        log('ceph relation incomplete. Peer not ready?')
        return

    if not ensure_ceph_keyring(service=service_name(), user='******',
                               group='nova'):
        log('Could not create ceph keyring: peer not ready?')
        return

    CONFIGS.write(ceph_config_file())
    CONFIGS.write(CEPH_SECRET)
    CONFIGS.write(NOVA_CONF)

    # With some refactoring, this can move into NovaComputeCephContext
    # and allow easily extended to support other compute flavors.
    if config('virt-type') in ['kvm', 'qemu', 'lxc'] and relation_get('key'):
        create_libvirt_secret(secret_file=CEPH_SECRET,
                              secret_uuid=CEPH_SECRET_UUID,
                              key=relation_get('key'))

    if (config('libvirt-image-backend') == 'rbd' and
            assert_libvirt_imagebackend_allowed()):
        if is_request_complete(get_ceph_request()):
            log('Request complete')
            # Ensure that nova-compute is restarted since only now can we
            # guarantee that ceph resources are ready.
            service_restart('nova-compute')
        else:
            send_request_if_needed(get_ceph_request())
Example #15
def master_state_relation_changed():
    if not relation_get('identity'):
        juju_log('Relationship with slave-state not yet complete')
        return
    process_clustered_data()
    state = relation_get('state')
    if check_all_clustered_nodes(state):
        if state == 'install':
            status_set('maintenance', 'Installing grid')
            if install_grid():
                install_root_scripts()
                send_notification("master-state", "cluster")
        elif state == 'clustered':
            if install_db():
                status_set('maintenance', 'Installing Database')
                install_db_root_scripts()
                send_notification("master-state", "database")
        elif state == 'final':
                send_notification("master-state", "final")
                set_oracle_env()
                create_db()
                configure_database()
                for rid in relation_ids('oracle'):
                    oracle_relation_changed(relation_id=rid)
                juju_log("Oracle Rac 12C installation is succeeded on master")
                status_set('active', 'Unit is ready')
Example #16
def domain_backend_changed(relation_id=None, unit=None):
    if get_api_version() < 3:
        log('Domain specific backend identity configuration only supported '
            'with Keystone v3 API, skipping domain creation and '
            'restart.')
        return

    domain_name = relation_get(attribute='domain-name',
                               unit=unit,
                               rid=relation_id)
    if domain_name:
        # NOTE(jamespage): Only create domain data from lead
        #                  unit when clustered and database
        #                  is configured and created.
        if is_leader() and is_db_ready() and is_db_initialised():
            create_or_show_domain(domain_name)
        # NOTE(jamespage): Deployment may have multiple domains,
        #                  with different identity backends so
        #                  ensure that a domain specific nonce
        #                  is checked for restarts of keystone
        restart_nonce = relation_get(attribute='restart-nonce',
                                     unit=unit,
                                     rid=relation_id)
        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
        db = unitdata.kv()
        if restart_nonce != db.get(domain_nonce_key):
            if not is_unit_paused_set():
                service_restart(keystone_service())
            db.set(domain_nonce_key, restart_nonce)
            db.flush()
def ssh_authorized_peers(peer_interface, user, group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = hook_name()
    if hook == '%s-relation-joined' % peer_interface:
        relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []

        for r_id in relation_ids(peer_interface):
            for unit in related_units(r_id):
                ssh_pub_key = relation_get('ssh_pub_key',
                                           rid=r_id,
                                           unit=unit)
                priv_addr = relation_get('private-address',
                                         rid=r_id,
                                         unit=unit)
                if ssh_pub_key:
                    keys.append(ssh_pub_key)
                    hosts.append(priv_addr)
                else:
                    log('ssh_authorized_peers(): ssh_pub_key '
                        'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        relation_set(ssh_authorized_hosts=authed_hosts)
def check_local_db_actions_complete():
    """Check if we have received db init'd notification and restart services
    if we have not already.

    NOTE: this must only be called from peer relation context.
    """
    if not is_db_initialised():
        return

    settings = relation_get() or {}
    if settings:
        init_id = settings.get(NEUTRON_DB_INIT_RKEY)
        echoed_init_id = relation_get(unit=local_unit(),
                                      attribute=NEUTRON_DB_INIT_ECHO_RKEY)

        # If we have received an init notification from a peer unit
        # (assumed to be the leader) then restart neutron-api and echo the
        # notification and don't restart again unless we receive a new
        # (different) notification.
        if is_new_dbinit_notification(init_id, echoed_init_id):
            if not is_unit_paused_set():
                log("Restarting neutron services following db "
                    "initialisation", level=DEBUG)
                service_restart('neutron-server')

            # Echo notification
            relation_set(**{NEUTRON_DB_INIT_ECHO_RKEY: init_id})
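is_new_dbinit_notification() is referenced above but not shown on this page; a minimal sketch, assuming it only needs to compare the received nonce with the one this unit last echoed:

def is_new_dbinit_notification(init_id, echoed_init_id):
    # A notification is new if a nonce was received and it differs from the
    # nonce this unit has already echoed back to the peer.
    return bool(init_id) and init_id != echoed_init_id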
Example #19
    def _coordinator_context(self):
        """Attempt to create a usable tooz coordinator URL from zk or memcache

        This'll see if we have zookeeper or memcached relations and use
        whichever is found as the coordinator. Note memcached is only for
        testing and zookeeper will be preferred if both are found.
        """

        # NOTE: Neither the zookeeper or memcache charms do any kind of
        # clustering of peers, so we just look for one that tells us its
        # port and point at that.
        zk_relation_ids = relation_ids('zookeeper')
        for rid in zk_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                zk_port = rel_data.get('port')
                zk_addr = rel_data.get('private-address')
                if zk_port:
                    url = 'kazoo://%s:%s?timeout=5' % (zk_addr, zk_port)
                    log('Using zookeeper @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        memcached_relation_ids = relation_ids('cache')
        for rid in memcached_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                mc_port = rel_data.get('tcp-port')
                mc_addr = rel_data.get('private-address')
                if mc_port:
                    url = 'memcached://%s:%s' % (mc_addr, mc_port)
                    log('Using memcached @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        log('no astara coordination relation data found')
        return {}
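The coordination_url built above is ultimately consumed by tooz; a minimal sketch of that consumer side, assuming the standard tooz API (the function name and member id here are illustrative, not part of the charm):

from tooz import coordination

def start_coordinator(coordination_url, member_id=b'astara-orchestrator'):
    # Create and start a tooz coordinator from the URL built above.
    coordinator = coordination.get_coordinator(coordination_url, member_id)
    coordinator.start()
    return coordinator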
Example #20
def admin_credentials():
    """fetches admin credentials either from charm config or remote jenkins
    service"""

    for rid in relation_ids('jenkins-configurator'):
        admin_user = None
        admin_cred = None
        for unit in related_units(rid):
            jenkins_admin_user = relation_get('jenkins-admin-user',
                                              rid=rid, unit=unit)
            jenkins_token = relation_get('jenkins-token',
                                         rid=rid, unit=unit)
            if (jenkins_admin_user and jenkins_token) and '' not in \
               [jenkins_admin_user, jenkins_token]:
                log(('Configuring Jenkins credentials '
                     'from charm configuration.'))
                return jenkins_admin_user, jenkins_token

            admin_user = relation_get('admin_username', rid=rid, unit=unit)
            admin_cred = relation_get('admin_password', rid=rid, unit=unit)
            if (admin_user and admin_cred) and \
               '' not in [admin_user, admin_cred]:
                log('Configuring Jenkins credentials from Jenkins relation.')
                return (admin_user, admin_cred)

    return (None, None)
Example #21
def ceph_relation_changed():
    # Request that pools be created
    rq = CephBrokerRq()
    rq.add_op_create_pool(name="preserve_data",
                          replica_count=3,
                          weight=None)
    if is_request_complete(rq, relation='mon'):
        log('Broker request complete', level=DEBUG)
        public_addr = relation_get('ceph-public-address')
        auth = relation_get('auth')
        key = relation_get('key')
        if key and auth and public_addr:
            mon_hosts = get_mon_hosts()
            context = {
                'auth_supported': auth,
                'mon_hosts': ' '.join(mon_hosts),
                'use_syslog': 'true',
                'loglevel': config('loglevel'),
            }
            emit_cephconf(ceph_context=context)
            write_config(config_file_name='ceph.json', contents={
                'config_file': '/etc/ceph/ceph.conf',
                'user_id': 'preserve',
                'data_pool': 'preserve_data',
            })
            write_cephx_key(key)
            setup_backup_cron()
    else:
        send_request_if_needed(rq, relation='mon')
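get_mon_hosts() is referenced above but not shown on this page. A minimal sketch, assuming the monitors publish ceph-public-address (falling back to private-address) on the 'mon' relation and listen on the default 6789 port:

def get_mon_hosts():
    # Collect monitor addresses from every unit on the 'mon' relation.
    hosts = []
    for rid in relation_ids('mon'):
        for unit in related_units(rid):
            addr = (relation_get('ceph-public-address', rid=rid, unit=unit) or
                    relation_get('private-address', rid=rid, unit=unit))
            if addr:
                hosts.append('{}:6789'.format(addr))
    return sorted(hosts)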
    def __call__(self):
        self.database = self.database or config("database")
        self.user = self.user or config("database-user")
        if None in [self.database, self.user]:
            log(
                "Could not generate shared_db context. "
                "Missing required charm config options. "
                "(database name and user)"
            )
            raise OSContextError
        ctxt = {}

        password_setting = "password"
        if self.relation_prefix:
            password_setting = self.relation_prefix + "_password"

        for rid in relation_ids("shared-db"):
            for unit in related_units(rid):
                passwd = relation_get(password_setting, rid=rid, unit=unit)
                ctxt = {
                    "database_host": relation_get("db_host", rid=rid, unit=unit),
                    "database": self.database,
                    "database_user": self.user,
                    "database_password": passwd,
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
Example #23
def process_data():
    identity = ""
    data = OrderedDict()
    all_strings = {}

    for rid in relation_ids('master'):
        for unit in related_units(rid):
            identity = relation_get(attribute='identity', unit=unit, rid=rid)
            raw_private_string = relation_get(attribute='private-string',
                                              unit=unit, rid=rid)
            raw_public_string = relation_get(attribute='public-string',
                                             unit=unit, rid=rid)
            raw_vip_string = relation_get(attribute='vip-string',
                                          unit=unit, rid=rid)
            raw_ssh_key = relation_get(attribute='host-ssh-key',
                                       unit=unit, rid=rid)

            juju_log('Relation confirmed from {}'.format(identity))

            if identity:
                data = json.load(open(NODE_DATA_FILE))
                if identity in data.keys():
                    pass
                else:
                    all_strings['private'] = pickle.loads(raw_private_string)
                    all_strings['public'] = pickle.loads(raw_public_string)
                    all_strings['vip'] = pickle.loads(raw_vip_string)
                    all_strings['ssh_pub_key'] = pickle.loads(raw_ssh_key)
                    data[identity] = all_strings

                    juju_log('Storing node {} data {}'.format(identity,
                                                              data[identity]))
                    json.dump(data, open(NODE_DATA_FILE, 'w'))
Example #24
    def get_allowed_units(database, username):
        allowed_units = set()
        for relid in hookenv.relation_ids('shared-db'):
            for unit in hookenv.related_units(relid):
                attr = "%s_%s" % (database, 'hostname')
                hosts = hookenv.relation_get(attribute=attr, unit=unit,
                                             rid=relid)
                if not hosts:
                    hosts = [hookenv.relation_get(attribute='private-address',
                                                  unit=unit, rid=relid)]
                else:
                    # hostname can be json-encoded list of hostnames
                    try:
                        hosts = json.loads(hosts)
                    except ValueError:
                        pass

                if not isinstance(hosts, list):
                    hosts = [hosts]

                if hosts:
                    for host in hosts:
                        utils.juju_log('INFO', "Checking host '%s' grant" %
                                       (host))
                        if grant_exists(database, username, host):
                            if unit not in allowed_units:
                                allowed_units.add(unit)
                else:
                    utils.juju_log('INFO', "No hosts found for grant check")

        return allowed_units
    def __call__(self):
        """This generates context for /etc/ceph/ceph.conf templates"""
        if not relation_ids("ceph"):
            return {}
        log("Generating template context for ceph")
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids("ceph"):
            for unit in related_units(rid):
                mon_hosts.append(relation_get("private-address", rid=rid, unit=unit))
                auth = relation_get("auth", rid=rid, unit=unit)
                key = relation_get("key", rid=rid, unit=unit)

        ctxt = {"mon_hosts": " ".join(mon_hosts), "auth": auth, "key": key}

        if not os.path.isdir("/etc/ceph"):
            os.mkdir("/etc/ceph")

        if not context_complete(ctxt):
            return {}

        ensure_packages(["ceph-common"])

        return ctxt
Example #26
def main(cluster_data={}):

    # Grab the boilerplate config entries
    cluster_data['unit_name'] = environ['JUJU_UNIT_NAME'].replace('/', '')
    cluster_data['private_address'] = private_address
    cluster_data['public_address'] = public_address
    cluster_data['cluster_state'] = 'new'

    if not leader_status:
        cluster_data['cluster_state'] = hookenv.relation_get('cluster-state')
        leader_address = hookenv.relation_get('leader-address')

        # do self registration
        if not db.get('registered'):
            cmd = "etcdctl -C http://{}:2379 member add {}" \
                  " http://{}:2380".format(leader_address,
                                           cluster_data['unit_name'],
                                           private_address)
            print(cmd)
            check_call(shlex.split(cmd))
            db.set('registered', True)

    # introspect the cluster, and form the cluster string.
    # https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#-initial-cluster

    templating.render('etcd.default.jinja2', '/etc/default/etcd',
                      cluster_data, owner='root', group='root')

    host.service('stop', 'etcd')
    check_output(['rm', '-Rf', '/var/lib/etcd/default'])
    host.service('start', 'etcd')
    if leader_status:
        status_set('active', 'Etcd leader running')
    else:
        status_set('active', 'Etcd follower running')
Example #27
    def get_data(self):
        peers = []
        peers6 = []

        for rid in hookenv.relation_ids(self.name):
            for unit in hookenv.related_units(rid):
                rel = hookenv.relation_get(attribute='addr',
                                           rid=rid,
                                           unit=unit)

                if rel is not None:
                    addr = resolve_domain_name(rel)

                    if addr:
                        peers.append(addr)

                rel6 = hookenv.relation_get(attribute='addr6',
                                            rid=rid,
                                            unit=unit)

                if rel6 is not None:
                    peers6.append(rel6)

        self['bgp_peers'] = peers
        self['bgp_peers6'] = peers6
        self['router_id'] = router_id()

        return
Example #28
def storage_changed():
    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': openstack.get_host_ip(relation_get('private-address')),
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }
    if None in node_settings.itervalues():
        log('storage_changed: Relation not ready.')
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # allow for multiple devs per unit, passed along as a : separated list
    devs = relation_get('device').split(':')
    for dev in devs:
        node_settings['device'] = dev
        for ring in SWIFT_RINGS.itervalues():
            if not exists_in_ring(ring, node_settings):
                add_to_ring(ring, node_settings)

    if should_balance([r for r in SWIFT_RINGS.itervalues()]):
        balance_rings()
def provision_control():
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                      port)
                     for rid in relation_ids("contrail-api")
                     for unit, port in
                     ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                     if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Example #30
    def __call__(self):
        if not relation_ids("ceph"):
            return {}

        log("Generating template context for ceph", level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config("use-syslog")).lower()
        for rid in relation_ids("ceph"):
            for unit in related_units(rid):
                auth = relation_get("auth", rid=rid, unit=unit)
                key = relation_get("key", rid=rid, unit=unit)
                ceph_pub_addr = relation_get("ceph-public-address", rid=rid, unit=unit)
                unit_priv_addr = relation_get("private-address", rid=rid, unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {"mon_hosts": " ".join(sorted(mon_hosts)), "auth": auth, "key": key, "use_syslog": use_syslog}

        if not os.path.isdir("/etc/ceph"):
            os.mkdir("/etc/ceph")

        if not context_complete(ctxt):
            return {}

        ensure_packages(["ceph-common"])
        return ctxt
Example #31
def all_relations(relation_types=CLIENT_RELATION_TYPES):
    for reltype in relation_types:
        for relid in hookenv.relation_ids(reltype):
            for unit in hookenv.related_units(relid):
                yield reltype, relid, unit, hookenv.relation_get(unit=unit,
                                                                 rid=relid)
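A short usage sketch (hypothetical function and log format, assuming hookenv is imported as in the snippet above) showing how the generator might be consumed:

def log_client_relations():
    # Walk every unit on every client relation and log its settings.
    for reltype, relid, unit, settings in all_relations():
        hookenv.log('{} {} {}: {}'.format(reltype, relid, unit, settings))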
Example #32
def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit={} (local is leader)".format(remote_unit()),
        level=DEBUG)

    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    rx_ack_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    tx_rq_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    rx_leader_changed = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if rx_leader_changed:
        log(
            "Leader change notification received and this is leader so "
            "retrying sync.",
            level=INFO)
        # FIXME: check that we were previously part of a successful sync to
        #        ensure we have good rings.
        cluster_sync_rings(peers_only=tx_settings.get('peers-only', False),
                           token=rx_leader_changed)
        return

    rx_resync_request = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_REQUEST_RESYNC)
    resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
    tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
    if rx_resync_request and tx_resync_request_ack != rx_resync_request:
        log("Unit '{}' has requested a resync".format(remote_unit()),
            level=INFO)
        cluster_sync_rings(peers_only=True)
        relation_set(**{resync_request_ack_key: rx_resync_request})
        return

    # If we have received an ack token, ensure it is not associated with a
    # request we received from another peer. If it is, this would indicate
    # a leadership change during a sync, and this unit will abort the sync or
    # attempt to restore the original leader so as to be able to complete the
    # sync.

    if rx_ack_token and rx_ack_token == tx_rq_token:
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if is_all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'{}'".format(key))
                raise SwiftProxyCharmException(msg)

            peers_only = bool(
                get_first_available_value(responses, key, default=0))
            log("Syncing rings and builders (peers-only={})".format(
                peers_only),
                level=DEBUG)
            broadcast_rings_available(broker_token=rx_ack_token,
                                      storage=not peers_only)
        else:
            key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
            acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (current='{}', token='{}')".format(acks, tx_ack_token),
                level=INFO)
    elif ((rx_ack_token and (rx_ack_token == tx_ack_token))
          or (rx_rq_token and (rx_rq_token == rx_ack_token))):
        log(
            "It appears that the cluster leader has changed mid-sync - "
            "stopping proxy service",
            level=WARNING)
        service_stop('swift-proxy')
        broker = rx_settings.get('builder-broker')
        if broker:
            # If we get here, manual intervention will be required in order
            # to restore the cluster.
            raise SwiftProxyCharmException(
                "Failed to restore previous broker '{}' as leader".format(
                    broker))
        else:
            raise SwiftProxyCharmException(
                "No builder-broker on rx_settings relation from '{}' - "
                "unable to attempt leader restore".format(remote_unit()))
    else:
        log("Not taking any sync actions", level=DEBUG)

    CONFIGS.write_all()
Example #33
def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit={} (local is non-leader)".format(
        remote_unit()),
        level=DEBUG)
    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    token = rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if token:
        log(
            "Leader-changed notification received from peer unit. Since "
            "this most likely occurred during a ring sync proxies will "
            "be disabled until the leader is restored and a fresh sync "
            "request is set out",
            level=WARNING)
        service_stop("swift-proxy")
        return

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)

    # Check whether we have been requested to stop proxy service
    if rx_rq_token:
        log("Peer request to stop proxy service received ({}) - sending ack".
            format(rx_rq_token),
            level=INFO)
        service_stop('swift-proxy')
        peers_only = rx_settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=rx_rq_token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    broker = rx_settings.get('builder-broker', None)
    broker_token = rx_settings.get('broker-token', None)
    broker_timestamp = rx_settings.get('broker-timestamp', None)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)
    if not broker:
        log("No ring/builder update available", level=DEBUG)
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')

        return
    elif broker_token:
        if tx_ack_token:
            if broker_token == tx_ack_token:
                log("Broker and ACK tokens match ({})".format(broker_token),
                    level=DEBUG)
            else:
                log("Received ring/builder update notification but tokens do "
                    "not match (broker-token={}/ack-token={})".format(
                        broker_token, tx_ack_token),
                    level=WARNING)
                return
        else:
            log(
                "Broker token available without handshake, assuming we just "
                "joined and rings won't change",
                level=DEBUG)
    else:
        log("Not taking any sync actions", level=DEBUG)
        return

    # If we upgrade from a cluster that did not use timestamps, the new peer
    # will need to request a re-sync from the leader
    if not is_most_recent_timestamp(broker_timestamp):
        if not timestamps_available(excluded_unit=remote_unit()):
            log("Requesting resync")
            rq = SwiftProxyClusterRPC().request_resync(broker_token)
            relation_set(relation_settings=rq)
        else:
            log(
                "Did not receive most recent broker timestamp but timestamps "
                "are available - waiting for next timestamp",
                level=INFO)

        return

    log("Ring/builder update available", level=DEBUG)
    builders_only = int(rx_settings.get('sync-only-builders', 0))
    path = os.path.basename(get_www_dir())
    try:
        sync_proxy_rings('http://{}/{}'.format(broker, path),
                         rings=not builders_only)
    except CalledProcessError:
        log(
            "Ring builder sync failed, builders not yet available - "
            "leader not ready?",
            level=WARNING)
        return

    # Re-enable the proxy once all builders and rings are synced
    if fully_synced():
        log("Ring builders synced - starting proxy", level=INFO)
        CONFIGS.write_all()
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')
    else:
        log(
            "Not all builders and rings synced yet - waiting for peer sync "
            "before starting proxy",
            level=INFO)
Example #34
def shared_db_changed(relation_id=None, unit=None):
    if not seeded():
        log(
            "Percona cluster not yet bootstrapped - deferring shared-db rel "
            "until bootstrapped", DEBUG)
        return

    if not is_elected_leader(DC_RESOURCE_NAME):
        # NOTE(jamespage): relation level data candidate
        log('Service is peered, clearing shared-db relation '
            'as this service unit is not the leader')
        relation_clear(relation_id)
        # Each unit needs to set the db information, otherwise if the unit
        # with the info dies the settings die with it. Bug# 1355848
        if is_relation_made('cluster'):
            for rel_id in relation_ids('shared-db'):
                peerdb_settings = \
                    peer_retrieve_by_prefix(rel_id, exc_list=['hostname'])

                passwords = [
                    key for key in peerdb_settings.keys()
                    if 'password' in key.lower()
                ]
                if len(passwords) > 0:
                    relation_set(relation_id=rel_id, **peerdb_settings)
        return

    settings = relation_get(unit=unit, rid=relation_id)
    access_network = config('access-network')
    db_helper = get_db_helper()

    peer_store_and_set(relation_id=relation_id,
                       relation_settings={'access-network': access_network})

    singleset = set(['database', 'username', 'hostname'])
    if singleset.issubset(settings):
        # Process a single database configuration
        hostname = settings['hostname']
        database = settings['database']
        username = settings['username']

        normalized_address = get_host_ip(hostname)
        if access_network and not is_address_in_network(
                access_network, normalized_address):
            # NOTE: for configurations using access-network, only setup
            #       database access if remote unit has presented a
            #       hostname or ip address that is within the configured
            #       network cidr
            log("Host '%s' not in access-network '%s' - ignoring" %
                (normalized_address, access_network),
                level=INFO)
            return

        # NOTE: do this before querying access grants
        password = configure_db_for_hosts(hostname, database, username,
                                          db_helper)

        allowed_units = db_helper.get_allowed_units(database,
                                                    username,
                                                    relation_id=relation_id)
        allowed_units = unit_sorted(allowed_units)
        allowed_units = ' '.join(allowed_units)
        relation_set(relation_id=relation_id, allowed_units=allowed_units)

        db_host = get_db_host(hostname)
        peer_store_and_set(relation_id=relation_id,
                           db_host=db_host,
                           password=password)
    else:
        # Process multiple database setup requests.
        # from incoming relation data:
        #  nova_database=xxx nova_username=xxx nova_hostname=xxx
        #  quantum_database=xxx quantum_username=xxx quantum_hostname=xxx
        # create
        # {
        #   "nova": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    },
        #    "quantum": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    }
        # }
        #
        databases = {}
        for k, v in settings.iteritems():
            db = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if db not in databases:
                databases[db] = {}
            databases[db][x] = v

        allowed_units = {}
        return_data = {}
        for db in databases:
            if singleset.issubset(databases[db]):
                database = databases[db]['database']
                hostname = databases[db]['hostname']
                username = databases[db]['username']

                normalized_address = get_host_ip(hostname)
                if (access_network and not is_address_in_network(
                        access_network, normalized_address)):
                    # NOTE: for configurations using access-network,
                    #       only setup database access if remote unit
                    #       has presented a hostname or ip address
                    #       that is within the configured network cidr
                    return

                # NOTE: do this before querying access grants
                password = configure_db_for_hosts(hostname, database, username,
                                                  db_helper)

                a_units = db_helper.get_allowed_units(database,
                                                      username,
                                                      relation_id=relation_id)
                a_units = ' '.join(unit_sorted(a_units))
                allowed_units['%s_allowed_units' % (db)] = a_units

                return_data['%s_password' % (db)] = password
                db_host = get_db_host(hostname)

        if allowed_units:
            relation_set(relation_id=relation_id, **allowed_units)
        else:
            log("No allowed_units - not setting relation settings",
                level=DEBUG)

        if return_data:
            peer_store_and_set(relation_id=relation_id,
                               db_host=db_host,
                               **return_data)
        else:
            log("No return data - not setting relation settings", level=DEBUG)
    def _add_to_config(key):
        value = relation_get(key)
        if value:
            config[key] = value
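This fragment is a nested helper with its enclosing hook stripped away; a hypothetical reconstruction of how such a helper is typically wrapped (hook name and keys are illustrative only):

def some_relation_changed():
    config = {}

    def _add_to_config(key):
        value = relation_get(key)
        if value:
            config[key] = value

    # Copy only the settings the remote unit has actually provided.
    _add_to_config('hostname')
    _add_to_config('password')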
Example #36
def contrail_analyticsdb_changed():
    data = relation_get()
    _value_changed(data, "db-user", "db_user")
    _value_changed(data, "db-password", "db_password")
    update_charm_status()
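_value_changed() is not shown on this page. A plausible sketch, assuming config is the charm's persisted hookenv.config() object and the helper copies a relation key into it under a new name, reporting whether anything changed:

def _value_changed(rel_data, rel_key, cfg_key):
    # Store rel_key from the relation data into local state under cfg_key.
    if rel_key not in rel_data:
        return False
    value = rel_data[rel_key]
    if value == config.get(cfg_key):
        return False
    config[cfg_key] = value
    return True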
Example #37
def get_cluster_hosts():
    """Get the bootstrapped cluster peers

    Determine the cluster peers that have bootstrapped and return the list
    hosts. Secondarily, update the hosts file with IPv6 address name
    resolution.

    The returned host list is intended to be used in the
    wsrep_cluster_address=gcomm:// setting. Therefore, the hosts must have
    already been bootstrapped. If an un-bootstrapped host happens to be first
    in the list, mysql will fail to start.

    @side_effect update_hosts_file called for IPv6 hostname resolution
    @returns list of hosts
    """
    hosts_map = {}

    local_cluster_address = get_cluster_host_ip()

    # We need to add this localhost dns name to /etc/hosts along with peer
    # hosts to ensure percona gets consistently resolved addresses.
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')], fatal=True)[0]
        hosts_map = {addr: socket.gethostname()}

    hosts = []
    for relid in relation_ids('cluster'):
        for unit in related_units(relid):
            rdata = relation_get(unit=unit, rid=relid)
            # NOTE(dosaboy): see LP: #1599447
            cluster_address = rdata.get('cluster-address',
                                        rdata.get('private-address'))
            if config('prefer-ipv6'):
                hostname = rdata.get('hostname')
                if not hostname or hostname in hosts:
                    log("(unit=%s) Ignoring hostname '%s' provided by cluster "
                        "relation for addr %s" %
                        (unit, hostname, cluster_address),
                        level=DEBUG)
                    continue
                else:
                    log("(unit=%s) hostname '%s' provided by cluster relation "
                        "for addr %s" % (unit, hostname, cluster_address),
                        level=DEBUG)

                hosts_map[cluster_address] = hostname
                host = hostname
            else:
                host = resolve_hostname_to_ip(cluster_address)
            # Add only cluster peers that have set bootstrap-uuid, an
            # indication that they themselves are bootstrapped.
            # Un-bootstrapped hosts in gcomm cause mysql to fail to start
            # if one happens to be the first address in the list.
            # Also work around a quirk when executed from actions, where the
            # local unit is returned in related_units: we do not want the
            # local IP in the gcomm hosts list.
            if (rdata.get('bootstrap-uuid') and host not in hosts
                    and host != local_cluster_address):
                hosts.append(host)

    if hosts_map:
        update_hosts_file(hosts_map)

    # Return a sorted list to avoid unnecessary restarts
    hosts.sort()
    return hosts
Example #38
def analytics_changed_departed():
    data = relation_get()
    _value_changed(data, "analytics_ips", "analytics_ips")
    update_southbound_relations()
    utils.update_ziu("analytics-changed")
    utils.update_charm_status()
Example #39
    def __call__(self):
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            log('Could not generate amqp context. Missing required charm '
                'config options: %s.' % e,
                level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user':
                    username,
                    'rabbitmq_password':
                    relation_get('password', rid=rid, unit=unit),
                    'rabbitmq_virtual_host':
                    vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only', rid=rid,
                                           unit=unit) is not None

                if context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log(
                                "Charm not setup for ssl support but ssl ca "
                                "found",
                                level=INFO)
                            break

                        ca_path = os.path.join(self.ssl_dir,
                                               'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                            ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only)
                    and len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        if not context_complete(ctxt):
            return {}

        return ctxt
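
Contexts like the one above are normally fed into a template renderer. A minimal sketch of that consumption step, using an inline Jinja2 fragment and made-up values (the real charms ship their templates on disk):

from jinja2 import Template

# Stand-in for the context produced by __call__() above; values are made up.
amqp_ctxt = {
    'rabbitmq_user': 'glance',
    'rabbitmq_password': 's3cr3t',
    'rabbitmq_virtual_host': 'openstack',
    'rabbitmq_host': '10.0.0.5',
}

# Hypothetical fragment of an oslo.messaging style setting.
template = Template(
    "transport_url = rabbit://{{ rabbitmq_user }}:{{ rabbitmq_password }}"
    "@{{ rabbitmq_host }}/{{ rabbitmq_virtual_host }}\n")

print(template.render(**amqp_ctxt))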
    def __call__(self):
        from neutron_api_utils import api_port
        ctxt = super(NeutronCCContext, self).__call__()
        if config('neutron-plugin') == 'nsx':
            ctxt['nsx_username'] = config('nsx-username')
            ctxt['nsx_password'] = config('nsx-password')
            ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid')
            ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid')
            if 'nsx-controllers' in config():
                ctxt['nsx_controllers'] = \
                    ','.join(config('nsx-controllers').split())
                ctxt['nsx_controllers_list'] = \
                    config('nsx-controllers').split()
        if config('neutron-plugin') == 'plumgrid':
            ctxt['pg_username'] = config('plumgrid-username')
            ctxt['pg_password'] = config('plumgrid-password')
            ctxt['virtual_ip'] = config('plumgrid-virtual-ip')
        elif config('neutron-plugin') == 'midonet':
            ctxt.update(MidonetContext()())
            identity_context = IdentityServiceContext(service='neutron',
                                                      service_user='******')()
            if identity_context is not None:
                ctxt.update(identity_context)
        ctxt['l2_population'] = self.neutron_l2_population
        ctxt['enable_dvr'] = self.neutron_dvr
        ctxt['l3_ha'] = self.neutron_l3ha
        if self.neutron_l3ha:
            ctxt['max_l3_agents_per_router'] = \
                config('max-l3-agents-per-router')
            ctxt['min_l3_agents_per_router'] = \
                config('min-l3-agents-per-router')
        ctxt['dhcp_agents_per_network'] = config('dhcp-agents-per-network')
        ctxt['overlay_network_type'] = self.neutron_overlay_network_type
        ctxt['external_network'] = config('neutron-external-network')
        if config('neutron-plugin') in ['vsp']:
            _config = config()
            for k, v in _config.iteritems():
                if k.startswith('vsd'):
                    ctxt[k.replace('-', '_')] = v
            for rid in relation_ids('vsd-rest-api'):
                for unit in related_units(rid):
                    rdata = relation_get(rid=rid, unit=unit)
                    vsd_ip = rdata.get('vsd-ip-address')
                    if os_release('neutron-server') >= 'kilo':
                        cms_id_value = rdata.get('nuage-cms-id')
                        log('relation data:cms_id required for'
                            ' nuage plugin: {}'.format(cms_id_value))
                        if cms_id_value is not None:
                            ctxt['cms_id'] = '{}'.format(cms_id_value)
                    log('relation data:vsd-ip-address: {}'.format(vsd_ip))
                    if vsd_ip is not None:
                        ctxt['vsd_server'] = '{}:8443'.format(vsd_ip)
            if 'vsd_server' not in ctxt:
                ctxt['vsd_server'] = '1.1.1.1:8443'
        ctxt['verbose'] = config('verbose')
        ctxt['debug'] = config('debug')
        ctxt['neutron_bind_port'] = \
            determine_api_port(api_port('neutron-server'),
                               singlenode_mode=True)
        ctxt['quota_security_group'] = config('quota-security-group')
        ctxt['quota_security_group_rule'] = \
            config('quota-security-group-rule')
        ctxt['quota_network'] = config('quota-network')
        ctxt['quota_subnet'] = config('quota-subnet')
        ctxt['quota_port'] = config('quota-port')
        ctxt['quota_vip'] = config('quota-vip')
        ctxt['quota_pool'] = config('quota-pool')
        ctxt['quota_member'] = config('quota-member')
        ctxt['quota_health_monitors'] = config('quota-health-monitors')
        ctxt['quota_router'] = config('quota-router')
        ctxt['quota_floatingip'] = config('quota-floatingip')

        n_api_settings = self.get_neutron_api_rel_settings()
        if n_api_settings:
            ctxt.update(n_api_settings)

        flat_providers = config('flat-network-providers')
        if flat_providers:
            ctxt['network_providers'] = ','.join(flat_providers.split())

        vlan_ranges = config('vlan-ranges')
        if vlan_ranges:
            ctxt['vlan_ranges'] = ','.join(vlan_ranges.split())

        vni_ranges = config('vni-ranges')
        if vni_ranges:
            ctxt['vni_ranges'] = ','.join(vni_ranges.split())

        ctxt['enable_ml2_port_security'] = config('enable-ml2-port-security')

        return ctxt
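
Several options above (flat-network-providers, vlan-ranges, vni-ranges) are space-separated in charm config but comma-joined in the rendered context. A standalone sketch of that normalisation:

def normalise_ranges(value):
    """Turn a space-separated charm option into the comma-separated form
    used in the context above; returns None for empty input."""
    return ','.join(value.split()) if value else None


print(normalise_ranges('physnet1:1000:2000 physnet2:2000:3000'))
# -> physnet1:1000:2000,physnet2:2000:3000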
def tls_certificates_relation_changed():
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        update_southbound_relations()
        _notify_haproxy_services()
        utils.update_nrpe_config()
        utils.update_charm_status()
Example #42
0
    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {
                    'network': "{}/{}".format(laddr, netmask),
                    'backends': {
                        l_unit: laddr
                    }
                }
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid,
                                              unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {
            'network': "{}/{}".format(addr, netmask),
            'backends': {
                l_unit: addr
            }
        }
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address', rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {'frontends': cluster_hosts, 'default_backend': addr}

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1
                    or self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
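
The frontends map built above groups backends per network address. A hedged sketch of how such a structure could be turned into haproxy server lines (plain string building, not the charm's actual template):

def render_backends(frontends, port=8776):
    """Emit one 'server' line per backend for each frontend address.

    `frontends` mirrors the cluster_hosts structure above:
    {addr: {'network': 'cidr', 'backends': {unit_name: unit_addr}}}.
    The port value is illustrative.
    """
    lines = []
    for addr, data in sorted(frontends.items()):
        lines.append('backend backend_{}'.format(addr.replace('.', '_')))
        for unit, backend in sorted(data['backends'].items()):
            lines.append('    server {} {}:{} check'.format(unit, backend,
                                                            port))
    return '\n'.join(lines)


example = {'10.0.0.10': {'network': '10.0.0.10/24',
                         'backends': {'api-0': '10.0.0.10',
                                      'api-1': '10.0.0.11'}}}
print(render_backends(example))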
Example #43
0
def cloud_controller_relation_changed():
    controller = relation_get('private-address')
    if controller:
        metadata_agent_config.update({'nova_metadata_ip': controller})
        cplane_config(metadata_agent_config, METADATA_AGENT_INI, 'DEFAULT')
Example #44
0
def auth_relation_changed():
    auth_ip = relation_get('private-address')
    cmd = "sed -i '/doctl.auth_url*/c doctl.auth_url = http://{}:35357/v3' \
/etc/docker-proxy/doctl_config.ini".format(auth_ip)
    os.system(cmd)
Example #45
0
    def neutron_context(self):
        # generate config context for neutron or quantum. these get converted
        # directly into flags in nova.conf
        # NOTE: It's up to release templates to set the correct driver
        neutron_ctxt = {'neutron_url': None}
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                rel = {'rid': rid, 'unit': unit}

                url = _neutron_url(**rel)
                if not url:
                    # only bother with units that have a neutron url set.
                    continue

                neutron_ctxt = {
                    'auth_protocol':
                    relation_get('auth_protocol', **rel) or 'http',
                    'service_protocol':
                    relation_get('service_protocol', **rel) or 'http',
                    'service_port':
                    relation_get('service_port', **rel) or '5000',
                    'neutron_auth_strategy':
                    'keystone',
                    'keystone_host':
                    relation_get('auth_host', **rel),
                    'auth_port':
                    relation_get('auth_port', **rel),
                    'neutron_admin_tenant_name':
                    relation_get('service_tenant_name', **rel),
                    'neutron_admin_username':
                    relation_get('service_username', **rel),
                    'neutron_admin_password':
                    relation_get('service_password', **rel),
                    'api_version':
                    relation_get('api_version', **rel) or '2.0',
                    'neutron_plugin':
                    _neutron_plugin(),
                    'neutron_url':
                    url,
                }
                # DNS domain is optional
                dns_domain = relation_get('dns_domain', **rel)
                if dns_domain:
                    neutron_ctxt['dns_domain'] = dns_domain
                admin_domain = relation_get('admin_domain_name', **rel)
                if admin_domain:
                    neutron_ctxt['neutron_admin_domain_name'] = admin_domain

        missing = [k for k, v in neutron_ctxt.items() if v in ['', None]]
        if missing:
            log('Missing required relation settings for Quantum: ' +
                ' '.join(missing))
            return {}

        neutron_ctxt['neutron_security_groups'] = _neutron_security_groups()

        ks_url = '%s://%s:%s/v%s' % (
            neutron_ctxt['auth_protocol'], neutron_ctxt['keystone_host'],
            neutron_ctxt['auth_port'], neutron_ctxt['api_version'])
        neutron_ctxt['neutron_admin_auth_url'] = ks_url

        return neutron_ctxt
Example #46
0
    def __call__(self):
        try:
            import hvac
        except ImportError:
            # BUG: #1862085 - if the relation is made to vault, but the
            # 'encrypt' option is not set, then the charm fails with an
            # import error.  This catches that, logs a warning, and returns
            # with an empty context.
            hookenv.log(
                "VaultKVContext: trying to use the hvac python module "
                "but it's not available.  Is the secrets-storage relation "
                "made, but the encrypt option not set?",
                level=hookenv.WARNING)
            # return an empty context on hvac import error
            return {}
        ctxt = {}
        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
        db = unitdata.kv()
        # currently known-good secret-id
        secret_id = db.get('secret-id')

        for relation_id in hookenv.relation_ids(self.interfaces[0]):
            for unit in hookenv.related_units(relation_id):
                data = hookenv.relation_get(unit=unit, rid=relation_id)
                vault_url = data.get('vault_url')
                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
                token = data.get('{}_token'.format(hookenv.local_unit()))

                if all([vault_url, role_id, token]):
                    token = json.loads(token)
                    vault_url = json.loads(vault_url)

                    # Tokens may change when secret_id's are being
                    # reissued - if so use token to get new secret_id
                    token_success = False
                    try:
                        secret_id = retrieve_secret_id(url=vault_url,
                                                       token=token)
                        token_success = True
                    except hvac.exceptions.InvalidRequest:
                        # Try next
                        pass

                    if token_success:
                        db.set('secret-id', secret_id)
                        db.flush()

                        ctxt['vault_url'] = vault_url
                        ctxt['role_id'] = json.loads(role_id)
                        ctxt['secret_id'] = secret_id
                        ctxt['secret_backend'] = self.secret_backend
                        vault_ca = data.get('vault_ca')
                        if vault_ca:
                            ctxt['vault_ca'] = json.loads(vault_ca)

                        self.complete = True
                        break
                    else:
                        if secret_id:
                            ctxt['vault_url'] = vault_url
                            ctxt['role_id'] = json.loads(role_id)
                            ctxt['secret_id'] = secret_id
                            ctxt['secret_backend'] = self.secret_backend
                            vault_ca = data.get('vault_ca')
                            if vault_ca:
                                ctxt['vault_ca'] = json.loads(vault_ca)

            if self.complete:
                break

        if ctxt:
            self.complete = True

        return ctxt
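
Note that the relation values above are decoded with json.loads before use; relation data always travels as strings, so the vault charm JSON-encodes them. A tiny standalone illustration of why that decoding step matters (the values are made up):

import json

raw = {'vault_url': json.dumps('http://10.0.0.20:8200'),
       'unit-0_role_id': json.dumps('role-abc'),
       'unit-0_token': json.dumps('s.token123')}

# Without json.loads the URL would keep its surrounding JSON quotes.
vault_url = json.loads(raw['vault_url'])
print(vault_url)  # -> http://10.0.0.20:8200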
def tls_certificates_relation_changed():
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        utils.update_charm_status()
Example #48
0
#!/usr/bin/env python3

import json
import os
import sys
from subprocess import check_call

sys.path.append('lib')

from charmhelpers.core import hookenv, unitdata

if __name__ == '__main__':
    relname = hookenv.relation_type()
    role, _ = hookenv.relation_to_role_and_interface(relname)

    local_data = hookenv.relation_get()
    env = {}
    env.update(os.environ)
    env['ETCDCTL_ENDPOINT'] = hookenv.config().get('etcd')
    check_call([
        'etcdctl', 'set', '/{{ relay_name }}/{{ counterpart }}',
        json.dumps(local_data)
    ],
               env=env)

    kv = unitdata.kv()
    kv.set('relay.local.relation.name', relname)
    kv.set('relay.local.relation.role', role)
    kv.set('relay.remote.relation.role', '{{ counterpart }}')
    kv.flush(save=True)
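
The relay above publishes its local relation data under /{{ relay_name }}/{{ counterpart }}; a counterpart would read it back with `etcdctl get`. A hedged sketch of that reading side, assuming the same etcd endpoint configuration:

import json
import os
from subprocess import check_output

env = dict(os.environ)
env['ETCDCTL_ENDPOINT'] = 'http://127.0.0.1:4001'  # assumed endpoint

# Fetch what the counterpart published and decode it back into a dict.
raw = check_output(['etcdctl', 'get', '/{{ relay_name }}/{{ counterpart }}'],
                   env=env)
remote_data = json.loads(raw)
print(remote_data)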
Example #49
0
def amqp_changed(relation_id=None, remote_unit=None):
    singleset = set(['username', 'vhost'])
    host_addr = rabbit.get_unit_ip()

    if rabbit.leader_node_is_ready():
        relation_settings = {'hostname': host_addr,
                             'private-address': host_addr}
        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings['private-address'] = host_addr

        current = relation_get(rid=relation_id, unit=remote_unit)
        if singleset.issubset(current):
            if not all([current.get('username'), current.get('vhost')]):
                log('Relation not ready.', DEBUG)
                return

            # Provide credentials to relations. If password is already
            # available on peer relation then use it instead of reconfiguring.
            username = current['username']
            vhost = current['vhost']
            admin = current.get('admin', False)
            amqp_rid = relation_id or get_relation_id()
            password = configure_amqp(username, vhost, amqp_rid, admin=admin)
            relation_settings['password'] = password
        else:
            # NOTE(hopem): we should look at removing this code since I don't
            #              think it's ever used anymore; it stems from the days
            #              when we needed to ensure consistency between
            #              peerstorage (replaced by leader get/set) and amqp
            #              relations.
            queues = {}
            for k, v in current.iteritems():
                amqp_rid = k.split('_')[0]
                x = '_'.join(k.split('_')[1:])
                if amqp_rid not in queues:
                    queues[amqp_rid] = {}

                queues[amqp_rid][x] = v

            for amqp_rid in queues:
                if singleset.issubset(queues[amqp_rid]):
                    username = queues[amqp_rid]['username']
                    vhost = queues[amqp_rid]['vhost']
                    # 'admin' is only set in the branch above; default to the
                    # per-queue value (or False) to avoid a NameError here.
                    admin = queues[amqp_rid].get('admin', False)
                    password = configure_amqp(username, vhost, amqp_rid,
                                              admin=admin)
                    key = '_'.join([amqp_rid, 'password'])
                    relation_settings[key] = password

        ssl_utils.configure_client_ssl(relation_settings)

        if is_clustered():
            relation_settings['clustered'] = 'true'
            # NOTE(dosaboy): this stanza can be removed once we fully remove
            #                deprecated HA support.
            if is_relation_made('ha'):
                # active/passive settings
                relation_settings['vip'] = config('vip')
                # or ha-vip-only to support active/active, but
                # accessed via a VIP for older clients.
                if config('ha-vip-only') is True:
                    relation_settings['ha-vip-only'] = 'true'

        # set if need HA queues or not
        if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
            relation_settings['ha_queues'] = True

        log("Updating relation {} keys {}"
            .format(relation_id or get_relation_id(),
                    ','.join(relation_settings.keys())), DEBUG)
        peer_store_and_set(relation_id=relation_id,
                           relation_settings=relation_settings)
    elif not is_leader() and rabbit.client_node_is_ready():
        log("Propagating peer settings to all amqp relations", DEBUG)

        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage.
        relation_clear(relation_id)

        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id)
            if 'password' in peerdb_settings:
                peerdb_settings['hostname'] = host_addr
                peerdb_settings['private-address'] = host_addr
                relation_set(relation_id=rel_id, **peerdb_settings)
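
The ha_queues decision above hinges on cmp_pkgrevno, which compares an installed package version against a threshold. A pure-Python stand-in to illustrate the comparison semantics (the real helper queries dpkg):

def cmp_versions(a, b):
    """Return a negative, zero or positive value for a < b, a == b, a > b
    (simple dotted numeric versions only)."""
    pa = [int(x) for x in a.split('.')]
    pb = [int(x) for x in b.split('.')]
    return (pa > pb) - (pa < pb)


# rabbitmq-server older than 3.0.1 needs explicitly mirrored (HA) queues:
print(cmp_versions('2.8.7', '3.0.1') < 0)  # -> True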
Example #50
0
def install_kafka_connector():
    server_path = "/opt/wso2esb/wso2esb-{}/repository/deployment/server".format(
        ESB_VERSION)

    # copy connector
    synapse_libs_path = '{}/synapse-libs'.format(server_path)
    if not os.path.exists(synapse_libs_path):
        os.makedirs(synapse_libs_path)
    shutil.copy(
        charm_dir() +
        '/files/kafka-connector-%s.zip' % KAFKA_CONNECTOR_VERSION,
        synapse_libs_path)
    os.chown(
        charm_dir() +
        '/files/kafka-connector-%s.zip' % KAFKA_CONNECTOR_VERSION,
        pwd.getpwnam("esbuser").pw_uid,
        grp.getgrnam("wso2").gr_gid)

    # enable connector
    kafka_enable_path = "{}/synapse-configs/default/imports/".format(
        server_path)
    if not os.path.exists(kafka_enable_path):
        os.makedirs(kafka_enable_path)
    templating.render(
        source='{org.wso2.carbon.connector}kafka.xml',
        target='%s/{org.wso2.carbon.connector}kafka.xml' % kafka_enable_path,
        context={},
        owner='esbuser',
        group='wso2',
    )
    # Wait for kafka connector to come online
    time.sleep(20)

    # Copy sequence
    kafka_hostname = relation_get('private-address')
    kafka_topic = "test"
    sequence_path = "{}/synapse-configs/default/sequences".format(server_path)
    if not os.path.exists(sequence_path):
        os.makedirs(sequence_path)
    templating.render(
        source='postTopic.xml',
        target='{}/postTopic.xml'.format('/opt/wso2esb'),
        context={
            'kafka_broker': kafka_hostname,
            'kafka_topic': kafka_topic,
        },
        owner='esbuser',
        group='wso2',
    )
    shutil.move('/opt/wso2esb/postTopic.xml', sequence_path + '/postTopic.xml')

    # Copy API
    api_path = "{}/synapse-configs/default/api".format(server_path)
    if not os.path.exists(api_path):
        os.makedirs(api_path)
    templating.render(
        source='kafka.xml',
        target='{}/kafka.xml'.format(api_path),
        context={},
        owner='esbuser',
        group='wso2',
    )
Example #51
0
def bootstrap_source_relation_changed():
    """Handles relation data changes on the bootstrap-source relation.

    The bootstrap-source relation shares remote bootstrap information with
    the ceph-mon charm. It is used to exchange the remote
    ceph-public-addresses (used for the mons), the fsid, and the
    monitor-secret.
    """
    if not config('no-bootstrap'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    if not is_leader():
        log('Deferring leader-setting updates to the leader unit')
        return

    curr_fsid = leader_get('fsid')
    curr_secret = leader_get('monitor-secret')
    for relid in relation_ids('bootstrap-source'):
        for unit in related_units(relid=relid):
            mon_secret = relation_get('monitor-secret', unit, relid)
            fsid = relation_get('fsid', unit, relid)

            if not (mon_secret and fsid):
                log('Relation data is not ready as the fsid or the '
                    'monitor-secret are missing from the relation: '
                    'mon_secret = {} and fsid = {} '.format(mon_secret, fsid))
                continue

            if not (curr_fsid or curr_secret):
                curr_fsid = fsid
                curr_secret = mon_secret
            else:
                # The fsids and secrets need to match or the local monitors
                # will fail to join the mon cluster. If they don't,
                # bail because something needs to be investigated.
                assert curr_fsid == fsid, \
                    "bootstrap fsid '{}' != current fsid '{}'".format(
                        fsid, curr_fsid)
                assert curr_secret == mon_secret, \
                    "bootstrap secret '{}' != current secret '{}'".format(
                        mon_secret, curr_secret)
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            try:
                leader_set(opts)
                log('Updating leader settings for fsid and monitor-secret '
                    'from remote relation data: {}'.format(opts))
            except Exception as e:
                # we're probably not the leader and an exception occurred;
                # let's log it anyway.
                log("leader_set failed: {}".format(str(e)))

    # The leader unit needs to bootstrap itself as it won't receive the
    # leader-settings-changed hook elsewhere.
    if curr_fsid:
        mon_relation()
    def region(self):
        region = None
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                region = relation_get('region', rid=rid, unit=unit)
        return region
Example #53
0
def get_int_from_relation(name, unit=None, rid=None):
    value = relation_get(name, unit, rid)
    return int(value if value else -1)
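
The -1 default makes "unset" easy to distinguish from a real value, because relation_get returns None for keys the remote unit has not published yet. A standalone mirror of the helper for illustration:

def to_int_or_default(value, default=-1):
    """Standalone equivalent of get_int_from_relation, for illustration."""
    return int(value) if value else default


print(to_int_or_default(None))  # -> -1, instead of raising a TypeError
print(to_int_or_default('42'))  # -> 42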
Example #54
0
def control_node_ctx():
    return { "control_nodes": [ gethostbyname(relation_get("private-address", unit, rid))
                                for rid in relation_ids("control-node")
                                for unit in related_units(rid) ] }
Example #55
0
def get_unit_addr(relid, unitid):
    return hookenv.relation_get(attribute='private-address',
                                unit=unitid,
                                rid=relid)
Example #56
0
def tls_certificates_relation_changed():
    # it can be fired several times without server's cert
    if common_utils.tls_changed(utils.MODULE, relation_get()):
        utils.update_charm_status()
Example #57
0
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}

        # check that the charm can write to the conf dir.  If not, then nagios
        # probably isn't installed, and we can defer.
        if not self.does_nrpe_conf_dir_exist():
            return

        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }
            # If we were passed max_check_attempts, add that to the relation data
            if nrpecheck.max_check_attempts is not None:
                nrpe_monitors[nrpecheck.shortname][
                    'max_check_attempts'] = nrpecheck.max_check_attempts

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing, causing unnecessary alerts. Let's not
        # restart on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            reldata = relation_get(unit=local_unit(), rid=rid)
            if 'monitors' in reldata:
                # update the existing set of monitors with the new data
                old_monitors = yaml.safe_load(reldata['monitors'])
                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
                # remove keys that are in the remove_check_queue
                old_nrpe_monitors = {
                    k: v
                    for k, v in old_nrpe_monitors.items()
                    if k not in self.remove_check_queue
                }
                # update/add nrpe_monitors
                old_nrpe_monitors.update(nrpe_monitors)
                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
                # write back to the relation
                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
            else:
                # write a brand new set of monitors, as there are none yet.
                relation_set(relation_id=rid, monitors=yaml.dump(monitors))

        self.remove_check_queue.clear()
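
Typical caller-side usage of this class is to register checks and then call write(); a hedged sketch (the check definition is illustrative, and the import path assumes the usual charmhelpers layout):

from charmhelpers.contrib.charmsupport.nrpe import NRPE

nrpe_setup = NRPE(hostname='my-unit-0')
nrpe_setup.add_check(
    shortname='my_service',
    description='Ensure my_service is running',
    check_cmd='check_procs -c 1:1 -C my_service',
)
# Renders the check definitions and updates the monitor relations, as in
# the write() method above; requires a nagios/nrpe install to take effect.
nrpe_setup.write()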
Example #58
0
def jenkins_context():
    for rid in relation_ids('jenkins-configurator'):
        for unit in related_units(rid):
            return relation_get(rid=rid, unit=unit)
def _neutron_url(rid, unit):
    # supports legacy relation settings.
    return (relation_get('neutron_url', rid=rid, unit=unit) or
            relation_get('quantum_url', rid=rid, unit=unit))
Example #60
0
def shared_db_changed(relation_id=None, unit=None):
    if not seeded():
        log(
            "Percona cluster not yet bootstrapped - deferring shared-db rel "
            "until bootstrapped", DEBUG)
        return

    if not is_leader() and client_node_is_ready():
        clear_and_populate_client_db_relations(relation_id, 'shared-db')
        return

    # Bail if leader is not ready
    if not leader_node_is_ready():
        return

    settings = relation_get(unit=unit, rid=relation_id)
    access_network = config('access-network')
    db_helper = get_db_helper()

    peer_store_and_set(relation_id=relation_id,
                       relation_settings={'access-network': access_network})

    singleset = set(['database', 'username', 'hostname'])
    if singleset.issubset(settings):
        # Process a single database configuration
        hostname = settings['hostname']
        database = settings['database']
        username = settings['username']

        normalized_address = resolve_hostname_to_ip(hostname)
        if access_network and not is_address_in_network(
                access_network, normalized_address):
            # NOTE: for configurations using access-network, only setup
            #       database access if remote unit has presented a
            #       hostname or ip address that's within the configured
            #       network cidr
            log("Host '%s' not in access-network '%s' - ignoring" %
                (normalized_address, access_network),
                level=INFO)
            return

        # NOTE: do this before querying access grants
        password = configure_db_for_hosts(hostname, database, username,
                                          db_helper)

        allowed_units = db_helper.get_allowed_units(database,
                                                    username,
                                                    relation_id=relation_id)
        allowed_units = unit_sorted(allowed_units)
        allowed_units = ' '.join(allowed_units)
        relation_set(relation_id=relation_id, allowed_units=allowed_units)

        db_host = get_db_host(hostname)
        peer_store_and_set(relation_id=relation_id,
                           db_host=db_host,
                           password=password,
                           allowed_units=allowed_units)
    else:
        # Process multiple database setup requests.
        # from incoming relation data:
        #  nova_database=xxx nova_username=xxx nova_hostname=xxx
        #  quantum_database=xxx quantum_username=xxx quantum_hostname=xxx
        # create
        # {
        #   "nova": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    },
        #    "quantum": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    }
        # }
        #
        databases = {}
        for k, v in settings.iteritems():
            db = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if db not in databases:
                databases[db] = {}
            databases[db][x] = v

        allowed_units = {}
        return_data = {}
        for db in databases:
            if singleset.issubset(databases[db]):
                database = databases[db]['database']
                hostname = databases[db]['hostname']
                username = databases[db]['username']

                normalized_address = resolve_hostname_to_ip(hostname)
                if (access_network and not is_address_in_network(
                        access_network, normalized_address)):
                    # NOTE: for configurations using access-network,
                    #       only setup database access if remote unit
                    #       has presented a hostname or ip address
                    #       that's within the configured network cidr
                    return

                # NOTE: do this before querying access grants
                password = configure_db_for_hosts(hostname, database, username,
                                                  db_helper)

                a_units = db_helper.get_allowed_units(database,
                                                      username,
                                                      relation_id=relation_id)
                a_units = ' '.join(unit_sorted(a_units))
                allowed_units_key = '%s_allowed_units' % (db)
                allowed_units[allowed_units_key] = a_units

                return_data['%s_password' % (db)] = password
                return_data[allowed_units_key] = a_units
                db_host = get_db_host(hostname)

        if allowed_units:
            relation_set(relation_id=relation_id, **allowed_units)
        else:
            log("No allowed_units - not setting relation settings",
                level=DEBUG)

        if return_data:
            peer_store_and_set(relation_id=relation_id,
                               db_host=db_host,
                               **return_data)
        else:
            log("No return data - not setting relation settings", level=DEBUG)