def db_changed(relation_id=None, unit=None, admin=None):
    """Handle a db/db-admin relation change.

    Creates database credentials for the remote service and publishes the
    access details on the relation. Only the elected leader services the
    request; other peers clear the relation instead.

    :param relation_id: relation id to operate on (defaults to the relation
        of the current hook).
    :param unit: remote unit to read settings from (defaults to the unit
        that triggered the current hook).
    :param admin: True for the db-admin interface, False for db; if None,
        derived from the current relation type.
    """
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    # An explicit admin flag wins; otherwise infer from the relation name.
    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'

    # FIX: honour an explicit `unit` argument; remote_unit() is only valid
    # for the unit that triggered the current hook (matches the other
    # db_changed variants in this file).
    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    db_host = get_db_host(addr, interface=relation_type())

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
def db_changed(relation_id=None, unit=None, admin=None):
    """Handle a db/db-admin relation change.

    Grants database access for the remote service and publishes the
    connection details on the relation. Only the elected leader services
    the request; other peers clear the relation instead.

    :param relation_id: relation id to operate on (defaults to the relation
        of the current hook).
    :param unit: remote unit to read settings from (defaults to the unit
        that triggered the current hook).
    :param admin: True for db-admin, False for db; if None, derived from
        the current relation type.
    """
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    # Advertise the VIP when clustered, otherwise this unit's own address.
    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    # FIX: honour an explicit `unit` argument; remote_unit() is only valid
    # for the unit that triggered the current hook (matches the other
    # db_changed variants in this file).
    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #3
0
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        # Resolve the (namespace, scope) pair for this conversation key.
        if scope is scopes.UNIT:
            namespace, scope = relation_id, unit
        elif scope is scopes.SERVICE:
            namespace, scope = relation_id, service
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        default = {'namespace': namespace, 'scope': scope, 'units': []}
        conversation = cls.deserialize(unitdata.kv().get(key, default))
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
def unprovision_metadata():
    """Deregister the metadata link-local service from the Contrail API."""
    if not remote_unit():
        return
    relation = relation_type()
    # The fabric service IP comes from the triggering neutron-metadata
    # relation, or from the first related neutron-metadata unit otherwise.
    if relation == "neutron-metadata":
        ip = gethostbyname(relation_get("private-address"))
    else:
        addrs = [gethostbyname(relation_get("private-address", unit, rid))
                 for rid in relation_ids("neutron-metadata")
                 for unit in related_units(rid)]
        ip = addrs[0]
    # Admin credentials come from identity-admin, directly or indirectly.
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
    else:
        creds = [(relation_get("service_username", unit, rid),
                  relation_get("service_password", unit, rid))
                 for rid in relation_ids("identity-admin")
                 for unit in related_units(rid)]
        user, password = creds[0]
    log("Unprovisioning metadata service {}:8775".format(ip))
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", "127.0.0.1",
                "--api_server_port", str(api_port()),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", ip,
                "--ipfabric_service_port", "8775",
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password])
Example #5
0
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        # UNIT and SERVICE scopes are namespaced per relation id; the
        # GLOBAL fallback is namespaced by relation name only.
        if scope is scopes.UNIT:
            scope, namespace = unit, relation_id
        elif scope is scopes.SERVICE:
            scope, namespace = service, relation_id
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        stored = unitdata.kv().get(
            key, {'namespace': namespace, 'scope': scope, 'units': []})
        conversation = cls.deserialize(stored)
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
Example #6
0
def unprovision_vrouter():
    """Deregister this unit's vrouter from the Contrail API server."""
    relation = relation_type()
    if relation and not remote_unit():
        return
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    # Prefer the previously configured API endpoint; fall back to the
    # triggering contrail-api relation, then any related contrail-api unit.
    a_ip = config.previous("contrail-api-ip")
    a_port = None
    if a_ip:
        a_port = config.previous("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    elif relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        endpoints = [(gethostbyname(relation_get("private-address",
                                                 unit, rid)),
                      relation_get("port", unit, rid))
                     for rid in relation_ids("contrail-api")
                     for unit in related_units(rid)]
        a_ip, a_port = endpoints[0]
    # Admin credentials come from identity-admin, directly or indirectly.
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        creds = [(relation_get("service_username", unit, rid),
                  relation_get("service_password", unit, rid),
                  relation_get("service_tenant_name", unit, rid))
                 for rid in relation_ids("identity-admin")
                 for unit in related_units(rid)]
        user, password, tenant = creds[0]
    log("Unprovisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Example #7
0
def update_relations(context, namespace_separator=':'):
    """Update the context with the relation data.

    Adds 'current_relation', 'relations_full' and 'relations' keys to
    *context*, plus (deprecated) namespaced copies of the current relation's
    settings directly in the context.

    :param context: mutable mapping to update in place.
    :param namespace_separator: separator between relation type and key for
        the deprecated flat keys.
    """
    # Add any relation data prefixed with the relation type.
    relation_type = hookenv.relation_type()
    context['current_relation'] = {}
    if relation_type is not None:
        relation_data = hookenv.relation_get()
        context['current_relation'] = relation_data
        # Deprecated: the following use of relation data as keys
        # directly in the context will be removed.
        relation_data = dict(
            ("{relation_type}{namespace_separator}{key}".format(
                relation_type=relation_type,
                key=key,
                namespace_separator=namespace_separator), val)
            for key, val in relation_data.items())
        relation_data = dict_keys_without_hyphens(relation_data)
        context.update(relation_data)
        # FIX: removed a dead call to hookenv.relations_of_type() whose
        # result was unconditionally overwritten below.

    context['relations_full'] = hookenv.relations()

    # the hookenv.relations() data structure is effectively unusable in
    # templates and other contexts when trying to access relation data other
    # than the current relation. So provide a more useful structure that works
    # with any hook.
    local_unit = hookenv.local_unit()
    relations = {}
    for rname, rids in context['relations_full'].items():
        relations[rname] = []
        for rid, rdata in rids.items():
            data = rdata.copy()
            # Exclude our own unit's settings from the per-relation view.
            if local_unit in rdata:
                data.pop(local_unit)
            for unit_name, rel_data in data.items():
                new_data = {'__relid__': rid, '__unit__': unit_name}
                new_data.update(rel_data)
                relations[rname].append(new_data)
    context['relations'] = relations
Example #8
0
def unprovision_control():
    """Deregister this control node from the Contrail API server."""
    if not remote_unit():
        return
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    relation = relation_type()
    # Resolve the API endpoint from the triggering relation, or from the
    # first related contrail-api unit otherwise.
    if relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        endpoints = [(gethostbyname(relation_get("private-address",
                                                 unit, rid)),
                      relation_get("port", unit, rid))
                     for rid in relation_ids("contrail-api")
                     for unit in related_units(rid)]
        a_ip, a_port = endpoints[0]
    # Admin credentials come from identity-admin, directly or indirectly.
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        creds = [(relation_get("service_username", unit, rid),
                  relation_get("service_password", unit, rid),
                  relation_get("service_tenant_name", unit, rid))
                 for rid in relation_ids("identity-admin")
                 for unit in related_units(rid)]
        user, password, tenant = creds[0]
    log("Unprovisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
def unprovision_control():
    """Remove this unit's control-node registration from Contrail."""
    if not remote_unit():
        return
    name = gethostname()
    address = gethostbyname(unit_get("private-address"))
    relation = relation_type()
    # API endpoint: triggering contrail-api relation, or first related unit.
    if relation == "contrail-api":
        api_ip = gethostbyname(relation_get("private-address"))
        api_srv_port = relation_get("port")
    else:
        api_ip, api_srv_port = [
            (gethostbyname(relation_get("private-address", unit, rid)),
             relation_get("port", unit, rid))
            for rid in relation_ids("contrail-api")
            for unit in related_units(rid)][0]
    # Admin credentials: triggering identity-admin relation, or first
    # related identity-admin unit.
    if relation == "identity-admin":
        admin_user = relation_get("service_username")
        admin_password = relation_get("service_password")
        admin_tenant = relation_get("service_tenant_name")
    else:
        admin_user, admin_password, admin_tenant = [
            (relation_get("service_username", unit, rid),
             relation_get("service_password", unit, rid),
             relation_get("service_tenant_name", unit, rid))
            for rid in relation_ids("identity-admin")
            for unit in related_units(rid)][0]
    log("Unprovisioning control {}".format(address))
    cmd = ["contrail-provision-control",
           "--host_name", name,
           "--host_ip", address,
           "--router_asn", "64512",
           "--api_server_ip", api_ip,
           "--api_server_port", str(api_srv_port),
           "--oper", "del",
           "--admin_user", admin_user,
           "--admin_password", admin_password,
           "--admin_tenant_name", admin_tenant]
    check_call(cmd)
Example #10
0
def unprovision_local_metadata():
    """Remove the local link-local metadata service from Contrail."""
    relation = relation_type()
    if relation and not remote_unit():
        return
    # Prefer the previously configured API endpoint; fall back to the
    # triggering contrail-api relation, then any related contrail-api unit.
    a_ip = config.previous("contrail-api-ip")
    a_port = None
    if a_ip:
        a_port = config.previous("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    elif relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        endpoints = [(gethostbyname(relation_get("private-address",
                                                 unit, rid)),
                      relation_get("port", unit, rid))
                     for rid in relation_ids("contrail-api")
                     for unit in related_units(rid)]
        a_ip, a_port = endpoints[0]
    # Admin credentials come from identity-admin, directly or indirectly.
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
    else:
        creds = [(relation_get("service_username", unit, rid),
                  relation_get("service_password", unit, rid))
                 for rid in relation_ids("identity-admin")
                 for unit in related_units(rid)]
        user, password = creds[0]
    log("Unprovisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password])
def hook(*hook_patterns):
    """
    Register the decorated function to run when the current hook matches any of
    the ``hook_patterns``.

    The hook patterns can use the ``{interface:...}`` and ``{A,B,...}`` syntax
    supported by `any_hook`.

    If the hook is a relation hook, an instance of that relation class will be
    passed in to the decorated function.

    For example, to match any joined or changed hook for any relation using the
    ``mysql`` interface::

        class MySQLRelation(RelationBase):
            @hook('{interface:mysql}-relation-{joined,changed}')
            def joined_or_changed(self):
                pass

    This can be used from Bash using the ``reactive.sh`` helpers::

        source `which reactive.sh`

        hook '{interface:mysql}-relation-{joined,changed}'; then
            chlp relation_call $JUJU_RELATION handle_relation
        kooh

    The Bash helper uses the `any_hook` ``chlp`` command, and the above is
    exactly equivalent to::

        source `which reactive.sh`

        if chlp any_hook '{interface:mysql}-relation-{joined,changed}'; then
            chlp relation_call $JUJU_RELATION handle_relation
        kooh
    """
    def _matches():
        # True when the currently executing hook matches any pattern.
        return any_hook(*hook_patterns)

    def _relation_args():
        # Yield the relation instance for the active hook, if any.
        return filter(None, [RelationBase.from_name(hookenv.relation_type())])

    return Handler.decorator(_matches, _relation_args)
def db_changed(relation_id=None, unit=None, admin=None):
    """Create database credentials for a client and publish them.

    Only runs once the Percona cluster is bootstrapped; non-leader units
    merely mirror the leader's relation data.
    """
    # Is this db-admin or db relation
    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    relation_name = 'db-admin' if admin else 'db'

    if not seeded():
        log(
            "Percona cluster not yet bootstrapped - deferring {} relation "
            "until bootstrapped.".format(relation_name), DEBUG)
        return

    if not is_leader() and client_node_is_ready():
        clear_and_populate_client_db_relations(relation_id, relation_name)
        return

    # Bail if leader is not ready
    if not leader_node_is_ready():
        return

    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = helper.configure_db(addr, db_name, username, admin=admin)
    db_host = get_db_host(addr, interface=relation_name)

    peer_store_and_set(relation_id=relation_id,
                       user=username,
                       password=password,
                       host=db_host,
                       database=db_name)
def db_changed(relation_id=None, unit=None, admin=None):
    """Serve a db/db-admin client: create DB + credentials and publish them.

    Deferred until the Percona cluster is bootstrapped and the leader is
    ready; non-leaders only mirror the leader's relation data.
    """
    # Is this db-admin or db relation
    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    if admin:
        relation_name = 'db-admin'
    else:
        relation_name = 'db'

    if not seeded():
        log("Percona cluster not yet bootstrapped - deferring {} relation "
            "until bootstrapped.".format(relation_name), DEBUG)
        return

    if not is_leader() and client_node_is_ready():
        clear_and_populate_client_db_relations(relation_id, relation_name)
        return

    if not leader_node_is_ready():
        # Bail if leader is not ready
        return

    source_unit = unit or remote_unit()
    db_name, _ = source_unit.split("/")
    username = db_name
    helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = helper.configure_db(addr, db_name, username, admin=admin)

    db_host = get_db_host(addr, interface=relation_name)

    peer_store_and_set(relation_id=relation_id,
                       user=username,
                       password=password,
                       host=db_host,
                       database=db_name)
Example #14
0
 def depart_conv():
     # Depart the conversation for the relation of the active hook.
     relation = cls(hookenv.relation_type())
     relation.conversation().depart()
Example #15
0
#!/usr/bin/env python3
# Relay hook: publish this unit's local relation data into etcd so a
# counterpart can mirror it. The `{{ ... }}` placeholders are presumably
# rendered when the charm/hook is templated — TODO confirm against the
# build step.

import json
import os
import sys
from subprocess import check_call

sys.path.append('lib')

from charmhelpers.core import hookenv, unitdata

if __name__ == '__main__':
    # Relation name/role for the hook that triggered this script.
    relname = hookenv.relation_type()
    role, _ = hookenv.relation_to_role_and_interface(relname)

    # This unit's settings on the current relation.
    local_data = hookenv.relation_get()
    env = {}
    env.update(os.environ)
    env['ETCDCTL_ENDPOINT'] = hookenv.config().get('etcd')
    check_call([
        'etcdctl', 'set', '/{{ relay_name }}/{{ counterpart }}',
        json.dumps(local_data)
    ],
               env=env)

    # Record the relay bookkeeping in the unit's key/value store.
    kv = unitdata.kv()
    kv.set('relay.local.relation.name', relname)
    kv.set('relay.local.relation.role', role)
    kv.set('relay.remote.relation.role', '{{ counterpart }}')
    kv.flush(save=True)
Example #16
0
 def arg_gen():
     # use a generator to defer calling of hookenv.relation_type, for tests
     relation = RelationBase.from_name(hookenv.relation_type())
     if relation:
         yield relation
 def depart_conv():
     # Resolve the relation for the active hook and depart its conversation.
     RelationBase.from_name(hookenv.relation_type()).conversation().depart()
 def __init__(self, relation_name=None, units=None, scope=None):
     # Each attribute falls back to the active hook context when not given.
     self.relation_name = relation_name or hookenv.relation_type()
     self.units = set(units) if units else {hookenv.remote_unit()}
     self.scope = scope or hookenv.remote_unit()
Example #19
0
 def depart_conv():
     # Depart the conversation belonging to the active relation hook.
     relation_name = hookenv.relation_type()
     RelationBase.from_name(relation_name).conversation().depart()
Example #20
0
def ha_relation_changed():
    """Configure the corosync/pacemaker cluster from principal relation data.

    Defers until corosync configuration is available, enough peers have
    joined, and exactly one principal (ha/juju-info) relation exists. The
    leader then creates/updates all pacemaker resources, constraints and
    remote nodes, and finally informs peers and principals of readiness.
    """
    # Check that we are related to a principle and that
    # it has already provided the required corosync configuration
    if not get_corosync_conf():
        log('Unable to configure corosync right now, deferring configuration',
            level=INFO)
        return

    relids_hanode = relation_ids('hanode')
    if relids_hanode:
        log('Ready to form cluster - informing peers', level=DEBUG)
        relation_set(relation_id=relids_hanode[0], ready=True)

        # If a trigger-corosync-update attribute exists in the relation,
        # the Juju leader may have requested all its peers to update
        # the corosync.conf list of nodes. If it's the case, no other
        # action will be run (a future hook re: ready=True may trigger
        # other logic)
        # NOTE(lourot): it's not necessary to test for
        # `remote_unit() != principal_unit()` here as this is only False (both
        # units are the same) when the relation type is `ha`.
        if (relation_type() == 'hanode'
                and trigger_corosync_update_from_leader(
                    remote_unit(), relids_hanode[0])):
            return

    else:
        log('Ready to form cluster, but not related to peers just yet',
            level=INFO)
        return

    # Check that there's enough nodes in order to perform the
    # configuration of the HA cluster
    if len(get_cluster_nodes()) < int(config('cluster_count')):
        log('Not enough nodes in cluster, deferring configuration', level=INFO)
        return

    relids = relation_ids('ha') or relation_ids('juju-info')
    if len(relids) == 1:  # Should only ever be one of these
        # Obtain relation information
        relid = relids[0]
        units = related_units(relid)
        if len(units) < 1:
            log('No principle unit found, deferring configuration', level=INFO)
            return

        unit = units[0]
        log('Parsing cluster configuration using rid: %s, unit: %s' %
            (relid, unit),
            level=DEBUG)
        resources = parse_data(relid, unit, 'resources')
        delete_resources = parse_data(relid, unit, 'delete_resources')
        resource_params = parse_data(relid, unit, 'resource_params')
        groups = parse_data(relid, unit, 'groups')
        ms = parse_data(relid, unit, 'ms')
        orders = parse_data(relid, unit, 'orders')
        colocations = parse_data(relid, unit, 'colocations')
        clones = parse_data(relid, unit, 'clones')
        locations = parse_data(relid, unit, 'locations')
        init_services = parse_data(relid, unit, 'init_services')
    else:
        log('Related to %s ha services' % (len(relids)), level=DEBUG)
        return

    if True in [ra.startswith('ocf:openstack') for ra in resources.values()]:
        apt_install('openstack-resource-agents')
    if True in [ra.startswith('ocf:ceph') for ra in resources.values()]:
        apt_install('ceph-resource-agents')

    if True in [ra.startswith('ocf:maas') for ra in resources.values()]:
        try:
            validate_dns_ha()
        except MAASConfigIncomplete as ex:
            log(ex.args[0], level=ERROR)
            status_set('blocked', ex.args[0])
            # if an exception is raised the hook will end up in error state
            # which will obfuscate the workload status and message.
            return

        log('Setting up access to MAAS API', level=INFO)
        setup_maas_api()
        # Update resource_parms for DNS resources to include MAAS URL and
        # credentials
        for resource in resource_params.keys():
            if resource.endswith("_hostname"):
                res_ipaddr = get_ip_addr_from_resource_params(
                    resource_params[resource])
                resource_params[resource] += (
                    ' maas_url="{}" maas_credentials="{}"'
                    ''.format(config('maas_url'), config('maas_credentials')))
                write_maas_dns_address(resource, res_ipaddr)

    # NOTE: this should be removed in 15.04 cycle as corosync
    # configuration should be set directly on subordinate
    configure_corosync()
    try_pcmk_wait()

    # Only configure the cluster resources
    # from the oldest peer unit.
    if is_leader():
        run_initial_setup()
        log('Setting cluster symmetry', level=INFO)
        set_cluster_symmetry()
        # FIX: the original used "'Deleting Resources' % (delete_resources)",
        # a %-format with no conversion specifier, which raises TypeError for
        # non-mapping arguments.
        log('Deleting Resources: %s' % (delete_resources,), level=DEBUG)
        for res_name in delete_resources:
            if pcmk.crm_opt_exists(res_name):
                if ocf_file_exists(res_name, resources):
                    log('Stopping and deleting resource %s' % res_name,
                        level=DEBUG)
                    if pcmk.crm_res_running(res_name):
                        pcmk.commit('crm -w -F resource stop %s' % res_name)
                else:
                    log('Cleanuping and deleting resource %s' % res_name,
                        level=DEBUG)
                    pcmk.commit('crm resource cleanup %s' % res_name)
                # Daemon process may still be running after the upgrade.
                kill_legacy_ocf_daemon_process(res_name)

                # Stop the resource before the deletion (LP: #1838528)
                log('Stopping %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F resource stop %s' % res_name)
                log('Deleting %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F configure delete %s' % res_name)

        log('Configuring Resources: %s' % (resources), level=DEBUG)
        for res_name, res_type in resources.items():
            # disable the service we are going to put in HA
            if res_type.split(':')[0] == "lsb":
                disable_lsb_services(res_type.split(':')[1])
                if service_running(res_type.split(':')[1]):
                    service_stop(res_type.split(':')[1])
            elif (len(init_services) != 0 and res_name in init_services
                  and init_services[res_name]):
                disable_upstart_services(init_services[res_name])
                if service_running(init_services[res_name]):
                    service_stop(init_services[res_name])
            # Put the services in HA, if not already done so
            # if not pcmk.is_resource_present(res_name):
            if not pcmk.crm_opt_exists(res_name):
                if res_name not in resource_params:
                    cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                                   res_type)
                else:
                    cmd = ('crm -w -F configure primitive %s %s %s' %
                           (res_name, res_type, resource_params[res_name]))

                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)
                if config('monitor_host'):
                    cmd = ('crm -F configure location Ping-%s %s rule '
                           '-inf: pingd lte 0' % (res_name, res_name))
                    pcmk.commit(cmd)

            else:
                # the resource already exists so it will be updated.
                code = pcmk.crm_update_resource(res_name, res_type,
                                                resource_params.get(res_name))
                if code != 0:
                    msg = "Cannot update pcmkr resource: {}".format(res_name)
                    status_set('blocked', msg)
                    raise Exception(msg)

        log('Configuring Groups: %s' % (groups), level=DEBUG)
        for grp_name, grp_params in groups.items():
            if not pcmk.crm_opt_exists(grp_name):
                cmd = ('crm -w -F configure group %s %s' %
                       (grp_name, grp_params))
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
        for ms_name, ms_params in ms.items():
            if not pcmk.crm_opt_exists(ms_name):
                cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Orders: %s' % (orders), level=DEBUG)
        for ord_name, ord_params in orders.items():
            if not pcmk.crm_opt_exists(ord_name):
                cmd = 'crm -w -F configure order %s %s' % (ord_name,
                                                           ord_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Clones: %s' % clones, level=DEBUG)
        for cln_name, cln_params in clones.items():
            if not pcmk.crm_opt_exists(cln_name):
                cmd = 'crm -w -F configure clone %s %s' % (cln_name,
                                                           cln_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        # Ordering is important here, colocation and location constraints
        # reference resources. All resources referenced by the constraints
        # need to exist otherwise constraint creation will fail.

        log('Configuring Colocations: %s' % colocations, level=DEBUG)
        for col_name, col_params in colocations.items():
            if not pcmk.crm_opt_exists(col_name):
                cmd = 'crm -w -F configure colocation %s %s' % (col_name,
                                                                col_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Locations: %s' % locations, level=DEBUG)
        for loc_name, loc_params in locations.items():
            if not pcmk.crm_opt_exists(loc_name):
                cmd = 'crm -w -F configure location %s %s' % (loc_name,
                                                              loc_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        for res_name, res_type in resources.items():
            if len(init_services) != 0 and res_name in init_services:
                # Checks that the resources are running and started.
                # Ensure that clones are excluded as the resource is
                # not directly controllable (dealt with below)
                # Ensure that groups are cleaned up as a whole rather
                # than as individual resources.
                if (res_name not in clones.values()
                        and res_name not in groups.values()
                        and not pcmk.crm_res_running(res_name)):
                    # Just in case, cleanup the resources to ensure they get
                    # started in case they failed for some unrelated reason.
                    cmd = 'crm resource cleanup %s' % res_name
                    pcmk.commit(cmd)

        for cl_name in clones:
            # Always cleanup clones
            cmd = 'crm resource cleanup %s' % cl_name
            pcmk.commit(cmd)

        for grp_name in groups:
            # Always cleanup groups
            cmd = 'crm resource cleanup %s' % grp_name
            pcmk.commit(cmd)

        # All members of the cluster need to be registered before resources
        # that reference them can be created.
        if len(get_member_ready_nodes()) >= int(config('cluster_count')):
            log('Configuring any remote nodes', level=INFO)
            remote_resources = configure_pacemaker_remote_resources()
            resources.update(remote_resources)
            configure_resources_on_remotes(resources=resources,
                                           clones=clones,
                                           groups=groups)

            stonith_resources = {}
            stonith_remote_res = configure_pacemaker_remote_stonith_resource()
            stonith_resources.update(stonith_remote_res)
            if stonith_remote_res:
                stonith_peer_res = configure_peer_stonith_resource()
                stonith_resources.update(stonith_peer_res)
            configure_resources_on_remotes(resources=stonith_resources,
                                           clones=clones,
                                           groups=groups)
            configure_stonith()
        else:
            log('Deferring configuration of any remote nodes', level=INFO)

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id, clustered="yes")

    # Inform peers that local configuration is complete and this member
    # is ready
    for rel_id in relids_hanode:
        relation_set(relation_id=rel_id, member_ready=True)
Example #21
0
 def arg_gen():
     # use a generator to defer calling of hookenv.relation_type, for tests
     current = RelationBase.from_name(hookenv.relation_type())
     if current:
         yield current
Example #22
0
    def test_gets_the_relation_type(self, os_):
        """relation_type() should report the JUJU_RELATION env variable."""
        os_.environ = {'JUJU_RELATION': 'foo'}

        self.assertEqual(hookenv.relation_type(), 'foo')
Example #23
0
 def test_relation_type_none_if_not_in_environment(self, os_):
     # Without JUJU_RELATION in the (mocked) environment, relation_type()
     # should return None.
     os_.environ = {}
     self.assertEqual(hookenv.relation_type(), None)
Example #24
0
 def __init__(self, relation_name=None, units=None, scope=None):
     """Initialize from explicit values or the active hook context."""
     self.relation_name = relation_name or hookenv.relation_type()
     initial_units = units or [hookenv.remote_unit()]
     self.units = set(initial_units)
     self.scope = scope or hookenv.remote_unit()
Example #25
0
#!/usr/bin/env python3
# Relay hook: publish this unit's local relation data into etcd so a
# counterpart can mirror it, then chain into update-status. The `{{ ... }}`
# placeholders are presumably rendered when the charm/hook is templated —
# TODO confirm against the build step.

import json
import os
import sys
from subprocess import check_call

sys.path.append('lib')

from charmhelpers.core import hookenv, unitdata


if __name__ == '__main__':
    # Relation name/role for the hook that triggered this script.
    relname = hookenv.relation_type()
    role, _ = hookenv.relation_to_role_and_interface(relname)

    # This unit's settings on the current relation, published to etcd.
    local_data = hookenv.relation_get()
    env = {}
    env.update(os.environ)
    env['ETCDCTL_ENDPOINT'] = hookenv.config().get('etcd')
    check_call(['etcdctl', 'set', '/{{ relay_name }}/{{ counterpart }}', json.dumps(local_data)], env=env)

    # Record the relay bookkeeping in the unit's key/value store.
    kv = unitdata.kv()
    kv.set('relay.local.relation.name', relname)
    kv.set('relay.local.relation.role', role)
    kv.set('relay.remote.relation.role', '{{ counterpart }}')
    kv.flush(save=True)

    # Invoke update-status immediately to trigger polling etcd
    # (os.execl replaces this process; nothing after this line runs).
    os.execl(os.path.join(hookenv.charm_dir(), 'hooks/update-status'), 'update-status')