Example #1
def cluster_changed():
    data = relation_get()
    log("Peer relation changed with {}: {}".format(remote_unit(), data))

    ip = data.get("unit-address")
    data_ip = data.get("data-address")
    if not ip or not data_ip:
        log("There is no unit-address or data-address in the relation")
        return

    if config.get('local-rabbitmq-hostname-resolution'):
        rabbit_hostname = data.get('rabbitmq-hostname')
        if ip and rabbit_hostname:
            utils.update_hosts_file(ip, rabbit_hostname)

    if is_leader():
        unit = remote_unit()
        _address_changed(unit, ip, 'ip')
        _address_changed(unit, data_ip, 'data_ip')

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_ziu("cluster-changed")
    utils.update_charm_status()
Example #2
 def wrapper(servicename, *args, **kw):
     if hookenv.remote_unit():
         hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(),
                                                   func.__name__,
                                                   hookenv.remote_unit()))
     else:
         hookenv.log("** Action {}/{}".format(hookenv.hook_name(),
                                              func.__name__))
     return func(*args, **kw)
Example #4
def cluster_changed():
    data = relation_get()
    log("Peer relation changed with {}: {}".format(remote_unit(), data))

    ip = data.get("unit-address")
    if not ip:
        log("There is no unit-address in the relation")
    elif is_leader():
        unit = remote_unit()
        _address_changed(unit, ip)
        utils.update_charm_status()
Example #5
def cluster_joined(rel_id=None):
    ip = common_utils.get_ip()
    settings = {
        "unit-address":
        ip,
        "data-address":
        common_utils.get_ip(config_param="data-network", fallback=ip)
    }

    if config.get('local-rabbitmq-hostname-resolution'):
        settings.update({
            "rabbitmq-hostname":
            utils.get_contrail_rabbit_hostname(),
        })

        # a remote unit might have already set rabbitmq-hostname if
        # it came up before this unit was provisioned so the -changed
        # event will not fire for it and we have to handle it here
        data = relation_get()
        log("Joined the peer relation with {}: {}".format(remote_unit(), data))
        ip = data.get("unit-address")
        rabbit_hostname = data.get('rabbitmq-hostname')
        if ip and rabbit_hostname:
            utils.update_hosts_file(ip, rabbit_hostname)

    relation_set(relation_id=rel_id, relation_settings=settings)
    utils.update_charm_status()
Example #6
def unprovision_metadata():
    if not remote_unit():
        return
    relation = relation_type()
    ip = None
    if relation == "neutron-metadata":
        ip = gethostbyname(relation_get("private-address"))
    else:
        ip = [
            gethostbyname(relation_get("private-address", unit, rid))
            for rid in relation_ids("neutron-metadata")
            for unit in related_units(rid)
        ][0]
    user = None
    password = None
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
    else:
        user, password = [(relation_get("service_username", unit, rid),
                           relation_get("service_password", unit, rid))
                          for rid in relation_ids("identity-admin")
                          for unit in related_units(rid)][0]
    log("Unprovisioning metadata service {}:8775".format(ip))
    check_call([
        "contrail-provision-linklocal", "--api_server_ip", "127.0.0.1",
        "--api_server_port",
        str(api_port()), "--linklocal_service_name", "metadata",
        "--linklocal_service_ip", "169.254.169.254",
        "--linklocal_service_port", "80", "--ipfabric_service_ip", ip,
        "--ipfabric_service_port", "8775", "--oper", "del", "--admin_user",
        user, "--admin_password", password
    ])
Example #7
    def connection_string(self, unit=None):
        ''':class:`ConnectionString` to the remote unit, or None.

        unit defaults to the active remote unit.

        You should normally use the master or standbys attributes rather
        than this method.

        If the unit is related multiple times using the same relation
        name, the first one found is returned.
        '''
        if unit is None:
            unit = hookenv.remote_unit()

        found = False
        for relation in self.relations:
            if unit not in relation.joined_units:
                continue
            found = True
            conn_str = _cs(relation.joined_units[unit])
            if conn_str:
                return conn_str

        if found:
            return None  # unit found, but not yet ready.

        raise LookupError(unit)  # unit is not related.
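The docstring above distinguishes three outcomes: a ConnectionString when the remote unit has published its data, None when the unit is related but not yet ready, and LookupError when the unit is not related at all. A minimal caller sketch covering all three cases (the `pgsql` endpoint object and the hook name are illustrative assumptions, not part of the original example):

from charmhelpers.core import hookenv

def handle_db_relation_changed(pgsql):
    # `pgsql` is assumed to expose connection_string() with the
    # semantics documented above.
    try:
        conn_str = pgsql.connection_string()
    except LookupError:
        hookenv.log("Unit {} is not related".format(hookenv.remote_unit()))
        return
    if conn_str is None:
        hookenv.log("Unit is related but has not published data yet")
        return
    hookenv.log("Connection string ready: {}".format(conn_str))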
Example #9
def get_all_remote(conv, key):
    """
    The current Conversation.get_remote implementation only returns a
    single value, but sometimes we want all remote key/value pairs for
    the GLOBAL and SERVICE conversation scopes.
    conv is the conversation to work with.
    This needs to be called from a relation hook handler.
    """
    values = {}
    cur_rid = hookenv.relation_id()
    departing = hookenv.hook_name().endswith('-relation-departed')
    for relation_id in conv.relation_ids:
        units = hookenv.related_units(relation_id)
        if departing and cur_rid == relation_id:
            # Work around the fact that Juju 2.0 doesn't include the
            # departing unit in relation-list during the -departed hook,
            # by adding it back in ourselves.
            units.append(hookenv.remote_unit())
        for unit in units:
            if unit not in conv.units:
                continue
            value = hookenv.relation_get(key, unit, relation_id)
            if value:
                values[unit] = value
    return values
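A short usage sketch, assuming a reactive relation hook where conv is the current conversation and 'hostname' is an illustrative key:

from charmhelpers.core import hookenv

def collect_peer_hostnames(conv):
    # Returns a dict such as {'myapp/0': 'host-a', 'myapp/1': 'host-b'}.
    hostnames = get_all_remote(conv, 'hostname')
    for unit, hostname in hostnames.items():
        hookenv.log("{} advertises hostname {}".format(unit, hostname))
    return hostnames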
Example #10
def client_relation(relid=None, unit=None):
    send_osd_settings()
    if ready_for_service():
        log('mon cluster in quorum and osds bootstrapped '
            '- providing client with keys, processing broker requests')
        service_name = get_client_application_name(relid, unit)
        if not service_name:
            log('Unable to determine remote service name, deferring '
                'processing of broker requests')
            return
        public_addr = get_public_addr()
        data = {
            'key': ceph.get_named_key(service_name),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr
        }
        rbd_features = get_rbd_features()
        if rbd_features:
            data['rbd-features'] = rbd_features
        if not unit:
            unit = remote_unit()
        if is_unsupported_cmr(unit):
            return
        data.update(
            handle_broker_request(relid, unit, add_legacy_response=True))
        relation_set(relation_id=relid, relation_settings=data)
Example #11
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ceph.is_quorum() and related_osds():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr,
        }

        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum or no osds - deferring key provision')
Example #12
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = {
            rid: {
                unit: units[unit]
                for unit, units in ((unit, creds[rid])
                                    for unit in related_units(rid))
                if unit in units
            }
            for rid in relation_ids("contrail-ifmap") if rid in creds
        }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = {"username": unit, "password": pwgen(32)}
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #13
def remove_proxy():
    """Remove the haproxy configuration when the relation is removed."""
    hookenv.status_set("maintenance", "Removing reverse proxy relation")
    hookenv.log("Removing config for: {}".format(hookenv.remote_unit()))

    hookenv.status_set("active", HEALTHY)
    clear_flag("reverseproxy.configured")
Example #14
    def conversation(self, scope=None):
        """
        Get a single conversation, by scope, that this relation is currently handling.

        If the scope is not given, the correct scope is inferred by the current
        hook execution context.  If there is no current hook execution context, it
        is assumed that there is only a single global conversation scope for this
        relation.  If this relation's scope is not global and there is no current
        hook execution context, then an error is raised.
        """
        if scope is None:
            if self.scope is scopes.UNIT:
                scope = hookenv.remote_unit()
            elif self.scope is scopes.SERVICE:
                scope = hookenv.remote_service_name()
            else:
                scope = self.scope
        if scope is None:
            raise ValueError(
                'Unable to determine default scope: no current hook or global scope'
            )
        for conversation in self._conversations:
            if conversation.scope == scope:
                return conversation
        else:
            raise ValueError("Conversation with scope '%s' not found" % scope)
Example #15
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        if scope is scopes.UNIT:
            scope = unit
            namespace = relation_id
        elif scope is scopes.SERVICE:
            scope = service
            namespace = relation_id
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        data = unitdata.kv().get(key, {'namespace': namespace, 'scope': scope, 'units': []})
        conversation = cls.deserialize(data)
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
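Because join() writes the unit list through unitdata.kv(), the note about flush() is load-bearing: without a flush the membership change is not persisted. A minimal hook-body sketch, assuming Conversation and scopes are importable from charms.reactive.relations as in older charms.reactive releases:

from charmhelpers.core import hookenv, unitdata
from charms.reactive.relations import Conversation, scopes

def relation_joined_hook():
    # Record the current remote unit in the SERVICE-scoped conversation.
    conv = Conversation.join(scopes.SERVICE)
    hookenv.log("Joined conversation {}".format(conv.scope))
    # Persist the updated unit list.
    unitdata.kv().flush()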
Example #16
 def new_munge_consumer(self):
     remote_unit = hookenv.remote_unit()
     if remote_unit:
         mk = leadership.leader_get('munge_key')
         hookenv.log(
             'new_munge_consumer(): join event from %s, publishing key: %s'
             % (remote_unit, mk))
Example #17
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        if scope is scopes.UNIT:
            scope = unit
            namespace = relation_id
        elif scope is scopes.SERVICE:
            scope = service
            namespace = relation_id
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        data = unitdata.kv().get(key, {
            'namespace': namespace,
            'scope': scope,
            'units': []
        })
        conversation = cls.deserialize(data)
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
Example #18
    def get_remote(self, key, default=None):
        """
        Get a value from the remote end(s) of this conversation.

        Note that if a conversation's scope encompasses multiple units, then
        those units are expected to agree on their data, whether that is through
        relying on a single leader to set the data or by all units eventually
        converging to identical data.  Thus, this method returns the first
        value that it finds set by any of its units.
        """
        cur_rid = hookenv.relation_id()
        departing = hookenv.hook_name().endswith('-relation-departed')
        for relation_id in self.relation_ids:
            units = hookenv.related_units(relation_id)
            if departing and cur_rid == relation_id:
                # Work around the fact that Juju 2.0 doesn't include the
                # departing unit in relation-list during the -departed hook,
                # by adding it back in ourselves.
                units.append(hookenv.remote_unit())
            for unit in units:
                if unit not in self.units:
                    continue
                value = hookenv.relation_get(key, unit, relation_id)
                if value:
                    return value
        return default
Example #19
 def departed(self):
     path = self._rsyslog_conf_path(hookenv.remote_unit())
     if path and os.path.exists(path):
         os.remove(path)
         reactive.set_state("syslog.needs_restart")
     self.remove_state("{relation_name}.available")
     self.conversation().depart()
Example #20
    def get_remote(self, key, default=None):
        """
        Get a value from the remote end(s) of this conversation.

        Note that if a conversation's scope encompasses multiple units, then
        those units are expected to agree on their data, whether that is through
        relying on a single leader to set the data or by all units eventually
        converging to identical data.  Thus, this method returns the first
        value that it finds set by any of its units.
        """
        cur_rid = hookenv.relation_id()
        departing = hookenv.hook_name().endswith("-relation-departed")
        for relation_id in self.relation_ids:
            units = hookenv.related_units(relation_id)
            if departing and cur_rid == relation_id:
                # Work around the fact that Juju 2.0 doesn't include the
                # departing unit in relation-list during the -departed hook,
                # by adding it back in ourselves.
                units.append(hookenv.remote_unit())
            for unit in units:
                if unit not in self.units:
                    continue
                value = hookenv.relation_get(key, unit, relation_id)
                if value:
                    return value
        return default
Example #21
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #22
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    """Process broker request(s)."""
    if ceph.is_quorum():
        settings = relation_get(rid=relid, unit=unit)
        if 'broker_req' in settings:
            if not ceph.is_leader():
                log("Not leader - ignoring broker request", level=DEBUG)
            else:
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                log('mon cluster in quorum - providing radosgw with keys')
                public_addr = get_public_addr()
                data = {
                    'fsid': leader_get('fsid'),
                    'radosgw_key': ceph.get_radosgw_key(),
                    'auth': config('auth-supported'),
                    'ceph-public-address': public_addr,
                    unit_response_key: rsp,
                }
                relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
Example #23
def cluster_departed():
    if is_leader():
        unit = remote_unit()
        for var_name in ["ip", "data_ip"]:
            ips = common_utils.json_loads(
                leader_get("controller_{}s".format(var_name)), dict())
            if unit not in ips:
                return
            old_ip = ips.pop(unit)
            ip_list = common_utils.json_loads(
                leader_get("controller_{}_list".format(var_name)), list())
            ip_list.remove(old_ip)
            log("{}_LIST: {}    {}S: {}".format(var_name.upper(), str(ip_list),
                                                var_name.upper(), str(ips)))

            settings = {
                "controller_{}_list".format(var_name): json.dumps(ip_list),
                "controller_{}s".format(var_name): json.dumps(ips)
            }
            leader_set(settings=settings)

    update_northbound_relations()
    update_southbound_relations()
    update_issu_relations()
    utils.update_charm_status()
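This example leans on a pattern that recurs throughout the corpus: leader settings only hold strings, so dicts and lists are round-tripped through JSON with leader_get/leader_set. The pattern in isolation, with illustrative key names:

import json
from charmhelpers.core.hookenv import leader_get, leader_set

def record_controller_ip(unit, ip):
    # Leader settings store strings only, hence the JSON round-trip.
    ips = json.loads(leader_get("controller_ips") or "{}")
    ips[unit] = ip
    leader_set({"controller_ips": json.dumps(ips)})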
Example #24
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ready_for_service():
        log('mon cluster in quorum and osds bootstrapped '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr,
        }
        key_name = relation_get('key_name', unit=unit, rid=relid)
        if key_name:
            # New style, per unit keys
            data['{}_key'.format(key_name)] = (ceph.get_radosgw_key(
                name=key_name))
        else:
            # Old style global radosgw key
            data['radosgw_key'] = ceph.get_radosgw_key()

        data.update(handle_broker_request(relid, unit))
        relation_set(relation_id=relid, relation_settings=data)
Example #25
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ready():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': config('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': 'cephx',
            'ceph-public-address': public_addr,
        }

        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            rsp = process_requests(settings['broker_req'])
            unit_id = unit.replace('/', '-')
            unit_response_key = 'broker-rsp-' + unit_id
            data[unit_response_key] = rsp

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
Example #26
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ready():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': config('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': 'cephx',
            'ceph-public-address': public_addr,
        }

        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
Example #27
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'

    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    db_host = get_db_host(addr, interface=relation_type())

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #28
def get_all_remote(conv, key):
    """
    Current conversation method get_remote implementation only
    return one value. But sometime we want get all remote key/value
    for conversation scope GLOBAL and SERVICE
    conv is the conversation to work with .
    This need to be called in a relation hook handler
    """
    values = {}
    cur_rid = hookenv.relation_id()
    departing = hookenv.hook_name().endswith('-relation-departed')
    for relation_id in conv.relation_ids:
        units = hookenv.related_units(relation_id)
        if departing and cur_rid == relation_id:
            # Work around the fact that Juju 2.0 doesn't include the
            # departing unit in relation-list during the -departed hook,
            # by adding it back in ourselves.
            units.append(hookenv.remote_unit())
        for unit in units:
            if unit not in conv.units:
                continue
            value = hookenv.relation_get(key, unit, relation_id)
            if value:
                values[unit] = value
    return values
Example #30
    def get_clustername_ack(self):
        epunit = hookenv.remote_unit()
        hookenv.log("get_clustername_ack(): remote unit: %s" % epunit)
        joined_units = self.all_joined_units

        # also pick up ip-address etc. for dbd here
        if epunit is not None:
            namerequest = joined_units[epunit].received.get('requested_clustername')
            nameresult = joined_units[epunit].received.get('accepted_clustername')
            dbd_host = joined_units[epunit].received.get('dbd_host')
            if nameresult:
                hookenv.log("get_clustername_ack(): name %s was accepted by %s on %s" % (nameresult, epunit, dbd_host))
                # all is fine
                flags.set_flag('slurm-controller.dbdname-accepted')
            else:
                status_set('blocked', 'Cluster name %s rejected by DBD on %s: name already taken. Run juju config <slurm-controller-charm> clustername=New_Name' % (namerequest, epunit))
                hookenv.log("get_clustername_ack(): request for %s was rejected by %s" % (namerequest, epunit))
                flags.clear_flag('slurm-controller.dbdname-requested')

            """
            TODO: raise some flag so that layer-slurm-controller reconfigures
            itself+peers and updates config on all nodes
            """
        # clear all the flags that was sent in changed() on the provider side
        flags.clear_flag('endpoint.slurm-dbd-consumer.changed.requested_clustername')
        flags.clear_flag('endpoint.slurm-dbd-consumer.changed.accepted_clustername')
Example #31
def rbd_mirror_relation(relid=None, unit=None, recurse=True):
    if ready_for_service():
        log('mon cluster in quorum and osds bootstrapped '
            '- providing rbd-mirror client with keys')
        if not unit:
            unit = remote_unit()
        # handle broker requests first to get an updated pool map
        data = (handle_broker_request(relid, unit, recurse=recurse))
        data.update({
            'auth':
            config('auth-supported'),
            'ceph-public-address':
            get_public_addr(),
            'pools':
            json.dumps(ceph.list_pools_detail(), sort_keys=True)
        })
        cluster_addr = get_cluster_addr()
        if cluster_addr:
            data['ceph-cluster-address'] = cluster_addr
        # handle both classic and reactive Endpoint peers
        try:
            unique_id = json.loads(
                relation_get('unique_id', unit=unit, rid=relid))
        except (TypeError, json.decoder.JSONDecodeError):
            unique_id = relation_get('unique_id', unit=unit, rid=relid)
        if unique_id:
            data['{}_key'.format(unique_id)] = ceph.get_rbd_mirror_key(
                'rbd-mirror.{}'.format(unique_id))

        relation_set(relation_id=relid, relation_settings=data)

        # make sure clients are updated with the appropriate RBD features
        # bitmap.
        if recurse:
            notify_client()
Example #32
def mds_relation_joined(relid=None, unit=None):
    if ceph.is_quorum() and related_osds():
        log('mon cluster in quorum and OSDs related '
            '- providing mds client with keys')
        mds_name = relation_get(attribute='mds-name', rid=relid, unit=unit)
        if not unit:
            unit = remote_unit()
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'mds_key': ceph.get_mds_key(name=mds_name),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr
        }
        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring mds broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('Waiting on mon quorum or min osds before provisioning mds keys')
Example #33
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
Example #34
def remove_proxy():
    hookenv.status_set(
        'maintenance',
        'Removing reverse proxy relation')
    hookenv.log("Removing config for: {}".format(
        hookenv.remote_unit()))

    hookenv.status_set('active', HEALTHY)
    clear_flag('reverseproxy.configured')
Example #35
 def get_remote_cert(self):
     """
     Return the signed certificate from the relation data.
     """
     # Get the conversation scoped to the service name.
     conv = self.conversation()
     # We want to get the value for this service's conversation only.
     cert_id = '{0}_client_cert'.format(hookenv.remote_unit())
     return conv.get_remote(cert_id)
Example #36
 def get_config_names(self, configs):
     ''' Note this requires a remote unit '''
     names = []
     for index, config in enumerate(configs):
         remote_unit = hookenv.remote_unit().replace(
             '/', '-') + '-{}'.format(index)
         backend_name = config['group_id'] or remote_unit
         names.append((remote_unit, backend_name))
     return names
Example #37
 def get_remote_key(self):
     """
     Return the client key from the relation data.
     """
     # Get the conversation scoped to the unit name.
     conv = self.conversation()
     # We want to get the value for this unit's conversation only.
     key_id = '{0}_client_key'.format(hookenv.remote_unit())
     return conv.get_remote(key_id)
Example #38
 def departed(self):
     """Indicate the relation is no longer available and not connected."""
     # Slave hostname is derived from unit name so
     # this is pretty safe
     slavehost = remote_unit()
     log("Deleting slave with hostname %s." % slavehost)
     api = Api()
     api.delete_node(slavehost.replace("/", "-"))
     self.remove_state("{relation_name}.available")
     self.remove_state("{relation_name}.connected")
Example #39
def configure_proxy():
    """Configure reverse proxy settings when haproxy is related."""
    hookenv.status_set("maintenance", "Applying reverse proxy configuration")
    hookenv.log("Configuring reverse proxy via: {}".format(hookenv.remote_unit()))

    interface = endpoint_from_name("reverseproxy")
    gitlab.configure_proxy(interface)

    hookenv.status_set("active", HEALTHY)
    set_flag("reverseproxy.configured")
Example #40
def setCluster_config():
    relation = relations()
    idKubeMaster = relation_ids()[0]
    remoteUnit = remote_unit()
    # Get Kubemaster IP address.
    kubeMasterIP = relation["kubemaster"][idKubeMaster][remoteUnit][
        "private-address"]
    call([
        "kubectl", "config", "set-cluster", "juju",
        "--insecure-skip-tls-verify=true", "--server=http://" + kubeMasterIP
    ])
Example #41
 def depart(self):
     """
     Remove the current remote unit, for the active hook context, from
     this conversation.  This should be called from a `-departed` hook.
     """
     unit = hookenv.remote_unit()
     self.units.remove(unit)
     if self.units:
         unitdata.kv().set(self.key, self.serialize(self))
     else:
         unitdata.kv().unset(self.key)
Example #42
 def endpoint(self):
     """
     Returns the monitoring endpoint.
     """
     service = hookenv.remote_unit()
     if service:
         conv = self.conversation(scope=service)
         pa = conv.get_remote('hostname') or conv.get_remote('private-address')
         return pa
     else:
         return None
Example #43
def ssh_directory_for_unit():
    remote_service = remote_unit().split('/')[0]
    _dir = os.path.join(NOVA_SSH_DIR, remote_service)
    for d in [NOVA_SSH_DIR, _dir]:
        if not os.path.isdir(d):
            os.mkdir(d)
    for f in ['authorized_keys', 'known_hosts']:
        f = os.path.join(_dir, f)
        if not os.path.isfile(f):
            open(f, 'w').close()
    return _dir
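Several examples here (this one, #45, #46) rely on the same idiom: a Juju unit name has the form '<application>/<index>', so split('/')[0] yields the application name. In isolation:

def application_of(unit_name):
    # 'nova-compute/2' -> 'nova-compute'
    return unit_name.split('/')[0]

assert application_of('nova-compute/2') == 'nova-compute'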
Example #44
    def joined_changed(self):
        """
        Check and set relation data, and set the requested state.
        """
        # Get the conversation scoped to the service name.
        conv = self.conversation()
        unit = hookenv.remote_unit()

        if self.previous_cert(unit) != self.requested_cert(unit):
            conv.set_state('key.cert.requested')
Example #45
def client_relation():
    log('Begin client-relation hook.')

    if ceph.is_quorum():
        log('mon cluster in quorum - providing client with keys')
        service_name = remote_unit().split('/')[0]
        relation_set(key=ceph.get_named_key(service_name),
                     auth=config('auth-supported'))
    else:
        log('mon cluster not in quorum - deferring key provision')

    log('End client-relation hook.')
Example #46
def ssh_directory_for_unit(user=None):
    remote_service = remote_unit().split("/")[0]
    if user:
        remote_service = "{}_{}".format(remote_service, user)
    _dir = os.path.join(NOVA_SSH_DIR, remote_service)
    for d in [NOVA_SSH_DIR, _dir]:
        if not os.path.isdir(d):
            os.mkdir(d)
    for f in ["authorized_keys", "known_hosts"]:
        f = os.path.join(_dir, f)
        if not os.path.isfile(f):
            open(f, "w").close()
    return _dir
Example #47
def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is non-leader)" % (remote_unit()),
        level=DEBUG)
    settings = relation_get() or {}

    # Check whether we have been requested to stop proxy service
    rq_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC
    token = settings.get(rq_key, None)
    if token:
        log("Peer request to stop proxy service received (%s) - sending ack" %
            (token), level=INFO)
        service_stop('swift-proxy')
        peers_only = settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    log("Non-leader peer - checking if updated rings available", level=DEBUG)
    broker = settings.get('builder-broker', None)
    if not broker:
        log("No update available", level=DEBUG)
        if not is_paused():
            service_start('swift-proxy')
        return

    builders_only = int(settings.get('sync-only-builders', 0))
    path = os.path.basename(get_www_dir())
    try:
        sync_proxy_rings('http://%s/%s' % (broker, path),
                         rings=not builders_only)
    except CalledProcessError:
        log("Ring builder sync failed, builders not yet available - "
            "leader not ready?", level=WARNING)
        return None

    # Re-enable the proxy once all builders and rings are synced
    if fully_synced():
        log("Ring builders synced - starting proxy", level=INFO)
        CONFIGS.write_all()
        if not is_paused():
            service_start('swift-proxy')
    else:
        log("Not all builders and rings synced yet - waiting for peer sync "
            "before starting proxy", level=INFO)
Example #48
    def depart(self):
        """
        Remove the current remote unit, for the active hook context, from
        this conversation.  This should be called from a `-departed` hook.

        TODO: Need to figure out a way to have this called implicitly, to
        ensure cleaning up of conversations that are no longer needed.
        """
        unit = hookenv.remote_unit()
        self.units.remove(unit)
        if self.units:
            unitdata.kv().set(self.key, self.serialize(self))
        else:
            unitdata.kv().unset(self.key)
Example #49
def ssh_directory_for_unit(unit=None, user=None):
    if unit:
        remote_service = unit.split('/')[0]
    else:
        remote_service = remote_unit().split('/')[0]
    if user:
        remote_service = "{}_{}".format(remote_service, user)
    _dir = os.path.join(NOVA_SSH_DIR, remote_service)
    for d in [NOVA_SSH_DIR, _dir]:
        if not os.path.isdir(d):
            os.mkdir(d)
    for f in ['authorized_keys', 'known_hosts']:
        f = os.path.join(_dir, f)
        if not os.path.isfile(f):
            open(f, 'w').close()
    return _dir
Example #50
def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is leader)" % (remote_unit()),
        level=DEBUG)

    # If we have received an ack, check other units
    settings = relation_get() or {}
    ack_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK

    # Protect against leader changing mid-sync
    if settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC):
        log("Sync request received yet this is leader unit. This would "
            "indicate that the leader has changed mid-sync - stopping proxy "
            "and notifying peers", level=ERROR)
        service_stop('swift-proxy')
        SwiftProxyClusterRPC().notify_leader_changed()
        return
    elif ack_key in settings:
        token = settings[ack_key]
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'%s'" % (key))
                raise SwiftProxyCharmException(msg)

            peers_only = int(get_first_available_value(responses, key,
                                                       default=0))
            log("Syncing rings and builders (peers-only=%s)" % (peers_only),
                level=DEBUG)
            broadcast_rings_available(token, storage=not peers_only)
        else:
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (got %s)" % (responses), level=INFO)

    CONFIGS.write_all()
Example #51
def unprovision_vrouter():
    relation = relation_type()
    if relation and not remote_unit():
        return
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    a_ip = config.previous("contrail-api-ip")
    a_port = None
    if a_ip:
        a_port = config.previous("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    elif relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          relation_get("port", unit, rid))
                         for rid in relation_ids("contrail-api")
                         for unit in related_units(rid) ][0]
    user = None
    password = None
    tenant = None
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        user, password, tenant = [ (relation_get("service_username", unit, rid),
                                    relation_get("service_password", unit, rid),
                                    relation_get("service_tenant_name", unit, rid))
                                   for rid in relation_ids("identity-admin")
                                   for unit in related_units(rid) ][0]
    log("Unprovisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Example #52
def client_relation(relid=None):
    if ceph.is_quorum():
        log('mon cluster in quorum - providing client with keys')
        service_name = None
        if relid is None:
            service_name = remote_unit().split('/')[0]
        else:
            units = related_units(relid)
            if len(units) > 0:
                service_name = units[0].split('/')[0]
        if service_name is not None:
            data = {
                'key': ceph.get_named_key(service_name),
                'auth': config('auth-supported'),
                'ceph-public-address': get_public_addr(),
            }
            relation_set(relation_id=relid,
                         relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
Example #53
def client_relation_changed():
    """Process broker requests from ceph client relations."""
    if ready():
        settings = relation_get()
        if 'broker_req' in settings:
            if not ceph.is_leader():
                log("Not leader - ignoring broker request", level=DEBUG)
            else:
                rsp = process_requests(settings['broker_req'])
                unit_id = remote_unit().replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                # broker_rsp is being left for backward compatibility,
                # unit_response_key supersedes it
                data = {
                    'broker_rsp': rsp,
                    unit_response_key: rsp,
                }
                relation_set(relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
Example #54
def client_relation_joined(relid=None):
    if ready():
        service_name = None
        if relid is None:
            units = [remote_unit()]
            service_name = units[0].split('/')[0]
        else:
            units = related_units(relid)
            if len(units) > 0:
                service_name = units[0].split('/')[0]

        if service_name is not None:
            public_addr = get_public_addr()
            data = {'key': ceph.get_named_key(service_name),
                    'auth': 'cephx',
                    'ceph-public-address': public_addr}
            relation_set(relation_id=relid,
                         relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
Example #55
def unprovision_control():
    if not remote_unit():
        return
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    relation = relation_type()
    a_ip = None
    a_port = None
    if relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          relation_get("port", unit, rid))
                         for rid in relation_ids("contrail-api")
                         for unit in related_units(rid) ][0]
    user = None
    password = None
    tenant = None
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        user, password, tenant = [ (relation_get("service_username", unit, rid),
                                    relation_get("service_password", unit, rid),
                                    relation_get("service_tenant_name", unit, rid))
                                   for rid in relation_ids("identity-admin")
                                   for unit in related_units(rid) ][0]
    log("Unprovisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Example #56
def unprovision_local_metadata():
    relation = relation_type()
    if relation and not remote_unit():
        return
    a_ip = config.previous("contrail-api-ip")
    a_port = None
    if a_ip:
        a_port = config.previous("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    elif relation == "contrail-api":
        a_ip = gethostbyname(relation_get("private-address"))
        a_port = relation_get("port")
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          relation_get("port", unit, rid))
                         for rid in relation_ids("contrail-api")
                         for unit in related_units(rid) ][0]
    user = None
    password = None
    if relation == "identity-admin":
        user = relation_get("service_username")
        password = relation_get("service_password")
    else:
        user, password = [ (relation_get("service_username", unit, rid),
                            relation_get("service_password", unit, rid))
                           for rid in relation_ids("identity-admin")
                           for unit in related_units(rid) ][0]
    log("Unprovisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password])
Example #57
def db_changed(relation_id=None, unit=None, admin=None):

    # Is this db-admin or db relation
    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    if admin:
        relation_name = 'db-admin'
    else:
        relation_name = 'db'

    if not seeded():
        log("Percona cluster not yet bootstrapped - deferring {} relation "
            "until bootstrapped.".format(relation_name), DEBUG)
        return

    if not is_leader() and client_node_is_ready():
        clear_and_populate_client_db_relations(relation_id, relation_name)
        return

    # Bail if leader is not ready
    if not leader_node_is_ready():
        return

    db_name, _ = (unit or remote_unit()).split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    db_host = get_db_host(addr, interface=relation_name)

    peer_store_and_set(relation_id=relation_id,
                       user=username,
                       password=password,
                       host=db_host,
                       database=db_name)
Example #58
    def conversation(self, scope=None):
        """
        Get a single conversation, by scope, that this relation is currently handling.

        If the scope is not given, the correct scope is inferred by the current
        hook execution context.  If there is no current hook execution context, it
        is assumed that there is only a single global conversation scope for this
        relation.  If this relation's scope is not global and there is no current
        hook execution context, then an error is raised.
        """
        if scope is None:
            if self.scope is scopes.UNIT:
                scope = hookenv.remote_unit()
            elif self.scope is scopes.SERVICE:
                scope = hookenv.remote_service_name()
            else:
                scope = self.scope
        if scope is None:
            raise ValueError('Unable to determine default scope: no current hook or global scope')
        for conversation in self._conversations:
            if conversation.scope == scope:
                return conversation
        else:
            raise ValueError("Conversation with scope '%s' not found" % scope)