def keystone_fid_service_provider_changed():
    if get_api_version() < 3:
        log('Identity federation is only supported with keystone v3')
        return
    if CompareOpenStackReleases(os_release('keystone')) < 'ocata':
        log('Ignoring keystone-fid-service-provider relation as it is'
            ' not supported on releases older than Ocata')
        return
    # for the join case a keystone public-facing hostname and service
    # port need to be set
    update_keystone_fid_service_provider(relation_id=relation_id())

    # handle relation data updates (if any), e.g. remote_id_attribute
    # and a restart will be handled via a nonce, not restart_on_change
    CONFIGS.write(KEYSTONE_CONF)

    # The relation is container-scoped so this keystone unit's unitdata
    # will only contain a nonce of a single fid subordinate for a given
    # fid backend (relation id)
    restart_nonce = relation_get('restart-nonce')
    if restart_nonce:
        nonce = json.loads(restart_nonce)
        # multiplex by relation id for multiple federated identity
        # provider charms
        fid_nonce_key = 'fid-restart-nonce-{}'.format(relation_id())
        db = unitdata.kv()
        # compare and store the decoded nonce consistently, so a repeated
        # hook run with an unchanged nonce does not trigger a restart
        if nonce != db.get(fid_nonce_key):
            restart_keystone()
            db.set(fid_nonce_key, nonce)
            db.flush()
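
# The snippet above consumes a 'restart-nonce' published by a federated
# identity subordinate. A minimal sketch of the publishing side, assuming a
# charmhelpers-based subordinate (the function name and uuid-based nonce are
# illustrative, not from the source):
import json
import uuid

from charmhelpers.core.hookenv import relation_set


def signal_keystone_restart():
    # Publishing a fresh nonce on the container-scoped relation tells the
    # principal (keystone) to restart exactly once per change.
    relation_set(relation_settings={
        'restart-nonce': json.dumps(str(uuid.uuid4())),
    })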
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = {rid: {unit: creds[rid][unit]
                       for unit in related_units(rid)
                       if unit in creds[rid]}
                 for rid in relation_ids("contrail-ifmap")
                 if rid in creds}

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = {"username": unit, "password": pwgen(32)}
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
def cluster_relation_changed():
    cluster_data = {}
    # Useful when doing runtime based configuration. (units added after cluster
    # bootstrap) see docs:
    # https://github.com/coreos/etcd/blob/master/Documentation/runtime-configuration.md
    if leader_status:
        token = cluster_token()
        print('Initializing cluster with {}'.format(token))
        hookenv.relation_set(hookenv.relation_id(),
                             {'leader-address': private_address,
                              'cluster-state': 'existing',
                              'cluster-token': token,
                              'cluster': cluster_string()})
        cluster_data['cluster'] = cluster_string()
    else:
        # A token is only generated once on a cluster.
        token = hookenv.relation_get('cluster-token')
        cluster_data['cluster'] = hookenv.relation_get('cluster')

    if not token:
        print("No token available on relationship - exiting")
        return
    cluster_data['token'] = token
    main(cluster_data)
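
# cluster_string() is referenced above but not shown. A hypothetical sketch
# of such a helper, assuming etcd's initial-cluster format and a 'cluster'
# peer relation (the unit naming and port 2380 are illustrative):
def cluster_string():
    members = []
    for rid in hookenv.relation_ids('cluster'):
        for unit in hookenv.related_units(rid):
            addr = hookenv.relation_get('private-address', unit, rid)
            # yields e.g. "etcd0=http://10.0.0.1:2380,etcd1=http://10.0.0.2:2380"
            members.append('{}=http://{}:2380'.format(
                unit.replace('/', ''), addr))
    return ','.join(members)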
    def get_remote(self, key, default=None):
        """
        Get a value from the remote end(s) of this conversation.

        Note that if a conversation's scope encompasses multiple units, then
        those units are expected to agree on their data, whether that is through
        relying on a single leader to set the data or by all units eventually
        converging to identical data.  Thus, this method returns the first
        value that it finds set by any of its units.
        """
        cur_rid = hookenv.relation_id()
        departing = hookenv.hook_name().endswith("-relation-departed")
        for relation_id in self.relation_ids:
            units = hookenv.related_units(relation_id)
            if departing and cur_rid == relation_id:
                # Work around the fact that Juju 2.0 doesn't include the
                # departing unit in relation-list during the -departed hook,
                # by adding it back in ourselves.
                units.append(hookenv.remote_unit())
            for unit in units:
                if unit not in self.units:
                    continue
                value = hookenv.relation_get(key, unit, relation_id)
                if value:
                    return value
        return default
def get_all_remote(conv, key):
    """
    The Conversation.get_remote method above returns only the first value
    found. Sometimes we want all remote key/values for conversations with
    GLOBAL or SERVICE scope.

    conv is the conversation to work with; this needs to be called from a
    relation hook handler.
    """
    values = {}
    cur_rid = hookenv.relation_id()
    departing = hookenv.hook_name().endswith('-relation-departed')
    for relation_id in conv.relation_ids:
        units = hookenv.related_units(relation_id)
        if departing and cur_rid == relation_id:
            # Work around the fact that Juju 2.0 doesn't include the
            # departing unit in relation-list during the -departed hook,
            # by adding it back in ourselves.
            units.append(hookenv.remote_unit())
        for unit in units:
            if unit not in conv.units:
                continue
            value = hookenv.relation_get(key, unit, relation_id)
            if value:
                values[unit] = value
    return values
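
# Usage sketch for the helpers above, assuming conv is an active
# conversation obtained inside a relation hook handler:
def log_remote_addresses(conv):
    # get_remote: first non-empty value published by any remote unit.
    port = conv.get_remote('port', default='8080')
    # get_all_remote: one entry per remote unit, e.g. {'vimdb/0': '10.1.1.105'}.
    for unit, addr in sorted(get_all_remote(conv, 'private-address').items()):
        hookenv.log('{} published {} (port {})'.format(unit, addr, port))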
    def join(cls, scope):
        """
        Get or create a conversation for the given scope and active hook context.

        The current remote unit for the active hook context will be added to
        the conversation.

        Note: This uses :mod:`charmhelpers.core.unitdata` and requires that
        :meth:`~charmhelpers.core.unitdata.Storage.flush` be called.
        """
        relation_name = hookenv.relation_type()
        relation_id = hookenv.relation_id()
        unit = hookenv.remote_unit()
        service = hookenv.remote_service_name()
        if scope is scopes.UNIT:
            scope = unit
            namespace = relation_id
        elif scope is scopes.SERVICE:
            scope = service
            namespace = relation_id
        else:
            namespace = relation_name
        key = cls._key(namespace, scope)
        data = unitdata.kv().get(key, {'namespace': namespace, 'scope': scope, 'units': []})
        conversation = cls.deserialize(data)
        conversation.units.add(unit)
        unitdata.kv().set(key, cls.serialize(conversation))
        return conversation
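
# Usage sketch: join the current remote unit at service scope from within a
# relation hook, then flush unitdata as the docstring requires (Conversation
# and scopes are assumed to come from charms.reactive):
conv = Conversation.join(scopes.SERVICE)
unitdata.kv().flush()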
def mock_relation_set(relation_id=None, relation_settings=None, **kwargs):
    if relation_id is None:
        relation_id = hookenv.relation_id()
    unit = hookenv.local_unit()
    relinfo = mock_relation_get(unit=unit, rid=relation_id)
    if relation_settings is not None:
        relinfo.update(relation_settings)
    relinfo.update(kwargs)
    return None
def get_service_ip(endpoint):
    try:
        info = network_get(endpoint, relation_id())
        if 'ingress-addresses' in info:
            addr = info['ingress-addresses'][0]
            if len(addr):
                return addr
        else:
            log("No ingress-addresses: {}".format(info))
    except Exception as e:
        log("Caught exception checking for service IP: {}".format(e))

    return None
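
# Usage sketch (the 'website' endpoint name is hypothetical):
ip = get_service_ip('website')
if ip:
    log('service reachable at {}'.format(ip))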
def website_relation_changed(*args):
    """
    Set the hostname and the port for reverse proxy relations
    """
    config = hookenv.config()
    port_config = PORTS.get(config['service_type'])
    if port_config:
        port = port_config['open']
    else:
        port = PORTS['signing']['open']

    relation_set(
        relation_id(), {'port': port, 'hostname': local_unit().split('/')[0]})
def mock_relation_get(attribute=None, unit=None, rid=None):
    if rid is None:
        rid = hookenv.relation_id()
    if unit is None:
        unit = hookenv.remote_unit()
    service, unit_num = unit.split('/')
    unit_num = int(unit_num)
    relinfos.setdefault(rid, {})
    relinfos[rid].setdefault(
        unit, {'private-address': '10.20.0.{}'.format(unit_num)})
    relinfo = relinfos[rid][unit]
    if attribute is None or attribute == '-':
        return relinfo
    return relinfo.get(attribute)
def cluster_joined():
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_settings = {'private-address': addr,
                             'hostname': socket.gethostname()}
        log("Setting cluster relation: '%s'" % (relation_settings),
            level=INFO)
        relation_set(relation_settings=relation_settings)

    # Ensure all new peers are aware
    cluster_state_uuid = relation_get('bootstrap-uuid', unit=local_unit())
    if cluster_state_uuid:
        notify_bootstrapped(cluster_rid=relation_id(),
                            cluster_uuid=cluster_state_uuid)
def configure_relation_data():
    cfg = config()
    endpoint = endpoint_from_flag('endpoint.bitcoind.joined')

    info = network_get('bitcoind', relation_id())
    log('network info {0}'.format(info))
    host = info['ingress-addresses'][0]
    if host == "":
        log("no service address yet")
        return
    else:
        endpoint.configure(host=host, port=cfg.get('btc-rpcport'),
                           user=cfg.get('btc-rpcuser'),
                           password=cfg.get('btc-rpcpassword'))
def update_nrpe_config(nagios):
    unit_data = unitdata.kv()
    nagios_hostname = unit_data.get('nagios.hostname', None)
    nagios_host_context = unit_data.get('nagios.host_context', None)

    # require the nrpe-external-master relation to provide the host context
    if in_relation_hook() and relation_id().startswith(
            'nrpe-external-master:'):
        rel = relation_get()
        if 'nagios_host_context' in rel:
            nagios_host_context = rel['nagios_host_context']
            unit_data.set('nagios.host_context', nagios_host_context)

            # We have to strip the nagios host context from the nagios hostname
            # since the nagios.add_check will put it back again...
            nagios_hostname = rel['nagios_hostname']
            if nagios_hostname.startswith(nagios_host_context + '-'):
                nagios_hostname = nagios_hostname[len(nagios_host_context +
                                                      '-'):]

            unit_data.set('nagios.hostname', nagios_hostname)

    if not nagios_hostname or not nagios_host_context:
        return
    # The above boilerplate is needed until this issue is fixed:
    #
    # https://github.com/cmars/nrpe-external-master-interface/issues/6

    status_set('maintenance', 'Updating Nagios configs')

    creds = Credentials()
    check = [
        '/usr/lib/nagios/plugins/check_http',
        '-H',
        'localhost',
        '-p',
        '8080',
        '-u',
        urlparse(Api().url).path,
        '-a',
        "{}:{}".format(creds.username(), creds.token()),
    ]
    nagios.add_check(check,
                     name="check_jenkins_http",
                     description="Verify Jenkins HTTP is up.",
                     context=nagios_host_context,
                     unit=nagios_hostname)

    status_set('active', 'Ready')
def database_relation_joined():
    juju_log("database_relation_joined")
    my_hostname = unit_get('public-address')
    my_port = config('port')
    my_replset = config('replicaset')
    juju_log("my_hostname: %s" % my_hostname)
    juju_log("my_port: %s" % my_port)
    juju_log("my_replset: %s" % my_replset)
    return (relation_set(
        relation_id(), {
            'hostname': my_hostname,
            'port': my_port,
            'replset': my_replset,
            'type': 'database',
        }))
def cluster_with():
    if is_unit_paused_set():
        log("Do not run cluster_with while unit is paused", "WARNING")
        return

    log('Clustering with new node')

    # check the leader and try to cluster with it
    node = leader_node()
    if node:
        if node in running_nodes():
            log('Host already clustered with %s.' % node)

            cluster_rid = relation_id('cluster', local_unit())
            is_clustered = relation_get(attribute='clustered',
                                        rid=cluster_rid,
                                        unit=local_unit())

            log('am I clustered?: %s' % bool(is_clustered), level=DEBUG)
            if not is_clustered:
                # NOTE(freyes): this node needs to be marked as clustered, it's
                # part of the cluster according to 'rabbitmqctl cluster_status'
                # (LP: #1691510)
                relation_set(relation_id=cluster_rid,
                             clustered=get_unit_hostname(),
                             timestamp=time.time())

            return False
        # NOTE: The primary problem rabbitmq has with clustering is when
        # more than one node attempts to cluster at the same time.
        # The asynchronous nature of hook firing nearly guarantees this,
        # so use cluster_wait (based on modulo_distribution) to stagger
        # the join attempts.
        cluster_wait()
        try:
            join_cluster(node)
            # NOTE: toggle the cluster relation to ensure that any peers
            #       already clustered re-assess status correctly
            relation_set(clustered=get_unit_hostname(), timestamp=time.time())
            return True
        except subprocess.CalledProcessError as e:
            status_set('blocked', 'Failed to cluster with %s. Exception: %s'
                       % (node, e))
            start_app()
    else:
        status_set('waiting', 'Leader not available for clustering')
        return False

    return False
def notify_relation(relation, changed=False, relation_ids=None):
    default_host = get_hostname()
    default_port = 80

    for rid in relation_ids or get_relation_ids(relation):
        service_names = set()
        if rid is None:
            rid = relation_id()
        for relation_data in relations_for_id(rid):
            if 'service_name' in relation_data:
                service_names.add(relation_data['service_name'])

            if changed:
                if 'is-proxy' in relation_data:
                    remote_service = (
                        "%s__%d" %
                        (relation_data['hostname'], relation_data['port']))
                    open(
                        "%s/%s.is.proxy" %
                        (default_haproxy_service_config_dir, remote_service),
                        'a').close()

        service_name = None
        if len(service_names) == 1:
            service_name = service_names.pop()
        elif len(service_names) > 1:
            log("Remote units requested more than a single service name."
                "Falling back to default host/port.")

        if service_name is not None:
            # If a specific service has been asked for then return the ip:port
            # for that service, else pass back the default
            requestedservice = get_config_service(service_name)
            my_host = get_hostname(requestedservice['service_host'])
            my_port = requestedservice['service_port']
        else:
            my_host = default_host
            my_port = default_port

        all_services = ""
        services_dict = create_services()
        if services_dict is not None:
            # sort deterministically by service name (dicts themselves are
            # not orderable in Python 3)
            all_services = yaml.safe_dump(
                sorted(services_dict.values(),
                       key=lambda s: s.get('service_name', '')))

        relation_set(relation_id=rid,
                     port=str(my_port),
                     hostname=my_host,
                     all_services=all_services)
def cluster_with():
    log('Clustering with new node')

    # check the leader and try to cluster with it
    node = leader_node()
    if node:
        if node in running_nodes():
            log('Host already clustered with %s.' % node)

            cluster_rid = relation_id('cluster', local_unit())
            is_clustered = relation_get(attribute='clustered', rid=cluster_rid)

            log('am I clustered?: %s' % bool(is_clustered), level=DEBUG)
            if not is_clustered:
                # NOTE(freyes): this node needs to be marked as clustered, it's
                # part of the cluster according to 'rabbitmqctl cluster_status'
                # (LP: #1691510)
                relation_set(relation_id=cluster_rid,
                             clustered=get_unit_hostname(),
                             timestamp=time.time())

            return False
        # NOTE: The primary problem rabbitmq has with clustering is when
        # more than one node attempts to cluster at the same time.
        # The asynchronous nature of hook firing nearly guarantees
        # this. Using a random wait is a hack until we can
        # implement charmhelpers.coordinator.
        status_set('maintenance',
                   'Random wait for join_cluster to avoid collisions')
        time.sleep(random.random() * 100)
        try:
            join_cluster(node)
            # NOTE: toggle the cluster relation to ensure that any peers
            #       already clustered re-assess status correctly
            relation_set(clustered=get_unit_hostname(), timestamp=time.time())
            return True
        except subprocess.CalledProcessError as e:
            status_set('blocked', 'Failed to cluster with %s. Exception: %s'
                       % (node, e))
            start_app()
    else:
        status_set('waiting', 'Leader not available for clustering')
        return False

    return False
def provide_database(mysql):
    log('db requested')

    for request, application in mysql.database_requests().items():
        log('request -> {0} for app -> {1}'.format(request, application))
        database_name = get_state('database')
        user = get_state('user')
        password = get_state('password')

        log('db params: {0}:{1}@{2}'.format(user, password, database_name))
        info = network_get('server', relation_id())
        log('network info {0}'.format(info))

        mysql.provide_database(
            request_id=request,
            host=info['ingress-addresses'][0],
            port=3306,
            database_name=database_name,
            user=user,
            password=password,
        )
def replica_set_relation_joined():
    juju_log("replica_set_relation_joined")
    my_hostname = unit_get('public-address')
    my_port = config('port')
    my_replset = config('replicaset')
    my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
    juju_log("my_hostname: %s" % my_hostname)
    juju_log("my_port: %s" % my_port)
    juju_log("my_replset: %s" % my_replset)
    juju_log("my_install_order: %s" % my_install_order)
    enable_replset(my_replset)
    restart_mongod()

    relation_set(
        relation_id(), {
            'hostname': my_hostname,
            'port': my_port,
            'replset': my_replset,
            'install-order': my_install_order,
            'type': 'replset',
        })
def is_pod_up(endpoint):
    """Check to see if the pod of a relation is up.

    application-vimdb: 19:29:10 INFO unit.vimdb/0.juju-log network info

    In the example below:
    - 10.1.1.105 is the address of the application pod.
    - 10.152.183.199 is the service cluster ip

    {
        'bind-addresses': [{
            'macaddress': '',
            'interfacename': '',
            'addresses': [{
                'hostname': '',
                'address': '10.1.1.105',
                'cidr': ''
            }]
        }],
        'egress-subnets': [
            '10.152.183.199/32'
        ],
        'ingress-addresses': [
            '10.152.183.199',
            '10.1.1.105'
        ]
    }
    """
    try:
        info = network_get(endpoint, relation_id())

        # Check to see if the pod has been assigned its internal and
        # external IPs
        for ingress in info['ingress-addresses']:
            if len(ingress) == 0:
                return False
    except Exception:
        return False

    return True
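
# Usage sketch (the 'db' endpoint name is hypothetical): gate client
# configuration until the pod has all of its ingress addresses.
if is_pod_up('db'):
    status_set('active', 'pod is ready')
else:
    status_set('waiting', 'waiting for pod ingress addresses')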
def neutron_plugin_api_subordinate_relation_joined(relid=None):
    relation_data = {}
    if is_db_initialised():
        db_migration_key = 'migrate-database-nonce'
        if not relid:
            relid = relation_id()
        leader_key = '{}-{}'.format(db_migration_key, relid)
        for unit in related_units(relid):
            nonce = relation_get(db_migration_key, rid=relid, unit=unit)
            if nonce:
                if is_leader() and leader_get(leader_key) != nonce:
                    migrate_neutron_database(upgrade=True)
                    # track nonce in leader storage to avoid superfluous
                    # migrations
                    leader_set({leader_key: nonce})
                # set nonce back on relation to signal completion to other end
                # we do this regardless of leadership status so that
                # subordinates connected to non-leader units can proceed.
                relation_data[db_migration_key] = nonce

    relation_data['neutron-api-ready'] = 'no'
    if is_api_ready(CONFIGS):
        relation_data['neutron-api-ready'] = 'yes'
    if not manage_plugin():
        neutron_cc_ctxt = NeutronCCContext()()
        plugin_instance = NeutronApiSDNContext()
        neutron_config_data = {
            k: v
            for k, v in neutron_cc_ctxt.items()
            if plugin_instance.is_allowed(k)
        }
        if neutron_config_data:
            relation_data['neutron_config_data'] = json.dumps(
                neutron_config_data)
    relation_set(relation_id=relid, **relation_data)

    # there is no race condition with the neutron service restart
    # as juju propagates the changes done in relation_set only after
    # the hook exits
    CONFIGS.write_all()
def contrail_controller_joined():
    settings = {"private-address": get_ip(), "port": 8082}
    relation_set(relation_settings=settings)
    if is_leader():
        update_southbound_relations(rid=relation_id())
    '''
    mkdir -p $TMPDIR

    mkdir -p /var/vcap/nfs/shared

    sed -i "s|ccadmin:[email protected]:5432/ccdb|$DB_SCHEMA_USER:$DB_SCHEMA_PASSWORD@$DB_HOST:$DB_HOST_PORT/$DB_DB|" $CLOUD_CONTROLLER_NG_CONFIG

    juju-log $JUJU_REMOTE_UNIT modified its settings
    juju-log Relation settings:
    relation-get
    juju-log Relation members:
    relation-list
    '''


@hooks.hook('nats-relation-changed')
def nats_relation_changed():
    pass


hook_name = os.path.basename(sys.argv[0])
juju_log_dir = "/var/log/juju"

if __name__ == '__main__':
    # Hook and context overview. The various replication and client
    # hooks interact in complex ways.
    log("Running {} hook".format(hook_name))
    if hookenv.relation_id():
        log("Relation {} with {}".format(
            hookenv.relation_id(), hookenv.remote_unit()))
    hooks.execute(sys.argv)
def contrail_auth_joined():
    update_relations(rid=relation_id())
    update_status()
def analyticsdb_joined():
    settings = {"private-address": get_ip(), 'unit-type': 'controller'}
    relation_set(relation_settings=settings)
    if is_leader():
        update_northbound_relations(rid=relation_id())
def website_interface(hook_name=None):
    if hook_name is None:
        return None
    # Notify website relation but only for the current relation in context.
    notify_website(changed=hook_name == "changed",
                   relation_ids=(relation_id(),))
def contrail_kubernetes_config_joined():
    _notify_contrail_kubernetes_node(rid=relation_id())
def contrail_controller_joined():
    _notify_controller(rid=relation_id())
def tls_certificates_relation_joined():
    _update_tls(rid=relation_id())
def cluster_joined():
    _notify_cluster(rid=relation_id())
def network_relation_changed():
    relation_id = hookenv.relation_id()
    hookenv.relation_set(relation_id, ignore_errors=True)
def amqp_joined():
    _notify_amqp(rid=relation_id())
def proxy_relation_changed():
    hookenv.relation_set(hookenv.relation_id(), {'cluster': cluster_string()})
def analyticsdb_joined():
    settings = {'private-address': get_ip()}
    relation_set(relation_settings=settings)
    if is_leader():
        _update_relation(rid=relation_id())
def analyticsdb_cluster_joined():
    _update_cluster(rid=relation_id())
def contrail_analytics_joined():
    _update_analytics(rid=relation_id())
def cluster_relation_joined():
    hookenv.log('cluster relation joined')
    return hookenv.relation_set(
        hookenv.relation_id(),
        return_information('qdb-cluster', get_this_hostname(),
                           get_this_port('qdb_port')))
def contrail_issu_relation_joined():
    update_issu_relations(rid=relation_id())
def database_relation_joined():
    hookenv.log("database relation joined")
    return hookenv.relation_set(
        hookenv.relation_id(),
        return_information('qdb', get_this_hostname(),
                           get_this_port('qdb_port')))
def analyticsdb_joined():
    _update_analyticsdb(rid=relation_id())
def http_services_joined():
    _notify_proxy_services(rid=relation_id())
def proxy_relation_changed():
    hookenv.relation_set(hookenv.relation_id(),
                         {'cluster': cluster_string()})
def admin_relation_joined():
    hookenv.log('admin relation joined')
    return hookenv.relation_set(
        hookenv.relation_id(),
        return_information('http', get_this_hostname(),
                           get_this_port('admin_port')))