Example #1
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
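The monitors payload assembled above serializes to a small YAML document on the relation. A minimal sketch of the wire format (the check shortname and command are illustrative):

import yaml

nrpe_monitors = {"check_disk": {"command": "check_disk"}}  # illustrative check
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
print(yaml.dump(monitors))
# monitors:
#   remote:
#     nrpe:
#       check_disk:
#         command: check_disk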
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
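The pruning comprehension above packs three filters into one expression. Unrolled, it is equivalent to the following sketch (same charmhelpers calls as the hook itself):

def prune_ifmap_creds(creds):
    # Keep credentials only for relations that still exist and, within
    # each relation, only for units that are still related.
    pruned = {}
    for rid in relation_ids("contrail-ifmap"):
        if rid not in creds:
            continue
        pruned[rid] = {unit: creds[rid][unit]
                       for unit in related_units(rid)
                       if unit in creds[rid]}
    return pruned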
Example #3
    def __init__(self, hostname=None, primary=True):
        super(NRPE, self).__init__()
        self.config = config()
        self.primary = primary
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []
        # Iff in an nrpe-external-master relation hook, set primary status
        relation = relation_ids('nrpe-external-master')
        if relation:
            log("Setting charm primary status {}".format(primary))
            for rid in relation_ids('nrpe-external-master'):
                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
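Typical charm-side usage of this class, per the charmhelpers NRPE API (the check definition itself is illustrative):

nrpe = NRPE()
nrpe.add_check(shortname='proc_myservice',
               description='Verify myservice is running',
               check_cmd='check_procs -c 1:1 -a myservice')
nrpe.write()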
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ready():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': config('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': 'cephx',
            'ceph-public-address': public_addr,
        }

        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('FSID or admin key not provided, please configure them')
def gateway_node_joined(relation_id=None):
    '''
    This hook is run when relation between plumgrid-gateway and
    plumgrid-director is made.
    '''
    rel_data = {'gateway-peer': 'gateway-peer'}
    relation_set(relation_id=relation_id, **rel_data)
def add_contrail_api():
    # check relation dependencies
    if not config_get("contrail-api-configured") \
       and config_get("amqp-ready") \
       and config_get("cassandra-ready") \
       and config_get("identity-admin-ready") \
       and config_get("zookeeper-ready"):
        api_p = api_port()
        port = str(api_p)
        try:
            # wait until api is up
            check_url("http://localhost:" + port)
        except urllib2.URLError:
            log("contrail-api service has failed to start correctly on port {}".format(port),
                "CRITICAL")
            log("This is typically due to a runtime error in related services",
                "CRITICAL")
            raise
        config["contrail-api-configured"] = True

        # inform relations
        for rid in relation_ids("contrail-api"):
            relation_set(relation_id=rid, port=api_p, vip=config.get("vip"))

        configure_floating_ip_pools()
def object_store_joined(relation_id=None):
    relation_data = {
        'swift-url':
        "{}:{}".format(canonical_url(CONFIGS, INTERNAL), config('bind-port'))
    }

    relation_set(relation_id=relation_id, **relation_data)
def cluster_joined(relation_id=None):
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        with open(rabbit.COOKIE_PATH, 'r') as cookie_file:
            cookie = cookie_file.read().strip()
        peer_store('cookie', cookie)
def nova_cell_relation_joined(rid=None, remote_restart=True):
    rel_settings = {
        'nova_url': "%s:8774/v2" % canonical_url(CONFIGS, INTERNAL)
    }
    if remote_restart:
        rel_settings['restart_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, **rel_settings)
Example #10
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        for rid in relation_ids("local-monitors"):
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
def update_nova_consoleauth_config():
    """
    Configure nova-consoleauth pacemaker resources
    """
    relids = relation_ids('ha')
    if len(relids) == 0:
        log('Related to {} ha services'.format(len(relids)), level='DEBUG')
        ha_relid = None
        data = {}
    else:
        ha_relid = relids[0]
        data = relation_get(rid=ha_relid) or {}

    # initialize keys in case this is a new dict
    data.setdefault('delete_resources', [])
    for k in ['colocations', 'init_services', 'resources', 'resource_params']:
        data.setdefault(k, {})

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            try:
                data['delete_resources'].remove(item)
            except ValueError:
                pass  # nothing to remove, we are good

        # the new pcmkr resources have to be added to the existing ones
        data['colocations']['vip_consoleauth'] = COLO_CONSOLEAUTH
        data['init_services']['res_nova_consoleauth'] = 'nova-consoleauth'
        data['resources']['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        data['resource_params']['res_nova_consoleauth'] = AGENT_CA_PARAMS

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        # nova-consoleauth will be managed by pacemaker, so mark it as manual
        if relation_ids('ha'):
            with open(NOVA_CONSOLEAUTH_OVERRIDE, 'w') as fp:
                fp.write('manual\n')
                fp.flush()

    elif (not config('single-nova-consoleauth') and
          console_attributes('protocol')):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            if item not in data['delete_resources']:
                data['delete_resources'].append(item)

        # remove them from the relation data, so they aren't recreated the
        # next time the hook runs
        data['colocations'].pop('vip_consoleauth', None)
        data['init_services'].pop('res_nova_consoleauth', None)
        data['resources'].pop('res_nova_consoleauth', None)
        data['resource_params'].pop('res_nova_consoleauth', None)

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        try:
            os.remove(NOVA_CONSOLEAUTH_OVERRIDE)
        except FileNotFoundError as e:
            log(str(e), level='DEBUG')
Example #12
def db_joined():
    if is_relation_made('pgsql-nova-db') or \
            is_relation_made('pgsql-neutron-db'):
        # error, postgresql is used
        e = ('Attempting to associate a mysql database when there is already '
             'associated a postgresql one')
        log(e, level=ERROR)
        raise Exception(e)

    if network_manager() in ['quantum', 'neutron']:
        config_neutron = True
    else:
        config_neutron = False

    if config('prefer-ipv6'):
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'),
                                          relation_prefix='nova')

        if config_neutron:
            sync_db_with_multi_ipv6_addresses(config('neutron-database'),
                                              config('neutron-database-user'),
                                              relation_prefix='neutron')
    else:
        host = unit_get('private-address')
        relation_set(nova_database=config('database'),
                     nova_username=config('database-user'),
                     nova_hostname=host)

        if config_neutron:
            # XXX: Renaming relations from quantum_* to neutron_* here.
            relation_set(neutron_database=config('neutron-database'),
                         neutron_username=config('neutron-database-user'),
                         neutron_hostname=host)
def identity_joined(rid=None):
    public_url = canonical_url(CONFIGS, PUBLIC)
    internal_url = canonical_url(CONFIGS, INTERNAL)
    admin_url = canonical_url(CONFIGS, ADMIN)
    relation_set(relation_id=rid, **determine_endpoints(public_url,
                                                        internal_url,
                                                        admin_url))
def set_ready_on_peers():
    """ Set ready on peers

    Notify peers this unit is clustered and ready to serve clients
    """
    for relid in relation_ids('cluster'):
        relation_set(relation_id=relid, ready=True)
Example #15
def apache_input(apache):
    template = """
[[inputs.apache]]
  urls = {{ urls }}
"""
    config_path = '{}/{}.conf'.format(get_configs_dir(), 'apache')
    port = '8080'
    vhost = render(source='apache-server-status.tmpl',
                   templates_dir=get_templates_dir(),
                   target=None,
                   context={'port': port})
    relation_info = {"ports": port,
                     "domain": "apache-status",
                     "enabled": True,
                     "site_config": vhost,
                     "site_modules": "status"}
    urls = []
    rels = hookenv.relations_of_type('apache')
    for rel in rels:
        hookenv.relation_set(rel['__relid__'], relation_settings=relation_info)
        addr = rel['private-address']
        url = 'http://{}:{}/server-status?auto'.format(addr, port)
        urls.append(url)
    if urls:
        context = {"urls": json.dumps(urls)}
        input_config = render_template(template, context) + \
            render_extra_options("inputs", "apache")
        hookenv.log("Updating {} plugin config file".format('apache'))
        host.write_file(config_path, input_config.encode('utf-8'))
        set_state('plugins.apache.configured')
    elif os.path.exists(config_path):
        os.unlink(config_path)
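For orientation, this is roughly what the rendered stanza looks like for a single related unit, using jinja2 directly in place of the charm's render_template helper (the address is illustrative):

import json
from jinja2 import Template

template = """
[[inputs.apache]]
  urls = {{ urls }}
"""
print(Template(template).render(
    urls=json.dumps(["http://10.0.0.5:8080/server-status?auto"])))
# [[inputs.apache]]
#   urls = ["http://10.0.0.5:8080/server-status?auto"]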
Example #16
    def set_remote(self, key=None, value=None, data=None, **kwdata):
        """
        Set data for the remote end(s) of this conversation.

        Data can be passed in either as a single dict or as keyword args.

        Note that, in Juju, setting relation data is inherently service scoped.
        That is, if the conversation only includes a single unit, the data will
        still be set for that unit's entire service.

        However, if this conversation's scope encompasses multiple services,
        the data will be set for all of those services.

        :param str key: The name of a field to set.
        :param value: A value to set.
        :param dict data: A mapping of keys to values.
        :param \*\*kwdata: A mapping of keys to values, as keyword arguments.
        """
        if data is None:
            data = {}
        if key is not None:
            data[key] = value
        data.update(kwdata)
        if not data:
            return
        for relation_id in self.relation_ids:
            hookenv.relation_set(relation_id, data)
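The three call styles accepted by set_remote are interchangeable; each line below sets the same relation data (a sketch, with conv standing for a conversation obtained from a charms.reactive relation):

conv.set_remote('port', 8080)
conv.set_remote(data={'port': 8080})
conv.set_remote(port=8080)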
def identity_joined(rid=None):
    juju_log('**********identity-service-relation-joined')
    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return

    public_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, PUBLIC),
        config('api-listening-port')
    )
    internal_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, INTERNAL),
        config('api-listening-port')
    )
    admin_url = '{}:{}/v1/$(tenant_id)s'.format(
        canonical_url(CONFIGS, ADMIN),
        config('api-listening-port')
    )
    settings = {
        'region': None,
        'service': None,
        'public_url': None,
        'internal_url': None,
        'admin_url': None,
        'vsm_region': config('region'),
        'vsm_service': 'vsm',
        'vsm_public_url': public_url,
        'vsm_internal_url': internal_url,
        'vsm_admin_url': admin_url,
    }
    juju_log("**********settings is %s" % str(settings))
    juju_log("**********relation_id is %s" % str(rid))
    relation_set(relation_id=rid, **settings)
Example #18
def create_ogr_zone(args):
    aggr_name = action_get('aggregate-name')
    avail_zone = action_get('avail-zone')
    ogr_compute = action_get('ogr-compute')

    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-create {} {}'"\
          .format(aggr_name, avail_zone)
    commands.getoutput(cmd)
    cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-add-host {} {}'"\
          .format(aggr_name, ogr_compute)
    commands.getoutput(cmd)
    if config("openstack-version") == "liberty" or \
       config("openstack-version") == "mitaka":
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-details {}'"\
              .format(aggr_name)
    else:
        cmd = "su - ubuntu -c 'source nova.rc && nova aggregate-show {}'"\
              .format(aggr_name)
    res = commands.getoutput(cmd)
    action_set({'result-map.message': res})
    relation_info = {
        'aggr-name': aggr_name
    }
    if config("openstack-version") == "pike" or \
       config("openstack-version") == "ocata":
        for rid in relation_ids('neutron-api-cplane'):
            for unit in related_units(rid):
                relation_set(relation_id=rid, relation_settings=relation_info)
Example #19
def lxd_relation_joined(rid=None):
    settings = {}
    settings['password'] = lxd_trust_password()
    settings['hostname'] = gethostname()
    settings['address'] = unit_get('private-address')
    relation_set(relation_id=rid,
                 relation_settings=settings)
def identity_joined(rid=None, relation_trigger=False):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC),
                                api_port('neutron-server'))
    admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN),
                               api_port('neutron-server'))
    internal_url = '{}:{}'.format(canonical_url(CONFIGS, INTERNAL),
                                  api_port('neutron-server')
                                  )
    rel_settings = {
        'neutron_service': 'neutron',
        'neutron_region': config('region'),
        'neutron_public_url': public_url,
        'neutron_admin_url': admin_url,
        'neutron_internal_url': internal_url,
        'quantum_service': None,
        'quantum_region': None,
        'quantum_public_url': None,
        'quantum_admin_url': None,
        'quantum_internal_url': None,
    }
    if relation_trigger:
        rel_settings['relation_trigger'] = str(uuid.uuid4())
    relation_set(relation_id=rid, relation_settings=rel_settings)
def db_changed(relation_id=None, unit=None, admin=None):
    if not is_elected_leader(DC_RESOURCE_NAME):
        log('Service is peered, clearing db relation'
            ' as this service unit is not the leader')
        relation_clear(relation_id)
        return

    if is_clustered():
        db_host = config('vip')
    else:
        if config('prefer-ipv6'):
            db_host = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            db_host = unit_get('private-address')

    if admin not in [True, False]:
        admin = relation_type() == 'db-admin'
    db_name, _ = remote_unit().split("/")
    username = db_name
    db_helper = get_db_helper()
    addr = relation_get('private-address', unit=unit, rid=relation_id)
    password = db_helper.configure_db(addr, db_name, username, admin=admin)

    relation_set(relation_id=relation_id,
                 relation_settings={
                     'user': username,
                     'password': password,
                     'host': db_host,
                     'database': db_name,
                 })
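Note how db_changed derives the database name from the remote application: Juju unit names have the form '<application>/<unit-number>', so splitting on '/' yields the application name (the unit name below is hypothetical):

db_name, _ = 'nova-cloud-controller/0'.split('/')
assert db_name == 'nova-cloud-controller'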
Example #22
def cluster_relation_changed():
    cluster_data = {}
    # Useful when doing runtime-based configuration (units added after
    # cluster bootstrap). See docs:
    # https://github.com/coreos/etcd/blob/master/Documentation/runtime-configuration.md
    if leader_status:
        token = cluster_token()
        print('Initializing cluster with {}'.format(token))
        hookenv.relation_set(hookenv.relation_id(),
                             {'leader-address': private_address,
                              'cluster-state': 'existing',
                              'cluster-token': token,
                              'cluster': cluster_string()})
        cluster_data['cluster'] = cluster_string()

    if not leader_status:
        # A token is only generated once on a cluster.
        token = hookenv.relation_get('cluster-token')
        cluster_data['cluster'] = hookenv.relation_get('cluster')

    if not token:
        print "No token available on relationship - exiting"
        return
    cluster_data['token'] = token
    main(cluster_data)
def neutron_api_relation_joined(rid=None):
    base_url = canonical_url(CONFIGS, INTERNAL)
    neutron_url = '%s:%s' % (base_url, api_port('neutron-server'))
    relation_data = {
        'enable-sriov': config('enable-sriov'),
        'neutron-url': neutron_url,
        'neutron-plugin': config('neutron-plugin'),
    }
    if config('neutron-security-groups'):
        relation_data['neutron-security-groups'] = "yes"
    else:
        relation_data['neutron-security-groups'] = "no"

    if is_api_ready(CONFIGS):
        relation_data['neutron-api-ready'] = "yes"
    else:
        relation_data['neutron-api-ready'] = "no"

    # LP Bug#1805645
    dns_domain = get_dns_domain()
    if dns_domain:
        relation_data['dns-domain'] = dns_domain

    relation_set(relation_id=rid, **relation_data)
    # Nova-cc may have grabbed the neutron endpoint so kick identity-service
    # relation to register that its here
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id, relation_trigger=True)
Example #24
def db_joined():
    relation_set(quantum_username=DB_USER,
                 quantum_database=QUANTUM_DB,
                 quantum_hostname=unit_get('private-address'),
                 nova_username=NOVA_DB_USER,
                 nova_database=NOVA_DB,
                 nova_hostname=unit_get('private-address'))
Example #25
def joined():
    log('#gluu-server relation joined called...')
    rel_data = {
        'unit': 'gluu-server',
        'host': unit_public_ip(),
    }
    hookenv.relation_set(rel_data)
def oracle_relation_changed(relation_id=None):
    if config('slave-units-number'):
        if check_all_clustered_nodes('final'):
            relation_info = {
                'oracle-host': '{}-scan'.format(config('scan-name')),
                'db-service': '{}'.format(config('db-service')),
                'scan-string': pickle.dumps(get_scan_str()),
                'db-password': '******'.format(config('db-password')),
                'db-path': '+DATA'
            }
            juju_log('Sending relation info to Cplane Controller')
            relation_set(relation_id=relation_id,
                         relation_settings=relation_info)
    else:
        hostname = socket.gethostname()
        relation_info = {
            'oracle-host': hostname,
            'db-service': '{}'.format(config('db-service')),
            'db-password': '******'.format(config('db-password')),
            'db-path': '/u01/app/oracle/oradata/CPLANE/'
        }
        for num in range(0, 5):
            if get_db_status() is False:
                juju_log("Service is not registered with listener... \
                          Retry checking it after 60 sec")
                time.sleep(60)
            else:
                juju_log("Service is regitered with listener")
                juju_log('Sending relation info to Cplane Controller')
                relation_set(relation_id=relation_id,
                             relation_settings=relation_info)
                break
Example #27
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    """Process broker request(s)."""
    if ceph.is_quorum():
        settings = relation_get(rid=relid, unit=unit)
        if 'broker_req' in settings:
            if not ceph.is_leader():
                log("Not leader - ignoring broker request", level=DEBUG)
            else:
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                log('mon cluster in quorum - providing radosgw with keys')
                public_addr = get_public_addr()
                data = {
                    'fsid': leader_get('fsid'),
                    'radosgw_key': ceph.get_radosgw_key(),
                    'auth': config('auth-supported'),
                    'ceph-public-address': public_addr,
                    unit_response_key: rsp,
                }
                relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
def ssh_authorized_peers(peer_interface, user, group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = hook_name()
    if hook == '%s-relation-joined' % peer_interface:
        relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []

        for r_id in relation_ids(peer_interface):
            for unit in related_units(r_id):
                ssh_pub_key = relation_get('ssh_pub_key',
                                           rid=r_id,
                                           unit=unit)
                priv_addr = relation_get('private-address',
                                         rid=r_id,
                                         unit=unit)
                if ssh_pub_key:
                    keys.append(ssh_pub_key)
                    hosts.append(priv_addr)
                else:
                    log('ssh_authorized_peers(): ssh_pub_key '
                        'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        relation_set(ssh_authorized_hosts=authed_hosts)
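A typical way to wire this up, assuming the charmhelpers Hooks dispatcher (the 'cluster' interface name and user are illustrative):

from charmhelpers.core.hookenv import Hooks

hooks = Hooks()

@hooks.hook('cluster-relation-joined', 'cluster-relation-changed')
def cluster_relation():
    # Same parameters from both hooks, as the docstring above requires.
    ssh_authorized_peers(peer_interface='cluster', user='nova',
                         group='nova', ensure_local_user=True)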
def ha_joined():
    config = get_hacluster_config()
    resources = {
        'res_cinder_vip': 'ocf:heartbeat:IPaddr2',
        'res_cinder_haproxy': 'lsb:haproxy'
    }

    vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
                 (config['vip'], config['vip_cidr'], config['vip_iface'])
    resource_params = {
        'res_cinder_vip': vip_params,
        'res_cinder_haproxy': 'op monitor interval="5s"'
    }
    init_services = {
        'res_cinder_haproxy': 'haproxy'
    }
    clones = {
        'cl_cinder_haproxy': 'res_cinder_haproxy'
    }
    relation_set(init_services=init_services,
                 corosync_bindiface=config['ha-bindiface'],
                 corosync_mcastport=config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)
Example #30
    def save(self):
        super(JSONDBwHooks, self).save()
        # post save we need to iterate the data and update any hooks
        # if the relation already exists we should subsequently invoke the hook
        charm_dir = SAVED_ENV.get("CHARM_DIR", "")
        hookdir = path(charm_dir) / "hooks"
        # XXX: we don't prune old hook files
        for e in self:
            hookname = "{}-relation-changed".format(e['name'])
            hook = hookdir / hookname
            reldata = ' '.join(['{}="{}"'.format(k, v)
                                for k, v in e['data'].items()])
            hook.write_text(self.HOOK.format(reldata))
            hook.chmod("a+rx")
            # XXX: we call this sync for now, this should be very low
            # contention in the prototype
            # XXX: this fails silently and allows for failure in the case
            # of already added endpoints
            if charm_dir:
                subprocess.call(['endpoint-add',
                                 e['name'],
                                 e['interface']],
                                env=SAVED_ENV)
                # relation_ids() returns a list; set the data on each id
                for rid in hookenv.relation_ids(e['name']):
                    hookenv.relation_set(rid, e['data'])
Example #31
def hanode_relation_joined(relid=None):
    relation_set(
        relation_id=relid,
        relation_settings={'private-address': get_relation_ip('hanode')})
def tls_certificates_relation_joined():
    settings = common_utils.get_tls_settings(common_utils.get_ip())
    relation_set(relation_settings=settings)
def contrail_kubernetes_config_joined(rel_id=None):
    data = {}
    data["pod_subnets"] = config.get("pod_subnets")
    data["nested_mode"] = config.get("nested_mode")
    data["nested_mode_config"] = config.get("nested_mode_config")
    relation_set(relation_id=rel_id, relation_settings=data)
def contrail_analytics_joined():
    settings = {"private-address": get_ip()}
    relation_set(relation_settings=settings)
def analytics_cluster_joined():
    settings = {"private-address": get_ip()}
    relation_set(relation_settings=settings)

    update_charm_status()
def contrail_analyticsdb_joined():
    settings = {"private-address": get_ip(), 'unit-type': 'analytics'}
    relation_set(relation_settings=settings)
Example #37
def ha_relation_changed():
    # Check that we are related to a principal and that
    # it has already provided the required corosync configuration
    if not get_corosync_conf():
        log('Unable to configure corosync right now, deferring configuration',
            level=INFO)
        return

    if relation_ids('hanode'):
        log('Ready to form cluster - informing peers', level=DEBUG)
        relation_set(relation_id=relation_ids('hanode')[0], ready=True)
    else:
        log('Ready to form cluster, but not related to peers just yet',
            level=INFO)
        return

    # Check that there's enough nodes in order to perform the
    # configuration of the HA cluster
    if len(get_cluster_nodes()) < int(config('cluster_count')):
        log('Not enough nodes in cluster, deferring configuration', level=INFO)
        return

    relids = relation_ids('ha') or relation_ids('juju-info')
    if len(relids) == 1:  # Should only ever be one of these
        # Obtain relation information
        relid = relids[0]
        units = related_units(relid)
        if len(units) < 1:
            log('No principal unit found, deferring configuration', level=INFO)
            return

        unit = units[0]
        log('Parsing cluster configuration using rid: %s, unit: %s' %
            (relid, unit),
            level=DEBUG)
        resources = parse_data(relid, unit, 'resources')
        delete_resources = parse_data(relid, unit, 'delete_resources')
        resource_params = parse_data(relid, unit, 'resource_params')
        groups = parse_data(relid, unit, 'groups')
        ms = parse_data(relid, unit, 'ms')
        orders = parse_data(relid, unit, 'orders')
        colocations = parse_data(relid, unit, 'colocations')
        clones = parse_data(relid, unit, 'clones')
        locations = parse_data(relid, unit, 'locations')
        init_services = parse_data(relid, unit, 'init_services')
    else:
        log('Related to %s ha services' % (len(relids)), level=DEBUG)
        return

    if any(ra.startswith('ocf:openstack') for ra in resources.values()):
        apt_install('openstack-resource-agents')
    if any(ra.startswith('ocf:ceph') for ra in resources.values()):
        apt_install('ceph-resource-agents')

    if any(ra.startswith('ocf:maas') for ra in resources.values()):
        try:
            validate_dns_ha()
        except MAASConfigIncomplete as ex:
            log(ex.args[0], level=ERROR)
            status_set('blocked', ex.args[0])
            # if an exception is raised the hook will end up in error state
            # which will obfuscate the workload status and message.
            return

        log('Setting up access to MAAS API', level=INFO)
        setup_maas_api()
        # Update resource_parms for DNS resources to include MAAS URL and
        # credentials
        for resource in resource_params.keys():
            if resource.endswith("_hostname"):
                res_ipaddr = get_ip_addr_from_resource_params(
                    resource_params[resource])
                resource_params[resource] += (
                    ' maas_url="{}" maas_credentials="{}"'
                    ''.format(config('maas_url'), config('maas_credentials')))
                write_maas_dns_address(resource, res_ipaddr)

    # NOTE: this should be removed in 15.04 cycle as corosync
    # configuration should be set directly on subordinate
    configure_corosync()
    try_pcmk_wait()
    failure_timeout = config('failure_timeout')
    configure_cluster_global(failure_timeout)
    configure_monitor_host()
    configure_stonith()

    # Only configure the cluster resources
    # from the oldest peer unit.
    if is_leader():
        log('Setting cluster symmetry', level=INFO)
        set_cluster_symmetry()
        log('Deleting Resources: %s' % (delete_resources,), level=DEBUG)
        for res_name in delete_resources:
            if pcmk.crm_opt_exists(res_name):
                if ocf_file_exists(res_name, resources):
                    log('Stopping and deleting resource %s' % res_name,
                        level=DEBUG)
                    if pcmk.crm_res_running(res_name):
                        pcmk.commit('crm -w -F resource stop %s' % res_name)
                else:
                    log('Cleaning up and deleting resource %s' % res_name,
                        level=DEBUG)
                    pcmk.commit('crm resource cleanup %s' % res_name)
                # Daemon process may still be running after the upgrade.
                kill_legacy_ocf_daemon_process(res_name)

                # Stop the resource before the deletion (LP: #1838528)
                log('Stopping %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F resource stop %s' % res_name)
                log('Deleting %s' % res_name, level=INFO)
                pcmk.commit('crm -w -F configure delete %s' % res_name)

        log('Configuring Resources: %s' % (resources), level=DEBUG)
        for res_name, res_type in resources.items():
            # disable the service we are going to put in HA
            if res_type.split(':')[0] == "lsb":
                disable_lsb_services(res_type.split(':')[1])
                if service_running(res_type.split(':')[1]):
                    service_stop(res_type.split(':')[1])
            elif (len(init_services) != 0 and res_name in init_services
                  and init_services[res_name]):
                disable_upstart_services(init_services[res_name])
                if service_running(init_services[res_name]):
                    service_stop(init_services[res_name])
            # Put the services in HA, if not already done so
            # if not pcmk.is_resource_present(res_name):
            if not pcmk.crm_opt_exists(res_name):
                if res_name not in resource_params:
                    cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                                   res_type)
                else:
                    cmd = ('crm -w -F configure primitive %s %s %s' %
                           (res_name, res_type, resource_params[res_name]))

                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)
                if config('monitor_host'):
                    cmd = ('crm -F configure location Ping-%s %s rule '
                           '-inf: pingd lte 0' % (res_name, res_name))
                    pcmk.commit(cmd)

            else:
                # the resource already exists so it will be updated.
                code = pcmk.crm_update_resource(res_name, res_type,
                                                resource_params.get(res_name))
                if code != 0:
                    msg = "Cannot update pcmkr resource: {}".format(res_name)
                    status_set('blocked', msg)
                    raise Exception(msg)

        log('Configuring Groups: %s' % (groups), level=DEBUG)
        for grp_name, grp_params in groups.items():
            if not pcmk.crm_opt_exists(grp_name):
                cmd = ('crm -w -F configure group %s %s' %
                       (grp_name, grp_params))
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
        for ms_name, ms_params in ms.items():
            if not pcmk.crm_opt_exists(ms_name):
                cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Orders: %s' % (orders), level=DEBUG)
        for ord_name, ord_params in orders.items():
            if not pcmk.crm_opt_exists(ord_name):
                cmd = 'crm -w -F configure order %s %s' % (ord_name,
                                                           ord_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Clones: %s' % clones, level=DEBUG)
        for cln_name, cln_params in clones.items():
            if not pcmk.crm_opt_exists(cln_name):
                cmd = 'crm -w -F configure clone %s %s' % (cln_name,
                                                           cln_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        # Ordering is important here, colocation and location constraints
        # reference resources. All resources referenced by the constraints
        # need to exist otherwise constraint creation will fail.

        log('Configuring Colocations: %s' % colocations, level=DEBUG)
        for col_name, col_params in colocations.items():
            if not pcmk.crm_opt_exists(col_name):
                cmd = 'crm -w -F configure colocation %s %s' % (col_name,
                                                                col_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Locations: %s' % locations, level=DEBUG)
        for loc_name, loc_params in locations.items():
            if not pcmk.crm_opt_exists(loc_name):
                cmd = 'crm -w -F configure location %s %s' % (loc_name,
                                                              loc_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        for res_name, res_type in resources.items():
            if len(init_services) != 0 and res_name in init_services:
                # Checks that the resources are running and started.
                # Ensure that clones are excluded as the resource is
                # not directly controllable (dealt with below)
                # Ensure that groups are cleaned up as a whole rather
                # than as individual resources.
                if (res_name not in clones.values()
                        and res_name not in groups.values()
                        and not pcmk.crm_res_running(res_name)):
                    # Just in case, cleanup the resources to ensure they get
                    # started in case they failed for some unrelated reason.
                    cmd = 'crm resource cleanup %s' % res_name
                    pcmk.commit(cmd)

        for cl_name in clones:
            # Always cleanup clones
            cmd = 'crm resource cleanup %s' % cl_name
            pcmk.commit(cmd)

        for grp_name in groups:
            # Always cleanup groups
            cmd = 'crm resource cleanup %s' % grp_name
            pcmk.commit(cmd)

        # All members of the cluster need to be registered before resources
        # that reference them can be created.
        if len(get_member_ready_nodes()) >= int(config('cluster_count')):
            log('Configuring any remote nodes', level=INFO)
            remote_resources = configure_pacemaker_remote_resources()
            stonith_resource = configure_pacemaker_remote_stonith_resource()
            resources.update(remote_resources)
            resources.update(stonith_resource)
            configure_resources_on_remotes(resources=resources,
                                           clones=clones,
                                           groups=groups)
        else:
            log('Deferring configuration of any remote nodes', level=INFO)

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id, clustered="yes")

    # Inform peers that local configuration is complete and this member
    # is ready
    for rel_id in relation_ids('hanode'):
        relation_set(relation_id=rel_id, member_ready=True)
def nova_vgpu_joined(relid=None, remote_restart=False):
    if remote_restart:
        rel_settings = {'restart-trigger': str(uuid.uuid4())}
        relation_set(relation_id=relid, relation_settings=rel_settings)
def amqp_joined(relation_id=None):
    relation_set(relation_id=relation_id,
                 username=config('rabbit-user'),
                 vhost=config('rabbit-vhost'))
def identity_joined(rid=None):
    if config('vip') and not is_clustered():
        log('Deferring registration until clustered', level=DEBUG)
        return

    settings = {}

    if not service_enabled('api'):
        juju_log('api service not enabled; skipping endpoint registration')
        return

    cinder_release = os_release('cinder-common')
    if CompareOpenStackReleases(cinder_release) < 'pike':
        public_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC), config('api-listening-port'))
        internal_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL), config('api-listening-port'))
        admin_url = '{}:{}/v1/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN), config('api-listening-port'))
        settings.update({
            'region': None,
            'service': None,
            'public_url': None,
            'internal_url': None,
            'admin_url': None,
            'cinder_region': config('region'),
            'cinder_service': 'cinder',
            'cinder_public_url': public_url,
            'cinder_internal_url': internal_url,
            'cinder_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'icehouse':
        # NOTE(jamespage) register v2 endpoint as well
        public_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC), config('api-listening-port'))
        internal_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL), config('api-listening-port'))
        admin_url = '{}:{}/v2/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN), config('api-listening-port'))
        settings.update({
            'cinderv2_region': config('region'),
            'cinderv2_service': 'cinderv2',
            'cinderv2_public_url': public_url,
            'cinderv2_internal_url': internal_url,
            'cinderv2_admin_url': admin_url,
        })
    if CompareOpenStackReleases(cinder_release) >= 'pike':
        # NOTE(jamespage) register v3 endpoint as well
        public_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, PUBLIC), config('api-listening-port'))
        internal_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, INTERNAL), config('api-listening-port'))
        admin_url = '{}:{}/v3/$(tenant_id)s'.format(
            canonical_url(CONFIGS, ADMIN), config('api-listening-port'))
        settings.update({
            'cinderv3_region': config('region'),
            'cinderv3_service': 'cinderv3',
            'cinderv3_public_url': public_url,
            'cinderv3_internal_url': internal_url,
            'cinderv3_admin_url': admin_url,
        })
    relation_set(relation_id=rid, **settings)
def amqp_joined(relation_id=None):
    conf = config()
    relation_set(relation_id=relation_id,
                 username=conf['rabbit-user'],
                 vhost=conf['rabbit-vhost'])
Example #42
def send_auth_key():
    key = get_pcmkr_key()
    if key:
        for rel_id in relation_ids('pacemaker-remote'):
            relation_set(relation_id=rel_id, **{'pacemaker-key': key})
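The **-splat here is needed because 'pacemaker-key' contains a hyphen and so cannot be passed as a plain keyword argument; passing relation_settings works just as well:

relation_set(relation_id=rel_id,
             relation_settings={'pacemaker-key': key})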
Example #43
def contrail_controller_joined():
    settings = {'dpdk': config["dpdk"], 'unit-type': 'agent'}
    relation_set(relation_settings=settings)
def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is leader)" % (remote_unit()),
        level=DEBUG)

    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    rx_ack_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    tx_rq_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    rx_leader_changed = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if rx_leader_changed:
        log(
            "Leader change notification received and this is leader so "
            "retrying sync.",
            level=INFO)
        # FIXME: check that we were previously part of a successful sync to
        #        ensure we have good rings.
        cluster_sync_rings(peers_only=tx_settings.get('peers-only', False),
                           token=rx_leader_changed)
        return

    rx_resync_request = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_REQUEST_RESYNC)
    resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
    tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
    if rx_resync_request and tx_resync_request_ack != rx_resync_request:
        log("Unit '%s' has requested a resync" % (remote_unit()), level=INFO)
        cluster_sync_rings(peers_only=True)
        relation_set(**{resync_request_ack_key: rx_resync_request})
        return

    # If we have received an ack token ensure it is not associated with a
    # request we received from another peer. If it is, this would indicate
    # a leadership change during a sync and this unit will abort the sync or
    # attempt to restore the original leader so to be able to complete the
    # sync.

    if rx_ack_token and rx_ack_token == tx_rq_token:
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if is_all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'%s'" % (key))
                raise SwiftProxyCharmException(msg)

            peers_only = bool(
                get_first_available_value(responses, key, default=0))
            log("Syncing rings and builders (peers-only=%s)" % (peers_only),
                level=DEBUG)
            broadcast_rings_available(broker_token=rx_ack_token,
                                      storage=not peers_only)
        else:
            key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
            acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (current='%s', token='%s')" % (acks, tx_ack_token),
                level=INFO)
    elif ((rx_ack_token and (rx_ack_token == tx_ack_token))
          or (rx_rq_token and (rx_rq_token == rx_ack_token))):
        log(
            "It appears that the cluster leader has changed mid-sync - "
            "stopping proxy service",
            level=WARNING)
        service_stop('swift-proxy')
        broker = rx_settings.get('builder-broker')
        if broker:
            # If we get here, manual intervention will be required in order
            # to restore the cluster.
            msg = ("Failed to restore previous broker '%s' as leader" %
                   (broker))
            raise SwiftProxyCharmException(msg)
        else:
            msg = ("No builder-broker on rx_settings relation from '%s' - "
                   "unable to attempt leader restore" % (remote_unit()))
            raise SwiftProxyCharmException(msg)
    else:
        log("Not taking any sync actions", level=DEBUG)

    CONFIGS.write_all()
def shared_db_changed(relation_id=None, unit=None):
    if not seeded():
        log("Percona cluster not yet bootstrapped - deferring shared-db rel "
            "until bootstrapped", DEBUG)
        return

    if not is_elected_leader(DC_RESOURCE_NAME):
        # NOTE(jamespage): relation level data candidate
        log('Service is peered, clearing shared-db relation '
            'as this service unit is not the leader')
        relation_clear(relation_id)
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        if is_relation_made('cluster'):
            for rel_id in relation_ids('shared-db'):
                peerdb_settings = \
                    peer_retrieve_by_prefix(rel_id, exc_list=['hostname'])

                passwords = [key for key in peerdb_settings.keys()
                             if 'password' in key.lower()]
                if len(passwords) > 0:
                    relation_set(relation_id=rel_id, **peerdb_settings)
        return

    settings = relation_get(unit=unit, rid=relation_id)
    access_network = config('access-network')
    db_helper = get_db_helper()

    peer_store_and_set(relation_id=relation_id,
                       relation_settings={'access-network': access_network})

    singleset = set(['database', 'username', 'hostname'])
    if singleset.issubset(settings):
        # Process a single database configuration
        hostname = settings['hostname']
        database = settings['database']
        username = settings['username']

        normalized_address = get_host_ip(hostname)
        if access_network and not is_address_in_network(access_network,
                                                        normalized_address):
            # NOTE: for configurations using access-network, only setup
            #       database access if remote unit has presented a
            #       hostname or ip address that's within the configured
            #       network cidr
            log("Host '%s' not in access-network '%s' - ignoring" %
                (normalized_address, access_network), level=INFO)
            return

        # NOTE: do this before querying access grants
        password = configure_db_for_hosts(hostname, database, username,
                                          db_helper)

        allowed_units = db_helper.get_allowed_units(database, username,
                                                    relation_id=relation_id)
        allowed_units = unit_sorted(allowed_units)
        allowed_units = ' '.join(allowed_units)
        relation_set(relation_id=relation_id, allowed_units=allowed_units)

        db_host = get_db_host(hostname)
        peer_store_and_set(relation_id=relation_id,
                           db_host=db_host,
                           password=password)
    else:
        # Process multiple database setup requests.
        # from incoming relation data:
        #  nova_database=xxx nova_username=xxx nova_hostname=xxx
        #  quantum_database=xxx quantum_username=xxx quantum_hostname=xxx
        # create
        # {
        #   "nova": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    },
        #    "quantum": {
        #        "username": xxx,
        #        "database": xxx,
        #        "hostname": xxx
        #    }
        # }
        #
        databases = {}
        for k, v in settings.items():
            db = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if db not in databases:
                databases[db] = {}
            databases[db][x] = v

        allowed_units = {}
        return_data = {}
        for db in databases:
            if singleset.issubset(databases[db]):
                database = databases[db]['database']
                hostname = databases[db]['hostname']
                username = databases[db]['username']

                normalized_address = get_host_ip(hostname)
                if (access_network and
                        not is_address_in_network(access_network,
                                                  normalized_address)):
                    # NOTE: for configurations using access-network,
                    #       only setup database access if remote unit
                    #       has presented a hostname or ip address
                    #       that's within the configured network cidr
                    return

                # NOTE: do this before querying access grants
                password = configure_db_for_hosts(hostname, database, username,
                                                  db_helper)

                a_units = db_helper.get_allowed_units(database, username,
                                                      relation_id=relation_id)
                a_units = ' '.join(unit_sorted(a_units))
                allowed_units['%s_allowed_units' % (db)] = a_units

                return_data['%s_password' % (db)] = password
                db_host = get_db_host(hostname)

        if allowed_units:
            relation_set(relation_id=relation_id, **allowed_units)
        else:
            log("No allowed_units - not setting relation settings",
                level=DEBUG)

        if return_data:
            peer_store_and_set(relation_id=relation_id, db_host=db_host,
                               **return_data)
        else:
            log("No return data - not setting relation settings", level=DEBUG)
def signal_ziu(key, value):
    log("ZIU: signal {} = {}".format(key, value))
    for rname in ziu_relations:
        for rid in relation_ids(rname):
            relation_set(relation_id=rid, relation_settings={key: value})
    config_set(key, value)
def certs_joined(relation_id=None):
    relation_set(relation_id=relation_id,
                 relation_settings=get_certificate_request())
Example #48
def amqp_joined():
    conf = hookenv.config()
    hookenv.relation_set(username=conf['rabbit-user'],
                         vhost=conf['rabbit-vhost'])
Example #49
def plugin_relation_joined(rel_id=None):
    bin_path = '/usr/bin'
    relation_set(release=os_release("openstack-dashboard"),
                 relation_id=rel_id,
                 bin_path=bin_path,
                 openstack_dir=INSTALL_DIR)
def ha_relation_joined(relation_id=None):
    settings = generate_ha_relation_data('glance')
    relation_set(relation_id=relation_id, **settings)
Example #51
def cluster_joined(relation_id=None):
    private_addr = get_relation_ip('cluster')
    relation_set(relation_id=relation_id,
                 relation_settings={'private-address': private_addr})
Example #52
def website_relation_joined():
    relation_set(port=70, hostname=unit_get('private-address'))
Example #53
def mon_relation_joined():
    public_addr = get_public_addr()
    for relid in relation_ids('mon'):
        relation_set(relation_id=relid,
                     relation_settings={'ceph-public-address': public_addr})

def contrail_controller_joined():
    settings = {'unit-type': 'command'}
    relation_set(relation_settings=settings)

def lxd_joined(relid=None):
    relation_set(relation_id=relid, user='******')

def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is non-leader)" % (remote_unit()),
        level=DEBUG)
    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    token = rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if token:
        log(
            "Leader-changed notification received from peer unit. Since "
            "this most likely occurred during a ring sync, proxies will "
            "be disabled until the leader is restored and a fresh sync "
            "request is sent out",
            level=WARNING)
        service_stop("swift-proxy")
        return

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)

    # Check whether we have been requested to stop proxy service
    if rx_rq_token:
        log("Peer request to stop proxy service received (%s) - sending ack" %
            (rx_rq_token),
            level=INFO)
        service_stop('swift-proxy')
        peers_only = rx_settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=rx_rq_token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    broker = rx_settings.get('builder-broker', None)
    broker_token = rx_settings.get('broker-token', None)
    broker_timestamp = rx_settings.get('broker-timestamp', None)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)
    if not broker:
        log("No ring/builder update available", level=DEBUG)
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')

        return
    elif broker_token:
        if tx_ack_token:
            if broker_token == tx_ack_token:
                log("Broker and ACK tokens match (%s)" % (broker_token),
                    level=DEBUG)
            else:
                log("Received ring/builder update notification but tokens do "
                    "not match (broker-token=%s/ack-token=%s)" %
                    (broker_token, tx_ack_token),
                    level=WARNING)
                return
        else:
            log(
                "Broker token available without handshake, assuming we just "
                "joined and rings won't change",
                level=DEBUG)
    else:
        log("Not taking any sync actions", level=DEBUG)
        return

    # If we upgrade from a cluster that did not use timestamps, the new
    # peer will need to request a re-sync from the leader
    if not is_most_recent_timestamp(broker_timestamp):
        if not timestamps_available(excluded_unit=remote_unit()):
            log("Requesting resync")
            rq = SwiftProxyClusterRPC().request_resync(broker_token)
            relation_set(relation_settings=rq)
        else:
            log(
                "Did not receive most recent broker timestamp but timestamps "
                "are available - waiting for next timestamp",
                level=INFO)

        return

    log("Ring/builder update available", level=DEBUG)
    builders_only = int(rx_settings.get('sync-only-builders', 0))
    path = os.path.basename(get_www_dir())
    try:
        sync_proxy_rings('http://%s/%s' % (broker, path),
                         rings=not builders_only)
    except CalledProcessError:
        log(
            "Ring builder sync failed, builders not yet available - "
            "leader not ready?",
            level=WARNING)
        return

    # Re-enable the proxy once all builders and rings are synced
    if fully_synced():
        log("Ring builders synced - starting proxy", level=INFO)
        CONFIGS.write_all()
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')
    else:
        log(
            "Not all builders and rings synced yet - waiting for peer sync "
            "before starting proxy",
            level=INFO)
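
The core of the function above is the broker-token handshake. The branch structure distills into a standalone decision function (an illustrative restatement, not code from the charm):

def sync_decision(broker, broker_token, tx_ack_token):
    """Return the action a non-leader should take, per the logic above."""
    if not broker:
        return 'start-proxy'   # no ring/builder update available
    if not broker_token:
        return 'no-op'         # no token at all: take no sync actions
    if tx_ack_token and broker_token != tx_ack_token:
        return 'no-op'         # token mismatch: ignore the notification
    return 'sync'              # matched handshake, or a fresh join

assert sync_decision(None, None, None) == 'start-proxy'
assert sync_decision('10.0.0.1', 'abc', 'abc') == 'sync'
assert sync_decision('10.0.0.1', 'abc', 'xyz') == 'no-op'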

def secrets_storage_joined(relation_id=None):
    relation_set(relation_id=relation_id,
                 secret_backend=vaultlocker.VAULTLOCKER_BACKEND,
                 isolated=True,
                 access_address=get_relation_ip('secrets-storage'),
                 hostname=gethostname())

def cloud_credentials_joined():
    svc_name = local_unit().split('/')[0].replace('-', '_')
    relation_set(username=svc_name)
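
The username sent above is simply the application name with hyphens mapped to underscores. For illustration (the unit name is hypothetical):

unit = 'nova-cloud-controller/0'
svc_name = unit.split('/')[0].replace('-', '_')
assert svc_name == 'nova_cloud_controller'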

def _handle_ceph_request():
    """Handles the logic for sending and acknowledging Ceph broker requests."""

    # First, we create a request. We will test if this request is equivalent
    # to a previous one. If it is not, we will send it.
    request = get_ceph_request()

    log("New ceph request {} created.".format(request.request_id), level=DEBUG)

    # Here we will know if the new request is equivalent, and if it is,
    # whether it has completed or has merely been sent.
    states = get_request_states(request, relation='ceph')

    log("Request states: {}.".format(states), level=DEBUG)

    complete = True
    sent = True

    # According to existing ceph broker messaging logic, we are expecting only
    # 1 rid.
    for rid in states.keys():
        if not states[rid]['complete']:
            complete = False
        if not states[rid]['sent']:
            sent = False
        if not sent and not complete:
            break

    # If either complete or sent is True, then get_request_states has validated
    # that the current request is equivalent to a previously sent request.
    if complete:
        log('Previous request complete.')

        # If the request is complete, we need to restart nova once and mark it
        # restarted. The broker response comes from a specific unit, and can
        # only be read when this hook is invoked by the remote unit (the
        # broker), unless specifically queried for the given unit. Therefore,
        # we iterate across all units to find which has the broker response,
        # and we process the response regardless of this execution context.
        broker_rid, broker_unit = _get_broker_rid_unit_for_previous_request()

        # If we cannot determine which unit has the response, then it means
        # there is no response yet.
        if (broker_rid, broker_unit) == (None, None):
            log(
                "Aborting because there is no broker response "
                "for any unit at the moment.",
                level=DEBUG)
            return

        # Ensure that nova-compute is restarted since only now can we
        # guarantee that ceph resources are ready, but only if not paused.
        if (not is_unit_paused_set() and not is_broker_action_done(
                'nova_compute_restart', broker_rid, broker_unit)):
            log('Restarting Nova Compute as per request '
                '{}.'.format(request.request_id),
                level=DEBUG)
            service_restart('nova-compute')
            mark_broker_action_done('nova_compute_restart', broker_rid,
                                    broker_unit)
    else:
        if sent:
            log("Request {} already sent, not sending "
                "another.".format(request.request_id),
                level=DEBUG)
        else:
            log("Request {} not sent, sending it "
                "now.".format(request.request_id),
                level=DEBUG)
            for rid in relation_ids('ceph'):
                log('Sending request {}'.format(request.request_id),
                    level=DEBUG)
                relation_set(relation_id=rid, broker_req=request.request)
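
For reference, the per-relation state consumed by the loop near the top of this function has the following shape: keys are relation ids, values are flag dicts. The relation id and flag values here are illustrative:

states = {
    'ceph:0': {'sent': True, 'complete': False},
}
complete = all(s['complete'] for s in states.values())
sent = all(s['sent'] for s in states.values())
# With these flags the handler logs "already sent, not sending another":
# the request is in flight but has not yet been acknowledged as complete.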

Example #60
def keystone_credentials_joined(relid=None):
    relation_set(relation_id=relid,
                 username=CEILOMETER_SERVICE,
                 requested_roles=CEILOMETER_ROLE)