Code example #1
File: keystone_hooks.py  Project: akanda/astara-juju
def config_changed_postupgrade():
    # Ensure ssl dir exists and is unison-accessible
    ensure_ssl_dir()

    check_call(['chmod', '-R', 'g+wrx', '/var/lib/keystone/'])

    ensure_ssl_dirs()

    save_script_rc()
    configure_https()

    update_nrpe_config()
    CONFIGS.write_all()

    if is_pki_enabled():
        initialise_pki()

    update_all_identity_relation_units()

    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)

    # Ensure sync request is sent out (needed for any/all ssl change)
    send_ssl_sync_request()

    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
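
Most of the hook functions collected here follow the same re-trigger pattern seen above: look up every relation id for a given interface with relation_ids() and re-run the matching joined/changed handler. As a hedged illustration of that shared pattern (a minimal sketch, not taken from any of the charms listed; the helper name and its arguments are placeholders, and relation_ids comes from charmhelpers.core.hookenv):

from charmhelpers.core.hookenv import relation_ids


def retrigger_joined(relation_name, handler):
    # Re-run a *-relation-joined style handler once for every existing
    # relation id on the named interface.
    for rid in relation_ids(relation_name):
        handler(relation_id=rid)
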
Code example #2
def cluster_changed():
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        dbsync_state = ch_peerstorage.peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            if not ch_utils.is_unit_paused_set():
                for svc in ncc_utils.services():
                    ch_host.service_resume(svc)
            else:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
        else:
            if not ch_utils.is_unit_paused_set():
                hookenv.log('Database sync not ready. Shutting down services')
                for svc in ncc_utils.services():
                    ch_host.service_pause(svc)
            else:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
    # The shared metadata secret is stored in the leader-db and if it's
    # changed the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
Code example #3
def upgrade_charm():
    ch_fetch.apt_install(
        ch_fetch.filter_installed_packages(
            ncc_utils.determine_packages()), fatal=True)
    packages_removed = ncc_utils.remove_old_packages()
    if packages_removed:
        hookenv.log("Package purge detected, restarting services", "INFO")
        for s in ncc_utils.services():
            ch_host.service_restart(s)

    # For users already on bionic-rocky who are upgrading only their
    # charm, we need to ensure we do not end up with the old
    # 'wsgi-openstack-api' and the new 'wsgi-placement-api' apache
    # configurations installed at the same time.
    ncc_utils.stop_deprecated_services()
    ncc_utils.disable_package_apache_site(service_reload=True)

    for r_id in hookenv.relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in hookenv.relation_ids('cloud-compute'):
        for unit in hookenv.related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in hookenv.relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    leader_init_db_if_ready_allowed_units()

    update_nrpe_config()
Code example #4
File: nova_cc_hooks.py  Project: akanda/astara-juju
def db_changed():
    if 'shared-db' not in CONFIGS.complete_contexts():
        log('shared-db relation incomplete. Peer not ready?')
        return
    CONFIGS.write_all()

    if is_elected_leader(CLUSTER_RES):
        # Bugs 1353135 & 1187508. Dbs can appear to be ready before the unit's
        # acl entry has been added. So, if the db supports passing a list of
        # permitted units then check if we're in the list.
        allowed_units = relation_get('nova_allowed_units')
        if allowed_units and local_unit() in allowed_units.split():
            status_set('maintenance', 'Running nova db migration')
            migrate_nova_database()
            log('Triggering remote cloud-compute restarts.')
            [compute_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cloud-compute')]
            log('Triggering remote cell restarts.')
            [nova_cell_relation_joined(rid=rid, remote_restart=True)
                for rid in relation_ids('cell')]
            conditional_neutron_migration()
        else:
            log('allowed_units either not present, or local unit '
                'not in acl list: %s' % repr(allowed_units))

    for r_id in relation_ids('nova-api'):
        nova_api_relation_joined(rid=r_id)
Code example #5
def update_nova_relation(remote_restart=False):
    for rid in hookenv.relation_ids('cloud-compute'):
        compute_joined(rid=rid, remote_restart=remote_restart)
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=remote_restart)
    for rid in hookenv.relation_ids('nova-cell-api'):
        nova_cell_api_relation_joined(rid=rid, remote_restart=remote_restart)
Code example #6
File: nrpe.py  Project: gnuoy/charm-hacluster
    def __init__(self, hostname=None, primary=True):
        super(NRPE, self).__init__()
        self.config = config()
        self.primary = primary
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []
        # Iff in an nrpe-external-master relation hook, set primary status
        relation = relation_ids('nrpe-external-master')
        if relation:
            log("Setting charm primary status {}".format(primary))
            for rid in relation_ids('nrpe-external-master'):
                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
Code example #7
File: nova_cc_utils.py  Project: BillTheBest/hyper-c
def guard_map():
    '''Map of services and required interfaces that must be present before
    the service should be allowed to start'''
    gmap = {}
    nova_services = deepcopy(BASE_SERVICES)
    if os_release('nova-common') not in ['essex', 'folsom']:
        nova_services.append('nova-conductor')

    nova_interfaces = ['identity-service', 'amqp']
    if relation_ids('pgsql-nova-db'):
        nova_interfaces.append('pgsql-nova-db')
    else:
        nova_interfaces.append('shared-db')

    for svc in nova_services:
        gmap[svc] = nova_interfaces

    net_manager = network_manager()
    if net_manager in ['neutron', 'quantum'] and \
            not is_relation_made('neutron-api'):
        neutron_interfaces = ['identity-service', 'amqp']
        if relation_ids('pgsql-neutron-db'):
            neutron_interfaces.append('pgsql-neutron-db')
        else:
            neutron_interfaces.append('shared-db')
        if network_manager() == 'quantum':
            gmap['quantum-server'] = neutron_interfaces
        else:
            gmap['neutron-server'] = neutron_interfaces

    return gmap
Code example #8
File: hooks.py  Project: openstack/charm-ceph-radosgw
    def _cluster_changed():
        CONFIGS.write_all()
        for r_id in relation_ids('identity-service'):
            identity_joined(relid=r_id)
        for r_id in relation_ids('certificates'):
            for unit in related_units(r_id):
                certs_changed(r_id, unit)
Code example #9
File: hooks.py  Project: openstack/charm-ceph-radosgw
def process_multisite_relations():
    """Re-trigger any pending master/slave relations"""
    for r_id in relation_ids('master'):
        master_relation_joined(r_id)
    for r_id in relation_ids('slave'):
        for unit in related_units(r_id):
            slave_relation_changed(r_id, unit)
Code example #10
    def __call__(self):
        """This generates context for /etc/ceph/ceph.conf templates"""
        if not relation_ids("ceph"):
            return {}
        log("Generating template context for ceph")
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids("ceph"):
            for unit in related_units(rid):
                mon_hosts.append(relation_get("private-address", rid=rid, unit=unit))
                auth = relation_get("auth", rid=rid, unit=unit)
                key = relation_get("key", rid=rid, unit=unit)

        ctxt = {"mon_hosts": " ".join(mon_hosts), "auth": auth, "key": key}

        if not os.path.isdir("/etc/ceph"):
            os.mkdir("/etc/ceph")

        if not context_complete(ctxt):
            return {}

        ensure_packages(["ceph-common"])

        return ctxt
Code example #11
    def __call__(self):
        """
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        """
        if not relation_ids("cluster"):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace("/", "-")
        cluster_hosts[l_unit] = unit_get("private-address")

        for rid in relation_ids("cluster"):
            for unit in related_units(rid):
                _unit = unit.replace("/", "-")
                addr = relation_get("private-address", rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {"units": cluster_hosts}
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log("Ensuring haproxy enabled in /etc/default/haproxy.")
            with open("/etc/default/haproxy", "w") as out:
                out.write("ENABLED=1\n")
            return ctxt
        log("HAProxy context is incomplete, this unit has no peers.")
        return {}
Code example #12
def provision_control():
    host_name = gethostname()
    host_ip = gethostbyname(unit_get("private-address"))
    a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                      port)
                     for rid in relation_ids("contrail-api")
                     for unit, port in
                     ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                     if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning control {}".format(host_ip))
    check_call(["contrail-provision-control",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--router_asn", "64512",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Code example #13
File: nrpe.py  Project: coreycb/charm-keystone
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Code example #14
def update_all_identity_relation_units(check_db_ready=True):
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return
    if not is_expected_scale():
        log("Keystone charm and it's dependencies not yet at expected scale "
            "- deferring identity-relation updates", level=INFO)
        return

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
Code example #15
File: astara_context.py  Project: akanda/astara-juju
    def _coordinator_context(self):
        """Attempt to create a usable tooz coordinator URL from zk or memcache

        This'll see if we have zookeeper or memcached relations and use that
        found as the coordinator. Note memcahe is only for testing and
        zookeeper will be preferred if both are found.
        """

        # NOTE: Neither the zookeeper or memcache charms do any kind of
        # clustering of peers, so we just look for one that tells us its
        # port and point at that.
        zk_relation_ids = relation_ids('zookeeper')
        for rid in zk_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                zk_port = rel_data.get('port')
                zk_addr = rel_data.get('private-address')
                if zk_port:
                    url = 'kazoo://%s:%s?timeout=5' % (zk_addr, zk_port)
                    log('Using zookeeper @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        memcached_relation_ids = relation_ids('cache')
        for rid in memcached_relation_ids:
            for unit in related_units(rid):
                rel_data = relation_get(unit=unit, rid=rid)
                mc_port = rel_data.get('tcp-port')
                mc_addr = rel_data.get('private-address')
                if mc_port:
                    url = 'memcached://%s:%s' % (mc_addr, mc_port)
                    log('Using memcached @ %s for astara coordination' % url)
                    return {'coordination_url': url}

        log('no astara coordination relation data found')
        return {}
Code example #16
    def __call__(self):
        if not relation_ids("ceph"):
            return {}

        log("Generating template context for ceph", level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config("use-syslog")).lower()
        for rid in relation_ids("ceph"):
            for unit in related_units(rid):
                auth = relation_get("auth", rid=rid, unit=unit)
                key = relation_get("key", rid=rid, unit=unit)
                ceph_pub_addr = relation_get("ceph-public-address", rid=rid, unit=unit)
                unit_priv_addr = relation_get("private-address", rid=rid, unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {"mon_hosts": " ".join(sorted(mon_hosts)), "auth": auth, "key": key, "use_syslog": use_syslog}

        if not os.path.isdir("/etc/ceph"):
            os.mkdir("/etc/ceph")

        if not context_complete(ctxt):
            return {}

        ensure_packages(["ceph-common"])
        return ctxt
Code example #17
def leader_settings_changed():
    # NOTE(jamespage): lead unit will seed libvirt secret UUID
    #                  re-exec relations that use this data.
    for r_id in relation_ids('ceph-access'):
        ceph_access_joined(r_id)
    for r_id in relation_ids('storage-backend'):
        storage_backend(r_id)
Code example #18
def is_db_initialised(cluster_rid=None):
    """
    Check whether a db initialisation has been performed by any peer unit.

    We base our decision on whether we or any of our peers has previously
    sent or echoed an initialisation notification.

    @param cluster_rid: current relation id. If none provided, all cluster
                        relation ids will be checked.
    @return: True if there has been a db initialisation otherwise False.
    """
    if cluster_rid:
        rids = [cluster_rid]
    else:
        rids = relation_ids('cluster')

    shared_db_rel_id = (relation_ids('shared-db') or [None])[0]
    if not shared_db_rel_id:
        return False

    for c_rid in rids:
        units = related_units(relid=c_rid) + [local_unit()]
        for unit in units:
            settings = relation_get(unit=unit, rid=c_rid) or {}
            for key in [NEUTRON_DB_INIT_RKEY, NEUTRON_DB_INIT_ECHO_RKEY]:
                if shared_db_rel_id in settings.get(key, ''):
                    return True

    return False
Code example #19
File: nrpe.py  Project: gnuoy/charm-hacluster
    def write(self):
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
Code example #20
def update_nova_consoleauth_config():
    """
    Configure nova-consoleauth pacemaker resources
    """
    relids = relation_ids('ha')
    if len(relids) == 0:
        log('Related to {} ha services'.format(len(relids)), level='DEBUG')
        ha_relid = None
        data = {}
    else:
        ha_relid = relids[0]
        data = relation_get(rid=ha_relid) or {}

    # initialize keys in case this is a new dict
    data.setdefault('delete_resources', [])
    for k in ['colocations', 'init_services', 'resources', 'resource_params']:
        data.setdefault(k, {})

    if config('single-nova-consoleauth') and console_attributes('protocol'):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            try:
                data['delete_resources'].remove(item)
            except ValueError:
                pass  # nothing to remove, we are good

        # the new pcmkr resources have to be added to the existing ones
        data['colocations']['vip_consoleauth'] = COLO_CONSOLEAUTH
        data['init_services']['res_nova_consoleauth'] = 'nova-consoleauth'
        data['resources']['res_nova_consoleauth'] = AGENT_CONSOLEAUTH
        data['resource_params']['res_nova_consoleauth'] = AGENT_CA_PARAMS

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        # nova-consoleauth will be managed by pacemaker, so mark it as manual
        if relation_ids('ha'):
            with open(NOVA_CONSOLEAUTH_OVERRIDE, 'w') as fp:
                fp.write('manual\n')
                fp.flush()

    elif (not config('single-nova-consoleauth') and
          console_attributes('protocol')):
        for item in ['vip_consoleauth', 'res_nova_consoleauth']:
            if item not in data['delete_resources']:
                data['delete_resources'].append(item)

        # remove them from the rel, so they aren't recreated when the hook
        # is recreated
        data['colocations'].pop('vip_consoleauth', None)
        data['init_services'].pop('res_nova_consoleauth', None)
        data['resources'].pop('res_nova_consoleauth', None)
        data['resource_params'].pop('res_nova_consoleauth', None)

        for rid in relation_ids('ha'):
            relation_set(rid, **data)

        try:
            os.remove(NOVA_CONSOLEAUTH_OVERRIDE)
        except FileNotFoundError as e:
            log(str(e), level='DEBUG')
Code example #21
def provision_local_metadata():
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password = [ (relation_get("service_username", unit, rid),
                        relation_get("service_password", unit, rid))
                       for rid in relation_ids("identity-admin")
                       for unit in related_units(rid) ][0]
    log("Provisioning local metadata service 127.0.0.1:8775")
    check_call(["contrail-provision-linklocal",
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--linklocal_service_name", "metadata",
                "--linklocal_service_ip", "169.254.169.254",
                "--linklocal_service_port", "80",
                "--ipfabric_service_ip", "127.0.0.1",
                "--ipfabric_service_port", "8775",
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password])
Code example #22
File: context.py  Project: CiscoSystems/juju-vem
    def __call__(self):
        '''
        Builds half a context for the haproxy template, which describes
        all peers to be included in the cluster.  Each charm needs to include
        its own context generator that describes the port mapping.
        '''
        if not relation_ids('cluster'):
            return {}

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = unit_get('private-address')

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _unit = unit.replace('/', '-')
                addr = relation_get('private-address', rid=rid, unit=unit)
                cluster_hosts[_unit] = addr

        ctxt = {
            'units': cluster_hosts,
        }
        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
            with open('/etc/default/haproxy', 'w') as out:
                out.write('ENABLED=1\n')
            return ctxt
        log('HAProxy context is incomplete, this unit has no peers.')
        return {}
Code example #23
def leader_init_db_if_ready(skip_acl_check=False, skip_cells_restarts=False,
                            db_rid=None, unit=None):
    """Initialise db if leader and db not yet intialised.

    NOTE: must be called from database context.
    """
    if not is_elected_leader(CLUSTER_RES):
        log("Not leader - skipping db init", level=DEBUG)
        return

    if is_db_initialised():
        log("Database already initialised - skipping db init", level=DEBUG)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the unit's
    # acl entry has been added. So, if the db supports passing a list of
    # permitted units then check if we're in the list.
    allowed_units = relation_get('nova_allowed_units', rid=db_rid, unit=unit)
    if skip_acl_check or (allowed_units and local_unit() in
                          allowed_units.split()):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
        log('Triggering remote cloud-compute restarts.')
        [compute_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('cloud-compute')]
        log('Triggering remote neutron-network-service restarts.')
        [quantum_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('quantum-network-service')]
        if not skip_cells_restarts:
            log('Triggering remote cell restarts.')
            [nova_cell_relation_joined(rid=rid, remote_restart=True)
             for rid in relation_ids('cell')]
    else:
        log('allowed_units either not present, or local unit '
            'not in acl list: %s' % repr(allowed_units))
Code example #24
def get_cluster_id():
    """ Return cluster id (lp1776171)

    Return cluster ID for MySQL asynchronous replication
    :returns: int cluster_id
    """
    if not config('cluster-id'):
        msg = ("Master / Slave relation requires 'cluster-id' option")
        status_set("blocked", msg)
        raise ClusterIDRequired(msg)
    cluster_id = config('cluster-id')
    for rid in relation_ids('master'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    for rid in relation_ids('slave'):
        for unit in related_units(rid):
            if relation_get(attribute='cluster_id',
                            rid=rid,
                            unit=unit) == cluster_id:
                msg = ("'cluster-id' option must be unique within a cluster")
                status_set('blocked', msg)
                raise ClusterIDIdentical(msg)
    return cluster_id
Code example #25
def config_changed():
    if config('prefer-ipv6'):
        setup_ipv6()
        status_set('maintenance', 'Sync DB')
        sync_db_with_multi_ipv6_addresses(config('database'),
                                          config('database-user'))

    if git_install_requested():
        if config_value_changed('openstack-origin-git'):
            status_set('maintenance', 'Running Git install')
            git_install(config('openstack-origin-git'))
    elif not config('action-managed-upgrade'):
        if openstack_upgrade_available('glance-common'):
            status_set('maintenance', 'Upgrading OpenStack release')
            do_openstack_upgrade(CONFIGS)

    open_port(9292)
    configure_https()

    update_nrpe_config()

    # Pick up any changes due to network reference architecture
    # configuration
    [keystone_joined(rid) for rid in relation_ids('identity-service')]
    [image_service_joined(rid) for rid in relation_ids('image-service')]
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    for r_id in relation_ids('ha'):
        ha_relation_joined(relation_id=r_id)
Code example #26
File: context.py  Project: CiscoSystems/juju-vem
    def __call__(self):
        '''This generates context for /etc/ceph/ceph.conf templates'''
        if not relation_ids('ceph'):
            return {}
        log('Generating template context for ceph')
        mon_hosts = []
        auth = None
        key = None
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                mon_hosts.append(relation_get('private-address', rid=rid,
                                              unit=unit))
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)

        ctxt = {
            'mon_hosts': ' '.join(mon_hosts),
            'auth': auth,
            'key': key,
        }

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])

        return ctxt
Code example #27
File: context.py  Project: wuwenbin2/onos-controller
    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        auth = None
        key = None
        use_syslog = str(config('use-syslog')).lower()
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                auth = relation_get('auth', rid=rid, unit=unit)
                key = relation_get('key', rid=rid, unit=unit)
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
                'auth': auth,
                'key': key,
                'use_syslog': use_syslog}

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
Code example #28
def provision_vrouter():
    host_name = gethostname()
    host_ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
    a_port = None
    a_ip = config.get("contrail-api-ip")
    if a_ip:
        a_port = config.get("contrail-api-port")
        if a_port is None:
            a_port = api_port()
    else:
        a_ip, a_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                          port)
                         for rid in relation_ids("contrail-api")
                         for unit, port in
                         ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
                         if port ][0]
    user, password, tenant = [ (relation_get("service_username", unit, rid),
                                relation_get("service_password", unit, rid),
                                relation_get("service_tenant_name", unit, rid))
                               for rid in relation_ids("identity-admin")
                               for unit in related_units(rid) ][0]
    log("Provisioning vrouter {}".format(host_ip))
    check_call(["contrail-provision-vrouter",
                "--host_name", host_name,
                "--host_ip", host_ip,
                "--api_server_ip", a_ip,
                "--api_server_port", str(a_port),
                "--oper", "add",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
Code example #29
def update_all_identity_relation_units(check_db_ready=True):
    CONFIGS.write_all()
    if is_unit_paused_set():
        return
    if check_db_ready and not is_db_ready():
        log('Allowed_units list provided and this unit not present',
            level=INFO)
        return

    if not is_db_initialised():
        log("Database not yet initialised - deferring identity-relation "
            "updates", level=INFO)
        return

    if is_elected_leader(CLUSTER_RES):
        ensure_initial_admin(config)

    log('Firing identity_changed hook for all related services.')
    for rid in relation_ids('identity-service'):
        for unit in related_units(rid):
            identity_changed(relation_id=rid, remote_unit=unit)
    log('Firing admin_relation_changed hook for all related services.')
    for rid in relation_ids('identity-admin'):
        admin_relation_changed(rid)
    log('Firing identity_credentials_changed hook for all related services.')
    for rid in relation_ids('identity-credentials'):
        for unit in related_units(rid):
            identity_credentials_changed(relation_id=rid, remote_unit=unit)
Code example #30
def upgrade_charm():
    apt_install(filter_installed_packages(determine_packages()),
                fatal=True)
    for r_id in relation_ids('amqp'):
        amqp_joined(relation_id=r_id)
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for r_id in relation_ids('cloud-compute'):
        for unit in related_units(r_id):
            compute_changed(r_id, unit)
    for r_id in relation_ids('shared-db'):
        db_joined(relation_id=r_id)

    rels = ['shared-db', 'pgsql-nova-db']
    for rname in rels:
        for rid in relation_ids(rname):
            for unit in related_units(rid):
                if rname == 'pgsql-nova-db':
                    leader_init_db_if_ready(skip_acl_check=True,
                                            skip_cells_restarts=True,
                                            db_rid=rid, unit=unit)
                else:
                    leader_init_db_if_ready(db_rid=rid, unit=unit)

    update_nrpe_config()
    update_nova_consoleauth_config()
Code example #31
def resource_map(release=None):
    """
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    """
    resource_map = deepcopy(BASE_RESOURCE_MAP)
    release = release or os_release('cinder-common', base='icehouse')
    if relation_ids('backup-backend'):
        resource_map[CINDER_CONF]['services'].append('cinder-backup')
        resource_map[ceph_config_file()]['services'].append('cinder-backup')

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # need to create this early; new peers will have a relation during
        # registration, before they've run the ceph hooks to create the
        # directory.
        # !!! FIX: These side effects seem inappropriate for this method
        mkdir(os.path.dirname(CEPH_CONF))
        mkdir(os.path.dirname(ceph_config_file()))

        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charm - cinder ceph.conf will be
        # lower priority than both of these but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'w').close()
        install_alternative(os.path.basename(CEPH_CONF), CEPH_CONF,
                            ceph_config_file())
    else:
        resource_map.pop(ceph_config_file())

    if os.path.exists('/etc/apache2/conf-available'):
        resource_map.pop(APACHE_SITE_CONF)
    else:
        resource_map.pop(APACHE_SITE_24_CONF)

    # Remove services from map which are not enabled by user config
    for cfg in resource_map.keys():
        resource_map[cfg]['services'] = \
            filter_services(resource_map[cfg]['services'])

    if enable_memcache(source=config()['openstack-origin']):
        resource_map[MEMCACHED_CONF] = {
            'contexts': [context.MemcacheContext()],
            'services': ['memcached']
        }

    if run_in_apache():
        for cfile in resource_map:
            svcs = resource_map[cfile]['services']
            if 'cinder-api' in svcs:
                svcs.remove('cinder-api')
                if 'apache2' not in svcs:
                    svcs.append('apache2')
        wsgi_script = "/usr/bin/cinder-wsgi"
        resource_map[WSGI_CINDER_API_CONF] = {
            'contexts': [
                context.WSGIWorkerConfigContext(name="cinder",
                                                script=wsgi_script),
                cinder_contexts.HAProxyContext()
            ],
            'services': ['apache2']
        }

    if release and CompareOpenStackReleases(release) < 'queens':
        resource_map.pop(CINDER_POLICY_JSON)

    return resource_map
Code example #32
def any_changed():
    CONFIGS.write_all()
    configure_https()
    for rid in relation_ids('identity-service'):
        keystone_joined(relid=rid)
    ceilometer_joined()
Code example #33
def _notify_nova():
    for rid in relation_ids("nova-compute"):
        if related_units(rid):
            nova_compute_joined(rid)
Code example #34
def notify_client():
    for relid in relation_ids('client'):
        client_relation_joined(relid)
Code example #35
def notify_osds():
    for relid in relation_ids('osd'):
        osd_relation(relid)
Code example #36
def neutron_plugin_api_changed():
    CONFIGS.write_all()
    # If dvr setting has changed, need to pass that on
    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(relation_id=rid)
Code example #37
def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit={} (local is leader)".format(remote_unit()),
        level=DEBUG)

    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    rx_ack_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    tx_rq_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    rx_leader_changed = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if rx_leader_changed:
        log(
            "Leader change notification received and this is leader so "
            "retrying sync.",
            level=INFO)
        # FIXME: check that we were previously part of a successful sync to
        #        ensure we have good rings.
        cluster_sync_rings(peers_only=tx_settings.get('peers-only', False),
                           token=rx_leader_changed)
        return

    rx_resync_request = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_REQUEST_RESYNC)
    resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
    tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
    if rx_resync_request and tx_resync_request_ack != rx_resync_request:
        log("Unit '{}' has requested a resync".format(remote_unit()),
            level=INFO)
        cluster_sync_rings(peers_only=True)
        relation_set(**{resync_request_ack_key: rx_resync_request})
        return

    # If we have received an ack token ensure it is not associated with a
    # request we received from another peer. If it is, this would indicate
    # a leadership change during a sync and this unit will abort the sync or
    # attempt to restore the original leader so as to be able to complete the
    # sync.

    if rx_ack_token and rx_ack_token == tx_rq_token:
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if is_all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'{}'".format(key))
                raise SwiftProxyCharmException(msg)

            peers_only = bool(
                get_first_available_value(responses, key, default=0))
            log("Syncing rings and builders (peers-only={})".format(
                peers_only),
                level=DEBUG)
            broadcast_rings_available(broker_token=rx_ack_token,
                                      storage=not peers_only)
        else:
            key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
            acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (current='{}', token='{}')".format(acks, tx_ack_token),
                level=INFO)
    elif ((rx_ack_token and (rx_ack_token == tx_ack_token))
          or (rx_rq_token and (rx_rq_token == rx_ack_token))):
        log(
            "It appears that the cluster leader has changed mid-sync - "
            "stopping proxy service",
            level=WARNING)
        service_stop('swift-proxy')
        broker = rx_settings.get('builder-broker')
        if broker:
            # If we get here, manual intervention will be required in order
            # to restore the cluster.
            raise SwiftProxyCharmException(
                "Failed to restore previous broker '{}' as leader".format(
                    broker))
        else:
            raise SwiftProxyCharmException(
                "No builder-broker on rx_settings relation from '{}' - "
                "unable to attempt leader restore".format(remote_unit()))
    else:
        log("Not taking any sync actions", level=DEBUG)

    CONFIGS.write_all()
Code example #38
def signal_ziu(key, value):
    log("ZIU: signal {} = {}".format(key, value))
    for rname in ziu_relations:
        for rid in relation_ids(rname):
            relation_set(relation_id=rid, relation_settings={key: value})
    config_set(key, value)
Code example #39
    def __call__(self):
        log('Generating template context for identity-service')
        ctxt = {}
        regions = set()

        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                default_role = config('default-role')
                lc_default_role = config('default-role').lower()
                for role in rdata.get('created_roles', '').split(','):
                    if role.lower() == lc_default_role:
                        default_role = role
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                region = rdata.get('region')

                local_ctxt = {
                    'service_port': rdata.get('service_port'),
                    'service_host': serv_host,
                    'service_protocol':
                    rdata.get('service_protocol') or 'http',
                    'api_version': rdata.get('api_version', '2'),
                    'default_role': default_role
                }
                # If using keystone v3 the context is incomplete without the
                # admin domain id
                if local_ctxt['api_version'] == '3':
                    if not config('default_domain'):
                        local_ctxt['admin_domain_id'] = rdata.get(
                            'admin_domain_id')
                if not context_complete(local_ctxt):
                    continue

                # Update the service endpoint and title for each available
                # region in order to support multi-region deployments
                if region is not None:
                    endpoint = ("%(service_protocol)s://%(service_host)s"
                                ":%(service_port)s/v2.0") % local_ctxt
                    for reg in region.split():
                        regions.add((endpoint, reg))

                if len(ctxt) == 0:
                    ctxt = local_ctxt

        if len(regions) > 1:
            avail_regions = map(lambda r: {'endpoint': r[0], 'title': r[1]},
                                regions)
            ctxt['regions'] = sorted(avail_regions,
                                     key=lambda k: k['endpoint'])

        # Allow the endpoint types to be specified via a config parameter.
        # The config parameter accepts either:
        #  1. a single endpoint type to be specified, in which case the
        #     primary endpoint is configured
        #  2. a list of endpoint types, in which case the primary endpoint
        #     is taken as the first entry and the secondary endpoint is
        #     taken as the second entry. All subsequent entries are ignored.
        ep_types = config('endpoint-type')
        if ep_types:
            ep_types = [self.normalize(e) for e in ep_types.split(',')]
            ctxt['primary_endpoint'] = ep_types[0]
            if len(ep_types) > 1:
                ctxt['secondary_endpoint'] = ep_types[1]

        return ctxt
Code example #40
File: context.py  Project: alefnode/cinder-vnx-fc
    def __call__(self):
        log('Generating template context for amqp')
        conf = config()
        try:
            username = conf['rabbit-user']
            vhost = conf['rabbit-vhost']
        except KeyError as e:
            log('Could not generate amqp context. '
                'Missing required charm config options: %s.' % e)
            raise OSContextError
        ctxt = {}
        for rid in relation_ids('amqp'):
            ha_vip_only = False
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
                                                         unit=unit)
                else:
                    ctxt['rabbitmq_host'] = relation_get('private-address',
                                                         rid=rid, unit=unit)
                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port
                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log(("Charm not setup for ssl support "
                                 "but ssl ca found"))
                            break
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                            ctxt['rabbit_ssl_ca'] = ca_path
                    # Sufficient information found = break out!
                    break
            # Used for active/active rabbitmq >= grizzly
            if ('clustered' not in ctxt or ha_vip_only) \
                    and len(related_units(rid)) > 1:
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    rabbitmq_hosts.append(relation_get('private-address',
                                                       rid=rid, unit=unit))
                ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
        if not context_complete(ctxt):
            return {}
        else:
            return ctxt
Code example #41
File: jjb.py  Project: lutostag/charm-ci-configurator
def jenkins_context():
    for rid in relation_ids('jenkins-configurator'):
        for unit in related_units(rid):
            return relation_get(rid=rid, unit=unit)
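
The example above simply returns the raw settings of the first related unit it finds. A minimal sketch of that relation_ids / related_units / relation_get walk, generalized to any relation name (the helper name is hypothetical; the hookenv functions are the same ones used throughout these examples):

from charmhelpers.core.hookenv import related_units, relation_get, relation_ids


def first_unit_settings(relation_name):
    # Return the settings dict of the first unit found on the named
    # relation, or an empty dict if no unit is related.
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            return relation_get(rid=rid, unit=unit) or {}
    return {}
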
Code example #42
def _notify_neutron():
    for rid in relation_ids("neutron-api"):
        if related_units(rid):
            neutron_api_joined(rid)
Code example #43
def update_all_domain_backends():
    """Re-trigger hooks for all domain-backend relations/units"""
    for rid in relation_ids('domain-backend'):
        for unit in related_units(rid):
            domain_backend_changed(relation_id=rid, unit=unit)
Code example #44
def config_changed():

    # if we are paused or upgrading, delay doing any config changed hooks.
    # It is forced on the resume.
    if is_unit_paused_set() or is_unit_upgrading_set():
        log("Unit is paused or upgrading. Skipping config_changed", "WARN")
        return

    # It is critical that the installation is attempted first before any
    # rendering of the configuration files occurs.
    # install_percona_xtradb_cluster has the code to decide if this is the
    # leader or if the leader is bootstrapped and therefore ready for install.
    install_percona_xtradb_cluster()

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    leader_bootstrapped = is_leader_bootstrapped()
    leader_ip = leader_get('leader-ip')

    # Cluster upgrade adds some complication
    cluster_series_upgrading = leader_get("cluster_series_upgrading")
    if cluster_series_upgrading:
        leader = (leader_get('cluster_series_upgrade_leader') ==
                  get_relation_ip('cluster'))
        leader_ip = leader_get('cluster_series_upgrade_leader')
    else:
        leader = is_leader()
        leader_ip = leader_get('leader-ip')

    # (re)install pcmkr agent
    install_mysql_ocf()

    if leader:
        # If the cluster has not been fully bootstrapped once yet, use an empty
        # hosts list to avoid restarting the leader node's mysqld during
        # cluster buildup.
        # After the cluster has bootstrapped at least one time, it is much
        # less likely to have restart collisions. It is then safe to use the
        # full hosts list and have the leader node's mysqld restart.
        # Empty hosts if cluster_series_upgrading
        if not clustered_once() or cluster_series_upgrading:
            hosts = []
        log(
            "Leader unit - bootstrap required={}".format(
                not leader_bootstrapped), DEBUG)
        render_config_restart_on_changed(hosts,
                                         bootstrap=not leader_bootstrapped)
    elif (leader_bootstrapped and is_sufficient_peers()
          and not cluster_series_upgrading):
        # Skip if cluster_series_upgrading
        # Speed up cluster process by bootstrapping when the leader has
        # bootstrapped if we have expected number of peers
        # However, in a cold boot scenario do not add the "old" leader
        # when it matches this host.
        if (leader_ip not in hosts and leader_ip != get_cluster_host_ip()):
            # Fix Bug #1738896
            hosts = [leader_ip] + hosts
        log("Leader is bootstrapped - configuring mysql on this node", DEBUG)
        # Rendering the mysqld.cnf and restarting is bootstrapping for a
        # non-leader node.
        render_config_restart_on_changed(hosts)
        # Assert we are bootstrapped. This will throw an
        # InconsistentUUIDError exception if UUIDs do not match.
        update_bootstrap_uuid()
    else:
        # Until the bootstrap-uuid attribute is set by the leader,
        # cluster_ready() will evaluate to False. So it is necessary to
        # feed this information to the user.
        status_set('waiting', "Waiting for bootstrap-uuid set by leader")
        log('Non-leader waiting on leader bootstrap, skipping render', DEBUG)
        return

    # Notify any changes to the access network
    update_client_db_relations()

    for rid in relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined(relation_id=rid)

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()

    open_port(DEFAULT_MYSQL_PORT)

    # the password needs to be updated only if the node was already
    # bootstrapped
    if is_bootstrapped():
        if is_leader():
            update_root_password()
        set_ready_on_peers()

    # NOTE(tkurek): re-set 'master' relation data
    if relation_ids('master'):
        master_joined()
Code example #45
def config_changed():
    aci_opflex_install()
    configure_opflex()
    CONFIGS.write_all()
    for rid in relation_ids('neutron-plugin'):
        neutron_plugin_joined(relation_id=rid)
Code example #46
    def __call__(self):
        ctxt = {}
        rids = [rid for rid in hookenv.relation_ids('cloud-compute')]
        if rids:
            ctxt['rids'] = rids
        return ctxt
Code example #47
def mon_relation_joined():
    public_addr = get_public_addr()
    for relid in relation_ids('mon'):
        relation_set(relation_id=relid,
                     relation_settings={'ceph-public-address': public_addr})
Code example #48
    def __call__(self):
        ctxt = {}
        rids = [rid for rid in hookenv.relation_ids('placement')]
        if rids:
            ctxt['rids'] = rids
        return ctxt
Code example #49
def notify_radosgws():
    for relid in relation_ids('radosgw'):
        for unit in related_units(relid):
            radosgw_relation(relid=relid, unit=unit)
Code example #50
def ha_changed():
    for relid in relation_ids('certificates'):
        certs_changed(relation_id=relid)
Code example #51
def postgresql_neutron_db_changed():
    CONFIGS.write(NEUTRON_CONF)
    conditional_neutron_migration()
    for r_id in relation_ids('neutron-plugin-api-subordinate'):
        neutron_plugin_api_subordinate_relation_joined(relid=r_id)
Code example #52
def _notify_controller():
    data = _get_orchestrator_info()
    for rid in relation_ids("contrail-controller"):
        if related_units(rid):
            relation_set(relation_id=rid, **data)
Code example #53
def upgrade_charm():
    for rid in relation_ids('storage-backend'):
        storage_backend(rid)
Code example #54
    def __call__(self):
        if not relation_ids(self.interfaces[0]):
            return {}

        host = socket.gethostname()
        systemd_rgw = False

        mon_hosts = []
        auths = []
        fsid = None

        for rid in relation_ids(self.interfaces[0]):
            for unit in related_units(rid):
                fsid = relation_get('fsid', rid=rid, unit=unit)
                _auth = relation_get('auth', rid=rid, unit=unit)
                if _auth:
                    auths.append(_auth)

                ceph_pub_addr = relation_get('ceph-public-address',
                                             rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address',
                                              rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                if ceph_addr:
                    mon_hosts.append(ceph_addr)
                if relation_get('rgw.{}_key'.format(host), rid=rid, unit=unit):
                    systemd_rgw = True

        if len(set(auths)) != 1:
            e = ("Inconsistent or absent auth returned by mon units. Setting "
                 "auth_supported to 'none'")
            log(e, level=WARNING)
            auth = 'none'
        else:
            auth = auths[0]

        # /etc/init.d/radosgw mandates that a dns name is used for this
        # parameter so ensure that address is resolvable
        if config('prefer-ipv6'):
            ensure_host_resolvable_v6(host)

        port = determine_api_port(utils.listen_port(), singlenode_mode=True)
        if config('prefer-ipv6'):
            port = "[::]:%s" % (port)

        mon_hosts.sort()
        ctxt = {
            'auth_supported': auth,
            'mon_hosts': ' '.join(mon_hosts),
            'hostname': host,
            'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
            'systemd_rgw': systemd_rgw,
            'use_syslog': str(config('use-syslog')).lower(),
            'loglevel': config('loglevel'),
            'port': port,
            'ipv6': config('prefer-ipv6'),
            # The public unit IP is only used in case the authentication is
            # *Not* keystone - in which case it is used to make sure the
            # storage endpoint returned by the built-in auth is the HAproxy
            # (since it defaults to the port the service runs on, and that is
            # not available externally). ~tribaal
            'unit_public_ip': unit_public_ip(),
            'fsid': fsid,
        }

        # NOTE(dosaboy): these sections must correspond to what is supported in
        #                the config template.
        sections = ['global', 'client.radosgw.gateway']
        user_provided = CephConfContext(permitted_sections=sections)()
        user_provided = {
            k.replace('.', '_'): user_provided[k]
            for k in user_provided
        }
        ctxt.update(user_provided)

        if self.context_complete(ctxt):
            # Multi-site Zone configuration is optional,
            # so add after assessment
            ctxt['rgw_zone'] = config('zone')
            return ctxt

        return {}
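To make the output shape concrete, the dict below is roughly what the context above could return for a small three-monitor cephx deployment. Every value is invented for illustration; user-provided overrides from CephConfContext would be merged on top before rendering.

# Invented example of the context's return value (keys mirror the code
# above; values are placeholders, not from a real deployment):
example_ctxt = {
    'auth_supported': 'cephx',
    'mon_hosts': '10.5.0.11 10.5.0.12 10.5.0.13',
    'hostname': 'juju-radosgw-0',
    'old_auth': False,
    'systemd_rgw': True,
    'use_syslog': 'false',
    'loglevel': 1,
    'port': 70,
    'ipv6': False,
    'unit_public_ip': '10.5.0.20',
    'fsid': 'b2f1c9c6-1b2d-4c7e-9d3a-0f4e5a6b7c8d',
    'rgw_zone': 'default',
}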
Code Example #55
    def __call__(self):
        '''
        Extends the main charmhelpers HAProxyContext with a port mapping
        specific to this charm.
        Also used to extend the nova.conf context with the correct
        api_listening_ports.
        '''
        ctxt = super(HAProxyContext, self).__call__()

        os_rel = ch_utils.os_release('nova-common')
        cmp_os_rel = ch_utils.CompareOpenStackReleases(os_rel)
        # determine which port api processes should bind to, depending
        # on existence of haproxy + apache frontends
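        # (In the charmhelpers convention the public port is terminated by
        #  haproxy, the apache/TLS frontend typically listens 10 ports
        #  below that, and the bare API process another 10 below;
        #  determine_api_port/determine_apache_port apply those offsets.)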
        compute_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-os-compute'), singlenode_mode=True)
        ec2_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-ec2'), singlenode_mode=True)
        s3_api = ch_cluster.determine_api_port(
            common.api_port('nova-objectstore'), singlenode_mode=True)
        placement_api = ch_cluster.determine_api_port(
            common.api_port('nova-placement-api'), singlenode_mode=True)
        metadata_api = ch_cluster.determine_api_port(
            common.api_port('nova-api-metadata'), singlenode_mode=True)
        # Apache ports
        a_compute_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-os-compute'), singlenode_mode=True)
        a_ec2_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-ec2'), singlenode_mode=True)
        a_s3_api = ch_cluster.determine_apache_port(
            common.api_port('nova-objectstore'), singlenode_mode=True)
        a_placement_api = ch_cluster.determine_apache_port(
            common.api_port('nova-placement-api'), singlenode_mode=True)
        a_metadata_api = ch_cluster.determine_apache_port(
            common.api_port('nova-api-metadata'), singlenode_mode=True)
        # Ports for the API processes, to be set in nova.conf accordingly.
        listen_ports = {
            'osapi_compute_listen_port': compute_api,
            'ec2_listen_port': ec2_api,
            's3_listen_port': s3_api,
            'placement_listen_port': placement_api,
            'metadata_listen_port': metadata_api,
        }

        port_mapping = {
            'nova-api-os-compute': [
                common.api_port('nova-api-os-compute'), a_compute_api],
            'nova-api-ec2': [
                common.api_port('nova-api-ec2'), a_ec2_api],
            'nova-objectstore': [
                common.api_port('nova-objectstore'), a_s3_api],
            'nova-placement-api': [
                common.api_port('nova-placement-api'), a_placement_api],
            'nova-api-metadata': [
                common.api_port('nova-api-metadata'), a_metadata_api],
        }

        if cmp_os_rel >= 'kilo':
            del listen_ports['ec2_listen_port']
            del listen_ports['s3_listen_port']
            del port_mapping['nova-api-ec2']
            del port_mapping['nova-objectstore']

        rids = hookenv.relation_ids('placement')
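        # Drop the placement entries when a separate placement charm is
        # related, or when the release falls outside the Ocata..Stein
        # window in which nova-placement-api was hosted by this charm.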
        if (rids or
                cmp_os_rel < 'ocata' or
                cmp_os_rel > 'stein'):
            del listen_ports['placement_listen_port']
            del port_mapping['nova-placement-api']

        # for haproxy.conf
        ctxt['service_ports'] = port_mapping
        # for nova.conf
        ctxt['listen_ports'] = listen_ports
        return ctxt
Code Example #56
def upgrade_charm():
    install()
    update_nrpe_config()
    any_changed()
    for rid in relation_ids('cluster'):
        cluster_joined(relation_id=rid)
Code Example #57
    def region(self):
        region = None
        for rid in relation_ids('cloud-compute'):
            for unit in related_units(rid):
                region = relation_get('region', rid=rid, unit=unit)
        return region
Code Example #58
    def get_data(self):
        super(MonitorsRelation, self).get_data()
        if not hookenv.relation_ids(self.name):
            return
        addresses = [info['private-address'] for info in self['monitors']]
        self['monitor_allowed_hosts'] = ','.join(addresses)
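As a toy illustration of the aggregation above (addresses invented):

monitors = [{'private-address': '10.0.0.4'}, {'private-address': '10.0.0.5'}]
allowed = ','.join(m['private-address'] for m in monitors)
# allowed == '10.0.0.4,10.0.0.5', the value stored as 'monitor_allowed_hosts'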
Code Example #59
def _notify_heat():
    for rid in relation_ids("heat-plugin"):
        if related_units(rid):
            heat_plugin_joined(rid)
Code Example #60
def ha_relation_changed():
    # Check that we are related to a principal and that
    # it has already provided the required corosync configuration
    if not get_corosync_conf():
        log('Unable to configure corosync right now, deferring configuration',
            level=INFO)
        return

    if relation_ids('hanode'):
        log('Ready to form cluster - informing peers', level=DEBUG)
        relation_set(relation_id=relation_ids('hanode')[0], ready=True)
    else:
        log('Ready to form cluster, but not related to peers just yet',
            level=INFO)
        return

    # Check that there's enough nodes in order to perform the
    # configuration of the HA cluster
    if len(get_cluster_nodes()) < int(config('cluster_count')):
        log('Not enough nodes in cluster, deferring configuration', level=INFO)
        return

    relids = relation_ids('ha')
    if len(relids) == 1:  # Should only ever be one of these
        # Obtain relation information
        relid = relids[0]
        units = related_units(relid)
        if len(units) < 1:
            log('No principal unit found, deferring configuration', level=INFO)
            return

        unit = units[0]
        log('Parsing cluster configuration using rid: %s, unit: %s' %
            (relid, unit),
            level=DEBUG)
        resources = parse_data(relid, unit, 'resources')
        delete_resources = parse_data(relid, unit, 'delete_resources')
        resource_params = parse_data(relid, unit, 'resource_params')
        groups = parse_data(relid, unit, 'groups')
        ms = parse_data(relid, unit, 'ms')
        orders = parse_data(relid, unit, 'orders')
        colocations = parse_data(relid, unit, 'colocations')
        clones = parse_data(relid, unit, 'clones')
        locations = parse_data(relid, unit, 'locations')
        init_services = parse_data(relid, unit, 'init_services')
    else:
        log('Related to %s ha services' % (len(relids)), level=DEBUG)
        return

    if any(ra.startswith('ocf:openstack') for ra in resources.values()):
        apt_install('openstack-resource-agents')
    if any(ra.startswith('ocf:ceph') for ra in resources.values()):
        apt_install('ceph-resource-agents')

    if any(ra.startswith('ocf:maas') for ra in resources.values()):
        if validate_dns_ha():
            log('Setting up access to MAAS API', level=INFO)
            setup_maas_api()
            # Update resource_params for DNS resources to include MAAS URL and
            # credentials
            for resource in resource_params.keys():
                if resource.endswith("_hostname"):
                    resource_params[resource] += (
                        ' maas_url="{}" maas_credentials="{}"'
                        ''.format(config('maas_url'),
                                  config('maas_credentials')))
        else:
            msg = ("DNS HA is requested but maas_url "
                   "or maas_credentials are not set")
            status_set('blocked', msg)
            raise ValueError(msg)

    # NOTE: this should be removed in the 15.04 cycle, as the corosync
    # configuration should be set directly on the subordinate.
    configure_corosync()
    pcmk.wait_for_pcmk()
    configure_cluster_global()
    configure_monitor_host()
    configure_stonith()

    # Only configure the cluster resources
    # from the oldest peer unit.
    if oldest_peer(peer_units()):
        log('Deleting Resources: %s' % (delete_resources), level=DEBUG)
        for res_name in delete_resources:
            if pcmk.crm_opt_exists(res_name):
                if ocf_file_exists(res_name, resources):
                    log('Stopping and deleting resource %s' % res_name,
                        level=DEBUG)
                    if pcmk.crm_res_running(res_name):
                        pcmk.commit('crm -w -F resource stop %s' % res_name)
                else:
                    log('Cleaning up and deleting resource %s' % res_name,
                        level=DEBUG)
                    pcmk.commit('crm resource cleanup %s' % res_name)
                # Daemon process may still be running after the upgrade.
                kill_legacy_ocf_daemon_process(res_name)
                pcmk.commit('crm -w -F configure delete %s' % res_name)

        log('Configuring Resources: %s' % (resources), level=DEBUG)
        for res_name, res_type in resources.items():
            # disable the service we are going to put in HA
            if res_type.split(':')[0] == "lsb":
                disable_lsb_services(res_type.split(':')[1])
                if service_running(res_type.split(':')[1]):
                    service_stop(res_type.split(':')[1])
            elif (len(init_services) != 0 and res_name in init_services
                  and init_services[res_name]):
                disable_upstart_services(init_services[res_name])
                if service_running(init_services[res_name]):
                    service_stop(init_services[res_name])
            # Put the services in HA, if not already done
            # if not pcmk.is_resource_present(res_name):
            if not pcmk.crm_opt_exists(res_name):
                if res_name not in resource_params:
                    cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                                   res_type)
                else:
                    cmd = ('crm -w -F configure primitive %s %s %s' %
                           (res_name, res_type, resource_params[res_name]))

                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)
                if config('monitor_host'):
                    cmd = ('crm -F configure location Ping-%s %s rule '
                           '-inf: pingd lte 0' % (res_name, res_name))
                    pcmk.commit(cmd)

        log('Configuring Groups: %s' % (groups), level=DEBUG)
        for grp_name, grp_params in groups.items():
            if not pcmk.crm_opt_exists(grp_name):
                cmd = ('crm -w -F configure group %s %s' %
                       (grp_name, grp_params))
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Master/Slave (ms): %s' % (ms), level=DEBUG)
        for ms_name, ms_params in ms.items():
            if not pcmk.crm_opt_exists(ms_name):
                cmd = 'crm -w -F configure ms %s %s' % (ms_name, ms_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Orders: %s' % (orders), level=DEBUG)
        for ord_name, ord_params in orders.items():
            if not pcmk.crm_opt_exists(ord_name):
                cmd = 'crm -w -F configure order %s %s' % (ord_name,
                                                           ord_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Colocations: %s' % colocations, level=DEBUG)
        for col_name, col_params in colocations.items():
            if not pcmk.crm_opt_exists(col_name):
                cmd = 'crm -w -F configure colocation %s %s' % (col_name,
                                                                col_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Clones: %s' % clones, level=DEBUG)
        for cln_name, cln_params in clones.items():
            if not pcmk.crm_opt_exists(cln_name):
                cmd = 'crm -w -F configure clone %s %s' % (cln_name,
                                                           cln_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        log('Configuring Locations: %s' % locations, level=DEBUG)
        for loc_name, loc_params in locations.items():
            if not pcmk.crm_opt_exists(loc_name):
                cmd = 'crm -w -F configure location %s %s' % (loc_name,
                                                              loc_params)
                pcmk.commit(cmd)
                log('%s' % cmd, level=DEBUG)

        for res_name, res_type in resources.items():
            if len(init_services) != 0 and res_name in init_services:
                # Checks that the resources are running and started.
                # Ensure that clones are excluded as the resource is
                # not directly controllable (dealt with below)
                # Ensure that groups are cleaned up as a whole rather
                # than as individual resources.
                if (res_name not in clones.values()
                        and res_name not in groups.values()
                        and not pcmk.crm_res_running(res_name)):
                    # Just in case, cleanup the resources to ensure they get
                    # started in case they failed for some unrelated reason.
                    cmd = 'crm resource cleanup %s' % res_name
                    pcmk.commit(cmd)

        for cl_name in clones:
            # Always cleanup clones
            cmd = 'crm resource cleanup %s' % cl_name
            pcmk.commit(cmd)

        for grp_name in groups:
            # Always cleanup groups
            cmd = 'crm resource cleanup %s' % grp_name
            pcmk.commit(cmd)

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id, clustered="yes")
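For context, parse_data above pulls structured settings (resources, groups, orders and so on) off the principal's relation data. The real helper lives in the hacluster charm and its exact serialization is not shown here; a minimal stand-in, assuming JSON-encoded values, could look like this:

import json

from charmhelpers.core.hookenv import relation_get


def parse_data(relid, unit, key):
    """Hedged stand-in for the hacluster helper.

    Fetches a single relation setting and decodes it. JSON is assumed
    purely for illustration; the real charm may use a different encoding.
    """
    raw = relation_get(key, rid=relid, unit=unit)
    if not raw:
        return {}
    return json.loads(raw)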