Example #1
def check_optional_relations(configs):
    """Check that if we have a relation_id for high availability that we can
    get the hacluster config.  If we can't then we are blocked.  This function
    is called from assess_status/set_os_workload_status as the charm_func and
    needs to return either 'unknown', '' if there is no problem or the status,
    message if there is a problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ha'):
        try:
            get_hacluster_config()
        except Exception:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    # NOTE: misc multi-site relation and config checks
    multisite_config = (config('realm'),
                        config('zonegroup'),
                        config('zone'))
    if relation_ids('master') or relation_ids('slave'):
        if not all(multisite_config):
            return ('blocked',
                    'multi-site configuration incomplete '
                    '(realm={realm}, zonegroup={zonegroup}'
                    ', zone={zone})'.format(**config()))
    if (all(multisite_config) and not
            (relation_ids('master') or relation_ids('slave'))):
        return ('blocked',
                'multi-site configuration but master/slave '
                'relation missing')
    if (all(multisite_config) and relation_ids('slave')):
        multisite_ready = False
        for rid in relation_ids('slave'):
            for unit in related_units(rid):
                if relation_get('url', unit=unit, rid=rid):
                    multisite_ready = True
                    break
        if not multisite_ready:
            return ('waiting',
                    'multi-site master relation incomplete')
    master_configured = (
        leader_get('access_key'),
        leader_get('secret'),
        leader_get('restart_nonce'),
    )
    if (all(multisite_config) and
            relation_ids('master') and
            not all(master_configured)):
        return ('waiting',
                'waiting for configuration of master zone')
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return 'unknown', ''
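A minimal sketch (an illustration, not this charm's actual wiring) of how a caller could consume a check like the one above: anything other than 'unknown' is pushed straight into the workload status.

from charmhelpers.core.hookenv import status_set

def assess_status_sketch(configs):
    # ('unknown', '') means the check has no opinion; anything else is a
    # real status/message pair that should be surfaced.
    status, message = check_optional_relations(configs)
    if status != 'unknown':
        status_set(status, message)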
Example #2
def relay_client_credentials():
    ''' Write the client cert and key to the charm directory on followers. '''
    # offer a short circuit if we have already received broadcast
    # credentials for the cluster
    if leader_get('client_certificate') and leader_get('client_key'):
        with open('client.crt', 'w+') as fp:
            fp.write(leader_get('client_certificate'))
        with open('client.key', 'w+') as fp:
            fp.write(leader_get('client_key'))
        set_state('client-credentials-relayed')
        return
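A hedged sketch of the leader-side counterpart implied above (an assumption, not code from this charm): the leader reads its local client credentials and broadcasts them through leader_set so followers can relay them as shown.

from charmhelpers.core.hookenv import is_leader, leader_set

def broadcast_client_credentials():
    # Only the leader may write leadership data.
    if is_leader():
        with open('client.crt') as crt, open('client.key') as key:
            leader_set({'client_certificate': crt.read(),
                        'client_key': key.read()})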
Example #3
def register_node_with_leader(cluster):
    '''
    Control flow mechanism to perform self registration with the leader.

    Before executing self registration, we must adhere to the nature of offline
    static turnup rules. If we find a GUID in the member list without peering
    information the unit will enter a race condition and must wait for a clean
    status output before we can progress to self registration.
    '''
    # We're going to communicate with the leader, and we only need our
    # bootstrap startup string once; after registration it is no longer used.
    etcdctl = EtcdCtl()
    bag = EtcdDatabag()
    # Assume a hiccup during registration and attempt a retry
    if bag.cluster_unit_id:
        bag.cluster = bag.registration_peer_string
        render('defaults', '/etc/default/etcd', bag.__dict__)
        host.service_restart('etcd')
        time.sleep(2)

    peers = etcdctl.member_list(leader_get('leader_address'))
    for unit in peers:
        if 'client_urls' not in peers[unit].keys():
            # we cannot register. State not attainable.
            msg = 'Waiting for unit to complete registration'
            status_set('waiting', msg)
            return

    if not bag.cluster_unit_id:
        bag.leader_address = leader_get('leader_address')
        resp = etcdctl.register(bag.__dict__)
        if resp and 'cluster_unit_id' in resp.keys() and 'cluster' in resp.keys():  # noqa
            bag.cache_registration_detail('cluster_unit_id',
                                          resp['cluster_unit_id'])
            bag.cache_registration_detail('registration_peer_string',
                                          resp['cluster'])

            bag.cluster_unit_id = resp['cluster_unit_id']
            bag.cluster = resp['cluster']

    render('defaults', '/etc/default/etcd', bag.__dict__)
    host.service_restart('etcd')
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy')
        return
    open_port(bag.port)
    set_state('etcd.registered')
Example #4
def leader_elected():
    if is_leader():
        if not leader_get('heat-domain-admin-passwd'):
            try:
                leader_set({'heat-domain-admin-passwd': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-domain-admin-passwd failed: {}'
                    .format(str(e)), level=WARNING)
        if not leader_get('heat-auth-encryption-key'):
            try:
                leader_set({'heat-auth-encryption-key': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-auth-encryption-key failed: {}'
                    .format(str(e)), level=WARNING)
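The pattern above (generate a secret once, on the leader, and tolerate losing leadership mid-hook) can be factored into a small helper. This is only a sketch; ensure_leader_secret is a made-up name, not part of the charm.

import subprocess

from charmhelpers.core.hookenv import WARNING, is_leader, leader_get, \
    leader_set, log
from charmhelpers.core.host import pwgen

def ensure_leader_secret(key, length=32):
    if is_leader() and not leader_get(key):
        try:
            # leader_set shells out to leader-set and can fail if this
            # unit loses leadership part way through the hook.
            leader_set({key: pwgen(length)})
        except subprocess.CalledProcessError as e:
            log('leader_set: {} failed: {}'.format(key, str(e)),
                level=WARNING)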
Example #5
def send_single_connection_details(db):
    ''' Send the client key/cert/CA and a single-member connection string
    (this unit only) over the db relation. '''
    cert = leader_get('client_certificate')
    key = leader_get('client_key')
    ca = leader_get('certificate_authority')
    # Set the key and cert on the db relation
    db.set_client_credentials(key, cert, ca)

    bag = EtcdDatabag()
    # Get all the peers participating in the cluster relation.
    members = [bag.private_address]
    # Create a connection string with this member on the configured port.
    connection_string = get_connection_string(members, bag.port)
    # Set the connection string on the db relation.
    db.set_connection_string(connection_string)
Example #6
    def __call__(self):
        ctxt = {}
        # check if we have stored encryption key
        ctxt['encryption_key'] = get_encryption_key()
        ctxt['heat_domain_admin_passwd'] = (
            leader_get('heat-domain-admin-passwd'))
        return ctxt
Example #7
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context data with SDN data.
    context.update(gather_sdn_data())
    # Add the charm configuration data to the context.
    context.update(hookenv.config())
    if reldata:
        connection_string = reldata.get_connection_string()
        # Define where the etcd tls files will be kept.
        etcd_dir = '/etc/ssl/etcd'
        # Create paths to the etcd client ca, key, and cert file locations.
        ca = os.path.join(etcd_dir, 'client-ca.pem')
        key = os.path.join(etcd_dir, 'client-key.pem')
        cert = os.path.join(etcd_dir, 'client-cert.pem')
        # Save the client credentials (in relation data) to the paths provided.
        reldata.save_client_credentials(key, cert, ca)
        # Update the context so the template has the etcd information.
        context.update({'etcd_dir': etcd_dir,
                        'connection_string': connection_string,
                        'etcd_ca': ca,
                        'etcd_key': key,
                        'etcd_cert': cert})

    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)

    # Update the context with extra values, arch, manifest dir, and private IP.
    context.update({'arch': arch(),
                    'master_address': leader_get('master-address'),
                    'manifest_directory': rendered_manifest_dir,
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    # Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    render('docker-compose.yml', target, context)

    if is_leader():
        # Source: https://github.com/kubernetes/...master/cluster/images/hyperkube  # noqa
        target = os.path.join(rendered_manifest_dir, 'master.json')
        # Render the files/manifests/master.json that contains parameters for
        # the apiserver, controller, and controller-manager
        render('master.json', target, context)
        # Source: ...cluster/addons/dns/skydns-svc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
        # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
        render('kubedns-svc.yaml', target, context)
        # Source: ...cluster/addons/dns/skydns-rc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
        # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
        render('kubedns-rc.yaml', target, context)
Example #8
    def cluster_token(self):
        """Getter to return the unique cluster token."""
        token = leader_get("token")
        if not token and is_leader():
            token = self.id_generator()
            leader_set({"token": token})
        return token
Example #9
    def get_mysql_password(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username using peer relation cluster."""
        excludes = []

        # First check peer relation.
        try:
            for key in self.passwd_keys(username):
                _password = leader_get(key)
                if _password:
                    break

            # If root password available don't update peer relation from local
            if _password and not username:
                excludes.append(self.root_passwd_file_template)

        except ValueError:
            # cluster relation is not yet started; use on-disk
            _password = None

        # If none available, generate new one
        if not _password:
            _password = self.get_mysql_password_on_disk(username, password)

        # Put on wire if required
        if self.migrate_passwd_to_leader_storage:
            self.migrate_passwords_to_leader_storage(excludes=excludes)

        return _password
Example #10
    def cluster_string(self, proto='http', internal=True):
        ''' This method behaves slightly differently depending on the
            context of its invocation. If the unit is the leader, the
            connection string should always be built and returned from
            the contents in unit data. Otherwise we should return the
            value set by the leader via leader-data

            @params proto - Determines the output prefix depending on need. eg:
                           http://127.0.0.1:4001 or etcd://127.0.0.1:4001
            @params internal - Boolean value to determine if management or
                               client cluster string is required.
        '''
        if is_leader():
            cluster_data = self.cluster_data()
            connection_string = ""
            if internal:
                for u in cluster_data:
                    connection_string += ",{}={}://{}:{}".format(u,  # noqa
                                                                 proto,
                                                                 cluster_data[u]['private_address'],  # noqa
                                                                 self.management_port)  # noqa
            else:
                for u in cluster_data:
                    connection_string += ",{}://{}:{}".format(proto,
                                                              cluster_data[u]['private_address'],  # noqa
                                                              self.port)
            return connection_string.lstrip(',')
        else:
            return leader_get('cluster')
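A hedged sketch of the publishing side implied by the follower branch above: the leader would store its computed cluster string under the 'cluster' leadership key so that leader_get('cluster') returns it on other units. The method name is illustrative, and the same imports as the class above are assumed.

    def publish_cluster_string(self):
        if is_leader():
            # Followers read this back via leader_get('cluster') above.
            leader_set({'cluster': self.cluster_string()})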
Example #12
def get_seed_ips():
    '''Return the set of seed ip addresses.

    We use ip addresses rather than unit names, as we may need to use
    external seed ips at some point.
    '''
    return set((hookenv.leader_get('seeds') or '').split(','))
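A hypothetical counterpart (not shown here) illustrating how the 'seeds' value would be written: the leader stores the seed IPs as the comma-separated string that get_seed_ips() parses.

from charmhelpers.core import hookenv

def set_seed_ips(seed_ips):
    '''Store the seed IPs in leadership data (leader only).'''
    hookenv.leader_set({'seeds': ','.join(sorted(seed_ips))})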
Example #13
def remove_cluster_node(units):
    if leader_get('master_ip') not in units and is_flag_set(
            'leadership.is_leader'):
        leader_set({
            'master_ip':
            '{}:{}'.format(unit_private_ip(), retrieve_helper_port())
        })
Example #14
def add_user():
    """Add a swauth user to swift."""
    if config('auth-type') == 'swauth':
        try_initialize_swauth()
        account = action_get('account')
        username = action_get('username')
        password = action_get('password')
        bind_port = config('bind-port')
        bind_port = determine_api_port(bind_port, singlenode_mode=True)
        success = True
        try:
            check_call([
                "swauth-add-user",
                "-A", "http://localhost:{}/auth/".format(bind_port),
                "-K", leader_get('swauth-admin-key'),
                "-a", account, username, password])
        except CalledProcessError as e:
            success = False
            log("Has a problem adding user: {}".format(e.output))
            action_fail(
                "Adding user {} failed with: \"{}\""
                .format(username, str(e)))
        if success:
            message = "Successfully added the user {}".format(username)
            action_set({
                'add-user.result': 'Success',
                'add-user.message': message,
            })
Example #15
def is_leader_bootstrapped():
    """ Check that the leader is bootstrapped and has set required settings

    :side_effect: calls leader_get
    :returns: boolean
    """

    check_settings = [
        'bootstrap-uuid', 'mysql.passwd', 'root-password', 'sst-password',
        'leader-ip'
    ]
    leader_settings = leader_get()

    # Is the leader bootstrapped?
    for setting in check_settings:
        if leader_settings.get(setting) is None:
            log(
                "Leader is NOT bootstrapped {}: {}".format(
                    setting, leader_settings.get('bootstrap-uuid')), DEBUG)
            return False

    log(
        "Leader is bootstrapped uuid: {}".format(
            leader_settings.get('bootstrap-uuid')), DEBUG)
    return True
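A hedged sketch of the leader-side settings this check expects: the key names mirror check_settings above, but the helper name and the shape of the passwords argument are assumptions.

from charmhelpers.core.hookenv import leader_set

def mark_leader_bootstrapped(bootstrap_uuid, passwords, leader_ip):
    # passwords is assumed to be a dict keyed by the three password names.
    leader_set({
        'bootstrap-uuid': bootstrap_uuid,
        'mysql.passwd': passwords['mysql.passwd'],
        'root-password': passwords['root-password'],
        'sst-password': passwords['sst-password'],
        'leader-ip': leader_ip,
    })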
Example #16
def nova_compute_joined(rel_id=None):
    if config.get("dpdk", False):
        # contrail nova packages contain vrouter vhostuser vif
        shutil.copy("files/40contrail", "/etc/apt/preferences.d")
        apt_install(["nova-compute", "libvirt-bin", "contrail-nova-vif"],
                    options=[
                        "--reinstall", "--force-yes", "-fy", "-o",
                        "Dpkg::Options::=--force-confnew"
                    ],
                    fatal=True)
        service_restart("nova-api-metadata")

    # create plugin config
    sections = {
        "DEFAULT":
        [("firewall_driver", "nova.virt.firewall.NoopFirewallDriver")]
    }
    if config.get("dpdk", False):
        sections["CONTRAIL"] = [("use_userspace_vhost", "True")]
        sections["libvirt"] = [("use_huge_pages", "True")]
    conf = {"nova-compute": {"/etc/nova/nova.conf": {"sections": sections}}}
    settings = {
        "metadata-shared-secret": leader_get("metadata-shared-secret"),
        "subordinate_configuration": json.dumps(conf)
    }
    relation_set(relation_id=rel_id, relation_settings=settings)
Example #17
def mds_relation_joined(relid=None, unit=None):
    if ceph.is_quorum() and related_osds():
        log('mon cluster in quorum and OSDs related '
            '- providing mds client with keys')
        mds_name = relation_get(attribute='mds-name', rid=relid, unit=unit)
        if not unit:
            unit = remote_unit()
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'mds_key': ceph.get_mds_key(name=mds_name),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr
        }
        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring mds broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('Waiting on mon quorum or min osds before provisioning mds keys')
Example #18
def get_rabbitmq_connection_details():
    return {
        "rabbit_q_name": "vnc-config.issu-queue",
        "rabbit_vhost": "/",
        "rabbit_port": "5673",
        "rabbit_address_list": common_utils.json_loads(leader_get("controller_ip_list"), list()),
    }
Example #19
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    unitdata.kv().update(current, prefix='leadership.settings.')
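As the docstring notes, this initializer is meant to run via hookenv.atstart() so the leadership.* states exist before any @hook handler fires; a minimal registration (module placement assumed) looks like this:

hookenv.atstart(initialize_leadership_state)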
Example #20
def leader_elected():
    ip = common_utils.get_ip()
    data_ip = common_utils.get_ip(config_param="data-network", fallback=ip)
    for var_name in [("ip", "unit-address", ip),
                     ("data_ip", "data-address", data_ip)]:
        ip_list = common_utils.json_loads(leader_get("controller_{}_list".format(var_name[0])), list())
        ips = utils.get_controller_ips(var_name[1], var_name[2])
        if not ip_list:
            ip_list = ips.values()
            log("{}_LIST: {}    {}S: {}".format(var_name[0].upper(), str(ip_list), var_name[0].upper(), str(ips)))
            settings = {
                "controller_{}_list".format(var_name[0]): json.dumps(list(ip_list)),
                "controller_{}s".format(var_name[0]): json.dumps(ips)
            }
            leader_set(settings=settings)
        else:
            current_ip_list = ips.values()
            dead_ips = set(ip_list).difference(current_ip_list)
            new_ips = set(current_ip_list).difference(ip_list)
            if new_ips:
                log("There are a new controllers that are not in the list: "
                    + str(new_ips), level=ERROR)
            if dead_ips:
                log("There are a dead controllers that are in the list: "
                    + str(dead_ips), level=ERROR)

    update_northbound_relations()
    update_southbound_relations()
    utils.update_charm_status()
Example #21
def update_issu_relations(rid=None):
    rids = [rid] if rid else relation_ids("contrail-issu")
    if not rids:
        return

    # controller_ips/data_ips are already dumped json
    settings = {
        "unit-type": "issu",
        "maintenance": config.get("maintenance"),
        "issu_controller_ips": leader_get("controller_ip_list"),
        "issu_controller_data_ips": leader_get("controller_data_ip_list"),
        "issu_analytics_ips": json.dumps(utils.get_analytics_list()),
    }

    for rid in rids:
        relation_set(relation_id=rid, relation_settings=settings)
Example #22
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Example #23
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ceph.is_quorum() and related_osds():
        log('mon cluster in quorum and osds related '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'radosgw_key': ceph.get_radosgw_key(),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr,
        }

        settings = relation_get(rid=relid, unit=unit)
        """Process broker request(s)."""
        if 'broker_req' in settings:
            if ceph.is_leader():
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                data[unit_response_key] = rsp
            else:
                log("Not leader - ignoring broker request", level=DEBUG)

        relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum or no osds - deferring key provision')
Example #24
def get_ceph_context():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'mds', 'mon']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
Example #25
def get_local_charm_access_role_id():
    """Retrieve the id of the role for local charm access

    :returns: Id of local charm access role
    :rtype: str
    """
    return hookenv.leader_get(CHARM_ACCESS_ROLE_ID)
Example #26
    def cluster_token(self):
        ''' Getter to return the unique cluster token. '''
        token = leader_get('token')
        if not token and is_leader():
            token = self.id_generator()
            leader_set({'token': token})
        return token
Example #27
    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        os_codename = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(os_codename) >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'

        if config('rbd-pool-name'):
            pool_name = config('rbd-pool-name')
        else:
            pool_name = service
        section = {service: [('volume_backend_name', service),
                             ('volume_driver', volume_driver),
                             ('rbd_pool', pool_name),
                             ('rbd_user', service),
                             ('rbd_secret_uuid', leader_get('secret-uuid')),
                             ('rbd_ceph_conf', ceph_config_file())]}

        if CompareOpenStackReleases(os_codename) >= "mitaka":
            section[service].append(('report_discard_supported', True))

        if CompareOpenStackReleases(os_codename) >= "queens":
            section[service].append(('rbd_exclusive_cinder_pool', True))
            section[service].append(
                ('rbd_flatten_volume_from_snapshot',
                 config('rbd-flatten-volume-from-snapshot')))

        return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
Example #28
def add_user():
    """Add a swauth user to swift."""
    if config('auth-type') == 'swauth':
        try_initialize_swauth()
        account = action_get('account')
        username = action_get('username')
        password = action_get('password')
        bind_port = config('bind-port')
        bind_port = determine_api_port(bind_port, singlenode_mode=True)
        success = True
        try:
            check_call([
                "swauth-add-user", "-A",
                "http://localhost:{}/auth/".format(bind_port), "-K",
                leader_get('swauth-admin-key'), "-a", account, username,
                password
            ])
        except CalledProcessError as e:
            success = False
            log("Has a problem adding user: {}".format(e.output))
            action_fail("Adding user {} failed with: \"{}\"".format(
                username, str(e)))
        if success:
            message = "Successfully added the user {}".format(username)
            action_set({
                'add-user.result': 'Success',
                'add-user.message': message,
            })
Example #29
    def create_initial_servers_and_domains(cls):
        """Create the nameserver entry and domains based on the charm user
        supplied config

        NOTE(AJK): This only wants to be done ONCE and by the leader, so we use
        leader settings to store that we've done it, after it's successfully
        completed.

        @returns None
        """
        KEY = 'create_initial_servers_and_domains'
        if hookenv.is_leader() and not hookenv.leader_get(KEY):
            nova_domain_name = hookenv.config('nova-domain')
            neutron_domain_name = hookenv.config('neutron-domain')
            with cls.check_zone_ids(nova_domain_name, neutron_domain_name):
                if hookenv.config('nameservers'):
                    for ns in hookenv.config('nameservers').split():
                        cls.create_server(ns)
                else:
                    hookenv.log('No nameserver specified, skipping creation of '
                                'nova and neutron domains',
                                level=hookenv.WARNING)
                    return
                if nova_domain_name:
                    cls.create_domain(
                        nova_domain_name,
                        hookenv.config('nova-domain-email'))
                if neutron_domain_name:
                    cls.create_domain(
                        neutron_domain_name,
                        hookenv.config('neutron-domain-email'))
            # if this fails, we weren't the leader any more; another unit may
            # attempt to do this too.
            hookenv.leader_set({KEY: 'done'})
Example #30
def send_cluster_connection_details(cluster, db):
    ''' Need to set the cluster connection string and
    the client key and certificate on the relation object. '''
    cert = leader_get('client_certificate')
    key = leader_get('client_key')
    ca = leader_get('certificate_authority')
    # Set the key, cert, and ca on the db relation
    db.set_client_credentials(key, cert, ca)

    port = hookenv.config().get('port')
    # Get all the peers participating in the cluster relation.
    members = cluster.get_peer_addresses()
    # Create a connection string with all the members on the configured port.
    connection_string = get_connection_string(members, port)
    # Set the connection string on the db relation.
    db.set_connection_string(connection_string)
Example #31
def get_context():
    ctx = {}
    ctx["log_level"] = config.get("log-level", "SYS_NOTICE")
    ctx["version"] = config.get("version", "4.0.0")
    ctx.update(json_loads(config.get("orchestrator_info"), dict()))

    ctx["ssl_enabled"] = config.get("ssl_enabled", False)
    ctx["db_user"] = leader_get("db_user")
    ctx["db_password"] = leader_get("db_password")
    ctx["analyticsdb_minimum_diskgb"] = config.get("cassandra-minimum-diskgb")

    ctx.update(servers_ctx())
    ctx.update(analyticsdb_ctx())
    log("CTX: {}".format(ctx))
    ctx.update(json_loads(config.get("auth_info"), dict()))
    return ctx
Example #32
def get_seed_ips():
    '''Return the set of seed ip addresses.

    We use ip addresses rather than unit names, as we may need to use
    external seed ips at some point.
    '''
    return set((hookenv.leader_get('seeds') or '').split(','))
Example #33
def leader_elected():
    if is_leader():
        if not leader_get('heat-domain-admin-passwd'):
            try:
                leader_set({'heat-domain-admin-passwd': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-domain-admin-passwd failed: {}'.format(
                    str(e)),
                    level=WARNING)
        if not leader_get('heat-auth-encryption-key'):
            try:
                leader_set({'heat-auth-encryption-key': pwgen(32)})
            except subprocess.CalledProcessError as e:
                log('leader_set: heat-auth-encryption-key failed: {}'.format(
                    str(e)),
                    level=WARNING)
Example #34
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Example #35
    def __call__(self):
        vdata_values = super(NovaMetadataContext, self).__call__()

        release = ch_utils.os_release('nova-common')
        cmp_os_release = ch_utils.CompareOpenStackReleases(release)

        ctxt = {}

        if cmp_os_release >= 'rocky':
            ctxt.update(vdata_values)

            ctxt['metadata_proxy_shared_secret'] = hookenv.leader_get(
                'shared-metadata-secret')
            ctxt['enable_metadata'] = True
        else:
            hookenv.log("Vendor metadata has been configured but is not "
                        "effective in nova-cloud-controller because release "
                        "{} is prior to Rocky.".format(release),
                        level=hookenv.DEBUG)
            ctxt['enable_metadata'] = False

        # NOTE(ganso): always propagate config value for nova-compute since
        # we need to apply it there for all releases, and we cannot determine
        # whether nova-compute is really the one serving the vendor metadata
        for rid in hookenv.relation_ids('cloud-compute'):
            hookenv.relation_set(relation_id=rid,
                                 vendor_data=json.dumps(vdata_values))

        return ctxt
Example #36
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    if is_unsupported_cmr(unit):
        return

    # NOTE: radosgw needs some usable OSD storage, so defer key
    #       provision until OSD units are detected.
    if ready_for_service():
        log('mon cluster in quorum and osds bootstrapped '
            '- providing radosgw with keys')
        public_addr = get_public_addr()
        data = {
            'fsid': leader_get('fsid'),
            'auth': config('auth-supported'),
            'ceph-public-address': public_addr,
        }
        key_name = relation_get('key_name', unit=unit, rid=relid)
        if key_name:
            # New style, per unit keys
            data['{}_key'.format(key_name)] = (ceph.get_radosgw_key(
                name=key_name))
        else:
            # Old style global radosgw key
            data['radosgw_key'] = ceph.get_radosgw_key()

        data.update(handle_broker_request(relid, unit))
        relation_set(relation_id=relid, relation_settings=data)
Example #37
def osd_relation(relid=None, unit=None):
    if ceph.is_quorum():
        log('mon cluster in quorum - providing fsid & keys')
        public_addr = get_public_addr()
        data = {
            'fsid':
            leader_get('fsid'),
            'osd_bootstrap_key':
            ceph.get_osd_bootstrap_key(),
            'auth':
            config('auth-supported'),
            'ceph-public-address':
            public_addr,
            'osd_upgrade_key':
            ceph.get_named_key('osd-upgrade', caps=ceph.osd_upgrade_caps),
        }

        data.update(handle_broker_request(relid, unit))
        relation_set(relation_id=relid, relation_settings=data)

        if is_leader():
            ceph_osd_releases = get_ceph_osd_releases()
            if len(ceph_osd_releases) == 1:
                execute_post_osd_upgrade_steps(ceph_osd_releases[0])

        # NOTE: radosgw key provision is gated on presence of OSD
        #       units so ensure that any deferred hooks are processed
        notify_radosgws()
        notify_client()
        notify_rbd_mirrors()
        send_osd_settings()
    else:
        log('mon cluster not in quorum - deferring fsid provision')
Example #38
def install_etcd_certificates():
    ''' Copy the server cert and key to /etc/ssl/etcd and set the
    etcd.ssl.placed state. '''
    etcd_ssl_path = '/etc/ssl/etcd'
    if not os.path.exists(etcd_ssl_path):
        os.makedirs(etcd_ssl_path)

    kv = unitdata.kv()
    cert = kv.get('tls.server.certificate')
    with open('{}/server.pem'.format(etcd_ssl_path), 'w+') as f:
        f.write(cert)
    with open('{}/ca.pem'.format(etcd_ssl_path), 'w+') as f:
        f.write(leader_get('certificate_authority'))

    # shenanigans - each server makes its own key when generating
    # the CSR. This is why it's "magically" present.
    keypath = 'easy-rsa/easyrsa3/pki/private/{}.key'
    server = os.getenv('JUJU_UNIT_NAME').replace('/', '_')
    if os.path.exists(keypath.format(server)):
        shutil.copyfile(keypath.format(server),
                        '{}/server-key.pem'.format(etcd_ssl_path))
    else:
        shutil.copyfile(keypath.format(unit_get('public-address')),
                        '{}/server-key.pem'.format(etcd_ssl_path))

    set_state('etcd.ssl.placed')
Example #39
    def _load_state(self):
        self.msg('Loading state for {}'.format(self._name()))

        # All responses must be stored in the leadership settings.
        # The leader cannot use local state, as a different unit may
        # be leader next time. Which is fine, as the leadership
        # settings are always available.
        self.grants = json.loads(hookenv.leader_get(self.key) or '{}')

        local_unit = hookenv.local_unit()

        # All requests must be stored on the peer relation. This is
        # the only channel units have to communicate with the leader.
        # Even the leader needs to store its requests here, as a
        # different unit may be leader by the time the request can be
        # granted.
        if self.relid is None:
            # The peer relation is not available. Maybe we are early in
            # the unit's lifecycle. Maybe this unit is standalone.
            # Fallback to using local state.
            self.msg('No peer relation. Loading local state')
            self.requests = {local_unit: self._load_local_state()}
        else:
            self.requests = self._load_peer_state()
            if local_unit not in self.requests:
                # The peer relation has just been joined. Update any state
                # loaded from our peers with our local state.
                self.msg('New peer relation. Merging local state')
                self.requests[local_unit] = self._load_local_state()
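A hedged sketch of the inverse operation implied by the comments above: only the leader persists its grants back into leadership settings, as JSON under the same key. This is an illustration (assuming json is imported), not the library's actual save path.

    def _save_state(self):
        if hookenv.is_leader():
            # Grants live in leadership settings so a future leader sees them.
            hookenv.leader_set({self.key: json.dumps(self.grants)})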
Example #40
    def _load_state(self):
        self.msg('Loading state for {}'.format(self._name()))

        # All responses must be stored in the leadership settings.
        # The leader cannot use local state, as a different unit may
        # be leader next time. Which is fine, as the leadership
        # settings are always available.
        self.grants = json.loads(hookenv.leader_get(self.key) or '{}')

        local_unit = hookenv.local_unit()

        # All requests must be stored on the peers relation. This is
        # the only channel units have to communicate with the leader.
        # Even the leader needs to store its requests here, as a
        # different unit may be leader by the time the request can be
        # granted.
        if self.relid is None:
            # The peers relation is not available. Maybe we are early in
            # the unit's lifecycle. Maybe this unit is standalone.
            # Fallback to using local state.
            self.msg('No peer relation. Loading local state')
            self.requests = {local_unit: self._load_local_state()}
        else:
            self.requests = self._load_peer_state()
            if local_unit not in self.requests:
                # The peers relation has just been joined. Update any state
                # loaded from our peers with our local state.
                self.msg('New peer relation. Merging local state')
                self.requests[local_unit] = self._load_local_state()
Example #41
    def create_initial_servers_and_domains(cls):
        """Create the nameserver entry and domains based on the charm user
        supplied config

        NOTE(AJK): This only wants to be done ONCE and by the leader, so we use
        leader settings to store that we've done it, after it's successfully
        completed.

        @returns None
        """
        KEY = 'create_initial_servers_and_domains'
        if hookenv.is_leader() and not hookenv.leader_get(KEY):
            nova_domain_name = hookenv.config('nova-domain')
            neutron_domain_name = hookenv.config('neutron-domain')
            with cls.check_zone_ids(nova_domain_name, neutron_domain_name):
                if hookenv.config('nameservers'):
                    for ns in hookenv.config('nameservers').split():
                        cls.create_server(ns)
                else:
                    hookenv.log('No nameserver specified, skipping creation of '
                                'nova and neutron domains',
                                level=hookenv.WARNING)
                    return
                if nova_domain_name:
                    cls.create_domain(
                        nova_domain_name,
                        hookenv.config('nova-domain-email'))
                if neutron_domain_name:
                    cls.create_domain(
                        neutron_domain_name,
                        hookenv.config('neutron-domain-email'))
            # if this fails, we weren't the leader any more; another unit may
            # attempt to do this too.
            hookenv.leader_set({KEY: 'done'})
Example #42
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    unitdata.kv().update(current, prefix='leadership.settings.')
Example #43
def configure_floating_ip_pools():
    if is_leader():
        floating_pools = config.get("floating-ip-pools")
        previous_floating_pools = leader_get("floating-ip-pools")
        if floating_pools != previous_floating_pools:
            # create/destroy pools, activate/deactivate projects
            # according to new value
            pools = { (pool["project"],
                       pool["network"],
                       pool["pool-name"]): set(pool["target-projects"])
                      for pool in yaml.safe_load(floating_pools) } \
                    if floating_pools else {}
            previous_pools = {}
            if previous_floating_pools:
                for pool in yaml.safe_load(previous_floating_pools):
                    projects = pool["target-projects"]
                    name = (pool["project"], pool["network"], pool["pool-name"])
                    if name in pools:
                        previous_pools[name] = set(projects)
                    else:
                        floating_ip_pool_delete(name, projects)
            for name, projects in pools.iteritems():
                if name not in previous_pools:
                    floating_ip_pool_create(name, projects)
                else:
                    floating_ip_pool_update(name, projects, previous_pools[name])

            leader_set({"floating-ip-pools": floating_pools})
Example #44
def emit_cephconf():
    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': config('ceph-public-network'),
        'ceph_cluster_network': config('ceph-cluster-network'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not config('ceph-public-network'):
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not config('ceph-cluster-network'):
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf))
    with open(charm_ceph_conf, 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #45
def remove_metadata():
    if is_leader() and leader_get("metadata-provisioned"):
        # impossible to know if current hook is firing because
        # relation or leader is being removed lp #1469731
        if not relation_ids("cluster"):
            unprovision_metadata()
        leader_set({"metadata-provisioned": ""})
Example #46
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Example #47
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #48
def radosgw_relation(relid=None, unit=None):
    # Install radosgw for admin tools
    apt_install(packages=filter_installed_packages(['radosgw']))
    if not unit:
        unit = remote_unit()
    """Process broker request(s)."""
    if ceph.is_quorum():
        settings = relation_get(rid=relid, unit=unit)
        if 'broker_req' in settings:
            if not ceph.is_leader():
                log("Not leader - ignoring broker request", level=DEBUG)
            else:
                rsp = process_requests(settings['broker_req'])
                unit_id = unit.replace('/', '-')
                unit_response_key = 'broker-rsp-' + unit_id
                log('mon cluster in quorum - providing radosgw with keys')
                public_addr = get_public_addr()
                data = {
                    'fsid': leader_get('fsid'),
                    'radosgw_key': ceph.get_radosgw_key(),
                    'auth': config('auth-supported'),
                    'ceph-public-address': public_addr,
                    unit_response_key: rsp,
                }
                relation_set(relation_id=relid, relation_settings=data)
    else:
        log('mon cluster not in quorum - deferring key provision')
Example #49
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context data with SDN data.
    context.update(gather_sdn_data())
    # Add the charm configuration data to the context.
    context.update(hookenv.config())
    if reldata:
        connection_string = reldata.get_connection_string()
        # Define where the etcd tls files will be kept.
        etcd_dir = '/etc/ssl/etcd'
        # Create paths to the etcd client ca, key, and cert file locations.
        ca = os.path.join(etcd_dir, 'client-ca.pem')
        key = os.path.join(etcd_dir, 'client-key.pem')
        cert = os.path.join(etcd_dir, 'client-cert.pem')
        # Save the client credentials (in relation data) to the paths provided.
        reldata.save_client_credentials(key, cert, ca)
        # Update the context so the template has the etcd information.
        context.update({'etcd_dir': etcd_dir,
                        'connection_string': connection_string,
                        'etcd_ca': ca,
                        'etcd_key': key,
                        'etcd_cert': cert})

    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)

    # Update the context with extra values, arch, manifest dir, and private IP.
    context.update({'arch': arch(),
                    'master_address': leader_get('master-address'),
                    'manifest_directory': rendered_manifest_dir,
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    # Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    render('docker-compose.yml', target, context)

    if is_leader():
        # Source: https://github.com/kubernetes/...master/cluster/images/hyperkube  # noqa
        target = os.path.join(rendered_manifest_dir, 'master.json')
        # Render the files/manifests/master.json that contains parameters for
        # the apiserver, controller, and controller-manager
        render('master.json', target, context)
        # Source: ...cluster/addons/dns/skydns-svc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
        # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
        render('kubedns-svc.yaml', target, context)
        # Source: ...cluster/addons/dns/skydns-rc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
        # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
        render('kubedns-rc.yaml', target, context)
Example #50
def leader_elected():
    if not leader_get("db_user"):
        user = "******"
        password = uuid.uuid4().hex
        leader_set(db_user=user, db_password=password)
        _update_relation()
    update_charm_status()
Example #51
def get_ca():
    """Get the root CA certificate.

    :returns: Root CA certificate
    :rtype: str
    """
    return hookenv.leader_get('root-ca')
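A hypothetical setter matching get_ca(): only the leader can publish the root CA into leadership data, after which any unit can read it back as above.

def set_ca(ca_cert):
    """Publish the root CA certificate (leader only)."""
    if hookenv.is_leader():
        hookenv.leader_set({'root-ca': ca_cert})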
Example #52
def mon_relation():
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()

    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        status_set('maintenance', 'Bootstrapping MON cluster')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        # If we can and want to
        if is_leader() and config('customize-failure-domain'):
            # But only if the environment supports it
            if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                cmds = [
                    "ceph osd getcrushmap -o /tmp/crush.map",
                    "crushtool -d /tmp/crush.map| "
                    "sed 's/step chooseleaf firstn 0 type host/step "
                    "chooseleaf firstn 0 type rack/' > "
                    "/tmp/crush.decompiled",
                    "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                    "crushtool -i /tmp/crush.map --test",
                    "ceph osd setcrushmap -i /tmp/crush.map"
                ]
                for cmd in cmds:
                    try:
                        subprocess.check_call(cmd, shell=True)
                    except subprocess.CalledProcessError as e:
                        log("Failed to modify crush map:", level='error')
                        log("Cmd: {}".format(cmd), level='error')
                        log("Error: {}".format(e.output), level='error')
                        break
            else:
                log(
                    "Your Juju environment doesn't"
                    "have support for Availability Zones"
                )
        notify_osds()
        notify_radosgws()
        notify_client()
    else:
        log('Not enough mons ({}), punting.'
            .format(len(get_mon_hosts())))
Example #53
def add_metadata():
    # check relation dependencies
    if is_leader() \
       and not leader_get("metadata-provisioned") \
       and config_get("contrail-api-configured") \
       and config_get("neutron-metadata-ready"):
        provision_metadata()
        leader_set({"metadata-provisioned": True})
Example #54
def leader_changed():
    write_ifmap_config()
    creds = leader_get("ifmap-creds")
    creds = json.loads(creds) if creds else {}
    # set same credentials on relation
    for rid in relation_ids("contrail-ifmap"):
        if rid in creds:
            relation_set(relation_id=rid, creds=json.dumps(creds[rid]))
Example #55
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    # Check if an upgrade was requested
    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not leader_get('fsid') or not leader_get('monitor-secret'):
            if config('fsid'):
                fsid = config('fsid')
            else:
                fsid = "{}".format(uuid.uuid1())
            if config('monitor-secret'):
                mon_secret = config('monitor-secret')
            else:
                mon_secret = "{}".format(ceph.generate_monitor_secret())
            status_set('maintenance', 'Creating FSID and Monitor Secret')
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            log("Settings for the cluster are: {}".format(opts))
            leader_set(opts)
    else:
        if leader_get('fsid') is None or leader_get('monitor-secret') is None:
            log('still waiting for leader to setup keys')
            status_set('waiting', 'Waiting for leader to setup keys')
            sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(config('monitor-secret'))
        ceph.wait_for_bootstrap()
    install_apparmor_profile()
Example #56
def leader_settings_changed():
    """When the leader settings changes the followers can get the certificate
    and install the certificate on their own system."""
    # Get the current CA value from leader_get.
    ca = leader_get("certificate_authority")
    if ca:
        hookenv.log("Installing the CA.")
        install_ca(ca)