Code example #1
File: __init__.py  Project: BillTheBest/hyper-c
def peer_echo(includes=None, force=False):
    """Echo filtered attributes back onto the same relation for storage.

    This is a requirement to use the peerstorage module - it needs to be called
    from the peer relation's changed hook.

    If Juju leader support exists this will be a noop unless force is True.
    """
    try:
        is_leader()
    except NotImplementedError:
        pass
    else:
        if not force:
            return  # NOOP if leader-election is supported

    # Use original non-leader calls
    relation_get = _relation_get
    relation_set = _relation_set

    rdata = relation_get()
    echo_data = {}
    if includes is None:
        echo_data = rdata.copy()
        for ex in ['private-address', 'public-address']:
            if ex in echo_data:
                echo_data.pop(ex)
    else:
        for attribute, value in six.iteritems(rdata):
            for include in includes:
                if include in attribute:
                    echo_data[attribute] = value
    if len(echo_data) > 0:
        relation_set(relation_settings=echo_data)
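
As a usage sketch (an assumption for illustration, not taken from the hyper-c project), a peers relation changed hook would typically call peer_echo so that peerstorage keeps working on deployments without leader election; the hook name and the includes filter below are hypothetical.

from charmhelpers.core.hookenv import Hooks
from charmhelpers.contrib.peerstorage import peer_echo

hooks = Hooks()


@hooks.hook('cluster-relation-changed')
def cluster_changed():
    # Echo only password-like keys back onto the peer relation so that
    # peerstorage can read them later; this is a no-op when Juju leadership
    # (leader-get/leader-set) is available, unless force=True is passed.
    peer_echo(includes=['passwd'])
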
Code example #2
File: tls.py  Project: juju-solutions/layer-tls
def create_certificate_authority(certificate_authority=None):
    """Return the CA and server certificates for this system. If the CA is
    empty, generate a self-signed certificate authority."""
    # followers are not special, do not generate a ca
    if not is_leader():
        return
    # Create an absolute path so current directory does not affect the result.
    easyrsa3_dir = os.path.join(hookenv.charm_dir(), "easy-rsa/easyrsa3")
    with chdir(easyrsa3_dir):
        ca_file = "pki/ca.crt"
        # Check if an old CA exists.
        if os.path.isfile(ca_file):
            # Initialize easy-rsa (by deleting old pki) so a CA can be created.
            init = "./easyrsa --batch init-pki 2>&1"
            check_call(split(init))
        # When the CA is not None write the CA file.
        if certificate_authority:
            # Write the certificate authority from configuration.
            with open(ca_file, "w") as fp:
                fp.write(certificate_authority)
        else:
            # The Certificate Authority does not exist; build a self-signed one.
            # The Common Name (CN) for a certificate must be an IP or hostname.
            cn = hookenv.unit_public_ip()
            # Create a self signed CA with the CN, stored pki/ca.crt
            build_ca = './easyrsa --batch "--req-cn={0}" build-ca nopass 2>&1'
            check_call(split(build_ca.format(cn)))
            # Read the CA so we can return the contents from this method.
            with open(ca_file, "r") as fp:
                certificate_authority = fp.read()
    set_state("certificate authority available")
    return certificate_authority
Code example #3
File: tls.py  Project: juju-solutions/layer-tls
def create_csr(tls):
    """Create a certificate signing request (CSR). Only the followers need to
    run this operation."""
    if not is_leader():
        # Create an absolute path to easyrsa3 to change to that directory.
        easyrsa3_dir = os.path.join(hookenv.charm_dir(), "easy-rsa/easyrsa3")
        # Use an absolute path for this context manager.
        with chdir(easyrsa3_dir):
            # Must remove the path characters from the unit name.
            path_name = hookenv.local_unit().replace("/", "_")
            # The request will be named unit_name.req
            req_file = "pki/reqs/{0}.req".format(path_name)
            # If the request already exists do not generate another one.
            if os.path.isfile(req_file):
                remove_state("create certificate signing request")
                return

            # The Common Name is the public address of the system.
            cn = hookenv.unit_public_ip()
            hookenv.log("Creating the CSR for {0}".format(path_name))
            sans = get_sans()
            # Create a CSR for this system with the subject and SANs.
            gen_req = "./easyrsa --batch --req-cn={0} --subject-alt-name={1}" " gen-req {2} nopass 2>&1".format(
                cn, sans, path_name
            )
            check_call(split(gen_req))
            # Read the CSR file.
            with open(req_file, "r") as fp:
                csr = fp.read()
            # Set the CSR on the relation object.
            tls.set_csr(csr)
    else:
        hookenv.log("The leader does not need to create a CSR.")
Code example #4
File: hooks.py  Project: bureau14/qdb-juju-charms
def leader_elected():
    """Handle the Juju leader-elected hook."""
    if hookenv.is_leader():
        make_this_leader()
    else:
        # update peer information but don't restart the node
        peer_to_leader()
Code example #5
File: tls.py  Project: chuckbutler/layer-tls
def create_csr(tls):
    '''Create a certificate signing request (CSR). Only the followers need to
    run this operation.'''
    if not is_leader():
        with chdir('easy-rsa/easyrsa3'):
            # Must remove the path characters from the unit name.
            path_name = hookenv.local_unit().replace('/', '_')
            # The request will be named unit_name.req
            req_file = 'pki/reqs/{0}.req'.format(path_name)
            # If the request already exists do not generate another one.
            if os.path.isfile(req_file):
                remove_state('create certificate signing request')
                return

            # The Common Name is the public address of the system.
            cn = hookenv.unit_public_ip()
            hookenv.log('Creating the CSR for {0}'.format(path_name))
            sans = get_sans()
            # Create a CSR for this system with the subject and SANs.
            gen_req = './easyrsa --batch --req-cn={0} --subject-alt-name={1}' \
                      ' gen-req {2} nopass 2>&1'.format(cn, sans, path_name)
            check_call(split(gen_req))
            # Read the CSR file.
            with open(req_file, 'r') as fp:
                csr = fp.read()
            # Set the CSR on the relation object.
            tls.set_csr(csr)
    else:
        hookenv.log('The leader does not need to create a CSR.')
Code example #6
    def migrate_passwords_to_leader_storage(self, excludes=None):
        """Migrate any passwords storage on disk to leader storage."""
        if not is_leader():
            log("Skipping password migration as not the lead unit",
                level=DEBUG)
            return
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from leader storage migration" % (f),
                    level=DEBUG)
                continue

            key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                leader_set(settings={key: _value})

                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass
Code example #7
File: tls.py  Project: chuckbutler/layer-tls
def create_certificate_authority(certificate_authority=None):
    '''Return the CA and server certificates for this system. If the CA is
    empty, generate a self-signed certificate authority.'''
    # followers are not special, do not generate a ca
    if not is_leader():
        return
    with chdir('easy-rsa/easyrsa3'):
        ca_file = 'pki/ca.crt'
        # Check if an old CA exists.
        if os.path.isfile(ca_file):
            # Initialize easy-rsa (by deleting old pki) so a CA can be created.
            init = './easyrsa --batch init-pki 2>&1'
            check_call(split(init))
        # When the CA is not None write the CA file.
        if certificate_authority:
            # Write the certificate authority from configuration.
            with open(ca_file, 'w') as fp:
                fp.write(certificate_authority)
        else:
            # The Certificate Authority does not exist; build a self-signed one.
            # The Common Name (CN) for a certificate must be an IP or hostname.
            cn = hookenv.unit_public_ip()
            # Create a self signed CA with the CN, stored pki/ca.crt
            build_ca = './easyrsa --batch "--req-cn={0}" build-ca nopass 2>&1'
            check_call(split(build_ca.format(cn)))
            # Read the CA so we can return the contents from this method.
            with open(ca_file, 'r') as fp:
                certificate_authority = fp.read()
    set_state('certificate authority available')
    return certificate_authority
Code example #8
File: tls.py  Project: chuckbutler/layer-tls
def import_sign(tls):
    '''Import and sign the certificate signing request (CSR). Only the leader
    can sign the requests.'''
    if is_leader():
        hookenv.log('The leader needs to sign the csr requests.')
        # Get all the requests that are queued up to sign.
        csr_map = tls.get_csr_map()
        # Iterate over the unit names related to CSRs.
        for unit_name, csr in csr_map.items():
            with chdir('easy-rsa/easyrsa3'):
                temp_file = tempfile.NamedTemporaryFile(suffix='.csr')
                with open(temp_file.name, 'w') as fp:
                    fp.write(csr)
                # Must remove the path characters from the unit_name.
                path_name = unit_name.replace('/', '_')
                if not os.path.isfile('pki/reqs/{0}.req'.format(path_name)):
                    hookenv.log('Importing csr from {0}'.format(path_name))
                    # Create the command to import the request using path name.
                    import_req = './easyrsa --batch import-req {0} {1} 2>&1'
                    # easy-rsa import-req /tmp/temporary.csr path_name
                    check_call(split(import_req.format(temp_file.name,
                                                       path_name)))
                if not os.path.isfile('pki/issued/{0}.crt'.format(path_name)):
                    hookenv.log('Signing csr from {0}'.format(path_name))
                    # Create a command that signs the request.
                    sign_req = './easyrsa --batch sign-req server {0} 2>&1'
                    check_call(split(sign_req.format(path_name)))
                # Read in the signed certificate.
                cert_file = 'pki/issued/{0}.crt'.format(path_name)
                with open(cert_file, 'r') as fp:
                    certificate = fp.read()
                hookenv.log('Leader sending signed certificate over relation.')
                # Send the certificate over the relation.
                tls.set_cert(unit_name, certificate)
Code example #9
File: etcd.py  Project: cmars/layer-etcd
    def cluster_string(self, proto='http', internal=True):
        ''' This method behaves slightly differently depending on the
            context of its invocation. If the unit is the leader, the
            connection string should always be built and returned from
            the contents in unit data. Otherwise we should return the
            value set by the leader via leader-data

            @params proto - Determines the output prefix depending on need. eg:
                           http://127.0.0.1:4001 or etcd://127.0.0.1:4001
            @params internal - Boolean value to determine if management or
                               client cluster string is required.
        '''
        if is_leader():
            cluster_data = self.cluster_data()
            connection_string = ""
            if internal:
                for u in cluster_data:
                    connection_string += ",{}={}://{}:{}".format(u,  # noqa
                                                                 proto,
                                                                 cluster_data[u]['private_address'],  # noqa
                                                                 self.management_port)  # noqa
            else:
                for u in cluster_data:
                    connection_string += ",{}://{}:{}".format(proto,
                                                              cluster_data[u]['private_address'],  # noqa
                                                              self.port)
            return connection_string.lstrip(',')
        else:
            return leader_get('cluster')
Code example #10
File: tls.py  Project: juju-solutions/layer-tls
def add_client_authorization():
    """easyrsa has a default OpenSSL configuration that does not support
    client authentication. Append "clientAuth" to the server ssl certificate
    configuration. This is not default, to enable this in your charm set the
    reactive state 'tls.client.authorization.required'.
    """
    if not is_leader():
        return
    else:
        hookenv.log("Configuring SSL PKI for clientAuth")

    # Get the absolute path to the charm directory.
    charm_dir = hookenv.charm_dir()
    # Create the relative path to the server file.
    server_file = "easy-rsa/easyrsa3/x509-types/server"
    # Use an absolute path so current directory does not affect the result.
    openssl_config = os.path.join(charm_dir, server_file)
    hookenv.log("Updating {0}".format(openssl_config))
    # Read the file in.
    with open(openssl_config, "r") as f:
        existing_template = f.readlines()

    # Enable client and server authorization for certificates
    xtype = [w.replace("serverAuth", "serverAuth, clientAuth") for w in existing_template]  # noqa
    # Write the configuration file back out.
    with open(openssl_config, "w+") as f:
        f.writelines(xtype)

    set_state("tls.client.authorization.added")
Code example #11
def master_joined(interface='master'):
    cluster_id = get_cluster_id()
    if not is_clustered():
        log("Not clustered yet", level=DEBUG)
        return
    relation_settings = {}
    leader_settings = leader_get()
    if is_leader():
        if not leader_settings.get('async-rep-password'):
            # Replication password cannot be longer than 32 characters
            leader_set({'async-rep-password': pwgen(32)})
            return
        configure_master()
        master_address, master_file, master_position = (
            get_master_status(interface))
        if leader_settings.get('master-address') != master_address:
            leader_settings['master-address'] = master_address
            leader_settings['master-file'] = master_file
            leader_settings['master-position'] = master_position
        leader_set(leader_settings)
        relation_settings = {'leader': True}
    else:
        relation_settings = {'leader': False}
    relation_settings['cluster_id'] = cluster_id
    relation_settings['master_address'] = leader_settings['master-address']
    relation_settings['master_file'] = leader_settings['master-file']
    relation_settings['master_password'] = \
        leader_settings['async-rep-password']
    relation_settings['master_position'] = leader_settings['master-position']
    log("Setting master relation: '{}'".format(relation_settings), level=INFO)
    for rid in relation_ids(interface):
        relation_set(relation_id=rid, relation_settings=relation_settings)
Code example #12
def configure_floating_ip_pools():
    if is_leader():
        floating_pools = config.get("floating-ip-pools")
        previous_floating_pools = leader_get("floating-ip-pools")
        if floating_pools != previous_floating_pools:
            # create/destroy pools, activate/deactivate projects
            # according to new value
            pools = { (pool["project"],
                       pool["network"],
                       pool["pool-name"]): set(pool["target-projects"])
                      for pool in yaml.safe_load(floating_pools) } \
                    if floating_pools else {}
            previous_pools = {}
            if previous_floating_pools:
                for pool in yaml.safe_load(previous_floating_pools):
                    projects = pool["target-projects"]
                    name = (pool["project"], pool["network"], pool["pool-name"])
                    if name in pools:
                        previous_pools[name] = set(projects)
                    else:
                        floating_ip_pool_delete(name, projects)
            for name, projects in pools.iteritems():
                if name not in previous_pools:
                    floating_ip_pool_create(name, projects)
                else:
                    floating_ip_pool_update(name, projects, previous_pools[name])

            leader_set({"floating-ip-pools": floating_pools})
Code example #13
    def create_initial_servers_and_domains(cls):
        """Create the nameserver entry and domains based on the charm user
        supplied config

        NOTE(AJK): This only wants to be done ONCE and by the leader, so we use
        leader settings to store that we've done it, after it's successfully
        completed.

        @returns None
        """
        KEY = 'create_initial_servers_and_domains'
        if hookenv.is_leader() and not hookenv.leader_get(KEY):
            nova_domain_name = hookenv.config('nova-domain')
            neutron_domain_name = hookenv.config('neutron-domain')
            with cls.check_zone_ids(nova_domain_name, neutron_domain_name):
                if hookenv.config('nameservers'):
                    for ns in hookenv.config('nameservers').split():
                        cls.create_server(ns)
                else:
                    hookenv.log('No nameserver specified, skipping creation '
                                'of nova and neutron domains',
                                level=hookenv.WARNING)
                    return
                if nova_domain_name:
                    cls.create_domain(
                        nova_domain_name,
                        hookenv.config('nova-domain-email'))
                if neutron_domain_name:
                    cls.create_domain(
                        neutron_domain_name,
                        hookenv.config('neutron-domain-email'))
            # if this fails, we weren't the leader any more; another unit may
            # attempt to do this too.
            hookenv.leader_set({KEY: 'done'})
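
The NOTE in the docstring above describes a run-once-by-the-leader guard. A minimal, distilled sketch of that pattern (function and key names are illustrative, not from the charm) could look like this:

from charmhelpers.core import hookenv


def run_once_on_leader(key, task):
    # Only the leader attempts the task, and only if the marker key has not
    # already been recorded in leader settings.
    if hookenv.is_leader() and not hookenv.leader_get(key):
        task()
        # If leadership was lost in the meantime, leader_set raises and a
        # later leader will retry the task.
        hookenv.leader_set({key: 'done'})
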
Code example #14
def remove_metadata():
    if is_leader() and leader_get("metadata-provisioned"):
        # impossible to know if current hook is firing because
        # relation or leader is being removed lp #1469731
        if not relation_ids("cluster"):
            unprovision_metadata()
        leader_set({"metadata-provisioned": ""})
Code example #15
def contrail_ifmap_joined():
    if is_leader():
        creds = leader_get("ifmap-creds")
        creds = json.loads(creds) if creds else {}

        # prune credentials because we can't remove them directly lp #1469731
        creds = { rid: { unit: units[unit]
                         for unit, units in
                         ((unit, creds[rid]) for unit in related_units(rid))
                         if unit in units }
                  for rid in relation_ids("contrail-ifmap")
                  if rid in creds }

        rid = relation_id()
        if rid not in creds:
            creds[rid] = {}
        cs = creds[rid]
        unit = remote_unit()
        if unit in cs:
            return
        # generate new credentials for unit
        cs[unit] = { "username": unit, "password": pwgen(32) }
        leader_set({"ifmap-creds": json.dumps(creds)})
        write_ifmap_config()
        service_restart("supervisor-config")
        relation_set(creds=json.dumps(cs))
Code example #16
def leader_init_db_if_ready(skip_acl_check=False, db_rid=None, unit=None):
    """Initialise db if leader and db not yet intialised.

    NOTE: must be called from database context.
    """
    if not hookenv.is_leader():
        hookenv.log("Not leader - skipping db init", level=hookenv.DEBUG)
        return

    if ncc_utils.is_db_initialised():
        hookenv.log("Database already initialised - skipping db init",
                    level=hookenv.DEBUG)
        return

    # Bugs 1353135 & 1187508. Dbs can appear to be ready before the unit's
    # acl entry has been added. So, if the db supports passing a list of
    # permitted units then check if we're in the list.
    allowed_units = hookenv.relation_get('nova_allowed_units',
                                         rid=db_rid, unit=unit)
    if skip_acl_check or (allowed_units and hookenv.local_unit() in
                          allowed_units.split()):
        hookenv.status_set('maintenance', 'Running nova db migration')
        ncc_utils.migrate_nova_databases()
        hookenv.log('Triggering remote restarts.')
        update_nova_relation(remote_restart=True)
    else:
        hookenv.log('allowed_units either not presented, or local unit '
                    'not in acl list: %s' % repr(allowed_units))
Code example #17
File: k8s.py  Project: mbruzek/layer-k8s
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context data with SDN data.
    context.update(gather_sdn_data())
    # Add the charm configuration data to the context.
    context.update(hookenv.config())
    if reldata:
        connection_string = reldata.get_connection_string()
        # Define where the etcd tls files will be kept.
        etcd_dir = '/etc/ssl/etcd'
        # Create paths to the etcd client ca, key, and cert file locations.
        ca = os.path.join(etcd_dir, 'client-ca.pem')
        key = os.path.join(etcd_dir, 'client-key.pem')
        cert = os.path.join(etcd_dir, 'client-cert.pem')
        # Save the client credentials (in relation data) to the paths provided.
        reldata.save_client_credentials(key, cert, ca)
        # Update the context so the template has the etcd information.
        context.update({'etcd_dir': etcd_dir,
                        'connection_string': connection_string,
                        'etcd_ca': ca,
                        'etcd_key': key,
                        'etcd_cert': cert})

    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)

    # Update the context with extra values, arch, manifest dir, and private IP.
    context.update({'arch': arch(),
                    'master_address': leader_get('master-address'),
                    'manifest_directory': rendered_manifest_dir,
                    'public_address': hookenv.unit_get('public-address'),
                    'private_address': hookenv.unit_get('private-address')})

    # Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    render('docker-compose.yml', target, context)

    if is_leader():
        # Source: https://github.com/kubernetes/...master/cluster/images/hyperkube  # noqa
        target = os.path.join(rendered_manifest_dir, 'master.json')
        # Render the files/manifests/master.json that contains parameters for
        # the apiserver, controller, and controller-manager
        render('master.json', target, context)
        # Source: ...cluster/addons/dns/skydns-svc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
        # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
        render('kubedns-svc.yaml', target, context)
        # Source: ...cluster/addons/dns/skydns-rc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
        # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
        render('kubedns-rc.yaml', target, context)
Code example #18
File: k8s.py  Project: mbruzek/layer-k8s
def config_changed():
    '''If the configuration values change, remove the available states.'''
    config = hookenv.config()
    if any(config.changed(key) for key in config.keys()):
        hookenv.log('The configuration options have changed.')
        # Use the Compose class that encapsulates the docker-compose commands.
        compose = Compose('files/kubernetes')
        if is_leader():
            hookenv.log('Removing master container and kubelet.available state.')  # noqa
            # Stop and remove the Kubernetes kubelet container.
            compose.kill('master')
            compose.rm('master')
            compose.kill('proxy')
            compose.rm('proxy')
            # Remove the state so the code can react to restarting kubelet.
            remove_state('kubelet.available')
        else:
            hookenv.log('Removing kubelet container and kubelet.available state.')  # noqa
            # Stop and remove the Kubernetes kubelet container.
            compose.kill('kubelet')
            compose.rm('kubelet')
            # Remove the state so the code can react to restarting kubelet.
            remove_state('kubelet.available')
            hookenv.log('Removing proxy container and proxy.available state.')
            # Stop and remove the Kubernetes proxy container.
            compose.kill('proxy')
            compose.rm('proxy')
            # Remove the state so the code can react to restarting proxy.
            remove_state('proxy.available')

    if config.changed('version'):
        hookenv.log('The version changed; removing the states so the new '
                    'version of kubectl will be downloaded.')
        remove_state('kubectl.downloaded')
        remove_state('kubeconfig.created')
Code example #19
def cluster_joined(relation_id=None):
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
Code example #20
def domain_backend_changed(relation_id=None, unit=None):
    if get_api_version() < 3:
        log('Domain specific backend identity configuration only supported '
            'with Keystone v3 API, skipping domain creation and '
            'restart.')
        return

    domain_name = relation_get(attribute='domain-name',
                               unit=unit,
                               rid=relation_id)
    if domain_name:
        # NOTE(jamespage): Only create domain data from lead
        #                  unit when clustered and database
        #                  is configured and created.
        if is_leader() and is_db_ready() and is_db_initialised():
            create_or_show_domain(domain_name)
        # NOTE(jamespage): Deployment may have multiple domains,
        #                  with different identity backends so
        #                  ensure that a domain specific nonce
        #                  is checked for restarts of keystone
        restart_nonce = relation_get(attribute='restart-nonce',
                                     unit=unit,
                                     rid=relation_id)
        domain_nonce_key = 'domain-restart-nonce-{}'.format(domain_name)
        db = unitdata.kv()
        if restart_nonce != db.get(domain_nonce_key):
            if not is_unit_paused_set():
                service_restart(keystone_service())
            db.set(domain_nonce_key, restart_nonce)
            db.flush()
Code example #21
File: leadership.py  Project: stub42/layer-leadership
def initialize_leadership_state():
    '''Initialize leadership.* states from the hook environment.

    Invoked by hookenv.atstart() so states are available in
    @hook decorated handlers.
    '''
    is_leader = hookenv.is_leader()
    if is_leader:
        hookenv.log('Initializing Leadership Layer (is leader)')
    else:
        hookenv.log('Initializing Leadership Layer (is follower)')

    reactive.helpers.toggle_state('leadership.is_leader', is_leader)

    previous = unitdata.kv().getrange('leadership.settings.', strip=True)
    current = hookenv.leader_get()

    # Handle deletions.
    for key in set(previous.keys()) - set(current.keys()):
        current[key] = None

    any_changed = False
    for key, value in current.items():
        reactive.helpers.toggle_state('leadership.changed.{}'.format(key),
                                      value != previous.get(key))
        if value != previous.get(key):
            any_changed = True
        reactive.helpers.toggle_state('leadership.set.{}'.format(key),
                                      value is not None)
    reactive.helpers.toggle_state('leadership.changed', any_changed)

    unitdata.kv().update(current, prefix='leadership.settings.')
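
A hedged sketch of how a reactive handler might consume the leadership.* flags toggled above; the 'ready' key and the handler bodies are assumptions for illustration, not part of layer-leadership itself.

from charms import reactive
from charmhelpers.core import hookenv


@reactive.when('leadership.is_leader')
def publish_ready():
    # Only the leader may write leader settings.
    hookenv.leader_set({'ready': 'true'})


@reactive.when('leadership.changed.ready')
def ready_changed():
    # Runs on every unit whenever the leader changes the 'ready' key.
    hookenv.log('leader setting "ready" is now {}'.format(
        hookenv.leader_get('ready')))
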
Code example #22
def upgrade_charm():
    apt_install(determine_packages(), fatal=True)
    if remove_old_packages():
        log("Package purge detected, restarting services", "INFO")
        for s in services():
            service_restart(s)
    if is_leader():
        # if we are upgrading, then the old version might have used the
        # HEAT_PATH/encryption-key. So we grab the key from that, and put it in
        # leader settings to ensure that the key remains the same during an
        # upgrade.
        encryption_path = os.path.join(HEAT_PATH, 'encryption-key')
        if os.path.isfile(encryption_path):
            with open(encryption_path, 'r') as f:
                encryption_key = f.read()
            try:
                leader_set({'heat-auth-encryption-key': encryption_key})
            except subprocess.CalledProcessError as e:
                log("upgrade: leader_set: heat-auth-encryption-key failed,"
                    " didn't delete the existing file: {}.\n"
                    "Error was: ".format(encryption_path, str(e)),
                    level=WARNING)
            else:
                # now we just delete the file
                os.remove(encryption_path)
    leader_elected()
Code example #23
    def handle(self):
        if not hookenv.is_leader():
            return  # Only the leader can grant requests.

        self.msg('Leader handling coordinator requests')

        # Clear our grants that have been released.
        for unit in self.grants.keys():
            for lock, grant_ts in list(self.grants[unit].items()):
                req_ts = self.requests.get(unit, {}).get(lock)
                if req_ts != grant_ts:
                    # The request timestamp does not match the granted
                    # timestamp. Several hooks on 'unit' may have run
                    # before the leader got a chance to make a decision,
                    # and 'unit' may have released its lock and attempted
                    # to reacquire it. This will change the timestamp,
                    # and we correctly revoke the old grant putting it
                    # to the end of the queue.
                    ts = datetime.strptime(self.grants[unit][lock],
                                           _timestamp_format)
                    del self.grants[unit][lock]
                    self.released(unit, lock, ts)

        # Grant locks
        for unit in self.requests.keys():
            for lock in self.requests[unit]:
                self.grant(lock, unit)
Code example #24
    def update_pools(self):
        # designate-manage communicates with designate via message bus so no
        # need to set OS_ vars
        # NOTE(AJK) this runs with every hook (once most relations are up) and
        # so if it fails it will be picked up by the next relation change or
        # update-status.  i.e. it will heal eventually.
        if hookenv.is_leader():
            try:
                cmd = "designate-manage pool update"
                # Note(tinwood) that this command may fail if the pools.yaml
                # doesn't actually contain any pools.  This happens when the
                # relation is broken, which errors out the charm.  This stops
                # this happening and logs the error.
                subprocess.check_call(cmd.split(), timeout=60)
                # Update leader db to trigger restarts
                hookenv.leader_set(
                    {'pool-yaml-hash': host.file_hash(POOLS_YAML)})
            except subprocess.CalledProcessError as e:
                hookenv.log("designate-manage pool update failed: {}"
                            .format(str(e)))
            except subprocess.TimeoutExpired as e:
                # the timeout is if the rabbitmq server has gone away; it just
                # retries continuously; this lets the hook complete.
                hookenv.log("designate-manage pool command timed out: {}".
                            format(str(e)))
Code example #25
def config_changed():
    # if we are paused, delay doing any config changed hooks.  It is forced on
    # the resume.
    if is_unit_paused_set():
        return

    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    hosts = get_cluster_hosts()
    clustered = len(hosts) > 1
    bootstrapped = is_bootstrapped()

    # NOTE: only configure the cluster if we have sufficient peers. This only
    # applies if min-cluster-size is provided and is used to avoid extraneous
    # configuration changes and premature bootstrapping as the cluster is
    # deployed.
    if is_sufficient_peers():
        try:
            # NOTE(jamespage): try with leadership election
            if is_leader():
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

        except NotImplementedError:
            # NOTE(jamespage): fallback to legacy behaviour.
            oldest = oldest_peer(peer_units())
            if oldest:
                log("Leader unit - bootstrap required=%s" % (not bootstrapped),
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts,
                                                 bootstrap=not bootstrapped)
            elif bootstrapped:
                log("Cluster is bootstrapped - configuring mysql on this node",
                    DEBUG)
                render_config_restart_on_changed(clustered, hosts)
            else:
                log("Not configuring", DEBUG)

    # Notify any changes to the access network
    update_shared_db_rels()

    # (re)install pcmkr agent
    install_mysql_ocf()

    if relation_ids('ha'):
        # make sure all the HA resources are (re)created
        ha_relation_joined()

    if is_relation_made('nrpe-external-master'):
        update_nrpe_config()
Code example #26
def setup_sync_target_alone():
    '''If this is the only unit in the application then set up a sync target.
    This will likely be empty as zones.initialised is only unset when a unit
    first comes up, but the presence of the target allows subsequent units to
    bootstrap if leadership flips to them as they come up.'''
    if hookenv.is_leader():
        designate_bind.setup_sync()
        reactive.set_state('zones.initialised')
Code example #27
File: k8s.py  Project: AzTron/kubernetes
def final_messaging():
    '''Lower layers emit messages, and if we do not clear the status messaging
    queue, we are left with whatever the last method call sets status to. '''
    # It's good UX to have consistent messaging that the cluster is online
    if is_leader():
        status_set('active', 'Kubernetes leader running')
    else:
        status_set('active', 'Kubernetes follower running')
Code example #28
def add_metadata():
    # check relation dependencies
    if is_leader() \
       and not leader_get("metadata-provisioned") \
       and config_get("contrail-api-configured") \
       and config_get("neutron-metadata-ready"):
        provision_metadata()
        leader_set({"metadata-provisioned": True})
Code example #29
def leader_settings_changed():
    if not is_leader() and is_leader_ready() and is_oracle_relation_joined():
        set_oracle_host()
        set_data_source()
        if check_jboss_service() is True:
            pass
        else:
            start_services('create-db')
Code example #30
    def cluster_token(self):
        if not is_leader():
            return leader_get('token')

        if not self.db.get('cluster-token'):
            token = self.id_generator()
            self.db.set('cluster-token', token)
            return token
        return self.db.get('cluster-token')
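
The follower branch above reads the token from leader settings; a hedged sketch of the complementary leader-side publication (not shown in the original snippet, with the key name assumed to match the leader_get call above) could be:

from charmhelpers.core.hookenv import is_leader, leader_set


def publish_cluster_token(cluster):
    # On the leader, generate (or reuse) the cluster token and publish it so
    # that followers' leader_get('token') returns the same value.
    if is_leader():
        leader_set({'token': cluster.cluster_token()})
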
Code example #31
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')
    model = os.environ['JUJU_MODEL_NAME']

    layer.caas_base.pod_spec_set(
        {
            'version':
            2,
            'serviceAccount': {
                'global':
                True,
                'rules': [
                    {
                        'apiGroups': ['apps'],
                        'resources': ['statefulsets', 'deployments'],
                        'verbs': ['*'],
                    },
                    {
                        'apiGroups': [''],
                        'resources': ['pods'],
                        'verbs': ['get', 'list', 'watch'],
                    },
                    {
                        'apiGroups': [''],
                        'resources': ['services'],
                        'verbs': ['*'],
                    },
                    {
                        'apiGroups': [''],
                        'resources': ['events'],
                        'verbs': ['get', 'list', 'watch', 'create'],
                    },
                    {
                        'apiGroups': ['kubeflow.org'],
                        'resources': [
                            'notebooks', 'notebooks/status',
                            'notebooks/finalizers'
                        ],
                        'verbs': ['*'],
                    },
                    {
                        'apiGroups': ['networking.istio.io'],
                        'resources': ['virtualservices'],
                        'verbs': ['*'],
                    },
                ],
            },
            'containers': [{
                'name': 'jupyter-controller',
                'command': ['/manager'],
                'config': {
                    'USE_ISTIO':
                    str(hookenv.is_relation_made('service-mesh')).lower(),
                    'ISTIO_GATEWAY':
                    f'{model}/kubeflow-gateway',
                    'ENABLE_CULLING':
                    hookenv.config('enable-culling'),
                },
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
            }],
        },
        {
            'kubernetesResources': {
                "customResourceDefinitions": {
                    crd["metadata"]["name"]: crd["spec"]
                    for crd in yaml.safe_load_all(
                        Path("files/crds.yaml").read_text())
                },
                'serviceAccounts': [{
                    'name':
                    'jupyter-notebook',
                    'rules': [
                        {
                            'apiGroups': [''],
                            'resources':
                            ['pods', 'pods/log', 'secrets', 'services'],
                            'verbs': ['*'],
                        },
                        {
                            'apiGroups': ['', 'apps', 'extensions'],
                            'resources': ['deployments', 'replicasets'],
                            'verbs': ['*'],
                        },
                        {
                            'apiGroups': ['kubeflow.org'],
                            'resources': ['*'],
                            'verbs': ['*']
                        },
                        {
                            'apiGroups': ['batch'],
                            'resources': ['jobs'],
                            'verbs': ['*']
                        },
                    ],
                }],
            }
        },
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Code example #32
    def set_mysql_password(self, username, password, current_password=None):
        """Update a mysql password for the provided username changing the
        leader settings

        To update root's password pass `None` in the username

        :param username: Username to change password of
        :type username: str
        :param password: New password for user.
        :type password: str
        :param current_password: Existing password for user.
        :type current_password: str
        """

        if username is None:
            username = 'root'

        # get root password via leader-get, it may be that in the past (when
        # changes to root-password were not supported) the user changed the
        # password, so leader-get is more reliable source than
        # config.previous('root-password').
        rel_username = None if username == 'root' else username
        if not current_password:
            current_password = self.get_mysql_password(rel_username)

        # password that needs to be set
        new_passwd = password

        # update password for all users (e.g. root@localhost, root@::1, etc)
        try:
            self.connect(user=username, password=current_password)
            cursor = self.connection.cursor()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using password in '
                                         'leader settings (%s)') % ex, ex)

        try:
            # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
            # fails when using SET PASSWORD so using UPDATE against the
            # mysql.user table is needed, but changes to this table are not
            # replicated across the cluster, so this update needs to run in
            # all the nodes. More info at
            # http://galeracluster.com/documentation-webpages/userchanges.html
            release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
            if release < 'bionic':
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "******"PASSWORD( %s ) WHERE user = %s;")
            else:
                # PXC 5.7 (introduced in Bionic) uses authentication_string
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
                                     "authentication_string = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            cursor.close()

        # check the password was changed
        try:
            self.connect(user=username, password=new_passwd)
            self.execute('select 1;')
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using new password: '
                                         '%s') % str(ex), ex)

        if not is_leader():
            log('Only the leader can set a new password in the relation',
                level=DEBUG)
            return

        for key in self.passwd_keys(rel_username):
            _password = leader_get(key)
            if _password:
                log('Updating password for %s (%s)' % (key, rel_username),
                    level=DEBUG)
                leader_set(settings={key: new_passwd})
Code example #33
def mon_relation():
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()

    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        if ceph.is_bootstrapped():
            # The ceph-mon unit chosen for handling broker requests is based on
            # internal Ceph MON leadership and not Juju leadership.  To update
            # the rbd-mirror relation on all ceph-mon units after pool creation
            # the unit handling the broker request will update a nonce on the
            # mon relation.
            notify_rbd_mirrors()
        else:
            status_set('maintenance', 'Bootstrapping MON cluster')
            # the following call raises an exception
            # if it can't add the keyring
            try:
                ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
            except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
                log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
                exit(0)
            ceph.wait_for_bootstrap()
            ceph.wait_for_quorum()
            ceph.create_keyrings()
            if cmp_pkgrevno('ceph', '12.0.0') >= 0:
                status_set('maintenance', 'Bootstrapping Ceph MGR')
                ceph.bootstrap_manager()
            if ceph.monitor_key_exists('admin', 'autotune'):
                autotune = ceph.monitor_key_get('admin', 'autotune')
            else:
                ceph.wait_for_manager()
                autotune = config('pg-autotune')
                if (cmp_pkgrevno('ceph', '14.2.0') >= 0
                        and (autotune == 'true' or autotune == 'auto')):
                    ceph.monitor_key_set('admin', 'autotune', 'true')
                else:
                    ceph.monitor_key_set('admin', 'autotune', 'false')
            if ceph.monitor_key_get('admin', 'autotune') == 'true':
                try:
                    mgr_enable_module('pg_autoscaler')
                except subprocess.CalledProcessError:
                    log(
                        "Failed to initialize autoscaler, it must be "
                        "initialized on the last monitor",
                        level='info')
            # If we can and want to
            if is_leader() and config('customize-failure-domain'):
                # But only if the environment supports it
                if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                    cmds = [
                        "ceph osd getcrushmap -o /tmp/crush.map",
                        "crushtool -d /tmp/crush.map| "
                        "sed 's/step chooseleaf firstn 0 type host/step "
                        "chooseleaf firstn 0 type rack/' > "
                        "/tmp/crush.decompiled",
                        "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                        "crushtool -i /tmp/crush.map --test",
                        "ceph osd setcrushmap -i /tmp/crush.map"
                    ]
                    for cmd in cmds:
                        try:
                            subprocess.check_call(cmd, shell=True)
                        except subprocess.CalledProcessError as e:
                            log("Failed to modify crush map:", level='error')
                            log("Cmd: {}".format(cmd), level='error')
                            log("Error: {}".format(e.output), level='error')
                            break
                else:
                    log("Your Juju environment doesn't"
                        "have support for Availability Zones")
            notify_osds()
            notify_radosgws()
            notify_client()
            notify_rbd_mirrors()
            notify_prometheus()
    else:
        log('Not enough mons ({}), punting.'.format(len(get_mon_hosts())))
Code example #34
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()
    if config('enable-dashboard') and cmp_pkgrevno('ceph', '14.2.0') >= 0:
        apt_install(packages=filter_installed_packages(['ceph-mgr-dashboard']))

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                if config('fsid'):
                    fsid = config('fsid')
                else:
                    fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                try:
                    leader_set(opts)
                    status_set('maintenance',
                               'Created FSID and Monitor Secret')
                    log("Settings for the cluster are: {}".format(opts))
                except Exception as e:
                    # we're probably not the leader; an exception occurred,
                    # let's log it anyway.
                    log("leader_set failed: {}".format(str(e)))
        elif (cfg.changed('no-bootstrap')
              and is_relation_made('bootstrap-source')):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()

        # This will only ensure that we are enabled if the 'pg-autotune' option
        # is explicitly set to 'true', and not if it is 'auto' or 'false'
        if (config('pg-autotune') == 'true'
                and cmp_pkgrevno('ceph', '14.2.0') >= 0):
            # The return value of the enable_module call will tell us if the
            # module was already enabled, in which case, we don't need to
            # re-configure the already configured pools
            if mgr_enable_module('pg_autoscaler'):
                ceph.monitor_key_set('admin', 'autotune', 'true')
                for pool in ceph.list_pools():
                    enable_pg_autoscale('admin', pool)
        if (config('enable-dashboard')
                and cmp_pkgrevno('ceph', '14.2.0') >= 0):
            log("enable-dashboard: {}".format(str(config('enable-dashboard'))))
            if mgr_enable_module('dashboard'):
                pass
            log("configure-dashboard")
            configure_dashboard()
    # unconditionally verify that the fsid and monitor-secret are set now
    # otherwise we exit until a leader does this.
    if leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        # the following call raises an exception if it can't add the keyring
        try:
            ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
            log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
            return
        ceph.wait_for_bootstrap()
        ceph.wait_for_quorum()
        ceph.create_keyrings()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()

    # Update client relations
    notify_client()
Code example #35
def config_changed():
    _decode_cert("ssl_ca")
    if is_leader():
        update_relations()
    update_status()
Code example #36
File: k8s.py  Project: schubergphilis/kube-compose
def status_set(level, message):
    '''Output status message with leadership information.'''
    if is_leader():
        message = '(master) {0}'.format(message)
    hookenv.status_set(level, message)
Code example #37
def render_files(reldata=None):
    '''Use jinja templating to render the docker-compose.yml and master.json
    file to contain the dynamic data for the configuration files.'''
    context = {}
    # Load the context data with SDN data.
    context.update(gather_sdn_data())
    # Add the charm configuration data to the context.
    context.update(hookenv.config())
    if reldata:
        connection_string = reldata.get_connection_string()
        # Define where the etcd tls files will be kept.
        etcd_dir = '/etc/ssl/etcd'
        # Create paths to the etcd client ca, key, and cert file locations.
        ca = os.path.join(etcd_dir, 'client-ca.pem')
        key = os.path.join(etcd_dir, 'client-key.pem')
        cert = os.path.join(etcd_dir, 'client-cert.pem')
        # Save the client credentials (in relation data) to the paths provided.
        reldata.save_client_credentials(key, cert, ca)
        # Update the context so the template has the etcd information.
        context.update({
            'etcd_dir': etcd_dir,
            'connection_string': connection_string,
            'etcd_ca': ca,
            'etcd_key': key,
            'etcd_cert': cert
        })

    charm_dir = hookenv.charm_dir()
    rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
    if not os.path.exists(rendered_kube_dir):
        os.makedirs(rendered_kube_dir)
    rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
    if not os.path.exists(rendered_manifest_dir):
        os.makedirs(rendered_manifest_dir)

    # Update the context with extra values, arch, manifest dir, and private IP.
    context.update({
        'arch': arch(),
        'master_address': leader_get('master-address'),
        'manifest_directory': rendered_manifest_dir,
        'public_address': hookenv.unit_get('public-address'),
        'private_address': hookenv.unit_get('private-address')
    })

    # Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
    target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
    # Render the files/kubernetes/docker-compose.yml file that contains the
    # definition for kubelet and proxy.
    render('docker-compose.yml', target, context)

    if is_leader():
        # Source: https://github.com/kubernetes/...master/cluster/images/hyperkube  # noqa
        target = os.path.join(rendered_manifest_dir, 'master.json')
        # Render the files/manifests/master.json that contains parameters for
        # the apiserver, controller, and controller-manager
        render('master.json', target, context)
        # Source: ...cluster/addons/dns/skydns-svc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
        # Render files/kubernetes/kubedns-svc.yaml for the DNS service.
        render('kubedns-svc.yaml', target, context)
        # Source: ...cluster/addons/dns/skydns-rc.yaml.in
        target = os.path.join(rendered_manifest_dir, 'kubedns-rc.yaml')
        # Render files/kubernetes/kubedns-rc.yaml for the DNS pod.
        render('kubedns-rc.yaml', target, context)
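
For reference, a rough sketch of what a jinja2-based render(source, target,
context) helper could look like; the charm itself presumably uses
charmhelpers.core.templating.render, so the 'templates' directory and the
exact behaviour shown here are assumptions.

import os
from jinja2 import Environment, FileSystemLoader

def render(source, target, context, templates_dir='templates'):
    # Load the named template from the charm's templates directory (assumed
    # layout) and write the rendered output to the target path.
    env = Environment(loader=FileSystemLoader(templates_dir))
    content = env.get_template(source).render(**context)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, 'w') as fp:
        fp.write(content)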
Code Example #38
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    debug_port = hookenv.config('debug-port')

    layer.caas_base.pod_spec_set(
        {
            'version':
            2,
            'serviceAccount': {
                'global':
                True,
                'rules': [
                    {
                        'apiGroups': ['*'],
                        'resources': ['*'],
                        'verbs': ['*']
                    },
                    {
                        'nonResourceURLs': ['*'],
                        'verbs': ['*']
                    },
                ],
            },
            'containers': [{
                'name':
                'metacontroller',
                'command': [
                    '/usr/bin/metacontroller',
                    '--logtostderr',
                    '-v=4',
                    '--discovery-interval=20s',
                    f'--debug-addr={debug_port}',
                ],
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'ports': [{
                    'name': 'debug-http',
                    'containerPort': debug_port
                }],
            }],
        },
        {
            'kubernetesResources': {
                'customResourceDefinitions': {
                    crd['metadata']['name']: crd['spec']
                    for crd in yaml.safe_load_all(
                        Path("files/crds.yaml").read_text())
                }
            }
        },
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Code Example #39
def config_changed():
    # if we are paused, delay doing any config changed hooks.
    # It is forced on the resume.
    if ch_utils.is_unit_paused_set():
        hookenv.log("Unit is pause or upgrading. Skipping config_changed",
                    hookenv.WARNING)
        return

    # neutron-server runs if < juno. Neutron-server creates mysql tables
    # which will subsequently cause db migrations to fail if >= juno.
    # Disable neutron-server if >= juno
    if ch_utils.CompareOpenStackReleases(
            ch_utils.os_release('nova-common')) >= 'juno':
        try:
            ch_host.service_pause('neutron-server')
        except ValueError:
            # neutron-server service not installed, ignore.
            pass
    if hookenv.config('prefer-ipv6'):
        hookenv.status_set('maintenance', 'configuring ipv6')
        ncc_utils.setup_ipv6()
        ch_utils.sync_db_with_multi_ipv6_addresses(
            hookenv.config('database'),
            hookenv.config('database-user'),
            relation_prefix='nova')

    global CONFIGS
    if not hookenv.config('action-managed-upgrade'):
        if ch_utils.openstack_upgrade_available('nova-common'):
            hookenv.status_set('maintenance', 'Running openstack upgrade')
            ncc_utils.do_openstack_upgrade(CONFIGS)
            for rid in hookenv.relation_ids('neutron-api'):
                neutron_api_relation_joined(rid=rid, remote_restart=True)
            # NOTE(jamespage): Force re-fire of shared-db joined hook
            # to ensure that nova_api database is setup if required.
            for r_id in hookenv.relation_ids('shared-db'):
                db_joined(relation_id=r_id)

    ncc_utils.save_script_rc()
    configure_https()
    CONFIGS.write_all()

    # NOTE(jamespage): deal with any changes to the console and serial
    #                  console configuration options
    filtered = ch_fetch.filter_installed_packages(
        ncc_utils.determine_packages())
    if filtered:
        ch_fetch.apt_install(filtered, fatal=True)

    for r_id in hookenv.relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in hookenv.relation_ids('cluster'):
        cluster_joined(rid)
    update_nova_relation()

    update_nrpe_config()

    # If the region value has changed, notify the cloud-compute relations
    # to ensure the value is propagated to the compute nodes.
    if ch_utils.config_value_changed('region'):
        for rid in hookenv.relation_ids('cloud-compute'):
            set_region_on_relation_from_config(rid)

    ncc_utils.update_aws_compat_services()

    if hookenv.is_leader() and not ncc_utils.get_shared_metadatasecret():
        ncc_utils.set_shared_metadatasecret()
    for rid in hookenv.relation_ids('ha'):
        ha_joined(rid)
    if (not ch_utils.is_unit_paused_set() and
            ncc_utils.is_console_auth_enabled()):
        ch_host.service_resume('nova-consoleauth')
Code Example #40
 def __call__(self):
     ctxt = {'token_flush': (not fernet_enabled() and is_leader())}
     return ctxt
Code Example #41
def analytics_changed_departed():
    update_charm_status()
    if is_leader():
        update_southbound_relations()
Code Example #42
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance("configuring container")

    image_info = layer.docker_resource.get_info("oci-image")

    service_name = hookenv.service_name()
    namespace = os.environ["JUJU_MODEL_NAME"]
    public_url = hookenv.config("public-url")
    port = hookenv.config("port")
    oidc_scopes = hookenv.config("oidc-scopes")

    layer.caas_base.pod_spec_set({
        "version":
        2,
        "service": {
            "annotations": {
                "getambassador.io/config":
                yaml.dump_all([
                    {
                        "apiVersion": "ambassador/v1",
                        "kind": "Mapping",
                        "name": "oidc-gatekeeper",
                        "prefix": "/oidc",
                        "service": f"{service_name}.{namespace}:{port}",
                        "timeout_ms": 30000,
                        "bypass_auth": True,
                    },
                    {
                        "apiVersion": "ambassador/v1",
                        "kind": "AuthService",
                        "name": "oidc-gatekeeper-auth",
                        "auth_service": f"{service_name}.{namespace}:{port}",
                        "allowed_authorization_headers": ["kubeflow-userid"],
                    },
                ])
            }
        },
        "containers": [{
            "name": "oidc-gatekeeper",
            "imageDetails": {
                "imagePath": image_info.registry_path,
                "username": image_info.username,
                "password": image_info.password,
            },
            "ports": [{
                "name": "http",
                "containerPort": port
            }],
            "config": {
                "CLIENT_ID": hookenv.config('client-id'),
                "CLIENT_SECRET": hookenv.config("client-secret"),
                "DISABLE_USERINFO": True,
                "OIDC_PROVIDER": f"{public_url}/dex",
                "OIDC_SCOPES": oidc_scopes,
                "SERVER_PORT": port,
                "SELF_URL": f"{public_url}/oidc",
                "USERID_HEADER": "kubeflow-userid",
                "USERID_PREFIX": "",
                "STORE_PATH": "bolt.db",
                "REDIRECT_URL": f"{public_url}/oidc/login/oidc",
            },
        }],
    })

    layer.status.maintenance("creating container")
    set_flag("charm.started")
Code Example #43
def update_status():
    # TODO: try to deploy openstack code again if it was not done
    # update_service_ips can be called only on the leader; notify the controller only if something was updated
    if is_leader() and utils.update_service_ips():
        _notify_controller()
Code Example #44
def upgrade_charm():
    if is_leader() and not leader_get('namespace_tenants') == 'True':
        leader_set(namespace_tenants=False)
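
The string comparison above matters because Juju leader settings are stored
as strings: a boolean written with leader_set comes back from leader_get as
'True' or 'False'. A small illustrative sketch (values assumed):

leader_set(namespace_tenants=False)
leader_get('namespace_tenants')   # returns the string 'False', not the bool False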
Code Example #45
def refresh_secrets(*args):
    """Refresh secret_id's and re-issue tokens for secret_id retrieval
    on secrets end-points"""
    if not hookenv.is_leader():
        hookenv.action_fail('Please run action on lead unit')
    set_flag('secrets.refresh')
Code Example #46
def master_relation_joined(relation_id=None):
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True,
                                   master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True,
                              master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER)
        multisite.modify_zone(zone, access_key=access_key, secret=secret)
        leader_set(access_key=access_key, secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

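    # Re-send the credentials: if the system user was created above,
    # access_key and secret now hold the newly generated values.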
    relation_set(relation_id=relation_id, access_key=access_key, secret=secret)
Code Example #47
def contrail_auth_joined():
    if is_leader():
        update_relations(rid=relation_id())
    update_status()
Code Example #48
def slave_relation_changed(relation_id=None, unit=None):
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'), master_data.get('zonegroup'),
                master_data.get('access_key'), master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]

    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'], master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False,
                              master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
Code Example #49
def bootstrap_source_relation_changed():
    """Handles relation data changes on the bootstrap-source relation.

    The bootstrap-source relation is used to share remote bootstrap
    information with the ceph-mon charm. It exchanges the remote
    ceph-public-addresses used by the mons, the fsid, and the
    monitor-secret.
    """
    if not config('no-bootstrap'):
        status_set(
            'blocked', 'Cannot join the bootstrap-source relation when '
            'no-bootstrap is False')
        return

    if not is_leader():
        log('Deferring leader-setting updates to the leader unit')
        return

    curr_fsid = leader_get('fsid')
    curr_secret = leader_get('monitor-secret')
    for relid in relation_ids('bootstrap-source'):
        for unit in related_units(relid=relid):
            mon_secret = relation_get('monitor-secret', unit, relid)
            fsid = relation_get('fsid', unit, relid)

            if not (mon_secret and fsid):
                log('Relation data is not ready as the fsid or the '
                    'monitor-secret are missing from the relation: '
                    'mon_secret = {} and fsid = {} '.format(mon_secret, fsid))
                continue

            if not (curr_fsid or curr_secret):
                curr_fsid = fsid
                curr_secret = mon_secret
            else:
                # The fsids and secrets need to match or the local monitors
                # will fail to join the mon cluster. If they don't,
                # bail because something needs to be investigated.
                assert curr_fsid == fsid, \
                    "bootstrap fsid '{}' != current fsid '{}'".format(
                        fsid, curr_fsid)
                assert curr_secret == mon_secret, \
                    "bootstrap secret '{}' != current secret '{}'".format(
                        mon_secret, curr_secret)
            opts = {
                'fsid': fsid,
                'monitor-secret': mon_secret,
            }
            try:
                leader_set(opts)
                log('Updating leader settings for fsid and monitor-secret '
                    'from remote relation data: {}'.format(opts))
            except Exception as e:
                # We're probably not the leader and an exception occurred;
                # log it anyway.
                log("leader_set failed: {}".format(str(e)))

    # The leader unit needs to bootstrap itself as it won't receive the
    # leader-settings-changed hook elsewhere.
    if curr_fsid:
        mon_relation()
Code Example #50
def contrail_controller_joined():
    if not is_leader():
        return

    data = _get_orchestrator_info()
    relation_set(**data)
Code Example #51
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')

    crd = yaml.safe_load(Path('files/crds.yaml').read_text())

    layer.caas_base.pod_spec_set(
        {
            'version':
            2,
            'serviceAccount': {
                'rules': [
                    {
                        'apiGroups': ['kubeflow.org'],
                        'resources': [
                            'pytorchjobs',
                            'pytorchjobs/status',
                            'pytorchjobs/finalizers',
                        ],
                        'verbs': ['*'],
                    },
                    {
                        'apiGroups': ['apiextensions.k8s.io'],
                        'resources': ['customresourcedefinitions'],
                        'verbs': ['*'],
                    },
                    {
                        'apiGroups': [''],
                        'resources':
                        ['pods', 'services', 'endpoints', 'events'],
                        'verbs': ['*'],
                    },
                ]
            },
            'containers': [{
                'name':
                'pytorch-operator',
                'imageDetails': {
                    'imagePath': image_info.registry_path,
                    'username': image_info.username,
                    'password': image_info.password,
                },
                'command': [
                    '/pytorch-operator.v1',
                    '--alsologtostderr',
                    '-v=1',
                    '--monitoring-port=8443',
                ],
                'config': {
                    'MY_POD_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                    'MY_POD_NAME': {
                        "field": {
                            "path": "metadata.name",
                            "api-version": "v1"
                        }
                    },
                },
            }],
        },
        {
            'kubernetesResources': {
                'customResourceDefinitions': {
                    crd['metadata']['name']: crd['spec']
                }
            }
        },
    )

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Code Example #52
def start_charm():
    if not hookenv.is_leader():
        hookenv.log("This unit is not a leader.")
        return False

    layer.status.maintenance('configuring container')

    image_info = layer.docker_resource.get_info('oci-image')
    service_name = hookenv.service_name()

    port = hookenv.config('port')

    layer.caas_base.pod_spec_set({
        'version':
        2,
        'serviceAccount': {
            'global':
            True,
            'rules': [
                {
                    'apiGroups': [''],
                    'resources': ['pods', 'pods/exec', 'pods/log'],
                    'verbs': ['get', 'list', 'watch'],
                },
                {
                    'apiGroups': [''],
                    'resources': ['secrets'],
                    'verbs': ['get']
                },
                {
                    'apiGroups': ['argoproj.io'],
                    'resources': ['workflows', 'workflows/finalizers'],
                    'verbs': ['get', 'list', 'watch'],
                },
            ],
        },
        'service': {
            'annotations': {
                'getambassador.io/config':
                yaml.dump_all([{
                    'apiVersion': 'ambassador/v0',
                    'kind': 'Mapping',
                    'name': 'argo-ui',
                    'prefix': '/argo/',
                    'service': f'{service_name}:{port}',
                    'timeout_ms': 30000,
                }])
            }
        },
        'containers': [{
            'name': 'argo-ui',
            'imageDetails': {
                'imagePath': image_info.registry_path,
                'username': image_info.username,
                'password': image_info.password,
            },
            'config': {
                'ARGO_NAMESPACE': os.environ['JUJU_MODEL_NAME'],
                'IN_CLUSTER': 'true',
                'BASE_HREF': '/argo/',
            },
            'ports': [{
                'name': 'http-ui',
                'containerPort': port
            }],
            'kubernetes': {
                'readinessProbe': {
                    'httpGet': {
                        'path': '/',
                        'port': port
                    }
                }
            },
        }],
    })

    layer.status.maintenance('creating container')
    set_flag('charm.started')
Code Example #53
File: sentry.py Project: axinojolais/layer-sentry
def migrate_sentry_db_on_upgrade():
    if is_leader():
        status_set('maintenance', 'Migrating Sentry DB')
        call('{} upgrade --noinput'.format(SENTRY_BIN).split())
        status_set('active', 'Sentry DB migration complete')
Code Example #54
def contrail_controller_joined():
    settings = {"private-address": get_ip(), "port": 8082}
    relation_set(relation_settings=settings)
    if is_leader():
        update_southbound_relations(rid=relation_id())
Code Example #55
 def update_pools(self):
     # designate-manage communicates with designate via message bus so no
     # need to set OS_ vars
     if hookenv.is_leader():
         cmd = ['designate-manage', 'pool', 'update']
         subprocess.check_call(cmd)
Code Example #56
def set_cluster_ip():
    if is_leader():
        leader_set(settings={'cluster-ip': unit_private_ip()})
    else:
        log('Not the leader, passing')
Code Example #57
 def pool_manager_cache_sync(self):
     if not self.pool_manager_cache_sync_done() and hookenv.is_leader():
         sync_cmd = "designate-manage pool-manager-cache sync"
         subprocess.check_call(sync_cmd.split(), timeout=60)
         hookenv.leader_set({'pool-manager-cache-sync-done': True})
         self.restart_all()
Code Example #58
def update_status():
    if not is_leader():
        return
    changed = update_service_ips()
    if changed:
        _notify_controller()
Code Example #59
File: api_charm.py Project: gnuoy/charm-template-api
 def db_sync(self):
     """Perform a database sync using the command defined in the
     self.sync_cmd attribute. The services defined in self.services are
     restarted after the database sync.
     """
     if not self.db_sync_done() and hookenv.is_leader():
         # Plausible completion based on the docstring above (the original
         # example is truncated here): run the configured sync command,
         # record completion in the leader settings, then restart services.
         subprocess.check_call(self.sync_cmd)
         hookenv.leader_set({'db-sync-done': True})
         self.restart_all()
Code Example #60
def analyticsdb_joined():
    settings = {"private-address": get_ip()}
    relation_set(relation_settings=settings)
    if is_leader():
        update_northbound_relations(rid=relation_id())